| File: | src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp |
| Warning: | line 8981, column 5: Called C++ object pointer is null |
| 1 | //===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===// | |||
| 2 | // | |||
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
| 4 | // See https://llvm.org/LICENSE.txt for license information. | |||
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
| 6 | // | |||
| 7 | //===----------------------------------------------------------------------===// | |||
| 8 | // | |||
| 9 | // This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops | |||
| 10 | // and generates target-independent LLVM-IR. | |||
| 11 | // The vectorizer uses the TargetTransformInfo analysis to estimate the costs | |||
| 12 | // of instructions in order to estimate the profitability of vectorization. | |||
| 13 | // | |||
| 14 | // The loop vectorizer combines consecutive loop iterations into a single | |||
| 15 | // 'wide' iteration. After this transformation the index is incremented | |||
| 16 | // by the SIMD vector width, and not by one. | |||
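| | // Illustrative sketch (not part of the original header): with VF = 4, a | |||
| | // scalar loop such as | |||
| | //   for (int i = 0; i < n; ++i) a[i] = b[i] + 1.0f; | |||
| | // is rewritten so each wide iteration computes a[i..i+3] with <4 x float> | |||
| | // operations and increments i by 4; leftover iterations run in a scalar | |||
| | // remainder (epilogue) loop. | |||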
| 17 | // | |||
| 18 | // This pass has four parts: | |||
| 19 | // 1. The main loop pass that drives the different parts. | |||
| 20 | // 2. LoopVectorizationLegality - A unit that checks for the legality | |||
| 21 | // of the vectorization. | |||
| 22 | // 3. InnerLoopVectorizer - A unit that performs the actual | |||
| 23 | // widening of instructions. | |||
| 24 | // 4. LoopVectorizationCostModel - A unit that checks for the profitability | |||
| 25 | // of vectorization. It decides on the optimal vector width, which | |||
| 26 | // can be one, if vectorization is not profitable. | |||
| 27 | // | |||
| 28 | // There is a development effort going on to migrate the loop vectorizer to the | |||
| 29 | // VPlan infrastructure and to introduce outer loop vectorization support (see | |||
| 30 | // docs/Proposal/VectorizationPlan.rst and | |||
| 31 | // http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this | |||
| 32 | // purpose, we temporarily introduced the VPlan-native vectorization path: an | |||
| 33 | // alternative vectorization path that is natively implemented on top of the | |||
| 34 | // VPlan infrastructure. See EnableVPlanNativePath for enabling. | |||
| 35 | // | |||
| 36 | //===----------------------------------------------------------------------===// | |||
| 37 | // | |||
| 38 | // The reduction-variable vectorization is based on the paper: | |||
| 39 | // D. Nuzman and R. Henderson. Multi-platform Auto-vectorization. | |||
| 40 | // | |||
| 41 | // Variable uniformity checks are inspired by: | |||
| 42 | // Karrenberg, R. and Hack, S. Whole Function Vectorization. | |||
| 43 | // | |||
| 44 | // The interleaved access vectorization is based on the paper: | |||
| 45 | // Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved | |||
| 46 | // Data for SIMD | |||
| 47 | // | |||
| 48 | // Other ideas/concepts are from: | |||
| 49 | // A. Zaks and D. Nuzman. Autovectorization in GCC-two years later. | |||
| 50 | // | |||
| 51 | // S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of | |||
| 52 | // Vectorizing Compilers. | |||
| 53 | // | |||
| 54 | //===----------------------------------------------------------------------===// | |||
| 55 | ||||
| 56 | #include "llvm/Transforms/Vectorize/LoopVectorize.h" | |||
| 57 | #include "LoopVectorizationPlanner.h" | |||
| 58 | #include "VPRecipeBuilder.h" | |||
| 59 | #include "VPlan.h" | |||
| 60 | #include "VPlanHCFGBuilder.h" | |||
| 61 | #include "VPlanPredicator.h" | |||
| 62 | #include "VPlanTransforms.h" | |||
| 63 | #include "llvm/ADT/APInt.h" | |||
| 64 | #include "llvm/ADT/ArrayRef.h" | |||
| 65 | #include "llvm/ADT/DenseMap.h" | |||
| 66 | #include "llvm/ADT/DenseMapInfo.h" | |||
| 67 | #include "llvm/ADT/Hashing.h" | |||
| 68 | #include "llvm/ADT/MapVector.h" | |||
| 69 | #include "llvm/ADT/None.h" | |||
| 70 | #include "llvm/ADT/Optional.h" | |||
| 71 | #include "llvm/ADT/STLExtras.h" | |||
| 72 | #include "llvm/ADT/SmallPtrSet.h" | |||
| 73 | #include "llvm/ADT/SmallSet.h" | |||
| 74 | #include "llvm/ADT/SmallVector.h" | |||
| 75 | #include "llvm/ADT/Statistic.h" | |||
| 76 | #include "llvm/ADT/StringRef.h" | |||
| 77 | #include "llvm/ADT/Twine.h" | |||
| 78 | #include "llvm/ADT/iterator_range.h" | |||
| 79 | #include "llvm/Analysis/AssumptionCache.h" | |||
| 80 | #include "llvm/Analysis/BasicAliasAnalysis.h" | |||
| 81 | #include "llvm/Analysis/BlockFrequencyInfo.h" | |||
| 82 | #include "llvm/Analysis/CFG.h" | |||
| 83 | #include "llvm/Analysis/CodeMetrics.h" | |||
| 84 | #include "llvm/Analysis/DemandedBits.h" | |||
| 85 | #include "llvm/Analysis/GlobalsModRef.h" | |||
| 86 | #include "llvm/Analysis/LoopAccessAnalysis.h" | |||
| 87 | #include "llvm/Analysis/LoopAnalysisManager.h" | |||
| 88 | #include "llvm/Analysis/LoopInfo.h" | |||
| 89 | #include "llvm/Analysis/LoopIterator.h" | |||
| 90 | #include "llvm/Analysis/MemorySSA.h" | |||
| 91 | #include "llvm/Analysis/OptimizationRemarkEmitter.h" | |||
| 92 | #include "llvm/Analysis/ProfileSummaryInfo.h" | |||
| 93 | #include "llvm/Analysis/ScalarEvolution.h" | |||
| 94 | #include "llvm/Analysis/ScalarEvolutionExpressions.h" | |||
| 95 | #include "llvm/Analysis/TargetLibraryInfo.h" | |||
| 96 | #include "llvm/Analysis/TargetTransformInfo.h" | |||
| 97 | #include "llvm/Analysis/VectorUtils.h" | |||
| 98 | #include "llvm/IR/Attributes.h" | |||
| 99 | #include "llvm/IR/BasicBlock.h" | |||
| 100 | #include "llvm/IR/CFG.h" | |||
| 101 | #include "llvm/IR/Constant.h" | |||
| 102 | #include "llvm/IR/Constants.h" | |||
| 103 | #include "llvm/IR/DataLayout.h" | |||
| 104 | #include "llvm/IR/DebugInfoMetadata.h" | |||
| 105 | #include "llvm/IR/DebugLoc.h" | |||
| 106 | #include "llvm/IR/DerivedTypes.h" | |||
| 107 | #include "llvm/IR/DiagnosticInfo.h" | |||
| 108 | #include "llvm/IR/Dominators.h" | |||
| 109 | #include "llvm/IR/Function.h" | |||
| 110 | #include "llvm/IR/IRBuilder.h" | |||
| 111 | #include "llvm/IR/InstrTypes.h" | |||
| 112 | #include "llvm/IR/Instruction.h" | |||
| 113 | #include "llvm/IR/Instructions.h" | |||
| 114 | #include "llvm/IR/IntrinsicInst.h" | |||
| 115 | #include "llvm/IR/Intrinsics.h" | |||
| 116 | #include "llvm/IR/LLVMContext.h" | |||
| 117 | #include "llvm/IR/Metadata.h" | |||
| 118 | #include "llvm/IR/Module.h" | |||
| 119 | #include "llvm/IR/Operator.h" | |||
| 120 | #include "llvm/IR/PatternMatch.h" | |||
| 121 | #include "llvm/IR/Type.h" | |||
| 122 | #include "llvm/IR/Use.h" | |||
| 123 | #include "llvm/IR/User.h" | |||
| 124 | #include "llvm/IR/Value.h" | |||
| 125 | #include "llvm/IR/ValueHandle.h" | |||
| 126 | #include "llvm/IR/Verifier.h" | |||
| 127 | #include "llvm/InitializePasses.h" | |||
| 128 | #include "llvm/Pass.h" | |||
| 129 | #include "llvm/Support/Casting.h" | |||
| 130 | #include "llvm/Support/CommandLine.h" | |||
| 131 | #include "llvm/Support/Compiler.h" | |||
| 132 | #include "llvm/Support/Debug.h" | |||
| 133 | #include "llvm/Support/ErrorHandling.h" | |||
| 134 | #include "llvm/Support/InstructionCost.h" | |||
| 135 | #include "llvm/Support/MathExtras.h" | |||
| 136 | #include "llvm/Support/raw_ostream.h" | |||
| 137 | #include "llvm/Transforms/Utils/BasicBlockUtils.h" | |||
| 138 | #include "llvm/Transforms/Utils/InjectTLIMappings.h" | |||
| 139 | #include "llvm/Transforms/Utils/LoopSimplify.h" | |||
| 140 | #include "llvm/Transforms/Utils/LoopUtils.h" | |||
| 141 | #include "llvm/Transforms/Utils/LoopVersioning.h" | |||
| 142 | #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h" | |||
| 143 | #include "llvm/Transforms/Utils/SizeOpts.h" | |||
| 144 | #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h" | |||
| 145 | #include <algorithm> | |||
| 146 | #include <cassert> | |||
| 147 | #include <cstdint> | |||
| 148 | #include <cstdlib> | |||
| 149 | #include <functional> | |||
| 150 | #include <iterator> | |||
| 151 | #include <limits> | |||
| 152 | #include <memory> | |||
| 153 | #include <string> | |||
| 154 | #include <tuple> | |||
| 155 | #include <utility> | |||
| 156 | ||||
| 157 | using namespace llvm; | |||
| 158 | ||||
| 159 | #define LV_NAME "loop-vectorize" | |||
| 160 | #define DEBUG_TYPE LV_NAME | |||
| 161 | ||||
| 162 | #ifndef NDEBUG | |||
| 163 | const char VerboseDebug[] = DEBUG_TYPE "-verbose"; | |||
| 164 | #endif | |||
| 165 | ||||
| 166 | /// @{ | |||
| 167 | /// Metadata attribute names | |||
| 168 | const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all"; | |||
| 169 | const char LLVMLoopVectorizeFollowupVectorized[] = | |||
| 170 | "llvm.loop.vectorize.followup_vectorized"; | |||
| 171 | const char LLVMLoopVectorizeFollowupEpilogue[] = | |||
| 172 | "llvm.loop.vectorize.followup_epilogue"; | |||
| 173 | /// @} | |||
| 174 | ||||
| 175 | STATISTIC(LoopsVectorized, "Number of loops vectorized"); | |||
| 176 | STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization"); | |||
| 177 | STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized"); | |||
| 178 | ||||
| 179 | static cl::opt<bool> EnableEpilogueVectorization( | |||
| 180 | "enable-epilogue-vectorization", cl::init(true), cl::Hidden, | |||
| 181 | cl::desc("Enable vectorization of epilogue loops.")); | |||
| 182 | ||||
| 183 | static cl::opt<unsigned> EpilogueVectorizationForceVF( | |||
| 184 | "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden, | |||
| 185 | cl::desc("When epilogue vectorization is enabled, and a value greater than " | |||
| 186 | "1 is specified, forces the given VF for all applicable epilogue " | |||
| 187 | "loops.")); | |||
| 188 | ||||
| 189 | static cl::opt<unsigned> EpilogueVectorizationMinVF( | |||
| 190 | "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden, | |||
| 191 | cl::desc("Only loops with vectorization factor equal to or larger than " | |||
| 192 | "the specified value are considered for epilogue vectorization.")); | |||
| 193 | ||||
| 194 | /// Loops with a known constant trip count below this number are vectorized only | |||
| 195 | /// if no scalar iteration overheads are incurred. | |||
| 196 | static cl::opt<unsigned> TinyTripCountVectorThreshold( | |||
| 197 | "vectorizer-min-trip-count", cl::init(16), cl::Hidden, | |||
| 198 | cl::desc("Loops with a constant trip count that is smaller than this " | |||
| 199 | "value are vectorized only if no scalar iteration overheads " | |||
| 200 | "are incurred.")); | |||
| 201 | ||||
| 202 | static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold( | |||
| 203 | "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden, | |||
| 204 | cl::desc("The maximum allowed number of runtime memory checks with a " | |||
| 205 | "vectorize(enable) pragma.")); | |||
| 206 | ||||
| 207 | // Option prefer-predicate-over-epilogue indicates that an epilogue is | |||
| 208 | // undesired and that predication is preferred. I.e., the | |||
| 209 | // vectorizer will try to fold the tail-loop (epilogue) into the vector body | |||
| 210 | // and predicate the instructions accordingly. If tail-folding fails, there are | |||
| 211 | // different fallback strategies depending on these values: | |||
| 212 | namespace PreferPredicateTy { | |||
| 213 | enum Option { | |||
| 214 | ScalarEpilogue = 0, | |||
| 215 | PredicateElseScalarEpilogue, | |||
| 216 | PredicateOrDontVectorize | |||
| 217 | }; | |||
| 218 | } // namespace PreferPredicateTy | |||
| 219 | ||||
| 220 | static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue( | |||
| 221 | "prefer-predicate-over-epilogue", | |||
| 222 | cl::init(PreferPredicateTy::ScalarEpilogue), | |||
| 223 | cl::Hidden, | |||
| 224 | cl::desc("Tail-folding and predication preferences over creating a scalar " | |||
| 225 | "epilogue loop."), | |||
| 226 | cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue, | |||
| 227 | "scalar-epilogue", | |||
| 228 | "Don't tail-predicate loops, create scalar epilogue"), | |||
| 229 | clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue, | |||
| 230 | "predicate-else-scalar-epilogue", | |||
| 231 | "prefer tail-folding, create scalar epilogue if tail " | |||
| 232 | "folding fails."), | |||
| 233 | clEnumValN(PreferPredicateTy::PredicateOrDontVectorize, | |||
| 234 | "predicate-dont-vectorize", | |||
| 235 | "prefers tail-folding, don't attempt vectorization if " | |||
| 236 | "tail-folding fails."))); | |||
| 237 | ||||
| 238 | static cl::opt<bool> MaximizeBandwidth( | |||
| 239 | "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden, | |||
| 240 | cl::desc("Maximize bandwidth when selecting vectorization factor which " | |||
| 241 | "will be determined by the smallest type in loop.")); | |||
| 242 | ||||
| 243 | static cl::opt<bool> EnableInterleavedMemAccesses( | |||
| 244 | "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden, | |||
| 245 | cl::desc("Enable vectorization on interleaved memory accesses in a loop")); | |||
| 246 | ||||
| 247 | /// An interleave-group may need masking if it resides in a block that needs | |||
| 248 | /// predication, or in order to mask away gaps. | |||
| 249 | static cl::opt<bool> EnableMaskedInterleavedMemAccesses( | |||
| 250 | "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden, | |||
| 251 | cl::desc("Enable vectorization on masked interleaved memory accesses in a loop")); | |||
| 252 | ||||
| 253 | static cl::opt<unsigned> TinyTripCountInterleaveThreshold( | |||
| 254 | "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden, | |||
| 255 | cl::desc("We don't interleave loops with a estimated constant trip count " | |||
| 256 | "below this number")); | |||
| 257 | ||||
| 258 | static cl::opt<unsigned> ForceTargetNumScalarRegs( | |||
| 259 | "force-target-num-scalar-regs", cl::init(0), cl::Hidden, | |||
| 260 | cl::desc("A flag that overrides the target's number of scalar registers.")); | |||
| 261 | ||||
| 262 | static cl::opt<unsigned> ForceTargetNumVectorRegs( | |||
| 263 | "force-target-num-vector-regs", cl::init(0), cl::Hidden, | |||
| 264 | cl::desc("A flag that overrides the target's number of vector registers.")); | |||
| 265 | ||||
| 266 | static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor( | |||
| 267 | "force-target-max-scalar-interleave", cl::init(0), cl::Hidden, | |||
| 268 | cl::desc("A flag that overrides the target's max interleave factor for " | |||
| 269 | "scalar loops.")); | |||
| 270 | ||||
| 271 | static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor( | |||
| 272 | "force-target-max-vector-interleave", cl::init(0), cl::Hidden, | |||
| 273 | cl::desc("A flag that overrides the target's max interleave factor for " | |||
| 274 | "vectorized loops.")); | |||
| 275 | ||||
| 276 | static cl::opt<unsigned> ForceTargetInstructionCost( | |||
| 277 | "force-target-instruction-cost", cl::init(0), cl::Hidden, | |||
| 278 | cl::desc("A flag that overrides the target's expected cost for " | |||
| 279 | "an instruction to a single constant value. Mostly " | |||
| 280 | "useful for getting consistent testing.")); | |||
| 281 | ||||
| 282 | static cl::opt<bool> ForceTargetSupportsScalableVectors( | |||
| 283 | "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden, | |||
| 284 | cl::desc( | |||
| 285 | "Pretend that scalable vectors are supported, even if the target does " | |||
| 286 | "not support them. This flag should only be used for testing.")); | |||
| 287 | ||||
| 288 | static cl::opt<unsigned> SmallLoopCost( | |||
| 289 | "small-loop-cost", cl::init(20), cl::Hidden, | |||
| 290 | cl::desc( | |||
| 291 | "The cost of a loop that is considered 'small' by the interleaver.")); | |||
| 292 | ||||
| 293 | static cl::opt<bool> LoopVectorizeWithBlockFrequency( | |||
| 294 | "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden, | |||
| 295 | cl::desc("Enable the use of the block frequency analysis to access PGO " | |||
| 296 | "heuristics minimizing code growth in cold regions and being more " | |||
| 297 | "aggressive in hot regions.")); | |||
| 298 | ||||
| 299 | // Runtime interleave loops for load/store throughput. | |||
| 300 | static cl::opt<bool> EnableLoadStoreRuntimeInterleave( | |||
| 301 | "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden, | |||
| 302 | cl::desc( | |||
| 303 | "Enable runtime interleaving until load/store ports are saturated")); | |||
| 304 | ||||
| 305 | /// Interleave small loops with scalar reductions. | |||
| 306 | static cl::opt<bool> InterleaveSmallLoopScalarReduction( | |||
| 307 | "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden, | |||
| 308 | cl::desc("Enable interleaving for loops with small iteration counts that " | |||
| 309 | "contain scalar reductions to expose ILP.")); | |||
| 310 | ||||
| 311 | /// The number of stores in a loop that are allowed to need predication. | |||
| 312 | static cl::opt<unsigned> NumberOfStoresToPredicate( | |||
| 313 | "vectorize-num-stores-pred", cl::init(1), cl::Hidden, | |||
| 314 | cl::desc("Max number of stores to be predicated behind an if.")); | |||
| 315 | ||||
| 316 | static cl::opt<bool> EnableIndVarRegisterHeur( | |||
| 317 | "enable-ind-var-reg-heur", cl::init(true), cl::Hidden, | |||
| 318 | cl::desc("Count the induction variable only once when interleaving")); | |||
| 319 | ||||
| 320 | static cl::opt<bool> EnableCondStoresVectorization( | |||
| 321 | "enable-cond-stores-vec", cl::init(true), cl::Hidden, | |||
| 322 | cl::desc("Enable if predication of stores during vectorization.")); | |||
| 323 | ||||
| 324 | static cl::opt<unsigned> MaxNestedScalarReductionIC( | |||
| 325 | "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden, | |||
| 326 | cl::desc("The maximum interleave count to use when interleaving a scalar " | |||
| 327 | "reduction in a nested loop.")); | |||
| 328 | ||||
| 329 | static cl::opt<bool> | |||
| 330 | PreferInLoopReductions("prefer-inloop-reductions", cl::init(false), | |||
| 331 | cl::Hidden, | |||
| 332 | cl::desc("Prefer in-loop vector reductions, " | |||
| 333 | "overriding the targets preference.")); | |||
| 334 | ||||
| 335 | cl::opt<bool> EnableStrictReductions( | |||
| 336 | "enable-strict-reductions", cl::init(false), cl::Hidden, | |||
| 337 | cl::desc("Enable the vectorisation of loops with in-order (strict) " | |||
| 338 | "FP reductions")); | |||
| 339 | ||||
| 340 | static cl::opt<bool> PreferPredicatedReductionSelect( | |||
| 341 | "prefer-predicated-reduction-select", cl::init(false), cl::Hidden, | |||
| 342 | cl::desc( | |||
| 343 | "Prefer predicating a reduction operation over an after loop select.")); | |||
| 344 | ||||
| 345 | cl::opt<bool> EnableVPlanNativePath( | |||
| 346 | "enable-vplan-native-path", cl::init(false), cl::Hidden, | |||
| 347 | cl::desc("Enable VPlan-native vectorization path with " | |||
| 348 | "support for outer loop vectorization.")); | |||
| 349 | ||||
| 350 | // FIXME: Remove this switch once we have divergence analysis. Currently we | |||
| 351 | // assume divergent non-backedge branches when this switch is true. | |||
| 352 | cl::opt<bool> EnableVPlanPredication( | |||
| 353 | "enable-vplan-predication", cl::init(false), cl::Hidden, | |||
| 354 | cl::desc("Enable VPlan-native vectorization path predicator with " | |||
| 355 | "support for outer loop vectorization.")); | |||
| 356 | ||||
| 357 | // This flag enables the stress testing of the VPlan H-CFG construction in the | |||
| 358 | // VPlan-native vectorization path. It must be used in conjunction with | |||
| 359 | // -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the | |||
| 360 | // verification of the H-CFGs built. | |||
| 361 | static cl::opt<bool> VPlanBuildStressTest( | |||
| 362 | "vplan-build-stress-test", cl::init(false), cl::Hidden, | |||
| 363 | cl::desc( | |||
| 364 | "Build VPlan for every supported loop nest in the function and bail " | |||
| 365 | "out right after the build (stress test the VPlan H-CFG construction " | |||
| 366 | "in the VPlan-native vectorization path).")); | |||
| 367 | ||||
| 368 | cl::opt<bool> llvm::EnableLoopInterleaving( | |||
| 369 | "interleave-loops", cl::init(true), cl::Hidden, | |||
| 370 | cl::desc("Enable loop interleaving in Loop vectorization passes")); | |||
| 371 | cl::opt<bool> llvm::EnableLoopVectorization( | |||
| 372 | "vectorize-loops", cl::init(true), cl::Hidden, | |||
| 373 | cl::desc("Run the Loop vectorization passes")); | |||
| 374 | ||||
| 375 | cl::opt<bool> PrintVPlansInDotFormat( | |||
| 376 | "vplan-print-in-dot-format", cl::init(false), cl::Hidden, | |||
| 377 | cl::desc("Use dot format instead of plain text when dumping VPlans")); | |||
| 378 | ||||
| 379 | /// A helper function that returns true if the given type is irregular. The | |||
| 380 | /// type is irregular if its allocated size doesn't equal the store size of an | |||
| 381 | /// element of the corresponding vector type. | |||
| 382 | static bool hasIrregularType(Type *Ty, const DataLayout &DL) { | |||
| 383 | // Determine if an array of N elements of type Ty is "bitcast compatible" | |||
| 384 | // with a <N x Ty> vector. | |||
| 385 | // This is only true if there is no padding between the array elements. | |||
| 386 | return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty); | |||
| 387 | } | |||
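| | // Illustration (assuming a typical DataLayout): for Ty = i24 the type size | |||
| | // is 24 bits but the alloc size is rounded up to 32 bits, so [N x i24] | |||
| | // contains padding and is not bitcast-compatible with <N x i24>; the | |||
| | // function returns true. For Ty = i32 both sizes are 32, so it returns false. | |||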
| 388 | ||||
| 389 | /// A helper function that returns the reciprocal of the block probability of | |||
| 390 | /// predicated blocks. If we return X, we are assuming the predicated block | |||
| 391 | /// will execute once for every X iterations of the loop header. | |||
| 392 | /// | |||
| 393 | /// TODO: We should use actual block probability here, if available. Currently, | |||
| 394 | /// we always assume predicated blocks have a 50% chance of executing. | |||
| 395 | static unsigned getReciprocalPredBlockProb() { return 2; } | |||
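| | // E.g. with the current value of 2, the cost model scales a predicated | |||
| | // block's cost C down to C / 2, modelling that the block executes on | |||
| | // roughly every other iteration of the loop header. | |||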
| 396 | ||||
| 397 | /// A helper function that returns an integer or floating-point constant with | |||
| 398 | /// value C. | |||
| 399 | static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) { | |||
| 400 | return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C) | |||
| 401 | : ConstantFP::get(Ty, C); | |||
| 402 | } | |||
| 403 | ||||
| 404 | /// Returns "best known" trip count for the specified loop \p L as defined by | |||
| 405 | /// the following procedure: | |||
| 406 | /// 1) Returns exact trip count if it is known. | |||
| 407 | /// 2) Returns expected trip count according to profile data if any. | |||
| 408 | /// 3) Returns upper bound estimate if it is known. | |||
| 409 | /// 4) Returns None if all of the above failed. | |||
| 410 | static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) { | |||
| 411 | // Check if exact trip count is known. | |||
| 412 | if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L)) | |||
| 413 | return ExpectedTC; | |||
| 414 | ||||
| 415 | // Check if there is an expected trip count available from profile data. | |||
| 416 | if (LoopVectorizeWithBlockFrequency) | |||
| 417 | if (auto EstimatedTC = getLoopEstimatedTripCount(L)) | |||
| 418 | return EstimatedTC; | |||
| 419 | ||||
| 420 | // Check if upper bound estimate is known. | |||
| 421 | if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L)) | |||
| 422 | return ExpectedTC; | |||
| 423 | ||||
| 424 | return None; | |||
| 425 | } | |||
| 426 | ||||
| 427 | // Forward declare GeneratedRTChecks. | |||
| 428 | class GeneratedRTChecks; | |||
| 429 | ||||
| 430 | namespace llvm { | |||
| 431 | ||||
| 432 | /// InnerLoopVectorizer vectorizes loops which contain only one basic | |||
| 433 | /// block to a specified vectorization factor (VF). | |||
| 434 | /// This class performs the widening of scalars into vectors, or multiple | |||
| 435 | /// scalars. This class also implements the following features: | |||
| 436 | /// * It inserts an epilogue loop for handling loops that don't have iteration | |||
| 437 | /// counts that are known to be a multiple of the vectorization factor. | |||
| 438 | /// * It handles the code generation for reduction variables. | |||
| 439 | /// * Scalarization (implementation using scalars) of un-vectorizable | |||
| 440 | /// instructions. | |||
| 441 | /// InnerLoopVectorizer does not perform any vectorization-legality | |||
| 442 | /// checks, and relies on the caller to check for the different legality | |||
| 443 | /// aspects. The InnerLoopVectorizer relies on the | |||
| 444 | /// LoopVectorizationLegality class to provide information about the induction | |||
| 445 | /// and reduction variables that were found in the loop. | |||
| 446 | class InnerLoopVectorizer { | |||
| 447 | public: | |||
| 448 | InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE, | |||
| 449 | LoopInfo *LI, DominatorTree *DT, | |||
| 450 | const TargetLibraryInfo *TLI, | |||
| 451 | const TargetTransformInfo *TTI, AssumptionCache *AC, | |||
| 452 | OptimizationRemarkEmitter *ORE, ElementCount VecWidth, | |||
| 453 | unsigned UnrollFactor, LoopVectorizationLegality *LVL, | |||
| 454 | LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, | |||
| 455 | ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks) | |||
| 456 | : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI), | |||
| 457 | AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor), | |||
| 458 | Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI), | |||
| 459 | PSI(PSI), RTChecks(RTChecks) { | |||
| 460 | // Query this against the original loop and save it here because the profile | |||
| 461 | // of the original loop header may change as the transformation happens. | |||
| 462 | OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize( | |||
| 463 | OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass); | |||
| 464 | } | |||
| 465 | ||||
| 466 | virtual ~InnerLoopVectorizer() = default; | |||
| 467 | ||||
| 468 | /// Create a new empty loop that will contain vectorized instructions later | |||
| 469 | /// on, while the old loop will be used as the scalar remainder. Control flow | |||
| 470 | /// is generated around the vectorized (and scalar epilogue) loops consisting | |||
| 471 | /// of various checks and bypasses. Return the pre-header block of the new | |||
| 472 | /// loop. | |||
| 473 | /// In the case of epilogue vectorization, this function is overridden to | |||
| 474 | /// handle the more complex control flow around the loops. | |||
| 475 | virtual BasicBlock *createVectorizedLoopSkeleton(); | |||
| 476 | ||||
| 477 | /// Widen a single instruction within the innermost loop. | |||
| 478 | void widenInstruction(Instruction &I, VPValue *Def, VPUser &Operands, | |||
| 479 | VPTransformState &State); | |||
| 480 | ||||
| 481 | /// Widen a single call instruction within the innermost loop. | |||
| 482 | void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands, | |||
| 483 | VPTransformState &State); | |||
| 484 | ||||
| 485 | /// Widen a single select instruction within the innermost loop. | |||
| 486 | void widenSelectInstruction(SelectInst &I, VPValue *VPDef, VPUser &Operands, | |||
| 487 | bool InvariantCond, VPTransformState &State); | |||
| 488 | ||||
| 489 | /// Fix the vectorized code, taking care of header phis, live-outs, and more. | |||
| 490 | void fixVectorizedLoop(VPTransformState &State); | |||
| 491 | ||||
| 492 | // Return true if any runtime check is added. | |||
| 493 | bool areSafetyChecksAdded() { return AddedSafetyChecks; } | |||
| 494 | ||||
| 495 | /// A type for vectorized values in the new loop. Each value from the | |||
| 496 | /// original loop, when vectorized, is represented by UF vector values in the | |||
| 497 | /// new unrolled loop, where UF is the unroll factor. | |||
| 498 | using VectorParts = SmallVector<Value *, 2>; | |||
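| | // E.g. with VF = 4 and UF = 2, an i32 value from the original loop is | |||
| | // represented by two <4 x i32> values, one per unrolled part. | |||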
| 499 | ||||
| 500 | /// Vectorize a single GetElementPtrInst based on information gathered and | |||
| 501 | /// decisions taken during planning. | |||
| 502 | void widenGEP(GetElementPtrInst *GEP, VPValue *VPDef, VPUser &Indices, | |||
| 503 | unsigned UF, ElementCount VF, bool IsPtrLoopInvariant, | |||
| 504 | SmallBitVector &IsIndexLoopInvariant, VPTransformState &State); | |||
| 505 | ||||
| 506 | /// Vectorize a single first-order recurrence or pointer induction PHINode in | |||
| 507 | /// a block. This method handles the induction variable canonicalization. It | |||
| 508 | /// supports both VF = 1 for unrolled loops and arbitrary length vectors. | |||
| 509 | void widenPHIInstruction(Instruction *PN, VPWidenPHIRecipe *PhiR, | |||
| 510 | VPTransformState &State); | |||
| 511 | ||||
| 512 | /// A helper function to scalarize a single Instruction in the innermost loop. | |||
| 513 | /// Generates a sequence of scalar instances for each lane between \p MinLane | |||
| 514 | /// and \p MaxLane, times each part between \p MinPart and \p MaxPart, | |||
| 515 | /// inclusive. Uses the VPValue operands from \p Operands instead of \p | |||
| 516 | /// Instr's operands. | |||
| 517 | void scalarizeInstruction(Instruction *Instr, VPValue *Def, VPUser &Operands, | |||
| 518 | const VPIteration &Instance, bool IfPredicateInstr, | |||
| 519 | VPTransformState &State); | |||
| 520 | ||||
| 521 | /// Widen an integer or floating-point induction variable \p IV. If \p Trunc | |||
| 522 | /// is provided, the integer induction variable will first be truncated to | |||
| 523 | /// the corresponding type. | |||
| 524 | void widenIntOrFpInduction(PHINode *IV, Value *Start, TruncInst *Trunc, | |||
| 525 | VPValue *Def, VPValue *CastDef, | |||
| 526 | VPTransformState &State); | |||
| 527 | ||||
| 528 | /// Construct the vector value of a scalarized value \p V one lane at a time. | |||
| 529 | void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance, | |||
| 530 | VPTransformState &State); | |||
| 531 | ||||
| 532 | /// Try to vectorize interleaved access group \p Group with the base address | |||
| 533 | /// given in \p Addr, optionally masking the vector operations if \p | |||
| 534 | /// BlockInMask is non-null. Use \p State to translate given VPValues to IR | |||
| 535 | /// values in the vectorized loop. | |||
| 536 | void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group, | |||
| 537 | ArrayRef<VPValue *> VPDefs, | |||
| 538 | VPTransformState &State, VPValue *Addr, | |||
| 539 | ArrayRef<VPValue *> StoredValues, | |||
| 540 | VPValue *BlockInMask = nullptr); | |||
| 541 | ||||
| 542 | /// Vectorize Load and Store instructions with the base address given in \p | |||
| 543 | /// Addr, optionally masking the vector operations if \p BlockInMask is | |||
| 544 | /// non-null. Use \p State to translate given VPValues to IR values in the | |||
| 545 | /// vectorized loop. | |||
| 546 | void vectorizeMemoryInstruction(Instruction *Instr, VPTransformState &State, | |||
| 547 | VPValue *Def, VPValue *Addr, | |||
| 548 | VPValue *StoredValue, VPValue *BlockInMask); | |||
| 549 | ||||
| 550 | /// Set the debug location in builder \p CustomBuilder using the debug | |||
| 551 | /// location in \p V. If \p CustomBuilder is None, use the class member's | |||
| 552 | /// Builder. | |||
| 552 | void setDebugLocFromInst(const Value *V, | |||
| 553 | Optional<IRBuilder<> *> CustomBuilder = None); | |||
| 554 | ||||
| 555 | /// Fix the non-induction PHIs in the OrigPHIsToFix vector. | |||
| 556 | void fixNonInductionPHIs(VPTransformState &State); | |||
| 557 | ||||
| 558 | /// Returns true if the reordering of FP operations is not allowed, but we are | |||
| 559 | /// able to vectorize with strict in-order reductions for the given RdxDesc. | |||
| 560 | bool useOrderedReductions(RecurrenceDescriptor &RdxDesc); | |||
| 561 | ||||
| 562 | /// Create a broadcast instruction. This method generates a broadcast | |||
| 563 | /// instruction (shuffle) for loop invariant values and for the induction | |||
| 564 | /// value. If this is the induction variable then we extend it to N, N+1, ... | |||
| 565 | /// this is needed because each iteration in the loop corresponds to a SIMD | |||
| 566 | /// element. | |||
| 567 | virtual Value *getBroadcastInstrs(Value *V); | |||
| 568 | ||||
| 569 | protected: | |||
| 570 | friend class LoopVectorizationPlanner; | |||
| 571 | ||||
| 572 | /// A small list of PHINodes. | |||
| 573 | using PhiVector = SmallVector<PHINode *, 4>; | |||
| 574 | ||||
| 575 | /// A type for scalarized values in the new loop. Each value from the | |||
| 576 | /// original loop, when scalarized, is represented by UF x VF scalar values | |||
| 577 | /// in the new unrolled loop, where UF is the unroll factor and VF is the | |||
| 578 | /// vectorization factor. | |||
| 579 | using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>; | |||
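| | // E.g. with UF = 2 and VF = 4, a scalarized value is held as 2 x 4 scalar | |||
| | // values, indexed as [Part][Lane]. | |||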
| 580 | ||||
| 581 | /// Set up the values of the IVs correctly when exiting the vector loop. | |||
| 582 | void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II, | |||
| 583 | Value *CountRoundDown, Value *EndValue, | |||
| 584 | BasicBlock *MiddleBlock); | |||
| 585 | ||||
| 586 | /// Create a new induction variable inside L. | |||
| 587 | PHINode *createInductionVariable(Loop *L, Value *Start, Value *End, | |||
| 588 | Value *Step, Instruction *DL); | |||
| 589 | ||||
| 590 | /// Handle all cross-iteration phis in the header. | |||
| 591 | void fixCrossIterationPHIs(VPTransformState &State); | |||
| 592 | ||||
| 593 | /// Fix a first-order recurrence. This is the second phase of vectorizing | |||
| 594 | /// this phi node. | |||
| 595 | void fixFirstOrderRecurrence(VPWidenPHIRecipe *PhiR, VPTransformState &State); | |||
| 596 | ||||
| 597 | /// Fix a reduction cross-iteration phi. This is the second phase of | |||
| 598 | /// vectorizing this phi node. | |||
| 599 | void fixReduction(VPReductionPHIRecipe *Phi, VPTransformState &State); | |||
| 600 | ||||
| 601 | /// Clear NSW/NUW flags from reduction instructions if necessary. | |||
| 602 | void clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc, | |||
| 603 | VPTransformState &State); | |||
| 604 | ||||
| 605 | /// Fix up the LCSSA phi nodes in the unique exit block. This simply | |||
| 606 | /// means we need to add the appropriate incoming value from the middle | |||
| 607 | /// block as exiting edges from the scalar epilogue loop (if present) are | |||
| 608 | /// already in place, and we exit the vector loop exclusively to the middle | |||
| 609 | /// block. | |||
| 610 | void fixLCSSAPHIs(VPTransformState &State); | |||
| 611 | ||||
| 612 | /// Iteratively sink the scalarized operands of a predicated instruction into | |||
| 613 | /// the block that was created for it. | |||
| 614 | void sinkScalarOperands(Instruction *PredInst); | |||
| 615 | ||||
| 616 | /// Shrinks vector element sizes to the smallest bitwidth they can be legally | |||
| 617 | /// represented as. | |||
| 618 | void truncateToMinimalBitwidths(VPTransformState &State); | |||
| 619 | ||||
| 620 | /// This function adds | |||
| 621 | /// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...) | |||
| 622 | /// to each vector element of Val. The sequence starts at StartIdx. | |||
| 623 | /// \p Opcode is relevant for FP induction variable. | |||
| 624 | virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step, | |||
| 625 | Instruction::BinaryOps Opcode = | |||
| 626 | Instruction::BinaryOpsEnd); | |||
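| | // Sketch of the intent: for Val = <4 x i32> zero, StartIdx = 0 and Step = 1 | |||
| | // this produces <0, 1, 2, 3>; with StartIdx = 4 it produces <4, 5, 6, 7>, | |||
| | // i.e. the next unrolled part of the induction sequence. | |||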
| 627 | ||||
| 628 | /// Compute scalar induction steps. \p ScalarIV is the scalar induction | |||
| 629 | /// variable on which to base the steps, \p Step is the size of the step, and | |||
| 630 | /// \p EntryVal is the value from the original loop that maps to the steps. | |||
| 631 | /// Note that \p EntryVal doesn't have to be an induction variable - it | |||
| 632 | /// can also be a truncate instruction. | |||
| 633 | void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal, | |||
| 634 | const InductionDescriptor &ID, VPValue *Def, | |||
| 635 | VPValue *CastDef, VPTransformState &State); | |||
| 636 | ||||
| 637 | /// Create a vector induction phi node based on an existing scalar one. \p | |||
| 638 | /// EntryVal is the value from the original loop that maps to the vector phi | |||
| 639 | /// node, and \p Step is the loop-invariant step. If \p EntryVal is a | |||
| 640 | /// truncate instruction, instead of widening the original IV, we widen a | |||
| 641 | /// version of the IV truncated to \p EntryVal's type. | |||
| 642 | void createVectorIntOrFpInductionPHI(const InductionDescriptor &II, | |||
| 643 | Value *Step, Value *Start, | |||
| 644 | Instruction *EntryVal, VPValue *Def, | |||
| 645 | VPValue *CastDef, | |||
| 646 | VPTransformState &State); | |||
| 647 | ||||
| 648 | /// Returns true if an instruction \p I should be scalarized instead of | |||
| 649 | /// vectorized for the chosen vectorization factor. | |||
| 650 | bool shouldScalarizeInstruction(Instruction *I) const; | |||
| 651 | ||||
| 652 | /// Returns true if we should generate a scalar version of \p IV. | |||
| 653 | bool needsScalarInduction(Instruction *IV) const; | |||
| 654 | ||||
| 655 | /// If there is a cast involved in the induction variable \p ID, which should | |||
| 656 | /// be ignored in the vectorized loop body, this function records the | |||
| 657 | /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the | |||
| 658 | /// cast. We had already proved that the casted Phi is equal to the uncasted | |||
| 659 | /// Phi in the vectorized loop (under a runtime guard), and therefore | |||
| 660 | /// there is no need to vectorize the cast - the same value can be used in the | |||
| 661 | /// vector loop for both the Phi and the cast. | |||
| 662 | /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified. | |||
| 663 | /// Otherwise, \p VectorLoopValue is a widened/vectorized value. | |||
| 664 | /// | |||
| 665 | /// \p EntryVal is the value from the original loop that maps to the vector | |||
| 666 | /// phi node and is used to distinguish what is the IV currently being | |||
| 667 | /// processed - original one (if \p EntryVal is a phi corresponding to the | |||
| 668 | /// original IV) or the "newly-created" one based on the proof mentioned above | |||
| 669 | /// (see also buildScalarSteps() and createVectorIntOrFpInductionPHI()). In the | |||
| 670 | /// latter case \p EntryVal is a TruncInst and we must not record anything for | |||
| 671 | /// that IV, but it's error-prone to expect callers of this routine to care | |||
| 672 | /// about that, hence this explicit parameter. | |||
| 673 | void recordVectorLoopValueForInductionCast( | |||
| 674 | const InductionDescriptor &ID, const Instruction *EntryVal, | |||
| 675 | Value *VectorLoopValue, VPValue *CastDef, VPTransformState &State, | |||
| 676 | unsigned Part, unsigned Lane = UINT_MAX); | |||
| 677 | ||||
| 678 | /// Generate a shuffle sequence that will reverse the vector Vec. | |||
| 679 | virtual Value *reverseVector(Value *Vec); | |||
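| | // E.g. <a, b, c, d> becomes <d, c, b, a>, typically emitted as a | |||
| | // shufflevector with mask <3, 2, 1, 0>. | |||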
| 680 | ||||
| 681 | /// Returns (and creates if needed) the original loop trip count. | |||
| 682 | Value *getOrCreateTripCount(Loop *NewLoop); | |||
| 683 | ||||
| 684 | /// Returns (and creates if needed) the trip count of the widened loop. | |||
| 685 | Value *getOrCreateVectorTripCount(Loop *NewLoop); | |||
| 686 | ||||
| 687 | /// Returns a bitcasted value to the requested vector type. | |||
| 688 | /// Also handles bitcasts of vector<float> <-> vector<pointer> types. | |||
| 689 | Value *createBitOrPointerCast(Value *V, VectorType *DstVTy, | |||
| 690 | const DataLayout &DL); | |||
| 691 | ||||
| 692 | /// Emit a bypass check to see if the vector trip count is zero, including if | |||
| 693 | /// it overflows. | |||
| 694 | void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass); | |||
| 695 | ||||
| 696 | /// Emit a bypass check to see if all of the SCEV assumptions we've | |||
| 697 | /// had to make are correct. Returns the block containing the checks or | |||
| 698 | /// nullptr if no checks have been added. | |||
| 699 | BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass); | |||
| 700 | ||||
| 701 | /// Emit bypass checks to check any memory assumptions we may have made. | |||
| 702 | /// Returns the block containing the checks or nullptr if no checks have been | |||
| 703 | /// added. | |||
| 704 | BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass); | |||
| 705 | ||||
| 706 | /// Compute the transformed value of Index at offset StartValue using step | |||
| 707 | /// StepValue. | |||
| 708 | /// For integer induction, returns StartValue + Index * StepValue. | |||
| 709 | /// For pointer induction, returns StartValue[Index * StepValue]. | |||
| 710 | /// FIXME: The newly created binary instructions should contain nsw/nuw | |||
| 711 | /// flags, which can be found from the original scalar operations. | |||
| 712 | Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE, | |||
| 713 | const DataLayout &DL, | |||
| 714 | const InductionDescriptor &ID) const; | |||
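| | // Worked example: for an integer induction with StartValue = 7 and | |||
| | // StepValue = 3, Index = 2 is transformed to 7 + 2 * 3 = 13; for a pointer | |||
| | // induction it is the address &StartValue[2 * 3]. | |||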
| 715 | ||||
| 716 | /// Emit basic blocks (prefixed with \p Prefix) for the iteration check, | |||
| 717 | /// vector loop preheader, middle block and scalar preheader. Also | |||
| 718 | /// allocate a loop object for the new vector loop and return it. | |||
| 719 | Loop *createVectorLoopSkeleton(StringRef Prefix); | |||
| 720 | ||||
| 721 | /// Create new phi nodes for the induction variables to resume iteration count | |||
| 722 | /// in the scalar epilogue, from where the vectorized loop left off (given by | |||
| 723 | /// \p VectorTripCount). | |||
| 724 | /// In cases where the loop skeleton is more complicated (eg. epilogue | |||
| 725 | /// vectorization) and the resume values can come from an additional bypass | |||
| 726 | /// block, the \p AdditionalBypass pair provides information about the bypass | |||
| 727 | /// block and the end value on the edge from bypass to this loop. | |||
| 728 | void createInductionResumeValues( | |||
| 729 | Loop *L, Value *VectorTripCount, | |||
| 730 | std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr}); | |||
| 731 | ||||
| 732 | /// Complete the loop skeleton by adding debug MDs, creating appropriate | |||
| 733 | /// conditional branches in the middle block, preparing the builder and | |||
| 734 | /// running the verifier. Take in the vector loop \p L as argument, and return | |||
| 735 | /// the preheader of the completed vector loop. | |||
| 736 | BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID); | |||
| 737 | ||||
| 738 | /// Add additional metadata to \p To that was not present on \p Orig. | |||
| 739 | /// | |||
| 740 | /// Currently this is used to add the noalias annotations based on the | |||
| 741 | /// inserted memchecks. Use this for instructions that are *cloned* into the | |||
| 742 | /// vector loop. | |||
| 743 | void addNewMetadata(Instruction *To, const Instruction *Orig); | |||
| 744 | ||||
| 745 | /// Add metadata from one instruction to another. | |||
| 746 | /// | |||
| 747 | /// This includes both the original MDs from \p From and additional ones (\see | |||
| 748 | /// addNewMetadata). Use this for *newly created* instructions in the vector | |||
| 749 | /// loop. | |||
| 750 | void addMetadata(Instruction *To, Instruction *From); | |||
| 751 | ||||
| 752 | /// Similar to the previous function but it adds the metadata to a | |||
| 753 | /// vector of instructions. | |||
| 754 | void addMetadata(ArrayRef<Value *> To, Instruction *From); | |||
| 755 | ||||
| 756 | /// Allow subclasses to override and print debug traces before/after vplan | |||
| 757 | /// execution, when trace information is requested. | |||
| 758 | virtual void printDebugTracesAtStart(){}; | |||
| 759 | virtual void printDebugTracesAtEnd(){}; | |||
| 760 | ||||
| 761 | /// The original loop. | |||
| 762 | Loop *OrigLoop; | |||
| 763 | ||||
| 764 | /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies | |||
| 765 | /// dynamic knowledge to simplify SCEV expressions and converts them to a | |||
| 766 | /// more usable form. | |||
| 767 | PredicatedScalarEvolution &PSE; | |||
| 768 | ||||
| 769 | /// Loop Info. | |||
| 770 | LoopInfo *LI; | |||
| 771 | ||||
| 772 | /// Dominator Tree. | |||
| 773 | DominatorTree *DT; | |||
| 774 | ||||
| 775 | /// Alias Analysis. | |||
| 776 | AAResults *AA; | |||
| 777 | ||||
| 778 | /// Target Library Info. | |||
| 779 | const TargetLibraryInfo *TLI; | |||
| 780 | ||||
| 781 | /// Target Transform Info. | |||
| 782 | const TargetTransformInfo *TTI; | |||
| 783 | ||||
| 784 | /// Assumption Cache. | |||
| 785 | AssumptionCache *AC; | |||
| 786 | ||||
| 787 | /// Interface to emit optimization remarks. | |||
| 788 | OptimizationRemarkEmitter *ORE; | |||
| 789 | ||||
| 790 | /// LoopVersioning. It's only set up (non-null) if memchecks were | |||
| 791 | /// used. | |||
| 792 | /// | |||
| 793 | /// This is currently only used to add no-alias metadata based on the | |||
| 794 | /// memchecks. The actual versioning is performed manually. | |||
| 795 | std::unique_ptr<LoopVersioning> LVer; | |||
| 796 | ||||
| 797 | /// The vectorization SIMD factor to use. Each vector will have this many | |||
| 798 | /// vector elements. | |||
| 799 | ElementCount VF; | |||
| 800 | ||||
| 801 | /// The vectorization unroll factor to use. Each scalar is vectorized to this | |||
| 802 | /// many different vector instructions. | |||
| 803 | unsigned UF; | |||
| 804 | ||||
| 805 | /// The builder that we use | |||
| 806 | IRBuilder<> Builder; | |||
| 807 | ||||
| 808 | // --- Vectorization state --- | |||
| 809 | ||||
| 810 | /// The vector-loop preheader. | |||
| 811 | BasicBlock *LoopVectorPreHeader; | |||
| 812 | ||||
| 813 | /// The scalar-loop preheader. | |||
| 814 | BasicBlock *LoopScalarPreHeader; | |||
| 815 | ||||
| 816 | /// Middle Block between the vector and the scalar. | |||
| 817 | BasicBlock *LoopMiddleBlock; | |||
| 818 | ||||
| 819 | /// The unique ExitBlock of the scalar loop if one exists. Note that | |||
| 820 | /// there can be multiple exiting edges reaching this block. | |||
| 821 | BasicBlock *LoopExitBlock; | |||
| 822 | ||||
| 823 | /// The vector loop body. | |||
| 824 | BasicBlock *LoopVectorBody; | |||
| 825 | ||||
| 826 | /// The scalar loop body. | |||
| 827 | BasicBlock *LoopScalarBody; | |||
| 828 | ||||
| 829 | /// A list of all bypass blocks. The first block is the entry of the loop. | |||
| 830 | SmallVector<BasicBlock *, 4> LoopBypassBlocks; | |||
| 831 | ||||
| 832 | /// The new Induction variable which was added to the new block. | |||
| 833 | PHINode *Induction = nullptr; | |||
| 834 | ||||
| 835 | /// The induction variable of the old basic block. | |||
| 836 | PHINode *OldInduction = nullptr; | |||
| 837 | ||||
| 838 | /// Store instructions that were predicated. | |||
| 839 | SmallVector<Instruction *, 4> PredicatedInstructions; | |||
| 840 | ||||
| 841 | /// Trip count of the original loop. | |||
| 842 | Value *TripCount = nullptr; | |||
| 843 | ||||
| 844 | /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)) | |||
| 845 | Value *VectorTripCount = nullptr; | |||
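| | // E.g. TripCount = 17 with VF = 4, UF = 2 gives 17 - 17 % 8 = 16, leaving | |||
| | // one iteration for the scalar remainder loop. | |||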
| 846 | ||||
| 847 | /// The legality analysis. | |||
| 848 | LoopVectorizationLegality *Legal; | |||
| 849 | ||||
| 850 | /// The profitability analysis. | |||
| 851 | LoopVectorizationCostModel *Cost; | |||
| 852 | ||||
| 853 | // Record whether runtime checks are added. | |||
| 854 | bool AddedSafetyChecks = false; | |||
| 855 | ||||
| 856 | // Holds the end values for each induction variable. We save the end values | |||
| 857 | // so we can later fix-up the external users of the induction variables. | |||
| 858 | DenseMap<PHINode *, Value *> IVEndValues; | |||
| 859 | ||||
| 860 | // Vector of original scalar PHIs whose corresponding widened PHIs need to be | |||
| 861 | // fixed up at the end of vector code generation. | |||
| 862 | SmallVector<PHINode *, 8> OrigPHIsToFix; | |||
| 863 | ||||
| 864 | /// BFI and PSI are used to check for profile-guided size optimizations. | |||
| 865 | BlockFrequencyInfo *BFI; | |||
| 866 | ProfileSummaryInfo *PSI; | |||
| 867 | ||||
| 868 | // Whether this loop should be optimized for size based on profile-guided | |||
| 869 | // size optimizations. | |||
| 870 | bool OptForSizeBasedOnProfile; | |||
| 871 | ||||
| 872 | /// Structure to hold information about generated runtime checks, responsible | |||
| 873 | /// for cleaning the checks, if vectorization turns out unprofitable. | |||
| 874 | GeneratedRTChecks &RTChecks; | |||
| 875 | }; | |||
| 876 | ||||
| 877 | class InnerLoopUnroller : public InnerLoopVectorizer { | |||
| 878 | public: | |||
| 879 | InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE, | |||
| 880 | LoopInfo *LI, DominatorTree *DT, | |||
| 881 | const TargetLibraryInfo *TLI, | |||
| 882 | const TargetTransformInfo *TTI, AssumptionCache *AC, | |||
| 883 | OptimizationRemarkEmitter *ORE, unsigned UnrollFactor, | |||
| 884 | LoopVectorizationLegality *LVL, | |||
| 885 | LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, | |||
| 886 | ProfileSummaryInfo *PSI, GeneratedRTChecks &Check) | |||
| 887 | : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, | |||
| 888 | ElementCount::getFixed(1), UnrollFactor, LVL, CM, | |||
| 889 | BFI, PSI, Check) {} | |||
| 890 | ||||
| 891 | private: | |||
| 892 | Value *getBroadcastInstrs(Value *V) override; | |||
| 893 | Value *getStepVector(Value *Val, int StartIdx, Value *Step, | |||
| 894 | Instruction::BinaryOps Opcode = | |||
| 895 | Instruction::BinaryOpsEnd) override; | |||
| 896 | Value *reverseVector(Value *Vec) override; | |||
| 897 | }; | |||
| 898 | ||||
| 899 | /// Encapsulate information regarding vectorization of a loop and its epilogue. | |||
| 900 | /// This information is meant to be updated and used across two stages of | |||
| 901 | /// epilogue vectorization. | |||
| 902 | struct EpilogueLoopVectorizationInfo { | |||
| 903 | ElementCount MainLoopVF = ElementCount::getFixed(0); | |||
| 904 | unsigned MainLoopUF = 0; | |||
| 905 | ElementCount EpilogueVF = ElementCount::getFixed(0); | |||
| 906 | unsigned EpilogueUF = 0; | |||
| 907 | BasicBlock *MainLoopIterationCountCheck = nullptr; | |||
| 908 | BasicBlock *EpilogueIterationCountCheck = nullptr; | |||
| 909 | BasicBlock *SCEVSafetyCheck = nullptr; | |||
| 910 | BasicBlock *MemSafetyCheck = nullptr; | |||
| 911 | Value *TripCount = nullptr; | |||
| 912 | Value *VectorTripCount = nullptr; | |||
| 913 | ||||
| 914 | EpilogueLoopVectorizationInfo(unsigned MVF, unsigned MUF, unsigned EVF, | |||
| 915 | unsigned EUF) | |||
| 916 | : MainLoopVF(ElementCount::getFixed(MVF)), MainLoopUF(MUF), | |||
| 917 | EpilogueVF(ElementCount::getFixed(EVF)), EpilogueUF(EUF) { | |||
| 918 | assert(EUF == 1 && | |||
| 919 | "A high UF for the epilogue loop is likely not beneficial."); | |||
| 920 | } | |||
| 921 | }; | |||
| 922 | ||||
| 923 | /// An extension of the inner loop vectorizer that creates a skeleton for a | |||
| 924 | /// vectorized loop that has its epilogue (residual) also vectorized. | |||
| 925 | /// The idea is to run the vplan on a given loop twice, first to set up the | |||
| 926 | /// skeleton and vectorize the main loop, and second to complete the skeleton | |||
| 927 | /// from the first step and vectorize the epilogue. This is achieved by | |||
| 928 | /// deriving two concrete strategy classes from this base class and invoking | |||
| 929 | /// them in succession from the loop vectorizer planner. | |||
| 930 | class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer { | |||
| 931 | public: | |||
| 932 | InnerLoopAndEpilogueVectorizer( | |||
| 933 | Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, | |||
| 934 | DominatorTree *DT, const TargetLibraryInfo *TLI, | |||
| 935 | const TargetTransformInfo *TTI, AssumptionCache *AC, | |||
| 936 | OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI, | |||
| 937 | LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM, | |||
| 938 | BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, | |||
| 939 | GeneratedRTChecks &Checks) | |||
| 940 | : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, | |||
| 941 | EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI, | |||
| 942 | Checks), | |||
| 943 | EPI(EPI) {} | |||
| 944 | ||||
| 945 | // Override this function to handle the more complex control flow around the | |||
| 946 | // three loops. | |||
| 947 | BasicBlock *createVectorizedLoopSkeleton() final override { | |||
| 948 | return createEpilogueVectorizedLoopSkeleton(); | |||
| 949 | } | |||
| 950 | ||||
| 951 | /// The interface for creating a vectorized skeleton using one of two | |||
| 952 | /// different strategies, each corresponding to one execution of the vplan | |||
| 953 | /// as described above. | |||
| 954 | virtual BasicBlock *createEpilogueVectorizedLoopSkeleton() = 0; | |||
| 955 | ||||
| 956 | /// Holds and updates state information required to vectorize the main loop | |||
| 957 | /// and its epilogue in two separate passes. This setup helps us avoid | |||
| 958 | /// regenerating and recomputing runtime safety checks. It also helps us to | |||
| 959 | /// shorten the iteration-count-check path length for the cases where the | |||
| 960 | /// iteration count of the loop is so small that the main vector loop is | |||
| 961 | /// completely skipped. | |||
| 962 | EpilogueLoopVectorizationInfo &EPI; | |||
| 963 | }; | |||
| 964 | ||||
| 965 | /// A specialized derived class of inner loop vectorizer that performs | |||
| 966 | /// vectorization of *main* loops in the process of vectorizing loops and their | |||
| 967 | /// epilogues. | |||
| 968 | class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer { | |||
| 969 | public: | |||
| 970 | EpilogueVectorizerMainLoop( | |||
| 971 | Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, | |||
| 972 | DominatorTree *DT, const TargetLibraryInfo *TLI, | |||
| 973 | const TargetTransformInfo *TTI, AssumptionCache *AC, | |||
| 974 | OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI, | |||
| 975 | LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM, | |||
| 976 | BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, | |||
| 977 | GeneratedRTChecks &Check) | |||
| 978 | : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, | |||
| 979 | EPI, LVL, CM, BFI, PSI, Check) {} | |||
| 980 | /// Implements the interface for creating a vectorized skeleton using the | |||
| 981 | /// *main loop* strategy (i.e. the first pass of vplan execution). | |||
| 982 | BasicBlock *createEpilogueVectorizedLoopSkeleton() final override; | |||
| 983 | ||||
| 984 | protected: | |||
| 985 | /// Emits an iteration count bypass check once for the main loop (when \p | |||
| 986 | /// ForEpilogue is false) and once for the epilogue loop (when \p | |||
| 987 | /// ForEpilogue is true). | |||
| 988 | BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass, | |||
| 989 | bool ForEpilogue); | |||
| 990 | void printDebugTracesAtStart() override; | |||
| 991 | void printDebugTracesAtEnd() override; | |||
| 992 | }; | |||
| 993 | ||||
| 994 | /// A specialized derived class of inner loop vectorizer that performs | |||
| 995 | /// vectorization of *epilogue* loops in the process of vectorizing loops and | |||
| 996 | /// their epilogues. | |||
| 997 | class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer { | |||
| 998 | public: | |||
| 999 | EpilogueVectorizerEpilogueLoop( | |||
| 1000 | Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, | |||
| 1001 | DominatorTree *DT, const TargetLibraryInfo *TLI, | |||
| 1002 | const TargetTransformInfo *TTI, AssumptionCache *AC, | |||
| 1003 | OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI, | |||
| 1004 | LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM, | |||
| 1005 | BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, | |||
| 1006 | GeneratedRTChecks &Checks) | |||
| 1007 | : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, | |||
| 1008 | EPI, LVL, CM, BFI, PSI, Checks) {} | |||
| 1009 | /// Implements the interface for creating a vectorized skeleton using the | |||
| 1010 | /// *epilogue loop* strategy (i.e. the second pass of vplan execution). | |||
| 1011 | BasicBlock *createEpilogueVectorizedLoopSkeleton() final override; | |||
| 1012 | ||||
| 1013 | protected: | |||
| 1014 | /// Emits an iteration count bypass check after the main vector loop has | |||
| 1015 | /// finished to see if there are any iterations left to execute by either | |||
| 1016 | /// the vector epilogue or the scalar epilogue. | |||
| 1017 | BasicBlock *emitMinimumVectorEpilogueIterCountCheck(Loop *L, | |||
| 1018 | BasicBlock *Bypass, | |||
| 1019 | BasicBlock *Insert); | |||
| 1020 | void printDebugTracesAtStart() override; | |||
| 1021 | void printDebugTracesAtEnd() override; | |||
| 1022 | }; | |||
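| | ||||
| | // Illustrative sketch of the intended two-pass flow: the planner runs the | |||
| | // main-loop strategy first, then the epilogue strategy. The executePlan call | |||
| | // and the elided argument lists are assumptions about the planner's API. | |||
| | // | |||
| | // EpilogueVectorizerMainLoop MainILV(/*...*/);       // pass 1: main loop | |||
| | // LVP.executePlan(MainILV, DT);                      // sets up the skeleton | |||
| | // EpilogueVectorizerEpilogueLoop EpilogILV(/*...*/); // pass 2: epilogue | |||
| | // LVP.executePlan(EpilogILV, DT);                    // completes the skeleton | |||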
| 1023 | } // end namespace llvm | |||
| 1024 | ||||
| 1025 | /// Look for a meaningful debug location on the instruction or its | |||
| 1026 | /// operands. | |||
| 1027 | static Instruction *getDebugLocFromInstOrOperands(Instruction *I) { | |||
| 1028 | if (!I) | |||
| 1029 | return I; | |||
| 1030 | ||||
| 1031 | DebugLoc Empty; | |||
| 1032 | if (I->getDebugLoc() != Empty) | |||
| 1033 | return I; | |||
| 1034 | ||||
| 1035 | for (Use &Op : I->operands()) { | |||
| 1036 | if (Instruction *OpInst = dyn_cast<Instruction>(Op)) | |||
| 1037 | if (OpInst->getDebugLoc() != Empty) | |||
| 1038 | return OpInst; | |||
| 1039 | } | |||
| 1040 | ||||
| 1041 | return I; | |||
| 1042 | } | |||
| 1043 | ||||
| 1044 | void InnerLoopVectorizer::setDebugLocFromInst( | |||
| 1045 | const Value *V, Optional<IRBuilder<> *> CustomBuilder) { | |||
| 1046 | IRBuilder<> *B = (CustomBuilder == None) ? &Builder : *CustomBuilder; | |||
| 1047 | if (const Instruction *Inst = dyn_cast_or_null<Instruction>(V)) { | |||
| 1048 | const DILocation *DIL = Inst->getDebugLoc(); | |||
| 1049 | ||||
| 1050 | // When an FSDiscriminator is enabled, we don't need to add the multiply | |||
| 1051 | // factors to the discriminators. | |||
| 1052 | if (DIL && Inst->getFunction()->isDebugInfoForProfiling() && | |||
| 1053 | !isa<DbgInfoIntrinsic>(Inst) && !EnableFSDiscriminator) { | |||
| 1054 | // FIXME: For scalable vectors, assume vscale=1. | |||
| 1055 | auto NewDIL = | |||
| 1056 | DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue()); | |||
| 1057 | if (NewDIL) | |||
| 1058 | B->SetCurrentDebugLocation(NewDIL.getValue()); | |||
| 1059 | else | |||
| 1060 | LLVM_DEBUG(dbgs() | |||
| 1061 | << "Failed to create new discriminator: " | |||
| 1062 | << DIL->getFilename() << " Line: " << DIL->getLine()); | |||
| 1063 | } else | |||
| 1064 | B->SetCurrentDebugLocation(DIL); | |||
| 1065 | } else | |||
| 1066 | B->SetCurrentDebugLocation(DebugLoc()); | |||
| 1067 | } | |||
| 1068 | ||||
| 1069 | /// Write a \p DebugMsg about vectorization to the debug output stream. If \p I | |||
| 1070 | /// is passed, the message relates to that particular instruction. | |||
| 1071 | #ifndef NDEBUG | |||
| 1072 | static void debugVectorizationMessage(const StringRef Prefix, | |||
| 1073 | const StringRef DebugMsg, | |||
| 1074 | Instruction *I) { | |||
| 1075 | dbgs() << "LV: " << Prefix << DebugMsg; | |||
| 1076 | if (I != nullptr) | |||
| 1077 | dbgs() << " " << *I; | |||
| 1078 | else | |||
| 1079 | dbgs() << '.'; | |||
| 1080 | dbgs() << '\n'; | |||
| 1081 | } | |||
| 1082 | #endif | |||
| 1083 | ||||
| 1084 | /// Create an analysis remark that explains why vectorization failed | |||
| 1085 | /// | |||
| 1086 | /// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p | |||
| 1087 | /// RemarkName is the identifier for the remark. If \p I is passed it is an | |||
| 1088 | /// instruction that prevents vectorization. Otherwise \p TheLoop is used for | |||
| 1089 | /// the location of the remark. \return the remark object that can be | |||
| 1090 | /// streamed to. | |||
| 1091 | static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName, | |||
| 1092 | StringRef RemarkName, Loop *TheLoop, Instruction *I) { | |||
| 1093 | Value *CodeRegion = TheLoop->getHeader(); | |||
| 1094 | DebugLoc DL = TheLoop->getStartLoc(); | |||
| 1095 | ||||
| 1096 | if (I) { | |||
| 1097 | CodeRegion = I->getParent(); | |||
| 1098 | // If there is no debug location attached to the instruction, fall back to | |||
| 1099 | // using the loop's. | |||
| 1100 | if (I->getDebugLoc()) | |||
| 1101 | DL = I->getDebugLoc(); | |||
| 1102 | } | |||
| 1103 | ||||
| 1104 | return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion); | |||
| 1105 | } | |||
| 1106 | ||||
| 1107 | /// Return a value for Step multiplied by VF. | |||
| 1108 | static Value *createStepForVF(IRBuilder<> &B, Constant *Step, ElementCount VF) { | |||
| 1109 | assert(isa<ConstantInt>(Step) && "Expected an integer step"); | |||
| 1110 | Constant *StepVal = ConstantInt::get( | |||
| 1111 | Step->getType(), | |||
| 1112 | cast<ConstantInt>(Step)->getSExtValue() * VF.getKnownMinValue()); | |||
| 1113 | return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal; | |||
| 1114 | } | |||
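| | ||||
| | // Worked example (illustrative, follows directly from the code above): for a | |||
| | // fixed VF the step folds to a compile-time constant; for a scalable VF it | |||
| | // becomes a multiple of vscale. Assuming Step is the i64 constant 2: | |||
| | //   VF = ElementCount::getFixed(4)    -> i64 8 | |||
| | //   VF = ElementCount::getScalable(4) -> B.CreateVScale(i64 8), i.e. 8 * vscale | |||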
| 1115 | ||||
| 1116 | namespace llvm { | |||
| 1117 | ||||
| 1118 | /// Return the runtime value for VF. | |||
| 1119 | Value *getRuntimeVF(IRBuilder<> &B, Type *Ty, ElementCount VF) { | |||
| 1120 | Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue()); | |||
| 1121 | return VF.isScalable() ? B.CreateVScale(EC) : EC; | |||
| 1122 | } | |||
| 1123 | ||||
| 1124 | void reportVectorizationFailure(const StringRef DebugMsg, | |||
| 1125 | const StringRef OREMsg, const StringRef ORETag, | |||
| 1126 | OptimizationRemarkEmitter *ORE, Loop *TheLoop, | |||
| 1127 | Instruction *I) { | |||
| 1128 | LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I))do { } while (false); | |||
| 1129 | LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE); | |||
| 1130 | ORE->emit( | |||
| 1131 | createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I) | |||
| 1132 | << "loop not vectorized: " << OREMsg); | |||
| 1133 | } | |||
| 1134 | ||||
| 1135 | void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag, | |||
| 1136 | OptimizationRemarkEmitter *ORE, Loop *TheLoop, | |||
| 1137 | Instruction *I) { | |||
| 1138 | LLVM_DEBUG(debugVectorizationMessage("", Msg, I)); | |||
| 1139 | LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE); | |||
| 1140 | ORE->emit( | |||
| 1141 | createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I) | |||
| 1142 | << Msg); | |||
| 1143 | } | |||
| 1144 | ||||
| 1145 | } // end namespace llvm | |||
| 1146 | ||||
| 1147 | #ifndef NDEBUG | |||
| 1148 | /// \return string containing a file name and a line # for the given loop. | |||
| 1149 | static std::string getDebugLocString(const Loop *L) { | |||
| 1150 | std::string Result; | |||
| 1151 | if (L) { | |||
| 1152 | raw_string_ostream OS(Result); | |||
| 1153 | if (const DebugLoc LoopDbgLoc = L->getStartLoc()) | |||
| 1154 | LoopDbgLoc.print(OS); | |||
| 1155 | else | |||
| 1156 | // Just print the module name. | |||
| 1157 | OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier(); | |||
| 1158 | OS.flush(); | |||
| 1159 | } | |||
| 1160 | return Result; | |||
| 1161 | } | |||
| 1162 | #endif | |||
| 1163 | ||||
| 1164 | void InnerLoopVectorizer::addNewMetadata(Instruction *To, | |||
| 1165 | const Instruction *Orig) { | |||
| 1166 | // If the loop was versioned with memchecks, add the corresponding no-alias | |||
| 1167 | // metadata. | |||
| 1168 | if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig))) | |||
| 1169 | LVer->annotateInstWithNoAlias(To, Orig); | |||
| 1170 | } | |||
| 1171 | ||||
| 1172 | void InnerLoopVectorizer::addMetadata(Instruction *To, | |||
| 1173 | Instruction *From) { | |||
| 1174 | propagateMetadata(To, From); | |||
| 1175 | addNewMetadata(To, From); | |||
| 1176 | } | |||
| 1177 | ||||
| 1178 | void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To, | |||
| 1179 | Instruction *From) { | |||
| 1180 | for (Value *V : To) { | |||
| 1181 | if (Instruction *I = dyn_cast<Instruction>(V)) | |||
| 1182 | addMetadata(I, From); | |||
| 1183 | } | |||
| 1184 | } | |||
| 1185 | ||||
| 1186 | namespace llvm { | |||
| 1187 | ||||
| 1188 | // Hints from the loop-vectorization cost model on how the scalar epilogue | |||
| 1189 | // loop should be lowered. | |||
| 1190 | enum ScalarEpilogueLowering { | |||
| 1191 | ||||
| 1192 | // The default: allowing scalar epilogues. | |||
| 1193 | CM_ScalarEpilogueAllowed, | |||
| 1194 | ||||
| 1195 | // Vectorization with OptForSize: don't allow epilogues. | |||
| 1196 | CM_ScalarEpilogueNotAllowedOptSize, | |||
| 1197 | ||||
| 1198 | // A special case of vectorization with OptForSize: loops with a very small | |||
| 1199 | // trip count are considered for vectorization under OptForSize, thereby | |||
| 1200 | // making sure the cost of their loop body is dominant, free of runtime | |||
| 1201 | // guards and scalar iteration overheads. | |||
| 1202 | CM_ScalarEpilogueNotAllowedLowTripLoop, | |||
| 1203 | ||||
| 1204 | // Loop hint predicate indicating an epilogue is undesired. | |||
| 1205 | CM_ScalarEpilogueNotNeededUsePredicate, | |||
| 1206 | ||||
| 1207 | // Directive indicating we must either tail fold or not vectorize. | |||
| 1208 | CM_ScalarEpilogueNotAllowedUsePredicate | |||
| 1209 | }; | |||
| 1210 | ||||
| 1211 | /// ElementCountComparator creates a total ordering for ElementCount | |||
| 1212 | /// for the purposes of using it in a set structure. | |||
| 1213 | struct ElementCountComparator { | |||
| 1214 | bool operator()(const ElementCount &LHS, const ElementCount &RHS) const { | |||
| 1215 | return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) < | |||
| 1216 | std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue()); | |||
| 1217 | } | |||
| 1218 | }; | |||
| 1219 | using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>; | |||
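| | ||||
| | // Illustration of the ordering defined above: the comparator sorts all fixed | |||
| | // VFs before all scalable ones, each group by its known minimum element | |||
| | // count. Inserting {vscale x 4, 8, 4, vscale x 2} into an ElementCountSet | |||
| | // therefore iterates as: 4, 8, vscale x 2, vscale x 4. | |||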
| 1220 | ||||
| 1221 | /// LoopVectorizationCostModel - estimates the expected speedups due to | |||
| 1222 | /// vectorization. | |||
| 1223 | /// In many cases vectorization is not profitable. This can happen because of | |||
| 1224 | /// a number of reasons. In this class we mainly attempt to predict the | |||
| 1225 | /// expected speedup/slowdowns due to the supported instruction set. We use the | |||
| 1226 | /// TargetTransformInfo to query the different backends for the cost of | |||
| 1227 | /// different operations. | |||
| 1228 | class LoopVectorizationCostModel { | |||
| 1229 | public: | |||
| 1230 | LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L, | |||
| 1231 | PredicatedScalarEvolution &PSE, LoopInfo *LI, | |||
| 1232 | LoopVectorizationLegality *Legal, | |||
| 1233 | const TargetTransformInfo &TTI, | |||
| 1234 | const TargetLibraryInfo *TLI, DemandedBits *DB, | |||
| 1235 | AssumptionCache *AC, | |||
| 1236 | OptimizationRemarkEmitter *ORE, const Function *F, | |||
| 1237 | const LoopVectorizeHints *Hints, | |||
| 1238 | InterleavedAccessInfo &IAI) | |||
| 1239 | : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal), | |||
| 1240 | TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F), | |||
| 1241 | Hints(Hints), InterleaveInfo(IAI) {} | |||
| 1242 | ||||
| 1243 | /// \return An upper bound for the vectorization factors (both fixed and | |||
| 1244 | /// scalable). If the factors are 0, vectorization and interleaving should be | |||
| 1245 | /// avoided up front. | |||
| 1246 | FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC); | |||
| 1247 | ||||
| 1248 | /// \return True if runtime checks are required for vectorization, and false | |||
| 1249 | /// otherwise. | |||
| 1250 | bool runtimeChecksRequired(); | |||
| 1251 | ||||
| 1252 | /// \return The most profitable vectorization factor and the cost of that VF. | |||
| 1253 | /// This method checks every VF in \p CandidateVFs. If UserVF is not zero, | |||
| 1254 | /// then this vectorization factor will be selected if vectorization is | |||
| 1255 | /// possible. | |||
| 1256 | VectorizationFactor | |||
| 1257 | selectVectorizationFactor(const ElementCountSet &CandidateVFs); | |||
| 1258 | ||||
| 1259 | VectorizationFactor | |||
| 1260 | selectEpilogueVectorizationFactor(const ElementCount MaxVF, | |||
| 1261 | const LoopVectorizationPlanner &LVP); | |||
| 1262 | ||||
| 1263 | /// Setup cost-based decisions for user vectorization factor. | |||
| 1264 | /// \return true if the UserVF is a feasible VF to be chosen. | |||
| 1265 | bool selectUserVectorizationFactor(ElementCount UserVF) { | |||
| 1266 | collectUniformsAndScalars(UserVF); | |||
| 1267 | collectInstsToScalarize(UserVF); | |||
| 1268 | return expectedCost(UserVF).first.isValid(); | |||
| 1269 | } | |||
| 1270 | ||||
| 1271 | /// \return The size (in bits) of the smallest and widest types in the code | |||
| 1272 | /// that needs to be vectorized. We ignore values that remain scalar such as | |||
| 1273 | /// 64-bit loop indices. | |||
| 1274 | std::pair<unsigned, unsigned> getSmallestAndWidestTypes(); | |||
| 1275 | ||||
| 1276 | /// \return The desired interleave count. | |||
| 1277 | /// If interleave count has been specified by metadata it will be returned. | |||
| 1278 | /// Otherwise, the interleave count is computed and returned. VF and LoopCost | |||
| 1279 | /// are the selected vectorization factor and the cost of the selected VF. | |||
| 1280 | unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost); | |||
| 1281 | ||||
| 1282 | /// A memory access instruction may be vectorized in more than one way. | |||
| 1283 | /// The form of the instruction after vectorization depends on its cost. | |||
| 1284 | /// This function takes cost-based decisions for Load/Store instructions | |||
| 1285 | /// and collects them in a map. This decision map is used for building | |||
| 1286 | /// the lists of loop-uniform and loop-scalar instructions. | |||
| 1287 | /// The calculated cost is saved with widening decision in order to | |||
| 1288 | /// avoid redundant calculations. | |||
| 1289 | void setCostBasedWideningDecision(ElementCount VF); | |||
| 1290 | ||||
| 1291 | /// A struct that represents some properties of the register usage | |||
| 1292 | /// of a loop. | |||
| 1293 | struct RegisterUsage { | |||
| 1294 | /// Holds the number of loop invariant values that are used in the loop. | |||
| 1295 | /// The key is ClassID of target-provided register class. | |||
| 1296 | SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs; | |||
| 1297 | /// Holds the maximum number of concurrent live intervals in the loop. | |||
| 1298 | /// The key is ClassID of target-provided register class. | |||
| 1299 | SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers; | |||
| 1300 | }; | |||
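| | ||||
| | // Sketch of how a RegisterUsage entry can be interpreted (illustrative; | |||
| | // getNumberOfRegisters(ClassID) is a real TargetTransformInfo query, while RU | |||
| | // and the surrounding logic are hypothetical). | |||
| | // | |||
| | // for (const auto &P : RU.MaxLocalUsers) { | |||
| | //   // Spilling becomes likely once the peak number of live values in a | |||
| | //   // register class exceeds the size of that register file. | |||
| | //   bool MaySpill = P.second > TTI.getNumberOfRegisters(/*ClassID=*/P.first); | |||
| | // } | |||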
| 1301 | ||||
| 1302 | /// \return Returns information about the register usages of the loop for the | |||
| 1303 | /// given vectorization factors. | |||
| 1304 | SmallVector<RegisterUsage, 8> | |||
| 1305 | calculateRegisterUsage(ArrayRef<ElementCount> VFs); | |||
| 1306 | ||||
| 1307 | /// Collect values we want to ignore in the cost model. | |||
| 1308 | void collectValuesToIgnore(); | |||
| 1309 | ||||
| 1310 | /// Collect all element types in the loop for which widening is needed. | |||
| 1311 | void collectElementTypesForWidening(); | |||
| 1312 | ||||
| 1313 | /// Split reductions into those that happen in the loop, and those that happen | |||
| 1314 | /// outside. In-loop reductions are collected into InLoopReductionChains. | |||
| 1315 | void collectInLoopReductions(); | |||
| 1316 | ||||
| 1317 | /// Returns true if we should use strict in-order reductions for the given | |||
| 1318 | /// RdxDesc. This is true if the -enable-strict-reductions flag is passed, | |||
| 1319 | /// the IsOrdered flag of RdxDesc is set and we do not allow reordering | |||
| 1320 | /// of FP operations. | |||
| 1321 | bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) { | |||
| 1322 | return EnableStrictReductions && !Hints->allowReordering() && | |||
| 1323 | RdxDesc.isOrdered(); | |||
| 1324 | } | |||
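| | ||||
| | // Illustration (not definitive lowering): an "ordered" FP reduction must | |||
| | // preserve the scalar loop's association, e.g. | |||
| | //   ((((s + a[0]) + a[1]) + a[2]) + a[3]) | |||
| | // so it is lowered as a strict in-order reduction (llvm.vector.reduce.fadd | |||
| | // with a start value) rather than a reassociated tree of vector fadds. | |||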
| 1325 | ||||
| 1326 | /// \returns The smallest bitwidth each instruction can be represented with. | |||
| 1327 | /// The vector equivalents of these instructions should be truncated to this | |||
| 1328 | /// type. | |||
| 1329 | const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const { | |||
| 1330 | return MinBWs; | |||
| 1331 | } | |||
| 1332 | ||||
| 1333 | /// \returns True if it is more profitable to scalarize instruction \p I for | |||
| 1334 | /// vectorization factor \p VF. | |||
| 1335 | bool isProfitableToScalarize(Instruction *I, ElementCount VF) const { | |||
| 1336 | assert(VF.isVector() && | |||
| 1337 | "Profitable to scalarize relevant only for VF > 1."); | |||
| 1338 | ||||
| 1339 | // Cost model is not run in the VPlan-native path - return conservative | |||
| 1340 | // result until this changes. | |||
| 1341 | if (EnableVPlanNativePath) | |||
| 1342 | return false; | |||
| 1343 | ||||
| 1344 | auto Scalars = InstsToScalarize.find(VF); | |||
| 1345 | assert(Scalars != InstsToScalarize.end() && | |||
| 1346 | "VF not yet analyzed for scalarization profitability"); | |||
| 1347 | return Scalars->second.find(I) != Scalars->second.end(); | |||
| 1348 | } | |||
| 1349 | ||||
| 1350 | /// Returns true if \p I is known to be uniform after vectorization. | |||
| 1351 | bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const { | |||
| 1352 | if (VF.isScalar()) | |||
| 1353 | return true; | |||
| 1354 | ||||
| 1355 | // Cost model is not run in the VPlan-native path - return conservative | |||
| 1356 | // result until this changes. | |||
| 1357 | if (EnableVPlanNativePath) | |||
| 1358 | return false; | |||
| 1359 | ||||
| 1360 | auto UniformsPerVF = Uniforms.find(VF); | |||
| 1361 | assert(UniformsPerVF != Uniforms.end() && | |||
| 1362 | "VF not yet analyzed for uniformity"); | |||
| 1363 | return UniformsPerVF->second.count(I); | |||
| 1364 | } | |||
| 1365 | ||||
| 1366 | /// Returns true if \p I is known to be scalar after vectorization. | |||
| 1367 | bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const { | |||
| 1368 | if (VF.isScalar()) | |||
| 1369 | return true; | |||
| 1370 | ||||
| 1371 | // Cost model is not run in the VPlan-native path - return conservative | |||
| 1372 | // result until this changes. | |||
| 1373 | if (EnableVPlanNativePath) | |||
| 1374 | return false; | |||
| 1375 | ||||
| 1376 | auto ScalarsPerVF = Scalars.find(VF); | |||
| 1377 | assert(ScalarsPerVF != Scalars.end() && | |||
| 1378 | "Scalar values are not calculated for VF"); | |||
| 1379 | return ScalarsPerVF->second.count(I); | |||
| 1380 | } | |||
| 1381 | ||||
| 1382 | /// \returns True if instruction \p I can be truncated to a smaller bitwidth | |||
| 1383 | /// for vectorization factor \p VF. | |||
| 1384 | bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const { | |||
| 1385 | return VF.isVector() && MinBWs.find(I) != MinBWs.end() && | |||
| 1386 | !isProfitableToScalarize(I, VF) && | |||
| 1387 | !isScalarAfterVectorization(I, VF); | |||
| 1388 | } | |||
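| | ||||
| | // Illustration (hypothetical values): if an i32 computation only feeds uses | |||
| | // that need 8 bits, MinBWs may record 8 for it, and for a vector VF the | |||
| | // operation is performed on <VF x i8> with the surrounding extend/truncate | |||
| | // shrunk away, provided the instruction is neither scalar after | |||
| | // vectorization nor profitable to scalarize. | |||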
| 1389 | ||||
| 1390 | /// Decision that was taken during cost calculation for memory instruction. | |||
| 1391 | enum InstWidening { | |||
| 1392 | CM_Unknown, | |||
| 1393 | CM_Widen, // For consecutive accesses with stride +1. | |||
| 1394 | CM_Widen_Reverse, // For consecutive accesses with stride -1. | |||
| 1395 | CM_Interleave, | |||
| 1396 | CM_GatherScatter, | |||
| 1397 | CM_Scalarize | |||
| 1398 | }; | |||
| 1399 | ||||
| 1400 | /// Save vectorization decision \p W and \p Cost taken by the cost model for | |||
| 1401 | /// instruction \p I and vector width \p VF. | |||
| 1402 | void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W, | |||
| 1403 | InstructionCost Cost) { | |||
| 1404 | assert(VF.isVector() && "Expected VF >=2")((void)0); | |||
| 1405 | WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost); | |||
| 1406 | } | |||
| 1407 | ||||
| 1408 | /// Save vectorization decision \p W and \p Cost taken by the cost model for | |||
| 1409 | /// interleaving group \p Grp and vector width \p VF. | |||
| 1410 | void setWideningDecision(const InterleaveGroup<Instruction> *Grp, | |||
| 1411 | ElementCount VF, InstWidening W, | |||
| 1412 | InstructionCost Cost) { | |||
| 1413 | assert(VF.isVector() && "Expected VF >=2"); | |||
| 1414 | // Broadcast this decision to all instructions inside the group. | |||
| 1415 | // But the cost will be assigned to one instruction only. | |||
| 1416 | for (unsigned i = 0; i < Grp->getFactor(); ++i) { | |||
| 1417 | if (auto *I = Grp->getMember(i)) { | |||
| 1418 | if (Grp->getInsertPos() == I) | |||
| 1419 | WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost); | |||
| 1420 | else | |||
| 1421 | WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0); | |||
| 1422 | } | |||
| 1423 | } | |||
| 1424 | } | |||
| 1425 | ||||
| 1426 | /// Return the cost model decision for the given instruction \p I and vector | |||
| 1427 | /// width \p VF. Return CM_Unknown if this instruction did not pass | |||
| 1428 | /// through the cost modeling. | |||
| 1429 | InstWidening getWideningDecision(Instruction *I, ElementCount VF) const { | |||
| 1430 | assert(VF.isVector() && "Expected VF to be a vector VF"); | |||
| 1431 | // Cost model is not run in the VPlan-native path - return conservative | |||
| 1432 | // result until this changes. | |||
| 1433 | if (EnableVPlanNativePath) | |||
| 1434 | return CM_GatherScatter; | |||
| 1435 | ||||
| 1436 | std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF); | |||
| 1437 | auto Itr = WideningDecisions.find(InstOnVF); | |||
| 1438 | if (Itr == WideningDecisions.end()) | |||
| 1439 | return CM_Unknown; | |||
| 1440 | return Itr->second.first; | |||
| 1441 | } | |||
| 1442 | ||||
| 1443 | /// Return the vectorization cost for the given instruction \p I and vector | |||
| 1444 | /// width \p VF. | |||
| 1445 | InstructionCost getWideningCost(Instruction *I, ElementCount VF) { | |||
| 1446 | assert(VF.isVector() && "Expected VF >=2"); | |||
| 1447 | std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF); | |||
| 1448 | assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() && | |||
| 1449 | "The cost is not calculated"); | |||
| 1450 | return WideningDecisions[InstOnVF].second; | |||
| 1451 | } | |||
| 1452 | ||||
| 1453 | /// Return True if instruction \p I is an optimizable truncate whose operand | |||
| 1454 | /// is an induction variable. Such a truncate will be removed by adding a new | |||
| 1455 | /// induction variable with the destination type. | |||
| 1456 | bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) { | |||
| 1457 | // If the instruction is not a truncate, return false. | |||
| 1458 | auto *Trunc = dyn_cast<TruncInst>(I); | |||
| 1459 | if (!Trunc) | |||
| 1460 | return false; | |||
| 1461 | ||||
| 1462 | // Get the source and destination types of the truncate. | |||
| 1463 | Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF); | |||
| 1464 | Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF); | |||
| 1465 | ||||
| 1466 | // If the truncate is free for the given types, return false. Replacing a | |||
| 1467 | // free truncate with an induction variable would add an induction variable | |||
| 1468 | // update instruction to each iteration of the loop. We exclude from this | |||
| 1469 | // check the primary induction variable since it will need an update | |||
| 1470 | // instruction regardless. | |||
| 1471 | Value *Op = Trunc->getOperand(0); | |||
| 1472 | if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy)) | |||
| 1473 | return false; | |||
| 1474 | ||||
| 1475 | // If the truncated value is not an induction variable, return false. | |||
| 1476 | return Legal->isInductionPhi(Op); | |||
| 1477 | } | |||
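| | ||||
| | // Illustration (hypothetical IR): given an i64 induction variable %iv and a | |||
| | // use such as | |||
| | //   %t = trunc i64 %iv to i32 | |||
| | // the truncate is "optimizable": rather than widening %iv and truncating | |||
| | // every lane, the vectorizer can introduce a fresh i32 induction that | |||
| | // produces %t's value directly. | |||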
| 1478 | ||||
| 1479 | /// Collects the instructions to scalarize for each predicated instruction in | |||
| 1480 | /// the loop. | |||
| 1481 | void collectInstsToScalarize(ElementCount VF); | |||
| 1482 | ||||
| 1483 | /// Collect Uniform and Scalar values for the given \p VF. | |||
| 1484 | /// The sets depend on CM decision for Load/Store instructions | |||
| 1485 | /// that may be vectorized as interleave, gather-scatter or scalarized. | |||
| 1486 | void collectUniformsAndScalars(ElementCount VF) { | |||
| 1487 | // Do the analysis once. | |||
| 1488 | if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end()) | |||
| 1489 | return; | |||
| 1490 | setCostBasedWideningDecision(VF); | |||
| 1491 | collectLoopUniforms(VF); | |||
| 1492 | collectLoopScalars(VF); | |||
| 1493 | } | |||
| 1494 | ||||
| 1495 | /// Returns true if the target machine supports masked store operation | |||
| 1496 | /// for the given \p DataType and kind of access to \p Ptr. | |||
| 1497 | bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const { | |||
| 1498 | return Legal->isConsecutivePtr(Ptr) && | |||
| 1499 | TTI.isLegalMaskedStore(DataType, Alignment); | |||
| 1500 | } | |||
| 1501 | ||||
| 1502 | /// Returns true if the target machine supports masked load operation | |||
| 1503 | /// for the given \p DataType and kind of access to \p Ptr. | |||
| 1504 | bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const { | |||
| 1505 | return Legal->isConsecutivePtr(Ptr) && | |||
| 1506 | TTI.isLegalMaskedLoad(DataType, Alignment); | |||
| 1507 | } | |||
| 1508 | ||||
| 1509 | /// Returns true if the target machine can represent \p V as a masked gather | |||
| 1510 | /// or scatter operation. | |||
| 1511 | bool isLegalGatherOrScatter(Value *V) { | |||
| 1512 | bool LI = isa<LoadInst>(V); | |||
| 1513 | bool SI = isa<StoreInst>(V); | |||
| 1514 | if (!LI && !SI) | |||
| 1515 | return false; | |||
| 1516 | auto *Ty = getLoadStoreType(V); | |||
| 1517 | Align Align = getLoadStoreAlignment(V); | |||
| 1518 | return (LI && TTI.isLegalMaskedGather(Ty, Align)) || | |||
| 1519 | (SI && TTI.isLegalMaskedScatter(Ty, Align)); | |||
| 1520 | } | |||
| 1521 | ||||
| 1522 | /// Returns true if the target machine supports all of the reduction | |||
| 1523 | /// variables found for the given VF. | |||
| 1524 | bool canVectorizeReductions(ElementCount VF) const { | |||
| 1525 | return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { | |||
| 1526 | const RecurrenceDescriptor &RdxDesc = Reduction.second; | |||
| 1527 | return TTI.isLegalToVectorizeReduction(RdxDesc, VF); | |||
| 1528 | })); | |||
| 1529 | } | |||
| 1530 | ||||
| 1531 | /// Returns true if \p I is an instruction that will be scalarized with | |||
| 1532 | /// predication. Such instructions include conditional stores and | |||
| 1533 | /// instructions that may divide by zero. | |||
| 1534 | /// If a non-zero VF has been calculated, we check if I will be scalarized | |||
| 1535 | /// with predication for that VF. | |||
| 1536 | bool isScalarWithPredication(Instruction *I) const; | |||
| 1537 | ||||
| 1538 | // Returns true if \p I is an instruction that will be predicated either | |||
| 1539 | // through scalar predication or masked load/store or masked gather/scatter. | |||
| 1540 | // Superset of instructions that return true for isScalarWithPredication. | |||
| 1541 | bool isPredicatedInst(Instruction *I) { | |||
| 1542 | if (!blockNeedsPredication(I->getParent())) | |||
| 1543 | return false; | |||
| 1544 | // Loads and stores that need some form of masked operation are predicated | |||
| 1545 | // instructions. | |||
| 1546 | if (isa<LoadInst>(I) || isa<StoreInst>(I)) | |||
| 1547 | return Legal->isMaskRequired(I); | |||
| 1548 | return isScalarWithPredication(I); | |||
| 1549 | } | |||
| 1550 | ||||
| 1551 | /// Returns true if \p I is a memory instruction with consecutive memory | |||
| 1552 | /// access that can be widened. | |||
| 1553 | bool | |||
| 1554 | memoryInstructionCanBeWidened(Instruction *I, | |||
| 1555 | ElementCount VF = ElementCount::getFixed(1)); | |||
| 1556 | ||||
| 1557 | /// Returns true if \p I is a memory instruction in an interleaved-group | |||
| 1558 | /// of memory accesses that can be vectorized with wide vector loads/stores | |||
| 1559 | /// and shuffles. | |||
| 1560 | bool | |||
| 1561 | interleavedAccessCanBeWidened(Instruction *I, | |||
| 1562 | ElementCount VF = ElementCount::getFixed(1)); | |||
| 1563 | ||||
| 1564 | /// Check if \p Instr belongs to any interleaved access group. | |||
| 1565 | bool isAccessInterleaved(Instruction *Instr) { | |||
| 1566 | return InterleaveInfo.isInterleaved(Instr); | |||
| 1567 | } | |||
| 1568 | ||||
| 1569 | /// Get the interleaved access group that \p Instr belongs to. | |||
| 1570 | const InterleaveGroup<Instruction> * | |||
| 1571 | getInterleavedAccessGroup(Instruction *Instr) { | |||
| 1572 | return InterleaveInfo.getInterleaveGroup(Instr); | |||
| 1573 | } | |||
| 1574 | ||||
| 1575 | /// Returns true if we're required to use a scalar epilogue for at least | |||
| 1576 | /// the final iteration of the original loop. | |||
| 1577 | bool requiresScalarEpilogue(ElementCount VF) const { | |||
| 1578 | if (!isScalarEpilogueAllowed()) | |||
| 1579 | return false; | |||
| 1580 | // If we might exit from anywhere but the latch, we must run the exiting | |||
| 1581 | // iteration in scalar form. | |||
| 1582 | if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) | |||
| 1583 | return true; | |||
| 1584 | return VF.isVector() && InterleaveInfo.requiresScalarEpilogue(); | |||
| 1585 | } | |||
| 1586 | ||||
| 1587 | /// Returns true if a scalar epilogue is not allowed due to optsize or a | |||
| 1588 | /// loop hint annotation. | |||
| 1589 | bool isScalarEpilogueAllowed() const { | |||
| 1590 | return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed; | |||
| 1591 | } | |||
| 1592 | ||||
| 1593 | /// Returns true if all loop blocks should be masked to fold the tail of the loop. | |||
| 1594 | bool foldTailByMasking() const { return FoldTailByMasking; } | |||
| 1595 | ||||
| 1596 | bool blockNeedsPredication(BasicBlock *BB) const { | |||
| 1597 | return foldTailByMasking() || Legal->blockNeedsPredication(BB); | |||
| 1598 | } | |||
| 1599 | ||||
| 1600 | /// A SmallMapVector to store the InLoop reduction op chains, mapping phi | |||
| 1601 | /// nodes to the chain of instructions representing the reductions. Uses a | |||
| 1602 | /// MapVector to ensure deterministic iteration order. | |||
| 1603 | using ReductionChainMap = | |||
| 1604 | SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>; | |||
| 1605 | ||||
| 1606 | /// Return the chain of instructions representing an in-loop reduction. | |||
| 1607 | const ReductionChainMap &getInLoopReductionChains() const { | |||
| 1608 | return InLoopReductionChains; | |||
| 1609 | } | |||
| 1610 | ||||
| 1611 | /// Returns true if the Phi is part of an in-loop reduction. | |||
| 1612 | bool isInLoopReduction(PHINode *Phi) const { | |||
| 1613 | return InLoopReductionChains.count(Phi); | |||
| 1614 | } | |||
| 1615 | ||||
| 1616 | /// Estimate cost of an intrinsic call instruction CI if it were vectorized | |||
| 1617 | /// with factor VF. Return the cost of the instruction, including | |||
| 1618 | /// scalarization overhead if it's needed. | |||
| 1619 | InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const; | |||
| 1620 | ||||
| 1621 | /// Estimate cost of a call instruction CI if it were vectorized with factor | |||
| 1622 | /// VF. Return the cost of the instruction, including scalarization overhead | |||
| 1623 | /// if it's needed. The flag NeedToScalarize shows if the call needs to be | |||
| 1624 | /// scalarized, | |||
| 1625 | /// i.e. either a vector version isn't available or it is too expensive. | |||
| 1626 | InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF, | |||
| 1627 | bool &NeedToScalarize) const; | |||
| 1628 | ||||
| 1629 | /// Returns true if the per-lane cost of VectorizationFactor A is lower than | |||
| 1630 | /// that of B. | |||
| 1631 | bool isMoreProfitable(const VectorizationFactor &A, | |||
| 1632 | const VectorizationFactor &B) const; | |||
| 1633 | ||||
| 1634 | /// Invalidates decisions already taken by the cost model. | |||
| 1635 | void invalidateCostModelingDecisions() { | |||
| 1636 | WideningDecisions.clear(); | |||
| 1637 | Uniforms.clear(); | |||
| 1638 | Scalars.clear(); | |||
| 1639 | } | |||
| 1640 | ||||
| 1641 | private: | |||
| 1642 | unsigned NumPredStores = 0; | |||
| 1643 | ||||
| 1644 | /// \return An upper bound for the vectorization factors for both | |||
| 1645 | /// fixed and scalable vectorization, where the minimum-known number of | |||
| 1646 | /// elements is a power-of-2 larger than zero. If scalable vectorization is | |||
| 1647 | /// disabled or unsupported, then the scalable part will be equal to | |||
| 1648 | /// ElementCount::getScalable(0). | |||
| 1649 | FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount, | |||
| 1650 | ElementCount UserVF); | |||
| 1651 | ||||
| 1652 | /// \return the maximized element count based on the target's vector | |||
| 1653 | /// registers and the loop trip-count, but limited to a maximum safe VF. | |||
| 1654 | /// This is a helper function of computeFeasibleMaxVF. | |||
| 1655 | /// FIXME: MaxSafeVF is currently passed by reference to avoid some obscure | |||
| 1656 | /// issue that occurred on one of the buildbots which cannot be reproduced | |||
| 1657 | /// without having access to the proprietary compiler (see comments on | |||
| 1658 | /// D98509). The issue is currently under investigation and this workaround | |||
| 1659 | /// will be removed as soon as possible. | |||
| 1660 | ElementCount getMaximizedVFForTarget(unsigned ConstTripCount, | |||
| 1661 | unsigned SmallestType, | |||
| 1662 | unsigned WidestType, | |||
| 1663 | const ElementCount &MaxSafeVF); | |||
| 1664 | ||||
| 1665 | /// \return the maximum legal scalable VF, based on the safe max number | |||
| 1666 | /// of elements. | |||
| 1667 | ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements); | |||
| 1668 | ||||
| 1669 | /// The vectorization cost is a combination of the cost itself and a boolean | |||
| 1670 | /// indicating whether any of the contributing operations will actually | |||
| 1671 | /// operate on vector values after type legalization in the backend. If this | |||
| 1672 | /// latter value is false, then all operations will be scalarized (i.e. no | |||
| 1673 | /// vectorization has actually taken place). | |||
| 1674 | using VectorizationCostTy = std::pair<InstructionCost, bool>; | |||
| 1675 | ||||
| 1676 | /// Returns the expected execution cost. The unit of the cost does | |||
| 1677 | /// not matter because we use the 'cost' units to compare different | |||
| 1678 | /// vector widths. The cost that is returned is *not* normalized by | |||
| 1679 | /// the factor width. If \p Invalid is not nullptr, this function | |||
| 1680 | /// will add a pair(Instruction*, ElementCount) to \p Invalid for | |||
| 1681 | /// each instruction that has an Invalid cost for the given VF. | |||
| 1682 | using InstructionVFPair = std::pair<Instruction *, ElementCount>; | |||
| 1683 | VectorizationCostTy | |||
| 1684 | expectedCost(ElementCount VF, | |||
| 1685 | SmallVectorImpl<InstructionVFPair> *Invalid = nullptr); | |||
| 1686 | ||||
| 1687 | /// Returns the execution time cost of an instruction for a given vector | |||
| 1688 | /// width. Vector width of one means scalar. | |||
| 1689 | VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF); | |||
| 1690 | ||||
| 1691 | /// The cost-computation logic from getInstructionCost which provides | |||
| 1692 | /// the vector type as an output parameter. | |||
| 1693 | InstructionCost getInstructionCost(Instruction *I, ElementCount VF, | |||
| 1694 | Type *&VectorTy); | |||
| 1695 | ||||
| 1696 | /// Return the cost of instructions in an inloop reduction pattern, if I is | |||
| 1697 | /// part of that pattern. | |||
| 1698 | Optional<InstructionCost> | |||
| 1699 | getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy, | |||
| 1700 | TTI::TargetCostKind CostKind); | |||
| 1701 | ||||
| 1702 | /// Calculate vectorization cost of memory instruction \p I. | |||
| 1703 | InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF); | |||
| 1704 | ||||
| 1705 | /// The cost computation for a scalarized memory instruction. | |||
| 1706 | InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF); | |||
| 1707 | ||||
| 1708 | /// The cost computation for an interleaving group of memory instructions. | |||
| 1709 | InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF); | |||
| 1710 | ||||
| 1711 | /// The cost computation for a Gather/Scatter instruction. | |||
| 1712 | InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF); | |||
| 1713 | ||||
| 1714 | /// The cost computation for widening instruction \p I with consecutive | |||
| 1715 | /// memory access. | |||
| 1716 | InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF); | |||
| 1717 | ||||
| 1718 | /// The cost calculation for a Load/Store instruction \p I with a uniform pointer: | |||
| 1719 | /// Load: scalar load + broadcast. | |||
| 1720 | /// Store: scalar store + (loop invariant value stored? 0 : extract of last | |||
| 1721 | /// element) | |||
| 1722 | InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF); | |||
| 1723 | ||||
| 1724 | /// Estimate the overhead of scalarizing an instruction. This is a | |||
| 1725 | /// convenience wrapper for the type-based getScalarizationOverhead API. | |||
| 1726 | InstructionCost getScalarizationOverhead(Instruction *I, | |||
| 1727 | ElementCount VF) const; | |||
| 1728 | ||||
| 1729 | /// Returns whether the instruction is a load or store and will be emitted | |||
| 1730 | /// as a vector operation. | |||
| 1731 | bool isConsecutiveLoadOrStore(Instruction *I); | |||
| 1732 | ||||
| 1733 | /// Returns true if an artificially high cost for emulated masked memrefs | |||
| 1734 | /// should be used. | |||
| 1735 | bool useEmulatedMaskMemRefHack(Instruction *I); | |||
| 1736 | ||||
| 1737 | /// Map of scalar integer values to the smallest bitwidth they can be legally | |||
| 1738 | /// represented as. The vector equivalents of these values should be truncated | |||
| 1739 | /// to this type. | |||
| 1740 | MapVector<Instruction *, uint64_t> MinBWs; | |||
| 1741 | ||||
| 1742 | /// A type representing the costs for instructions if they were to be | |||
| 1743 | /// scalarized rather than vectorized. The entries are Instruction-Cost | |||
| 1744 | /// pairs. | |||
| 1745 | using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>; | |||
| 1746 | ||||
| 1747 | /// A set containing all BasicBlocks that are known to be present after | |||
| 1748 | /// vectorization as predicated blocks. | |||
| 1749 | SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization; | |||
| 1750 | ||||
| 1751 | /// Records whether it is allowed to have the original scalar loop execute at | |||
| 1752 | /// least once. This may be needed as a fallback loop in case runtime | |||
| 1753 | /// aliasing/dependence checks fail, or to handle the tail/remainder | |||
| 1754 | /// iterations when the trip count is unknown or is not a multiple of the VF, | |||
| 1755 | /// or as a peel-loop to handle gaps in interleave-groups. | |||
| 1756 | /// Under optsize and when the trip count is very small we don't allow any | |||
| 1757 | /// iterations to execute in the scalar loop. | |||
| 1758 | ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; | |||
| 1759 | ||||
| 1760 | /// All blocks of the loop are to be masked to fold the tail of scalar iterations. | |||
| 1761 | bool FoldTailByMasking = false; | |||
| 1762 | ||||
| 1763 | /// A map holding scalar costs for different vectorization factors. The | |||
| 1764 | /// presence of a cost for an instruction in the mapping indicates that the | |||
| 1765 | /// instruction will be scalarized when vectorizing with the associated | |||
| 1766 | /// vectorization factor. The entries are VF-ScalarCostTy pairs. | |||
| 1767 | DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize; | |||
| 1768 | ||||
| 1769 | /// Holds the instructions known to be uniform after vectorization. | |||
| 1770 | /// The data is collected per VF. | |||
| 1771 | DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms; | |||
| 1772 | ||||
| 1773 | /// Holds the instructions known to be scalar after vectorization. | |||
| 1774 | /// The data is collected per VF. | |||
| 1775 | DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars; | |||
| 1776 | ||||
| 1777 | /// Holds the instructions (address computations) that are forced to be | |||
| 1778 | /// scalarized. | |||
| 1779 | DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars; | |||
| 1780 | ||||
| 1781 | /// PHINodes of the reductions that should be expanded in-loop along with | |||
| 1782 | /// their associated chains of reduction operations, in program order from top | |||
| 1783 | /// (PHI) to bottom. | |||
| 1784 | ReductionChainMap InLoopReductionChains; | |||
| 1785 | ||||
| 1786 | /// A Map of inloop reduction operations and their immediate chain operand. | |||
| 1787 | /// FIXME: This can be removed once reductions can be costed correctly in | |||
| 1788 | /// vplan. This was added to allow quick lookup to the inloop operations, | |||
| 1789 | /// without having to loop through InLoopReductionChains. | |||
| 1790 | DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains; | |||
| 1791 | ||||
| 1792 | /// Returns the expected difference in cost from scalarizing the expression | |||
| 1793 | /// feeding a predicated instruction \p PredInst. The instructions to | |||
| 1794 | /// scalarize and their scalar costs are collected in \p ScalarCosts. A | |||
| 1795 | /// non-negative return value implies the expression will be scalarized. | |||
| 1796 | /// Currently, only single-use chains are considered for scalarization. | |||
| 1797 | int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts, | |||
| 1798 | ElementCount VF); | |||
| 1799 | ||||
| 1800 | /// Collect the instructions that are uniform after vectorization. An | |||
| 1801 | /// instruction is uniform if we represent it with a single scalar value in | |||
| 1802 | /// the vectorized loop corresponding to each vector iteration. Examples of | |||
| 1803 | /// uniform instructions include pointer operands of consecutive or | |||
| 1804 | /// interleaved memory accesses. Note that although uniformity implies an | |||
| 1805 | /// instruction will be scalar, the reverse is not true. In general, a | |||
| 1806 | /// scalarized instruction will be represented by VF scalar values in the | |||
| 1807 | /// vectorized loop, each corresponding to an iteration of the original | |||
| 1808 | /// scalar loop. | |||
| 1809 | void collectLoopUniforms(ElementCount VF); | |||
| 1810 | ||||
| 1811 | /// Collect the instructions that are scalar after vectorization. An | |||
| 1812 | /// instruction is scalar if it is known to be uniform or will be scalarized | |||
| 1813 | /// during vectorization. Non-uniform scalarized instructions will be | |||
| 1814 | /// represented by VF values in the vectorized loop, each corresponding to an | |||
| 1815 | /// iteration of the original scalar loop. | |||
| 1816 | void collectLoopScalars(ElementCount VF); | |||
| 1817 | ||||
| 1818 | /// Keeps cost model vectorization decision and cost for instructions. | |||
| 1819 | /// Right now it is used for memory instructions only. | |||
| 1820 | using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>, | |||
| 1821 | std::pair<InstWidening, InstructionCost>>; | |||
| 1822 | ||||
| 1823 | DecisionList WideningDecisions; | |||
| 1824 | ||||
| 1825 | /// Returns true if \p V is expected to be vectorized and it needs to be | |||
| 1826 | /// extracted. | |||
| 1827 | bool needsExtract(Value *V, ElementCount VF) const { | |||
| 1828 | Instruction *I = dyn_cast<Instruction>(V); | |||
| 1829 | if (VF.isScalar() || !I || !TheLoop->contains(I) || | |||
| 1830 | TheLoop->isLoopInvariant(I)) | |||
| 1831 | return false; | |||
| 1832 | ||||
| 1833 | // Assume we can vectorize V (and hence we need extraction) if the | |||
| 1834 | // scalars are not computed yet. This can happen because it is called | |||
| 1835 | // via getScalarizationOverhead from setCostBasedWideningDecision, before | |||
| 1836 | // the scalars are collected. That should be a safe assumption in most | |||
| 1837 | // cases, because we check if the operands have vectorizable types | |||
| 1838 | // beforehand in LoopVectorizationLegality. | |||
| 1839 | return Scalars.find(VF) == Scalars.end() || | |||
| 1840 | !isScalarAfterVectorization(I, VF); | |||
| 1841 | }; | |||
| 1842 | ||||
| 1843 | /// Returns a range containing only operands needing to be extracted. | |||
| 1844 | SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops, | |||
| 1845 | ElementCount VF) const { | |||
| 1846 | return SmallVector<Value *, 4>(make_filter_range( | |||
| 1847 | Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); })); | |||
| 1848 | } | |||
| 1849 | ||||
| 1850 | /// Determines if we have the infrastructure to vectorize loop \p L and its | |||
| 1851 | /// epilogue, assuming the main loop is vectorized by \p VF. | |||
| 1852 | bool isCandidateForEpilogueVectorization(const Loop &L, | |||
| 1853 | const ElementCount VF) const; | |||
| 1854 | ||||
| 1855 | /// Returns true if epilogue vectorization is considered profitable, and | |||
| 1856 | /// false otherwise. | |||
| 1857 | /// \p VF is the vectorization factor chosen for the original loop. | |||
| 1858 | bool isEpilogueVectorizationProfitable(const ElementCount VF) const; | |||
| 1859 | ||||
| 1860 | public: | |||
| 1861 | /// The loop that we evaluate. | |||
| 1862 | Loop *TheLoop; | |||
| 1863 | ||||
| 1864 | /// Predicated scalar evolution analysis. | |||
| 1865 | PredicatedScalarEvolution &PSE; | |||
| 1866 | ||||
| 1867 | /// Loop Info analysis. | |||
| 1868 | LoopInfo *LI; | |||
| 1869 | ||||
| 1870 | /// Vectorization legality. | |||
| 1871 | LoopVectorizationLegality *Legal; | |||
| 1872 | ||||
| 1873 | /// Vector target information. | |||
| 1874 | const TargetTransformInfo &TTI; | |||
| 1875 | ||||
| 1876 | /// Target Library Info. | |||
| 1877 | const TargetLibraryInfo *TLI; | |||
| 1878 | ||||
| 1879 | /// Demanded bits analysis. | |||
| 1880 | DemandedBits *DB; | |||
| 1881 | ||||
| 1882 | /// Assumption cache. | |||
| 1883 | AssumptionCache *AC; | |||
| 1884 | ||||
| 1885 | /// Interface to emit optimization remarks. | |||
| 1886 | OptimizationRemarkEmitter *ORE; | |||
| 1887 | ||||
| 1888 | const Function *TheFunction; | |||
| 1889 | ||||
| 1890 | /// Loop Vectorize Hint. | |||
| 1891 | const LoopVectorizeHints *Hints; | |||
| 1892 | ||||
| 1893 | /// The interleave access information contains groups of interleaved accesses | |||
| 1894 | /// with the same stride and close to each other. | |||
| 1895 | InterleavedAccessInfo &InterleaveInfo; | |||
| 1896 | ||||
| 1897 | /// Values to ignore in the cost model. | |||
| 1898 | SmallPtrSet<const Value *, 16> ValuesToIgnore; | |||
| 1899 | ||||
| 1900 | /// Values to ignore in the cost model when VF > 1. | |||
| 1901 | SmallPtrSet<const Value *, 16> VecValuesToIgnore; | |||
| 1902 | ||||
| 1903 | /// All element types found in the loop. | |||
| 1904 | SmallPtrSet<Type *, 16> ElementTypesInLoop; | |||
| 1905 | ||||
| 1906 | /// Profitable vector factors. | |||
| 1907 | SmallVector<VectorizationFactor, 8> ProfitableVFs; | |||
| 1908 | }; | |||
| 1909 | } // end namespace llvm | |||
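| | ||||
| | // Sketch of the cost model's expected calling sequence (illustrative; it | |||
| | // assumes VectorizationFactor exposes a Width, and CandidateVFs and LoopCost | |||
| | // are hypothetical values). | |||
| | // | |||
| | // LoopVectorizationCostModel CM(/*...*/); | |||
| | // FixedScalableVFPair MaxVF = CM.computeMaxVF(UserVF, UserIC); | |||
| | // // Build the candidate VFs up to MaxVF, then pick the cheapest per lane. | |||
| | // VectorizationFactor VF = CM.selectVectorizationFactor(CandidateVFs); | |||
| | // unsigned IC = CM.selectInterleaveCount(VF.Width, LoopCost); | |||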
| 1910 | ||||
| 1911 | /// Helper struct to manage generating runtime checks for vectorization. | |||
| 1912 | /// | |||
| 1913 | /// The runtime checks are created up-front in temporary blocks, un-linked from | |||
| 1914 | /// the existing IR, to allow better estimation of their cost. After deciding to | |||
| 1915 | /// vectorize, the checks are moved back. If deciding not to vectorize, the | |||
| 1916 | /// temporary blocks are completely removed. | |||
| 1917 | class GeneratedRTChecks { | |||
| 1918 | /// Basic block which contains the generated SCEV checks, if any. | |||
| 1919 | BasicBlock *SCEVCheckBlock = nullptr; | |||
| 1920 | ||||
| 1921 | /// The value representing the result of the generated SCEV checks. If it is | |||
| 1922 | /// nullptr, either no SCEV checks have been generated or they have been used. | |||
| 1923 | Value *SCEVCheckCond = nullptr; | |||
| 1924 | ||||
| 1925 | /// Basic block which contains the generated memory runtime checks, if any. | |||
| 1926 | BasicBlock *MemCheckBlock = nullptr; | |||
| 1927 | ||||
| 1928 | /// The value representing the result of the generated memory runtime checks. | |||
| 1929 | /// If it is nullptr, either no memory runtime checks have been generated or | |||
| 1930 | /// they have been used. | |||
| 1931 | Instruction *MemRuntimeCheckCond = nullptr; | |||
| 1932 | ||||
| 1933 | DominatorTree *DT; | |||
| 1934 | LoopInfo *LI; | |||
| 1935 | ||||
| 1936 | SCEVExpander SCEVExp; | |||
| 1937 | SCEVExpander MemCheckExp; | |||
| 1938 | ||||
| 1939 | public: | |||
| 1940 | GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI, | |||
| 1941 | const DataLayout &DL) | |||
| 1942 | : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"), | |||
| 1943 | MemCheckExp(SE, DL, "scev.check") {} | |||
| 1944 | ||||
| 1945 | /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can | |||
| 1946 | /// accurately estimate the cost of the runtime checks. The blocks are | |||
| 1947 | /// un-linked from the IR and are added back during vector code generation. If | |||
| 1948 | /// there is no vector code generation, the check blocks are removed | |||
| 1949 | /// completely. | |||
| 1950 | void Create(Loop *L, const LoopAccessInfo &LAI, | |||
| 1951 | const SCEVUnionPredicate &UnionPred) { | |||
| 1952 | ||||
| 1953 | BasicBlock *LoopHeader = L->getHeader(); | |||
| 1954 | BasicBlock *Preheader = L->getLoopPreheader(); | |||
| 1955 | ||||
| 1956 | // Use SplitBlock to create blocks for SCEV & memory runtime checks to | |||
| 1957 | // ensure the blocks are properly added to LoopInfo & DominatorTree. Those | |||
| 1958 | // may be used by SCEVExpander. The blocks will be un-linked from their | |||
| 1959 | // predecessors and removed from LI & DT at the end of the function. | |||
| 1960 | if (!UnionPred.isAlwaysTrue()) { | |||
| 1961 | SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI, | |||
| 1962 | nullptr, "vector.scevcheck"); | |||
| 1963 | ||||
| 1964 | SCEVCheckCond = SCEVExp.expandCodeForPredicate( | |||
| 1965 | &UnionPred, SCEVCheckBlock->getTerminator()); | |||
| 1966 | } | |||
| 1967 | ||||
| 1968 | const auto &RtPtrChecking = *LAI.getRuntimePointerChecking(); | |||
| 1969 | if (RtPtrChecking.Need) { | |||
| 1970 | auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader; | |||
| 1971 | MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr, | |||
| 1972 | "vector.memcheck"); | |||
| 1973 | ||||
| 1974 | std::tie(std::ignore, MemRuntimeCheckCond) = | |||
| 1975 | addRuntimeChecks(MemCheckBlock->getTerminator(), L, | |||
| 1976 | RtPtrChecking.getChecks(), MemCheckExp); | |||
| 1977 | assert(MemRuntimeCheckCond && | |||
| 1978 | "no RT checks generated although RtPtrChecking " | |||
| 1979 | "claimed checks are required"); | |||
| 1980 | } | |||
| 1981 | ||||
| 1982 | if (!MemCheckBlock && !SCEVCheckBlock) | |||
| 1983 | return; | |||
| 1984 | ||||
| 1985 | // Unhook the temporary block with the checks, update various places | |||
| 1986 | // accordingly. | |||
| 1987 | if (SCEVCheckBlock) | |||
| 1988 | SCEVCheckBlock->replaceAllUsesWith(Preheader); | |||
| 1989 | if (MemCheckBlock) | |||
| 1990 | MemCheckBlock->replaceAllUsesWith(Preheader); | |||
| 1991 | ||||
| 1992 | if (SCEVCheckBlock) { | |||
| 1993 | SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator()); | |||
| 1994 | new UnreachableInst(Preheader->getContext(), SCEVCheckBlock); | |||
| 1995 | Preheader->getTerminator()->eraseFromParent(); | |||
| 1996 | } | |||
| 1997 | if (MemCheckBlock) { | |||
| 1998 | MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator()); | |||
| 1999 | new UnreachableInst(Preheader->getContext(), MemCheckBlock); | |||
| 2000 | Preheader->getTerminator()->eraseFromParent(); | |||
| 2001 | } | |||
| 2002 | ||||
| 2003 | DT->changeImmediateDominator(LoopHeader, Preheader); | |||
| 2004 | if (MemCheckBlock) { | |||
| 2005 | DT->eraseNode(MemCheckBlock); | |||
| 2006 | LI->removeBlock(MemCheckBlock); | |||
| 2007 | } | |||
| 2008 | if (SCEVCheckBlock) { | |||
| 2009 | DT->eraseNode(SCEVCheckBlock); | |||
| 2010 | LI->removeBlock(SCEVCheckBlock); | |||
| 2011 | } | |||
| 2012 | } | |||
| 2013 | ||||
| 2014 | /// Remove the created SCEV & memory runtime check blocks & instructions, if | |||
| 2015 | /// unused. | |||
| 2016 | ~GeneratedRTChecks() { | |||
| 2017 | SCEVExpanderCleaner SCEVCleaner(SCEVExp, *DT); | |||
| 2018 | SCEVExpanderCleaner MemCheckCleaner(MemCheckExp, *DT); | |||
| 2019 | if (!SCEVCheckCond) | |||
| 2020 | SCEVCleaner.markResultUsed(); | |||
| 2021 | ||||
| 2022 | if (!MemRuntimeCheckCond) | |||
| 2023 | MemCheckCleaner.markResultUsed(); | |||
| 2024 | ||||
| 2025 | if (MemRuntimeCheckCond) { | |||
| 2026 | auto &SE = *MemCheckExp.getSE(); | |||
| 2027 | // Memory runtime check generation creates compares that use expanded | |||
| 2028 | // values. Remove them before running the SCEVExpanderCleaners. | |||
| 2029 | for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) { | |||
| 2030 | if (MemCheckExp.isInsertedInstruction(&I)) | |||
| 2031 | continue; | |||
| 2032 | SE.forgetValue(&I); | |||
| 2033 | SE.eraseValueFromMap(&I); | |||
| 2034 | I.eraseFromParent(); | |||
| 2035 | } | |||
| 2036 | } | |||
| 2037 | MemCheckCleaner.cleanup(); | |||
| 2038 | SCEVCleaner.cleanup(); | |||
| 2039 | ||||
| 2040 | if (SCEVCheckCond) | |||
| 2041 | SCEVCheckBlock->eraseFromParent(); | |||
| 2042 | if (MemRuntimeCheckCond) | |||
| 2043 | MemCheckBlock->eraseFromParent(); | |||
| 2044 | } | |||
| 2045 | ||||
| 2046 | /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and | |||
| 2047 | /// adjusts the branches to branch to the vector preheader or \p Bypass, | |||
| 2048 | /// depending on the generated condition. | |||
| 2049 | BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass, | |||
| 2050 | BasicBlock *LoopVectorPreHeader, | |||
| 2051 | BasicBlock *LoopExitBlock) { | |||
| 2052 | if (!SCEVCheckCond) | |||
| 2053 | return nullptr; | |||
| 2054 | if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond)) | |||
| 2055 | if (C->isZero()) | |||
| 2056 | return nullptr; | |||
| 2057 | ||||
| 2058 | auto *Pred = LoopVectorPreHeader->getSinglePredecessor(); | |||
| 2059 | ||||
| 2060 | BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock); | |||
| 2061 | // Create new preheader for vector loop. | |||
| 2062 | if (auto *PL = LI->getLoopFor(LoopVectorPreHeader)) | |||
| 2063 | PL->addBasicBlockToLoop(SCEVCheckBlock, *LI); | |||
| 2064 | ||||
| 2065 | SCEVCheckBlock->getTerminator()->eraseFromParent(); | |||
| 2066 | SCEVCheckBlock->moveBefore(LoopVectorPreHeader); | |||
| 2067 | Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader, | |||
| 2068 | SCEVCheckBlock); | |||
| 2069 | ||||
| 2070 | DT->addNewBlock(SCEVCheckBlock, Pred); | |||
| 2071 | DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock); | |||
| 2072 | ||||
| 2073 | ReplaceInstWithInst( | |||
| 2074 | SCEVCheckBlock->getTerminator(), | |||
| 2075 | BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond)); | |||
| 2076 | // Mark the check as used, to prevent it from being removed during cleanup. | |||
| 2077 | SCEVCheckCond = nullptr; | |||
| 2078 | return SCEVCheckBlock; | |||
| 2079 | } | |||
| 2080 | ||||
| 2081 | /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts | |||
| 2082 | /// the branches to branch to the vector preheader or \p Bypass, depending on | |||
| 2083 | /// the generated condition. | |||
| 2084 | BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass, | |||
| 2085 | BasicBlock *LoopVectorPreHeader) { | |||
| 2086 | // Check if we generated code that checks at runtime whether arrays overlap. | |||
| 2087 | if (!MemRuntimeCheckCond) | |||
| 2088 | return nullptr; | |||
| 2089 | ||||
| 2090 | auto *Pred = LoopVectorPreHeader->getSinglePredecessor(); | |||
| 2091 | Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader, | |||
| 2092 | MemCheckBlock); | |||
| 2093 | ||||
| 2094 | DT->addNewBlock(MemCheckBlock, Pred); | |||
| 2095 | DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock); | |||
| 2096 | MemCheckBlock->moveBefore(LoopVectorPreHeader); | |||
| 2097 | ||||
| 2098 | if (auto *PL = LI->getLoopFor(LoopVectorPreHeader)) | |||
| 2099 | PL->addBasicBlockToLoop(MemCheckBlock, *LI); | |||
| 2100 | ||||
| 2101 | ReplaceInstWithInst( | |||
| 2102 | MemCheckBlock->getTerminator(), | |||
| 2103 | BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond)); | |||
| 2104 | MemCheckBlock->getTerminator()->setDebugLoc( | |||
| 2105 | Pred->getTerminator()->getDebugLoc()); | |||
| 2106 | ||||
| 2107 | // Mark the check as used, to prevent it from being removed during cleanup. | |||
| 2108 | MemRuntimeCheckCond = nullptr; | |||
| 2109 | return MemCheckBlock; | |||
| 2110 | } | |||
| 2111 | }; | |||
| 2112 | ||||
| 2113 | // Return true if \p OuterLp is an outer loop annotated with hints for explicit | |||
| 2114 | // vectorization. The loop needs to be annotated with #pragma omp simd | |||
| 2115 | // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the | |||
| 2116 | // vector length information is not provided, vectorization is not considered | |||
| 2117 | // explicit. Interleave hints are not allowed either. These limitations will be | |||
| 2118 | // relaxed in the future. | |||
| 2119 | // Please note that we are currently forced to abuse the pragma 'clang | |||
| 2120 | // vectorize' semantics. This pragma provides *auto-vectorization hints* | |||
| 2121 | // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd' | |||
| 2122 | // provides *explicit vectorization hints* (LV can bypass legal checks and | |||
| 2123 | // assume that vectorization is legal). However, both hints are implemented | |||
| 2124 | // using the same metadata (llvm.loop.vectorize, processed by | |||
| 2125 | // LoopVectorizeHints). This will be fixed in the future when the native IR | |||
| 2126 | // representation for pragma 'omp simd' is introduced. | |||
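| | // As an illustrative example (hypothetical source, not taken from an LLVM | |||
| | // test), an outer loop annotated as follows carries an explicit vector | |||
| | // length and would be considered for the VPlan-native path: | |||
| | // | |||
| | //   #pragma clang loop vectorize(enable) vectorize_width(4) | |||
| | //   for (int i = 0; i < N; ++i)     // explicit outer loop | |||
| | //     for (int j = 0; j < M; ++j)   // inner loop | |||
| | //       A[i][j] += B[i][j]; | |||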
| 2127 | static bool isExplicitVecOuterLoop(Loop *OuterLp, | |||
| 2128 | OptimizationRemarkEmitter *ORE) { | |||
| 2129 | assert(!OuterLp->isInnermost() && "This is not an outer loop"); | |||
| 2130 | LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE); | |||
| 2131 | ||||
| 2132 | // Only outer loops with an explicit vectorization hint are supported. | |||
| 2133 | // Unannotated outer loops are ignored. | |||
| 2134 | if (Hints.getForce() == LoopVectorizeHints::FK_Undefined) | |||
| 2135 | return false; | |||
| 2136 | ||||
| 2137 | Function *Fn = OuterLp->getHeader()->getParent(); | |||
| 2138 | if (!Hints.allowVectorization(Fn, OuterLp, | |||
| 2139 | true /*VectorizeOnlyWhenForced*/)) { | |||
| 2140 | LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n"); | |||
| 2141 | return false; | |||
| 2142 | } | |||
| 2143 | ||||
| 2144 | if (Hints.getInterleave() > 1) { | |||
| 2145 | // TODO: Interleave support is future work. | |||
| 2146 | LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for " | |||
| 2147 | "outer loops.\n"); | |||
| 2148 | Hints.emitRemarkWithHints(); | |||
| 2149 | return false; | |||
| 2150 | } | |||
| 2151 | ||||
| 2152 | return true; | |||
| 2153 | } | |||
| 2154 | ||||
| 2155 | static void collectSupportedLoops(Loop &L, LoopInfo *LI, | |||
| 2156 | OptimizationRemarkEmitter *ORE, | |||
| 2157 | SmallVectorImpl<Loop *> &V) { | |||
| 2158 | // Collect inner loops and outer loops without irreducible control flow. For | |||
| 2159 | // now, only collect outer loops that have explicit vectorization hints. If we | |||
| 2160 | // are stress testing the VPlan H-CFG construction, we collect the outermost | |||
| 2161 | // loop of every loop nest. | |||
| 2162 | if (L.isInnermost() || VPlanBuildStressTest || | |||
| 2163 | (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) { | |||
| 2164 | LoopBlocksRPO RPOT(&L); | |||
| 2165 | RPOT.perform(LI); | |||
| 2166 | if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) { | |||
| 2167 | V.push_back(&L); | |||
| 2168 | // TODO: Collect inner loops inside marked outer loops in case | |||
| 2169 | // vectorization fails for the outer loop. Do not invoke | |||
| 2170 | // 'containsIrreducibleCFG' again for inner loops when the outer loop is | |||
| 2171 | // already known to be reducible. We can use an inherited attribute for | |||
| 2172 | // that. | |||
| 2173 | return; | |||
| 2174 | } | |||
| 2175 | } | |||
| 2176 | for (Loop *InnerL : L) | |||
| 2177 | collectSupportedLoops(*InnerL, LI, ORE, V); | |||
| 2178 | } | |||
| 2179 | ||||
| 2180 | namespace { | |||
| 2181 | ||||
| 2182 | /// The LoopVectorize Pass. | |||
| 2183 | struct LoopVectorize : public FunctionPass { | |||
| 2184 | /// Pass identification, replacement for typeid | |||
| 2185 | static char ID; | |||
| 2186 | ||||
| 2187 | LoopVectorizePass Impl; | |||
| 2188 | ||||
| 2189 | explicit LoopVectorize(bool InterleaveOnlyWhenForced = false, | |||
| 2190 | bool VectorizeOnlyWhenForced = false) | |||
| 2191 | : FunctionPass(ID), | |||
| 2192 | Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) { | |||
| 2193 | initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); | |||
| 2194 | } | |||
| 2195 | ||||
| 2196 | bool runOnFunction(Function &F) override { | |||
| 2197 | if (skipFunction(F)) | |||
| 2198 | return false; | |||
| 2199 | ||||
| 2200 | auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); | |||
| 2201 | auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); | |||
| 2202 | auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); | |||
| 2203 | auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); | |||
| 2204 | auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); | |||
| 2205 | auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); | |||
| 2206 | auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr; | |||
| 2207 | auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); | |||
| 2208 | auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); | |||
| 2209 | auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); | |||
| 2210 | auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); | |||
| 2211 | auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); | |||
| 2212 | auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI(); | |||
| 2213 | ||||
| 2214 | std::function<const LoopAccessInfo &(Loop &)> GetLAA = | |||
| 2215 | [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; | |||
| 2216 | ||||
| 2217 | return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, | |||
| 2218 | GetLAA, *ORE, PSI).MadeAnyChange; | |||
| 2219 | } | |||
| 2220 | ||||
| 2221 | void getAnalysisUsage(AnalysisUsage &AU) const override { | |||
| 2222 | AU.addRequired<AssumptionCacheTracker>(); | |||
| 2223 | AU.addRequired<BlockFrequencyInfoWrapperPass>(); | |||
| 2224 | AU.addRequired<DominatorTreeWrapperPass>(); | |||
| 2225 | AU.addRequired<LoopInfoWrapperPass>(); | |||
| 2226 | AU.addRequired<ScalarEvolutionWrapperPass>(); | |||
| 2227 | AU.addRequired<TargetTransformInfoWrapperPass>(); | |||
| 2228 | AU.addRequired<AAResultsWrapperPass>(); | |||
| 2229 | AU.addRequired<LoopAccessLegacyAnalysis>(); | |||
| 2230 | AU.addRequired<DemandedBitsWrapperPass>(); | |||
| 2231 | AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); | |||
| 2232 | AU.addRequired<InjectTLIMappingsLegacy>(); | |||
| 2233 | ||||
| 2234 | // We currently do not preserve loopinfo/dominator analyses with outer loop | |||
| 2235 | // vectorization. Until this is addressed, mark these analyses as preserved | |||
| 2236 | // only for non-VPlan-native path. | |||
| 2237 | // TODO: Preserve Loop and Dominator analyses for VPlan-native path. | |||
| 2238 | if (!EnableVPlanNativePath) { | |||
| 2239 | AU.addPreserved<LoopInfoWrapperPass>(); | |||
| 2240 | AU.addPreserved<DominatorTreeWrapperPass>(); | |||
| 2241 | } | |||
| 2242 | ||||
| 2243 | AU.addPreserved<BasicAAWrapperPass>(); | |||
| 2244 | AU.addPreserved<GlobalsAAWrapperPass>(); | |||
| 2245 | AU.addRequired<ProfileSummaryInfoWrapperPass>(); | |||
| 2246 | } | |||
| 2247 | }; | |||
| 2248 | ||||
| 2249 | } // end anonymous namespace | |||
| 2250 | ||||
| 2251 | //===----------------------------------------------------------------------===// | |||
| 2252 | // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and | |||
| 2253 | // LoopVectorizationCostModel and LoopVectorizationPlanner. | |||
| 2254 | //===----------------------------------------------------------------------===// | |||
| 2255 | ||||
| 2256 | Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { | |||
| 2257 | // We need to place the broadcast of invariant variables outside the loop, | |||
| 2258 | // but only if it's proven safe to do so. Else, broadcast will be inside | |||
| 2259 | // vector loop body. | |||
| 2260 | Instruction *Instr = dyn_cast<Instruction>(V); | |||
| 2261 | bool SafeToHoist = OrigLoop->isLoopInvariant(V) && | |||
| 2262 | (!Instr || | |||
| 2263 | DT->dominates(Instr->getParent(), LoopVectorPreHeader)); | |||
| 2264 | // Place the code for broadcasting invariant variables in the new preheader. | |||
| 2265 | IRBuilder<>::InsertPointGuard Guard(Builder); | |||
| 2266 | if (SafeToHoist) | |||
| 2267 | Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); | |||
| 2268 | ||||
| 2269 | // Broadcast the scalar into all locations in the vector. | |||
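| | // As an illustrative sketch (assuming a fixed VF of 4 and an i32 scalar | |||
| | // %x), CreateVectorSplat typically emits: | |||
| | //   %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0 | |||
| | //   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, | |||
| | //                                    <4 x i32> poison, <4 x i32> zeroinitializer | |||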
| 2270 | Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); | |||
| 2271 | ||||
| 2272 | return Shuf; | |||
| 2273 | } | |||
| 2274 | ||||
| 2275 | void InnerLoopVectorizer::createVectorIntOrFpInductionPHI( | |||
| 2276 | const InductionDescriptor &II, Value *Step, Value *Start, | |||
| 2277 | Instruction *EntryVal, VPValue *Def, VPValue *CastDef, | |||
| 2278 | VPTransformState &State) { | |||
| 2279 | assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && | |||
| 2280 | "Expected either an induction phi-node or a truncate of it!"); | |||
| 2281 | ||||
| 2282 | // Construct the initial value of the vector IV in the vector loop preheader | |||
| 2283 | auto CurrIP = Builder.saveIP(); | |||
| 2284 | Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); | |||
| 2285 | if (isa<TruncInst>(EntryVal)) { | |||
| 2286 | assert(Start->getType()->isIntegerTy() && | |||
| 2287 | "Truncation requires an integer type"); | |||
| 2288 | auto *TruncType = cast<IntegerType>(EntryVal->getType()); | |||
| 2289 | Step = Builder.CreateTrunc(Step, TruncType); | |||
| 2290 | Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); | |||
| 2291 | } | |||
| 2292 | Value *SplatStart = Builder.CreateVectorSplat(VF, Start); | |||
| 2293 | Value *SteppedStart = | |||
| 2294 | getStepVector(SplatStart, 0, Step, II.getInductionOpcode()); | |||
| 2295 | ||||
| 2296 | // We create vector phi nodes for both integer and floating-point induction | |||
| 2297 | // variables. Here, we determine the kind of arithmetic we will perform. | |||
| 2298 | Instruction::BinaryOps AddOp; | |||
| 2299 | Instruction::BinaryOps MulOp; | |||
| 2300 | if (Step->getType()->isIntegerTy()) { | |||
| 2301 | AddOp = Instruction::Add; | |||
| 2302 | MulOp = Instruction::Mul; | |||
| 2303 | } else { | |||
| 2304 | AddOp = II.getInductionOpcode(); | |||
| 2305 | MulOp = Instruction::FMul; | |||
| 2306 | } | |||
| 2307 | ||||
| 2308 | // Multiply the vectorization factor by the step using integer or | |||
| 2309 | // floating-point arithmetic as appropriate. | |||
| 2310 | Type *StepType = Step->getType(); | |||
| 2311 | if (Step->getType()->isFloatingPointTy()) | |||
| 2312 | StepType = IntegerType::get(StepType->getContext(), | |||
| 2313 | StepType->getScalarSizeInBits()); | |||
| 2314 | Value *RuntimeVF = getRuntimeVF(Builder, StepType, VF); | |||
| 2315 | if (Step->getType()->isFloatingPointTy()) | |||
| 2316 | RuntimeVF = Builder.CreateSIToFP(RuntimeVF, Step->getType()); | |||
| 2317 | Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF); | |||
| 2318 | ||||
| 2319 | // Create a vector splat to use in the induction update. | |||
| 2320 | // | |||
| 2321 | // FIXME: If the step is non-constant, we create the vector splat with | |||
| 2322 | // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't | |||
| 2323 | // handle a constant vector splat. | |||
| 2324 | Value *SplatVF = isa<Constant>(Mul) | |||
| 2325 | ? ConstantVector::getSplat(VF, cast<Constant>(Mul)) | |||
| 2326 | : Builder.CreateVectorSplat(VF, Mul); | |||
| 2327 | Builder.restoreIP(CurrIP); | |||
| 2328 | ||||
| 2329 | // We may need to add the step a number of times, depending on the unroll | |||
| 2330 | // factor. The last of those goes into the PHI. | |||
| 2331 | PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", | |||
| 2332 | &*LoopVectorBody->getFirstInsertionPt()); | |||
| 2333 | VecInd->setDebugLoc(EntryVal->getDebugLoc()); | |||
| 2334 | Instruction *LastInduction = VecInd; | |||
| 2335 | for (unsigned Part = 0; Part < UF; ++Part) { | |||
| 2336 | State.set(Def, LastInduction, Part); | |||
| 2337 | ||||
| 2338 | if (isa<TruncInst>(EntryVal)) | |||
| 2339 | addMetadata(LastInduction, EntryVal); | |||
| 2340 | recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, CastDef, | |||
| 2341 | State, Part); | |||
| 2342 | ||||
| 2343 | LastInduction = cast<Instruction>( | |||
| 2344 | Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")); | |||
| 2345 | LastInduction->setDebugLoc(EntryVal->getDebugLoc()); | |||
| 2346 | } | |||
| 2347 | ||||
| 2348 | // Move the last step to the end of the latch block. This ensures consistent | |||
| 2349 | // placement of all induction updates. | |||
| 2350 | auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); | |||
| 2351 | auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator()); | |||
| 2352 | auto *ICmp = cast<Instruction>(Br->getCondition()); | |||
| 2353 | LastInduction->moveBefore(ICmp); | |||
| 2354 | LastInduction->setName("vec.ind.next"); | |||
| 2355 | ||||
| 2356 | VecInd->addIncoming(SteppedStart, LoopVectorPreHeader); | |||
| 2357 | VecInd->addIncoming(LastInduction, LoopVectorLatch); | |||
| 2358 | } | |||
| 2359 | ||||
| 2360 | bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const { | |||
| 2361 | return Cost->isScalarAfterVectorization(I, VF) || | |||
| 2362 | Cost->isProfitableToScalarize(I, VF); | |||
| 2363 | } | |||
| 2364 | ||||
| 2365 | bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const { | |||
| 2366 | if (shouldScalarizeInstruction(IV)) | |||
| 2367 | return true; | |||
| 2368 | auto isScalarInst = [&](User *U) -> bool { | |||
| 2369 | auto *I = cast<Instruction>(U); | |||
| 2370 | return (OrigLoop->contains(I) && shouldScalarizeInstruction(I)); | |||
| 2371 | }; | |||
| 2372 | return llvm::any_of(IV->users(), isScalarInst); | |||
| 2373 | } | |||
| 2374 | ||||
| 2375 | void InnerLoopVectorizer::recordVectorLoopValueForInductionCast( | |||
| 2376 | const InductionDescriptor &ID, const Instruction *EntryVal, | |||
| 2377 | Value *VectorLoopVal, VPValue *CastDef, VPTransformState &State, | |||
| 2378 | unsigned Part, unsigned Lane) { | |||
| 2379 | assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && | |||
| 2380 | "Expected either an induction phi-node or a truncate of it!"); | |||
| 2381 | ||||
| 2382 | // This induction variable is not the phi from the original loop but the | |||
| 2383 | // newly-created IV, based on the proof that the casted Phi is equal to the | |||
| 2384 | // uncasted Phi in the vectorized loop (possibly under a runtime guard). It | |||
| 2385 | // reuses the same InductionDescriptor as the original IV, but we don't | |||
| 2386 | // have to do any recording in this case; that is done when the original IV | |||
| 2387 | // is processed. | |||
| 2388 | if (isa<TruncInst>(EntryVal)) | |||
| 2389 | return; | |||
| 2390 | ||||
| 2391 | const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts(); | |||
| 2392 | if (Casts.empty()) | |||
| 2393 | return; | |||
| 2394 | // Only the first Cast instruction in the Casts vector is of interest. | |||
| 2395 | // The rest of the Casts (if any exist) have no uses outside the | |||
| 2396 | // induction update chain itself. | |||
| 2397 | if (Lane < UINT_MAX) | |||
| 2398 | State.set(CastDef, VectorLoopVal, VPIteration(Part, Lane)); | |||
| 2399 | else | |||
| 2400 | State.set(CastDef, VectorLoopVal, Part); | |||
| 2401 | } | |||
| 2402 | ||||
| 2403 | void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, Value *Start, | |||
| 2404 | TruncInst *Trunc, VPValue *Def, | |||
| 2405 | VPValue *CastDef, | |||
| 2406 | VPTransformState &State) { | |||
| 2407 | assert((IV->getType()->isIntegerTy() || IV != OldInduction) && | |||
| 2408 | "Primary induction variable must have an integer type"); | |||
| 2409 | ||||
| 2410 | auto II = Legal->getInductionVars().find(IV); | |||
| 2411 | assert(II != Legal->getInductionVars().end() && "IV is not an induction"); | |||
| 2412 | ||||
| 2413 | auto ID = II->second; | |||
| 2414 | assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); | |||
| 2415 | ||||
| 2416 | // The value from the original loop to which we are mapping the new induction | |||
| 2417 | // variable. | |||
| 2418 | Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; | |||
| 2419 | ||||
| 2420 | auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); | |||
| 2421 | ||||
| 2422 | // Generate code for the induction step. Note that induction steps are | |||
| 2423 | // required to be loop-invariant. | |||
| 2424 | auto CreateStepValue = [&](const SCEV *Step) -> Value * { | |||
| 2425 | assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) && | |||
| 2426 | "Induction step should be loop invariant"); | |||
| 2427 | if (PSE.getSE()->isSCEVable(IV->getType())) { | |||
| 2428 | SCEVExpander Exp(*PSE.getSE(), DL, "induction"); | |||
| 2429 | return Exp.expandCodeFor(Step, Step->getType(), | |||
| 2430 | LoopVectorPreHeader->getTerminator()); | |||
| 2431 | } | |||
| 2432 | return cast<SCEVUnknown>(Step)->getValue(); | |||
| 2433 | }; | |||
| 2434 | ||||
| 2435 | // The scalar value to broadcast. This is derived from the canonical | |||
| 2436 | // induction variable. If a truncation type is given, truncate the canonical | |||
| 2437 | // induction variable and step. Otherwise, derive these values from the | |||
| 2438 | // induction descriptor. | |||
| 2439 | auto CreateScalarIV = [&](Value *&Step) -> Value * { | |||
| 2440 | Value *ScalarIV = Induction; | |||
| 2441 | if (IV != OldInduction) { | |||
| 2442 | ScalarIV = IV->getType()->isIntegerTy() | |||
| 2443 | ? Builder.CreateSExtOrTrunc(Induction, IV->getType()) | |||
| 2444 | : Builder.CreateCast(Instruction::SIToFP, Induction, | |||
| 2445 | IV->getType()); | |||
| 2446 | ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID); | |||
| 2447 | ScalarIV->setName("offset.idx"); | |||
| 2448 | } | |||
| 2449 | if (Trunc) { | |||
| 2450 | auto *TruncType = cast<IntegerType>(Trunc->getType()); | |||
| 2451 | assert(Step->getType()->isIntegerTy() && | |||
| 2452 | "Truncation requires an integer step"); | |||
| 2453 | ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType); | |||
| 2454 | Step = Builder.CreateTrunc(Step, TruncType); | |||
| 2455 | } | |||
| 2456 | return ScalarIV; | |||
| 2457 | }; | |||
| 2458 | ||||
| 2459 | // Create the vector values from the scalar IV when we are not creating a | |||
| 2460 | // vector IV. | |||
| 2461 | auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) { | |||
| 2462 | Value *Broadcasted = getBroadcastInstrs(ScalarIV); | |||
| 2463 | for (unsigned Part = 0; Part < UF; ++Part) { | |||
| 2464 | assert(!VF.isScalable() && "scalable vectors not yet supported."); | |||
| 2465 | Value *EntryPart = | |||
| 2466 | getStepVector(Broadcasted, VF.getKnownMinValue() * Part, Step, | |||
| 2467 | ID.getInductionOpcode()); | |||
| 2468 | State.set(Def, EntryPart, Part); | |||
| 2469 | if (Trunc) | |||
| 2470 | addMetadata(EntryPart, Trunc); | |||
| 2471 | recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, CastDef, | |||
| 2472 | State, Part); | |||
| 2473 | } | |||
| 2474 | }; | |||
| 2475 | ||||
| 2476 | // Fast-math-flags propagate from the original induction instruction. | |||
| 2477 | IRBuilder<>::FastMathFlagGuard FMFG(Builder); | |||
| 2478 | if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp())) | |||
| 2479 | Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags()); | |||
| 2480 | ||||
| 2481 | // Now do the actual transformations, and start with creating the step value. | |||
| 2482 | Value *Step = CreateStepValue(ID.getStep()); | |||
| 2483 | if (VF.isZero() || VF.isScalar()) { | |||
| 2484 | Value *ScalarIV = CreateScalarIV(Step); | |||
| 2485 | CreateSplatIV(ScalarIV, Step); | |||
| 2486 | return; | |||
| 2487 | } | |||
| 2488 | ||||
| 2489 | // Determine if we want a scalar version of the induction variable. This is | |||
| 2490 | // true if the induction variable itself is not widened, or if it has at | |||
| 2491 | // least one user in the loop that is not widened. | |||
| 2492 | auto NeedsScalarIV = needsScalarInduction(EntryVal); | |||
| 2493 | if (!NeedsScalarIV) { | |||
| 2494 | createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef, | |||
| 2495 | State); | |||
| 2496 | return; | |||
| 2497 | } | |||
| 2498 | ||||
| 2499 | // Try to create a new independent vector induction variable. If we can't | |||
| 2500 | // create the phi node, we will splat the scalar induction variable in each | |||
| 2501 | // loop iteration. | |||
| 2502 | if (!shouldScalarizeInstruction(EntryVal)) { | |||
| 2503 | createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef, | |||
| 2504 | State); | |||
| 2505 | Value *ScalarIV = CreateScalarIV(Step); | |||
| 2506 | // Create scalar steps that can be used by instructions we will later | |||
| 2507 | // scalarize. Note that the addition of the scalar steps will not increase | |||
| 2508 | // the number of instructions in the loop in the common case prior to | |||
| 2509 | // InstCombine. We will be trading one vector extract for each scalar step. | |||
| 2510 | buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State); | |||
| 2511 | return; | |||
| 2512 | } | |||
| 2513 | ||||
| 2514 | // All IV users are scalar instructions, so only emit a scalar IV, not a | |||
| 2515 | // vectorized IV, except when we tail-fold: then the splat IV feeds the | |||
| 2516 | // predicate used by the masked loads/stores. | |||
| 2517 | Value *ScalarIV = CreateScalarIV(Step); | |||
| 2518 | if (!Cost->isScalarEpilogueAllowed()) | |||
| 2519 | CreateSplatIV(ScalarIV, Step); | |||
| 2520 | buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State); | |||
| 2521 | } | |||
| 2522 | ||||
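| | // getStepVector computes Val + (StartIdx + <0, 1, ..., VLen-1>) * Step. As | |||
| | // a worked example (assuming a fixed VLen of 4, StartIdx = 4, Step = 2 and | |||
| | // a splat input <10, 10, 10, 10>), the result is <18, 20, 22, 24>. | |||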
| 2523 | Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step, | |||
| 2524 | Instruction::BinaryOps BinOp) { | |||
| 2525 | // Create and check the types. | |||
| 2526 | auto *ValVTy = cast<VectorType>(Val->getType()); | |||
| 2527 | ElementCount VLen = ValVTy->getElementCount(); | |||
| 2528 | ||||
| 2529 | Type *STy = Val->getType()->getScalarType(); | |||
| 2530 | assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && | |||
| 2531 | "Induction Step must be an integer or FP"); | |||
| 2532 | assert(Step->getType() == STy && "Step has wrong type"); | |||
| 2533 | ||||
| 2534 | SmallVector<Constant *, 8> Indices; | |||
| 2535 | ||||
| 2536 | // Create a vector of consecutive numbers from zero to VLen - 1. | |||
| 2537 | VectorType *InitVecValVTy = ValVTy; | |||
| 2538 | Type *InitVecValSTy = STy; | |||
| 2539 | if (STy->isFloatingPointTy()) { | |||
| 2540 | InitVecValSTy = | |||
| 2541 | IntegerType::get(STy->getContext(), STy->getScalarSizeInBits()); | |||
| 2542 | InitVecValVTy = VectorType::get(InitVecValSTy, VLen); | |||
| 2543 | } | |||
| 2544 | Value *InitVec = Builder.CreateStepVector(InitVecValVTy); | |||
| 2545 | ||||
| 2546 | // Add on StartIdx | |||
| 2547 | Value *StartIdxSplat = Builder.CreateVectorSplat( | |||
| 2548 | VLen, ConstantInt::get(InitVecValSTy, StartIdx)); | |||
| 2549 | InitVec = Builder.CreateAdd(InitVec, StartIdxSplat); | |||
| 2550 | ||||
| 2551 | if (STy->isIntegerTy()) { | |||
| 2552 | Step = Builder.CreateVectorSplat(VLen, Step); | |||
| 2553 | assert(Step->getType() == Val->getType() && "Invalid step vec"); | |||
| 2554 | // FIXME: The newly created binary instructions should contain nsw/nuw flags, | |||
| 2555 | // which can be found from the original scalar operations. | |||
| 2556 | Step = Builder.CreateMul(InitVec, Step); | |||
| 2557 | return Builder.CreateAdd(Val, Step, "induction"); | |||
| 2558 | } | |||
| 2559 | ||||
| 2560 | // Floating point induction. | |||
| 2561 | assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && | |||
| 2562 | "Binary Opcode should be specified for FP induction"); | |||
| 2563 | InitVec = Builder.CreateUIToFP(InitVec, ValVTy); | |||
| 2564 | Step = Builder.CreateVectorSplat(VLen, Step); | |||
| 2565 | Value *MulOp = Builder.CreateFMul(InitVec, Step); | |||
| 2566 | return Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); | |||
| 2567 | } | |||
| 2568 | ||||
| 2569 | void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step, | |||
| 2570 | Instruction *EntryVal, | |||
| 2571 | const InductionDescriptor &ID, | |||
| 2572 | VPValue *Def, VPValue *CastDef, | |||
| 2573 | VPTransformState &State) { | |||
| 2574 | // We shouldn't have to build scalar steps if we aren't vectorizing. | |||
| 2575 | assert(VF.isVector() && "VF should be greater than one"); | |||
| 2576 | // Get the value type and ensure it and the step have the same integer type. | |||
| 2577 | Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); | |||
| 2578 | assert(ScalarIVTy == Step->getType() && | |||
| 2579 | "Val and Step should have the same type"); | |||
| 2580 | ||||
| 2581 | // We build scalar steps for both integer and floating-point induction | |||
| 2582 | // variables. Here, we determine the kind of arithmetic we will perform. | |||
| 2583 | Instruction::BinaryOps AddOp; | |||
| 2584 | Instruction::BinaryOps MulOp; | |||
| 2585 | if (ScalarIVTy->isIntegerTy()) { | |||
| 2586 | AddOp = Instruction::Add; | |||
| 2587 | MulOp = Instruction::Mul; | |||
| 2588 | } else { | |||
| 2589 | AddOp = ID.getInductionOpcode(); | |||
| 2590 | MulOp = Instruction::FMul; | |||
| 2591 | } | |||
| 2592 | ||||
| 2593 | // Determine the number of scalars we need to generate for each unroll | |||
| 2594 | // iteration. If EntryVal is uniform, we only need to generate the first | |||
| 2595 | // lane. Otherwise, we generate all VF values. | |||
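| | // As a worked example (assuming a fixed VF of 4, UF = 2 and step S), the | |||
| | // scalar step computed below for part P, lane L evaluates to | |||
| | // ScalarIV + (P * 4 + L) * S. | |||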
| 2596 | bool IsUniform = | |||
| 2597 | Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF); | |||
| 2598 | unsigned Lanes = IsUniform ? 1 : VF.getKnownMinValue(); | |||
| 2599 | // Compute the scalar steps and save the results in State. | |||
| 2600 | Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(), | |||
| 2601 | ScalarIVTy->getScalarSizeInBits()); | |||
| 2602 | Type *VecIVTy = nullptr; | |||
| 2603 | Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr; | |||
| 2604 | if (!IsUniform && VF.isScalable()) { | |||
| 2605 | VecIVTy = VectorType::get(ScalarIVTy, VF); | |||
| 2606 | UnitStepVec = Builder.CreateStepVector(VectorType::get(IntStepTy, VF)); | |||
| 2607 | SplatStep = Builder.CreateVectorSplat(VF, Step); | |||
| 2608 | SplatIV = Builder.CreateVectorSplat(VF, ScalarIV); | |||
| 2609 | } | |||
| 2610 | ||||
| 2611 | for (unsigned Part = 0; Part < UF; ++Part) { | |||
| 2612 | Value *StartIdx0 = | |||
| 2613 | createStepForVF(Builder, ConstantInt::get(IntStepTy, Part), VF); | |||
| 2614 | ||||
| 2615 | if (!IsUniform && VF.isScalable()) { | |||
| 2616 | auto *SplatStartIdx = Builder.CreateVectorSplat(VF, StartIdx0); | |||
| 2617 | auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec); | |||
| 2618 | if (ScalarIVTy->isFloatingPointTy()) | |||
| 2619 | InitVec = Builder.CreateSIToFP(InitVec, VecIVTy); | |||
| 2620 | auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep); | |||
| 2621 | auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul); | |||
| 2622 | State.set(Def, Add, Part); | |||
| 2623 | recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State, | |||
| 2624 | Part); | |||
| 2625 | // It's useful to record the lane values too for the known minimum number | |||
| 2626 | // of elements so we do those below. This improves the code quality when | |||
| 2627 | // trying to extract the first element, for example. | |||
| 2628 | } | |||
| 2629 | ||||
| 2630 | if (ScalarIVTy->isFloatingPointTy()) | |||
| 2631 | StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy); | |||
| 2632 | ||||
| 2633 | for (unsigned Lane = 0; Lane < Lanes; ++Lane) { | |||
| 2634 | Value *StartIdx = Builder.CreateBinOp( | |||
| 2635 | AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane)); | |||
| 2636 | // The step returned by `createStepForVF` is a runtime-evaluated value | |||
| 2637 | // when VF is scalable. Otherwise, it should be folded into a Constant. | |||
| 2638 | assert((VF.isScalable() || isa<Constant>(StartIdx)) && | |||
| 2639 | "Expected StartIdx to be folded to a constant when VF is not " | |||
| 2640 | "scalable"); | |||
| 2641 | auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step); | |||
| 2642 | auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul); | |||
| 2643 | State.set(Def, Add, VPIteration(Part, Lane)); | |||
| 2644 | recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State, | |||
| 2645 | Part, Lane); | |||
| 2646 | } | |||
| 2647 | } | |||
| 2648 | } | |||
| 2649 | ||||
| 2650 | void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def, | |||
| 2651 | const VPIteration &Instance, | |||
| 2652 | VPTransformState &State) { | |||
| 2653 | Value *ScalarInst = State.get(Def, Instance); | |||
| 2654 | Value *VectorValue = State.get(Def, Instance.Part); | |||
| 2655 | VectorValue = Builder.CreateInsertElement( | |||
| 2656 | VectorValue, ScalarInst, | |||
| 2657 | Instance.Lane.getAsRuntimeExpr(State.Builder, VF)); | |||
| 2658 | State.set(Def, VectorValue, Instance.Part); | |||
| 2659 | } | |||
| 2660 | ||||
| 2661 | Value *InnerLoopVectorizer::reverseVector(Value *Vec) { | |||
| 2662 | assert(Vec->getType()->isVectorTy() && "Invalid type"); | |||
| 2663 | return Builder.CreateVectorReverse(Vec, "reverse"); | |||
| 2664 | } | |||
| 2665 | ||||
| 2666 | // Return whether we allow using masked interleave-groups (for dealing with | |||
| 2667 | // strided loads/stores that reside in predicated blocks, or for dealing | |||
| 2668 | // with gaps). | |||
| 2669 | static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) { | |||
| 2670 | // If an override option has been passed in for interleaved accesses, use it. | |||
| 2671 | if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0) | |||
| 2672 | return EnableMaskedInterleavedMemAccesses; | |||
| 2673 | ||||
| 2674 | return TTI.enableMaskedInterleavedAccessVectorization(); | |||
| 2675 | } | |||
| 2676 | ||||
| 2677 | // Try to vectorize the interleave group that \p Instr belongs to. | |||
| 2678 | // | |||
| 2679 | // E.g. Translate following interleaved load group (factor = 3): | |||
| 2680 | // for (i = 0; i < N; i+=3) { | |||
| 2681 | // R = Pic[i]; // Member of index 0 | |||
| 2682 | // G = Pic[i+1]; // Member of index 1 | |||
| 2683 | // B = Pic[i+2]; // Member of index 2 | |||
| 2684 | // ... // do something to R, G, B | |||
| 2685 | // } | |||
| 2686 | // To: | |||
| 2687 | // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B | |||
| 2688 | // %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9> ; R elements | |||
| 2689 | // %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10> ; G elements | |||
| 2690 | // %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11> ; B elements | |||
| 2691 | // | |||
| 2692 | // Or translate following interleaved store group (factor = 3): | |||
| 2693 | // for (i = 0; i < N; i+=3) { | |||
| 2694 | // ... do something to R, G, B | |||
| 2695 | // Pic[i] = R; // Member of index 0 | |||
| 2696 | // Pic[i+1] = G; // Member of index 1 | |||
| 2697 | // Pic[i+2] = B; // Member of index 2 | |||
| 2698 | // } | |||
| 2699 | // To: | |||
| 2700 | // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7> | |||
| 2701 | // %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u> | |||
| 2702 | // %interleaved.vec = shuffle %R_G.vec, %B_U.vec, | |||
| 2703 | // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements | |||
| 2704 | // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B | |||
| 2705 | void InnerLoopVectorizer::vectorizeInterleaveGroup( | |||
| 2706 | const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs, | |||
| 2707 | VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues, | |||
| 2708 | VPValue *BlockInMask) { | |||
| 2709 | Instruction *Instr = Group->getInsertPos(); | |||
| 2710 | const DataLayout &DL = Instr->getModule()->getDataLayout(); | |||
| 2711 | ||||
| 2712 | // Prepare for the vector type of the interleaved load/store. | |||
| 2713 | Type *ScalarTy = getLoadStoreType(Instr); | |||
| 2714 | unsigned InterleaveFactor = Group->getFactor(); | |||
| 2715 | assert(!VF.isScalable() && "scalable vectors not yet supported."); | |||
| 2716 | auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor); | |||
| 2717 | ||||
| 2718 | // Prepare for the new pointers. | |||
| 2719 | SmallVector<Value *, 2> AddrParts; | |||
| 2720 | unsigned Index = Group->getIndex(Instr); | |||
| 2721 | ||||
| 2722 | // TODO: extend the masked interleaved-group support to reversed access. | |||
| 2723 | assert((!BlockInMask || !Group->isReverse()) && | |||
| 2724 | "Reversed masked interleave-group not supported."); | |||
| 2725 | ||||
| 2726 | // If the group is reverse, adjust the index to refer to the last vector lane | |||
| 2727 | // instead of the first. We adjust the index from the first vector lane, | |||
| 2728 | // rather than directly getting the pointer for lane VF - 1, because the | |||
| 2729 | // pointer operand of the interleaved access is supposed to be uniform. For | |||
| 2730 | // uniform instructions, we're only required to generate a value for the | |||
| 2731 | // first vector lane in each unroll iteration. | |||
| 2732 | if (Group->isReverse()) | |||
| 2733 | Index += (VF.getKnownMinValue() - 1) * Group->getFactor(); | |||
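| | // As a worked example (assuming a fixed VF of 4, factor 3 and an insert | |||
| | // position at member index 1), the adjusted Index is 1 + 3 * 3 = 10: the | |||
| | // same member, but in the tuple accessed by the last vector lane. | |||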
| 2734 | ||||
| 2735 | for (unsigned Part = 0; Part < UF; Part++) { | |||
| 2736 | Value *AddrPart = State.get(Addr, VPIteration(Part, 0)); | |||
| 2737 | setDebugLocFromInst(AddrPart); | |||
| 2738 | ||||
| 2739 | // Note that the current instruction could be a member at any index. We need | |||
| 2740 | // to adjust the address down to the member of index 0. | |||
| 2741 | // | |||
| 2742 | // E.g. a = A[i+1]; // Member of index 1 (Current instruction) | |||
| 2743 | // b = A[i]; // Member of index 0 | |||
| 2744 | // The current pointer points to A[i+1]; adjust it to A[i]. | |||
| 2745 | // | |||
| 2746 | // E.g. A[i+1] = a; // Member of index 1 | |||
| 2747 | // A[i] = b; // Member of index 0 | |||
| 2748 | // A[i+2] = c; // Member of index 2 (Current instruction) | |||
| 2749 | // The current pointer points to A[i+2]; adjust it to A[i]. | |||
| 2750 | ||||
| 2751 | bool InBounds = false; | |||
| 2752 | if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts())) | |||
| 2753 | InBounds = gep->isInBounds(); | |||
| 2754 | AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index)); | |||
| 2755 | cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds); | |||
| 2756 | ||||
| 2757 | // Cast to the vector pointer type. | |||
| 2758 | unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace(); | |||
| 2759 | Type *PtrTy = VecTy->getPointerTo(AddressSpace); | |||
| 2760 | AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy)); | |||
| 2761 | } | |||
| 2762 | ||||
| 2763 | setDebugLocFromInst(Instr); | |||
| 2764 | Value *PoisonVec = PoisonValue::get(VecTy); | |||
| 2765 | ||||
| 2766 | Value *MaskForGaps = nullptr; | |||
| 2767 | if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) { | |||
| 2768 | MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group); | |||
| 2769 | assert(MaskForGaps && "Mask for Gaps is required but it is null"); | |||
| 2770 | } | |||
| 2771 | ||||
| 2772 | // Vectorize the interleaved load group. | |||
| 2773 | if (isa<LoadInst>(Instr)) { | |||
| 2774 | // For each unroll part, create a wide load for the group. | |||
| 2775 | SmallVector<Value *, 2> NewLoads; | |||
| 2776 | for (unsigned Part = 0; Part < UF; Part++) { | |||
| 2777 | Instruction *NewLoad; | |||
| 2778 | if (BlockInMask || MaskForGaps) { | |||
| 2779 | assert(useMaskedInterleavedAccesses(*TTI) && | |||
| 2780 | "masked interleaved groups are not allowed."); | |||
| 2781 | Value *GroupMask = MaskForGaps; | |||
| 2782 | if (BlockInMask) { | |||
| 2783 | Value *BlockInMaskPart = State.get(BlockInMask, Part); | |||
| 2784 | Value *ShuffledMask = Builder.CreateShuffleVector( | |||
| 2785 | BlockInMaskPart, | |||
| 2786 | createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), | |||
| 2787 | "interleaved.mask"); | |||
| 2788 | GroupMask = MaskForGaps | |||
| 2789 | ? Builder.CreateBinOp(Instruction::And, ShuffledMask, | |||
| 2790 | MaskForGaps) | |||
| 2791 | : ShuffledMask; | |||
| 2792 | } | |||
| 2793 | NewLoad = | |||
| 2794 | Builder.CreateMaskedLoad(VecTy, AddrParts[Part], Group->getAlign(), | |||
| 2795 | GroupMask, PoisonVec, "wide.masked.vec"); | |||
| 2796 | } | |||
| 2797 | else | |||
| 2798 | NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part], | |||
| 2799 | Group->getAlign(), "wide.vec"); | |||
| 2800 | Group->addMetadata(NewLoad); | |||
| 2801 | NewLoads.push_back(NewLoad); | |||
| 2802 | } | |||
| 2803 | ||||
| 2804 | // For each member in the group, shuffle out the appropriate data from the | |||
| 2805 | // wide loads. | |||
| 2806 | unsigned J = 0; | |||
| 2807 | for (unsigned I = 0; I < InterleaveFactor; ++I) { | |||
| 2808 | Instruction *Member = Group->getMember(I); | |||
| 2809 | ||||
| 2810 | // Skip the gaps in the group. | |||
| 2811 | if (!Member) | |||
| 2812 | continue; | |||
| 2813 | ||||
| 2814 | auto StrideMask = | |||
| 2815 | createStrideMask(I, InterleaveFactor, VF.getKnownMinValue()); | |||
| 2816 | for (unsigned Part = 0; Part < UF; Part++) { | |||
| 2817 | Value *StridedVec = Builder.CreateShuffleVector( | |||
| 2818 | NewLoads[Part], StrideMask, "strided.vec"); | |||
| 2819 | ||||
| 2820 | // If this member has a different type, cast the result type. | |||
| 2821 | if (Member->getType() != ScalarTy) { | |||
| 2822 | assert(!VF.isScalable() && "VF is assumed to be non scalable."); | |||
| 2823 | VectorType *OtherVTy = VectorType::get(Member->getType(), VF); | |||
| 2824 | StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL); | |||
| 2825 | } | |||
| 2826 | ||||
| 2827 | if (Group->isReverse()) | |||
| 2828 | StridedVec = reverseVector(StridedVec); | |||
| 2829 | ||||
| 2830 | State.set(VPDefs[J], StridedVec, Part); | |||
| 2831 | } | |||
| 2832 | ++J; | |||
| 2833 | } | |||
| 2834 | return; | |||
| 2835 | } | |||
| 2836 | ||||
| 2837 | // The sub vector type for the current instruction. | |||
| 2838 | auto *SubVT = VectorType::get(ScalarTy, VF); | |||
| 2839 | ||||
| 2840 | // Vectorize the interleaved store group. | |||
| 2841 | for (unsigned Part = 0; Part < UF; Part++) { | |||
| 2842 | // Collect the stored vector from each member. | |||
| 2843 | SmallVector<Value *, 4> StoredVecs; | |||
| 2844 | for (unsigned i = 0; i < InterleaveFactor; i++) { | |||
| 2845 | // Interleaved store group doesn't allow a gap, so each index has a member | |||
| 2846 | assert(Group->getMember(i) && "Fail to get a member from an interleaved store group"); | |||
| 2847 | ||||
| 2848 | Value *StoredVec = State.get(StoredValues[i], Part); | |||
| 2849 | ||||
| 2850 | if (Group->isReverse()) | |||
| 2851 | StoredVec = reverseVector(StoredVec); | |||
| 2852 | ||||
| 2853 | // If this member has a different type, cast it to a unified type. | |||
| 2854 | ||||
| 2855 | if (StoredVec->getType() != SubVT) | |||
| 2856 | StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL); | |||
| 2857 | ||||
| 2858 | StoredVecs.push_back(StoredVec); | |||
| 2859 | } | |||
| 2860 | ||||
| 2861 | // Concatenate all vectors into a wide vector. | |||
| 2862 | Value *WideVec = concatenateVectors(Builder, StoredVecs); | |||
| 2863 | ||||
| 2864 | // Interleave the elements in the wide vector. | |||
| 2865 | Value *IVec = Builder.CreateShuffleVector( | |||
| 2866 | WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor), | |||
| 2867 | "interleaved.vec"); | |||
| 2868 | ||||
| 2869 | Instruction *NewStoreInstr; | |||
| 2870 | if (BlockInMask) { | |||
| 2871 | Value *BlockInMaskPart = State.get(BlockInMask, Part); | |||
| 2872 | Value *ShuffledMask = Builder.CreateShuffleVector( | |||
| 2873 | BlockInMaskPart, | |||
| 2874 | createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), | |||
| 2875 | "interleaved.mask"); | |||
| 2876 | NewStoreInstr = Builder.CreateMaskedStore( | |||
| 2877 | IVec, AddrParts[Part], Group->getAlign(), ShuffledMask); | |||
| 2878 | } | |||
| 2879 | else | |||
| 2880 | NewStoreInstr = | |||
| 2881 | Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign()); | |||
| 2882 | ||||
| 2883 | Group->addMetadata(NewStoreInstr); | |||
| 2884 | } | |||
| 2885 | } | |||
| 2886 | ||||
| 2887 | void InnerLoopVectorizer::vectorizeMemoryInstruction( | |||
| 2888 | Instruction *Instr, VPTransformState &State, VPValue *Def, VPValue *Addr, | |||
| 2889 | VPValue *StoredValue, VPValue *BlockInMask) { | |||
| 2890 | // Attempt to issue a wide load. | |||
| 2891 | LoadInst *LI = dyn_cast<LoadInst>(Instr); | |||
| 2892 | StoreInst *SI = dyn_cast<StoreInst>(Instr); | |||
| 2893 | ||||
| 2894 | assert((LI || SI) && "Invalid Load/Store instruction"); | |||
| 2895 | assert((!SI || StoredValue) && "No stored value provided for widened store"); | |||
| 2896 | assert((!LI || !StoredValue) && "Stored value provided for widened load"); | |||
| 2897 | ||||
| 2898 | LoopVectorizationCostModel::InstWidening Decision = | |||
| 2899 | Cost->getWideningDecision(Instr, VF); | |||
| 2900 | assert((Decision == LoopVectorizationCostModel::CM_Widen || | |||
| 2901 | Decision == LoopVectorizationCostModel::CM_Widen_Reverse || | |||
| 2902 | Decision == LoopVectorizationCostModel::CM_GatherScatter) && | |||
| 2903 | "CM decision is not to widen the memory instruction"); | |||
| 2904 | ||||
| 2905 | Type *ScalarDataTy = getLoadStoreType(Instr); | |||
| 2906 | ||||
| 2907 | auto *DataTy = VectorType::get(ScalarDataTy, VF); | |||
| 2908 | const Align Alignment = getLoadStoreAlignment(Instr); | |||
| 2909 | ||||
| 2910 | // Determine if the pointer operand of the access is either consecutive or | |||
| 2911 | // reverse consecutive. | |||
| 2912 | bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse); | |||
| 2913 | bool ConsecutiveStride = | |||
| 2914 | Reverse || (Decision == LoopVectorizationCostModel::CM_Widen); | |||
| 2915 | bool CreateGatherScatter = | |||
| 2916 | (Decision == LoopVectorizationCostModel::CM_GatherScatter); | |||
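| | // For illustration: an access like A[i] is consecutive (CM_Widen), A[N - i] | |||
| | // is reverse consecutive (CM_Widen_Reverse), and an indexed access like | |||
| | // A[B[i]] would have received a CM_GatherScatter decision. | |||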
| 2917 | ||||
| 2918 | // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector | |||
| 2919 | // gather/scatter. Otherwise Decision should have been to Scalarize. | |||
| 2920 | assert((ConsecutiveStride || CreateGatherScatter) && | |||
| 2921 | "The instruction should be scalarized"); | |||
| 2922 | (void)ConsecutiveStride; | |||
| 2923 | ||||
| 2924 | VectorParts BlockInMaskParts(UF); | |||
| 2925 | bool isMaskRequired = BlockInMask; | |||
| 2926 | if (isMaskRequired) | |||
| 2927 | for (unsigned Part = 0; Part < UF; ++Part) | |||
| 2928 | BlockInMaskParts[Part] = State.get(BlockInMask, Part); | |||
| 2929 | ||||
| 2930 | const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * { | |||
| 2931 | // Calculate the pointer for the specific unroll-part. | |||
| 2932 | GetElementPtrInst *PartPtr = nullptr; | |||
| 2933 | ||||
| 2934 | bool InBounds = false; | |||
| 2935 | if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts())) | |||
| 2936 | InBounds = gep->isInBounds(); | |||
| 2937 | if (Reverse) { | |||
| 2938 | // If the address is consecutive but reversed, then the | |||
| 2939 | // wide store needs to start at the last vector element. | |||
| 2940 | // RunTimeVF = VScale * VF.getKnownMinValue() | |||
| 2941 | // For fixed-width VScale is 1, then RunTimeVF = VF.getKnownMinValue() | |||
| 2942 | Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), VF); | |||
| 2943 | // NumElt = -Part * RunTimeVF | |||
| 2944 | Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF); | |||
| 2945 | // LastLane = 1 - RunTimeVF | |||
| 2946 | Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF); | |||
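| | // As a worked example (assuming a fixed VF of 4 and Part = 1): NumElt = -4 | |||
| | // and LastLane = -3, so the two GEPs below land at Ptr[-7], the lowest | |||
| | // address covered by the second reversed vector. | |||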
| 2947 | PartPtr = | |||
| 2948 | cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt)); | |||
| 2949 | PartPtr->setIsInBounds(InBounds); | |||
| 2950 | PartPtr = cast<GetElementPtrInst>( | |||
| 2951 | Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane)); | |||
| 2952 | PartPtr->setIsInBounds(InBounds); | |||
| 2953 | if (isMaskRequired) // Reverse of a null all-one mask is a null mask. | |||
| 2954 | BlockInMaskParts[Part] = reverseVector(BlockInMaskParts[Part]); | |||
| 2955 | } else { | |||
| 2956 | Value *Increment = createStepForVF(Builder, Builder.getInt32(Part), VF); | |||
| 2957 | PartPtr = cast<GetElementPtrInst>( | |||
| 2958 | Builder.CreateGEP(ScalarDataTy, Ptr, Increment)); | |||
| 2959 | PartPtr->setIsInBounds(InBounds); | |||
| 2960 | } | |||
| 2961 | ||||
| 2962 | unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace(); | |||
| 2963 | return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); | |||
| 2964 | }; | |||
| 2965 | ||||
| 2966 | // Handle Stores: | |||
| 2967 | if (SI) { | |||
| 2968 | setDebugLocFromInst(SI); | |||
| 2969 | ||||
| 2970 | for (unsigned Part = 0; Part < UF; ++Part) { | |||
| 2971 | Instruction *NewSI = nullptr; | |||
| 2972 | Value *StoredVal = State.get(StoredValue, Part); | |||
| 2973 | if (CreateGatherScatter) { | |||
| 2974 | Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; | |||
| 2975 | Value *VectorGep = State.get(Addr, Part); | |||
| 2976 | NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, | |||
| 2977 | MaskPart); | |||
| 2978 | } else { | |||
| 2979 | if (Reverse) { | |||
| 2980 | // If we store to reverse consecutive memory locations, then we need | |||
| 2981 | // to reverse the order of elements in the stored value. | |||
| 2982 | StoredVal = reverseVector(StoredVal); | |||
| 2983 | // We don't want to update the value in the map as it might be used in | |||
| 2984 | // another expression. So don't call resetVectorValue(StoredVal). | |||
| 2985 | } | |||
| 2986 | auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0))); | |||
| 2987 | if (isMaskRequired) | |||
| 2988 | NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, | |||
| 2989 | BlockInMaskParts[Part]); | |||
| 2990 | else | |||
| 2991 | NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); | |||
| 2992 | } | |||
| 2993 | addMetadata(NewSI, SI); | |||
| 2994 | } | |||
| 2995 | return; | |||
| 2996 | } | |||
| 2997 | ||||
| 2998 | // Handle loads. | |||
| 2999 | assert(LI && "Must have a load instruction"); | |||
| 3000 | setDebugLocFromInst(LI); | |||
| 3001 | for (unsigned Part = 0; Part < UF; ++Part) { | |||
| 3002 | Value *NewLI; | |||
| 3003 | if (CreateGatherScatter) { | |||
| 3004 | Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; | |||
| 3005 | Value *VectorGep = State.get(Addr, Part); | |||
| 3006 | NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, MaskPart, | |||
| 3007 | nullptr, "wide.masked.gather"); | |||
| 3008 | addMetadata(NewLI, LI); | |||
| 3009 | } else { | |||
| 3010 | auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0))); | |||
| 3011 | if (isMaskRequired) | |||
| 3012 | NewLI = Builder.CreateMaskedLoad( | |||
| 3013 | DataTy, VecPtr, Alignment, BlockInMaskParts[Part], | |||
| 3014 | PoisonValue::get(DataTy), "wide.masked.load"); | |||
| 3015 | else | |||
| 3016 | NewLI = | |||
| 3017 | Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load"); | |||
| 3018 | ||||
| 3019 | // Add metadata to the load, but setVectorValue to the reverse shuffle. | |||
| 3020 | addMetadata(NewLI, LI); | |||
| 3021 | if (Reverse) | |||
| 3022 | NewLI = reverseVector(NewLI); | |||
| 3023 | } | |||
| 3024 | ||||
| 3025 | State.set(Def, NewLI, Part); | |||
| 3026 | } | |||
| 3027 | } | |||
| 3028 | ||||
| 3029 | void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, VPValue *Def, | |||
| 3030 | VPUser &User, | |||
| 3031 | const VPIteration &Instance, | |||
| 3032 | bool IfPredicateInstr, | |||
| 3033 | VPTransformState &State) { | |||
| 3034 | assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); | |||
| 3035 | ||||
| 3036 | // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for | |||
| 3037 | // the first lane and part. | |||
| 3038 | if (isa<NoAliasScopeDeclInst>(Instr)) | |||
| 3039 | if (!Instance.isFirstIteration()) | |||
| 3040 | return; | |||
| 3041 | ||||
| 3042 | setDebugLocFromInst(Instr); | |||
| 3043 | ||||
| 3044 | // Does this instruction return a value? | |||
| 3045 | bool IsVoidRetTy = Instr->getType()->isVoidTy(); | |||
| 3046 | ||||
| 3047 | Instruction *Cloned = Instr->clone(); | |||
| 3048 | if (!IsVoidRetTy) | |||
| 3049 | Cloned->setName(Instr->getName() + ".cloned"); | |||
| 3050 | ||||
| 3051 | State.Builder.SetInsertPoint(Builder.GetInsertBlock(), | |||
| 3052 | Builder.GetInsertPoint()); | |||
| 3053 | // Replace the operands of the cloned instructions with their scalar | |||
| 3054 | // equivalents in the new loop. | |||
| 3055 | for (unsigned op = 0, e = User.getNumOperands(); op != e; ++op) { | |||
| 3056 | auto *Operand = dyn_cast<Instruction>(Instr->getOperand(op)); | |||
| 3057 | auto InputInstance = Instance; | |||
| 3058 | if (!Operand || !OrigLoop->contains(Operand) || | |||
| 3059 | (Cost->isUniformAfterVectorization(Operand, State.VF))) | |||
| 3060 | InputInstance.Lane = VPLane::getFirstLane(); | |||
| 3061 | auto *NewOp = State.get(User.getOperand(op), InputInstance); | |||
| 3062 | Cloned->setOperand(op, NewOp); | |||
| 3063 | } | |||
| 3064 | addNewMetadata(Cloned, Instr); | |||
| 3065 | ||||
| 3066 | // Place the cloned scalar in the new loop. | |||
| 3067 | Builder.Insert(Cloned); | |||
| 3068 | ||||
| 3069 | State.set(Def, Cloned, Instance); | |||
| 3070 | ||||
| 3071 | // If we just cloned a new assumption, add it to the assumption cache. | |||
| 3072 | if (auto *II = dyn_cast<AssumeInst>(Cloned)) | |||
| 3073 | AC->registerAssumption(II); | |||
| 3074 | ||||
| 3075 | // End if-block. | |||
| 3076 | if (IfPredicateInstr) | |||
| 3077 | PredicatedInstructions.push_back(Cloned); | |||
| 3078 | } | |||
| 3079 | ||||
| 3080 | PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start, | |||
| 3081 | Value *End, Value *Step, | |||
| 3082 | Instruction *DL) { | |||
| 3083 | BasicBlock *Header = L->getHeader(); | |||
| 3084 | BasicBlock *Latch = L->getLoopLatch(); | |||
| 3085 | // As we're just creating this loop, it's possible no latch exists | |||
| 3086 | // yet. If so, use the header as this will be a single block loop. | |||
| 3087 | if (!Latch) | |||
| 3088 | Latch = Header; | |||
| 3089 | ||||
| 3090 | IRBuilder<> B(&*Header->getFirstInsertionPt()); | |||
| 3091 | Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction); | |||
| 3092 | setDebugLocFromInst(OldInst, &B); | |||
| 3093 | auto *Induction = B.CreatePHI(Start->getType(), 2, "index"); | |||
| 3094 | ||||
| 3095 | B.SetInsertPoint(Latch->getTerminator()); | |||
| 3096 | setDebugLocFromInst(OldInst, &B); | |||
| 3097 | ||||
| 3098 | // Create i+1 and fill the PHINode. | |||
| 3099 | // | |||
| 3100 | // If the tail is not folded, we know that End - Start >= Step (either | |||
| 3101 | // statically or through the minimum iteration checks). We also know that both | |||
| 3102 | // Start % Step == 0 and End % Step == 0. We exit the vector loop if %IV + | |||
| 3103 | // %Step == %End. Hence we must exit the loop before %IV + %Step unsigned | |||
| 3104 | // overflows and we can mark the induction increment as NUW. | |||
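| | // As a worked example (assuming Start = 0, Step = 4, End = 16), the index | |||
| | // takes the values 0, 4, 8, 12 and the loop exits once index.next reaches | |||
| | // 16, before the unsigned increment could ever wrap. | |||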
| 3105 | Value *Next = B.CreateAdd(Induction, Step, "index.next", | |||
| 3106 | /*NUW=*/!Cost->foldTailByMasking(), /*NSW=*/false); | |||
| 3107 | Induction->addIncoming(Start, L->getLoopPreheader()); | |||
| 3108 | Induction->addIncoming(Next, Latch); | |||
| 3109 | // Create the compare. | |||
| 3110 | Value *ICmp = B.CreateICmpEQ(Next, End); | |||
| 3111 | B.CreateCondBr(ICmp, L->getUniqueExitBlock(), Header); | |||
| 3112 | ||||
| 3113 | // Now we have two terminators. Remove the old one from the block. | |||
| 3114 | Latch->getTerminator()->eraseFromParent(); | |||
| 3115 | ||||
| 3116 | return Induction; | |||
| 3117 | } | |||
| 3118 | ||||
| 3119 | Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) { | |||
| 3120 | if (TripCount) | |||
| 3121 | return TripCount; | |||
| 3122 | ||||
| 3123 | assert(L && "Create Trip Count for null loop."); | |||
| 3124 | IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); | |||
| 3125 | // Find the loop boundaries. | |||
| 3126 | ScalarEvolution *SE = PSE.getSE(); | |||
| 3127 | const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); | |||
| 3128 | assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) && | |||
| 3129 | "Invalid loop count"); | |||
| 3130 | ||||
| 3131 | Type *IdxTy = Legal->getWidestInductionType(); | |||
| 3132 | assert(IdxTy && "No type for induction"); | |||
| 3133 | ||||
| 3134 | // The exit count might have type i64 while the phi is i32. This can happen | |||
| 3135 | // if we have an induction variable that is sign-extended before the | |||
| 3136 | // compare. The only way we get a backedge-taken count in that case is if | |||
| 3137 | // the induction variable was signed and as such will not overflow; | |||
| 3138 | // truncation is therefore legal. | |||
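| | // E.g., an i32 induction variable that is sign-extended to i64 for the | |||
| | // exit compare yields an i64 backedge-taken count, which is safely | |||
| | // truncated back to the i32 induction type here. | |||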
| 3139 | if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) > | |||
| 3140 | IdxTy->getPrimitiveSizeInBits()) | |||
| 3141 | BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); | |||
| 3142 | BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); | |||
| 3143 | ||||
| 3144 | // Get the total trip count from the count by adding 1. | |||
| 3145 | const SCEV *ExitCount = SE->getAddExpr( | |||
| 3146 | BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); | |||
| 3147 | ||||
| 3148 | const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); | |||
| 3149 | ||||
| 3150 | // Expand the trip count and place the new instructions in the preheader. | |||
| 3151 | // Notice that the pre-header does not change, only the loop body. | |||
| 3152 | SCEVExpander Exp(*SE, DL, "induction"); | |||
| 3153 | ||||
| 3154 | // Count holds the overall loop count (N). | |||
| 3155 | TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), | |||
| 3156 | L->getLoopPreheader()->getTerminator()); | |||
| 3157 | ||||
| 3158 | if (TripCount->getType()->isPointerTy()) | |||
| 3159 | TripCount = | |||
| 3160 | CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", | |||
| 3161 | L->getLoopPreheader()->getTerminator()); | |||
| 3162 | ||||
| 3163 | return TripCount; | |||
| 3164 | } | |||
| 3165 | ||||
| 3166 | Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) { | |||
| 3167 | if (VectorTripCount) | |||
| 3168 | return VectorTripCount; | |||
| 3169 | ||||
| 3170 | Value *TC = getOrCreateTripCount(L); | |||
| 3171 | IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); | |||
| 3172 | ||||
| 3173 | Type *Ty = TC->getType(); | |||
| 3174 | // This is where we can make the step a runtime constant. | |||
| 3175 | Value *Step = createStepForVF(Builder, ConstantInt::get(Ty, UF), VF); | |||
| 3176 | ||||
| 3177 | // If the tail is to be folded by masking, round the number of iterations N | |||
| 3178 | // up to a multiple of Step instead of rounding down. This is done by first | |||
| 3179 | // adding Step-1 and then rounding down. Note that it's ok if this addition | |||
| 3180 | // overflows: the vector induction variable will eventually wrap to zero given | |||
| 3181 | // that it starts at zero and its Step is a power of two; the loop will then | |||
| 3182 | // exit, with the last early-exit vector comparison also producing all-true. | |||
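| | // Worked example (illustrative): with N == 13 and Step == VF * UF == 8, | |||
| | // n.rnd.up = 13 + 7 = 20 and n.vec = 20 - (20 % 8) = 16, so two masked | |||
| | // vector iterations cover all 13 original iterations. | |||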
| 3183 | if (Cost->foldTailByMasking()) { | |||
| 3184 | assert(isPowerOf2_32(VF.getKnownMinValue() * UF) && | |||
| 3185 | "VF*UF must be a power of 2 when folding tail by masking"); | |||
| 3186 | assert(!VF.isScalable() && | |||
| 3187 | "Tail folding not yet supported for scalable vectors"); | |||
| 3188 | TC = Builder.CreateAdd( | |||
| 3189 | TC, ConstantInt::get(Ty, VF.getKnownMinValue() * UF - 1), "n.rnd.up"); | |||
| 3190 | } | |||
| 3191 | ||||
| 3192 | // Now we need to generate the expression for the part of the loop that the | |||
| 3193 | // vectorized body will execute. This is equal to N - (N % Step) if scalar | |||
| 3194 | // iterations are not required for correctness, or N - Step, otherwise. Step | |||
| 3195 | // is equal to the vectorization factor (number of SIMD elements) times the | |||
| 3196 | // unroll factor (number of SIMD instructions). | |||
| 3197 | Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); | |||
| 3198 | ||||
| 3199 | // There are cases where we *must* run at least one iteration in the remainder | |||
| 3200 | // loop. See the cost model for when this can happen. If the step evenly | |||
| 3201 | // divides the trip count, we set the remainder to be equal to the step. If | |||
| 3202 | // the step does not evenly divide the trip count, no adjustment is necessary | |||
| 3203 | // since there will already be scalar iterations. Note that the minimum | |||
| 3204 | // iterations check ensures that N >= Step. | |||
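| | // E.g., assuming N == 16, Step == 8, and a required scalar epilogue: | |||
| | // R = 16 % 8 = 0 is replaced by 8, so n.vec = 8 and iterations 8..15 run | |||
| | // in the scalar remainder loop. | |||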
| 3205 | if (Cost->requiresScalarEpilogue(VF)) { | |||
| 3206 | auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); | |||
| 3207 | R = Builder.CreateSelect(IsZero, Step, R); | |||
| 3208 | } | |||
| 3209 | ||||
| 3210 | VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); | |||
| 3211 | ||||
| 3212 | return VectorTripCount; | |||
| 3213 | } | |||
| 3214 | ||||
| 3215 | Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy, | |||
| 3216 | const DataLayout &DL) { | |||
| 3217 | // Verify that V is a vector type with same number of elements as DstVTy. | |||
| 3218 | auto *DstFVTy = cast<FixedVectorType>(DstVTy); | |||
| 3219 | unsigned VF = DstFVTy->getNumElements(); | |||
| 3220 | auto *SrcVecTy = cast<FixedVectorType>(V->getType()); | |||
| 3221 | assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match"); | |||
| 3222 | Type *SrcElemTy = SrcVecTy->getElementType(); | |||
| 3223 | Type *DstElemTy = DstFVTy->getElementType(); | |||
| 3224 | assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) && | |||
| 3225 | "Vector elements must have same size"); | |||
| 3226 | ||||
| 3227 | // Do a direct cast if element types are castable. | |||
| 3228 | if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) { | |||
| 3229 | return Builder.CreateBitOrPointerCast(V, DstFVTy); | |||
| 3230 | } | |||
| 3231 | // V cannot be directly cast to the desired vector type. This may happen | |||
| 3232 | // when V is a floating-point vector but DstVTy is a vector of pointers, or | |||
| 3233 | // vice versa. Handle it with a two-step bitcast through an intermediate | |||
| 3234 | // integer type, i.e. Ptr <-> Int <-> Float. | |||
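| | // E.g., casting <4 x float> to <4 x i8*> goes through <4 x i32>, assuming | |||
| | // 32-bit pointers; the element sizes must match per the asserts below. | |||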
| 3235 | assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) && | |||
| 3236 | "Only one type should be a pointer type"); | |||
| 3237 | assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) && | |||
| 3238 | "Only one type should be a floating point type"); | |||
| 3239 | Type *IntTy = | |||
| 3240 | IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy)); | |||
| 3241 | auto *VecIntTy = FixedVectorType::get(IntTy, VF); | |||
| 3242 | Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy); | |||
| 3243 | return Builder.CreateBitOrPointerCast(CastVal, DstFVTy); | |||
| 3244 | } | |||
| 3245 | ||||
| 3246 | void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L, | |||
| 3247 | BasicBlock *Bypass) { | |||
| 3248 | Value *Count = getOrCreateTripCount(L); | |||
| 3249 | // Reuse existing vector loop preheader for TC checks. | |||
| 3250 | // Note that new preheader block is generated for vector loop. | |||
| 3251 | BasicBlock *const TCCheckBlock = LoopVectorPreHeader; | |||
| 3252 | IRBuilder<> Builder(TCCheckBlock->getTerminator()); | |||
| 3253 | ||||
| 3254 | // Generate code to check if the loop's trip count is less than VF * UF, or | |||
| 3255 | // equal to it in case a scalar epilogue is required; this implies that the | |||
| 3256 | // vector trip count is zero. This check also covers the case where adding one | |||
| 3257 | // to the backedge-taken count overflowed leading to an incorrect trip count | |||
| 3258 | // of zero. In this case we will also jump to the scalar loop. | |||
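| | // E.g., assuming VF * UF == 8 and no required epilogue, we take the scalar | |||
| | // loop when Count ult 8; this also catches Count == 0 produced by a | |||
| | // wrapped backedge-taken count of uint##_max. | |||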
| 3259 | auto P = Cost->requiresScalarEpilogue(VF) ? ICmpInst::ICMP_ULE | |||
| 3260 | : ICmpInst::ICMP_ULT; | |||
| 3261 | ||||
| 3262 | // If tail is to be folded, vector loop takes care of all iterations. | |||
| 3263 | Value *CheckMinIters = Builder.getFalse(); | |||
| 3264 | if (!Cost->foldTailByMasking()) { | |||
| 3265 | Value *Step = | |||
| 3266 | createStepForVF(Builder, ConstantInt::get(Count->getType(), UF), VF); | |||
| 3267 | CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check"); | |||
| 3268 | } | |||
| 3269 | // Create new preheader for vector loop. | |||
| 3270 | LoopVectorPreHeader = | |||
| 3271 | SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr, | |||
| 3272 | "vector.ph"); | |||
| 3273 | ||||
| 3274 | assert(DT->properlyDominates(DT->getNode(TCCheckBlock), | |||
| 3275 | DT->getNode(Bypass)->getIDom()) && | |||
| 3276 | "TC check is expected to dominate Bypass"); | |||
| 3277 | ||||
| 3278 | // Update dominator for Bypass & LoopExit (if needed). | |||
| 3279 | DT->changeImmediateDominator(Bypass, TCCheckBlock); | |||
| 3280 | if (!Cost->requiresScalarEpilogue(VF)) | |||
| 3281 | // If there is an epilogue which must run, there's no edge from the | |||
| 3282 | // middle block to exit blocks and thus no need to update the immediate | |||
| 3283 | // dominator of the exit blocks. | |||
| 3284 | DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); | |||
| 3285 | ||||
| 3286 | ReplaceInstWithInst( | |||
| 3287 | TCCheckBlock->getTerminator(), | |||
| 3288 | BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); | |||
| 3289 | LoopBypassBlocks.push_back(TCCheckBlock); | |||
| 3290 | } | |||
| 3291 | ||||
| 3292 | BasicBlock *InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) { | |||
| 3293 | ||||
| 3294 | BasicBlock *const SCEVCheckBlock = | |||
| 3295 | RTChecks.emitSCEVChecks(L, Bypass, LoopVectorPreHeader, LoopExitBlock); | |||
| 3296 | if (!SCEVCheckBlock) | |||
| 3297 | return nullptr; | |||
| 3298 | ||||
| 3299 | assert(!(SCEVCheckBlock->getParent()->hasOptSize() || | |||
| 3300 | (OptForSizeBasedOnProfile && | |||
| 3301 | Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) && | |||
| 3302 | "Cannot SCEV check stride or overflow when optimizing for size"); | |||
| 3303 | ||||
| 3304 | ||||
| 3305 | // Update dominator only if this is first RT check. | |||
| 3306 | if (LoopBypassBlocks.empty()) { | |||
| 3307 | DT->changeImmediateDominator(Bypass, SCEVCheckBlock); | |||
| 3308 | if (!Cost->requiresScalarEpilogue(VF)) | |||
| 3309 | // If there is an epilogue which must run, there's no edge from the | |||
| 3310 | // middle block to exit blocks and thus no need to update the immediate | |||
| 3311 | // dominator of the exit blocks. | |||
| 3312 | DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock); | |||
| 3313 | } | |||
| 3314 | ||||
| 3315 | LoopBypassBlocks.push_back(SCEVCheckBlock); | |||
| 3316 | AddedSafetyChecks = true; | |||
| 3317 | return SCEVCheckBlock; | |||
| 3318 | } | |||
| 3319 | ||||
| 3320 | BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, | |||
| 3321 | BasicBlock *Bypass) { | |||
| 3322 | // VPlan-native path does not do any analysis for runtime checks currently. | |||
| 3323 | if (EnableVPlanNativePath) | |||
| 3324 | return nullptr; | |||
| 3325 | ||||
| 3326 | BasicBlock *const MemCheckBlock = | |||
| 3327 | RTChecks.emitMemRuntimeChecks(L, Bypass, LoopVectorPreHeader); | |||
| 3328 | ||||
| 3329 | // Check whether we generated code that checks at runtime if arrays overlap. | |||
| 3330 | // We put the checks into a separate block to make the more common case of | |||
| 3331 | // few elements faster. | |||
| 3332 | if (!MemCheckBlock) | |||
| 3333 | return nullptr; | |||
| 3334 | ||||
| 3335 | if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) { | |||
| 3336 | assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled && | |||
| 3337 | "Cannot emit memory checks when optimizing for size, unless forced " | |||
| 3338 | "to vectorize."); | |||
| 3339 | ORE->emit([&]() { | |||
| 3340 | return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize", | |||
| 3341 | L->getStartLoc(), L->getHeader()) | |||
| 3342 | << "Code-size may be reduced by not forcing " | |||
| 3343 | "vectorization, or by source-code modifications " | |||
| 3344 | "eliminating the need for runtime checks " | |||
| 3345 | "(e.g., adding 'restrict')."; | |||
| 3346 | }); | |||
| 3347 | } | |||
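| | // E.g., in C, declaring the arrays as 'int *restrict a, int *restrict b' | |||
| | // asserts non-overlap to the compiler, making these runtime checks | |||
| | // unnecessary in the first place. | |||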
| 3348 | ||||
| 3349 | LoopBypassBlocks.push_back(MemCheckBlock); | |||
| 3350 | ||||
| 3351 | AddedSafetyChecks = true; | |||
| 3352 | ||||
| 3353 | // We currently don't use LoopVersioning for the actual loop cloning but we | |||
| 3354 | // still use it to add the noalias metadata. | |||
| 3355 | LVer = std::make_unique<LoopVersioning>( | |||
| 3356 | *Legal->getLAI(), | |||
| 3357 | Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI, | |||
| 3358 | DT, PSE.getSE()); | |||
| 3359 | LVer->prepareNoAliasMetadata(); | |||
| 3360 | return MemCheckBlock; | |||
| 3361 | } | |||
| 3362 | ||||
| 3363 | Value *InnerLoopVectorizer::emitTransformedIndex( | |||
| 3364 | IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL, | |||
| 3365 | const InductionDescriptor &ID) const { | |||
| 3366 | ||||
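| | // In shorthand (illustrative, for a scalar Index): IK_IntInduction computes | |||
| | // Start + Index * Step, IK_PtrInduction computes &Start[Index * Step], and | |||
| | // IK_FpInduction computes Start fadd/fsub Index * Step. | |||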
| 3367 | SCEVExpander Exp(*SE, DL, "induction"); | |||
| 3368 | auto Step = ID.getStep(); | |||
| 3369 | auto StartValue = ID.getStartValue(); | |||
| 3370 | assert(Index->getType()->getScalarType() == Step->getType() && | |||
| 3371 | "Index scalar type does not match StepValue type"); | |||
| 3372 | ||||
| 3373 | // Note: the IR at this point is broken. We cannot use SE to create any new | |||
| 3374 | // SCEV and then expand it, hoping that SCEV's simplification will give us | |||
| 3375 | // more optimal code. Unfortunately, attempting to do so on invalid IR may | |||
| 3376 | // lead to various SCEV crashes. So all we can do is use the builder and | |||
| 3377 | // rely on InstCombine for future simplifications. Here we handle only some | |||
| 3378 | // trivial cases. | |||
| 3379 | auto CreateAdd = [&B](Value *X, Value *Y) { | |||
| 3380 | assert(X->getType() == Y->getType() && "Types don't match!"); | |||
| 3381 | if (auto *CX = dyn_cast<ConstantInt>(X)) | |||
| 3382 | if (CX->isZero()) | |||
| 3383 | return Y; | |||
| 3384 | if (auto *CY = dyn_cast<ConstantInt>(Y)) | |||
| 3385 | if (CY->isZero()) | |||
| 3386 | return X; | |||
| 3387 | return B.CreateAdd(X, Y); | |||
| 3388 | }; | |||
| 3389 | ||||
| 3390 | // We allow X to be a vector type, in which case Y will potentially be | |||
| 3391 | // splatted into a vector with the same element count. | |||
| 3392 | auto CreateMul = [&B](Value *X, Value *Y) { | |||
| 3393 | assert(X->getType()->getScalarType() == Y->getType() && | |||
| 3394 | "Types don't match!"); | |||
| 3395 | if (auto *CX = dyn_cast<ConstantInt>(X)) | |||
| 3396 | if (CX->isOne()) | |||
| 3397 | return Y; | |||
| 3398 | if (auto *CY = dyn_cast<ConstantInt>(Y)) | |||
| 3399 | if (CY->isOne()) | |||
| 3400 | return X; | |||
| 3401 | VectorType *XVTy = dyn_cast<VectorType>(X->getType()); | |||
| 3402 | if (XVTy && !isa<VectorType>(Y->getType())) | |||
| 3403 | Y = B.CreateVectorSplat(XVTy->getElementCount(), Y); | |||
| 3404 | return B.CreateMul(X, Y); | |||
| 3405 | }; | |||
| 3406 | ||||
| 3407 | // Get a suitable insert point for SCEV expansion. For blocks in the vector | |||
| 3408 | // loop, choose the end of the vector loop header (=LoopVectorBody), because | |||
| 3409 | // the DomTree is not kept up-to-date for additional blocks generated in the | |||
| 3410 | // vector loop. By using the header as insertion point, we guarantee that the | |||
| 3411 | // expanded instructions dominate all their uses. | |||
| 3412 | auto GetInsertPoint = [this, &B]() { | |||
| 3413 | BasicBlock *InsertBB = B.GetInsertPoint()->getParent(); | |||
| 3414 | if (InsertBB != LoopVectorBody && | |||
| 3415 | LI->getLoopFor(LoopVectorBody) == LI->getLoopFor(InsertBB)) | |||
| 3416 | return LoopVectorBody->getTerminator(); | |||
| 3417 | return &*B.GetInsertPoint(); | |||
| 3418 | }; | |||
| 3419 | ||||
| 3420 | switch (ID.getKind()) { | |||
| 3421 | case InductionDescriptor::IK_IntInduction: { | |||
| 3422 | assert(!isa<VectorType>(Index->getType()) && | |||
| 3423 | "Vector indices not supported for integer inductions yet"); | |||
| 3424 | assert(Index->getType() == StartValue->getType() && | |||
| 3425 | "Index type does not match StartValue type"); | |||
| 3426 | if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne()) | |||
| 3427 | return B.CreateSub(StartValue, Index); | |||
| 3428 | auto *Offset = CreateMul( | |||
| 3429 | Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint())); | |||
| 3430 | return CreateAdd(StartValue, Offset); | |||
| 3431 | } | |||
| 3432 | case InductionDescriptor::IK_PtrInduction: { | |||
| 3433 | assert(isa<SCEVConstant>(Step) && | |||
| 3434 | "Expected constant step for pointer induction"); | |||
| 3435 | return B.CreateGEP( | |||
| 3436 | StartValue->getType()->getPointerElementType(), StartValue, | |||
| 3437 | CreateMul(Index, | |||
| 3438 | Exp.expandCodeFor(Step, Index->getType()->getScalarType(), | |||
| 3439 | GetInsertPoint()))); | |||
| 3440 | } | |||
| 3441 | case InductionDescriptor::IK_FpInduction: { | |||
| 3442 | assert(!isa<VectorType>(Index->getType()) && | |||
| 3443 | "Vector indices not supported for FP inductions yet"); | |||
| 3444 | assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value"); | |||
| 3445 | auto InductionBinOp = ID.getInductionBinOp(); | |||
| 3446 | assert(InductionBinOp && | |||
| 3447 | (InductionBinOp->getOpcode() == Instruction::FAdd || | |||
| 3448 | InductionBinOp->getOpcode() == Instruction::FSub) && | |||
| 3449 | "Original bin op should be defined for FP induction"); | |||
| 3450 | ||||
| 3451 | Value *StepValue = cast<SCEVUnknown>(Step)->getValue(); | |||
| 3452 | Value *MulExp = B.CreateFMul(StepValue, Index); | |||
| 3453 | return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp, | |||
| 3454 | "induction"); | |||
| 3455 | } | |||
| 3456 | case InductionDescriptor::IK_NoInduction: | |||
| 3457 | return nullptr; | |||
| 3458 | } | |||
| 3459 | llvm_unreachable("invalid enum"); | |||
| 3460 | } | |||
| 3461 | ||||
| 3462 | Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) { | |||
| 3463 | LoopScalarBody = OrigLoop->getHeader(); | |||
| 3464 | LoopVectorPreHeader = OrigLoop->getLoopPreheader(); | |||
| 3465 | assert(LoopVectorPreHeader && "Invalid loop structure"); | |||
| 3466 | LoopExitBlock = OrigLoop->getUniqueExitBlock(); // may be nullptr | |||
| 3467 | assert((LoopExitBlock || Cost->requiresScalarEpilogue(VF)) && | |||
| 3468 | "multiple exit loop without required epilogue?"); | |||
| 3469 | ||||
| 3470 | LoopMiddleBlock = | |||
| 3471 | SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, | |||
| 3472 | LI, nullptr, Twine(Prefix) + "middle.block"); | |||
| 3473 | LoopScalarPreHeader = | |||
| 3474 | SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI, | |||
| 3475 | nullptr, Twine(Prefix) + "scalar.ph"); | |||
| 3476 | ||||
| 3477 | auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); | |||
| 3478 | ||||
| 3479 | // Set up the middle block terminator. Two cases: | |||
| 3480 | // 1) If we know that we must execute the scalar epilogue, emit an | |||
| 3481 | // unconditional branch. | |||
| 3482 | // 2) Otherwise, we must have a single unique exit block (due to how we | |||
| 3483 | // implement the multiple exit case). In this case, set up a conditional | |||
| 3484 | // branch from the middle block to the loop scalar preheader, and the | |||
| 3485 | // exit block. completeLoopSkeleton will update the condition to use an | |||
| 3486 | // iteration check, if required to decide whether to execute the remainder. | |||
| 3487 | BranchInst *BrInst = Cost->requiresScalarEpilogue(VF) ? | |||
| 3488 | BranchInst::Create(LoopScalarPreHeader) : | |||
| 3489 | BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, | |||
| 3490 | Builder.getTrue()); | |||
| 3491 | BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc()); | |||
| 3492 | ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst); | |||
| 3493 | ||||
| 3494 | // We intentionally don't let SplitBlock update LoopInfo, since | |||
| 3495 | // LoopVectorBody should belong to a different loop than LoopVectorPreHeader. | |||
| 3496 | // LoopVectorBody is explicitly added to the correct place a few lines later. | |||
| 3497 | LoopVectorBody = | |||
| 3498 | SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, | |||
| 3499 | nullptr, nullptr, Twine(Prefix) + "vector.body"); | |||
| 3500 | ||||
| 3501 | // Update dominator for loop exit. | |||
| 3502 | if (!Cost->requiresScalarEpilogue(VF)) | |||
| 3503 | // If there is an epilogue which must run, there's no edge from the | |||
| 3504 | // middle block to exit blocks and thus no need to update the immediate | |||
| 3505 | // dominator of the exit blocks. | |||
| 3506 | DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock); | |||
| 3507 | ||||
| 3508 | // Create and register the new vector loop. | |||
| 3509 | Loop *Lp = LI->AllocateLoop(); | |||
| 3510 | Loop *ParentLoop = OrigLoop->getParentLoop(); | |||
| 3511 | ||||
| 3512 | // Insert the new loop into the loop nest and register the new basic blocks | |||
| 3513 | // before calling any utilities such as SCEV that require valid LoopInfo. | |||
| 3514 | if (ParentLoop) { | |||
| 3515 | ParentLoop->addChildLoop(Lp); | |||
| 3516 | } else { | |||
| 3517 | LI->addTopLevelLoop(Lp); | |||
| 3518 | } | |||
| 3519 | Lp->addBasicBlockToLoop(LoopVectorBody, *LI); | |||
| 3520 | return Lp; | |||
| 3521 | } | |||
| 3522 | ||||
| 3523 | void InnerLoopVectorizer::createInductionResumeValues( | |||
| 3524 | Loop *L, Value *VectorTripCount, | |||
| 3525 | std::pair<BasicBlock *, Value *> AdditionalBypass) { | |||
| 3526 | assert(VectorTripCount && L && "Expected valid arguments"); | |||
| 3527 | assert(((AdditionalBypass.first && AdditionalBypass.second) || | |||
| 3528 | (!AdditionalBypass.first && !AdditionalBypass.second)) && | |||
| 3529 | "Inconsistent information about additional bypass."); | |||
| 3530 | // We are going to resume the execution of the scalar loop. | |||
| 3531 | // Go over all of the induction variables that we found and fix the | |||
| 3532 | // PHIs that are left in the scalar version of the loop. | |||
| 3533 | // The starting values of PHI nodes depend on the counter of the last | |||
| 3534 | // iteration in the vectorized loop. | |||
| 3535 | // If we come from a bypass edge then we need to start from the original | |||
| 3536 | // start value. | |||
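| | // Shorthand of the merge created per induction (illustrative): | |||
| | // scalar.ph: | |||
| | //   %bc.resume.val = phi [ %ind.end, %middle.block ], | |||
| | //                        [ %start, %bypass.block ], ... | |||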
| 3537 | for (auto &InductionEntry : Legal->getInductionVars()) { | |||
| 3538 | PHINode *OrigPhi = InductionEntry.first; | |||
| 3539 | InductionDescriptor II = InductionEntry.second; | |||
| 3540 | ||||
| 3541 | // Create phi nodes to merge from the backedge-taken check block. | |||
| 3542 | PHINode *BCResumeVal = | |||
| 3543 | PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val", | |||
| 3544 | LoopScalarPreHeader->getTerminator()); | |||
| 3545 | // Copy original phi DL over to the new one. | |||
| 3546 | BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc()); | |||
| 3547 | Value *&EndValue = IVEndValues[OrigPhi]; | |||
| 3548 | Value *EndValueFromAdditionalBypass = AdditionalBypass.second; | |||
| 3549 | if (OrigPhi == OldInduction) { | |||
| 3550 | // We know what the end value is. | |||
| 3551 | EndValue = VectorTripCount; | |||
| 3552 | } else { | |||
| 3553 | IRBuilder<> B(L->getLoopPreheader()->getTerminator()); | |||
| 3554 | ||||
| 3555 | // Fast-math-flags propagate from the original induction instruction. | |||
| 3556 | if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp())) | |||
| 3557 | B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags()); | |||
| 3558 | ||||
| 3559 | Type *StepType = II.getStep()->getType(); | |||
| 3560 | Instruction::CastOps CastOp = | |||
| 3561 | CastInst::getCastOpcode(VectorTripCount, true, StepType, true); | |||
| 3562 | Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd"); | |||
| 3563 | const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout(); | |||
| 3564 | EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); | |||
| 3565 | EndValue->setName("ind.end"); | |||
| 3566 | ||||
| 3567 | // Compute the end value for the additional bypass (if applicable). | |||
| 3568 | if (AdditionalBypass.first) { | |||
| 3569 | B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt())); | |||
| 3570 | CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true, | |||
| 3571 | StepType, true); | |||
| 3572 | CRD = | |||
| 3573 | B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd"); | |||
| 3574 | EndValueFromAdditionalBypass = | |||
| 3575 | emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); | |||
| 3576 | EndValueFromAdditionalBypass->setName("ind.end"); | |||
| 3577 | } | |||
| 3578 | } | |||
| 3579 | // The new PHI merges the original incoming value, in case of a bypass, | |||
| 3580 | // or the value at the end of the vectorized loop. | |||
| 3581 | BCResumeVal->addIncoming(EndValue, LoopMiddleBlock); | |||
| 3582 | ||||
| 3583 | // Fix the scalar body counter (PHI node). | |||
| 3584 | // The old induction's phi node in the scalar body needs the truncated | |||
| 3585 | // value. | |||
| 3586 | for (BasicBlock *BB : LoopBypassBlocks) | |||
| 3587 | BCResumeVal->addIncoming(II.getStartValue(), BB); | |||
| 3588 | ||||
| 3589 | if (AdditionalBypass.first) | |||
| 3590 | BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first, | |||
| 3591 | EndValueFromAdditionalBypass); | |||
| 3592 | ||||
| 3593 | OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal); | |||
| 3594 | } | |||
| 3595 | } | |||
| 3596 | ||||
| 3597 | BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L, | |||
| 3598 | MDNode *OrigLoopID) { | |||
| 3599 | assert(L && "Expected valid loop.")((void)0); | |||
| 3600 | ||||
| 3601 | // The trip counts should be cached by now. | |||
| 3602 | Value *Count = getOrCreateTripCount(L); | |||
| 3603 | Value *VectorTripCount = getOrCreateVectorTripCount(L); | |||
| 3604 | ||||
| 3605 | auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); | |||
| 3606 | ||||
| 3607 | // Add a check in the middle block to see if we have completed | |||
| 3608 | // all of the iterations in the first vector loop. Three cases: | |||
| 3609 | // 1) If we require a scalar epilogue, there is no conditional branch as | |||
| 3610 | // we unconditionally branch to the scalar preheader. Do nothing. | |||
| 3611 | // 2) If (N - N%VF) == N, then we *don't* need to run the remainder. | |||
| 3612 | // Thus if tail is to be folded, we know we don't need to run the | |||
| 3613 | // remainder and we can use the previous value for the condition (true). | |||
| 3614 | // 3) Otherwise, construct a runtime check. | |||
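| | // E.g., for case 3 with N == 20 and VF * UF == 8: n.vec == 16, so | |||
| | // cmp.n = (20 == 16) is false and the scalar remainder runs | |||
| | // iterations 16..19. | |||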
| 3615 | if (!Cost->requiresScalarEpilogue(VF) && !Cost->foldTailByMasking()) { | |||
| 3616 | Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, | |||
| 3617 | Count, VectorTripCount, "cmp.n", | |||
| 3618 | LoopMiddleBlock->getTerminator()); | |||
| 3619 | ||||
| 3620 | // Here we use the same DebugLoc as the scalar loop latch terminator instead | |||
| 3621 | // of the corresponding compare because they may have ended up with | |||
| 3622 | // different line numbers and we want to avoid awkward line stepping while | |||
| 3623 | // debugging. E.g., if the compare has a line number inside the loop. | |||
| 3624 | CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc()); | |||
| 3625 | cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN); | |||
| 3626 | } | |||
| 3627 | ||||
| 3628 | // Get ready to start creating new instructions into the vectorized body. | |||
| 3629 | assert(LoopVectorPreHeader == L->getLoopPreheader() && | |||
| 3630 | "Inconsistent vector loop preheader"); | |||
| 3631 | Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt()); | |||
| 3632 | ||||
| 3633 | Optional<MDNode *> VectorizedLoopID = | |||
| 3634 | makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, | |||
| 3635 | LLVMLoopVectorizeFollowupVectorized}); | |||
| 3636 | if (VectorizedLoopID.hasValue()) { | |||
| 3637 | L->setLoopID(VectorizedLoopID.getValue()); | |||
| 3638 | ||||
| 3639 | // Do not setAlreadyVectorized if loop attributes have been defined | |||
| 3640 | // explicitly. | |||
| 3641 | return LoopVectorPreHeader; | |||
| 3642 | } | |||
| 3643 | ||||
| 3644 | // Keep all loop hints from the original loop on the vector loop (we'll | |||
| 3645 | // replace the vectorizer-specific hints below). | |||
| 3646 | if (MDNode *LID = OrigLoop->getLoopID()) | |||
| 3647 | L->setLoopID(LID); | |||
| 3648 | ||||
| 3649 | LoopVectorizeHints Hints(L, true, *ORE); | |||
| 3650 | Hints.setAlreadyVectorized(); | |||
| 3651 | ||||
| 3652 | #ifdef EXPENSIVE_CHECKS | |||
| 3653 | assert(DT->verify(DominatorTree::VerificationLevel::Fast)); | |||
| 3654 | LI->verify(*DT); | |||
| 3655 | #endif | |||
| 3656 | ||||
| 3657 | return LoopVectorPreHeader; | |||
| 3658 | } | |||
| 3659 | ||||
| 3660 | BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() { | |||
| 3661 | /* | |||
| 3662 | In this function we generate a new loop. The new loop will contain | |||
| 3663 | the vectorized instructions while the old loop will continue to run the | |||
| 3664 | scalar remainder. | |||
| 3665 | ||||
| 3666 | [ ] <-- loop iteration number check. | |||
| 3667 | / | | |||
| 3668 | / v | |||
| 3669 | | [ ] <-- vector loop bypass (may consist of multiple blocks). | |||
| 3670 | | / | | |||
| 3671 | | / v | |||
| 3672 | || [ ] <-- vector pre header. | |||
| 3673 | |/ | | |||
| 3674 | | v | |||
| 3675 | | [ ] \ | |||
| 3676 | | [ ]_| <-- vector loop. | |||
| 3677 | | | | |||
| 3678 | | v | |||
| 3679 | \ -[ ] <--- middle-block. | |||
| 3680 | \/ | | |||
| 3681 | /\ v | |||
| 3682 | | ->[ ] <--- new preheader. | |||
| 3683 | | | | |||
| 3684 | (opt) v <-- edge from middle to exit iff epilogue is not required. | |||
| 3685 | | [ ] \ | |||
| 3686 | | [ ]_| <-- old scalar loop to handle remainder (scalar epilogue). | |||
| 3687 | \ | | |||
| 3688 | \ v | |||
| 3689 | >[ ] <-- exit block(s). | |||
| 3690 | ... | |||
| 3691 | */ | |||
| 3692 | ||||
| 3693 | // Get the metadata of the original loop before it gets modified. | |||
| 3694 | MDNode *OrigLoopID = OrigLoop->getLoopID(); | |||
| 3695 | ||||
| 3696 | // Workaround! Compute the trip count of the original loop and cache it | |||
| 3697 | // before we start modifying the CFG. This code has a systemic problem | |||
| 3698 | // wherein it tries to run analysis over partially constructed IR; this is | |||
| 3699 | // wrong, and not simply for SCEV. The trip count of the original loop | |||
| 3700 | // simply happens to be prone to hitting this in practice. In theory, we | |||
| 3701 | // can hit the same issue for any SCEV, or ValueTracking query done during | |||
| 3702 | // mutation. See PR49900. | |||
| 3703 | getOrCreateTripCount(OrigLoop); | |||
| 3704 | ||||
| 3705 | // Create an empty vector loop, and prepare basic blocks for the runtime | |||
| 3706 | // checks. | |||
| 3707 | Loop *Lp = createVectorLoopSkeleton(""); | |||
| 3708 | ||||
| 3709 | // Now, compare the new count to zero. If it is zero skip the vector loop and | |||
| 3710 | // jump to the scalar loop. This check also covers the case where the | |||
| 3711 | // backedge-taken count is uint##_max: adding one to it will overflow leading | |||
| 3712 | // to an incorrect trip count of zero. In this (rare) case we will also jump | |||
| 3713 | // to the scalar loop. | |||
| 3714 | emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader); | |||
| 3715 | ||||
| 3716 | // Generate the code to check any assumptions that we've made for SCEV | |||
| 3717 | // expressions. | |||
| 3718 | emitSCEVChecks(Lp, LoopScalarPreHeader); | |||
| 3719 | ||||
| 3720 | // Generate the code that checks in runtime if arrays overlap. We put the | |||
| 3721 | // checks into a separate block to make the more common case of few elements | |||
| 3722 | // faster. | |||
| 3723 | emitMemRuntimeChecks(Lp, LoopScalarPreHeader); | |||
| 3724 | ||||
| 3725 | // Some loops have a single integer induction variable, while other loops | |||
| 3726 | // don't. One example is C++ iterators, which often have multiple pointer | |||
| 3727 | // induction variables. In the code below we also support a case where we | |||
| 3728 | // don't have a single induction variable. | |||
| 3729 | // | |||
| 3730 | // We try as hard as possible to obtain an induction variable from the | |||
| 3731 | // original loop. However, if we don't find one that: | |||
| 3732 | // - is an integer | |||
| 3733 | // - counts from zero, stepping by one | |||
| 3734 | // - is the size of the widest induction variable type | |||
| 3735 | // then we create a new one. | |||
| 3736 | OldInduction = Legal->getPrimaryInduction(); | |||
| 3737 | Type *IdxTy = Legal->getWidestInductionType(); | |||
| 3738 | Value *StartIdx = ConstantInt::get(IdxTy, 0); | |||
| 3739 | // The loop step is equal to the vectorization factor (num of SIMD elements) | |||
| 3740 | // times the unroll factor (num of SIMD instructions). | |||
| 3741 | Builder.SetInsertPoint(&*Lp->getHeader()->getFirstInsertionPt()); | |||
| 3742 | Value *Step = createStepForVF(Builder, ConstantInt::get(IdxTy, UF), VF); | |||
| 3743 | Value *CountRoundDown = getOrCreateVectorTripCount(Lp); | |||
| 3744 | Induction = | |||
| 3745 | createInductionVariable(Lp, StartIdx, CountRoundDown, Step, | |||
| 3746 | getDebugLocFromInstOrOperands(OldInduction)); | |||
| 3747 | ||||
| 3748 | // Emit phis for the new starting index of the scalar loop. | |||
| 3749 | createInductionResumeValues(Lp, CountRoundDown); | |||
| 3750 | ||||
| 3751 | return completeLoopSkeleton(Lp, OrigLoopID); | |||
| 3752 | } | |||
| 3753 | ||||
| 3754 | // Fix up external users of the induction variable. At this point, we are | |||
| 3755 | // in LCSSA form, with all external PHIs that use the IV having one input value, | |||
| 3756 | // coming from the remainder loop. We need those PHIs to also have a correct | |||
| 3757 | // value for the IV when arriving directly from the middle block. | |||
| 3758 | void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi, | |||
| 3759 | const InductionDescriptor &II, | |||
| 3760 | Value *CountRoundDown, Value *EndValue, | |||
| 3761 | BasicBlock *MiddleBlock) { | |||
| 3762 | // There are two kinds of external IV usages - those that use the value | |||
| 3763 | // computed in the last iteration (the PHI) and those that use the penultimate | |||
| 3764 | // value (the value that feeds into the phi from the loop latch). | |||
| 3765 | // We allow both, but they, obviously, have different values. | |||
| 3766 | ||||
| 3767 | assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block"); | |||
| 3768 | ||||
| 3769 | DenseMap<Value *, Value *> MissingVals; | |||
| 3770 | ||||
| 3771 | // An external user of the last iteration's value should see the value that | |||
| 3772 | // the remainder loop uses to initialize its own IV. | |||
| 3773 | Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch()); | |||
| 3774 | for (User *U : PostInc->users()) { | |||
| 3775 | Instruction *UI = cast<Instruction>(U); | |||
| 3776 | if (!OrigLoop->contains(UI)) { | |||
| 3777 | assert(isa<PHINode>(UI) && "Expected LCSSA form"); | |||
| 3778 | MissingVals[UI] = EndValue; | |||
| 3779 | } | |||
| 3780 | } | |||
| 3781 | ||||
| 3782 | // An external user of the penultimate value needs to see EndValue - Step. | |||
| 3783 | // The simplest way to get this is to recompute it from the constituent SCEVs, | |||
| 3784 | // that is Start + (Step * (CRD - 1)). | |||
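| | // E.g., assuming Start == 0, Step == 1, and CountRoundDown == 16: a user | |||
| | // of the phi itself must see 15, while a user of the post-increment value | |||
| | // sees 16 (EndValue). | |||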
| 3785 | for (User *U : OrigPhi->users()) { | |||
| 3786 | auto *UI = cast<Instruction>(U); | |||
| 3787 | if (!OrigLoop->contains(UI)) { | |||
| 3788 | const DataLayout &DL = | |||
| 3789 | OrigLoop->getHeader()->getModule()->getDataLayout(); | |||
| 3790 | assert(isa<PHINode>(UI) && "Expected LCSSA form"); | |||
| 3791 | ||||
| 3792 | IRBuilder<> B(MiddleBlock->getTerminator()); | |||
| 3793 | ||||
| 3794 | // Fast-math-flags propagate from the original induction instruction. | |||
| 3795 | if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp())) | |||
| 3796 | B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags()); | |||
| 3797 | ||||
| 3798 | Value *CountMinusOne = B.CreateSub( | |||
| 3799 | CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1)); | |||
| 3800 | Value *CMO = | |||
| 3801 | !II.getStep()->getType()->isIntegerTy() | |||
| 3802 | ? B.CreateCast(Instruction::SIToFP, CountMinusOne, | |||
| 3803 | II.getStep()->getType()) | |||
| 3804 | : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType()); | |||
| 3805 | CMO->setName("cast.cmo"); | |||
| 3806 | Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II); | |||
| 3807 | Escape->setName("ind.escape"); | |||
| 3808 | MissingVals[UI] = Escape; | |||
| 3809 | } | |||
| 3810 | } | |||
| 3811 | ||||
| 3812 | for (auto &I : MissingVals) { | |||
| 3813 | PHINode *PHI = cast<PHINode>(I.first); | |||
| 3814 | // One corner case we have to handle is two IVs "chasing" each other, | |||
| 3815 | // that is %IV2 = phi [...], [ %IV1, %latch ] | |||
| 3816 | // In this case, if IV1 has an external use, we need to avoid adding both | |||
| 3817 | // "last value of IV1" and "penultimate value of IV2". So, verify that we | |||
| 3818 | // don't already have an incoming value for the middle block. | |||
| 3819 | if (PHI->getBasicBlockIndex(MiddleBlock) == -1) | |||
| 3820 | PHI->addIncoming(I.second, MiddleBlock); | |||
| 3821 | } | |||
| 3822 | } | |||
| 3823 | ||||
| 3824 | namespace { | |||
| 3825 | ||||
| 3826 | struct CSEDenseMapInfo { | |||
| 3827 | static bool canHandle(const Instruction *I) { | |||
| 3828 | return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) || | |||
| 3829 | isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I); | |||
| 3830 | } | |||
| 3831 | ||||
| 3832 | static inline Instruction *getEmptyKey() { | |||
| 3833 | return DenseMapInfo<Instruction *>::getEmptyKey(); | |||
| 3834 | } | |||
| 3835 | ||||
| 3836 | static inline Instruction *getTombstoneKey() { | |||
| 3837 | return DenseMapInfo<Instruction *>::getTombstoneKey(); | |||
| 3838 | } | |||
| 3839 | ||||
| 3840 | static unsigned getHashValue(const Instruction *I) { | |||
| 3841 | assert(canHandle(I) && "Unknown instruction!"); | |||
| 3842 | return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(), | |||
| 3843 | I->value_op_end())); | |||
| 3844 | } | |||
| 3845 | ||||
| 3846 | static bool isEqual(const Instruction *LHS, const Instruction *RHS) { | |||
| 3847 | if (LHS == getEmptyKey() || RHS == getEmptyKey() || | |||
| 3848 | LHS == getTombstoneKey() || RHS == getTombstoneKey()) | |||
| 3849 | return LHS == RHS; | |||
| 3850 | return LHS->isIdenticalTo(RHS); | |||
| 3851 | } | |||
| 3852 | }; | |||
| 3853 | ||||
| 3854 | } // end anonymous namespace | |||
| 3855 | ||||
| 3856 | /// Perform CSE of induction variable instructions. | |||
| 3857 | static void cse(BasicBlock *BB) { | |||
| 3858 | // Perform simple cse. | |||
| 3859 | SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap; | |||
| 3860 | for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) { | |||
| 3861 | Instruction *In = &*I++; | |||
| 3862 | ||||
| 3863 | if (!CSEDenseMapInfo::canHandle(In)) | |||
| 3864 | continue; | |||
| 3865 | ||||
| 3866 | // Check if we can replace this instruction with any of the | |||
| 3867 | // visited instructions. | |||
| 3868 | if (Instruction *V = CSEMap.lookup(In)) { | |||
| 3869 | In->replaceAllUsesWith(V); | |||
| 3870 | In->eraseFromParent(); | |||
| 3871 | continue; | |||
| 3872 | } | |||
| 3873 | ||||
| 3874 | CSEMap[In] = In; | |||
| 3875 | } | |||
| 3876 | } | |||
| 3877 | ||||
| 3878 | InstructionCost | |||
| 3879 | LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF, | |||
| 3880 | bool &NeedToScalarize) const { | |||
| 3881 | Function *F = CI->getCalledFunction(); | |||
| 3882 | Type *ScalarRetTy = CI->getType(); | |||
| 3883 | SmallVector<Type *, 4> Tys, ScalarTys; | |||
| 3884 | for (auto &ArgOp : CI->arg_operands()) | |||
| 3885 | ScalarTys.push_back(ArgOp->getType()); | |||
| 3886 | ||||
| 3887 | // Estimate cost of scalarized vector call. The source operands are assumed | |||
| 3888 | // to be vectors, so we need to extract individual elements from there, | |||
| 3889 | // execute VF scalar calls, and then gather the result into the vector return | |||
| 3890 | // value. | |||
| 3891 | InstructionCost ScalarCallCost = | |||
| 3892 | TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput); | |||
| 3893 | if (VF.isScalar()) | |||
| 3894 | return ScalarCallCost; | |||
| 3895 | ||||
| 3896 | // Compute corresponding vector type for return value and arguments. | |||
| 3897 | Type *RetTy = ToVectorTy(ScalarRetTy, VF); | |||
| 3898 | for (Type *ScalarTy : ScalarTys) | |||
| 3899 | Tys.push_back(ToVectorTy(ScalarTy, VF)); | |||
| 3900 | ||||
| 3901 | // Compute costs of unpacking argument values for the scalar calls and | |||
| 3902 | // packing the return values to a vector. | |||
| 3903 | InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF); | |||
| 3904 | ||||
| 3905 | InstructionCost Cost = | |||
| 3906 | ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost; | |||
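| | // E.g., assuming VF == 4, a scalar call cost of 10, and a scalarization | |||
| | // overhead of 12: Cost = 4 * 10 + 12 = 52; a vector variant costing less | |||
| | // than 52 wins below. | |||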
| 3907 | ||||
| 3908 | // If we can't emit a vector call for this function, then the currently found | |||
| 3909 | // cost is the cost we need to return. | |||
| 3910 | NeedToScalarize = true; | |||
| 3911 | VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); | |||
| 3912 | Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); | |||
| 3913 | ||||
| 3914 | if (!TLI || CI->isNoBuiltin() || !VecFunc) | |||
| 3915 | return Cost; | |||
| 3916 | ||||
| 3917 | // If the corresponding vector cost is cheaper, return its cost. | |||
| 3918 | InstructionCost VectorCallCost = | |||
| 3919 | TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput); | |||
| 3920 | if (VectorCallCost < Cost) { | |||
| 3921 | NeedToScalarize = false; | |||
| 3922 | Cost = VectorCallCost; | |||
| 3923 | } | |||
| 3924 | return Cost; | |||
| 3925 | } | |||
| 3926 | ||||
| 3927 | static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) { | |||
| 3928 | if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy())) | |||
| 3929 | return Elt; | |||
| 3930 | return VectorType::get(Elt, VF); | |||
| 3931 | } | |||
| 3932 | ||||
| 3933 | InstructionCost | |||
| 3934 | LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI, | |||
| 3935 | ElementCount VF) const { | |||
| 3936 | Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); | |||
| 3937 | assert(ID && "Expected intrinsic call!"); | |||
| 3938 | Type *RetTy = MaybeVectorizeType(CI->getType(), VF); | |||
| 3939 | FastMathFlags FMF; | |||
| 3940 | if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) | |||
| 3941 | FMF = FPMO->getFastMathFlags(); | |||
| 3942 | ||||
| 3943 | SmallVector<const Value *> Arguments(CI->arg_begin(), CI->arg_end()); | |||
| 3944 | FunctionType *FTy = CI->getCalledFunction()->getFunctionType(); | |||
| 3945 | SmallVector<Type *> ParamTys; | |||
| 3946 | std::transform(FTy->param_begin(), FTy->param_end(), | |||
| 3947 | std::back_inserter(ParamTys), | |||
| 3948 | [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); }); | |||
| 3949 | ||||
| 3950 | IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF, | |||
| 3951 | dyn_cast<IntrinsicInst>(CI)); | |||
| 3952 | return TTI.getIntrinsicInstrCost(CostAttrs, | |||
| 3953 | TargetTransformInfo::TCK_RecipThroughput); | |||
| 3954 | } | |||
| 3955 | ||||
| 3956 | static Type *smallestIntegerVectorType(Type *T1, Type *T2) { | |||
| 3957 | auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); | |||
| 3958 | auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); | |||
| 3959 | return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; | |||
| 3960 | } | |||
| 3961 | ||||
| 3962 | static Type *largestIntegerVectorType(Type *T1, Type *T2) { | |||
| 3963 | auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); | |||
| 3964 | auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); | |||
| 3965 | return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; | |||
| 3966 | } | |||
| 3967 | ||||
| 3968 | void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) { | |||
| 3969 | // For every instruction `I` in MinBWs, truncate the operands, create a | |||
| 3970 | // truncated version of `I` and reextend its result. InstCombine runs | |||
| 3971 | // later and will remove any ext/trunc pairs. | |||
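| | // E.g., an i32 add whose result is known to need only 8 bits becomes, in | |||
| | // shorthand: trunc both operands to <VF x i8>, add, then zext the result | |||
| | // back to <VF x i32>. | |||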
| 3972 | SmallPtrSet<Value *, 4> Erased; | |||
| 3973 | for (const auto &KV : Cost->getMinimalBitwidths()) { | |||
| 3974 | // If the value wasn't vectorized, we must maintain the original scalar | |||
| 3975 | // type. The absence of the value from State indicates that it | |||
| 3976 | // wasn't vectorized. | |||
| 3977 | VPValue *Def = State.Plan->getVPValue(KV.first); | |||
| 3978 | if (!State.hasAnyVectorValue(Def)) | |||
| 3979 | continue; | |||
| 3980 | for (unsigned Part = 0; Part < UF; ++Part) { | |||
| 3981 | Value *I = State.get(Def, Part); | |||
| 3982 | if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I)) | |||
| 3983 | continue; | |||
| 3984 | Type *OriginalTy = I->getType(); | |||
| 3985 | Type *ScalarTruncatedTy = | |||
| 3986 | IntegerType::get(OriginalTy->getContext(), KV.second); | |||
| 3987 | auto *TruncatedTy = VectorType::get( | |||
| 3988 | ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getElementCount()); | |||
| 3989 | if (TruncatedTy == OriginalTy) | |||
| 3990 | continue; | |||
| 3991 | ||||
| 3992 | IRBuilder<> B(cast<Instruction>(I)); | |||
| 3993 | auto ShrinkOperand = [&](Value *V) -> Value * { | |||
| 3994 | if (auto *ZI = dyn_cast<ZExtInst>(V)) | |||
| 3995 | if (ZI->getSrcTy() == TruncatedTy) | |||
| 3996 | return ZI->getOperand(0); | |||
| 3997 | return B.CreateZExtOrTrunc(V, TruncatedTy); | |||
| 3998 | }; | |||
| 3999 | ||||
| 4000 | // The actual instruction modification depends on the instruction type, | |||
| 4001 | // unfortunately. | |||
| 4002 | Value *NewI = nullptr; | |||
| 4003 | if (auto *BO = dyn_cast<BinaryOperator>(I)) { | |||
| 4004 | NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), | |||
| 4005 | ShrinkOperand(BO->getOperand(1))); | |||
| 4006 | ||||
| 4007 | // Any wrapping introduced by shrinking this operation shouldn't be | |||
| 4008 | // considered undefined behavior. So, we can't unconditionally copy | |||
| 4009 | // arithmetic wrapping flags to NewI. | |||
| 4010 | cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false); | |||
| 4011 | } else if (auto *CI = dyn_cast<ICmpInst>(I)) { | |||
| 4012 | NewI = | |||
| 4013 | B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), | |||
| 4014 | ShrinkOperand(CI->getOperand(1))); | |||
| 4015 | } else if (auto *SI = dyn_cast<SelectInst>(I)) { | |||
| 4016 | NewI = B.CreateSelect(SI->getCondition(), | |||
| 4017 | ShrinkOperand(SI->getTrueValue()), | |||
| 4018 | ShrinkOperand(SI->getFalseValue())); | |||
| 4019 | } else if (auto *CI = dyn_cast<CastInst>(I)) { | |||
| 4020 | switch (CI->getOpcode()) { | |||
| 4021 | default: | |||
| 4022 | llvm_unreachable("Unhandled cast!")__builtin_unreachable(); | |||
| 4023 | case Instruction::Trunc: | |||
| 4024 | NewI = ShrinkOperand(CI->getOperand(0)); | |||
| 4025 | break; | |||
| 4026 | case Instruction::SExt: | |||
| 4027 | NewI = B.CreateSExtOrTrunc( | |||
| 4028 | CI->getOperand(0), | |||
| 4029 | smallestIntegerVectorType(OriginalTy, TruncatedTy)); | |||
| 4030 | break; | |||
| 4031 | case Instruction::ZExt: | |||
| 4032 | NewI = B.CreateZExtOrTrunc( | |||
| 4033 | CI->getOperand(0), | |||
| 4034 | smallestIntegerVectorType(OriginalTy, TruncatedTy)); | |||
| 4035 | break; | |||
| 4036 | } | |||
| 4037 | } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { | |||
| 4038 | auto Elements0 = | |||
| 4039 | cast<VectorType>(SI->getOperand(0)->getType())->getElementCount(); | |||
| 4040 | auto *O0 = B.CreateZExtOrTrunc( | |||
| 4041 | SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0)); | |||
| 4042 | auto Elements1 = | |||
| 4043 | cast<VectorType>(SI->getOperand(1)->getType())->getElementCount(); | |||
| 4044 | auto *O1 = B.CreateZExtOrTrunc( | |||
| 4045 | SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1)); | |||
| 4046 | ||||
| 4047 | NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask()); | |||
| 4048 | } else if (isa<LoadInst>(I) || isa<PHINode>(I)) { | |||
| 4049 | // Don't do anything with the operands, just extend the result. | |||
| 4050 | continue; | |||
| 4051 | } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { | |||
| 4052 | auto Elements = | |||
| 4053 | cast<VectorType>(IE->getOperand(0)->getType())->getElementCount(); | |||
| 4054 | auto *O0 = B.CreateZExtOrTrunc( | |||
| 4055 | IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); | |||
| 4056 | auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy); | |||
| 4057 | NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2)); | |||
| 4058 | } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { | |||
| 4059 | auto Elements = | |||
| 4060 | cast<VectorType>(EE->getOperand(0)->getType())->getElementCount(); | |||
| 4061 | auto *O0 = B.CreateZExtOrTrunc( | |||
| 4062 | EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); | |||
| 4063 | NewI = B.CreateExtractElement(O0, EE->getOperand(2)); | |||
| 4064 | } else { | |||
| 4065 | // If we don't know what to do, be conservative and don't do anything. | |||
| 4066 | continue; | |||
| 4067 | } | |||
| 4068 | ||||
| 4069 | // Lastly, extend the result. | |||
| 4070 | NewI->takeName(cast<Instruction>(I)); | |||
| 4071 | Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy); | |||
| 4072 | I->replaceAllUsesWith(Res); | |||
| 4073 | cast<Instruction>(I)->eraseFromParent(); | |||
| 4074 | Erased.insert(I); | |||
| 4075 | State.reset(Def, Res, Part); | |||
| 4076 | } | |||
| 4077 | } | |||
| 4078 | ||||
| 4079 | // We'll have created a bunch of ZExts that are now parentless. Clean up. | |||
| 4080 | for (const auto &KV : Cost->getMinimalBitwidths()) { | |||
| 4081 | // If the value wasn't vectorized, we must maintain the original scalar | |||
| 4082 | // type. The absence of the value from State indicates that it | |||
| 4083 | // wasn't vectorized. | |||
| 4084 | VPValue *Def = State.Plan->getVPValue(KV.first); | |||
| 4085 | if (!State.hasAnyVectorValue(Def)) | |||
| 4086 | continue; | |||
| 4087 | for (unsigned Part = 0; Part < UF; ++Part) { | |||
| 4088 | Value *I = State.get(Def, Part); | |||
| 4089 | ZExtInst *Inst = dyn_cast<ZExtInst>(I); | |||
| 4090 | if (Inst && Inst->use_empty()) { | |||
| 4091 | Value *NewI = Inst->getOperand(0); | |||
| 4092 | Inst->eraseFromParent(); | |||
| 4093 | State.reset(Def, NewI, Part); | |||
| 4094 | } | |||
| 4095 | } | |||
| 4096 | } | |||
| 4097 | } | |||
| 4098 | ||||
| 4099 | void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) { | |||
| 4100 | // Insert truncates and extends for any truncated instructions as hints to | |||
| 4101 | // InstCombine. | |||
| 4102 | if (VF.isVector()) | |||
| 4103 | truncateToMinimalBitwidths(State); | |||
| 4104 | ||||
| 4105 | // Fix widened non-induction PHIs by setting up the PHI operands. | |||
| 4106 | if (OrigPHIsToFix.size()) { | |||
| 4107 | assert(EnableVPlanNativePath && | |||
| 4108 | "Unexpected non-induction PHIs for fixup in non VPlan-native path"); | |||
| 4109 | fixNonInductionPHIs(State); | |||
| 4110 | } | |||
| 4111 | ||||
| 4112 | // At this point every instruction in the original loop is widened to a | |||
| 4113 | // vector form. Now we need to fix the recurrences in the loop. These PHI | |||
| 4114 | // nodes are currently empty because we did not want to introduce cycles. | |||
| 4115 | // This is the second stage of vectorizing recurrences. | |||
| 4116 | fixCrossIterationPHIs(State); | |||
| 4117 | ||||
| 4118 | // Forget the original basic block. | |||
| 4119 | PSE.getSE()->forgetLoop(OrigLoop); | |||
| 4120 | ||||
| 4121 | // If we inserted an edge from the middle block to the unique exit block, | |||
| 4122 | // update uses outside the loop (phis) to account for the newly inserted | |||
| 4123 | // edge. | |||
| 4124 | if (!Cost->requiresScalarEpilogue(VF)) { | |||
| 4125 | // Fix-up external users of the induction variables. | |||
| 4126 | for (auto &Entry : Legal->getInductionVars()) | |||
| 4127 | fixupIVUsers(Entry.first, Entry.second, | |||
| 4128 | getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)), | |||
| 4129 | IVEndValues[Entry.first], LoopMiddleBlock); | |||
| 4130 | ||||
| 4131 | fixLCSSAPHIs(State); | |||
| 4132 | } | |||
| 4133 | ||||
| 4134 | for (Instruction *PI : PredicatedInstructions) | |||
| 4135 | sinkScalarOperands(&*PI); | |||
| 4136 | ||||
| 4137 | // Remove redundant induction instructions. | |||
| 4138 | cse(LoopVectorBody); | |||
| 4139 | ||||
| 4140 | // Set/update profile weights for the vector and remainder loops as original | |||
| 4141 | // loop iterations are now distributed among them. Note that original loop | |||
| 4142 | // represented by LoopScalarBody becomes remainder loop after vectorization. | |||
| 4143 | // | |||
| 4144 | // For cases like foldTailByMasking() and requiresScalarEpilogue() we may | |||
| 4145 | // end up with a slightly roughened result, but that should be OK since the | |||
| 4146 | // profile is not inherently precise anyway. Note also that a possible | |||
| 4147 | // bypass of the vector code caused by legality checks is ignored, | |||
| 4148 | // optimistically assigning all the weight to the vector loop. | |||
| 4149 | // | |||
| 4150 | // For scalable vectorization we can't know at compile time how many iterations | |||
| 4151 | // of the loop are handled in one vector iteration, so instead assume a pessimistic | |||
| 4152 | // vscale of '1'. | |||
| 4153 | setProfileInfoAfterUnrolling( | |||
| 4154 | LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody), | |||
| 4155 | LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF); | |||
| 4156 | } | |||
| 4157 | ||||
| 4158 | void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) { | |||
| 4159 | // In order to support recurrences we need to be able to vectorize Phi nodes. | |||
| 4160 | // Phi nodes have cycles, so we need to vectorize them in two stages. This is | |||
| 4161 | // stage #2: We now need to fix the recurrences by adding incoming edges to | |||
| 4162 | // the currently empty PHI nodes. At this point every instruction in the | |||
| 4163 | // original loop is widened to a vector form so we can use them to construct | |||
| 4164 | // the incoming edges. | |||
| 4165 | VPBasicBlock *Header = State.Plan->getEntry()->getEntryBasicBlock(); | |||
| 4166 | for (VPRecipeBase &R : Header->phis()) { | |||
| 4167 | if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) | |||
| 4168 | fixReduction(ReductionPhi, State); | |||
| 4169 | else if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R)) | |||
| 4170 | fixFirstOrderRecurrence(FOR, State); | |||
| 4171 | } | |||
| 4172 | } | |||
| 4173 | ||||
| 4174 | void InnerLoopVectorizer::fixFirstOrderRecurrence(VPWidenPHIRecipe *PhiR, | |||
| 4175 | VPTransformState &State) { | |||
| 4176 | // This is the second phase of vectorizing first-order recurrences. An | |||
| 4177 | // overview of the transformation is described below. Suppose we have the | |||
| 4178 | // following loop. | |||
| 4179 | // | |||
| 4180 | // for (int i = 0; i < n; ++i) | |||
| 4181 | // b[i] = a[i] - a[i - 1]; | |||
| 4182 | // | |||
| 4183 | // There is a first-order recurrence on "a". For this loop, the shorthand | |||
| 4184 | // scalar IR looks like: | |||
| 4185 | // | |||
| 4186 | // scalar.ph: | |||
| 4187 | // s_init = a[-1] | |||
| 4188 | // br scalar.body | |||
| 4189 | // | |||
| 4190 | // scalar.body: | |||
| 4191 | // i = phi [0, scalar.ph], [i+1, scalar.body] | |||
| 4192 | // s1 = phi [s_init, scalar.ph], [s2, scalar.body] | |||
| 4193 | // s2 = a[i] | |||
| 4194 | // b[i] = s2 - s1 | |||
| 4195 | // br cond, scalar.body, ... | |||
| 4196 | // | |||
| 4197 | // In this example, s1 is a recurrence because its value depends on the | |||
| 4198 | // previous iteration. In the first phase of vectorization, we created a | |||
| 4199 | // vector phi v1 for s1. We now complete the vectorization and produce the | |||
| 4200 | // shorthand vector IR shown below (for VF = 4, UF = 1). | |||
| 4201 | // | |||
| 4202 | // vector.ph: | |||
| 4203 | // v_init = vector(..., ..., ..., a[-1]) | |||
| 4204 | // br vector.body | |||
| 4205 | // | |||
| 4206 | // vector.body | |||
| 4207 | // i = phi [0, vector.ph], [i+4, vector.body] | |||
| 4208 | // v1 = phi [v_init, vector.ph], [v2, vector.body] | |||
| 4209 | // v2 = a[i, i+1, i+2, i+3]; | |||
| 4210 | // v3 = vector(v1(3), v2(0, 1, 2)) | |||
| 4211 | // b[i, i+1, i+2, i+3] = v2 - v3 | |||
| 4212 | // br cond, vector.body, middle.block | |||
| 4213 | // | |||
| 4214 | // middle.block: | |||
| 4215 | // x = v2(3) | |||
| 4216 | // br scalar.ph | |||
| 4217 | // | |||
| 4218 | // scalar.ph: | |||
| 4219 | // s_init = phi [x, middle.block], [a[-1], otherwise] | |||
| 4220 | // br scalar.body | |||
| 4221 | // | |||
| 4222 | // After the vector loop completes execution, we extract the next value of | |||
| 4223 | // the recurrence (x) to seed the scalar loop; see the sketch after this function. | |||
| 4224 | ||||
| 4225 | auto *IdxTy = Builder.getInt32Ty(); | |||
| 4226 | auto *VecPhi = cast<PHINode>(State.get(PhiR, 0)); | |||
| 4227 | ||||
| 4228 | // Fix the latch value of the new recurrence in the vector loop. | |||
| 4229 | VPValue *PreviousDef = PhiR->getBackedgeValue(); | |||
| 4230 | Value *Incoming = State.get(PreviousDef, UF - 1); | |||
| 4231 | VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch()); | |||
| 4232 | ||||
| 4233 | // Extract the last vector element in the middle block. This will be the | |||
| 4234 | // initial value for the recurrence when jumping to the scalar loop. | |||
| 4235 | auto *ExtractForScalar = Incoming; | |||
| 4236 | if (VF.isVector()) { | |||
| 4237 | auto *One = ConstantInt::get(IdxTy, 1); | |||
| 4238 | Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); | |||
| 4239 | auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF); | |||
| 4240 | auto *LastIdx = Builder.CreateSub(RuntimeVF, One); | |||
| 4241 | ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx, | |||
| 4242 | "vector.recur.extract"); | |||
| 4243 | } | |||
| 4244 | // Extract the second-to-last element in the middle block if the | |||
| 4245 | // Phi is used outside the loop. We need to extract the phi itself | |||
| 4246 | // and not the last element (the phi update in the current iteration). This | |||
| 4247 | // will be the value when jumping to the exit block from the LoopMiddleBlock, | |||
| 4248 | // when the scalar loop is not run at all. | |||
| 4249 | Value *ExtractForPhiUsedOutsideLoop = nullptr; | |||
| 4250 | if (VF.isVector()) { | |||
| 4251 | auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF); | |||
| 4252 | auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2)); | |||
| 4253 | ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement( | |||
| 4254 | Incoming, Idx, "vector.recur.extract.for.phi"); | |||
| 4255 | } else if (UF > 1) | |||
| 4256 | // When the loop is unrolled without vectorizing, initialize | |||
| 4257 | // ExtractForPhiUsedOutsideLoop with the unrolled part just before the final | |||
| 4258 | // value of `Incoming`. This is analogous to the vectorized case above: | |||
| 4259 | // extracting the second-to-last element when VF > 1. | |||
| 4260 | ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2); | |||
| 4261 | ||||
| 4262 | // Fix the initial value of the original recurrence in the scalar loop. | |||
| 4263 | Builder.SetInsertPoint(&*LoopScalarPreHeader->begin()); | |||
| 4264 | PHINode *Phi = cast<PHINode>(PhiR->getUnderlyingValue()); | |||
| 4265 | auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init"); | |||
| 4266 | auto *ScalarInit = PhiR->getStartValue()->getLiveInIRValue(); | |||
| 4267 | for (auto *BB : predecessors(LoopScalarPreHeader)) { | |||
| 4268 | auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit; | |||
| 4269 | Start->addIncoming(Incoming, BB); | |||
| 4270 | } | |||
| 4271 | ||||
| 4272 | Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start); | |||
| 4273 | Phi->setName("scalar.recur"); | |||
| 4274 | ||||
| 4275 | // Finally, fix users of the recurrence outside the loop. The users will need | |||
| 4276 | // either the last value of the scalar recurrence or the last value of the | |||
| 4277 | // vector recurrence we extracted in the middle block. Since the loop is in | |||
| 4278 | // LCSSA form, we just need to find all the phi nodes for the original scalar | |||
| 4279 | // recurrence in the exit block, and then add an edge for the middle block. | |||
| 4280 | // Note that LCSSA does not imply single entry when the original scalar loop | |||
| 4281 | // had multiple exiting edges (as we always run the last iteration in the | |||
| 4282 | // scalar epilogue); in that case, there is no edge from middle to exit and | |||
| 4283 | // thus no phis need to be updated. | |||
| 4284 | if (!Cost->requiresScalarEpilogue(VF)) | |||
| 4285 | for (PHINode &LCSSAPhi : LoopExitBlock->phis()) | |||
| 4286 | if (any_of(LCSSAPhi.incoming_values(), | |||
| 4287 | [Phi](Value *V) { return V == Phi; })) | |||
| 4288 | LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock); | |||
| 4289 | } | |||
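| | ||||
| | // A standalone sketch of the VF = 4 rewrite described in the comment above, | |||
| | // using plain arrays (illustrative only; assumes N is a positive multiple of | |||
| | // 4 so no scalar epilogue runs): `Prev` plays the role of v1, `Cur` of v2, | |||
| | // and `Shifted` of v3 = vector(v1(3), v2(0, 1, 2)). | |||
| | static void recurrenceVF4(const int *A, int *B, int N, int Init /* a[-1] */) { | |||
| | int Prev[4] = {0, 0, 0, Init}; // v_init: only lane 3 is ever read | |||
| | for (int I = 0; I < N; I += 4) { | |||
| | int Cur[4] = {A[I], A[I + 1], A[I + 2], A[I + 3]}; // v2 | |||
| | int Shifted[4] = {Prev[3], Cur[0], Cur[1], Cur[2]}; // v3 | |||
| | for (int L = 0; L < 4; ++L) | |||
| | B[I + L] = Cur[L] - Shifted[L]; // b[i] = a[i] - a[i-1] | |||
| | for (int L = 0; L < 4; ++L) | |||
| | Prev[L] = Cur[L]; // backedge: v1 <- v2 | |||
| | } | |||
| | // A scalar epilogue, if one were needed, would start from Prev[3] -- the | |||
| | // "vector.recur.extract" value extracted in the middle block. | |||
| | } | |||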
| 4290 | ||||
| 4291 | void InnerLoopVectorizer::fixReduction(VPReductionPHIRecipe *PhiR, | |||
| 4292 | VPTransformState &State) { | |||
| 4293 | PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue()); | |||
| 4294 | // Get its reduction variable descriptor. | |||
| 4295 | assert(Legal->isReductionVariable(OrigPhi) && | |||
| 4296 | "Unable to find the reduction variable"); | |||
| 4297 | const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor(); | |||
| 4298 | ||||
| 4299 | RecurKind RK = RdxDesc.getRecurrenceKind(); | |||
| 4300 | TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue(); | |||
| 4301 | Instruction *LoopExitInst = RdxDesc.getLoopExitInstr(); | |||
| 4302 | setDebugLocFromInst(ReductionStartValue); | |||
| 4303 | ||||
| 4304 | VPValue *LoopExitInstDef = State.Plan->getVPValue(LoopExitInst); | |||
| 4305 | // This is the vector-clone of the value that leaves the loop. | |||
| 4306 | Type *VecTy = State.get(LoopExitInstDef, 0)->getType(); | |||
| 4307 | ||||
| 4308 | // Wrap flags are in general invalid after vectorization, clear them. | |||
| 4309 | clearReductionWrapFlags(RdxDesc, State); | |||
| 4310 | ||||
| 4311 | // Fix the vector-loop phi. | |||
| 4312 | ||||
| 4313 | // Reductions do not have to start at zero. They can start with | |||
| 4314 | // any loop invariant values. | |||
| 4315 | BasicBlock *VectorLoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); | |||
| 4316 | ||||
| 4317 | unsigned LastPartForNewPhi = PhiR->isOrdered() ? 1 : UF; | |||
| 4318 | for (unsigned Part = 0; Part < LastPartForNewPhi; ++Part) { | |||
| 4319 | Value *VecRdxPhi = State.get(PhiR->getVPSingleValue(), Part); | |||
| 4320 | Value *Val = State.get(PhiR->getBackedgeValue(), Part); | |||
| 4321 | if (PhiR->isOrdered()) | |||
| 4322 | Val = State.get(PhiR->getBackedgeValue(), UF - 1); | |||
| 4323 | ||||
| 4324 | cast<PHINode>(VecRdxPhi)->addIncoming(Val, VectorLoopLatch); | |||
| 4325 | } | |||
| 4326 | ||||
| 4327 | // Before each round, move the insertion point right between | |||
| 4328 | // the PHIs and the values we are going to write. | |||
| 4329 | // This allows us to write both PHINodes and the extractelement | |||
| 4330 | // instructions. | |||
| 4331 | Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); | |||
| 4332 | ||||
| 4333 | setDebugLocFromInst(LoopExitInst); | |||
| 4334 | ||||
| 4335 | Type *PhiTy = OrigPhi->getType(); | |||
| 4336 | // If tail is folded by masking, the vector value to leave the loop should be | |||
| 4337 | // a Select choosing between the vectorized LoopExitInst and vectorized Phi, | |||
| 4338 | // instead of the former. For an inloop reduction the reduction will already | |||
| 4339 | // be predicated, and does not need to be handled here. | |||
| 4340 | if (Cost->foldTailByMasking() && !PhiR->isInLoop()) { | |||
| 4341 | for (unsigned Part = 0; Part < UF; ++Part) { | |||
| 4342 | Value *VecLoopExitInst = State.get(LoopExitInstDef, Part); | |||
| 4343 | Value *Sel = nullptr; | |||
| 4344 | for (User *U : VecLoopExitInst->users()) { | |||
| 4345 | if (isa<SelectInst>(U)) { | |||
| 4346 | assert(!Sel && "Reduction exit feeding two selects"); | |||
| 4347 | Sel = U; | |||
| 4348 | } else | |||
| 4349 | assert(isa<PHINode>(U) && "Reduction exit must feed PHIs or a select"); | |||
| 4350 | } | |||
| 4351 | assert(Sel && "Reduction exit feeds no select"); | |||
| 4352 | State.reset(LoopExitInstDef, Sel, Part); | |||
| 4353 | ||||
| 4354 | // If the target can create a predicated operator for the reduction at no | |||
| 4355 | // extra cost in the loop (for example a predicated vadd), it can be | |||
| 4356 | // cheaper for the select to remain in the loop than be sunk out of it, | |||
| 4357 | // and so use the select value for the phi instead of the old | |||
| 4358 | // LoopExitValue. | |||
| 4359 | if (PreferPredicatedReductionSelect || | |||
| 4360 | TTI->preferPredicatedReductionSelect( | |||
| 4361 | RdxDesc.getOpcode(), PhiTy, | |||
| 4362 | TargetTransformInfo::ReductionFlags())) { | |||
| 4363 | auto *VecRdxPhi = | |||
| 4364 | cast<PHINode>(State.get(PhiR->getVPSingleValue(), Part)); | |||
| 4365 | VecRdxPhi->setIncomingValueForBlock( | |||
| 4366 | LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel); | |||
| 4367 | } | |||
| 4368 | } | |||
| 4369 | } | |||
| 4370 | ||||
| 4371 | // If the vector reduction can be performed in a smaller type, we truncate | |||
| 4372 | // then extend the loop exit value to enable InstCombine to evaluate the | |||
| 4373 | // entire expression in the smaller type. | |||
| 4374 | if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) { | |||
| 4375 | assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!"); | |||
| 4376 | Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF); | |||
| 4377 | Builder.SetInsertPoint( | |||
| 4378 | LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator()); | |||
| 4379 | VectorParts RdxParts(UF); | |||
| 4380 | for (unsigned Part = 0; Part < UF; ++Part) { | |||
| 4381 | RdxParts[Part] = State.get(LoopExitInstDef, Part); | |||
| 4382 | Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); | |||
| 4383 | Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy) | |||
| 4384 | : Builder.CreateZExt(Trunc, VecTy); | |||
| 4385 | for (Value::user_iterator UI = RdxParts[Part]->user_begin(); | |||
| 4386 | UI != RdxParts[Part]->user_end();) | |||
| 4387 | if (*UI != Trunc) { | |||
| 4388 | (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd); | |||
| 4389 | RdxParts[Part] = Extnd; | |||
| 4390 | } else { | |||
| 4391 | ++UI; | |||
| 4392 | } | |||
| 4393 | } | |||
| 4394 | Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); | |||
| 4395 | for (unsigned Part = 0; Part < UF; ++Part) { | |||
| 4396 | RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); | |||
| 4397 | State.reset(LoopExitInstDef, RdxParts[Part], Part); | |||
| 4398 | } | |||
| 4399 | } | |||
| 4400 | ||||
| 4401 | // Reduce all of the unrolled parts into a single vector. | |||
| 4402 | Value *ReducedPartRdx = State.get(LoopExitInstDef, 0); | |||
| 4403 | unsigned Op = RecurrenceDescriptor::getOpcode(RK); | |||
| 4404 | ||||
| 4405 | // The middle block terminator has already been assigned a DebugLoc here (the | |||
| 4406 | // OrigLoop's single latch terminator). We want the whole middle block to | |||
| 4407 | // appear to execute on this line because: (a) it is all compiler generated, | |||
| 4408 | // (b) these instructions are always executed after evaluating the latch | |||
| 4409 | // conditional branch, and (c) other passes may add new predecessors which | |||
| 4410 | // terminate on this line. This is the easiest way to ensure we don't | |||
| 4411 | // accidentally cause an extra step back into the loop while debugging. | |||
| 4412 | setDebugLocFromInst(LoopMiddleBlock->getTerminator()); | |||
| 4413 | if (PhiR->isOrdered()) | |||
| 4414 | ReducedPartRdx = State.get(LoopExitInstDef, UF - 1); | |||
| 4415 | else { | |||
| 4416 | // Floating-point operations should have some FMF to enable the reduction. | |||
| 4417 | IRBuilderBase::FastMathFlagGuard FMFG(Builder); | |||
| 4418 | Builder.setFastMathFlags(RdxDesc.getFastMathFlags()); | |||
| 4419 | for (unsigned Part = 1; Part < UF; ++Part) { | |||
| 4420 | Value *RdxPart = State.get(LoopExitInstDef, Part); | |||
| 4421 | if (Op != Instruction::ICmp && Op != Instruction::FCmp) { | |||
| 4422 | ReducedPartRdx = Builder.CreateBinOp( | |||
| 4423 | (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx"); | |||
| 4424 | } else { | |||
| 4425 | ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart); | |||
| 4426 | } | |||
| 4427 | } | |||
| 4428 | } | |||
| 4429 | ||||
| 4430 | // Create the reduction after the loop. Note that inloop reductions create the | |||
| 4431 | // target reduction in the loop using a Reduction recipe. | |||
| 4432 | if (VF.isVector() && !PhiR->isInLoop()) { | |||
| 4433 | ReducedPartRdx = | |||
| 4434 | createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx); | |||
| 4435 | // If the reduction can be performed in a smaller type, we need to extend | |||
| 4436 | // the reduction to the wider type before we branch to the original loop. | |||
| 4437 | if (PhiTy != RdxDesc.getRecurrenceType()) | |||
| 4438 | ReducedPartRdx = RdxDesc.isSigned() | |||
| 4439 | ? Builder.CreateSExt(ReducedPartRdx, PhiTy) | |||
| 4440 | : Builder.CreateZExt(ReducedPartRdx, PhiTy); | |||
| 4441 | } | |||
| 4442 | ||||
| 4443 | // Create a phi node that merges control-flow from the backedge-taken check | |||
| 4444 | // block and the middle block. | |||
| 4445 | PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx", | |||
| 4446 | LoopScalarPreHeader->getTerminator()); | |||
| 4447 | for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I) | |||
| 4448 | BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]); | |||
| 4449 | BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); | |||
| 4450 | ||||
| 4451 | // Now, we need to fix the users of the reduction variable | |||
| 4452 | // inside and outside of the scalar remainder loop. | |||
| 4453 | ||||
| 4454 | // We know that the loop is in LCSSA form. We need to update the PHI nodes | |||
| 4455 | // in the exit blocks. See comment on analogous loop in | |||
| 4456 | // fixFirstOrderRecurrence for a more complete explanation of the logic. | |||
| 4457 | if (!Cost->requiresScalarEpilogue(VF)) | |||
| 4458 | for (PHINode &LCSSAPhi : LoopExitBlock->phis()) | |||
| 4459 | if (any_of(LCSSAPhi.incoming_values(), | |||
| 4460 | [LoopExitInst](Value *V) { return V == LoopExitInst; })) | |||
| 4461 | LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock); | |||
| 4462 | ||||
| 4463 | // Fix the scalar loop reduction variable with the incoming reduction sum | |||
| 4464 | // from the vector body and from the backedge value. | |||
| 4465 | int IncomingEdgeBlockIdx = | |||
| 4466 | OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch()); | |||
| 4467 | assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index"); | |||
| 4468 | // Pick the other block. | |||
| 4469 | int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1); | |||
| 4470 | OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi); | |||
| 4471 | OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst); | |||
| 4472 | } | |||
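| | ||||
| | // A minimal sketch (illustrative; integer add reduction, fixed VF = 4 and | |||
| | // UF = 2) of the combining step above: the unrolled partial vectors are | |||
| | // first combined lane-wise ("bin.rdx"), then one horizontal reduction | |||
| | // produces the scalar that feeds the bc.merge.rdx phi. | |||
| | static int reduceParts(const int Part0[4], const int Part1[4], int Start) { | |||
| | int BinRdx[4]; | |||
| | for (int L = 0; L < 4; ++L) | |||
| | BinRdx[L] = Part0[L] + Part1[L]; // combine the UF parts lane-wise | |||
| | int Rdx = 0; | |||
| | for (int L = 0; L < 4; ++L) | |||
| | Rdx += BinRdx[L]; // horizontal (target) reduction | |||
| | return Start + Rdx; // fold in the loop-invariant start value | |||
| | } | |||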
| 4473 | ||||
| 4474 | void InnerLoopVectorizer::clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc, | |||
| 4475 | VPTransformState &State) { | |||
| 4476 | RecurKind RK = RdxDesc.getRecurrenceKind(); | |||
| 4477 | if (RK != RecurKind::Add && RK != RecurKind::Mul) | |||
| 4478 | return; | |||
| 4479 | ||||
| 4480 | Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr(); | |||
| 4481 | assert(LoopExitInstr && "null loop exit instruction"); | |||
| 4482 | SmallVector<Instruction *, 8> Worklist; | |||
| 4483 | SmallPtrSet<Instruction *, 8> Visited; | |||
| 4484 | Worklist.push_back(LoopExitInstr); | |||
| 4485 | Visited.insert(LoopExitInstr); | |||
| 4486 | ||||
| 4487 | while (!Worklist.empty()) { | |||
| 4488 | Instruction *Cur = Worklist.pop_back_val(); | |||
| 4489 | if (isa<OverflowingBinaryOperator>(Cur)) | |||
| 4490 | for (unsigned Part = 0; Part < UF; ++Part) { | |||
| 4491 | Value *V = State.get(State.Plan->getVPValue(Cur), Part); | |||
| 4492 | cast<Instruction>(V)->dropPoisonGeneratingFlags(); | |||
| 4493 | } | |||
| 4494 | ||||
| 4495 | for (User *U : Cur->users()) { | |||
| 4496 | Instruction *UI = cast<Instruction>(U); | |||
| 4497 | if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) && | |||
| 4498 | Visited.insert(UI).second) | |||
| 4499 | Worklist.push_back(UI); | |||
| 4500 | } | |||
| 4501 | } | |||
| 4502 | } | |||
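| | ||||
| | // Why the flag-dropping above is needed, as a standalone sketch: the | |||
| | // vectorizer reassociates a reduction into per-lane partial sums, and a | |||
| | // partial sum can overflow even when the original sequential sum never | |||
| | // does, so nsw/nuw proved for the scalar ops no longer holds. With 8-bit | |||
| | // values (the scalar running sum below stays within [-100, 100]): | |||
| | static int partialSumsMayWrap() { | |||
| | int Vals[4] = {100, -100, 100, -100}; // scalar running sum: 100, 0, 100, 0 | |||
| | // VF = 2 partial sums: lane 0 = Vals[0] + Vals[2] = 200, which would wrap | |||
| | // an 8-bit accumulator, although the reassociated total (0) matches the | |||
| | // scalar result. | |||
| | int Lane0 = Vals[0] + Vals[2]; | |||
| | int Lane1 = Vals[1] + Vals[3]; | |||
| | return Lane0 + Lane1; | |||
| | } | |||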
| 4503 | ||||
| 4504 | void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) { | |||
| 4505 | for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { | |||
| 4506 | if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1) | |||
| 4507 | // Some phis were already hand updated by the reduction and recurrence | |||
| 4508 | // code above, leave them alone. | |||
| 4509 | continue; | |||
| 4510 | ||||
| 4511 | auto *IncomingValue = LCSSAPhi.getIncomingValue(0); | |||
| 4512 | // Non-instruction incoming values will have only one value. | |||
| 4513 | ||||
| 4514 | VPLane Lane = VPLane::getFirstLane(); | |||
| 4515 | if (isa<Instruction>(IncomingValue) && | |||
| 4516 | !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue), | |||
| 4517 | VF)) | |||
| 4518 | Lane = VPLane::getLastLaneForVF(VF); | |||
| 4519 | ||||
| 4520 | // Can be a loop invariant incoming value or the last scalar value to be | |||
| 4521 | // extracted from the vectorized loop. | |||
| 4522 | Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); | |||
| 4523 | Value *lastIncomingValue = | |||
| 4524 | OrigLoop->isLoopInvariant(IncomingValue) | |||
| 4525 | ? IncomingValue | |||
| 4526 | : State.get(State.Plan->getVPValue(IncomingValue), | |||
| 4527 | VPIteration(UF - 1, Lane)); | |||
| 4528 | LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock); | |||
| 4529 | } | |||
| 4530 | } | |||
| 4531 | ||||
| 4532 | void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) { | |||
| 4533 | // The basic block and loop containing the predicated instruction. | |||
| 4534 | auto *PredBB = PredInst->getParent(); | |||
| 4535 | auto *VectorLoop = LI->getLoopFor(PredBB); | |||
| 4536 | ||||
| 4537 | // Initialize a worklist with the operands of the predicated instruction. | |||
| 4538 | SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end()); | |||
| 4539 | ||||
| 4540 | // Holds instructions that we need to analyze again. An instruction may be | |||
| 4541 | // reanalyzed if we don't yet know if we can sink it or not. | |||
| 4542 | SmallVector<Instruction *, 8> InstsToReanalyze; | |||
| 4543 | ||||
| 4544 | // Returns true if a given use occurs in the predicated block. Phi nodes use | |||
| 4545 | // their operands in their corresponding predecessor blocks. | |||
| 4546 | auto isBlockOfUsePredicated = [&](Use &U) -> bool { | |||
| 4547 | auto *I = cast<Instruction>(U.getUser()); | |||
| 4548 | BasicBlock *BB = I->getParent(); | |||
| 4549 | if (auto *Phi = dyn_cast<PHINode>(I)) | |||
| 4550 | BB = Phi->getIncomingBlock( | |||
| 4551 | PHINode::getIncomingValueNumForOperand(U.getOperandNo())); | |||
| 4552 | return BB == PredBB; | |||
| 4553 | }; | |||
| 4554 | ||||
| 4555 | // Iteratively sink the scalarized operands of the predicated instruction | |||
| 4556 | // into the block we created for it. When an instruction is sunk, its | |||
| 4557 | // operands are then added to the worklist. The algorithm ends when a full | |||
| 4558 | // pass over the worklist fails to sink any instruction. | |||
| 4559 | bool Changed; | |||
| 4560 | do { | |||
| 4561 | // Add the instructions that need to be reanalyzed to the worklist, and | |||
| 4562 | // reset the changed indicator. | |||
| 4563 | Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end()); | |||
| 4564 | InstsToReanalyze.clear(); | |||
| 4565 | Changed = false; | |||
| 4566 | ||||
| 4567 | while (!Worklist.empty()) { | |||
| 4568 | auto *I = dyn_cast<Instruction>(Worklist.pop_back_val()); | |||
| 4569 | ||||
| 4570 | // We can't sink an instruction if it is a phi node, is not in the loop, | |||
| 4571 | // or may have side effects. | |||
| 4572 | if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) || | |||
| 4573 | I->mayHaveSideEffects()) | |||
| 4574 | continue; | |||
| 4575 | ||||
| 4576 | // If the instruction is already in PredBB, check if we can sink its | |||
| 4577 | // operands. In that case, VPlan's sinkScalarOperands() succeeded in | |||
| 4578 | // sinking the scalar instruction I, hence it appears in PredBB; but it | |||
| 4579 | // may have failed to sink I's operands (recursively), which we try | |||
| 4580 | // (again) here. | |||
| 4581 | if (I->getParent() == PredBB) { | |||
| 4582 | Worklist.insert(I->op_begin(), I->op_end()); | |||
| 4583 | continue; | |||
| 4584 | } | |||
| 4585 | ||||
| 4586 | // It's legal to sink the instruction if all its uses occur in the | |||
| 4587 | // predicated block. Otherwise, there's nothing to do yet, and we may | |||
| 4588 | // need to reanalyze the instruction. | |||
| 4589 | if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) { | |||
| 4590 | InstsToReanalyze.push_back(I); | |||
| 4591 | continue; | |||
| 4592 | } | |||
| 4593 | ||||
| 4594 | // Move the instruction to the beginning of the predicated block, and add | |||
| 4595 | // its operands to the worklist. | |||
| 4596 | I->moveBefore(&*PredBB->getFirstInsertionPt()); | |||
| 4597 | Worklist.insert(I->op_begin(), I->op_end()); | |||
| 4598 | ||||
| 4599 | // The sinking may have enabled other instructions to be sunk, so we will | |||
| 4600 | // need to iterate. | |||
| 4601 | Changed = true; | |||
| 4602 | } | |||
| 4603 | } while (Changed); | |||
| 4604 | } | |||
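| | ||||
| | // The fixed-point shape used above, in isolation (a sketch with illustrative | |||
| | // types, not the pass's real data structures; assumes <vector> is reachable | |||
| | // in this TU): keep taking items from a worklist, defer the ones that cannot | |||
| | // be decided yet, and stop once a full pass changes nothing. | |||
| | static void runToFixedPoint(std::vector<int> Worklist, | |||
| | bool (*TrySink)(int, std::vector<int> &)) { | |||
| | std::vector<int> Deferred; | |||
| | bool Changed; | |||
| | do { | |||
| | // Re-queue everything that could not be decided in the previous pass. | |||
| | Worklist.insert(Worklist.end(), Deferred.begin(), Deferred.end()); | |||
| | Deferred.clear(); | |||
| | Changed = false; | |||
| | while (!Worklist.empty()) { | |||
| | int Item = Worklist.back(); | |||
| | Worklist.pop_back(); | |||
| | if (TrySink(Item, Worklist)) // may push newly enabled items | |||
| | Changed = true; | |||
| | else | |||
| | Deferred.push_back(Item); // reanalyze on the next pass | |||
| | } | |||
| | } while (Changed); | |||
| | } | |||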
| 4605 | ||||
| 4606 | void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) { | |||
| 4607 | for (PHINode *OrigPhi : OrigPHIsToFix) { | |||
| 4608 | VPWidenPHIRecipe *VPPhi = | |||
| 4609 | cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi)); | |||
| 4610 | PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0)); | |||
| 4611 | // Make sure the builder has a valid insert point. | |||
| 4612 | Builder.SetInsertPoint(NewPhi); | |||
| 4613 | for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) { | |||
| 4614 | VPValue *Inc = VPPhi->getIncomingValue(i); | |||
| 4615 | VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i); | |||
| 4616 | NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]); | |||
| 4617 | } | |||
| 4618 | } | |||
| 4619 | } | |||
| 4620 | ||||
| 4621 | bool InnerLoopVectorizer::useOrderedReductions(RecurrenceDescriptor &RdxDesc) { | |||
| 4622 | return Cost->useOrderedReductions(RdxDesc); | |||
| 4623 | } | |||
| 4624 | ||||
| 4625 | void InnerLoopVectorizer::widenGEP(GetElementPtrInst *GEP, VPValue *VPDef, | |||
| 4626 | VPUser &Operands, unsigned UF, | |||
| 4627 | ElementCount VF, bool IsPtrLoopInvariant, | |||
| 4628 | SmallBitVector &IsIndexLoopInvariant, | |||
| 4629 | VPTransformState &State) { | |||
| 4630 | // Construct a vector GEP by widening the operands of the scalar GEP as | |||
| 4631 | // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP | |||
| 4632 | // results in a vector of pointers when at least one operand of the GEP | |||
| 4633 | // is vector-typed. Thus, to keep the representation compact, we only use | |||
| 4634 | // vector-typed operands for loop-varying values. | |||
| 4635 | ||||
| 4636 | if (VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) { | |||
| 4637 | // If we are vectorizing, but the GEP has only loop-invariant operands, | |||
| 4638 | // the GEP we build (by only using vector-typed operands for | |||
| 4639 | // loop-varying values) would be a scalar pointer. Thus, to ensure we | |||
| 4640 | // produce a vector of pointers, we need to either arbitrarily pick an | |||
| 4641 | // operand to broadcast, or broadcast a clone of the original GEP. | |||
| 4642 | // Here, we broadcast a clone of the original. | |||
| 4643 | // | |||
| 4644 | // TODO: If at some point we decide to scalarize instructions having | |||
| 4645 | // loop-invariant operands, this special case will no longer be | |||
| 4646 | // required. We would add the scalarization decision to | |||
| 4647 | // collectLoopScalars() and teach getVectorValue() to broadcast | |||
| 4648 | // the lane-zero scalar value. | |||
| 4649 | auto *Clone = Builder.Insert(GEP->clone()); | |||
| 4650 | for (unsigned Part = 0; Part < UF; ++Part) { | |||
| 4651 | Value *EntryPart = Builder.CreateVectorSplat(VF, Clone); | |||
| 4652 | State.set(VPDef, EntryPart, Part); | |||
| 4653 | addMetadata(EntryPart, GEP); | |||
| 4654 | } | |||
| 4655 | } else { | |||
| 4656 | // If the GEP has at least one loop-varying operand, we are sure to | |||
| 4657 | // produce a vector of pointers. But if we are only unrolling, we want | |||
| 4658 | // to produce a scalar GEP for each unroll part. Thus, the GEP we | |||
| 4659 | // produce with the code below will be scalar (if VF == 1) or vector | |||
| 4660 | // (otherwise). Note that for the unroll-only case, we still maintain | |||
| 4661 | // values in the vector mapping with initVector, as we do for other | |||
| 4662 | // instructions. | |||
| 4663 | for (unsigned Part = 0; Part < UF; ++Part) { | |||
| 4664 | // The pointer operand of the new GEP. If it's loop-invariant, we | |||
| 4665 | // won't broadcast it. | |||
| 4666 | auto *Ptr = IsPtrLoopInvariant | |||
| 4667 | ? State.get(Operands.getOperand(0), VPIteration(0, 0)) | |||
| 4668 | : State.get(Operands.getOperand(0), Part); | |||
| 4669 | ||||
| 4670 | // Collect all the indices for the new GEP. If any index is | |||
| 4671 | // loop-invariant, we won't broadcast it. | |||
| 4672 | SmallVector<Value *, 4> Indices; | |||
| 4673 | for (unsigned I = 1, E = Operands.getNumOperands(); I < E; I++) { | |||
| 4674 | VPValue *Operand = Operands.getOperand(I); | |||
| 4675 | if (IsIndexLoopInvariant[I - 1]) | |||
| 4676 | Indices.push_back(State.get(Operand, VPIteration(0, 0))); | |||
| 4677 | else | |||
| 4678 | Indices.push_back(State.get(Operand, Part)); | |||
| 4679 | } | |||
| 4680 | ||||
| 4681 | // Create the new GEP. Note that this GEP may be a scalar if VF == 1, | |||
| 4682 | // but it should be a vector, otherwise. | |||
| 4683 | auto *NewGEP = | |||
| 4684 | GEP->isInBounds() | |||
| 4685 | ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr, | |||
| 4686 | Indices) | |||
| 4687 | : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices); | |||
| 4688 | assert((VF.isScalar() || NewGEP->getType()->isVectorTy()) && | |||
| 4689 | "NewGEP is not a pointer vector"); | |||
| 4690 | State.set(VPDef, NewGEP, Part); | |||
| 4691 | addMetadata(NewGEP, GEP); | |||
| 4692 | } | |||
| 4693 | } | |||
| 4694 | } | |||
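| | ||||
| | // What the widened GEP computes, as a standalone sketch for VF = 4 with a | |||
| | // loop-invariant base and a widened index operand (illustrative names): the | |||
| | // result is a "vector of pointers", one lane address per index lane. | |||
| | static void widenedGepVF4(float *Base, const long Index[4], | |||
| | float *LaneAddrs[4]) { | |||
| | for (int L = 0; L < 4; ++L) | |||
| | LaneAddrs[L] = Base + Index[L]; // lane L of the vector GEP | |||
| | } | |||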
| 4695 | ||||
| 4696 | void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, | |||
| 4697 | VPWidenPHIRecipe *PhiR, | |||
| 4698 | VPTransformState &State) { | |||
| 4699 | PHINode *P = cast<PHINode>(PN); | |||
| 4700 | if (EnableVPlanNativePath) { | |||
| 4701 | // Currently we enter here in the VPlan-native path for non-induction | |||
| 4702 | // PHIs where all control flow is uniform. We simply widen these PHIs. | |||
| 4703 | // Create a vector phi with no operands - the vector phi operands will be | |||
| 4704 | // set at the end of vector code generation. | |||
| 4705 | Type *VecTy = (State.VF.isScalar()) | |||
| 4706 | ? PN->getType() | |||
| 4707 | : VectorType::get(PN->getType(), State.VF); | |||
| 4708 | Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi"); | |||
| 4709 | State.set(PhiR, VecPhi, 0); | |||
| 4710 | OrigPHIsToFix.push_back(P); | |||
| 4711 | ||||
| 4712 | return; | |||
| 4713 | } | |||
| 4714 | ||||
| 4715 | assert(PN->getParent() == OrigLoop->getHeader() && | |||
| 4716 | "Non-header phis should have been handled elsewhere"); | |||
| 4717 | ||||
| 4718 | // In order to support recurrences we need to be able to vectorize Phi nodes. | |||
| 4719 | // Phi nodes have cycles, so we need to vectorize them in two stages. This is | |||
| 4720 | // stage #1: We create a new vector PHI node with no incoming edges. We'll use | |||
| 4721 | // this value when we vectorize all of the instructions that use the PHI. | |||
| 4722 | ||||
| 4723 | assert(!Legal->isReductionVariable(P) && | |||
| 4724 | "reductions should be handled elsewhere"); | |||
| 4725 | ||||
| 4726 | setDebugLocFromInst(P); | |||
| 4727 | ||||
| 4728 | // This PHINode must be an induction variable. | |||
| 4729 | // Make sure that we know about it. | |||
| 4730 | assert(Legal->getInductionVars().count(P) && "Not an induction variable"); | |||
| 4731 | ||||
| 4732 | InductionDescriptor II = Legal->getInductionVars().lookup(P); | |||
| 4733 | const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); | |||
| 4734 | ||||
| 4735 | // FIXME: The newly created binary instructions should contain nsw/nuw flags, | |||
| 4736 | // which can be found from the original scalar operations. | |||
| 4737 | switch (II.getKind()) { | |||
| 4738 | case InductionDescriptor::IK_NoInduction: | |||
| 4739 | llvm_unreachable("Unknown induction")__builtin_unreachable(); | |||
| 4740 | case InductionDescriptor::IK_IntInduction: | |||
| 4741 | case InductionDescriptor::IK_FpInduction: | |||
| 4742 | llvm_unreachable("Integer/fp induction is handled elsewhere.")__builtin_unreachable(); | |||
| 4743 | case InductionDescriptor::IK_PtrInduction: { | |||
| 4744 | // Handle the pointer induction variable case. | |||
| 4745 | assert(P->getType()->isPointerTy() && "Unexpected type."); | |||
| 4746 | ||||
| 4747 | if (Cost->isScalarAfterVectorization(P, State.VF)) { | |||
| 4748 | // This is the normalized GEP that starts counting at zero. | |||
| 4749 | Value *PtrInd = | |||
| 4750 | Builder.CreateSExtOrTrunc(Induction, II.getStep()->getType()); | |||
| 4751 | // Determine the number of scalars we need to generate for each unroll | |||
| 4752 | // iteration. If the instruction is uniform, we only need to generate the | |||
| 4753 | // first lane. Otherwise, we generate all VF values. | |||
| 4754 | bool IsUniform = Cost->isUniformAfterVectorization(P, State.VF); | |||
| 4755 | unsigned Lanes = IsUniform ? 1 : State.VF.getKnownMinValue(); | |||
| 4756 | ||||
| 4757 | bool NeedsVectorIndex = !IsUniform && VF.isScalable(); | |||
| 4758 | Value *UnitStepVec = nullptr, *PtrIndSplat = nullptr; | |||
| 4759 | if (NeedsVectorIndex) { | |||
| 4760 | Type *VecIVTy = VectorType::get(PtrInd->getType(), VF); | |||
| 4761 | UnitStepVec = Builder.CreateStepVector(VecIVTy); | |||
| 4762 | PtrIndSplat = Builder.CreateVectorSplat(VF, PtrInd); | |||
| 4763 | } | |||
| 4764 | ||||
| 4765 | for (unsigned Part = 0; Part < UF; ++Part) { | |||
| 4766 | Value *PartStart = createStepForVF( | |||
| 4767 | Builder, ConstantInt::get(PtrInd->getType(), Part), VF); | |||
| 4768 | ||||
| 4769 | if (NeedsVectorIndex) { | |||
| 4770 | Value *PartStartSplat = Builder.CreateVectorSplat(VF, PartStart); | |||
| 4771 | Value *Indices = Builder.CreateAdd(PartStartSplat, UnitStepVec); | |||
| 4772 | Value *GlobalIndices = Builder.CreateAdd(PtrIndSplat, Indices); | |||
| 4773 | Value *SclrGep = | |||
| 4774 | emitTransformedIndex(Builder, GlobalIndices, PSE.getSE(), DL, II); | |||
| 4775 | SclrGep->setName("next.gep"); | |||
| 4776 | State.set(PhiR, SclrGep, Part); | |||
| 4777 | // We've cached the whole vector, which means we can support the | |||
| 4778 | // extraction of any lane. | |||
| 4779 | continue; | |||
| 4780 | } | |||
| 4781 | ||||
| 4782 | for (unsigned Lane = 0; Lane < Lanes; ++Lane) { | |||
| 4783 | Value *Idx = Builder.CreateAdd( | |||
| 4784 | PartStart, ConstantInt::get(PtrInd->getType(), Lane)); | |||
| 4785 | Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); | |||
| 4786 | Value *SclrGep = | |||
| 4787 | emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II); | |||
| 4788 | SclrGep->setName("next.gep"); | |||
| 4789 | State.set(PhiR, SclrGep, VPIteration(Part, Lane)); | |||
| 4790 | } | |||
| 4791 | } | |||
| 4792 | return; | |||
| 4793 | } | |||
| 4794 | assert(isa<SCEVConstant>(II.getStep()) && | |||
| 4795 | "Induction step not a SCEV constant!"); | |||
| 4796 | Type *PhiType = II.getStep()->getType(); | |||
| 4797 | ||||
| 4798 | // Build a pointer phi | |||
| 4799 | Value *ScalarStartValue = II.getStartValue(); | |||
| 4800 | Type *ScStValueType = ScalarStartValue->getType(); | |||
| 4801 | PHINode *NewPointerPhi = | |||
| 4802 | PHINode::Create(ScStValueType, 2, "pointer.phi", Induction); | |||
| 4803 | NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader); | |||
| 4804 | ||||
| 4805 | // A pointer induction, performed by using a GEP. | |||
| 4806 | BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); | |||
| 4807 | Instruction *InductionLoc = LoopLatch->getTerminator(); | |||
| 4808 | const SCEV *ScalarStep = II.getStep(); | |||
| 4809 | SCEVExpander Exp(*PSE.getSE(), DL, "induction"); | |||
| 4810 | Value *ScalarStepValue = | |||
| 4811 | Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc); | |||
| 4812 | Value *RuntimeVF = getRuntimeVF(Builder, PhiType, VF); | |||
| 4813 | Value *NumUnrolledElems = | |||
| 4814 | Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF)); | |||
| 4815 | Value *InductionGEP = GetElementPtrInst::Create( | |||
| 4816 | ScStValueType->getPointerElementType(), NewPointerPhi, | |||
| 4817 | Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind", | |||
| 4818 | InductionLoc); | |||
| 4819 | NewPointerPhi->addIncoming(InductionGEP, LoopLatch); | |||
| 4820 | ||||
| 4821 | // Create UF many actual address geps that use the pointer | |||
| 4822 | // phi as base and a vectorized version of the step value | |||
| 4823 | // (<step*0, ..., step*N>) as offset. | |||
| 4824 | for (unsigned Part = 0; Part < State.UF; ++Part) { | |||
| 4825 | Type *VecPhiType = VectorType::get(PhiType, State.VF); | |||
| 4826 | Value *StartOffsetScalar = | |||
| 4827 | Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part)); | |||
| 4828 | Value *StartOffset = | |||
| 4829 | Builder.CreateVectorSplat(State.VF, StartOffsetScalar); | |||
| 4830 | // Create a vector of consecutive numbers from zero to VF. | |||
| 4831 | StartOffset = | |||
| 4832 | Builder.CreateAdd(StartOffset, Builder.CreateStepVector(VecPhiType)); | |||
| 4833 | ||||
| 4834 | Value *GEP = Builder.CreateGEP( | |||
| 4835 | ScStValueType->getPointerElementType(), NewPointerPhi, | |||
| 4836 | Builder.CreateMul( | |||
| 4837 | StartOffset, Builder.CreateVectorSplat(State.VF, ScalarStepValue), | |||
| 4838 | "vector.gep")); | |||
| 4839 | State.set(PhiR, GEP, Part); | |||
| 4840 | } | |||
| 4841 | } | |||
| 4842 | } | |||
| 4843 | } | |||
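| | ||||
| | // A sketch (illustrative names, byte-element pointer) of the index the | |||
| | // scalarized pointer-induction path above produces: part Part, lane Lane | |||
| | // of the current wide iteration addresses element PtrInd + Part * VF + Lane, | |||
| | // scaled by the induction step. | |||
| | static char *nextGepAddress(char *Start, long PtrInd, long Step, unsigned VF, | |||
| | unsigned Part, unsigned Lane) { | |||
| | long GlobalIdx = PtrInd + (long)(Part * VF + Lane); // PartStart + Lane | |||
| | return Start + GlobalIdx * Step; // emitTransformedIndex analogue | |||
| | } | |||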
| 4844 | ||||
| 4845 | /// A helper function for checking whether an integer division-related | |||
| 4846 | /// instruction may divide by zero (in which case it must be predicated if | |||
| 4847 | /// executed conditionally in the scalar code). | |||
| 4848 | /// TODO: It may be worthwhile to generalize and check isKnownNonZero(). | |||
| 4849 | /// Non-zero divisors that are not compile-time constants will not be | |||
| 4850 | /// converted into multiplication, so we will still end up scalarizing | |||
| 4851 | /// the division, but can do so w/o predication. | |||
| 4852 | static bool mayDivideByZero(Instruction &I) { | |||
| 4853 | assert((I.getOpcode() == Instruction::UDiv || | |||
| 4854 | I.getOpcode() == Instruction::SDiv || | |||
| 4855 | I.getOpcode() == Instruction::URem || | |||
| 4856 | I.getOpcode() == Instruction::SRem) && | |||
| 4857 | "Unexpected instruction"); | |||
| 4858 | Value *Divisor = I.getOperand(1); | |||
| 4859 | auto *CInt = dyn_cast<ConstantInt>(Divisor); | |||
| 4860 | return !CInt || CInt->isZero(); | |||
| 4861 | } | |||
| 4862 | ||||
| 4863 | void InnerLoopVectorizer::widenInstruction(Instruction &I, VPValue *Def, | |||
| 4864 | VPUser &User, | |||
| 4865 | VPTransformState &State) { | |||
| 4866 | switch (I.getOpcode()) { | |||
| 4867 | case Instruction::Call: | |||
| 4868 | case Instruction::Br: | |||
| 4869 | case Instruction::PHI: | |||
| 4870 | case Instruction::GetElementPtr: | |||
| 4871 | case Instruction::Select: | |||
| 4872 | llvm_unreachable("This instruction is handled by a different recipe.")__builtin_unreachable(); | |||
| 4873 | case Instruction::UDiv: | |||
| 4874 | case Instruction::SDiv: | |||
| 4875 | case Instruction::SRem: | |||
| 4876 | case Instruction::URem: | |||
| 4877 | case Instruction::Add: | |||
| 4878 | case Instruction::FAdd: | |||
| 4879 | case Instruction::Sub: | |||
| 4880 | case Instruction::FSub: | |||
| 4881 | case Instruction::FNeg: | |||
| 4882 | case Instruction::Mul: | |||
| 4883 | case Instruction::FMul: | |||
| 4884 | case Instruction::FDiv: | |||
| 4885 | case Instruction::FRem: | |||
| 4886 | case Instruction::Shl: | |||
| 4887 | case Instruction::LShr: | |||
| 4888 | case Instruction::AShr: | |||
| 4889 | case Instruction::And: | |||
| 4890 | case Instruction::Or: | |||
| 4891 | case Instruction::Xor: { | |||
| 4892 | // Just widen unops and binops. | |||
| 4893 | setDebugLocFromInst(&I); | |||
| 4894 | ||||
| 4895 | for (unsigned Part = 0; Part < UF; ++Part) { | |||
| 4896 | SmallVector<Value *, 2> Ops; | |||
| 4897 | for (VPValue *VPOp : User.operands()) | |||
| 4898 | Ops.push_back(State.get(VPOp, Part)); | |||
| 4899 | ||||
| 4900 | Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops); | |||
| 4901 | ||||
| 4902 | if (auto *VecOp = dyn_cast<Instruction>(V)) | |||
| 4903 | VecOp->copyIRFlags(&I); | |||
| 4904 | ||||
| 4905 | // Use this vector value for all users of the original instruction. | |||
| 4906 | State.set(Def, V, Part); | |||
| 4907 | addMetadata(V, &I); | |||
| 4908 | } | |||
| 4909 | ||||
| 4910 | break; | |||
| 4911 | } | |||
| 4912 | case Instruction::ICmp: | |||
| 4913 | case Instruction::FCmp: { | |||
| 4914 | // Widen compares. Generate vector compares. | |||
| 4915 | bool FCmp = (I.getOpcode() == Instruction::FCmp); | |||
| 4916 | auto *Cmp = cast<CmpInst>(&I); | |||
| 4917 | setDebugLocFromInst(Cmp); | |||
| 4918 | for (unsigned Part = 0; Part < UF; ++Part) { | |||
| 4919 | Value *A = State.get(User.getOperand(0), Part); | |||
| 4920 | Value *B = State.get(User.getOperand(1), Part); | |||
| 4921 | Value *C = nullptr; | |||
| 4922 | if (FCmp) { | |||
| 4923 | // Propagate fast math flags. | |||
| 4924 | IRBuilder<>::FastMathFlagGuard FMFG(Builder); | |||
| 4925 | Builder.setFastMathFlags(Cmp->getFastMathFlags()); | |||
| 4926 | C = Builder.CreateFCmp(Cmp->getPredicate(), A, B); | |||
| 4927 | } else { | |||
| 4928 | C = Builder.CreateICmp(Cmp->getPredicate(), A, B); | |||
| 4929 | } | |||
| 4930 | State.set(Def, C, Part); | |||
| 4931 | addMetadata(C, &I); | |||
| 4932 | } | |||
| 4933 | ||||
| 4934 | break; | |||
| 4935 | } | |||
| 4936 | ||||
| 4937 | case Instruction::ZExt: | |||
| 4938 | case Instruction::SExt: | |||
| 4939 | case Instruction::FPToUI: | |||
| 4940 | case Instruction::FPToSI: | |||
| 4941 | case Instruction::FPExt: | |||
| 4942 | case Instruction::PtrToInt: | |||
| 4943 | case Instruction::IntToPtr: | |||
| 4944 | case Instruction::SIToFP: | |||
| 4945 | case Instruction::UIToFP: | |||
| 4946 | case Instruction::Trunc: | |||
| 4947 | case Instruction::FPTrunc: | |||
| 4948 | case Instruction::BitCast: { | |||
| 4949 | auto *CI = cast<CastInst>(&I); | |||
| 4950 | setDebugLocFromInst(CI); | |||
| 4951 | ||||
| 4952 | // Vectorize casts. | |||
| 4953 | Type *DestTy = | |||
| 4954 | (VF.isScalar()) ? CI->getType() : VectorType::get(CI->getType(), VF); | |||
| 4955 | ||||
| 4956 | for (unsigned Part = 0; Part < UF; ++Part) { | |||
| 4957 | Value *A = State.get(User.getOperand(0), Part); | |||
| 4958 | Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy); | |||
| 4959 | State.set(Def, Cast, Part); | |||
| 4960 | addMetadata(Cast, &I); | |||
| 4961 | } | |||
| 4962 | break; | |||
| 4963 | } | |||
| 4964 | default: | |||
| 4965 | // This instruction is not vectorized by simple widening. | |||
| 4966 | LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I); | |||
| 4967 | llvm_unreachable("Unhandled instruction!"); | |||
| 4968 | } // end of switch. | |||
| 4969 | } | |||
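| | ||||
| | // The "just widen" case above in isolation, as a sketch for VF = 4, UF = 2 | |||
| | // with an integer add (illustrative): each unroll part applies the scalar | |||
| | // opcode lane-wise to the widened operands; IR flags and metadata are | |||
| | // copied separately in the real code. | |||
| | static void widenAddVF4UF2(const int A[2][4], const int B[2][4], | |||
| | int Out[2][4]) { | |||
| | for (int Part = 0; Part < 2; ++Part) // one vector op per unroll part | |||
| | for (int L = 0; L < 4; ++L) | |||
| | Out[Part][L] = A[Part][L] + B[Part][L]; | |||
| | } | |||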
| 4970 | ||||
| 4971 | void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def, | |||
| 4972 | VPUser &ArgOperands, | |||
| 4973 | VPTransformState &State) { | |||
| 4974 | assert(!isa<DbgInfoIntrinsic>(I) && | |||
| 4975 | "DbgInfoIntrinsic should have been dropped during VPlan construction"); | |||
| 4976 | setDebugLocFromInst(&I); | |||
| 4977 | ||||
| 4978 | Module *M = I.getParent()->getParent()->getParent(); | |||
| 4979 | auto *CI = cast<CallInst>(&I); | |||
| 4980 | ||||
| 4981 | SmallVector<Type *, 4> Tys; | |||
| 4982 | for (Value *ArgOperand : CI->arg_operands()) | |||
| 4983 | Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue())); | |||
| 4984 | ||||
| 4985 | Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); | |||
| 4986 | ||||
| 4987 | // This flag shows whether to use an intrinsic or a plain call for the | |||
| 4988 | // vectorized version of the instruction, i.e., whether an intrinsic call | |||
| 4989 | // is cheaper than a library call. | |||
| 4990 | bool NeedToScalarize = false; | |||
| 4991 | InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize); | |||
| 4992 | InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0; | |||
| 4993 | bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; | |||
| 4994 | assert((UseVectorIntrinsic || !NeedToScalarize) && | |||
| 4995 | "Instruction should be scalarized elsewhere."); | |||
| 4996 | assert((IntrinsicCost.isValid() || CallCost.isValid()) && | |||
| 4997 | "Either the intrinsic cost or vector call cost must be valid"); | |||
| 4998 | ||||
| 4999 | for (unsigned Part = 0; Part < UF; ++Part) { | |||
| 5000 | SmallVector<Type *, 2> TysForDecl = {CI->getType()}; | |||
| 5001 | SmallVector<Value *, 4> Args; | |||
| 5002 | for (auto &I : enumerate(ArgOperands.operands())) { | |||
| 5003 | // Some intrinsics have a scalar argument - don't replace it with a | |||
| 5004 | // vector. | |||
| 5005 | Value *Arg; | |||
| 5006 | if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index())) | |||
| 5007 | Arg = State.get(I.value(), Part); | |||
| 5008 | else { | |||
| 5009 | Arg = State.get(I.value(), VPIteration(0, 0)); | |||
| 5010 | if (hasVectorInstrinsicOverloadedScalarOpd(ID, I.index())) | |||
| 5011 | TysForDecl.push_back(Arg->getType()); | |||
| 5012 | } | |||
| 5013 | Args.push_back(Arg); | |||
| 5014 | } | |||
| 5015 | ||||
| 5016 | Function *VectorF; | |||
| 5017 | if (UseVectorIntrinsic) { | |||
| 5018 | // Use vector version of the intrinsic. | |||
| 5019 | if (VF.isVector()) | |||
| 5020 | TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF); | |||
| 5021 | VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl); | |||
| 5022 | assert(VectorF && "Can't retrieve vector intrinsic."); | |||
| 5023 | } else { | |||
| 5024 | // Use vector version of the function call. | |||
| 5025 | const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); | |||
| 5026 | #ifndef NDEBUG | |||
| 5027 | assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr && | |||
| 5028 | "Can't create vector function."); | |||
| 5029 | #endif | |||
| 5030 | VectorF = VFDatabase(*CI).getVectorizedFunction(Shape); | |||
| 5031 | } | |||
| 5032 | SmallVector<OperandBundleDef, 1> OpBundles; | |||
| 5033 | CI->getOperandBundlesAsDefs(OpBundles); | |||
| 5034 | CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles); | |||
| 5035 | ||||
| 5036 | if (isa<FPMathOperator>(V)) | |||
| 5037 | V->copyFastMathFlags(CI); | |||
| 5038 | ||||
| 5039 | State.set(Def, V, Part); | |||
| 5040 | addMetadata(V, &I); | |||
| 5041 | } | |||
| 5042 | } | |||
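| | ||||
| | // The intrinsic-vs-libcall choice above, reduced to its decision rule (a | |||
| | // sketch; cost types simplified to long): prefer the vector intrinsic only | |||
| | // when one exists (ID != 0) and it is no more expensive than the vector | |||
| | // library call. | |||
| | static bool preferVectorIntrinsic(unsigned ID, long IntrinsicCost, | |||
| | long CallCost) { | |||
| | return ID != 0 && IntrinsicCost <= CallCost; | |||
| | } | |||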
| 5043 | ||||
| 5044 | void InnerLoopVectorizer::widenSelectInstruction(SelectInst &I, VPValue *VPDef, | |||
| 5045 | VPUser &Operands, | |||
| 5046 | bool InvariantCond, | |||
| 5047 | VPTransformState &State) { | |||
| 5048 | setDebugLocFromInst(&I); | |||
| 5049 | ||||
| 5050 | // The condition can be loop invariant but still defined inside the | |||
| 5051 | // loop. This means that we can't just use the original 'cond' value. | |||
| 5052 | // We have to take the 'vectorized' value and pick the first lane. | |||
| 5053 | // Instcombine will make this a no-op. | |||
| 5054 | auto *InvarCond = InvariantCond | |||
| 5055 | ? State.get(Operands.getOperand(0), VPIteration(0, 0)) | |||
| 5056 | : nullptr; | |||
| 5057 | ||||
| 5058 | for (unsigned Part = 0; Part < UF; ++Part) { | |||
| 5059 | Value *Cond = | |||
| 5060 | InvarCond ? InvarCond : State.get(Operands.getOperand(0), Part); | |||
| 5061 | Value *Op0 = State.get(Operands.getOperand(1), Part); | |||
| 5062 | Value *Op1 = State.get(Operands.getOperand(2), Part); | |||
| 5063 | Value *Sel = Builder.CreateSelect(Cond, Op0, Op1); | |||
| 5064 | State.set(VPDef, Sel, Part); | |||
| 5065 | addMetadata(Sel, &I); | |||
| 5066 | } | |||
| 5067 | } | |||
| 5068 | ||||
| 5069 | void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) { | |||
| 5070 | // We should not collect Scalars more than once per VF. Right now, this | |||
| 5071 | // function is called from collectUniformsAndScalars(), which already does | |||
| 5072 | // this check. Collecting Scalars for VF=1 does not make any sense. | |||
| 5073 | assert(VF.isVector() && Scalars.find(VF) == Scalars.end() && | |||
| 5074 | "This function should not be visited twice for the same VF"); | |||
| 5075 | ||||
| 5076 | SmallSetVector<Instruction *, 8> Worklist; | |||
| 5077 | ||||
| 5078 | // These sets are used to seed the analysis with pointers used by memory | |||
| 5079 | // accesses that will remain scalar. | |||
| 5080 | SmallSetVector<Instruction *, 8> ScalarPtrs; | |||
| 5081 | SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs; | |||
| 5082 | auto *Latch = TheLoop->getLoopLatch(); | |||
| 5083 | ||||
| 5084 | // A helper that returns true if the use of Ptr by MemAccess will be scalar. | |||
| 5085 | // The pointer operands of loads and stores will be scalar as long as the | |||
| 5086 | // memory access is not a gather or scatter operation. The value operand of a | |||
| 5087 | // store will remain scalar if the store is scalarized. | |||
| 5088 | auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) { | |||
| 5089 | InstWidening WideningDecision = getWideningDecision(MemAccess, VF); | |||
| 5090 | assert(WideningDecision != CM_Unknown && | |||
| 5091 | "Widening decision should be ready at this moment"); | |||
| 5092 | if (auto *Store = dyn_cast<StoreInst>(MemAccess)) | |||
| 5093 | if (Ptr == Store->getValueOperand()) | |||
| 5094 | return WideningDecision == CM_Scalarize; | |||
| 5095 | assert(Ptr == getLoadStorePointerOperand(MemAccess) && | |||
| 5096 | "Ptr is neither a value nor a pointer operand"); | |||
| 5097 | return WideningDecision != CM_GatherScatter; | |||
| 5098 | }; | |||
| 5099 | ||||
| 5100 | // A helper that returns true if the given value is a bitcast or | |||
| 5101 | // getelementptr instruction contained in the loop. | |||
| 5102 | auto isLoopVaryingBitCastOrGEP = [&](Value *V) { | |||
| 5103 | return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) || | |||
| 5104 | isa<GetElementPtrInst>(V)) && | |||
| 5105 | !TheLoop->isLoopInvariant(V); | |||
| 5106 | }; | |||
| 5107 | ||||
| 5108 | auto isScalarPtrInduction = [&](Instruction *MemAccess, Value *Ptr) { | |||
| 5109 | if (!isa<PHINode>(Ptr) || | |||
| 5110 | !Legal->getInductionVars().count(cast<PHINode>(Ptr))) | |||
| 5111 | return false; | |||
| 5112 | auto &Induction = Legal->getInductionVars()[cast<PHINode>(Ptr)]; | |||
| 5113 | if (Induction.getKind() != InductionDescriptor::IK_PtrInduction) | |||
| 5114 | return false; | |||
| 5115 | return isScalarUse(MemAccess, Ptr); | |||
| 5116 | }; | |||
| 5117 | ||||
| 5118 | // A helper that evaluates a memory access's use of a pointer. If the | |||
| 5119 | // pointer is actually the pointer induction of a loop, it is inserted | |||
| 5120 | // into Worklist. If the use will be a scalar use, and the | |||
| 5121 | // pointer is only used by memory accesses, we place the pointer in | |||
| 5122 | // ScalarPtrs. Otherwise, the pointer is placed in PossibleNonScalarPtrs. | |||
| 5123 | auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) { | |||
| 5124 | if (isScalarPtrInduction(MemAccess, Ptr)) { | |||
| 5125 | Worklist.insert(cast<Instruction>(Ptr)); | |||
| 5126 | LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Ptr | |||
| 5127 | << "\n"); | |||
| 5128 | ||||
| 5129 | Instruction *Update = cast<Instruction>( | |||
| 5130 | cast<PHINode>(Ptr)->getIncomingValueForBlock(Latch)); | |||
| 5131 | ScalarPtrs.insert(Update); | |||
| 5132 | return; | |||
| 5133 | } | |||
| 5134 | // We only care about bitcast and getelementptr instructions contained in | |||
| 5135 | // the loop. | |||
| 5136 | if (!isLoopVaryingBitCastOrGEP(Ptr)) | |||
| 5137 | return; | |||
| 5138 | ||||
| 5139 | // If the pointer has already been identified as scalar (e.g., if it was | |||
| 5140 | // also identified as uniform), there's nothing to do. | |||
| 5141 | auto *I = cast<Instruction>(Ptr); | |||
| 5142 | if (Worklist.count(I)) | |||
| 5143 | return; | |||
| 5144 | ||||
| 5145 | // If all users of the pointer will be memory accesses and scalar, place the | |||
| 5146 | // pointer in ScalarPtrs. Otherwise, place the pointer in | |||
| 5147 | // PossibleNonScalarPtrs. | |||
| 5148 | if (llvm::all_of(I->users(), [&](User *U) { | |||
| 5149 | return (isa<LoadInst>(U) || isa<StoreInst>(U)) && | |||
| 5150 | isScalarUse(cast<Instruction>(U), Ptr); | |||
| 5151 | })) | |||
| 5152 | ScalarPtrs.insert(I); | |||
| 5153 | else | |||
| 5154 | PossibleNonScalarPtrs.insert(I); | |||
| 5155 | }; | |||
| 5156 | ||||
| 5157 | // We seed the scalars analysis with two classes of instructions: (1) | |||
| 5158 | // instructions marked uniform-after-vectorization and (2) bitcast, | |||
| 5159 | // getelementptr and (pointer) phi instructions used by memory accesses | |||
| 5160 | // requiring a scalar use. | |||
| 5161 | // | |||
| 5162 | // (1) Add to the worklist all instructions that have been identified as | |||
| 5163 | // uniform-after-vectorization. | |||
| 5164 | Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end()); | |||
| 5165 | ||||
| 5166 | // (2) Add to the worklist all bitcast and getelementptr instructions used by | |||
| 5167 | // memory accesses requiring a scalar use. The pointer operands of loads and | |||
| 5168 | // stores will be scalar as long as the memory access is not a gather or | |||
| 5169 | // scatter operation. The value operand of a store will remain scalar if the | |||
| 5170 | // store is scalarized. | |||
| 5171 | for (auto *BB : TheLoop->blocks()) | |||
| 5172 | for (auto &I : *BB) { | |||
| 5173 | if (auto *Load = dyn_cast<LoadInst>(&I)) { | |||
| 5174 | evaluatePtrUse(Load, Load->getPointerOperand()); | |||
| 5175 | } else if (auto *Store = dyn_cast<StoreInst>(&I)) { | |||
| 5176 | evaluatePtrUse(Store, Store->getPointerOperand()); | |||
| 5177 | evaluatePtrUse(Store, Store->getValueOperand()); | |||
| 5178 | } | |||
| 5179 | } | |||
| 5180 | for (auto *I : ScalarPtrs) | |||
| 5181 | if (!PossibleNonScalarPtrs.count(I)) { | |||
| 5182 | LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n"); | |||
| 5183 | Worklist.insert(I); | |||
| 5184 | } | |||
| 5185 | ||||
| 5186 | // Insert the forced scalars. | |||
| 5187 | // FIXME: Currently widenPHIInstruction() often creates a dead vector | |||
| 5188 | // induction variable when the PHI user is scalarized. | |||
| 5189 | auto ForcedScalar = ForcedScalars.find(VF); | |||
| 5190 | if (ForcedScalar != ForcedScalars.end()) | |||
| 5191 | for (auto *I : ForcedScalar->second) | |||
| 5192 | Worklist.insert(I); | |||
| 5193 | ||||
| 5194 | // Expand the worklist by looking through any bitcasts and getelementptr | |||
| 5195 | // instructions we've already identified as scalar. This is similar to the | |||
| 5196 | // expansion step in collectLoopUniforms(); however, here we're only | |||
| 5197 | // expanding to include additional bitcasts and getelementptr instructions. | |||
| 5198 | unsigned Idx = 0; | |||
| 5199 | while (Idx != Worklist.size()) { | |||
| 5200 | Instruction *Dst = Worklist[Idx++]; | |||
| 5201 | if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0))) | |||
| 5202 | continue; | |||
| 5203 | auto *Src = cast<Instruction>(Dst->getOperand(0)); | |||
| 5204 | if (llvm::all_of(Src->users(), [&](User *U) -> bool { | |||
| 5205 | auto *J = cast<Instruction>(U); | |||
| 5206 | return !TheLoop->contains(J) || Worklist.count(J) || | |||
| 5207 | ((isa<LoadInst>(J) || isa<StoreInst>(J)) && | |||
| 5208 | isScalarUse(J, Src)); | |||
| 5209 | })) { | |||
| 5210 | Worklist.insert(Src); | |||
| 5211 | LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n"); | |||
| 5212 | } | |||
| 5213 | } | |||
| 5214 | ||||
| 5215 | // An induction variable will remain scalar if all users of the induction | |||
| 5216 | // variable and induction variable update remain scalar. | |||
| 5217 | for (auto &Induction : Legal->getInductionVars()) { | |||
| 5218 | auto *Ind = Induction.first; | |||
| 5219 | auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); | |||
| 5220 | ||||
| 5221 | // If tail-folding is applied, the primary induction variable will be used | |||
| 5222 | // to feed a vector compare. | |||
| 5223 | if (Ind == Legal->getPrimaryInduction() && foldTailByMasking()) | |||
| 5224 | continue; | |||
| 5225 | ||||
| 5226 | // Determine if all users of the induction variable are scalar after | |||
| 5227 | // vectorization. | |||
| 5228 | auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { | |||
| 5229 | auto *I = cast<Instruction>(U); | |||
| 5230 | return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I); | |||
| 5231 | }); | |||
| 5232 | if (!ScalarInd) | |||
| 5233 | continue; | |||
| 5234 | ||||
| 5235 | // Determine if all users of the induction variable update instruction are | |||
| 5236 | // scalar after vectorization. | |||
| 5237 | auto ScalarIndUpdate = | |||
| 5238 | llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { | |||
| 5239 | auto *I = cast<Instruction>(U); | |||
| 5240 | return I == Ind || !TheLoop->contains(I) || Worklist.count(I); | |||
| 5241 | }); | |||
| 5242 | if (!ScalarIndUpdate) | |||
| 5243 | continue; | |||
| 5244 | ||||
| 5245 | // The induction variable and its update instruction will remain scalar. | |||
| 5246 | Worklist.insert(Ind); | |||
| 5247 | Worklist.insert(IndUpdate); | |||
| 5248 | LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); | |||
| 5249 | LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate | |||
| 5250 | << "\n"); | |||
| 5251 | } | |||
| 5252 | ||||
| 5253 | Scalars[VF].insert(Worklist.begin(), Worklist.end()); | |||
| 5254 | } | |||
| 5255 | ||||
| 5256 | bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I) const { | |||
| 5257 | if (!blockNeedsPredication(I->getParent())) | |||
| 5258 | return false; | |||
| 5259 | switch(I->getOpcode()) { | |||
| 5260 | default: | |||
| 5261 | break; | |||
| 5262 | case Instruction::Load: | |||
| 5263 | case Instruction::Store: { | |||
| 5264 | if (!Legal->isMaskRequired(I)) | |||
| 5265 | return false; | |||
| 5266 | auto *Ptr = getLoadStorePointerOperand(I); | |||
| 5267 | auto *Ty = getLoadStoreType(I); | |||
| 5268 | const Align Alignment = getLoadStoreAlignment(I); | |||
| 5269 | return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) || | |||
| 5270 | TTI.isLegalMaskedGather(Ty, Alignment)) | |||
| 5271 | : !(isLegalMaskedStore(Ty, Ptr, Alignment) || | |||
| 5272 | TTI.isLegalMaskedScatter(Ty, Alignment)); | |||
| 5273 | } | |||
| 5274 | case Instruction::UDiv: | |||
| 5275 | case Instruction::SDiv: | |||
| 5276 | case Instruction::SRem: | |||
| 5277 | case Instruction::URem: | |||
| 5278 | return mayDivideByZero(*I); | |||
| 5279 | } | |||
| 5280 | return false; | |||
| 5281 | } | |||
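
// Illustrative sketch, not part of the original source: the shape of loop
// that the predicate above guards against. The udiv below only runs when
// the guard holds, so executing it speculatively in a masked-off vector
// lane could divide by zero; isScalarWithPredication() therefore reports
// it as scalar-with-predication. All names here are hypothetical.
#if 0
void exampleGuardedDivide(unsigned *A, const unsigned *B, int N) {
  for (int i = 0; i < N; ++i)
    if (B[i] != 0)          // the block needs predication
      A[i] = A[i] / B[i];   // UDiv that mayDivideByZero() flags
}
#endif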
| 5282 | ||||
| 5283 | bool LoopVectorizationCostModel::interleavedAccessCanBeWidened( | |||
| 5284 | Instruction *I, ElementCount VF) { | |||
| 5285 | assert(isAccessInterleaved(I) && "Expecting interleaved access."); | |||
| 5286 | assert(getWideningDecision(I, VF) == CM_Unknown && | |||
| 5287 | "Decision should not be set yet."); | |||
| 5288 | auto *Group = getInterleavedAccessGroup(I); | |||
| 5289 | assert(Group && "Must have a group."); | |||
| 5290 | ||||
| 5291 | // If the instruction's allocated size doesn't equal its type size, it | |||
| 5292 | // requires padding and will be scalarized. | |||
| 5293 | auto &DL = I->getModule()->getDataLayout(); | |||
| 5294 | auto *ScalarTy = getLoadStoreType(I); | |||
| 5295 | if (hasIrregularType(ScalarTy, DL)) | |||
| 5296 | return false; | |||
| 5297 | ||||
| 5298 | // Check if masking is required. | |||
| 5299 | // A Group may need masking for one of two reasons: it resides in a block that | |||
| 5300 | // needs predication, or it was decided to use masking to deal with gaps. | |||
| 5301 | bool PredicatedAccessRequiresMasking = | |||
| 5302 | Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I); | |||
| 5303 | bool AccessWithGapsRequiresMasking = | |||
| 5304 | Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed(); | |||
| 5305 | if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking) | |||
| 5306 | return true; | |||
| 5307 | ||||
| 5308 | // If masked interleaving is required, we expect that the user/target had | |||
| 5309 | // enabled it, because otherwise it either wouldn't have been created or | |||
| 5310 | // it should have been invalidated by the CostModel. | |||
| 5311 | assert(useMaskedInterleavedAccesses(TTI) && | |||
| 5312 | "Masked interleave-groups for predicated accesses are not enabled."); | |||
| 5313 | ||||
| 5314 | auto *Ty = getLoadStoreType(I); | |||
| 5315 | const Align Alignment = getLoadStoreAlignment(I); | |||
| 5316 | return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment) | |||
| 5317 | : TTI.isLegalMaskedStore(Ty, Alignment); | |||
| 5318 | } | |||
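
// Illustrative sketch, not part of the original source: a factor-2
// interleave group. The Re and Im loads stride by two floats and can be
// widened into a single wide load plus shuffles, provided the element type
// needs no padding and, when masking is required, the target supports
// masked interleaved accesses. All names here are hypothetical.
#if 0
struct Complex { float Re, Im; };
float exampleSumMagnitudes(const Complex *C, int N) {
  float Sum = 0.0f;
  for (int i = 0; i < N; ++i)
    Sum += C[i].Re * C[i].Re + C[i].Im * C[i].Im; // one interleave group
  return Sum;
}
#endif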
| 5319 | ||||
| 5320 | bool LoopVectorizationCostModel::memoryInstructionCanBeWidened( | |||
| 5321 | Instruction *I, ElementCount VF) { | |||
| 5322 | // Get and ensure we have a valid memory instruction. | |||
| 5323 | LoadInst *LI = dyn_cast<LoadInst>(I); | |||
| 5324 | StoreInst *SI = dyn_cast<StoreInst>(I); | |||
| 5325 | assert((LI || SI) && "Invalid memory instruction"); | |||
| 5326 | ||||
| 5327 | auto *Ptr = getLoadStorePointerOperand(I); | |||
| 5328 | ||||
| 5329 | // First of all, in order to be widened the pointer should be consecutive. | |||
| 5330 | if (!Legal->isConsecutivePtr(Ptr)) | |||
| 5331 | return false; | |||
| 5332 | ||||
| 5333 | // If the instruction is a store located in a predicated block, it will be | |||
| 5334 | // scalarized. | |||
| 5335 | if (isScalarWithPredication(I)) | |||
| 5336 | return false; | |||
| 5337 | ||||
| 5338 | // If the instruction's allocated size doesn't equal its type size, it | |||
| 5339 | // requires padding and will be scalarized. | |||
| 5340 | auto &DL = I->getModule()->getDataLayout(); | |||
| 5341 | auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType(); | |||
| 5342 | if (hasIrregularType(ScalarTy, DL)) | |||
| 5343 | return false; | |||
| 5344 | ||||
| 5345 | return true; | |||
| 5346 | } | |||
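
// Illustrative sketch, not part of the original source: only the first
// store below satisfies the consecutive-pointer requirement checked above;
// the second walks memory with stride 2 and would be scalarized or emitted
// as a scatter rather than widened. All names here are hypothetical.
#if 0
void exampleConsecutiveVsStrided(int *A, int *B, int N) {
  for (int i = 0; i < N; ++i) {
    A[i] = i;     // consecutive pointer: widenable into a vector store
    B[2 * i] = i; // stride-2 pointer: not consecutive, not widenable
  }
}
#endif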
| 5347 | ||||
| 5348 | void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) { | |||
| 5349 | // We should not collect Uniforms more than once per VF. Right now, | |||
| 5350 | // this function is called from collectUniformsAndScalars(), which | |||
| 5351 | // already does this check. Collecting Uniforms for VF=1 does not make any | |||
| 5352 | // sense. | |||
| 5353 | ||||
| 5354 | assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() && | |||
| 5355 | "This function should not be visited twice for the same VF"); | |||
| 5356 | ||||
| 5357 | // Visit the list of Uniforms. Even if we find no uniform value, the entry | |||
| 5358 | // is created so we won't analyze again: Uniforms.count(VF) will return 1. | |||
| 5359 | Uniforms[VF].clear(); | |||
| 5360 | ||||
| 5361 | // We now know that the loop is vectorizable! | |||
| 5362 | // Collect instructions inside the loop that will remain uniform after | |||
| 5363 | // vectorization. | |||
| 5364 | ||||
| 5365 | // Global values, params and instructions outside of current loop are out of | |||
| 5366 | // scope. | |||
| 5367 | auto isOutOfScope = [&](Value *V) -> bool { | |||
| 5368 | Instruction *I = dyn_cast<Instruction>(V); | |||
| 5369 | return (!I || !TheLoop->contains(I)); | |||
| 5370 | }; | |||
| 5371 | ||||
| 5372 | SetVector<Instruction *> Worklist; | |||
| 5373 | BasicBlock *Latch = TheLoop->getLoopLatch(); | |||
| 5374 | ||||
| 5375 | // Instructions that are scalar with predication must not be considered | |||
| 5376 | // uniform after vectorization, because that would create an erroneous | |||
| 5377 | // replicating region where only a single instance out of VF should be formed. | |||
| 5378 | // TODO: optimize such rare cases if found important, see PR40816. | |||
| 5379 | auto addToWorklistIfAllowed = [&](Instruction *I) -> void { | |||
| 5380 | if (isOutOfScope(I)) { | |||
| 5381 | LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: " | |||
| 5382 | << *I << "\n"); | |||
| 5383 | return; | |||
| 5384 | } | |||
| 5385 | if (isScalarWithPredication(I)) { | |||
| 5386 | LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: " | |||
| 5387 | << *I << "\n"); | |||
| 5388 | return; | |||
| 5389 | } | |||
| 5390 | LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n"); | |||
| 5391 | Worklist.insert(I); | |||
| 5392 | }; | |||
| 5393 | ||||
| 5394 | // Start with the conditional branch. If the branch condition is an | |||
| 5395 | // instruction contained in the loop that is only used by the branch, it is | |||
| 5396 | // uniform. | |||
| 5397 | auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); | |||
| 5398 | if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) | |||
| 5399 | addToWorklistIfAllowed(Cmp); | |||
| 5400 | ||||
| 5401 | auto isUniformDecision = [&](Instruction *I, ElementCount VF) { | |||
| 5402 | InstWidening WideningDecision = getWideningDecision(I, VF); | |||
| 5403 | assert(WideningDecision != CM_Unknown && | |||
| 5404 | "Widening decision should be ready at this moment"); | |||
| 5405 | ||||
| 5406 | // A uniform memory op is itself uniform. We exclude uniform stores | |||
| 5407 | // here as they demand the last lane, not the first one. | |||
| 5408 | if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) { | |||
| 5409 | assert(WideningDecision == CM_Scalarize); | |||
| 5410 | return true; | |||
| 5411 | } | |||
| 5412 | ||||
| 5413 | return (WideningDecision == CM_Widen || | |||
| 5414 | WideningDecision == CM_Widen_Reverse || | |||
| 5415 | WideningDecision == CM_Interleave); | |||
| 5416 | }; | |||
| 5417 | ||||
| 5418 | ||||
| 5419 | // Returns true if Ptr is the pointer operand of a memory access instruction | |||
| 5420 | // I, and I is known to not require scalarization. | |||
| 5421 | auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool { | |||
| 5422 | return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF); | |||
| 5423 | }; | |||
| 5424 | ||||
| 5425 | // Holds a list of values which are known to have at least one uniform use. | |||
| 5426 | // Note that there may be other uses which aren't uniform. A "uniform use" | |||
| 5427 | // here is something which only demands lane 0 of the unrolled iterations; | |||
| 5428 | // it does not imply that all lanes produce the same value (e.g. this is not | |||
| 5429 | // the usual meaning of uniform) | |||
| 5430 | SetVector<Value *> HasUniformUse; | |||
| 5431 | ||||
| 5432 | // Scan the loop for instructions which are either a) known to have only | |||
| 5433 | // lane 0 demanded or b) are uses which demand only lane 0 of their operand. | |||
| 5434 | for (auto *BB : TheLoop->blocks()) | |||
| 5435 | for (auto &I : *BB) { | |||
| 5436 | if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) { | |||
| 5437 | switch (II->getIntrinsicID()) { | |||
| 5438 | case Intrinsic::sideeffect: | |||
| 5439 | case Intrinsic::experimental_noalias_scope_decl: | |||
| 5440 | case Intrinsic::assume: | |||
| 5441 | case Intrinsic::lifetime_start: | |||
| 5442 | case Intrinsic::lifetime_end: | |||
| 5443 | if (TheLoop->hasLoopInvariantOperands(&I)) | |||
| 5444 | addToWorklistIfAllowed(&I); | |||
| 5445 | break; | |||
| 5446 | default: | |||
| 5447 | break; | |||
| 5448 | } | |||
| 5449 | } | |||
| 5450 | ||||
| 5451 | // If there's no pointer operand, there's nothing to do. | |||
| 5452 | auto *Ptr = getLoadStorePointerOperand(&I); | |||
| 5453 | if (!Ptr) | |||
| 5454 | continue; | |||
| 5455 | ||||
| 5456 | // A uniform memory op is itself uniform. We exclude uniform stores | |||
| 5457 | // here as they demand the last lane, not the first one. | |||
| 5458 | if (isa<LoadInst>(I) && Legal->isUniformMemOp(I)) | |||
| 5459 | addToWorklistIfAllowed(&I); | |||
| 5460 | ||||
| 5461 | if (isUniformDecision(&I, VF)) { | |||
| 5462 | assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check"); | |||
| 5463 | HasUniformUse.insert(Ptr); | |||
| 5464 | } | |||
| 5465 | } | |||
| 5466 | ||||
| 5467 | // Add to the worklist any operands which have *only* uniform (e.g. lane 0 | |||
| 5468 | // demanding) users. Since loops are assumed to be in LCSSA form, this | |||
| 5469 | // disallows uses outside the loop as well. | |||
| 5470 | for (auto *V : HasUniformUse) { | |||
| 5471 | if (isOutOfScope(V)) | |||
| 5472 | continue; | |||
| 5473 | auto *I = cast<Instruction>(V); | |||
| 5474 | auto UsersAreMemAccesses = | |||
| 5475 | llvm::all_of(I->users(), [&](User *U) -> bool { | |||
| 5476 | return isVectorizedMemAccessUse(cast<Instruction>(U), V); | |||
| 5477 | }); | |||
| 5478 | if (UsersAreMemAccesses) | |||
| 5479 | addToWorklistIfAllowed(I); | |||
| 5480 | } | |||
| 5481 | ||||
| 5482 | // Expand Worklist in topological order: whenever a new instruction | |||
| 5483 | // is added, its users should already be inside Worklist. This ensures | |||
| 5484 | // that a uniform instruction will only be used by uniform instructions. | |||
| 5485 | unsigned idx = 0; | |||
| 5486 | while (idx != Worklist.size()) { | |||
| 5487 | Instruction *I = Worklist[idx++]; | |||
| 5488 | ||||
| 5489 | for (auto OV : I->operand_values()) { | |||
| 5490 | // isOutOfScope operands cannot be uniform instructions. | |||
| 5491 | if (isOutOfScope(OV)) | |||
| 5492 | continue; | |||
| 5493 | // First order recurrence Phi's should typically be considered | |||
| 5494 | // non-uniform. | |||
| 5495 | auto *OP = dyn_cast<PHINode>(OV); | |||
| 5496 | if (OP && Legal->isFirstOrderRecurrence(OP)) | |||
| 5497 | continue; | |||
| 5498 | // If all the users of the operand are uniform, then add the | |||
| 5499 | // operand into the uniform worklist. | |||
| 5500 | auto *OI = cast<Instruction>(OV); | |||
| 5501 | if (llvm::all_of(OI->users(), [&](User *U) -> bool { | |||
| 5502 | auto *J = cast<Instruction>(U); | |||
| 5503 | return Worklist.count(J) || isVectorizedMemAccessUse(J, OI); | |||
| 5504 | })) | |||
| 5505 | addToWorklistIfAllowed(OI); | |||
| 5506 | } | |||
| 5507 | } | |||
| 5508 | ||||
| 5509 | // For an instruction to be added into Worklist above, all its users inside | |||
| 5510 | // the loop should also be in Worklist. However, this condition cannot be | |||
| 5511 | // true for phi nodes that form a cyclic dependence. We must process phi | |||
| 5512 | // nodes separately. An induction variable will remain uniform if all users | |||
| 5513 | // of the induction variable and induction variable update remain uniform. | |||
| 5514 | // The code below handles both pointer and non-pointer induction variables. | |||
| 5515 | for (auto &Induction : Legal->getInductionVars()) { | |||
| 5516 | auto *Ind = Induction.first; | |||
| 5517 | auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); | |||
| 5518 | ||||
| 5519 | // Determine if all users of the induction variable are uniform after | |||
| 5520 | // vectorization. | |||
| 5521 | auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { | |||
| 5522 | auto *I = cast<Instruction>(U); | |||
| 5523 | return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || | |||
| 5524 | isVectorizedMemAccessUse(I, Ind); | |||
| 5525 | }); | |||
| 5526 | if (!UniformInd) | |||
| 5527 | continue; | |||
| 5528 | ||||
| 5529 | // Determine if all users of the induction variable update instruction are | |||
| 5530 | // uniform after vectorization. | |||
| 5531 | auto UniformIndUpdate = | |||
| 5532 | llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { | |||
| 5533 | auto *I = cast<Instruction>(U); | |||
| 5534 | return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || | |||
| 5535 | isVectorizedMemAccessUse(I, IndUpdate); | |||
| 5536 | }); | |||
| 5537 | if (!UniformIndUpdate) | |||
| 5538 | continue; | |||
| 5539 | ||||
| 5540 | // The induction variable and its update instruction will remain uniform. | |||
| 5541 | addToWorklistIfAllowed(Ind); | |||
| 5542 | addToWorklistIfAllowed(IndUpdate); | |||
| 5543 | } | |||
| 5544 | ||||
| 5545 | Uniforms[VF].insert(Worklist.begin(), Worklist.end()); | |||
| 5546 | } | |||
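
// Illustrative sketch, not part of the original source: in the loop below
// the address computation for A[i] is used only as the pointer operand of
// a load that will be widened, so only lane 0 of it is demanded and the
// address (and the scalar induction feeding it) remains uniform: one
// scalar address per vector iteration. All names here are hypothetical.
#if 0
int exampleUniformAddress(const int *A, int N) {
  int Sum = 0;
  for (int i = 0; i < N; ++i)
    Sum += A[i]; // &A[i] has a single first-lane use in the wide load
  return Sum;
}
#endif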
| 5547 | ||||
| 5548 | bool LoopVectorizationCostModel::runtimeChecksRequired() { | |||
| 5549 | LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n"); | |||
| 5550 | ||||
| 5551 | if (Legal->getRuntimePointerChecking()->Need) { | |||
| 5552 | reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz", | |||
| 5553 | "runtime pointer checks needed. Enable vectorization of this " | |||
| 5554 | "loop with '#pragma clang loop vectorize(enable)' when " | |||
| 5555 | "compiling with -Os/-Oz", | |||
| 5556 | "CantVersionLoopWithOptForSize", ORE, TheLoop); | |||
| 5557 | return true; | |||
| 5558 | } | |||
| 5559 | ||||
| 5560 | if (!PSE.getUnionPredicate().getPredicates().empty()) { | |||
| 5561 | reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz", | |||
| 5562 | "runtime SCEV checks needed. Enable vectorization of this " | |||
| 5563 | "loop with '#pragma clang loop vectorize(enable)' when " | |||
| 5564 | "compiling with -Os/-Oz", | |||
| 5565 | "CantVersionLoopWithOptForSize", ORE, TheLoop); | |||
| 5566 | return true; | |||
| 5567 | } | |||
| 5568 | ||||
| 5569 | // FIXME: Avoid specializing for stride==1 instead of bailing out. | |||
| 5570 | if (!Legal->getLAI()->getSymbolicStrides().empty()) { | |||
| 5571 | reportVectorizationFailure("Runtime stride check for small trip count", | |||
| 5572 | "runtime stride == 1 checks needed. Enable vectorization of " | |||
| 5573 | "this loop without such check by compiling with -Os/-Oz", | |||
| 5574 | "CantVersionLoopWithOptForSize", ORE, TheLoop); | |||
| 5575 | return true; | |||
| 5576 | } | |||
| 5577 | ||||
| 5578 | return false; | |||
| 5579 | } | |||
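
// Illustrative sketch, not part of the original source: a loop whose legal
// vectorization depends on a runtime pointer check, because A and B may
// overlap. When optimizing for size, runtimeChecksRequired() makes the
// pass refuse to version the loop rather than pay for the extra compare
// and branch. All names here are hypothetical.
#if 0
void exampleMayAlias(int *A, const int *B, int N) {
  for (int i = 0; i < N; ++i)
    A[i] = B[i] + 1; // overlap is unknown at compile time
}
#endif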
| 5580 | ||||
| 5581 | ElementCount | |||
| 5582 | LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) { | |||
| 5583 | if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) { | |||
| 5584 | reportVectorizationInfo( | |||
| 5585 | "Disabling scalable vectorization, because target does not " | |||
| 5586 | "support scalable vectors.", | |||
| 5587 | "ScalableVectorsUnsupported", ORE, TheLoop); | |||
| 5588 | return ElementCount::getScalable(0); | |||
| 5589 | } | |||
| 5590 | ||||
| 5591 | if (Hints->isScalableVectorizationDisabled()) { | |||
| 5592 | reportVectorizationInfo("Scalable vectorization is explicitly disabled", | |||
| 5593 | "ScalableVectorizationDisabled", ORE, TheLoop); | |||
| 5594 | return ElementCount::getScalable(0); | |||
| 5595 | } | |||
| 5596 | ||||
| 5597 | auto MaxScalableVF = ElementCount::getScalable( | |||
| 5598 | std::numeric_limits<ElementCount::ScalarTy>::max()); | |||
| 5599 | ||||
| 5600 | // Test that the loop-vectorizer can legalize all operations for this MaxVF. | |||
| 5601 | // FIXME: While for scalable vectors this is currently sufficient, this should | |||
| 5602 | // be replaced by a more detailed mechanism that filters out specific VFs, | |||
| 5603 | // instead of invalidating vectorization for a whole set of VFs based on the | |||
| 5604 | // MaxVF. | |||
| 5605 | ||||
| 5606 | // Disable scalable vectorization if the loop contains unsupported reductions. | |||
| 5607 | if (!canVectorizeReductions(MaxScalableVF)) { | |||
| 5608 | reportVectorizationInfo( | |||
| 5609 | "Scalable vectorization not supported for the reduction " | |||
| 5610 | "operations found in this loop.", | |||
| 5611 | "ScalableVFUnfeasible", ORE, TheLoop); | |||
| 5612 | return ElementCount::getScalable(0); | |||
| 5613 | } | |||
| 5614 | ||||
| 5615 | // Disable scalable vectorization if the loop contains any instructions | |||
| 5616 | // with element types not supported for scalable vectors. | |||
| 5617 | if (any_of(ElementTypesInLoop, [&](Type *Ty) { | |||
| 5618 | return !Ty->isVoidTy() && | |||
| 5619 | !this->TTI.isElementTypeLegalForScalableVector(Ty); | |||
| 5620 | })) { | |||
| 5621 | reportVectorizationInfo("Scalable vectorization is not supported " | |||
| 5622 | "for all element types found in this loop.", | |||
| 5623 | "ScalableVFUnfeasible", ORE, TheLoop); | |||
| 5624 | return ElementCount::getScalable(0); | |||
| 5625 | } | |||
| 5626 | ||||
| 5627 | if (Legal->isSafeForAnyVectorWidth()) | |||
| 5628 | return MaxScalableVF; | |||
| 5629 | ||||
| 5630 | // Limit MaxScalableVF by the maximum safe dependence distance. | |||
| 5631 | Optional<unsigned> MaxVScale = TTI.getMaxVScale(); | |||
| 5632 | MaxScalableVF = ElementCount::getScalable( | |||
| 5633 | MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0); | |||
| 5634 | if (!MaxScalableVF) | |||
| 5635 | reportVectorizationInfo( | |||
| 5636 | "Max legal vector width too small, scalable vectorization " | |||
| 5637 | "unfeasible.", | |||
| 5638 | "ScalableVFUnfeasible", ORE, TheLoop); | |||
| 5639 | ||||
| 5640 | return MaxScalableVF; | |||
| 5641 | } | |||
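
// Minimal sketch of the final clamping rule above, under hypothetical
// inputs (none of these numbers come from the source): vscale x N is safe
// iff N * MaxVScale <= MaxSafeElements, so the largest legal scalable VF
// uses N = MaxSafeElements / MaxVScale.
#if 0
static unsigned exampleClampScalableVF(unsigned MaxSafeElements,
                                       unsigned MaxVScale) {
  // e.g. MaxSafeElements = 32 and MaxVScale = 4 give vscale x 8: even at
  // the maximum vscale, 8 * 4 = 32 lanes stay within the safe distance.
  return MaxSafeElements / MaxVScale;
}
#endif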
| 5642 | ||||
| 5643 | FixedScalableVFPair | |||
| 5644 | LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount, | |||
| 5645 | ElementCount UserVF) { | |||
| 5646 | MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI); | |||
| 5647 | unsigned SmallestType, WidestType; | |||
| 5648 | std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes(); | |||
| 5649 | ||||
| 5650 | // Get the maximum safe dependence distance in bits computed by LAA. | |||
| 5651 | // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from | |||
| 5652 | // the memory accesses that is most restrictive (involved in the smallest | |||
| 5653 | // dependence distance). | |||
| 5654 | unsigned MaxSafeElements = | |||
| 5655 | PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType); | |||
| 5656 | ||||
| 5657 | auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements); | |||
| 5658 | auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements); | |||
| 5659 | ||||
| 5660 | LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF | |||
| 5661 | << ".\n"); | |||
| 5662 | LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF | |||
| 5663 | << ".\n"); | |||
| 5664 | ||||
| 5665 | // First analyze the UserVF, fall back if the UserVF should be ignored. | |||
| 5666 | if (UserVF) { | |||
| 5667 | auto MaxSafeUserVF = | |||
| 5668 | UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF; | |||
| 5669 | ||||
| 5670 | if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) { | |||
| 5671 | // If `VF=vscale x N` is safe, then so is `VF=N` | |||
| 5672 | if (UserVF.isScalable()) | |||
| 5673 | return FixedScalableVFPair( | |||
| 5674 | ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF); | |||
| 5675 | else | |||
| 5676 | return UserVF; | |||
| 5677 | } | |||
| 5678 | ||||
| 5679 | assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF)); | |||
| 5680 | ||||
| 5681 | // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it | |||
| 5682 | // is better to ignore the hint and let the compiler choose a suitable VF. | |||
| 5683 | if (!UserVF.isScalable()) { | |||
| 5684 | LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF | |||
| 5685 | << " is unsafe, clamping to max safe VF=" | |||
| 5686 | << MaxSafeFixedVF << ".\n"); | |||
| 5687 | ORE->emit([&]() { | |||
| 5688 | return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", | |||
| 5689 | TheLoop->getStartLoc(), | |||
| 5690 | TheLoop->getHeader()) | |||
| 5691 | << "User-specified vectorization factor " | |||
| 5692 | << ore::NV("UserVectorizationFactor", UserVF) | |||
| 5693 | << " is unsafe, clamping to maximum safe vectorization factor " | |||
| 5694 | << ore::NV("VectorizationFactor", MaxSafeFixedVF); | |||
| 5695 | }); | |||
| 5696 | return MaxSafeFixedVF; | |||
| 5697 | } | |||
| 5698 | ||||
| 5699 | LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF | |||
| 5700 | << " is unsafe. Ignoring scalable UserVF.\n"); | |||
| 5701 | ORE->emit([&]() { | |||
| 5702 | return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", | |||
| 5703 | TheLoop->getStartLoc(), | |||
| 5704 | TheLoop->getHeader()) | |||
| 5705 | << "User-specified vectorization factor " | |||
| 5706 | << ore::NV("UserVectorizationFactor", UserVF) | |||
| 5707 | << " is unsafe. Ignoring the hint to let the compiler pick a " | |||
| 5708 | "suitable VF."; | |||
| 5709 | }); | |||
| 5710 | } | |||
| 5711 | ||||
| 5712 | LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType | |||
| 5713 | << " / " << WidestType << " bits.\n"); | |||
| 5714 | ||||
| 5715 | FixedScalableVFPair Result(ElementCount::getFixed(1), | |||
| 5716 | ElementCount::getScalable(0)); | |||
| 5717 | if (auto MaxVF = getMaximizedVFForTarget(ConstTripCount, SmallestType, | |||
| 5718 | WidestType, MaxSafeFixedVF)) | |||
| 5719 | Result.FixedVF = MaxVF; | |||
| 5720 | ||||
| 5721 | if (auto MaxVF = getMaximizedVFForTarget(ConstTripCount, SmallestType, | |||
| 5722 | WidestType, MaxSafeScalableVF)) | |||
| 5723 | if (MaxVF.isScalable()) { | |||
| 5724 | Result.ScalableVF = MaxVF; | |||
| 5725 | LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF | |||
| 5726 | << "\n"); | |||
| 5727 | } | |||
| 5728 | ||||
| 5729 | return Result; | |||
| 5730 | } | |||
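
// Worked example of the MaxSafeElements computation above, with
// hypothetical numbers: if LAA reports a max safe vector width of 384 bits
// and the widest loop type is 32 bits, then 384 / 32 = 12 lanes fit the
// raw dependence distance, and PowerOf2Floor(12) = 8 becomes the max safe
// fixed VF. A sketch of the power-of-two floor for illustration:
#if 0
static unsigned examplePowerOf2Floor(unsigned X) {
  while (X & (X - 1))
    X &= X - 1; // clear the lowest set bit until one bit remains
  return X;     // examplePowerOf2Floor(12) == 8
}
#endif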
| 5731 | ||||
| 5732 | FixedScalableVFPair | |||
| 5733 | LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) { | |||
| 5734 | if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) { | |||
| 5735 | // TODO: It may be useful to do this anyway, since the runtime check is | |||
| 5736 | // still likely to be dynamically uniform if the target can skip it. | |||
| 5737 | reportVectorizationFailure( | |||
| 5738 | "Not inserting runtime ptr check for divergent target", | |||
| 5739 | "runtime pointer checks needed. Not enabled for divergent target", | |||
| 5740 | "CantVersionLoopWithDivergentTarget", ORE, TheLoop); | |||
| 5741 | return FixedScalableVFPair::getNone(); | |||
| 5742 | } | |||
| 5743 | ||||
| 5744 | unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); | |||
| 5745 | LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n'); | |||
| 5746 | if (TC == 1) { | |||
| 5747 | reportVectorizationFailure("Single iteration (non) loop", | |||
| 5748 | "loop trip count is one, irrelevant for vectorization", | |||
| 5749 | "SingleIterationLoop", ORE, TheLoop); | |||
| 5750 | return FixedScalableVFPair::getNone(); | |||
| 5751 | } | |||
| 5752 | ||||
| 5753 | switch (ScalarEpilogueStatus) { | |||
| 5754 | case CM_ScalarEpilogueAllowed: | |||
| 5755 | return computeFeasibleMaxVF(TC, UserVF); | |||
| 5756 | case CM_ScalarEpilogueNotAllowedUsePredicate: | |||
| 5757 | LLVM_FALLTHROUGH; | |||
| 5758 | case CM_ScalarEpilogueNotNeededUsePredicate: | |||
| 5759 | LLVM_DEBUG( | |||
| 5760 | dbgs() << "LV: vector predicate hint/switch found.\n" | |||
| 5761 | << "LV: Not allowing scalar epilogue, creating predicated " | |||
| 5762 | << "vector loop.\n"); | |||
| 5763 | break; | |||
| 5764 | case CM_ScalarEpilogueNotAllowedLowTripLoop: | |||
| 5765 | // fallthrough as a special case of OptForSize | |||
| 5766 | case CM_ScalarEpilogueNotAllowedOptSize: | |||
| 5767 | if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize) | |||
| 5768 | LLVM_DEBUG( | |||
| 5769 | dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n"); | |||
| 5770 | else | |||
| 5771 | LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip " | |||
| 5772 | << "count.\n"); | |||
| 5773 | ||||
| 5774 | // Bail if runtime checks are required, which are not good when optimising | |||
| 5775 | // for size. | |||
| 5776 | if (runtimeChecksRequired()) | |||
| 5777 | return FixedScalableVFPair::getNone(); | |||
| 5778 | ||||
| 5779 | break; | |||
| 5780 | } | |||
| 5781 | ||||
| 5782 | // The only loops we can vectorize without a scalar epilogue are loops with | |||
| 5783 | // a bottom-test and a single exiting block. We'd have to handle the fact | |||
| 5784 | // that not every instruction executes on the last iteration. This will | |||
| 5785 | // require a lane mask which varies through the vector loop body. (TODO) | |||
| 5786 | if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) { | |||
| 5787 | // If there was a tail-folding hint/switch, but we can't fold the tail by | |||
| 5788 | // masking, fallback to a vectorization with a scalar epilogue. | |||
| 5789 | if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) { | |||
| 5790 | LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a " | |||
| 5791 | "scalar epilogue instead.\n"); | |||
| 5792 | ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; | |||
| 5793 | return computeFeasibleMaxVF(TC, UserVF); | |||
| 5794 | } | |||
| 5795 | return FixedScalableVFPair::getNone(); | |||
| 5796 | } | |||
| 5797 | ||||
| 5798 | // Now try the tail folding | |||
| 5799 | ||||
| 5800 | // Invalidate interleave groups that require an epilogue if we can't mask | |||
| 5801 | // the interleave-group. | |||
| 5802 | if (!useMaskedInterleavedAccesses(TTI)) { | |||
| 5803 | assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() && | |||
| 5804 | "No decisions should have been taken at this point"); | |||
| 5805 | // Note: There is no need to invalidate any cost modeling decisions here, as | |||
| 5806 | // none were taken so far. | |||
| 5807 | InterleaveInfo.invalidateGroupsRequiringScalarEpilogue(); | |||
| 5808 | } | |||
| 5809 | ||||
| 5810 | FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF); | |||
| 5811 | // Avoid tail folding if the trip count is known to be a multiple of any VF | |||
| 5812 | // we chose. | |||
| 5813 | // FIXME: The condition below pessimises the case for fixed-width vectors, | |||
| 5814 | // when scalable VFs are also candidates for vectorization. | |||
| 5815 | if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) { | |||
| 5816 | ElementCount MaxFixedVF = MaxFactors.FixedVF; | |||
| 5817 | assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) && | |||
| 5818 | "MaxFixedVF must be a power of 2"); | |||
| 5819 | unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC | |||
| 5820 | : MaxFixedVF.getFixedValue(); | |||
| 5821 | ScalarEvolution *SE = PSE.getSE(); | |||
| 5822 | const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); | |||
| 5823 | const SCEV *ExitCount = SE->getAddExpr( | |||
| 5824 | BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); | |||
| 5825 | const SCEV *Rem = SE->getURemExpr( | |||
| 5826 | SE->applyLoopGuards(ExitCount, TheLoop), | |||
| 5827 | SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC)); | |||
| 5828 | if (Rem->isZero()) { | |||
| 5829 | // Accept MaxFixedVF if we do not have a tail. | |||
| 5830 | LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n"); | |||
| 5831 | return MaxFactors; | |||
| 5832 | } | |||
| 5833 | } | |||
| 5834 | ||||
| 5835 | // For scalable vectors, don't use tail folding as this is currently not yet | |||
| 5836 | // supported. The code is likely to have ended up here if the trip count is | |||
| 5837 | // low, in which case it makes sense not to use scalable vectors. | |||
| 5838 | if (MaxFactors.ScalableVF.isVector()) | |||
| 5839 | MaxFactors.ScalableVF = ElementCount::getScalable(0); | |||
| 5840 | ||||
| 5841 | // If we don't know the precise trip count, or if the trip count that we | |||
| 5842 | // found modulo the vectorization factor is not zero, try to fold the tail | |||
| 5843 | // by masking. | |||
| 5844 | // FIXME: look for a smaller MaxVF that does divide TC rather than masking. | |||
| 5845 | if (Legal->prepareToFoldTailByMasking()) { | |||
| 5846 | FoldTailByMasking = true; | |||
| 5847 | return MaxFactors; | |||
| 5848 | } | |||
| 5849 | ||||
| 5850 | // If there was a tail-folding hint/switch, but we can't fold the tail by | |||
| 5851 | // masking, fallback to a vectorization with a scalar epilogue. | |||
| 5852 | if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) { | |||
| 5853 | LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a " | |||
| 5854 | "scalar epilogue instead.\n"); | |||
| 5855 | ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; | |||
| 5856 | return MaxFactors; | |||
| 5857 | } | |||
| 5858 | ||||
| 5859 | if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) { | |||
| 5860 | LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n"); | |||
| 5861 | return FixedScalableVFPair::getNone(); | |||
| 5862 | } | |||
| 5863 | ||||
| 5864 | if (TC == 0) { | |||
| 5865 | reportVectorizationFailure( | |||
| 5866 | "Unable to calculate the loop count due to complex control flow", | |||
| 5867 | "unable to calculate the loop count due to complex control flow", | |||
| 5868 | "UnknownLoopCountComplexCFG", ORE, TheLoop); | |||
| 5869 | return FixedScalableVFPair::getNone(); | |||
| 5870 | } | |||
| 5871 | ||||
| 5872 | reportVectorizationFailure( | |||
| 5873 | "Cannot optimize for size and vectorize at the same time.", | |||
| 5874 | "cannot optimize for size and vectorize at the same time. " | |||
| 5875 | "Enable vectorization of this loop with '#pragma clang loop " | |||
| 5876 | "vectorize(enable)' when compiling with -Os/-Oz", | |||
| 5877 | "NoTailLoopWithOptForSize", ORE, TheLoop); | |||
| 5878 | return FixedScalableVFPair::getNone(); | |||
| 5879 | } | |||
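
// Worked example of the no-tail test above, with hypothetical numbers: for
// a known trip count of 64, MaxFixedVF = 8 and UserIC = 2 the pass checks
// 64 urem (8 * 2) == 0, so no tail remains and tail folding is skipped;
// with a trip count of 60 the remainder is 12 and the pass tries to fold
// the tail by masking instead.
#if 0
static bool exampleNoTailRemains(unsigned TC, unsigned MaxVF, unsigned IC) {
  return TC % (MaxVF * IC) == 0; // e.g. 64 % (8 * 2) == 0 -> no tail
}
#endif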
| 5880 | ||||
| 5881 | ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget( | |||
| 5882 | unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType, | |||
| 5883 | const ElementCount &MaxSafeVF) { | |||
| 5884 | bool ComputeScalableMaxVF = MaxSafeVF.isScalable(); | |||
| 5885 | TypeSize WidestRegister = TTI.getRegisterBitWidth( | |||
| 5886 | ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector | |||
| 5887 | : TargetTransformInfo::RGK_FixedWidthVector); | |||
| 5888 | ||||
| 5889 | // Convenience function to return the minimum of two ElementCounts. | |||
| 5890 | auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) { | |||
| 5891 | assert((LHS.isScalable() == RHS.isScalable()) && | |||
| 5892 | "Scalable flags must match"); | |||
| 5893 | return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS; | |||
| 5894 | }; | |||
| 5895 | ||||
| 5896 | // Ensure MaxVF is a power of 2; the dependence distance bound may not be. | |||
| 5897 | // Note that both WidestRegister and WidestType may not be powers of 2. | |||
| 5898 | auto MaxVectorElementCount = ElementCount::get( | |||
| 5899 | PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType), | |||
| 5900 | ComputeScalableMaxVF); | |||
| 5901 | MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF); | |||
| 5902 | LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: " | |||
| 5903 | << (MaxVectorElementCount * WidestType) << " bits.\n"); | |||
| 5904 | ||||
| 5905 | if (!MaxVectorElementCount) { | |||
| 5906 | LLVM_DEBUG(dbgs() << "LV: The target has no " | |||
| 5907 | << (ComputeScalableMaxVF ? "scalable" : "fixed") | |||
| 5908 | << " vector registers.\n"); | |||
| 5909 | return ElementCount::getFixed(1); | |||
| 5910 | } | |||
| 5911 | ||||
| 5912 | const auto TripCountEC = ElementCount::getFixed(ConstTripCount); | |||
| 5913 | if (ConstTripCount && | |||
| 5914 | ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) && | |||
| 5915 | isPowerOf2_32(ConstTripCount)) { | |||
| 5916 | // We need to clamp the VF to be the ConstTripCount. There is no point in | |||
| 5917 | // choosing a higher viable VF as done in the loop below. If | |||
| 5918 | // MaxVectorElementCount is scalable, we only fall back on a fixed VF when | |||
| 5919 | // the TC is less than or equal to the known number of lanes. | |||
| 5920 | LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: " | |||
| 5921 | << ConstTripCount << "\n"); | |||
| 5922 | return TripCountEC; | |||
| 5923 | } | |||
| 5924 | ||||
| 5925 | ElementCount MaxVF = MaxVectorElementCount; | |||
| 5926 | if (TTI.shouldMaximizeVectorBandwidth() || | |||
| 5927 | (MaximizeBandwidth && isScalarEpilogueAllowed())) { | |||
| 5928 | auto MaxVectorElementCountMaxBW = ElementCount::get( | |||
| 5929 | PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType), | |||
| 5930 | ComputeScalableMaxVF); | |||
| 5931 | MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF); | |||
| 5932 | ||||
| 5933 | // Collect all viable vectorization factors larger than the default MaxVF | |||
| 5934 | // (i.e. MaxVectorElementCount). | |||
| 5935 | SmallVector<ElementCount, 8> VFs; | |||
| 5936 | for (ElementCount VS = MaxVectorElementCount * 2; | |||
| 5937 | ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2) | |||
| 5938 | VFs.push_back(VS); | |||
| 5939 | ||||
| 5940 | // For each VF calculate its register usage. | |||
| 5941 | auto RUs = calculateRegisterUsage(VFs); | |||
| 5942 | ||||
| 5943 | // Select the largest VF which doesn't require more registers than existing | |||
| 5944 | // ones. | |||
| 5945 | for (int i = RUs.size() - 1; i >= 0; --i) { | |||
| 5946 | bool Selected = true; | |||
| 5947 | for (auto &pair : RUs[i].MaxLocalUsers) { | |||
| 5948 | unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); | |||
| 5949 | if (pair.second > TargetNumRegisters) | |||
| 5950 | Selected = false; | |||
| 5951 | } | |||
| 5952 | if (Selected) { | |||
| 5953 | MaxVF = VFs[i]; | |||
| 5954 | break; | |||
| 5955 | } | |||
| 5956 | } | |||
| 5957 | if (ElementCount MinVF = | |||
| 5958 | TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) { | |||
| 5959 | if (ElementCount::isKnownLT(MaxVF, MinVF)) { | |||
| 5960 | LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF | |||
| 5961 | << ") with target's minimum: " << MinVF << '\n'); | |||
| 5962 | MaxVF = MinVF; | |||
| 5963 | } | |||
| 5964 | } | |||
| 5965 | } | |||
| 5966 | return MaxVF; | |||
| 5967 | } | |||
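
// Worked example of the default MaxVF choice above, with hypothetical
// numbers: for 128-bit vector registers and a widest loop type of 32 bits,
// MaxVectorElementCount = PowerOf2Floor(128 / 32) = 4. With SmallestType =
// 8 bits and bandwidth maximization enabled, candidates up to
// PowerOf2Floor(128 / 8) = 16 lanes are also costed against register
// pressure before one is selected.
#if 0
static unsigned exampleDefaultMaxVF(unsigned WidestRegisterBits,
                                    unsigned WidestTypeBits) {
  unsigned VF = 1;
  while (VF * 2 * WidestTypeBits <= WidestRegisterBits)
    VF *= 2; // power-of-two floor of WidestRegisterBits / WidestTypeBits
  return VF; // exampleDefaultMaxVF(128, 32) == 4
}
#endif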
| 5968 | ||||
| 5969 | bool LoopVectorizationCostModel::isMoreProfitable( | |||
| 5970 | const VectorizationFactor &A, const VectorizationFactor &B) const { | |||
| 5971 | InstructionCost CostA = A.Cost; | |||
| 5972 | InstructionCost CostB = B.Cost; | |||
| 5973 | ||||
| 5974 | unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop); | |||
| 5975 | ||||
| 5976 | if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking && | |||
| 5977 | MaxTripCount) { | |||
| 5978 | // If we are folding the tail and the trip count is a known (possibly small) | |||
| 5979 | // constant, the trip count will be rounded up to an integer number of | |||
| 5980 | // iterations. The total cost will be PerIterationCost*ceil(TripCount/VF), | |||
| 5981 | // which we compare directly. When not folding the tail, the total cost will | |||
| 5982 | // be PerIterationCost*floor(TC/VF) + Scalar remainder cost, and so is | |||
| 5983 | // approximated with the per-lane cost below instead of using the tripcount | |||
| 5984 | // as here. | |||
| 5985 | auto RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue()); | |||
| 5986 | auto RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue()); | |||
| 5987 | return RTCostA < RTCostB; | |||
| 5988 | } | |||
| 5989 | ||||
| 5990 | // When set to preferred, for now assume vscale may be larger than 1, so | |||
| 5991 | // that scalable vectorization is slightly favorable over fixed-width | |||
| 5992 | // vectorization. | |||
| 5993 | if (Hints->isScalableVectorizationPreferred()) | |||
| 5994 | if (A.Width.isScalable() && !B.Width.isScalable()) | |||
| 5995 | return (CostA * B.Width.getKnownMinValue()) <= | |||
| 5996 | (CostB * A.Width.getKnownMinValue()); | |||
| 5997 | ||||
| 5998 | // To avoid the need for FP division: | |||
| 5999 | // (CostA / A.Width) < (CostB / B.Width) | |||
| 6000 | // <=> (CostA * B.Width) < (CostB * A.Width) | |||
| 6001 | return (CostA * B.Width.getKnownMinValue()) < | |||
| 6002 | (CostB * A.Width.getKnownMinValue()); | |||
| 6003 | } | |||
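
// Worked example of the comparison above, with hypothetical costs: without
// tail folding, {VF=4, Cost=10} beats {VF=2, Cost=6} per lane because
// 10 * 2 < 6 * 4 (2.5 vs. 3.0 per element). With tail folding and a max
// trip count of 9, the totals are 10 * ceil(9/4) = 30 and 6 * ceil(9/2) =
// 30, a tie, so A is not considered more profitable.
#if 0
static bool exampleIsMoreProfitable(unsigned CostA, unsigned WidthA,
                                    unsigned CostB, unsigned WidthB) {
  // Cross-multiplied form of CostA/WidthA < CostB/WidthB, avoiding FP math.
  return CostA * WidthB < CostB * WidthA;
}
#endif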
| 6004 | ||||
| 6005 | VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor( | |||
| 6006 | const ElementCountSet &VFCandidates) { | |||
| 6007 | InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first; | |||
| 6008 | LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n"); | |||
| 6009 | assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop"); | |||
| 6010 | assert(VFCandidates.count(ElementCount::getFixed(1)) && | |||
| 6011 | "Expected Scalar VF to be a candidate"); | |||
| 6012 | ||||
| 6013 | const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost); | |||
| 6014 | VectorizationFactor ChosenFactor = ScalarCost; | |||
| 6015 | ||||
| 6016 | bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; | |||
| 6017 | if (ForceVectorization && VFCandidates.size() > 1) { | |||
| 6018 | // Ignore scalar width, because the user explicitly wants vectorization. | |||
| 6019 | // Initialize cost to max so that VF = 2 is, at least, chosen during cost | |||
| 6020 | // evaluation. | |||
| 6021 | ChosenFactor.Cost = InstructionCost::getMax(); | |||
| 6022 | } | |||
| 6023 | ||||
| 6024 | SmallVector<InstructionVFPair> InvalidCosts; | |||
| 6025 | for (const auto &i : VFCandidates) { | |||
| 6026 | // The cost for scalar VF=1 is already calculated, so ignore it. | |||
| 6027 | if (i.isScalar()) | |||
| 6028 | continue; | |||
| 6029 | ||||
| 6030 | VectorizationCostTy C = expectedCost(i, &InvalidCosts); | |||
| 6031 | VectorizationFactor Candidate(i, C.first); | |||
| 6032 | LLVM_DEBUG( | |||
| 6033 | dbgs() << "LV: Vector loop of width " << i << " costs: " | |||
| 6034 | << (Candidate.Cost / Candidate.Width.getKnownMinValue()) | |||
| 6035 | << (i.isScalable() ? " (assuming a minimum vscale of 1)" : "") | |||
| 6036 | << ".\n"); | |||
| 6037 | ||||
| 6038 | if (!C.second && !ForceVectorization) { | |||
| 6039 | LLVM_DEBUG( | |||
| 6040 | dbgs() << "LV: Not considering vector loop of width " << i | |||
| 6041 | << " because it will not generate any vector instructions.\n"); | |||
| 6042 | continue; | |||
| 6043 | } | |||
| 6044 | ||||
| 6045 | // If profitable, add it to the ProfitableVFs list. | |||
| 6046 | if (isMoreProfitable(Candidate, ScalarCost)) | |||
| 6047 | ProfitableVFs.push_back(Candidate); | |||
| 6048 | ||||
| 6049 | if (isMoreProfitable(Candidate, ChosenFactor)) | |||
| 6050 | ChosenFactor = Candidate; | |||
| 6051 | } | |||
| 6052 | ||||
| 6053 | // Emit a report of VFs with invalid costs in the loop. | |||
| 6054 | if (!InvalidCosts.empty()) { | |||
| 6055 | // Group the remarks per instruction, keeping the instruction order from | |||
| 6056 | // InvalidCosts. | |||
| 6057 | std::map<Instruction *, unsigned> Numbering; | |||
| 6058 | unsigned I = 0; | |||
| 6059 | for (auto &Pair : InvalidCosts) | |||
| 6060 | if (!Numbering.count(Pair.first)) | |||
| 6061 | Numbering[Pair.first] = I++; | |||
| 6062 | ||||
| 6063 | // Sort the list, first on instruction(number) then on VF. | |||
| 6064 | llvm::sort(InvalidCosts, | |||
| 6065 | [&Numbering](InstructionVFPair &A, InstructionVFPair &B) { | |||
| 6066 | if (Numbering[A.first] != Numbering[B.first]) | |||
| 6067 | return Numbering[A.first] < Numbering[B.first]; | |||
| 6068 | ElementCountComparator ECC; | |||
| 6069 | return ECC(A.second, B.second); | |||
| 6070 | }); | |||
| 6071 | ||||
| 6072 | // For a list of ordered instruction-vf pairs: | |||
| 6073 | // [(load, vf1), (load, vf2), (store, vf1)] | |||
| 6074 | // Group the instructions together to emit separate remarks for: | |||
| 6075 | // load (vf1, vf2) | |||
| 6076 | // store (vf1) | |||
| 6077 | auto Tail = ArrayRef<InstructionVFPair>(InvalidCosts); | |||
| 6078 | auto Subset = ArrayRef<InstructionVFPair>(); | |||
| 6079 | do { | |||
| 6080 | if (Subset.empty()) | |||
| 6081 | Subset = Tail.take_front(1); | |||
| 6082 | ||||
| 6083 | Instruction *I = Subset.front().first; | |||
| 6084 | ||||
| 6085 | // If the next instruction is different, or if there are no other pairs, | |||
| 6086 | // emit a remark for the collated subset. e.g. | |||
| 6087 | // [(load, vf1), (load, vf2)] | |||
| 6088 | // to emit: | |||
| 6089 | // remark: invalid costs for 'load' at VF=(vf1, vf2) | |||
| 6090 | if (Subset == Tail || Tail[Subset.size()].first != I) { | |||
| 6091 | std::string OutString; | |||
| 6092 | raw_string_ostream OS(OutString); | |||
| 6093 | assert(!Subset.empty() && "Unexpected empty range"); | |||
| 6094 | OS << "Instruction with invalid costs prevented vectorization at VF=("; | |||
| 6095 | for (auto &Pair : Subset) | |||
| 6096 | OS << (Pair.second == Subset.front().second ? "" : ", ") | |||
| 6097 | << Pair.second; | |||
| 6098 | OS << "):"; | |||
| 6099 | if (auto *CI = dyn_cast<CallInst>(I)) | |||
| 6100 | OS << " call to " << CI->getCalledFunction()->getName(); | |||
| 6101 | else | |||
| 6102 | OS << " " << I->getOpcodeName(); | |||
| 6103 | OS.flush(); | |||
| 6104 | reportVectorizationInfo(OutString, "InvalidCost", ORE, TheLoop, I); | |||
| 6105 | Tail = Tail.drop_front(Subset.size()); | |||
| 6106 | Subset = {}; | |||
| 6107 | } else | |||
| 6108 | // Grow the subset by one element | |||
| 6109 | Subset = Tail.take_front(Subset.size() + 1); | |||
| 6110 | } while (!Tail.empty()); | |||
| 6111 | } | |||
| 6112 | ||||
| 6113 | if (!EnableCondStoresVectorization && NumPredStores) { | |||
| 6114 | reportVectorizationFailure("There are conditional stores.", | |||
| 6115 | "store that is conditionally executed prevents vectorization", | |||
| 6116 | "ConditionalStore", ORE, TheLoop); | |||
| 6117 | ChosenFactor = ScalarCost; | |||
| 6118 | } | |||
| 6119 | ||||
| 6120 | LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() && | |||
| 6121 | ChosenFactor.Cost >= ScalarCost.Cost) dbgs() | |||
| 6122 | << "LV: Vectorization seems to be not beneficial, " | |||
| 6123 | << "but was forced by a user.\n"); | |||
| 6124 | LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n"); | |||
| 6125 | return ChosenFactor; | |||
| 6126 | } | |||
| 6127 | ||||
| 6128 | bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization( | |||
| 6129 | const Loop &L, ElementCount VF) const { | |||
| 6130 | // Cross iteration phis such as reductions need special handling and are | |||
| 6131 | // currently unsupported. | |||
| 6132 | if (any_of(L.getHeader()->phis(), [&](PHINode &Phi) { | |||
| 6133 | return Legal->isFirstOrderRecurrence(&Phi) || | |||
| 6134 | Legal->isReductionVariable(&Phi); | |||
| 6135 | })) | |||
| 6136 | return false; | |||
| 6137 | ||||
| 6138 | // Phis with uses outside of the loop require special handling and are | |||
| 6139 | // currently unsupported. | |||
| 6140 | for (auto &Entry : Legal->getInductionVars()) { | |||
| 6141 | // Look for uses of the value of the induction at the last iteration. | |||
| 6142 | Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch()); | |||
| 6143 | for (User *U : PostInc->users()) | |||
| 6144 | if (!L.contains(cast<Instruction>(U))) | |||
| 6145 | return false; | |||
| 6146 | // Look for uses of penultimate value of the induction. | |||
| 6147 | for (User *U : Entry.first->users()) | |||
| 6148 | if (!L.contains(cast<Instruction>(U))) | |||
| 6149 | return false; | |||
| 6150 | } | |||
| 6151 | ||||
| 6152 | // Induction variables that are widened require special handling that is | |||
| 6153 | // currently not supported. | |||
| 6154 | if (any_of(Legal->getInductionVars(), [&](auto &Entry) { | |||
| 6155 | return !(this->isScalarAfterVectorization(Entry.first, VF) || | |||
| 6156 | this->isProfitableToScalarize(Entry.first, VF)); | |||
| 6157 | })) | |||
| 6158 | return false; | |||
| 6159 | ||||
| 6160 | // Epilogue vectorization code has not been audited to ensure it handles | |||
| 6161 | // non-latch exits properly. It may be fine, but it needs to be audited | |||
| 6162 | // and tested. | |||
| 6163 | if (L.getExitingBlock() != L.getLoopLatch()) | |||
| 6164 | return false; | |||
| 6165 | ||||
| 6166 | return true; | |||
| 6167 | } | |||
| 6168 | ||||
| 6169 | bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable( | |||
| 6170 | const ElementCount VF) const { | |||
| 6171 | // FIXME: We need a much better cost-model to take different parameters such | |||
| 6172 | // as register pressure, code size increase and cost of extra branches into | |||
| 6173 | // account. For now we apply a very crude heuristic and only consider loops | |||
| 6174 | // with vectorization factors larger than a certain value. | |||
| 6175 | // We also consider epilogue vectorization unprofitable for targets that don't | |||
| 6176 | // consider interleaving beneficial (e.g. MVE). | |||
| 6177 | if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1) | |||
| 6178 | return false; | |||
| 6179 | if (VF.getFixedValue() >= EpilogueVectorizationMinVF) | |||
| 6180 | return true; | |||
| 6181 | return false; | |||
| 6182 | } | |||
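
// Minimal sketch of the heuristic above, assuming a hypothetical minimum-VF
// threshold (the real EpilogueVectorizationMinVF is a command-line option
// whose default is not restated here): epilogue vectorization is considered
// only when the target interleaves and the main VF is large enough.
#if 0
static bool exampleEpilogueProfitable(unsigned MainVF, unsigned MaxInterleave,
                                      unsigned MinVFThreshold) {
  return MaxInterleave > 1 && MainVF >= MinVFThreshold;
}
#endif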
| 6183 | ||||
| 6184 | VectorizationFactor | |||
| 6185 | LoopVectorizationCostModel::selectEpilogueVectorizationFactor( | |||
| 6186 | const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) { | |||
| 6187 | VectorizationFactor Result = VectorizationFactor::Disabled(); | |||
| 6188 | if (!EnableEpilogueVectorization) { | |||
| 6189 | LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n"); | |||
| 6190 | return Result; | |||
| 6191 | } | |||
| 6192 | ||||
| 6193 | if (!isScalarEpilogueAllowed()) { | |||
| 6194 | LLVM_DEBUG( | |||
| 6195 | dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is " | |||
| 6196 | "allowed.\n"); | |||
| 6197 | return Result; | |||
| 6198 | } | |||
| 6199 | ||||
| 6200 | // FIXME: This can be fixed for scalable vectors later, because at this stage | |||
| 6201 | // the LoopVectorizer will only consider vectorizing a loop with scalable | |||
| 6202 | // vectors when the loop has a hint to enable vectorization for a given VF. | |||
| 6203 | if (MainLoopVF.isScalable()) { | |||
| 6204 | LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization for scalable vectors not " | |||
| 6205 | "yet supported.\n"); | |||
| 6206 | return Result; | |||
| 6207 | } | |||
| 6208 | ||||
| 6209 | // Not really a cost consideration, but check for unsupported cases here to | |||
| 6210 | // simplify the logic. | |||
| 6211 | if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) { | |||
| 6212 | LLVM_DEBUG( | |||
| 6213 | dbgs() << "LEV: Unable to vectorize epilogue because the loop is " | |||
| 6214 | "not a supported candidate.\n"); | |||
| 6215 | return Result; | |||
| 6216 | } | |||
| 6217 | ||||
| 6218 | if (EpilogueVectorizationForceVF > 1) { | |||
| 6219 | LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n"); | |||
| 6220 | if (LVP.hasPlanWithVFs( | |||
| 6221 | {MainLoopVF, ElementCount::getFixed(EpilogueVectorizationForceVF)})) | |||
| 6222 | return {ElementCount::getFixed(EpilogueVectorizationForceVF), 0}; | |||
| 6223 | else { | |||
| 6224 | LLVM_DEBUG( | |||
| 6225 | dbgs() | |||
| 6226 | << "LEV: Epilogue vectorization forced factor is not viable.\n"); | |||
| 6227 | return Result; | |||
| 6228 | } | |||
| 6229 | } | |||
| 6230 | ||||
| 6231 | if (TheLoop->getHeader()->getParent()->hasOptSize() || | |||
| 6232 | TheLoop->getHeader()->getParent()->hasMinSize()) { | |||
| 6233 | LLVM_DEBUG( | |||
| 6234 | dbgs() | |||
| 6235 | << "LEV: Epilogue vectorization skipped due to opt for size.\n"); | |||
| 6236 | return Result; | |||
| 6237 | } | |||
| 6238 | ||||
| 6239 | if (!isEpilogueVectorizationProfitable(MainLoopVF)) | |||
| 6240 | return Result; | |||
| 6241 | ||||
| 6242 | for (auto &NextVF : ProfitableVFs) | |||
| 6243 | if (ElementCount::isKnownLT(NextVF.Width, MainLoopVF) && | |||
| 6244 | (Result.Width.getFixedValue() == 1 || | |||
| 6245 | isMoreProfitable(NextVF, Result)) && | |||
| 6246 | LVP.hasPlanWithVFs({MainLoopVF, NextVF.Width})) | |||
| 6247 | Result = NextVF; | |||
| 6248 | ||||
| 6249 | if (Result != VectorizationFactor::Disabled()) | |||
| 6250 | LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = " | |||
| 6251 | << Result.Width.getFixedValue() << "\n"); | |||
| 6252 | return Result; | |||
| 6253 | } | |||
| 6254 | ||||
| 6255 | std::pair<unsigned, unsigned> | |||
| 6256 | LoopVectorizationCostModel::getSmallestAndWidestTypes() { | |||
| 6257 | unsigned MinWidth = -1U; | |||
| 6258 | unsigned MaxWidth = 8; | |||
| 6259 | const DataLayout &DL = TheFunction->getParent()->getDataLayout(); | |||
| 6260 | for (Type *T : ElementTypesInLoop) { | |||
| 6261 | MinWidth = std::min<unsigned>( | |||
| 6262 | MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize()); | |||
| 6263 | MaxWidth = std::max<unsigned>( | |||
| 6264 | MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize()); | |||
| 6265 | } | |||
| 6266 | return {MinWidth, MaxWidth}; | |||
| 6267 | } | |||
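
// Worked example for the scan above, with a hypothetical loop: if the loop
// loads i8 values and accumulates them into an i32 reduction, then
// ElementTypesInLoop holds {i8, i32} and this returns {8, 32}. The widest
// type (32) bounds the default VF, while the smallest type (8) lets the
// bandwidth-maximizing path consider wider VFs.
#if 0
unsigned exampleWidenableLoop(const unsigned char *A, int N) {
  unsigned Sum = 0;          // i32 reduction -> MaxWidth >= 32
  for (int i = 0; i < N; ++i)
    Sum += A[i];             // i8 loads -> MinWidth == 8
  return Sum;
}
#endif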
| 6268 | ||||
| 6269 | void LoopVectorizationCostModel::collectElementTypesForWidening() { | |||
| 6270 | ElementTypesInLoop.clear(); | |||
| 6271 | // For each block. | |||
| 6272 | for (BasicBlock *BB : TheLoop->blocks()) { | |||
| 6273 | // For each instruction in the loop. | |||
| 6274 | for (Instruction &I : BB->instructionsWithoutDebug()) { | |||
| 6275 | Type *T = I.getType(); | |||
| 6276 | ||||
| 6277 | // Skip ignored values. | |||
| 6278 | if (ValuesToIgnore.count(&I)) | |||
| 6279 | continue; | |||
| 6280 | ||||
| 6281 | // Only examine Loads, Stores and PHINodes. | |||
| 6282 | if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I)) | |||
| 6283 | continue; | |||
| 6284 | ||||
| 6285 | // Examine PHI nodes that are reduction variables. Update the type to | |||
| 6286 | // account for the recurrence type. | |||
| 6287 | if (auto *PN = dyn_cast<PHINode>(&I)) { | |||
| 6288 | if (!Legal->isReductionVariable(PN)) | |||
| 6289 | continue; | |||
| 6290 | const RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[PN]; | |||
| 6291 | if (PreferInLoopReductions || useOrderedReductions(RdxDesc) || | |||
| 6292 | TTI.preferInLoopReduction(RdxDesc.getOpcode(), | |||
| 6293 | RdxDesc.getRecurrenceType(), | |||
| 6294 | TargetTransformInfo::ReductionFlags())) | |||
| 6295 | continue; | |||
| 6296 | T = RdxDesc.getRecurrenceType(); | |||
| 6297 | } | |||
| 6298 | ||||
| 6299 | // Examine the stored values. | |||
| 6300 | if (auto *ST = dyn_cast<StoreInst>(&I)) | |||
| 6301 | T = ST->getValueOperand()->getType(); | |||
| 6302 | ||||
| 6303 | // Ignore loaded pointer types and stored pointer types that are not | |||
| 6304 | // vectorizable. | |||
| 6305 | // | |||
| 6306 | // FIXME: The check here attempts to predict whether a load or store will | |||
| 6307 | // be vectorized. We only know this for certain after a VF has | |||
| 6308 | // been selected. Here, we assume that if an access can be | |||
| 6309 | // vectorized, it will be. We should also look at extending this | |||
| 6310 | // optimization to non-pointer types. | |||
| 6311 | // | |||
| 6312 | if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) && | |||
| 6313 | !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I)) | |||
| 6314 | continue; | |||
| 6315 | ||||
| 6316 | ElementTypesInLoop.insert(T); | |||
| 6317 | } | |||
| 6318 | } | |||
| 6319 | } | |||
| 6320 | ||||
| 6321 | unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF, | |||
| 6322 | unsigned LoopCost) { | |||
| 6323 | // -- The interleave heuristics -- | |||
| 6324 | // We interleave the loop in order to expose ILP and reduce the loop overhead. | |||
| 6325 | // There are many micro-architectural considerations that we can't predict | |||
| 6326 | // at this level. For example, frontend pressure (on decode or fetch) due to | |||
| 6327 | // code size, or the number and capabilities of the execution ports. | |||
| 6328 | // | |||
| 6329 | // We use the following heuristics to select the interleave count: | |||
| 6330 | // 1. If the code has reductions, then we interleave to break the cross | |||
| 6331 | // iteration dependency. | |||
| 6332 | // 2. If the loop is really small, then we interleave to reduce the loop | |||
| 6333 | // overhead. | |||
| 6334 | // 3. We don't interleave if we think that we will spill registers to memory | |||
| 6335 | // due to the increased register pressure. | |||
| 6336 | ||||
| 6337 | if (!isScalarEpilogueAllowed()) | |||
| 6338 | return 1; | |||
| 6339 | ||||
| 6340 | // The maximum safe dependence distance was already used to limit the VF, so do not interleave further. | |||
| 6341 | if (Legal->getMaxSafeDepDistBytes() != -1U) | |||
| 6342 | return 1; | |||
| 6343 | ||||
| 6344 | auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop); | |||
| 6345 | const bool HasReductions = !Legal->getReductionVars().empty(); | |||
| 6346 | // Do not interleave loops with a relatively small known or estimated trip | |||
| 6347 | // count. But we will interleave when InterleaveSmallLoopScalarReduction is | |||
| 6348 | // enabled, and the code has scalar reductions (HasReductions && VF == 1), | |||
| 6349 | // because under those conditions interleaving can expose ILP and break | |||
| 6350 | // cross-iteration dependences for reductions. | |||
| 6351 | if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) && | |||
| 6352 | !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar())) | |||
| 6353 | return 1; | |||
| 6354 | ||||
| 6355 | RegisterUsage R = calculateRegisterUsage({VF})[0]; | |||
| 6356 | // We divide by these counts below, so make sure each is at least one to | |||
| 6357 | // avoid dividing by zero. | |||
| 6358 | for (auto& pair : R.MaxLocalUsers) { | |||
| 6359 | pair.second = std::max(pair.second, 1U); | |||
| 6360 | } | |||
| 6361 | ||||
| 6362 | // We calculate the interleave count using the following formula. | |||
| 6363 | // Subtract the number of loop invariants from the number of available | |||
| 6364 | // registers. These registers are used by all of the interleaved instances. | |||
| 6365 | // Next, divide the remaining registers by the number of registers that is | |||
| 6366 | // required by the loop, in order to estimate how many parallel instances | |||
| 6367 | // fit without causing spills. All of this is rounded down if necessary to be | |||
| 6368 | // a power of two. We want power of two interleave count to simplify any | |||
| 6369 | // addressing operations or alignment considerations. | |||
| 6370 | // We also want power of two interleave counts to ensure that the induction | |||
| 6371 | // variable of the vector loop wraps to zero, when tail is folded by masking; | |||
| 6372 | // this currently happens when OptForSize, in which case IC is set to 1 above. | |||
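| | // For illustration only (hypothetical numbers): with 32 registers in a | |||
| | // class, 2 of them held by loop invariants, and at most 6 values of that | |||
| | // class live at once, (32 - 2) / 6 = 5 and PowerOf2Floor(5) = 4, so up to | |||
| | // 4 interleaved instances are assumed to fit without spilling. | |||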
| 6373 | unsigned IC = UINT_MAX; | |||
| 6374 | ||||
| 6375 | for (auto& pair : R.MaxLocalUsers) { | |||
| 6376 | unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); | |||
| 6377 | LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters | |||
| 6378 | << " registers of " | |||
| 6379 | << TTI.getRegisterClassName(pair.first) << " register class\n"); | |||
| 6380 | if (VF.isScalar()) { | |||
| 6381 | if (ForceTargetNumScalarRegs.getNumOccurrences() > 0) | |||
| 6382 | TargetNumRegisters = ForceTargetNumScalarRegs; | |||
| 6383 | } else { | |||
| 6384 | if (ForceTargetNumVectorRegs.getNumOccurrences() > 0) | |||
| 6385 | TargetNumRegisters = ForceTargetNumVectorRegs; | |||
| 6386 | } | |||
| 6387 | unsigned MaxLocalUsers = pair.second; | |||
| 6388 | unsigned LoopInvariantRegs = 0; | |||
| 6389 | if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end()) | |||
| 6390 | LoopInvariantRegs = R.LoopInvariantRegs[pair.first]; | |||
| 6391 | ||||
| 6392 | unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers); | |||
| 6393 | // Don't count the induction variable as interleaved. | |||
| 6394 | if (EnableIndVarRegisterHeur) { | |||
| 6395 | TmpIC = | |||
| 6396 | PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) / | |||
| 6397 | std::max(1U, (MaxLocalUsers - 1))); | |||
| 6398 | } | |||
| 6399 | ||||
| 6400 | IC = std::min(IC, TmpIC); | |||
| 6401 | } | |||
| 6402 | ||||
| 6403 | // Clamp the interleave ranges to reasonable counts. | |||
| 6404 | unsigned MaxInterleaveCount = | |||
| 6405 | TTI.getMaxInterleaveFactor(VF.getKnownMinValue()); | |||
| 6406 | ||||
| 6407 | // Check if the user has overridden the max. | |||
| 6408 | if (VF.isScalar()) { | |||
| 6409 | if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0) | |||
| 6410 | MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor; | |||
| 6411 | } else { | |||
| 6412 | if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0) | |||
| 6413 | MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor; | |||
| 6414 | } | |||
| 6415 | ||||
| 6416 | // If the trip count is a known or estimated compile-time constant, limit the | |||
| 6417 | // interleave count to be less than the trip count divided by VF, provided it | |||
| 6418 | // is at least 1. | |||
| 6419 | // | |||
| 6420 | // For scalable vectors we can't know if interleaving is beneficial. It may | |||
| 6421 | // not be beneficial for small loops if none of the lanes in the second vector | |||
| 6422 | // iteration is enabled. However, for larger loops, there is likely to be a | |||
| 6423 | // similar benefit as for fixed-width vectors. For now, we choose to leave | |||
| 6424 | // the InterleaveCount as if vscale is '1', although if some information about | |||
| 6425 | // the vector is known (e.g. min vector size), we can make a better decision. | |||
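| | // For example, with an estimated trip count of 16 and VF 4 (illustrative | |||
| | // numbers only), MaxInterleaveCount is capped at 16 / 4 = 4. | |||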
| 6426 | if (BestKnownTC) { | |||
| 6427 | MaxInterleaveCount = | |||
| 6428 | std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount); | |||
| 6429 | // Make sure MaxInterleaveCount is greater than 0. | |||
| 6430 | MaxInterleaveCount = std::max(1u, MaxInterleaveCount); | |||
| 6431 | } | |||
| 6432 | ||||
| 6433 | assert(MaxInterleaveCount > 0 && | |||
| 6434 | "Maximum interleave count must be greater than 0"); | |||
| 6435 | ||||
| 6436 | // Clamp the calculated IC to be between the 1 and the max interleave count | |||
| 6437 | // that the target and trip count allows. | |||
| 6438 | if (IC > MaxInterleaveCount) | |||
| 6439 | IC = MaxInterleaveCount; | |||
| 6440 | else | |||
| 6441 | // Make sure IC is greater than 0. | |||
| 6442 | IC = std::max(1u, IC); | |||
| 6443 | ||||
| 6444 | assert(IC > 0 && "Interleave count must be greater than 0."); | |||
| 6445 | ||||
| 6446 | // If we did not calculate the cost for VF (because the user selected the VF) | |||
| 6447 | // then we calculate the cost of VF here. | |||
| 6448 | if (LoopCost == 0) { | |||
| 6449 | InstructionCost C = expectedCost(VF).first; | |||
| 6450 | assert(C.isValid() && "Expected to have chosen a VF with valid cost"); | |||
| 6451 | LoopCost = *C.getValue(); | |||
| 6452 | } | |||
| 6453 | ||||
| 6454 | assert(LoopCost && "Non-zero loop cost expected"); | |||
| 6455 | ||||
| 6456 | // Interleave if we vectorized this loop and there is a reduction that could | |||
| 6457 | // benefit from interleaving. | |||
| 6458 | if (VF.isVector() && HasReductions) { | |||
| 6459 | LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n"); | |||
| 6460 | return IC; | |||
| 6461 | } | |||
| 6462 | ||||
| 6463 | // Note that if we've already vectorized the loop we will have done the | |||
| 6464 | // runtime check and so interleaving won't require further checks. | |||
| 6465 | bool InterleavingRequiresRuntimePointerCheck = | |||
| 6466 | (VF.isScalar() && Legal->getRuntimePointerChecking()->Need); | |||
| 6467 | ||||
| 6468 | // We want to interleave small loops in order to reduce the loop overhead and | |||
| 6469 | // potentially expose ILP opportunities. | |||
| 6470 | LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n' | |||
| 6471 | << "LV: IC is " << IC << '\n' | |||
| 6472 | << "LV: VF is " << VF << '\n'); | |||
| 6473 | const bool AggressivelyInterleaveReductions = | |||
| 6474 | TTI.enableAggressiveInterleaving(HasReductions); | |||
| 6475 | if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) { | |||
| 6476 | // We assume that the cost overhead is 1 and we use the cost model | |||
| 6477 | // to estimate the cost of the loop and interleave until the cost of the | |||
| 6478 | // loop overhead is about 5% of the cost of the loop. | |||
| 6479 | unsigned SmallIC = | |||
| 6480 | std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost)); | |||
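| | // E.g., assuming the default SmallLoopCost of 20 and a computed LoopCost | |||
| | // of 6, PowerOf2Floor(20 / 6) = 2, so this path interleaves at most | |||
| | // two-way, subject to the further clamps below. | |||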
| 6481 | ||||
| 6482 | // Interleave until store/load ports (estimated by max interleave count) are | |||
| 6483 | // saturated. | |||
| 6484 | unsigned NumStores = Legal->getNumStores(); | |||
| 6485 | unsigned NumLoads = Legal->getNumLoads(); | |||
| 6486 | unsigned StoresIC = IC / (NumStores ? NumStores : 1); | |||
| 6487 | unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1); | |||
| 6488 | ||||
| 6489 | // If we have a scalar reduction (vector reductions are already dealt with | |||
| 6490 | // by this point), we can increase the critical path length if the loop | |||
| 6491 | // we're interleaving is inside another loop. For tree-wise reductions | |||
| 6492 | // set the limit to 2, and for ordered reductions it's best to disable | |||
| 6493 | // interleaving entirely. | |||
| 6494 | if (HasReductions && TheLoop->getLoopDepth() > 1) { | |||
| 6495 | bool HasOrderedReductions = | |||
| 6496 | any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { | |||
| 6497 | const RecurrenceDescriptor &RdxDesc = Reduction.second; | |||
| 6498 | return RdxDesc.isOrdered(); | |||
| 6499 | }); | |||
| 6500 | if (HasOrderedReductions) { | |||
| 6501 | LLVM_DEBUG( | |||
| 6502 | dbgs() << "LV: Not interleaving scalar ordered reductions.\n"); | |||
| 6503 | return 1; | |||
| 6504 | } | |||
| 6505 | ||||
| 6506 | unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC); | |||
| 6507 | SmallIC = std::min(SmallIC, F); | |||
| 6508 | StoresIC = std::min(StoresIC, F); | |||
| 6509 | LoadsIC = std::min(LoadsIC, F); | |||
| 6510 | } | |||
| 6511 | ||||
| 6512 | if (EnableLoadStoreRuntimeInterleave && | |||
| 6513 | std::max(StoresIC, LoadsIC) > SmallIC) { | |||
| 6514 | LLVM_DEBUG( | |||
| 6515 | dbgs() << "LV: Interleaving to saturate store or load ports.\n"); | |||
| 6516 | return std::max(StoresIC, LoadsIC); | |||
| 6517 | } | |||
| 6518 | ||||
| 6519 | // If there are scalar reductions and TTI has enabled aggressive | |||
| 6520 | // interleaving for reductions, we will interleave to expose ILP. | |||
| 6521 | if (InterleaveSmallLoopScalarReduction && VF.isScalar() && | |||
| 6522 | AggressivelyInterleaveReductions) { | |||
| 6523 | LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n"); | |||
| 6524 | // Interleave no less than SmallIC, but not as aggressively as the normal | |||
| 6525 | // IC, to cope with the rare situation where resources are too limited. | |||
| 6526 | return std::max(IC / 2, SmallIC); | |||
| 6527 | } else { | |||
| 6528 | LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n"); | |||
| 6529 | return SmallIC; | |||
| 6530 | } | |||
| 6531 | } | |||
| 6532 | ||||
| 6533 | // Interleave if this is a large loop (small loops are already dealt with by | |||
| 6534 | // this point) that could benefit from interleaving. | |||
| 6535 | if (AggressivelyInterleaveReductions) { | |||
| 6536 | LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n"); | |||
| 6537 | return IC; | |||
| 6538 | } | |||
| 6539 | ||||
| 6540 | LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n"); | |||
| 6541 | return 1; | |||
| 6542 | } | |||
| 6543 | ||||
| 6544 | SmallVector<LoopVectorizationCostModel::RegisterUsage, 8> | |||
| 6545 | LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) { | |||
| 6546 | // This function calculates the register usage by measuring the highest number | |||
| 6547 | // of values that are alive at a single location. Obviously, this is a very | |||
| 6548 | // rough estimation. We scan the loop in topological order and | |||
| 6549 | // assign a number to each instruction. We use RPO to ensure that defs are | |||
| 6550 | // met before their users. We assume that each instruction that has in-loop | |||
| 6551 | // users starts an interval. We record every time that an in-loop value is | |||
| 6552 | // used, so we have a list of the first and last occurrences of each | |||
| 6553 | // instruction. Next, we transpose this data structure into a multi map that | |||
| 6554 | // holds the list of intervals that *end* at a specific location. This multi | |||
| 6555 | // map allows us to perform a linear search. We scan the instructions linearly | |||
| 6556 | // and record each time that a new interval starts, by placing it in a set. | |||
| 6557 | // If we find this value in the multi-map then we remove it from the set. | |||
| 6558 | // The max register usage is the maximum size of the set. | |||
| 6559 | // We also search for instructions that are defined outside the loop, but are | |||
| 6560 | // used inside the loop. We need this number separately from the max-interval | |||
| 6561 | // usage number because when we unroll, loop-invariant values do not take | |||
| 6562 | // more registers. | |||
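| | // A purely illustrative sketch of the interval scan on three instructions: | |||
| | //   #1: %a = load ...       ; %a has in-loop users, its interval opens | |||
| | //   #2: %b = add %a, 1      ; a use of %a is recorded; %b's interval opens | |||
| | //   #3: %c = mul %a, %b     ; %a and %b are both still live here | |||
| | // At #3 two intervals of the same register class are open, so the rough | |||
| | // estimate for that class is two registers. | |||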
| 6563 | LoopBlocksDFS DFS(TheLoop); | |||
| 6564 | DFS.perform(LI); | |||
| 6565 | ||||
| 6566 | RegisterUsage RU; | |||
| 6567 | ||||
| 6568 | // Each 'key' in the map opens a new interval. The values | |||
| 6569 | // of the map are the index of the 'last seen' usage of the | |||
| 6570 | // instruction that is the key. | |||
| 6571 | using IntervalMap = DenseMap<Instruction *, unsigned>; | |||
| 6572 | ||||
| 6573 | // Maps an index to its instruction. | |||
| 6574 | SmallVector<Instruction *, 64> IdxToInstr; | |||
| 6575 | // Marks the end of each interval. | |||
| 6576 | IntervalMap EndPoint; | |||
| 6577 | // Saves the set of instructions that are used in the loop. | |||
| 6578 | SmallPtrSet<Instruction *, 8> Ends; | |||
| 6579 | // Saves the list of values that are used in the loop but are | |||
| 6580 | // defined outside the loop, such as arguments and constants. | |||
| 6581 | SmallPtrSet<Value *, 8> LoopInvariants; | |||
| 6582 | ||||
| 6583 | for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { | |||
| 6584 | for (Instruction &I : BB->instructionsWithoutDebug()) { | |||
| 6585 | IdxToInstr.push_back(&I); | |||
| 6586 | ||||
| 6587 | // Save the end location of each USE. | |||
| 6588 | for (Value *U : I.operands()) { | |||
| 6589 | auto *Instr = dyn_cast<Instruction>(U); | |||
| 6590 | ||||
| 6591 | // Ignore non-instruction values such as arguments, constants, etc. | |||
| 6592 | if (!Instr) | |||
| 6593 | continue; | |||
| 6594 | ||||
| 6595 | // If this instruction is outside the loop then record it and continue. | |||
| 6596 | if (!TheLoop->contains(Instr)) { | |||
| 6597 | LoopInvariants.insert(Instr); | |||
| 6598 | continue; | |||
| 6599 | } | |||
| 6600 | ||||
| 6601 | // Overwrite previous end points. | |||
| 6602 | EndPoint[Instr] = IdxToInstr.size(); | |||
| 6603 | Ends.insert(Instr); | |||
| 6604 | } | |||
| 6605 | } | |||
| 6606 | } | |||
| 6607 | ||||
| 6608 | // Saves the list of intervals that end with the index in 'key'. | |||
| 6609 | using InstrList = SmallVector<Instruction *, 2>; | |||
| 6610 | DenseMap<unsigned, InstrList> TransposeEnds; | |||
| 6611 | ||||
| 6612 | // Transpose the EndPoints to a list of values that end at each index. | |||
| 6613 | for (auto &Interval : EndPoint) | |||
| 6614 | TransposeEnds[Interval.second].push_back(Interval.first); | |||
| 6615 | ||||
| 6616 | SmallPtrSet<Instruction *, 8> OpenIntervals; | |||
| 6617 | SmallVector<RegisterUsage, 8> RUs(VFs.size()); | |||
| 6618 | SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size()); | |||
| 6619 | ||||
| 6620 | LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n"); | |||
| 6621 | ||||
| 6622 | // A lambda that gets the register usage for the given type and VF. | |||
| 6623 | const auto &TTICapture = TTI; | |||
| 6624 | auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) -> unsigned { | |||
| 6625 | if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty)) | |||
| 6626 | return 0; | |||
| 6627 | InstructionCost::CostType RegUsage = | |||
| 6628 | *TTICapture.getRegUsageForType(VectorType::get(Ty, VF)).getValue(); | |||
| 6629 | assert(RegUsage >= 0 && RegUsage <= std::numeric_limits<unsigned>::max() && | |||
| 6630 | "Nonsensical values for register usage."); | |||
| 6631 | return RegUsage; | |||
| 6632 | }; | |||
| 6633 | ||||
| 6634 | for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) { | |||
| 6635 | Instruction *I = IdxToInstr[i]; | |||
| 6636 | ||||
| 6637 | // Remove all of the instructions that end at this location. | |||
| 6638 | InstrList &List = TransposeEnds[i]; | |||
| 6639 | for (Instruction *ToRemove : List) | |||
| 6640 | OpenIntervals.erase(ToRemove); | |||
| 6641 | ||||
| 6642 | // Ignore instructions that are never used within the loop. | |||
| 6643 | if (!Ends.count(I)) | |||
| 6644 | continue; | |||
| 6645 | ||||
| 6646 | // Skip ignored values. | |||
| 6647 | if (ValuesToIgnore.count(I)) | |||
| 6648 | continue; | |||
| 6649 | ||||
| 6650 | // For each VF find the maximum usage of registers. | |||
| 6651 | for (unsigned j = 0, e = VFs.size(); j < e; ++j) { | |||
| 6652 | // Count the number of live intervals. | |||
| 6653 | SmallMapVector<unsigned, unsigned, 4> RegUsage; | |||
| 6654 | ||||
| 6655 | if (VFs[j].isScalar()) { | |||
| 6656 | for (auto Inst : OpenIntervals) { | |||
| 6657 | unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); | |||
| 6658 | if (RegUsage.find(ClassID) == RegUsage.end()) | |||
| 6659 | RegUsage[ClassID] = 1; | |||
| 6660 | else | |||
| 6661 | RegUsage[ClassID] += 1; | |||
| 6662 | } | |||
| 6663 | } else { | |||
| 6664 | collectUniformsAndScalars(VFs[j]); | |||
| 6665 | for (auto Inst : OpenIntervals) { | |||
| 6666 | // Skip ignored values for VF > 1. | |||
| 6667 | if (VecValuesToIgnore.count(Inst)) | |||
| 6668 | continue; | |||
| 6669 | if (isScalarAfterVectorization(Inst, VFs[j])) { | |||
| 6670 | unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); | |||
| 6671 | if (RegUsage.find(ClassID) == RegUsage.end()) | |||
| 6672 | RegUsage[ClassID] = 1; | |||
| 6673 | else | |||
| 6674 | RegUsage[ClassID] += 1; | |||
| 6675 | } else { | |||
| 6676 | unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType()); | |||
| 6677 | if (RegUsage.find(ClassID) == RegUsage.end()) | |||
| 6678 | RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]); | |||
| 6679 | else | |||
| 6680 | RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]); | |||
| 6681 | } | |||
| 6682 | } | |||
| 6683 | } | |||
| 6684 | ||||
| 6685 | for (auto& pair : RegUsage) { | |||
| 6686 | if (MaxUsages[j].find(pair.first) != MaxUsages[j].end()) | |||
| 6687 | MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second); | |||
| 6688 | else | |||
| 6689 | MaxUsages[j][pair.first] = pair.second; | |||
| 6690 | } | |||
| 6691 | } | |||
| 6692 | ||||
| 6693 | LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " | |||
| 6694 | << OpenIntervals.size() << '\n'); | |||
| 6695 | ||||
| 6696 | // Add the current instruction to the list of open intervals. | |||
| 6697 | OpenIntervals.insert(I); | |||
| 6698 | } | |||
| 6699 | ||||
| 6700 | for (unsigned i = 0, e = VFs.size(); i < e; ++i) { | |||
| 6701 | SmallMapVector<unsigned, unsigned, 4> Invariant; | |||
| 6702 | ||||
| 6703 | for (auto Inst : LoopInvariants) { | |||
| 6704 | unsigned Usage = | |||
| 6705 | VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]); | |||
| 6706 | unsigned ClassID = | |||
| 6707 | TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType()); | |||
| 6708 | if (Invariant.find(ClassID) == Invariant.end()) | |||
| 6709 | Invariant[ClassID] = Usage; | |||
| 6710 | else | |||
| 6711 | Invariant[ClassID] += Usage; | |||
| 6712 | } | |||
| 6713 | ||||
| 6714 | LLVM_DEBUG({ | |||
| 6715 | dbgs() << "LV(REG): VF = " << VFs[i] << '\n'; | |||
| 6716 | dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size() | |||
| 6717 | << " item\n"; | |||
| 6718 | for (const auto &pair : MaxUsages[i]) { | |||
| 6719 | dbgs() << "LV(REG): RegisterClass: " | |||
| 6720 | << TTI.getRegisterClassName(pair.first) << ", " << pair.second | |||
| 6721 | << " registers\n"; | |||
| 6722 | } | |||
| 6723 | dbgs() << "LV(REG): Found invariant usage: " << Invariant.size() | |||
| 6724 | << " item\n"; | |||
| 6725 | for (const auto &pair : Invariant) { | |||
| 6726 | dbgs() << "LV(REG): RegisterClass: " | |||
| 6727 | << TTI.getRegisterClassName(pair.first) << ", " << pair.second | |||
| 6728 | << " registers\n"; | |||
| 6729 | } | |||
| 6730 | }); | |||
| 6731 | ||||
| 6732 | RU.LoopInvariantRegs = Invariant; | |||
| 6733 | RU.MaxLocalUsers = MaxUsages[i]; | |||
| 6734 | RUs[i] = RU; | |||
| 6735 | } | |||
| 6736 | ||||
| 6737 | return RUs; | |||
| 6738 | } | |||
| 6739 | ||||
| 6740 | bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I) { | |||
| 6741 | // TODO: Cost model for emulated masked load/store is completely | |||
| 6742 | // broken. This hack guides the cost model to use an artificially | |||
| 6743 | // high enough value to practically disable vectorization with such | |||
| 6744 | // operations, except where previously deployed legality hack allowed | |||
| 6745 | // using very low cost values. This is to avoid regressions coming simply | |||
| 6746 | // from moving "masked load/store" check from legality to cost model. | |||
| 6747 | // Masked Load/Gather emulation was previously never allowed. | |||
| 6748 | // A limited amount of Masked Store/Scatter emulation was allowed. | |||
| 6749 | assert(isPredicatedInst(I) && | |||
| 6750 | "Expecting a scalar emulated instruction"); | |||
| 6751 | return isa<LoadInst>(I) || | |||
| 6752 | (isa<StoreInst>(I) && | |||
| 6753 | NumPredStores > NumberOfStoresToPredicate); | |||
| 6754 | } | |||
| 6755 | ||||
| 6756 | void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) { | |||
| 6757 | // If we aren't vectorizing the loop, or if we've already collected the | |||
| 6758 | // instructions to scalarize, there's nothing to do. Collection may already | |||
| 6759 | // have occurred if we have a user-selected VF and are now computing the | |||
| 6760 | // expected cost for interleaving. | |||
| 6761 | if (VF.isScalar() || VF.isZero() || | |||
| 6762 | InstsToScalarize.find(VF) != InstsToScalarize.end()) | |||
| 6763 | return; | |||
| 6764 | ||||
| 6765 | // Initialize a mapping for VF in InstsToScalarize. If we find that it's | |||
| 6766 | // not profitable to scalarize any instructions, the presence of VF in the | |||
| 6767 | // map will indicate that we've analyzed it already. | |||
| 6768 | ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF]; | |||
| 6769 | ||||
| 6770 | // Find all the instructions that are scalar with predication in the loop and | |||
| 6771 | // determine if it would be better to not if-convert the blocks they are in. | |||
| 6772 | // If so, we also record the instructions to scalarize. | |||
| 6773 | for (BasicBlock *BB : TheLoop->blocks()) { | |||
| 6774 | if (!blockNeedsPredication(BB)) | |||
| 6775 | continue; | |||
| 6776 | for (Instruction &I : *BB) | |||
| 6777 | if (isScalarWithPredication(&I)) { | |||
| 6778 | ScalarCostsTy ScalarCosts; | |||
| 6779 | // Do not apply discount if scalable, because that would lead to | |||
| 6780 | // invalid scalarization costs. | |||
| 6781 | // Do not apply discount logic if hacked cost is needed | |||
| 6782 | // for emulated masked memrefs. | |||
| 6783 | if (!VF.isScalable() && !useEmulatedMaskMemRefHack(&I) && | |||
| 6784 | computePredInstDiscount(&I, ScalarCosts, VF) >= 0) | |||
| 6785 | ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end()); | |||
| 6786 | // Remember that BB will remain after vectorization. | |||
| 6787 | PredicatedBBsAfterVectorization.insert(BB); | |||
| 6788 | } | |||
| 6789 | } | |||
| 6790 | } | |||
| 6791 | ||||
| 6792 | int LoopVectorizationCostModel::computePredInstDiscount( | |||
| 6793 | Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) { | |||
| 6794 | assert(!isUniformAfterVectorization(PredInst, VF) && | |||
| 6795 | "Instruction marked uniform-after-vectorization will be predicated"); | |||
| 6796 | ||||
| 6797 | // Initialize the discount to zero, meaning that the scalar version and the | |||
| 6798 | // vector version cost the same. | |||
| 6799 | InstructionCost Discount = 0; | |||
| 6800 | ||||
| 6801 | // Holds instructions to analyze. The instructions we visit are mapped in | |||
| 6802 | // ScalarCosts. Those instructions are the ones that would be scalarized if | |||
| 6803 | // we find that the scalar version costs less. | |||
| 6804 | SmallVector<Instruction *, 8> Worklist; | |||
| 6805 | ||||
| 6806 | // Returns true if the given instruction can be scalarized. | |||
| 6807 | auto canBeScalarized = [&](Instruction *I) -> bool { | |||
| 6808 | // We only attempt to scalarize instructions forming a single-use chain | |||
| 6809 | // from the original predicated block that would otherwise be vectorized. | |||
| 6810 | // Although not strictly necessary, we give up on instructions we know will | |||
| 6811 | // already be scalar to avoid traversing chains that are unlikely to be | |||
| 6812 | // beneficial. | |||
| 6813 | if (!I->hasOneUse() || PredInst->getParent() != I->getParent() || | |||
| 6814 | isScalarAfterVectorization(I, VF)) | |||
| 6815 | return false; | |||
| 6816 | ||||
| 6817 | // If the instruction is scalar with predication, it will be analyzed | |||
| 6818 | // separately. We ignore it within the context of PredInst. | |||
| 6819 | if (isScalarWithPredication(I)) | |||
| 6820 | return false; | |||
| 6821 | ||||
| 6822 | // If any of the instruction's operands are uniform after vectorization, | |||
| 6823 | // the instruction cannot be scalarized. This prevents, for example, a | |||
| 6824 | // masked load from being scalarized. | |||
| 6825 | // | |||
| 6826 | // We assume we will only emit a value for lane zero of an instruction | |||
| 6827 | // marked uniform after vectorization, rather than VF identical values. | |||
| 6828 | // Thus, if we scalarize an instruction that uses a uniform, we would | |||
| 6829 | // create uses of values corresponding to the lanes we aren't emitting code | |||
| 6830 | // for. This behavior can be changed by allowing getScalarValue to clone | |||
| 6831 | // the lane zero values for uniforms rather than asserting. | |||
| 6832 | for (Use &U : I->operands()) | |||
| 6833 | if (auto *J = dyn_cast<Instruction>(U.get())) | |||
| 6834 | if (isUniformAfterVectorization(J, VF)) | |||
| 6835 | return false; | |||
| 6836 | ||||
| 6837 | // Otherwise, we can scalarize the instruction. | |||
| 6838 | return true; | |||
| 6839 | }; | |||
| 6840 | ||||
| 6841 | // Compute the expected cost discount from scalarizing the entire expression | |||
| 6842 | // feeding the predicated instruction. We currently only consider expressions | |||
| 6843 | // that are single-use instruction chains. | |||
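| | // For instance (hypothetical IR), in a predicated block containing | |||
| | //   %t = add i32 %x, 1 | |||
| | //   %d = udiv i32 %y, %t   ; the predicated instruction | |||
| | // %t has a single use feeding %d, so the chain {%t, %d} is evaluated as if | |||
| | // it remained scalar inside the predicated block. | |||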
| 6844 | Worklist.push_back(PredInst); | |||
| 6845 | while (!Worklist.empty()) { | |||
| 6846 | Instruction *I = Worklist.pop_back_val(); | |||
| 6847 | ||||
| 6848 | // If we've already analyzed the instruction, there's nothing to do. | |||
| 6849 | if (ScalarCosts.find(I) != ScalarCosts.end()) | |||
| 6850 | continue; | |||
| 6851 | ||||
| 6852 | // Compute the cost of the vector instruction. Note that this cost already | |||
| 6853 | // includes the scalarization overhead of the predicated instruction. | |||
| 6854 | InstructionCost VectorCost = getInstructionCost(I, VF).first; | |||
| 6855 | ||||
| 6856 | // Compute the cost of the scalarized instruction. This cost is the cost of | |||
| 6857 | // the instruction as if it wasn't if-converted and instead remained in the | |||
| 6858 | // predicated block. We will scale this cost by block probability after | |||
| 6859 | // computing the scalarization overhead. | |||
| 6860 | InstructionCost ScalarCost = | |||
| 6861 | VF.getFixedValue() * | |||
| 6862 | getInstructionCost(I, ElementCount::getFixed(1)).first; | |||
| 6863 | ||||
| 6864 | // Compute the scalarization overhead of needed insertelement instructions | |||
| 6865 | // and phi nodes. | |||
| 6866 | if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) { | |||
| 6867 | ScalarCost += TTI.getScalarizationOverhead( | |||
| 6868 | cast<VectorType>(ToVectorTy(I->getType(), VF)), | |||
| 6869 | APInt::getAllOnesValue(VF.getFixedValue()), true, false); | |||
| 6870 | ScalarCost += | |||
| 6871 | VF.getFixedValue() * | |||
| 6872 | TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput); | |||
| 6873 | } | |||
| 6874 | ||||
| 6875 | // Compute the scalarization overhead of needed extractelement | |||
| 6876 | // instructions. For each of the instruction's operands, if the operand can | |||
| 6877 | // be scalarized, add it to the worklist; otherwise, account for the | |||
| 6878 | // overhead. | |||
| 6879 | for (Use &U : I->operands()) | |||
| 6880 | if (auto *J = dyn_cast<Instruction>(U.get())) { | |||
| 6881 | assert(VectorType::isValidElementType(J->getType()) && | |||
| 6882 | "Instruction has non-scalar type"); | |||
| 6883 | if (canBeScalarized(J)) | |||
| 6884 | Worklist.push_back(J); | |||
| 6885 | else if (needsExtract(J, VF)) { | |||
| 6886 | ScalarCost += TTI.getScalarizationOverhead( | |||
| 6887 | cast<VectorType>(ToVectorTy(J->getType(), VF)), | |||
| 6888 | APInt::getAllOnesValue(VF.getFixedValue()), false, true); | |||
| 6889 | } | |||
| 6890 | } | |||
| 6891 | ||||
| 6892 | // Scale the total scalar cost by block probability. | |||
| 6893 | ScalarCost /= getReciprocalPredBlockProb(); | |||
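| | // E.g., getReciprocalPredBlockProb() currently returns 2 (a predicated | |||
| | // block is assumed to execute on about half the iterations), so a raw | |||
| | // scalar cost of 8 is scaled down to 4. | |||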
| 6894 | ||||
| 6895 | // Compute the discount. A non-negative discount means the vector version | |||
| 6896 | // of the instruction costs more, and scalarizing would be beneficial. | |||
| 6897 | Discount += VectorCost - ScalarCost; | |||
| 6898 | ScalarCosts[I] = ScalarCost; | |||
| 6899 | } | |||
| 6900 | ||||
| 6901 | return *Discount.getValue(); | |||
| 6902 | } | |||
| 6903 | ||||
| 6904 | LoopVectorizationCostModel::VectorizationCostTy | |||
| 6905 | LoopVectorizationCostModel::expectedCost( | |||
| 6906 | ElementCount VF, SmallVectorImpl<InstructionVFPair> *Invalid) { | |||
| 6907 | VectorizationCostTy Cost; | |||
| 6908 | ||||
| 6909 | // For each block. | |||
| 6910 | for (BasicBlock *BB : TheLoop->blocks()) { | |||
| 6911 | VectorizationCostTy BlockCost; | |||
| 6912 | ||||
| 6913 | // For each instruction in the old loop. | |||
| 6914 | for (Instruction &I : BB->instructionsWithoutDebug()) { | |||
| 6915 | // Skip ignored values. | |||
| 6916 | if (ValuesToIgnore.count(&I) || | |||
| 6917 | (VF.isVector() && VecValuesToIgnore.count(&I))) | |||
| 6918 | continue; | |||
| 6919 | ||||
| 6920 | VectorizationCostTy C = getInstructionCost(&I, VF); | |||
| 6921 | ||||
| 6922 | // Check if we should override the cost. | |||
| 6923 | if (C.first.isValid() && | |||
| 6924 | ForceTargetInstructionCost.getNumOccurrences() > 0) | |||
| 6925 | C.first = InstructionCost(ForceTargetInstructionCost); | |||
| 6926 | ||||
| 6927 | // Keep a list of instructions with invalid costs. | |||
| 6928 | if (Invalid && !C.first.isValid()) | |||
| 6929 | Invalid->emplace_back(&I, VF); | |||
| 6930 | ||||
| 6931 | BlockCost.first += C.first; | |||
| 6932 | BlockCost.second |= C.second; | |||
| 6933 | LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first | |||
| 6934 | << " for VF " << VF << " For instruction: " << I | |||
| 6935 | << '\n'); | |||
| 6936 | } | |||
| 6937 | ||||
| 6938 | // If we are vectorizing a predicated block, it will have been | |||
| 6939 | // if-converted. This means that the block's instructions (aside from | |||
| 6940 | // stores and instructions that may divide by zero) will now be | |||
| 6941 | // unconditionally executed. For the scalar case, we may not always execute | |||
| 6942 | // the predicated block, if it is an if-else block. Thus, scale the block's | |||
| 6943 | // cost by the probability of executing it. blockNeedsPredication from | |||
| 6944 | // Legal is used so as to not include all blocks in tail folded loops. | |||
| 6945 | if (VF.isScalar() && Legal->blockNeedsPredication(BB)) | |||
| 6946 | BlockCost.first /= getReciprocalPredBlockProb(); | |||
| 6947 | ||||
| 6948 | Cost.first += BlockCost.first; | |||
| 6949 | Cost.second |= BlockCost.second; | |||
| 6950 | } | |||
| 6951 | ||||
| 6952 | return Cost; | |||
| 6953 | } | |||
| 6954 | ||||
| 6955 | /// Gets Address Access SCEV after verifying that the access pattern | |||
| 6956 | /// is loop invariant except the induction variable dependence. | |||
| 6957 | /// | |||
| 6958 | /// This SCEV can be sent to the Target in order to estimate the address | |||
| 6959 | /// calculation cost. | |||
| 6960 | static const SCEV *getAddressAccessSCEV( | |||
| 6961 | Value *Ptr, | |||
| 6962 | LoopVectorizationLegality *Legal, | |||
| 6963 | PredicatedScalarEvolution &PSE, | |||
| 6964 | const Loop *TheLoop) { | |||
| 6965 | ||||
| 6966 | auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); | |||
| 6967 | if (!Gep) | |||
| 6968 | return nullptr; | |||
| 6969 | ||||
| 6970 | // We are looking for a gep with all loop invariant indices except for one | |||
| 6971 | // which should be an induction variable. | |||
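| | // For example (hypothetical IR), a GEP such as | |||
| | //   %p = getelementptr inbounds double, double* %base, i64 %iv | |||
| | // qualifies when %base is loop invariant and %iv is an induction variable, | |||
| | // whereas a GEP indexed by another value computed inside the loop does not. | |||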
| 6972 | auto SE = PSE.getSE(); | |||
| 6973 | unsigned NumOperands = Gep->getNumOperands(); | |||
| 6974 | for (unsigned i = 1; i < NumOperands; ++i) { | |||
| 6975 | Value *Opd = Gep->getOperand(i); | |||
| 6976 | if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && | |||
| 6977 | !Legal->isInductionVariable(Opd)) | |||
| 6978 | return nullptr; | |||
| 6979 | } | |||
| 6980 | ||||
| 6981 | // Now we know we have a GEP of the form (ptr, %inv, %ind, %inv). Return the Ptr SCEV. | |||
| 6982 | return PSE.getSCEV(Ptr); | |||
| 6983 | } | |||
| 6984 | ||||
| 6985 | static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { | |||
| 6986 | return Legal->hasStride(I->getOperand(0)) || | |||
| 6987 | Legal->hasStride(I->getOperand(1)); | |||
| 6988 | } | |||
| 6989 | ||||
| 6990 | InstructionCost | |||
| 6991 | LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I, | |||
| 6992 | ElementCount VF) { | |||
| 6993 | assert(VF.isVector() && | |||
| 6994 | "Scalarization cost of instruction implies vectorization."); | |||
| 6995 | if (VF.isScalable()) | |||
| 6996 | return InstructionCost::getInvalid(); | |||
| 6997 | ||||
| 6998 | Type *ValTy = getLoadStoreType(I); | |||
| 6999 | auto SE = PSE.getSE(); | |||
| 7000 | ||||
| 7001 | unsigned AS = getLoadStoreAddressSpace(I); | |||
| 7002 | Value *Ptr = getLoadStorePointerOperand(I); | |||
| 7003 | Type *PtrTy = ToVectorTy(Ptr->getType(), VF); | |||
| 7004 | ||||
| 7005 | // Figure out whether the access is strided and get the stride value | |||
| 7006 | // if it's known at compile time. | |||
| 7007 | const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop); | |||
| 7008 | ||||
| 7009 | // Get the cost of the scalar memory instruction and address computation. | |||
| 7010 | InstructionCost Cost = | |||
| 7011 | VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV); | |||
| 7012 | ||||
| 7013 | // Don't pass *I here, since it is scalar but will actually be part of a | |||
| 7014 | // vectorized loop where the user of it is a vectorized instruction. | |||
| 7015 | const Align Alignment = getLoadStoreAlignment(I); | |||
| 7016 | Cost += VF.getKnownMinValue() * | |||
| 7017 | TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment, | |||
| 7018 | AS, TTI::TCK_RecipThroughput); | |||
| 7019 | ||||
| 7020 | // Get the overhead of the extractelement and insertelement instructions | |||
| 7021 | // we might create due to scalarization. | |||
| 7022 | Cost += getScalarizationOverhead(I, VF); | |||
| 7023 | ||||
| 7024 | // If we have a predicated load/store, it will need extra i1 extracts and | |||
| 7025 | // conditional branches, but may not be executed for each vector lane. Scale | |||
| 7026 | // the cost by the probability of executing the predicated block. | |||
| 7027 | if (isPredicatedInst(I)) { | |||
| 7028 | Cost /= getReciprocalPredBlockProb(); | |||
| 7029 | ||||
| 7030 | // Add the cost of an i1 extract and a branch | |||
| 7031 | auto *Vec_i1Ty = | |||
| 7032 | VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF); | |||
| 7033 | Cost += TTI.getScalarizationOverhead( | |||
| 7034 | Vec_i1Ty, APInt::getAllOnesValue(VF.getKnownMinValue()), | |||
| 7035 | /*Insert=*/false, /*Extract=*/true); | |||
| 7036 | Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput); | |||
| 7037 | ||||
| 7038 | if (useEmulatedMaskMemRefHack(I)) | |||
| 7039 | // Artificially setting to a high enough value to practically disable | |||
| 7040 | // vectorization with such operations. | |||
| 7041 | Cost = 3000000; | |||
| 7042 | } | |||
| 7043 | ||||
| 7044 | return Cost; | |||
| 7045 | } | |||
| 7046 | ||||
| 7047 | InstructionCost | |||
| 7048 | LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, | |||
| 7049 | ElementCount VF) { | |||
| 7050 | Type *ValTy = getLoadStoreType(I); | |||
| 7051 | auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); | |||
| 7052 | Value *Ptr = getLoadStorePointerOperand(I); | |||
| 7053 | unsigned AS = getLoadStoreAddressSpace(I); | |||
| 7054 | int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); | |||
| 7055 | enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; | |||
| 7056 | ||||
| 7057 | assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && | |||
| 7058 | "Stride should be 1 or -1 for consecutive memory access"); | |||
| 7059 | const Align Alignment = getLoadStoreAlignment(I); | |||
| 7060 | InstructionCost Cost = 0; | |||
| 7061 | if (Legal->isMaskRequired(I)) | |||
| 7062 | Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, | |||
| 7063 | CostKind); | |||
| 7064 | else | |||
| 7065 | Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, | |||
| 7066 | CostKind, I); | |||
| 7067 | ||||
| 7068 | bool Reverse = ConsecutiveStride < 0; | |||
| 7069 | if (Reverse) | |||
| 7070 | Cost += | |||
| 7071 | TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0); | |||
| 7072 | return Cost; | |||
| 7073 | } | |||
| 7074 | ||||
| 7075 | InstructionCost | |||
| 7076 | LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I, | |||
| 7077 | ElementCount VF) { | |||
| 7078 | assert(Legal->isUniformMemOp(*I)); | |||
| 7079 | ||||
| 7080 | Type *ValTy = getLoadStoreType(I); | |||
| 7081 | auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); | |||
| 7082 | const Align Alignment = getLoadStoreAlignment(I); | |||
| 7083 | unsigned AS = getLoadStoreAddressSpace(I); | |||
| 7084 | enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; | |||
| 7085 | if (isa<LoadInst>(I)) { | |||
| 7086 | return TTI.getAddressComputationCost(ValTy) + | |||
| 7087 | TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS, | |||
| 7088 | CostKind) + | |||
| 7089 | TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy); | |||
| 7090 | } | |||
| 7091 | StoreInst *SI = cast<StoreInst>(I); | |||
| 7092 | ||||
| 7093 | bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand()); | |||
| 7094 | return TTI.getAddressComputationCost(ValTy) + | |||
| 7095 | TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS, | |||
| 7096 | CostKind) + | |||
| 7097 | (isLoopInvariantStoreValue | |||
| 7098 | ? 0 | |||
| 7099 | : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy, | |||
| 7100 | VF.getKnownMinValue() - 1)); | |||
| 7101 | } | |||
| 7102 | ||||
| 7103 | InstructionCost | |||
| 7104 | LoopVectorizationCostModel::getGatherScatterCost(Instruction *I, | |||
| 7105 | ElementCount VF) { | |||
| 7106 | Type *ValTy = getLoadStoreType(I); | |||
| 7107 | auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); | |||
| 7108 | const Align Alignment = getLoadStoreAlignment(I); | |||
| 7109 | const Value *Ptr = getLoadStorePointerOperand(I); | |||
| 7110 | ||||
| 7111 | return TTI.getAddressComputationCost(VectorTy) + | |||
| 7112 | TTI.getGatherScatterOpCost( | |||
| 7113 | I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment, | |||
| 7114 | TargetTransformInfo::TCK_RecipThroughput, I); | |||
| 7115 | } | |||
| 7116 | ||||
| 7117 | InstructionCost | |||
| 7118 | LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I, | |||
| 7119 | ElementCount VF) { | |||
| 7120 | // TODO: Once we have support for interleaving with scalable vectors | |||
| 7121 | // we can calculate the cost properly here. | |||
| 7122 | if (VF.isScalable()) | |||
| 7123 | return InstructionCost::getInvalid(); | |||
| 7124 | ||||
| 7125 | Type *ValTy = getLoadStoreType(I); | |||
| 7126 | auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); | |||
| 7127 | unsigned AS = getLoadStoreAddressSpace(I); | |||
| 7128 | ||||
| 7129 | auto Group = getInterleavedAccessGroup(I); | |||
| 7130 | assert(Group && "Fail to get an interleaved access group."); | |||
| 7131 | ||||
| 7132 | unsigned InterleaveFactor = Group->getFactor(); | |||
| 7133 | auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor); | |||
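| | // E.g., an interleave factor of 2 at VF 4 over i32 elements gives a wide | |||
| | // vector type of <8 x i32>. | |||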
| 7134 | ||||
| 7135 | // Holds the indices of existing members in an interleaved load group. | |||
| 7136 | // An interleaved store group doesn't need this as it doesn't allow gaps. | |||
| 7137 | SmallVector<unsigned, 4> Indices; | |||
| 7138 | if (isa<LoadInst>(I)) { | |||
| 7139 | for (unsigned i = 0; i < InterleaveFactor; i++) | |||
| 7140 | if (Group->getMember(i)) | |||
| 7141 | Indices.push_back(i); | |||
| 7142 | } | |||
| 7143 | ||||
| 7144 | // Calculate the cost of the whole interleaved group. | |||
| 7145 | bool UseMaskForGaps = | |||
| 7146 | Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed(); | |||
| 7147 | InstructionCost Cost = TTI.getInterleavedMemoryOpCost( | |||
| 7148 | I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(), | |||
| 7149 | AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps); | |||
| 7150 | ||||
| 7151 | if (Group->isReverse()) { | |||
| 7152 | // TODO: Add support for reversed masked interleaved access. | |||
| 7153 | assert(!Legal->isMaskRequired(I) && | |||
| 7154 | "Reverse masked interleaved access not supported."); | |||
| 7155 | Cost += | |||
| 7156 | Group->getNumMembers() * | |||
| 7157 | TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0); | |||
| 7158 | } | |||
| 7159 | return Cost; | |||
| 7160 | } | |||
| 7161 | ||||
| 7162 | Optional<InstructionCost> LoopVectorizationCostModel::getReductionPatternCost( | |||
| 7163 | Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) { | |||
| 7164 | using namespace llvm::PatternMatch; | |||
| 7165 | // Early exit if there are no in-loop reductions. | |||
| 7166 | if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty)) | |||
| 7167 | return None; | |||
| 7168 | auto *VectorTy = cast<VectorType>(Ty); | |||
| 7169 | ||||
| 7170 | // We are looking for a pattern of, and finding the minimal acceptable cost: | |||
| 7171 | // reduce(mul(ext(A), ext(B))) or | |||
| 7172 | // reduce(mul(A, B)) or | |||
| 7173 | // reduce(ext(A)) or | |||
| 7174 | // reduce(A). | |||
| 7175 | // The basic idea is that we walk down the tree to do that, finding the root | |||
| 7176 | // reduction instruction in InLoopReductionImmediateChains. From there we find | |||
| 7177 | // the pattern of mul/ext and test the cost of the entire pattern vs the cost | |||
| 7178 | // of the components. If the reduction cost is lower then we return it for the | |||
| 7179 | // reduction instruction and 0 for the other instructions in the pattern. If | |||
| 7180 | // it is not, we return an invalid cost specifying that the original cost | |||
| 7181 | // method should be used. | |||
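| | // A typical chain this aims to match (hypothetical IR, vectorized form) | |||
| | // for reduce(mul(ext, ext)): | |||
| | //   %ea = sext <8 x i8> %a to <8 x i32> | |||
| | //   %eb = sext <8 x i8> %b to <8 x i32> | |||
| | //   %m  = mul nsw <8 x i32> %ea, %eb | |||
| | //   %s  = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %m) | |||
| | //   %r  = add i32 %s, %phi   ; the in-loop reduction update | |||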
| 7182 | Instruction *RetI = I; | |||
| 7183 | if (match(RetI, m_ZExtOrSExt(m_Value()))) { | |||
| 7184 | if (!RetI->hasOneUser()) | |||
| 7185 | return None; | |||
| 7186 | RetI = RetI->user_back(); | |||
| 7187 | } | |||
| 7188 | if (match(RetI, m_Mul(m_Value(), m_Value())) && | |||
| 7189 | RetI->user_back()->getOpcode() == Instruction::Add) { | |||
| 7190 | if (!RetI->hasOneUser()) | |||
| 7191 | return None; | |||
| 7192 | RetI = RetI->user_back(); | |||
| 7193 | } | |||
| 7194 | ||||
| 7195 | // Test if the found instruction is a reduction, and if not, return None so | |||
| 7196 | // that the caller falls back to the original cost modelling. | |||
| 7197 | if (!InLoopReductionImmediateChains.count(RetI)) | |||
| 7198 | return None; | |||
| 7199 | ||||
| 7200 | // Find the reduction this chain is a part of and calculate the basic cost of | |||
| 7201 | // the reduction on its own. | |||
| 7202 | Instruction *LastChain = InLoopReductionImmediateChains[RetI]; | |||
| 7203 | Instruction *ReductionPhi = LastChain; | |||
| 7204 | while (!isa<PHINode>(ReductionPhi)) | |||
| 7205 | ReductionPhi = InLoopReductionImmediateChains[ReductionPhi]; | |||
| 7206 | ||||
| 7207 | const RecurrenceDescriptor &RdxDesc = | |||
| 7208 | Legal->getReductionVars()[cast<PHINode>(ReductionPhi)]; | |||
| 7209 | ||||
| 7210 | InstructionCost BaseCost = TTI.getArithmeticReductionCost( | |||
| 7211 | RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind); | |||
| 7212 | ||||
| 7213 | // If we're using ordered reductions then we can just return the base cost | |||
| 7214 | // here, since getArithmeticReductionCost calculates the full ordered | |||
| 7215 | // reduction cost when FP reassociation is not allowed. | |||
| 7216 | if (useOrderedReductions(RdxDesc)) | |||
| 7217 | return BaseCost; | |||
| 7218 | ||||
| 7219 | // Get the operand that was not the reduction chain and match it to one of the | |||
| 7220 | // patterns, returning the better cost if it is found. | |||
| 7221 | Instruction *RedOp = RetI->getOperand(1) == LastChain | |||
| 7222 | ? dyn_cast<Instruction>(RetI->getOperand(0)) | |||
| 7223 | : dyn_cast<Instruction>(RetI->getOperand(1)); | |||
| 7224 | ||||
| 7225 | VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy); | |||
| 7226 | ||||
| 7227 | Instruction *Op0, *Op1; | |||
| 7228 | if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) && | |||
| 7229 | !TheLoop->isLoopInvariant(RedOp)) { | |||
| 7230 | // Matched reduce(ext(A)) | |||
| 7231 | bool IsUnsigned = isa<ZExtInst>(RedOp); | |||
| 7232 | auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy); | |||
| 7233 | InstructionCost RedCost = TTI.getExtendedAddReductionCost( | |||
| 7234 | /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, | |||
| 7235 | CostKind); | |||
| 7236 | ||||
| 7237 | InstructionCost ExtCost = | |||
| 7238 | TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType, | |||
| 7239 | TTI::CastContextHint::None, CostKind, RedOp); | |||
| 7240 | if (RedCost.isValid() && RedCost < BaseCost + ExtCost) | |||
| 7241 | return I == RetI ? RedCost : 0; | |||
| 7242 | } else if (RedOp && | |||
| 7243 | match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) { | |||
| 7244 | if (match(Op0, m_ZExtOrSExt(m_Value())) && | |||
| 7245 | Op0->getOpcode() == Op1->getOpcode() && | |||
| 7246 | Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() && | |||
| 7247 | !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) { | |||
| 7248 | bool IsUnsigned = isa<ZExtInst>(Op0); | |||
| 7249 | auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy); | |||
| 7250 | // Matched reduce(mul(ext, ext)) | |||
| 7251 | InstructionCost ExtCost = | |||
| 7252 | TTI.getCastInstrCost(Op0->getOpcode(), VectorTy, ExtType, | |||
| 7253 | TTI::CastContextHint::None, CostKind, Op0); | |||
| 7254 | InstructionCost MulCost = | |||
| 7255 | TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); | |||
| 7256 | ||||
| 7257 | InstructionCost RedCost = TTI.getExtendedAddReductionCost( | |||
| 7258 | /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, | |||
| 7259 | CostKind); | |||
| 7260 | ||||
| 7261 | if (RedCost.isValid() && RedCost < ExtCost * 2 + MulCost + BaseCost) | |||
| 7262 | return I == RetI ? RedCost : 0; | |||
| 7263 | } else { | |||
| 7264 | // Matched reduce(mul()) | |||
| 7265 | InstructionCost MulCost = | |||
| 7266 | TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); | |||
| 7267 | ||||
| 7268 | InstructionCost RedCost = TTI.getExtendedAddReductionCost( | |||
| 7269 | /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy, | |||
| 7270 | CostKind); | |||
| 7271 | ||||
| 7272 | if (RedCost.isValid() && RedCost < MulCost + BaseCost) | |||
| 7273 | return I == RetI ? RedCost : 0; | |||
| 7274 | } | |||
| 7275 | } | |||
| 7276 | ||||
| 7277 | return I == RetI ? Optional<InstructionCost>(BaseCost) : None; | |||
| 7278 | } | |||
| 7279 | ||||
| 7280 | InstructionCost | |||
| 7281 | LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, | |||
| 7282 | ElementCount VF) { | |||
| 7283 | // Calculate scalar cost only. Vectorization cost should be ready at this | |||
| 7284 | // moment. | |||
| 7285 | if (VF.isScalar()) { | |||
| 7286 | Type *ValTy = getLoadStoreType(I); | |||
| 7287 | const Align Alignment = getLoadStoreAlignment(I); | |||
| 7288 | unsigned AS = getLoadStoreAddressSpace(I); | |||
| 7289 | ||||
| 7290 | return TTI.getAddressComputationCost(ValTy) + | |||
| 7291 | TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, | |||
| 7292 | TTI::TCK_RecipThroughput, I); | |||
| 7293 | } | |||
| 7294 | return getWideningCost(I, VF); | |||
| 7295 | } | |||
| 7296 | ||||
| 7297 | LoopVectorizationCostModel::VectorizationCostTy | |||
| 7298 | LoopVectorizationCostModel::getInstructionCost(Instruction *I, | |||
| 7299 | ElementCount VF) { | |||
| 7300 | // If we know that this instruction will remain uniform, check the cost of | |||
| 7301 | // the scalar version. | |||
| 7302 | if (isUniformAfterVectorization(I, VF)) | |||
| 7303 | VF = ElementCount::getFixed(1); | |||
| 7304 | ||||
| 7305 | if (VF.isVector() && isProfitableToScalarize(I, VF)) | |||
| 7306 | return VectorizationCostTy(InstsToScalarize[VF][I], false); | |||
| 7307 | ||||
| 7308 | // Forced scalars do not have any scalarization overhead. | |||
| 7309 | auto ForcedScalar = ForcedScalars.find(VF); | |||
| 7310 | if (VF.isVector() && ForcedScalar != ForcedScalars.end()) { | |||
| 7311 | auto InstSet = ForcedScalar->second; | |||
| 7312 | if (InstSet.count(I)) | |||
| 7313 | return VectorizationCostTy( | |||
| 7314 | (getInstructionCost(I, ElementCount::getFixed(1)).first * | |||
| 7315 | VF.getKnownMinValue()), | |||
| 7316 | false); | |||
| 7317 | } | |||
| 7318 | ||||
| 7319 | Type *VectorTy; | |||
| 7320 | InstructionCost C = getInstructionCost(I, VF, VectorTy); | |||
| 7321 | ||||
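| | // A type counts as "not scalarized" when it splits into fewer parts than | |||
| | // it has lanes; e.g., <8 x i32> on a hypothetical target with 128-bit | |||
| | // vectors becomes 2 parts, and 2 < 8, so it stays vectorized. | |||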
| 7322 | bool TypeNotScalarized = | |||
| 7323 | VF.isVector() && VectorTy->isVectorTy() && | |||
| 7324 | TTI.getNumberOfParts(VectorTy) < VF.getKnownMinValue(); | |||
| 7325 | return VectorizationCostTy(C, TypeNotScalarized); | |||
| 7326 | } | |||
| 7327 | ||||
| 7328 | InstructionCost | |||
| 7329 | LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I, | |||
| 7330 | ElementCount VF) const { | |||
| 7331 | ||||
| 7332 | // There is no mechanism yet to create a scalable scalarization loop, | |||
| 7333 | // so this is currently Invalid. | |||
| 7334 | if (VF.isScalable()) | |||
| 7335 | return InstructionCost::getInvalid(); | |||
| 7336 | ||||
| 7337 | if (VF.isScalar()) | |||
| 7338 | return 0; | |||
| 7339 | ||||
| 7340 | InstructionCost Cost = 0; | |||
| 7341 | Type *RetTy = ToVectorTy(I->getType(), VF); | |||
| 7342 | if (!RetTy->isVoidTy() && | |||
| 7343 | (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore())) | |||
| 7344 | Cost += TTI.getScalarizationOverhead( | |||
| 7345 | cast<VectorType>(RetTy), APInt::getAllOnesValue(VF.getKnownMinValue()), | |||
| 7346 | true, false); | |||
| 7347 | ||||
| 7348 | // Some targets keep addresses scalar. | |||
| 7349 | if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing()) | |||
| 7350 | return Cost; | |||
| 7351 | ||||
| 7352 | // Some targets support efficient element stores. | |||
| 7353 | if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore()) | |||
| 7354 | return Cost; | |||
| 7355 | ||||
| 7356 | // Collect operands to consider. | |||
| 7357 | CallInst *CI = dyn_cast<CallInst>(I); | |||
| 7358 | Instruction::op_range Ops = CI ? CI->arg_operands() : I->operands(); | |||
| 7359 | ||||
| 7360 | // Skip operands that do not require extraction/scalarization and do not incur | |||
| 7361 | // any overhead. | |||
| 7362 | SmallVector<Type *> Tys; | |||
| 7363 | for (auto *V : filterExtractingOperands(Ops, VF)) | |||
| 7364 | Tys.push_back(MaybeVectorizeType(V->getType(), VF)); | |||
| 7365 | return Cost + TTI.getOperandsScalarizationOverhead( | |||
| 7366 | filterExtractingOperands(Ops, VF), Tys); | |||
| 7367 | } | |||
| 7368 | ||||
| 7369 | void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) { | |||
| 7370 | if (VF.isScalar()) | |||
| 7371 | return; | |||
| 7372 | NumPredStores = 0; | |||
| 7373 | for (BasicBlock *BB : TheLoop->blocks()) { | |||
| 7374 | // For each instruction in the old loop. | |||
| 7375 | for (Instruction &I : *BB) { | |||
| 7376 | Value *Ptr = getLoadStorePointerOperand(&I); | |||
| 7377 | if (!Ptr) | |||
| 7378 | continue; | |||
| 7379 | ||||
| 7380 | // TODO: We should generate better code and update the cost model for | |||
| 7381 | // predicated uniform stores. Today they are treated as any other | |||
| 7382 | // predicated store (see added test cases in | |||
| 7383 | // invariant-store-vectorization.ll). | |||
| 7384 | if (isa<StoreInst>(&I) && isScalarWithPredication(&I)) | |||
| 7385 | NumPredStores++; | |||
| 7386 | ||||
| 7387 | if (Legal->isUniformMemOp(I)) { | |||
| 7388 | // TODO: Avoid replicating loads and stores instead of | |||
| 7389 | // relying on instcombine to remove them. | |||
| 7390 | // Load: Scalar load + broadcast | |||
| 7391 | // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract | |||
| 7392 | InstructionCost Cost; | |||
| 7393 | if (isa<StoreInst>(&I) && VF.isScalable() && | |||
| 7394 | isLegalGatherOrScatter(&I)) { | |||
| 7395 | Cost = getGatherScatterCost(&I, VF); | |||
| 7396 | setWideningDecision(&I, VF, CM_GatherScatter, Cost); | |||
| 7397 | } else { | |||
| 7398 | assert((isa<LoadInst>(&I) || !VF.isScalable()) && | |||
| 7399 | "Cannot yet scalarize uniform stores"); | |||
| 7400 | Cost = getUniformMemOpCost(&I, VF); | |||
| 7401 | setWideningDecision(&I, VF, CM_Scalarize, Cost); | |||
| 7402 | } | |||
| 7403 | continue; | |||
| 7404 | } | |||
| 7405 | ||||
| 7406 | // We assume that widening is the best solution when possible. | |||
| 7407 | if (memoryInstructionCanBeWidened(&I, VF)) { | |||
| 7408 | InstructionCost Cost = getConsecutiveMemOpCost(&I, VF); | |||
| 7409 | int ConsecutiveStride = | |||
| 7410 | Legal->isConsecutivePtr(getLoadStorePointerOperand(&I)); | |||
| 7411 | assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && | |||
| 7412 | "Expected consecutive stride."); | |||
| 7413 | InstWidening Decision = | |||
| 7414 | ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse; | |||
| 7415 | setWideningDecision(&I, VF, Decision, Cost); | |||
| 7416 | continue; | |||
| 7417 | } | |||
| 7418 | ||||
| 7419 | // Choose between Interleaving, Gather/Scatter or Scalarization. | |||
| 7420 | InstructionCost InterleaveCost = InstructionCost::getInvalid(); | |||
| 7421 | unsigned NumAccesses = 1; | |||
| 7422 | if (isAccessInterleaved(&I)) { | |||
| 7423 | auto Group = getInterleavedAccessGroup(&I); | |||
| 7424 | assert(Group && "Fail to get an interleaved access group."); | |||
| 7425 | ||||
| 7426 | // Make one decision for the whole group. | |||
| 7427 | if (getWideningDecision(&I, VF) != CM_Unknown) | |||
| 7428 | continue; | |||
| 7429 | ||||
| 7430 | NumAccesses = Group->getNumMembers(); | |||
| 7431 | if (interleavedAccessCanBeWidened(&I, VF)) | |||
| 7432 | InterleaveCost = getInterleaveGroupCost(&I, VF); | |||
| 7433 | } | |||
| 7434 | ||||
| 7435 | InstructionCost GatherScatterCost = | |||
| 7436 | isLegalGatherOrScatter(&I) | |||
| 7437 | ? getGatherScatterCost(&I, VF) * NumAccesses | |||
| 7438 | : InstructionCost::getInvalid(); | |||
| 7439 | ||||
| 7440 | InstructionCost ScalarizationCost = | |||
| 7441 | getMemInstScalarizationCost(&I, VF) * NumAccesses; | |||
| 7442 | ||||
| 7443 | // Choose the better solution for the current VF, record this decision, | |||
| 7444 | // and use it during vectorization. | |||
| 7445 | InstructionCost Cost; | |||
| 7446 | InstWidening Decision; | |||
| 7447 | if (InterleaveCost <= GatherScatterCost && | |||
| 7448 | InterleaveCost < ScalarizationCost) { | |||
| 7449 | Decision = CM_Interleave; | |||
| 7450 | Cost = InterleaveCost; | |||
| 7451 | } else if (GatherScatterCost < ScalarizationCost) { | |||
| 7452 | Decision = CM_GatherScatter; | |||
| 7453 | Cost = GatherScatterCost; | |||
| 7454 | } else { | |||
| 7455 | Decision = CM_Scalarize; | |||
| 7456 | Cost = ScalarizationCost; | |||
| 7457 | } | |||
| 7458 | // If the instruction belongs to an interleave group, the whole group | |||
| 7459 | // receives the same decision. The whole group receives the cost, but | |||
| 7460 | // the cost will actually be assigned to one instruction. | |||
| 7461 | if (auto Group = getInterleavedAccessGroup(&I)) | |||
| 7462 | setWideningDecision(Group, VF, Decision, Cost); | |||
| 7463 | else | |||
| 7464 | setWideningDecision(&I, VF, Decision, Cost); | |||
| 7465 | } | |||
| 7466 | } | |||
| 7467 | ||||
| 7468 | // Make sure that any load of address and any other address computation | |||
| 7469 | // remains scalar unless there is gather/scatter support. This avoids | |||
| 7470 | // inevitable extracts into address registers, and also has the benefit of | |||
| 7471 | // activating LSR more, since that pass can't optimize vectorized | |||
| 7472 | // addresses. | |||
| 7473 | if (TTI.prefersVectorizedAddressing()) | |||
| 7474 | return; | |||
| 7475 | ||||
| 7476 | // Start with all scalar pointer uses. | |||
| 7477 | SmallPtrSet<Instruction *, 8> AddrDefs; | |||
| 7478 | for (BasicBlock *BB : TheLoop->blocks()) | |||
| 7479 | for (Instruction &I : *BB) { | |||
| 7480 | Instruction *PtrDef = | |||
| 7481 | dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I)); | |||
| 7482 | if (PtrDef && TheLoop->contains(PtrDef) && | |||
| 7483 | getWideningDecision(&I, VF) != CM_GatherScatter) | |||
| 7484 | AddrDefs.insert(PtrDef); | |||
| 7485 | } | |||
| 7486 | ||||
| 7487 | // Add all instructions used to generate the addresses. | |||
| 7488 | SmallVector<Instruction *, 4> Worklist; | |||
| 7489 | append_range(Worklist, AddrDefs); | |||
| 7490 | while (!Worklist.empty()) { | |||
| 7491 | Instruction *I = Worklist.pop_back_val(); | |||
| 7492 | for (auto &Op : I->operands()) | |||
| 7493 | if (auto *InstOp = dyn_cast<Instruction>(Op)) | |||
| 7494 | if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) && | |||
| 7495 | AddrDefs.insert(InstOp).second) | |||
| 7496 | Worklist.push_back(InstOp); | |||
| 7497 | } | |||
| 7498 | ||||
| 7499 | for (auto *I : AddrDefs) { | |||
| 7500 | if (isa<LoadInst>(I)) { | |||
| 7501 | // Setting the desired widening decision should ideally be handled by | |||
| 7502 | // cost functions, but since this involves the task of finding out | |||
| 7503 | // if the loaded register is involved in an address computation, it is | |||
| 7504 | // instead changed here when we know this is the case. | |||
| 7505 | InstWidening Decision = getWideningDecision(I, VF); | |||
| 7506 | if (Decision == CM_Widen || Decision == CM_Widen_Reverse) | |||
| 7507 | // Scalarize a widened load of address. | |||
| 7508 | setWideningDecision( | |||
| 7509 | I, VF, CM_Scalarize, | |||
| 7510 | (VF.getKnownMinValue() * | |||
| 7511 | getMemoryInstructionCost(I, ElementCount::getFixed(1)))); | |||
| 7512 | else if (auto Group = getInterleavedAccessGroup(I)) { | |||
| 7513 | // Scalarize an interleave group of address loads. | |||
| 7514 | for (unsigned I = 0; I < Group->getFactor(); ++I) { | |||
| 7515 | if (Instruction *Member = Group->getMember(I)) | |||
| 7516 | setWideningDecision( | |||
| 7517 | Member, VF, CM_Scalarize, | |||
| 7518 | (VF.getKnownMinValue() * | |||
| 7519 | getMemoryInstructionCost(Member, ElementCount::getFixed(1)))); | |||
| 7520 | } | |||
| 7521 | } | |||
| 7522 | } else | |||
| 7523 | // Make sure I gets scalarized and a cost estimate without | |||
| 7524 | // scalarization overhead. | |||
| 7525 | ForcedScalars[VF].insert(I); | |||
| 7526 | } | |||
| 7527 | } | |||
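To make the decision rule in setCostBasedWideningDecision concrete, here is a minimal, self-contained sketch of the three-way choice between interleaving, gather/scatter, and scalarization. The types and names are illustrative stand-ins, not the real LLVM API; SimpleCost only mimics how an invalid InstructionCost compares as infinitely expensive.

    #include <cstdint>
    #include <limits>

    // Illustrative stand-in for llvm::InstructionCost: an invalid cost ranks
    // as infinitely expensive in comparisons.
    struct SimpleCost {
      std::uint64_t Value = std::numeric_limits<std::uint64_t>::max();
      bool Valid = false;
      static SimpleCost invalid() { return {}; }
      static SimpleCost of(std::uint64_t V) { return {V, true}; }
      std::uint64_t rank() const {
        return Valid ? Value : std::numeric_limits<std::uint64_t>::max();
      }
    };

    enum class Widening { Interleave, GatherScatter, Scalarize };

    // Same tie-breaking as above: interleaving wins ties against
    // gather/scatter but must be strictly cheaper than scalarization;
    // gather/scatter must be strictly cheaper than scalarization;
    // otherwise scalarize.
    Widening pickDecision(SimpleCost Interleave, SimpleCost GatherScatter,
                          SimpleCost Scalarization) {
      if (Interleave.rank() <= GatherScatter.rank() &&
          Interleave.rank() < Scalarization.rank())
        return Widening::Interleave;
      if (GatherScatter.rank() < Scalarization.rank())
        return Widening::GatherScatter;
      return Widening::Scalarize;
    }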
| 7528 | ||||
| 7529 | InstructionCost | |||
| 7530 | LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF, | |||
| 7531 | Type *&VectorTy) { | |||
| 7532 | Type *RetTy = I->getType(); | |||
| 7533 | if (canTruncateToMinimalBitwidth(I, VF)) | |||
| 7534 | RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]); | |||
| 7535 | auto SE = PSE.getSE(); | |||
| 7536 | TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; | |||
| 7537 | ||||
| 7538 | auto hasSingleCopyAfterVectorization = [this](Instruction *I, | |||
| 7539 | ElementCount VF) -> bool { | |||
| 7540 | if (VF.isScalar()) | |||
| 7541 | return true; | |||
| 7542 | ||||
| 7543 | auto Scalarized = InstsToScalarize.find(VF); | |||
| 7544 | assert(Scalarized != InstsToScalarize.end() && | |||
| 7545 |        "VF not yet analyzed for scalarization profitability"); | |||
| 7546 | return !Scalarized->second.count(I) && | |||
| 7547 | llvm::all_of(I->users(), [&](User *U) { | |||
| 7548 | auto *UI = cast<Instruction>(U); | |||
| 7549 | return !Scalarized->second.count(UI); | |||
| 7550 | }); | |||
| 7551 | }; | |||
| 7552 | (void) hasSingleCopyAfterVectorization; | |||
| 7553 | ||||
| 7554 | if (isScalarAfterVectorization(I, VF)) { | |||
| 7555 | // With the exception of GEPs and PHIs, after scalarization there should | |||
| 7556 | // only be one copy of the instruction generated in the loop. This is | |||
| 7557 | // because the VF is either 1, or any instructions that need scalarizing | |||
| 7558 | // have already been dealt with by the time we get here. As a result, | |||
| 7559 | // it means we don't have to multiply the instruction cost by VF. | |||
| 7560 | assert(I->getOpcode() == Instruction::GetElementPtr || | |||
| 7561 |        I->getOpcode() == Instruction::PHI || | |||
| 7562 |        (I->getOpcode() == Instruction::BitCast && | |||
| 7563 |         I->getType()->isPointerTy()) || | |||
| 7564 |        hasSingleCopyAfterVectorization(I, VF)); | |||
| 7565 | VectorTy = RetTy; | |||
| 7566 | } else | |||
| 7567 | VectorTy = ToVectorTy(RetTy, VF); | |||
| 7568 | ||||
| 7569 | // TODO: We need to estimate the cost of intrinsic calls. | |||
| 7570 | switch (I->getOpcode()) { | |||
| 7571 | case Instruction::GetElementPtr: | |||
| 7572 | // We mark this instruction as zero-cost because the cost of GEPs in | |||
| 7573 | // vectorized code depends on whether the corresponding memory instruction | |||
| 7574 | // is scalarized or not. Therefore, we handle GEPs with the memory | |||
| 7575 | // instruction cost. | |||
| 7576 | return 0; | |||
| 7577 | case Instruction::Br: { | |||
| 7578 | // In cases of scalarized and predicated instructions, there will be VF | |||
| 7579 | // predicated blocks in the vectorized loop. Each branch around these | |||
| 7580 | // blocks also requires an extract of its vector compare i1 element. | |||
| 7581 | bool ScalarPredicatedBB = false; | |||
| 7582 | BranchInst *BI = cast<BranchInst>(I); | |||
| 7583 | if (VF.isVector() && BI->isConditional() && | |||
| 7584 | (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) || | |||
| 7585 | PredicatedBBsAfterVectorization.count(BI->getSuccessor(1)))) | |||
| 7586 | ScalarPredicatedBB = true; | |||
| 7587 | ||||
| 7588 | if (ScalarPredicatedBB) { | |||
| 7589 | // It is not possible to scalarize a scalable vector with predicated instructions. | |||
| 7590 | if (VF.isScalable()) | |||
| 7591 | return InstructionCost::getInvalid(); | |||
| 7592 | // Return cost for branches around scalarized and predicated blocks. | |||
| 7593 | auto *Vec_i1Ty = | |||
| 7594 | VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF); | |||
| 7595 | return ( | |||
| 7596 | TTI.getScalarizationOverhead( | |||
| 7597 | Vec_i1Ty, APInt::getAllOnesValue(VF.getFixedValue()), false, | |||
| 7598 | true) + | |||
| 7599 | (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue())); | |||
| 7600 | } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar()) | |||
| 7601 | // The back-edge branch will remain, as will all scalar branches. | |||
| 7602 | return TTI.getCFInstrCost(Instruction::Br, CostKind); | |||
| 7603 | else | |||
| 7604 | // This branch will be eliminated by if-conversion. | |||
| 7605 | return 0; | |||
| 7606 | // Note: We currently assume zero cost for an unconditional branch inside | |||
| 7607 | // a predicated block since it will become a fall-through, although we | |||
| 7608 | // may decide in the future to call TTI for all branches. | |||
| 7609 | } | |||
| 7610 | case Instruction::PHI: { | |||
| 7611 | auto *Phi = cast<PHINode>(I); | |||
| 7612 | ||||
| 7613 | // First-order recurrences are replaced by vector shuffles inside the loop. | |||
| 7614 | // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type. | |||
| 7615 | if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi)) | |||
| 7616 | return TTI.getShuffleCost( | |||
| 7617 | TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy), | |||
| 7618 | None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1)); | |||
| 7619 | ||||
| 7620 | // Phi nodes in non-header blocks (not inductions, reductions, etc.) are | |||
| 7621 | // converted into select instructions. We require N - 1 selects per phi | |||
| 7622 | // node, where N is the number of incoming values. | |||
| 7623 | if (VF.isVector() && Phi->getParent() != TheLoop->getHeader()) | |||
| 7624 | return (Phi->getNumIncomingValues() - 1) * | |||
| 7625 | TTI.getCmpSelInstrCost( | |||
| 7626 | Instruction::Select, ToVectorTy(Phi->getType(), VF), | |||
| 7627 | ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF), | |||
| 7628 | CmpInst::BAD_ICMP_PREDICATE, CostKind); | |||
| 7629 | ||||
| 7630 | return TTI.getCFInstrCost(Instruction::PHI, CostKind); | |||
| 7631 | } | |||
| 7632 | case Instruction::UDiv: | |||
| 7633 | case Instruction::SDiv: | |||
| 7634 | case Instruction::URem: | |||
| 7635 | case Instruction::SRem: | |||
| 7636 | // If we have a predicated instruction, it may not be executed for each | |||
| 7637 | // vector lane. Get the scalarization cost and scale this amount by the | |||
| 7638 | // probability of executing the predicated block. If the instruction is not | |||
| 7639 | // predicated, we fall through to the next case. | |||
| 7640 | if (VF.isVector() && isScalarWithPredication(I)) { | |||
| 7641 | InstructionCost Cost = 0; | |||
| 7642 | ||||
| 7643 | // These instructions have a non-void type, so account for the phi nodes | |||
| 7644 | // that we will create. This cost is likely to be zero. The phi node | |||
| 7645 | // cost, if any, should be scaled by the block probability because it | |||
| 7646 | // models a copy at the end of each predicated block. | |||
| 7647 | Cost += VF.getKnownMinValue() * | |||
| 7648 | TTI.getCFInstrCost(Instruction::PHI, CostKind); | |||
| 7649 | ||||
| 7650 | // The cost of the non-predicated instruction. | |||
| 7651 | Cost += VF.getKnownMinValue() * | |||
| 7652 | TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind); | |||
| 7653 | ||||
| 7654 | // The cost of insertelement and extractelement instructions needed for | |||
| 7655 | // scalarization. | |||
| 7656 | Cost += getScalarizationOverhead(I, VF); | |||
| 7657 | ||||
| 7658 | // Scale the cost by the probability of executing the predicated blocks. | |||
| 7659 | // This assumes the predicated block for each vector lane is equally | |||
| 7660 | // likely. | |||
| 7661 | return Cost / getReciprocalPredBlockProb(); | |||
| 7662 | } | |||
| 7663 | LLVM_FALLTHROUGH; | |||
| 7664 | case Instruction::Add: | |||
| 7665 | case Instruction::FAdd: | |||
| 7666 | case Instruction::Sub: | |||
| 7667 | case Instruction::FSub: | |||
| 7668 | case Instruction::Mul: | |||
| 7669 | case Instruction::FMul: | |||
| 7670 | case Instruction::FDiv: | |||
| 7671 | case Instruction::FRem: | |||
| 7672 | case Instruction::Shl: | |||
| 7673 | case Instruction::LShr: | |||
| 7674 | case Instruction::AShr: | |||
| 7675 | case Instruction::And: | |||
| 7676 | case Instruction::Or: | |||
| 7677 | case Instruction::Xor: { | |||
| 7678 | // Since we will replace the stride by 1 the multiplication should go away. | |||
| 7679 | if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) | |||
| 7680 | return 0; | |||
| 7681 | ||||
| 7682 | // Detect reduction patterns | |||
| 7683 | if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) | |||
| 7684 | return *RedCost; | |||
| 7685 | ||||
| 7686 | // Certain instructions can be cheaper to vectorize if they have a constant | |||
| 7687 | // second vector operand. One example of this are shifts on x86. | |||
| 7688 | Value *Op2 = I->getOperand(1); | |||
| 7689 | TargetTransformInfo::OperandValueProperties Op2VP; | |||
| 7690 | TargetTransformInfo::OperandValueKind Op2VK = | |||
| 7691 | TTI.getOperandInfo(Op2, Op2VP); | |||
| 7692 | if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2)) | |||
| 7693 | Op2VK = TargetTransformInfo::OK_UniformValue; | |||
| 7694 | ||||
| 7695 | SmallVector<const Value *, 4> Operands(I->operand_values()); | |||
| 7696 | return TTI.getArithmeticInstrCost( | |||
| 7697 | I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue, | |||
| 7698 | Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I); | |||
| 7699 | } | |||
| 7700 | case Instruction::FNeg: { | |||
| 7701 | return TTI.getArithmeticInstrCost( | |||
| 7702 | I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue, | |||
| 7703 | TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None, | |||
| 7704 | TargetTransformInfo::OP_None, I->getOperand(0), I); | |||
| 7705 | } | |||
| 7706 | case Instruction::Select: { | |||
| 7707 | SelectInst *SI = cast<SelectInst>(I); | |||
| 7708 | const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); | |||
| 7709 | bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); | |||
| 7710 | ||||
| 7711 | const Value *Op0, *Op1; | |||
| 7712 | using namespace llvm::PatternMatch; | |||
| 7713 | if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) || | |||
| 7714 | match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) { | |||
| 7715 | // select x, y, false --> x & y | |||
| 7716 | // select x, true, y --> x | y | |||
| 7717 | TTI::OperandValueProperties Op1VP = TTI::OP_None; | |||
| 7718 | TTI::OperandValueProperties Op2VP = TTI::OP_None; | |||
| 7719 | TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP); | |||
| 7720 | TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP); | |||
| 7721 | assert(Op0->getType()->getScalarSizeInBits() == 1 && | |||
| 7722 |        Op1->getType()->getScalarSizeInBits() == 1); | |||
| 7723 | ||||
| 7724 | SmallVector<const Value *, 2> Operands{Op0, Op1}; | |||
| 7725 | return TTI.getArithmeticInstrCost( | |||
| 7726 | match(I, m_LogicalOr()) ? Instruction::Or : Instruction::And, VectorTy, | |||
| 7727 | CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I); | |||
| 7728 | } | |||
| 7729 | ||||
| 7730 | Type *CondTy = SI->getCondition()->getType(); | |||
| 7731 | if (!ScalarCond) | |||
| 7732 | CondTy = VectorType::get(CondTy, VF); | |||
| 7733 | return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, | |||
| 7734 | CmpInst::BAD_ICMP_PREDICATE, CostKind, I); | |||
| 7735 | } | |||
| 7736 | case Instruction::ICmp: | |||
| 7737 | case Instruction::FCmp: { | |||
| 7738 | Type *ValTy = I->getOperand(0)->getType(); | |||
| 7739 | Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); | |||
| 7740 | if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) | |||
| 7741 | ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); | |||
| 7742 | VectorTy = ToVectorTy(ValTy, VF); | |||
| 7743 | return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, | |||
| 7744 | CmpInst::BAD_ICMP_PREDICATE, CostKind, I); | |||
| 7745 | } | |||
| 7746 | case Instruction::Store: | |||
| 7747 | case Instruction::Load: { | |||
| 7748 | ElementCount Width = VF; | |||
| 7749 | if (Width.isVector()) { | |||
| 7750 | InstWidening Decision = getWideningDecision(I, Width); | |||
| 7751 | assert(Decision != CM_Unknown && | |||
| 7752 |        "CM decision should be taken at this point"); | |||
| 7753 | if (Decision == CM_Scalarize) | |||
| 7754 | Width = ElementCount::getFixed(1); | |||
| 7755 | } | |||
| 7756 | VectorTy = ToVectorTy(getLoadStoreType(I), Width); | |||
| 7757 | return getMemoryInstructionCost(I, VF); | |||
| 7758 | } | |||
| 7759 | case Instruction::BitCast: | |||
| 7760 | if (I->getType()->isPointerTy()) | |||
| 7761 | return 0; | |||
| 7762 | LLVM_FALLTHROUGH; | |||
| 7763 | case Instruction::ZExt: | |||
| 7764 | case Instruction::SExt: | |||
| 7765 | case Instruction::FPToUI: | |||
| 7766 | case Instruction::FPToSI: | |||
| 7767 | case Instruction::FPExt: | |||
| 7768 | case Instruction::PtrToInt: | |||
| 7769 | case Instruction::IntToPtr: | |||
| 7770 | case Instruction::SIToFP: | |||
| 7771 | case Instruction::UIToFP: | |||
| 7772 | case Instruction::Trunc: | |||
| 7773 | case Instruction::FPTrunc: { | |||
| 7774 | // Computes the CastContextHint from a Load/Store instruction. | |||
| 7775 | auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint { | |||
| 7776 | assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && | |||
| 7777 |        "Expected a load or a store!"); | |||
| 7778 | ||||
| 7779 | if (VF.isScalar() || !TheLoop->contains(I)) | |||
| 7780 | return TTI::CastContextHint::Normal; | |||
| 7781 | ||||
| 7782 | switch (getWideningDecision(I, VF)) { | |||
| 7783 | case LoopVectorizationCostModel::CM_GatherScatter: | |||
| 7784 | return TTI::CastContextHint::GatherScatter; | |||
| 7785 | case LoopVectorizationCostModel::CM_Interleave: | |||
| 7786 | return TTI::CastContextHint::Interleave; | |||
| 7787 | case LoopVectorizationCostModel::CM_Scalarize: | |||
| 7788 | case LoopVectorizationCostModel::CM_Widen: | |||
| 7789 | return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked | |||
| 7790 | : TTI::CastContextHint::Normal; | |||
| 7791 | case LoopVectorizationCostModel::CM_Widen_Reverse: | |||
| 7792 | return TTI::CastContextHint::Reversed; | |||
| 7793 | case LoopVectorizationCostModel::CM_Unknown: | |||
| 7794 | llvm_unreachable("Instr did not go through cost modelling?"); | |||
| 7795 | } | |||
| 7796 | ||||
| 7797 | llvm_unreachable("Unhandled case!"); | |||
| 7798 | }; | |||
| 7799 | ||||
| 7800 | unsigned Opcode = I->getOpcode(); | |||
| 7801 | TTI::CastContextHint CCH = TTI::CastContextHint::None; | |||
| 7802 | // For Trunc, the context is the only user, which must be a StoreInst. | |||
| 7803 | if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) { | |||
| 7804 | if (I->hasOneUse()) | |||
| 7805 | if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin())) | |||
| 7806 | CCH = ComputeCCH(Store); | |||
| 7807 | } | |||
| 7808 | // For Z/Sext, the context is the operand, which must be a LoadInst. | |||
| 7809 | else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt || | |||
| 7810 | Opcode == Instruction::FPExt) { | |||
| 7811 | if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0))) | |||
| 7812 | CCH = ComputeCCH(Load); | |||
| 7813 | } | |||
| 7814 | ||||
| 7815 | // We optimize the truncation of induction variables having constant | |||
| 7816 | // integer steps. The cost of these truncations is the same as the scalar | |||
| 7817 | // operation. | |||
| 7818 | if (isOptimizableIVTruncate(I, VF)) { | |||
| 7819 | auto *Trunc = cast<TruncInst>(I); | |||
| 7820 | return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(), | |||
| 7821 | Trunc->getSrcTy(), CCH, CostKind, Trunc); | |||
| 7822 | } | |||
| 7823 | ||||
| 7824 | // Detect reduction patterns | |||
| 7825 | if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) | |||
| 7826 | return *RedCost; | |||
| 7827 | ||||
| 7828 | Type *SrcScalarTy = I->getOperand(0)->getType(); | |||
| 7829 | Type *SrcVecTy = | |||
| 7830 | VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy; | |||
| 7831 | if (canTruncateToMinimalBitwidth(I, VF)) { | |||
| 7832 | // This cast is going to be shrunk. This may remove the cast or it might | |||
| 7833 | // turn it into a slightly different cast. For example, if MinBW == 16, | |||
| 7834 | // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". | |||
| 7835 | // | |||
| 7836 | // Calculate the modified src and dest types. | |||
| 7837 | Type *MinVecTy = VectorTy; | |||
| 7838 | if (Opcode == Instruction::Trunc) { | |||
| 7839 | SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); | |||
| 7840 | VectorTy = | |||
| 7841 | largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); | |||
| 7842 | } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) { | |||
| 7843 | SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); | |||
| 7844 | VectorTy = | |||
| 7845 | smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); | |||
| 7846 | } | |||
| 7847 | } | |||
| 7848 | ||||
| 7849 | return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I); | |||
| 7850 | } | |||
| 7851 | case Instruction::Call: { | |||
| 7852 | bool NeedToScalarize; | |||
| 7853 | CallInst *CI = cast<CallInst>(I); | |||
| 7854 | InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize); | |||
| 7855 | if (getVectorIntrinsicIDForCall(CI, TLI)) { | |||
| 7856 | InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF); | |||
| 7857 | return std::min(CallCost, IntrinsicCost); | |||
| 7858 | } | |||
| 7859 | return CallCost; | |||
| 7860 | } | |||
| 7861 | case Instruction::ExtractValue: | |||
| 7862 | return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput); | |||
| 7863 | case Instruction::Alloca: | |||
| 7864 | // We cannot easily widen alloca to a scalable alloca, as | |||
| 7865 | // the result would need to be a vector of pointers. | |||
| 7866 | if (VF.isScalable()) | |||
| 7867 | return InstructionCost::getInvalid(); | |||
| 7868 | LLVM_FALLTHROUGH; | |||
| 7869 | default: | |||
| 7870 | // This opcode is unknown. Assume that it is the same as 'mul'. | |||
| 7871 | return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); | |||
| 7872 | } // end of switch. | |||
| 7873 | } | |||
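One worked instance of the predicated-division branch in getInstructionCost above, assuming the cost model's reciprocal predicated-block probability is 2 (i.e. each predicated block is taken on roughly half the iterations). This is a simplified arithmetic sketch with hypothetical inputs, not the actual cost computation:

    // Per the case above: VF phi copies plus VF scalar divides plus the
    // insert/extract overhead, all scaled down by the reciprocal block
    // probability (assumed 2). E.g. VF=4, PhiCost=0, DivCost=20, Overhead=8:
    // (4*0 + 4*20 + 8) / 2 == 44.
    unsigned predicatedDivCost(unsigned VF, unsigned PhiCost, unsigned DivCost,
                               unsigned ScalarizationOverhead) {
      unsigned Cost = VF * PhiCost + VF * DivCost + ScalarizationOverhead;
      return Cost / 2; // divide by the reciprocal predicated-block probability
    }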
| 7874 | ||||
| 7875 | char LoopVectorize::ID = 0; | |||
| 7876 | ||||
| 7877 | static const char lv_name[] = "Loop Vectorization"; | |||
| 7878 | ||||
| 7879 | INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) | |||
| 7880 | INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) | |||
| 7881 | INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) | |||
| 7882 | INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) | |||
| 7883 | INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) | |||
| 7884 | INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) | |||
| 7885 | INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) | |||
| 7886 | INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) | |||
| 7887 | INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) | |||
| 7888 | INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) | |||
| 7889 | INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) | |||
| 7890 | INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) | |||
| 7891 | INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) | |||
| 7892 | INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass) | |||
| 7893 | INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy) | |||
| 7894 | INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) | |||
| 7895 | ||||
| 7896 | namespace llvm { | |||
| 7897 | ||||
| 7898 | Pass *createLoopVectorizePass() { return new LoopVectorize(); } | |||
| 7899 | ||||
| 7900 | Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced, | |||
| 7901 | bool VectorizeOnlyWhenForced) { | |||
| 7902 | return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced); | |||
| 7903 | } | |||
| 7904 | ||||
| 7905 | } // end namespace llvm | |||
| 7906 | ||||
| 7907 | bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { | |||
| 7908 | // Check if the pointer operand of a load or store instruction is | |||
| 7909 | // consecutive. | |||
| 7910 | if (auto *Ptr = getLoadStorePointerOperand(Inst)) | |||
| 7911 | return Legal->isConsecutivePtr(Ptr); | |||
| 7912 | return false; | |||
| 7913 | } | |||
| 7914 | ||||
| 7915 | void LoopVectorizationCostModel::collectValuesToIgnore() { | |||
| 7916 | // Ignore ephemeral values. | |||
| 7917 | CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); | |||
| 7918 | ||||
| 7919 | // Ignore type-promoting instructions we identified during reduction | |||
| 7920 | // detection. | |||
| 7921 | for (auto &Reduction : Legal->getReductionVars()) { | |||
| 7922 | RecurrenceDescriptor &RedDes = Reduction.second; | |||
| 7923 | const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); | |||
| 7924 | VecValuesToIgnore.insert(Casts.begin(), Casts.end()); | |||
| 7925 | } | |||
| 7926 | // Ignore type-casting instructions we identified during induction | |||
| 7927 | // detection. | |||
| 7928 | for (auto &Induction : Legal->getInductionVars()) { | |||
| 7929 | InductionDescriptor &IndDes = Induction.second; | |||
| 7930 | const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); | |||
| 7931 | VecValuesToIgnore.insert(Casts.begin(), Casts.end()); | |||
| 7932 | } | |||
| 7933 | } | |||
| 7934 | ||||
| 7935 | void LoopVectorizationCostModel::collectInLoopReductions() { | |||
| 7936 | for (auto &Reduction : Legal->getReductionVars()) { | |||
| 7937 | PHINode *Phi = Reduction.first; | |||
| 7938 | RecurrenceDescriptor &RdxDesc = Reduction.second; | |||
| 7939 | ||||
| 7940 | // We don't collect reductions that are type promoted (yet). | |||
| 7941 | if (RdxDesc.getRecurrenceType() != Phi->getType()) | |||
| 7942 | continue; | |||
| 7943 | ||||
| 7944 | // If the target would prefer this reduction to happen "in-loop", then we | |||
| 7945 | // want to record it as such. | |||
| 7946 | unsigned Opcode = RdxDesc.getOpcode(); | |||
| 7947 | if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) && | |||
| 7948 | !TTI.preferInLoopReduction(Opcode, Phi->getType(), | |||
| 7949 | TargetTransformInfo::ReductionFlags())) | |||
| 7950 | continue; | |||
| 7951 | ||||
| 7952 | // Check that we can correctly put the reductions into the loop, by | |||
| 7953 | // finding the chain of operations that leads from the phi to the loop | |||
| 7954 | // exit value. | |||
| 7955 | SmallVector<Instruction *, 4> ReductionOperations = | |||
| 7956 | RdxDesc.getReductionOpChain(Phi, TheLoop); | |||
| 7957 | bool InLoop = !ReductionOperations.empty(); | |||
| 7958 | if (InLoop) { | |||
| 7959 | InLoopReductionChains[Phi] = ReductionOperations; | |||
| 7960 | // Add the elements to InLoopReductionImmediateChains for cost modelling. | |||
| 7961 | Instruction *LastChain = Phi; | |||
| 7962 | for (auto *I : ReductionOperations) { | |||
| 7963 | InLoopReductionImmediateChains[I] = LastChain; | |||
| 7964 | LastChain = I; | |||
| 7965 | } | |||
| 7966 | } | |||
| 7967 | LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop") | |||
| 7968 |                   << " reduction for phi: " << *Phi << "\n"); | |||
| 7969 | } | |||
| 7970 | } | |||
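The chain bookkeeping in collectInLoopReductions can be pictured with a small self-contained sketch: each reduction operation maps to the link that feeds it, seeded with the phi itself, as done for InLoopReductionImmediateChains above. The types here are placeholders, not LLVM classes:

    #include <map>
    #include <vector>

    template <typename Instr>
    std::map<Instr *, Instr *>
    buildImmediateChains(Instr *Phi, const std::vector<Instr *> &Ops) {
      std::map<Instr *, Instr *> Chains;
      Instr *Last = Phi;
      for (Instr *I : Ops) {
        Chains[I] = Last; // each op points back at the previous link
        Last = I;
      }
      return Chains;
    }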
| 7971 | ||||
| 7972 | // TODO: we could return a pair of values that specify the max VF and | |||
| 7973 | // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of | |||
| 7974 | // `buildVPlans(VF, VF)`. We cannot do it because VPLAN at the moment | |||
| 7975 | // doesn't have a cost model that can choose which plan to execute if | |||
| 7976 | // more than one is generated. | |||
| 7977 | static unsigned determineVPlanVF(const unsigned WidestVectorRegBits, | |||
| 7978 | LoopVectorizationCostModel &CM) { | |||
| 7979 | unsigned WidestType; | |||
| 7980 | std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes(); | |||
| 7981 | return WidestVectorRegBits / WidestType; | |||
| 7982 | } | |||
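As a quick arithmetic check of determineVPlanVF: with 256-bit vector registers and a widest scalar type of 32 bits, the result is 256 / 32 = 8 lanes. A one-function sketch (illustrative only, ignoring the TODO above about returning a VF range):

    // e.g. vplanVF(256, 32) == 8; vplanVF(512, 64) == 8.
    unsigned vplanVF(unsigned WidestVectorRegBits, unsigned WidestTypeBits) {
      return WidestVectorRegBits / WidestTypeBits;
    }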
| 7983 | ||||
| 7984 | VectorizationFactor | |||
| 7985 | LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) { | |||
| 7986 | assert(!UserVF.isScalable() && "scalable vectors not yet supported"); | |||
| 7987 | ElementCount VF = UserVF; | |||
| 7988 | // Outer loop handling: outer loops may require CFG and instruction-level | |||
| 7989 | // transformations before even evaluating whether vectorization is profitable. | |||
| 7990 | // Since we cannot modify the incoming IR, we need to build VPlan upfront in | |||
| 7991 | // the vectorization pipeline. | |||
| 7992 | if (!OrigLoop->isInnermost()) { | |||
| 7993 | // If the user doesn't provide a vectorization factor, determine a | |||
| 7994 | // reasonable one. | |||
| 7995 | if (UserVF.isZero()) { | |||
| 7996 | VF = ElementCount::getFixed(determineVPlanVF( | |||
| 7997 | TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector) | |||
| 7998 | .getFixedSize(), | |||
| 7999 | CM)); | |||
| 8000 | LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n"); | |||
| 8001 | ||||
| 8002 | // Make sure we have a VF > 1 for stress testing. | |||
| 8003 | if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) { | |||
| 8004 | LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: " | |||
| 8005 |                   << "overriding computed VF.\n"); | |||
| 8006 | VF = ElementCount::getFixed(4); | |||
| 8007 | } | |||
| 8008 | } | |||
| 8009 | assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); | |||
| 8010 | assert(isPowerOf2_32(VF.getKnownMinValue()) && | |||
| 8011 |        "VF needs to be a power of two"); | |||
| 8012 | LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "") | |||
| 8013 |                   << "VF " << VF << " to build VPlans.\n"); | |||
| 8014 | buildVPlans(VF, VF); | |||
| 8015 | ||||
| 8016 | // For VPlan build stress testing, we bail out after VPlan construction. | |||
| 8017 | if (VPlanBuildStressTest) | |||
| 8018 | return VectorizationFactor::Disabled(); | |||
| 8019 | ||||
| 8020 | return {VF, 0 /*Cost*/}; | |||
| 8021 | } | |||
| 8022 | ||||
| 8023 | LLVM_DEBUG( | |||
| 8024 |     dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the " | |||
| 8025 |               "VPlan-native path.\n"); | |||
| 8026 | return VectorizationFactor::Disabled(); | |||
| 8027 | } | |||
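A compressed model of the VF selection just performed for the VPlan-native path: derive a VF from register width when the user supplied none, then force a vector VF under stress testing. The constants and names here are illustrative assumptions, not the exact control flow:

    unsigned nativePathVF(unsigned UserVF, unsigned RegBits,
                          unsigned WidestTypeBits, bool StressTest) {
      unsigned VF = UserVF ? UserVF : RegBits / WidestTypeBits;
      if (StressTest && VF <= 1)
        VF = 4; // ensure stress testing exercises a genuinely vector VF
      return VF;
    }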
| 8028 | ||||
| 8029 | Optional<VectorizationFactor> | |||
| 8030 | LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) { | |||
| 8031 | assert(OrigLoop->isInnermost() && "Inner loop expected."); | |||
| 8032 | FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC); | |||
| 8033 | if (!MaxFactors) // Cases that should not be vectorized or interleaved. | |||
| 8034 | return None; | |||
| 8035 | ||||
| 8036 | // Invalidate interleave groups if all blocks of loop will be predicated. | |||
| 8037 | if (CM.blockNeedsPredication(OrigLoop->getHeader()) && | |||
| 8038 | !useMaskedInterleavedAccesses(*TTI)) { | |||
| 8039 | LLVM_DEBUG( | |||
| 8040 |     dbgs() | |||
| 8041 |     << "LV: Invalidate all interleaved groups due to fold-tail by masking " | |||
| 8042 |        "which requires masked-interleaved support.\n"); | |||
| 8043 | if (CM.InterleaveInfo.invalidateGroups()) | |||
| 8044 | // Invalidating interleave groups also requires invalidating all decisions | |||
| 8045 | // based on them, which includes widening decisions and uniform and scalar | |||
| 8046 | // values. | |||
| 8047 | CM.invalidateCostModelingDecisions(); | |||
| 8048 | } | |||
| 8049 | ||||
| 8050 | ElementCount MaxUserVF = | |||
| 8051 | UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF; | |||
| 8052 | bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF); | |||
| 8053 | if (!UserVF.isZero() && UserVFIsLegal) { | |||
| 8054 | assert(isPowerOf2_32(UserVF.getKnownMinValue()) && | |||
| 8055 |        "VF needs to be a power of two"); | |||
| 8056 | // Collect the instructions (and their associated costs) that will be more | |||
| 8057 | // profitable to scalarize. | |||
| 8058 | if (CM.selectUserVectorizationFactor(UserVF)) { | |||
| 8059 | LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); | |||
| 8060 | CM.collectInLoopReductions(); | |||
| 8061 | buildVPlansWithVPRecipes(UserVF, UserVF); | |||
| 8062 | LLVM_DEBUG(printPlans(dbgs())); | |||
| 8063 | return {{UserVF, 0}}; | |||
| 8064 | } else | |||
| 8065 | reportVectorizationInfo("UserVF ignored because of invalid costs.", | |||
| 8066 | "InvalidCost", ORE, OrigLoop); | |||
| 8067 | } | |||
| 8068 | ||||
| 8069 | // Populate the set of Vectorization Factor Candidates. | |||
| 8070 | ElementCountSet VFCandidates; | |||
| 8071 | for (auto VF = ElementCount::getFixed(1); | |||
| 8072 | ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2) | |||
| 8073 | VFCandidates.insert(VF); | |||
| 8074 | for (auto VF = ElementCount::getScalable(1); | |||
| 8075 | ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2) | |||
| 8076 | VFCandidates.insert(VF); | |||
| 8077 | ||||
| 8078 | for (const auto &VF : VFCandidates) { | |||
| 8079 | // Collect Uniform and Scalar instructions after vectorization with VF. | |||
| 8080 | CM.collectUniformsAndScalars(VF); | |||
| 8081 | ||||
| 8082 | // Collect the instructions (and their associated costs) that will be more | |||
| 8083 | // profitable to scalarize. | |||
| 8084 | if (VF.isVector()) | |||
| 8085 | CM.collectInstsToScalarize(VF); | |||
| 8086 | } | |||
| 8087 | ||||
| 8088 | CM.collectInLoopReductions(); | |||
| 8089 | buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF); | |||
| 8090 | buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF); | |||
| 8091 | ||||
| 8092 | LLVM_DEBUG(printPlans(dbgs())); | |||
| 8093 | if (!MaxFactors.hasVector()) | |||
| 8094 | return VectorizationFactor::Disabled(); | |||
| 8095 | ||||
| 8096 | // Select the optimal vectorization factor. | |||
| 8097 | auto SelectedVF = CM.selectVectorizationFactor(VFCandidates); | |||
| 8098 | ||||
| 8099 | // Check if it is profitable to vectorize with runtime checks. | |||
| 8100 | unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks(); | |||
| 8101 | if (SelectedVF.Width.getKnownMinValue() > 1 && NumRuntimePointerChecks) { | |||
| 8102 | bool PragmaThresholdReached = | |||
| 8103 | NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold; | |||
| 8104 | bool ThresholdReached = | |||
| 8105 | NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold; | |||
| 8106 | if ((ThresholdReached && !Hints.allowReordering()) || | |||
| 8107 | PragmaThresholdReached) { | |||
| 8108 | ORE->emit([&]() { | |||
| 8109 | return OptimizationRemarkAnalysisAliasing( | |||
| 8110 | DEBUG_TYPE, "CantReorderMemOps", OrigLoop->getStartLoc(), | |||
| 8111 | OrigLoop->getHeader()) | |||
| 8112 | << "loop not vectorized: cannot prove it is safe to reorder " | |||
| 8113 | "memory operations"; | |||
| 8114 | }); | |||
| 8115 | LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n"); | |||
| 8116 | Hints.emitRemarkWithHints(); | |||
| 8117 | return VectorizationFactor::Disabled(); | |||
| 8118 | } | |||
| 8119 | } | |||
| 8120 | return SelectedVF; | |||
| 8121 | } | |||
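The candidate set assembled in plan() is simply the powers of two from 1 up to each maximum feasible factor. A minimal fixed-width sketch (the scalable set is built the same way):

    #include <vector>

    std::vector<unsigned> vfCandidates(unsigned MaxVF) {
      std::vector<unsigned> VFs;
      for (unsigned VF = 1; VF <= MaxVF; VF *= 2)
        VFs.push_back(VF);
      return VFs; // vfCandidates(16) == {1, 2, 4, 8, 16}
    }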
| 8122 | ||||
| 8123 | void LoopVectorizationPlanner::setBestPlan(ElementCount VF, unsigned UF) { | |||
| 8124 | LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF | |||
| 8125 |                   << '\n'); | |||
| 8126 | BestVF = VF; | |||
| 8127 | BestUF = UF; | |||
| 8128 | ||||
| 8129 | erase_if(VPlans, [VF](const VPlanPtr &Plan) { | |||
| 8130 | return !Plan->hasVF(VF); | |||
| 8131 | }); | |||
| 8132 | assert(VPlans.size() == 1 && "Best VF has not a single VPlan."); | |||
| 8133 | } | |||
| 8134 | ||||
| 8135 | void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV, | |||
| 8136 | DominatorTree *DT) { | |||
| 8137 | // Perform the actual loop transformation. | |||
| 8138 | ||||
| 8139 | // 1. Create a new empty loop. Unlink the old loop and connect the new one. | |||
| 8140 | assert(BestVF.hasValue() && "Vectorization Factor is missing"); | |||
| 8141 | assert(VPlans.size() == 1 && "Not a single VPlan to execute."); | |||
| 8142 | ||||
| 8143 | VPTransformState State{ | |||
| 8144 | *BestVF, BestUF, LI, DT, ILV.Builder, &ILV, VPlans.front().get()}; | |||
| 8145 | State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton(); | |||
| 8146 | State.TripCount = ILV.getOrCreateTripCount(nullptr); | |||
| 8147 | State.CanonicalIV = ILV.Induction; | |||
| 8148 | ||||
| 8149 | ILV.printDebugTracesAtStart(); | |||
| 8150 | ||||
| 8151 | //===------------------------------------------------===// | |||
| 8152 | // | |||
| 8153 | // Notice: any optimization or new instruction that goes | |||
| 8154 | // into the code below should also be implemented in | |||
| 8155 | // the cost-model. | |||
| 8156 | // | |||
| 8157 | //===------------------------------------------------===// | |||
| 8158 | ||||
| 8159 | // 2. Copy and widen instructions from the old loop into the new loop. | |||
| 8160 | VPlans.front()->execute(&State); | |||
| 8161 | ||||
| 8162 | // 3. Fix the vectorized code: take care of header phi's, live-outs, | |||
| 8163 | // predication, updating analyses. | |||
| 8164 | ILV.fixVectorizedLoop(State); | |||
| 8165 | ||||
| 8166 | ILV.printDebugTracesAtEnd(); | |||
| 8167 | } | |||
| 8168 | ||||
| 8169 | #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) | |||
| 8170 | void LoopVectorizationPlanner::printPlans(raw_ostream &O) { | |||
| 8171 | for (const auto &Plan : VPlans) | |||
| 8172 | if (PrintVPlansInDotFormat) | |||
| 8173 | Plan->printDOT(O); | |||
| 8174 | else | |||
| 8175 | Plan->print(O); | |||
| 8176 | } | |||
| 8177 | #endif | |||
| 8178 | ||||
| 8179 | void LoopVectorizationPlanner::collectTriviallyDeadInstructions( | |||
| 8180 | SmallPtrSetImpl<Instruction *> &DeadInstructions) { | |||
| 8181 | ||||
| 8182 | // We create new control-flow for the vectorized loop, so an original exit | |||
| 8183 | // condition will be dead after vectorization if it is only used by the | |||
| 8184 | // terminator. | |||
| 8185 | SmallVector<BasicBlock*> ExitingBlocks; | |||
| 8186 | OrigLoop->getExitingBlocks(ExitingBlocks); | |||
| 8187 | for (auto *BB : ExitingBlocks) { | |||
| 8188 | auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0)); | |||
| 8189 | if (!Cmp || !Cmp->hasOneUse()) | |||
| 8190 | continue; | |||
| 8191 | ||||
| 8192 | // TODO: we should introduce a getUniqueExitingBlocks on Loop | |||
| 8193 | if (!DeadInstructions.insert(Cmp).second) | |||
| 8194 | continue; | |||
| 8195 | ||||
| 8196 | // The operand of the icmp is often a dead trunc, used by IndUpdate. | |||
| 8197 | // TODO: can recurse through operands in general | |||
| 8198 | for (Value *Op : Cmp->operands()) { | |||
| 8199 | if (isa<TruncInst>(Op) && Op->hasOneUse()) | |||
| 8200 | DeadInstructions.insert(cast<Instruction>(Op)); | |||
| 8201 | } | |||
| 8202 | } | |||
| 8203 | ||||
| 8204 | // We create new "steps" for induction variable updates to which the original | |||
| 8205 | // induction variables map. An original update instruction will be dead if | |||
| 8206 | // all its users except the induction variable are dead. | |||
| 8207 | auto *Latch = OrigLoop->getLoopLatch(); | |||
| 8208 | for (auto &Induction : Legal->getInductionVars()) { | |||
| 8209 | PHINode *Ind = Induction.first; | |||
| 8210 | auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); | |||
| 8211 | ||||
| 8212 | // If the tail is to be folded by masking, the primary induction variable, | |||
| 8213 | // if it exists, isn't dead: it will be used for masking. Don't kill it. | |||
| 8214 | if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction()) | |||
| 8215 | continue; | |||
| 8216 | ||||
| 8217 | if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { | |||
| 8218 | return U == Ind || DeadInstructions.count(cast<Instruction>(U)); | |||
| 8219 | })) | |||
| 8220 | DeadInstructions.insert(IndUpdate); | |||
| 8221 | ||||
| 8222 | // We record as "Dead" also the type-casting instructions we had identified | |||
| 8223 | // during induction analysis. We don't need any handling for them in the | |||
| 8224 | // vectorized loop because we have proven that, under a proper runtime | |||
| 8225 | // test guarding the vectorized loop, the value of the phi, and the casted | |||
| 8226 | // value of the phi, are the same. The last instruction in this casting chain | |||
| 8227 | // will get its scalar/vector/widened def from the scalar/vector/widened def | |||
| 8228 | // of the respective phi node. Any other casts in the induction def-use chain | |||
| 8229 | // have no other uses outside the phi update chain, and will be ignored. | |||
| 8230 | InductionDescriptor &IndDes = Induction.second; | |||
| 8231 | const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); | |||
| 8232 | DeadInstructions.insert(Casts.begin(), Casts.end()); | |||
| 8233 | } | |||
| 8234 | } | |||
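The induction-update test above reduces to: an update is trivially dead when every user is either the induction phi itself or is already known dead. A self-contained sketch of that predicate, with placeholder types instead of LLVM classes:

    #include <set>
    #include <vector>

    template <typename Instr>
    bool inductionUpdateIsDead(Instr *Ind, const std::vector<Instr *> &Users,
                               const std::set<Instr *> &Dead) {
      for (Instr *U : Users)
        if (U != Ind && !Dead.count(U))
          return false; // a live user other than the phi keeps it alive
      return true;
    }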
| 8235 | ||||
| 8236 | Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; } | |||
| 8237 | ||||
| 8238 | Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; } | |||
| 8239 | ||||
| 8240 | Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step, | |||
| 8241 | Instruction::BinaryOps BinOp) { | |||
| 8242 | // When unrolling and the VF is 1, we only need to add a simple scalar. | |||
| 8243 | Type *Ty = Val->getType(); | |||
| 8244 | assert(!Ty->isVectorTy() && "Val must be a scalar"); | |||
| 8245 | ||||
| 8246 | if (Ty->isFloatingPointTy()) { | |||
| 8247 | Constant *C = ConstantFP::get(Ty, (double)StartIdx); | |||
| 8248 | ||||
| 8249 | // Floating-point operations inherit FMF via the builder's flags. | |||
| 8250 | Value *MulOp = Builder.CreateFMul(C, Step); | |||
| 8251 | return Builder.CreateBinOp(BinOp, Val, MulOp); | |||
| 8252 | } | |||
| 8253 | Constant *C = ConstantInt::get(Ty, StartIdx); | |||
| 8254 | return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction"); | |||
| 8255 | } | |||
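Since the unroller runs with VF = 1, the "step vector" above degenerates to a scalar: part StartIdx of the unrolled induction is Val + StartIdx * Step (the floating-point path goes through the supplied binary operator instead). A tiny integer-path sketch:

    // e.g. scalarStep(100, 2, 4) == 108 for unroll part 2 with step 4.
    long long scalarStep(long long Val, int StartIdx, long long Step) {
      return Val + StartIdx * Step;
    }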
| 8256 | ||||
| 8257 | static void AddRuntimeUnrollDisableMetaData(Loop *L) { | |||
| 8258 | SmallVector<Metadata *, 4> MDs; | |||
| 8259 | // Reserve first location for self reference to the LoopID metadata node. | |||
| 8260 | MDs.push_back(nullptr); | |||
| 8261 | bool IsUnrollMetadata = false; | |||
| 8262 | MDNode *LoopID = L->getLoopID(); | |||
| 8263 | if (LoopID) { | |||
| 8264 | // First find existing loop unrolling disable metadata. | |||
| 8265 | for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { | |||
| 8266 | auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); | |||
| 8267 | if (MD) { | |||
| 8268 | const auto *S = dyn_cast<MDString>(MD->getOperand(0)); | |||
| 8269 | IsUnrollMetadata = | |||
| 8270 | S && S->getString().startswith("llvm.loop.unroll.disable"); | |||
| 8271 | } | |||
| 8272 | MDs.push_back(LoopID->getOperand(i)); | |||
| 8273 | } | |||
| 8274 | } | |||
| 8275 | ||||
| 8276 | if (!IsUnrollMetadata) { | |||
| 8277 | // Add runtime unroll disable metadata. | |||
| 8278 | LLVMContext &Context = L->getHeader()->getContext(); | |||
| 8279 | SmallVector<Metadata *, 1> DisableOperands; | |||
| 8280 | DisableOperands.push_back( | |||
| 8281 | MDString::get(Context, "llvm.loop.unroll.runtime.disable")); | |||
| 8282 | MDNode *DisableNode = MDNode::get(Context, DisableOperands); | |||
| 8283 | MDs.push_back(DisableNode); | |||
| 8284 | MDNode *NewLoopID = MDNode::get(Context, MDs); | |||
| 8285 | // Set operand 0 to refer to the loop id itself. | |||
| 8286 | NewLoopID->replaceOperandWith(0, NewLoopID); | |||
| 8287 | L->setLoopID(NewLoopID); | |||
| 8288 | } | |||
| 8289 | } | |||
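The metadata update above follows an append-once pattern: scan the existing loop ID for an unroll-disable entry and append the runtime-disable marker only when none is found. A sketch with strings standing in for MDNode operands (the real code additionally keeps operand 0 as a self-reference):

    #include <string>
    #include <vector>

    void addRuntimeUnrollDisableOnce(std::vector<std::string> &LoopID) {
      for (const std::string &Op : LoopID)
        if (Op.rfind("llvm.loop.unroll.disable", 0) == 0)
          return; // unrolling already disabled; nothing to add
      LoopID.push_back("llvm.loop.unroll.runtime.disable");
    }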
| 8290 | ||||
| 8291 | //===--------------------------------------------------------------------===// | |||
| 8292 | // EpilogueVectorizerMainLoop | |||
| 8293 | //===--------------------------------------------------------------------===// | |||
| 8294 | ||||
| 8295 | /// This function is partially responsible for generating the control flow | |||
| 8296 | /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. | |||
| 8297 | BasicBlock *EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() { | |||
| 8298 | MDNode *OrigLoopID = OrigLoop->getLoopID(); | |||
| 8299 | Loop *Lp = createVectorLoopSkeleton(""); | |||
| 8300 | ||||
| 8301 | // Generate the code to check the minimum iteration count of the vector | |||
| 8302 | // epilogue (see below). | |||
| 8303 | EPI.EpilogueIterationCountCheck = | |||
| 8304 | emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true); | |||
| 8305 | EPI.EpilogueIterationCountCheck->setName("iter.check"); | |||
| 8306 | ||||
| 8307 | // Generate the code to check any assumptions that we've made for SCEV | |||
| 8308 | // expressions. | |||
| 8309 | EPI.SCEVSafetyCheck = emitSCEVChecks(Lp, LoopScalarPreHeader); | |||
| 8310 | ||||
| 8311 | // Generate the code that checks at runtime if arrays overlap. We put the | |||
| 8312 | // checks into a separate block to make the more common case of few elements | |||
| 8313 | // faster. | |||
| 8314 | EPI.MemSafetyCheck = emitMemRuntimeChecks(Lp, LoopScalarPreHeader); | |||
| 8315 | ||||
| 8316 | // Generate the iteration count check for the main loop, *after* the check | |||
| 8317 | // for the epilogue loop, so that the path-length is shorter for the case | |||
| 8318 | // that goes directly through the vector epilogue. The longer-path length for | |||
| 8319 | // the main loop is compensated for by the gain from vectorizing the larger | |||
| 8320 | // trip count. Note: the branch will get updated later on when we vectorize | |||
| 8321 | // the epilogue. | |||
| 8322 | EPI.MainLoopIterationCountCheck = | |||
| 8323 | emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false); | |||
| 8324 | ||||
| 8325 | // Generate the induction variable. | |||
| 8326 | OldInduction = Legal->getPrimaryInduction(); | |||
| 8327 | Type *IdxTy = Legal->getWidestInductionType(); | |||
| 8328 | Value *StartIdx = ConstantInt::get(IdxTy, 0); | |||
| 8329 | Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF); | |||
| 8330 | Value *CountRoundDown = getOrCreateVectorTripCount(Lp); | |||
| 8331 | EPI.VectorTripCount = CountRoundDown; | |||
| 8332 | Induction = | |||
| 8333 | createInductionVariable(Lp, StartIdx, CountRoundDown, Step, | |||
| 8334 | getDebugLocFromInstOrOperands(OldInduction)); | |||
| 8335 | ||||
| 8336 | // Skip induction resume value creation here because they will be created in | |||
| 8337 | // the second pass. If we created them here, they wouldn't be used anyway, | |||
| 8338 | // because the vplan in the second pass still contains the inductions from the | |||
| 8339 | // original loop. | |||
| 8340 | ||||
| 8341 | return completeLoopSkeleton(Lp, OrigLoopID); | |||
| 8342 | } | |||
| 8343 | ||||
| 8344 | void EpilogueVectorizerMainLoop::printDebugTracesAtStart() { | |||
| 8345 | LLVM_DEBUG({ | |||
| 8346 |   dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n" | |||
| 8347 |          << "Main Loop VF:" << EPI.MainLoopVF.getKnownMinValue() | |||
| 8348 |          << ", Main Loop UF:" << EPI.MainLoopUF | |||
| 8349 |          << ", Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue() | |||
| 8350 |          << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; | |||
| 8351 | }); | |||
| 8352 | } | |||
| 8353 | ||||
| 8354 | void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() { | |||
| 8355 | DEBUG_WITH_TYPE(VerboseDebug, { | |||
| 8356 |   dbgs() << "intermediate fn:\n" << *Induction->getFunction() << "\n"; | |||
| 8357 | }); | |||
| 8358 | } | |||
| 8359 | ||||
| 8360 | BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck( | |||
| 8361 | Loop *L, BasicBlock *Bypass, bool ForEpilogue) { | |||
| 8362 | assert(L && "Expected valid Loop."); | |||
| 8363 | assert(Bypass && "Expected valid bypass basic block."); | |||
| 8364 | unsigned VFactor = | |||
| 8365 | ForEpilogue ? EPI.EpilogueVF.getKnownMinValue() : VF.getKnownMinValue(); | |||
| 8366 | unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF; | |||
| 8367 | Value *Count = getOrCreateTripCount(L); | |||
| 8368 | // Reuse existing vector loop preheader for TC checks. | |||
| 8369 | // Note that new preheader block is generated for vector loop. | |||
| 8370 | BasicBlock *const TCCheckBlock = LoopVectorPreHeader; | |||
| 8371 | IRBuilder<> Builder(TCCheckBlock->getTerminator()); | |||
| 8372 | ||||
| 8373 | // Generate code to check if the loop's trip count is less than VF * UF of the | |||
| 8374 | // main vector loop. | |||
| 8375 | auto P = Cost->requiresScalarEpilogue(ForEpilogue ? EPI.EpilogueVF : VF) ? | |||
| 8376 | ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; | |||
| 8377 | ||||
| 8378 | Value *CheckMinIters = Builder.CreateICmp( | |||
| 8379 | P, Count, ConstantInt::get(Count->getType(), VFactor * UFactor), | |||
| 8380 | "min.iters.check"); | |||
| 8381 | ||||
| 8382 | if (!ForEpilogue) | |||
| 8383 | TCCheckBlock->setName("vector.main.loop.iter.check"); | |||
| 8384 | ||||
| 8385 | // Create new preheader for vector loop. | |||
| 8386 | LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), | |||
| 8387 | DT, LI, nullptr, "vector.ph"); | |||
| 8388 | ||||
| 8389 | if (ForEpilogue) { | |||
| 8390 | assert(DT->properlyDominates(DT->getNode(TCCheckBlock), | |||
| 8391 |                              DT->getNode(Bypass)->getIDom()) && | |||
| 8392 |        "TC check is expected to dominate Bypass"); | |||
| 8393 | ||||
| 8394 | // Update dominator for Bypass & LoopExit. | |||
| 8395 | DT->changeImmediateDominator(Bypass, TCCheckBlock); | |||
| 8396 | if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF)) | |||
| 8397 | // For loops with multiple exits, there's no edge from the middle block | |||
| 8398 | // to exit blocks (as the epilogue must run) and thus no need to update | |||
| 8399 | // the immediate dominator of the exit blocks. | |||
| 8400 | DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); | |||
| 8401 | ||||
| 8402 | LoopBypassBlocks.push_back(TCCheckBlock); | |||
| 8403 | ||||
| 8404 | // Save the trip count so we don't have to regenerate it in the | |||
| 8405 | // vec.epilog.iter.check. This is safe to do because the trip count | |||
| 8406 | // generated here dominates the vector epilog iter check. | |||
| 8407 | EPI.TripCount = Count; | |||
| 8408 | } | |||
| 8409 | ||||
| 8410 | ReplaceInstWithInst( | |||
| 8411 | TCCheckBlock->getTerminator(), | |||
| 8412 | BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); | |||
| 8413 | ||||
| 8414 | return TCCheckBlock; | |||
| 8415 | } | |||
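A numeric reading of the minimum-iteration guard just emitted, under the assumption that VF=4 and UF=2 so the threshold is 8: with predicate ULT a trip count of exactly 8 still enters the vector loop, while ULE (used when a scalar epilogue must run) bypasses at 8 so at least one scalar iteration remains. A small model of that branch condition:

    // Models the min-iters check: branch to the scalar loop when the trip
    // count cannot cover VF * UF (ULE when a scalar epilogue must run,
    // ULT otherwise).
    bool bypassVectorLoop(unsigned TripCount, unsigned VF, unsigned UF,
                          bool RequiresScalarEpilogue) {
      unsigned Threshold = VF * UF;
      return RequiresScalarEpilogue ? TripCount <= Threshold
                                    : TripCount < Threshold;
      // e.g. TripCount=8, VF=4, UF=2: ULT keeps the vector loop, ULE
      // bypasses, guaranteeing the scalar epilogue at least one iteration.
    }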
| 8416 | ||||
| 8417 | //===--------------------------------------------------------------------===// | |||
| 8418 | // EpilogueVectorizerEpilogueLoop | |||
| 8419 | //===--------------------------------------------------------------------===// | |||
| 8420 | ||||
| 8421 | /// This function is partially responsible for generating the control flow | |||
| 8422 | /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. | |||
| 8423 | BasicBlock * | |||
| 8424 | EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() { | |||
| 8425 | MDNode *OrigLoopID = OrigLoop->getLoopID(); | |||
| 8426 | Loop *Lp = createVectorLoopSkeleton("vec.epilog."); | |||
| 8427 | ||||
| 8428 | // Now compare the remaining count; if there aren't enough iterations to | |||
| 8429 | // execute the vectorized epilogue, skip to the scalar part. | |||
| 8430 | BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader; | |||
| 8431 | VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check"); | |||
| 8432 | LoopVectorPreHeader = | |||
| 8433 | SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, | |||
| 8434 | LI, nullptr, "vec.epilog.ph"); | |||
| 8435 | emitMinimumVectorEpilogueIterCountCheck(Lp, LoopScalarPreHeader, | |||
| 8436 | VecEpilogueIterationCountCheck); | |||
| 8437 | ||||
| 8438 | // Adjust the control flow taking the state info from the main loop | |||
| 8439 | // vectorization into account. | |||
| 8440 | assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck && | |||
| 8441 |        "expected this to be saved from the previous pass."); | |||
| 8442 | EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith( | |||
| 8443 | VecEpilogueIterationCountCheck, LoopVectorPreHeader); | |||
| 8444 | ||||
| 8445 | DT->changeImmediateDominator(LoopVectorPreHeader, | |||
| 8446 | EPI.MainLoopIterationCountCheck); | |||
| 8447 | ||||
| 8448 | EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith( | |||
| 8449 | VecEpilogueIterationCountCheck, LoopScalarPreHeader); | |||
| 8450 | ||||
| 8451 | if (EPI.SCEVSafetyCheck) | |||
| 8452 | EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith( | |||
| 8453 | VecEpilogueIterationCountCheck, LoopScalarPreHeader); | |||
| 8454 | if (EPI.MemSafetyCheck) | |||
| 8455 | EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith( | |||
| 8456 | VecEpilogueIterationCountCheck, LoopScalarPreHeader); | |||
| 8457 | ||||
| 8458 | DT->changeImmediateDominator( | |||
| 8459 | VecEpilogueIterationCountCheck, | |||
| 8460 | VecEpilogueIterationCountCheck->getSinglePredecessor()); | |||
| 8461 | ||||
| 8462 | DT->changeImmediateDominator(LoopScalarPreHeader, | |||
| 8463 | EPI.EpilogueIterationCountCheck); | |||
| 8464 | if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF)) | |||
| 8465 | // If there is an epilogue which must run, there's no edge from the | |||
| 8466 | // middle block to exit blocks and thus no need to update the immediate | |||
| 8467 | // dominator of the exit blocks. | |||
| 8468 | DT->changeImmediateDominator(LoopExitBlock, | |||
| 8469 | EPI.EpilogueIterationCountCheck); | |||
| 8470 | ||||
| 8471 | // Keep track of bypass blocks, as they feed start values to the induction | |||
| 8472 | // phis in the scalar loop preheader. | |||
| 8473 | if (EPI.SCEVSafetyCheck) | |||
| 8474 | LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck); | |||
| 8475 | if (EPI.MemSafetyCheck) | |||
| 8476 | LoopBypassBlocks.push_back(EPI.MemSafetyCheck); | |||
| 8477 | LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck); | |||
| 8478 | ||||
| 8479 | // Generate a resume induction for the vector epilogue and put it in the | |||
| 8480 | // vector epilogue preheader | |||
| 8481 | Type *IdxTy = Legal->getWidestInductionType(); | |||
| 8482 | PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val", | |||
| 8483 | LoopVectorPreHeader->getFirstNonPHI()); | |||
| 8484 | EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck); | |||
| 8485 | EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0), | |||
| 8486 | EPI.MainLoopIterationCountCheck); | |||
| 8487 | ||||
| 8488 | // Generate the induction variable. | |||
| 8489 | OldInduction = Legal->getPrimaryInduction(); | |||
| 8490 | Value *CountRoundDown = getOrCreateVectorTripCount(Lp); | |||
| 8491 | Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF); | |||
| 8492 | Value *StartIdx = EPResumeVal; | |||
| 8493 | Induction = | |||
| 8494 | createInductionVariable(Lp, StartIdx, CountRoundDown, Step, | |||
| 8495 | getDebugLocFromInstOrOperands(OldInduction)); | |||
| 8496 | ||||
| 8497 | // Generate induction resume values. These variables save the new starting | |||
| 8498 | // indexes for the scalar loop. They are used to test if there are any tail | |||
| 8499 | // iterations left once the vector loop has completed. | |||
| 8500 | // Note that when the vectorized epilogue is skipped due to iteration count | |||
| 8501 | // check, then the resume value for the induction variable comes from | |||
| 8502 | // the trip count of the main vector loop, hence passing the AdditionalBypass | |||
| 8503 | // argument. | |||
| 8504 | createInductionResumeValues(Lp, CountRoundDown, | |||
| 8505 | {VecEpilogueIterationCountCheck, | |||
| 8506 | EPI.VectorTripCount} /* AdditionalBypass */); | |||
| 8507 | ||||
| 8508 | AddRuntimeUnrollDisableMetaData(Lp); | |||
| 8509 | return completeLoopSkeleton(Lp, OrigLoopID); | |||
| 8510 | } | |||
| 8511 | ||||
| 8512 | BasicBlock * | |||
| 8513 | EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck( | |||
| 8514 | Loop *L, BasicBlock *Bypass, BasicBlock *Insert) { | |||
| 8515 | ||||
| 8516 | assert(EPI.TripCount && | |||
| 8517 | "Expected trip count to have been saved in the first pass."); | |||
| 8518 | assert( | |||
| 8519 | (!isa<Instruction>(EPI.TripCount) || | |||
| 8520 | DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) && | |||
| 8521 | "saved trip count does not dominate insertion point."); | |||
| 8522 | Value *TC = EPI.TripCount; | |||
| 8523 | IRBuilder<> Builder(Insert->getTerminator()); | |||
| 8524 | Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining"); | |||
| 8525 | ||||
| 8526 | // Generate code to check if the loop's trip count is less than VF * UF of the | |||
| 8527 | // vector epilogue loop. | |||
| 8528 | auto P = Cost->requiresScalarEpilogue(EPI.EpilogueVF) ? | |||
| 8529 | ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; | |||
| 8530 | ||||
| 8531 | Value *CheckMinIters = Builder.CreateICmp( | |||
| 8532 | P, Count, | |||
| 8533 | ConstantInt::get(Count->getType(), | |||
| 8534 | EPI.EpilogueVF.getKnownMinValue() * EPI.EpilogueUF), | |||
| 8535 | "min.epilog.iters.check"); | |||
| 8536 | ||||
| 8537 | ReplaceInstWithInst( | |||
| 8538 | Insert->getTerminator(), | |||
| 8539 | BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); | |||
| 8540 | ||||
| 8541 | LoopBypassBlocks.push_back(Insert); | |||
| 8542 | return Insert; | |||
| 8543 | } | |||
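The predicate chosen above deserves a second look: when a scalar epilogue must run, even a remainder of exactly VF * UF iterations has to bypass the vector epilogue so that at least one iteration is left over, hence ULE; otherwise ULT suffices. A minimal self-contained sketch of that arithmetic (Count and VFxUF are illustrative stand-ins, not vectorizer API):

```cpp
#include <iostream>

int main() {
  // Remaining iterations after the main vector loop vs. the epilogue's
  // VF * UF. With a required scalar epilogue, Count == VFxUF must still
  // bypass (ULE) so one iteration remains for the scalar loop; without
  // one, only Count < VFxUF bypasses (ULT).
  unsigned Count = 8, VFxUF = 8;
  std::cout << "ULE bypasses: " << (Count <= VFxUF) << "\n"; // prints 1
  std::cout << "ULT bypasses: " << (Count < VFxUF) << "\n";  // prints 0
}
```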
| 8544 | ||||
| 8545 | void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() { | |||
| 8546 | LLVM_DEBUG({ | |||
| 8547 | dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n" | |||
| 8548 | << "Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue() | |||
| 8549 | << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; | |||
| 8550 | }); | |||
| 8551 | } | |||
| 8552 | ||||
| 8553 | void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() { | |||
| 8554 | DEBUG_WITH_TYPE(VerboseDebug, { | |||
| 8555 | dbgs() << "final fn:\n" << *Induction->getFunction() << "\n"; | |||
| 8556 | }); | |||
| 8557 | } | |||
| 8558 | ||||
| 8559 | bool LoopVectorizationPlanner::getDecisionAndClampRange( | |||
| 8560 | const std::function<bool(ElementCount)> &Predicate, VFRange &Range) { | |||
| 8561 | assert(!Range.isEmpty() && "Trying to test an empty VF range."); | |||
| 8562 | bool PredicateAtRangeStart = Predicate(Range.Start); | |||
| 8563 | ||||
| 8564 | for (ElementCount TmpVF = Range.Start * 2; | |||
| 8565 | ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2) | |||
| 8566 | if (Predicate(TmpVF) != PredicateAtRangeStart) { | |||
| 8567 | Range.End = TmpVF; | |||
| 8568 | break; | |||
| 8569 | } | |||
| 8570 | ||||
| 8571 | return PredicateAtRangeStart; | |||
| 8572 | } | |||
| 8573 | ||||
| 8574 | /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF, | |||
| 8575 | /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range | |||
| 8576 | /// of VF's starting at a given VF and extending it as much as possible. Each | |||
| 8577 | /// vectorization decision can potentially shorten this sub-range during | |||
| 8578 | /// buildVPlan(). | |||
| 8579 | void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF, | |||
| 8580 | ElementCount MaxVF) { | |||
| 8581 | auto MaxVFPlusOne = MaxVF.getWithIncrement(1); | |||
| 8582 | for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) { | |||
| 8583 | VFRange SubRange = {VF, MaxVFPlusOne}; | |||
| 8584 | VPlans.push_back(buildVPlan(SubRange)); | |||
| 8585 | VF = SubRange.End; | |||
| 8586 | } | |||
| 8587 | } | |||
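To see how getDecisionAndClampRange and buildVPlans interact, the following minimal sketch (plain C++ with a simplified stand-in for ElementCount/VFRange; SimpleRange and clampRange are hypothetical names, and scalability is ignored) carves a VF range into maximal sub-ranges over which one decision stays constant:

```cpp
#include <cassert>
#include <functional>
#include <iostream>

// Simplified stand-in for VFRange: half-open [Start, End), powers of two.
struct SimpleRange { unsigned Start, End; };

// Mirrors the clamping logic above: evaluate the predicate at Range.Start,
// then shrink Range.End to the first VF where the answer flips.
static bool clampRange(const std::function<bool(unsigned)> &Pred,
                       SimpleRange &Range) {
  assert(Range.Start < Range.End && "empty VF range");
  bool AtStart = Pred(Range.Start);
  for (unsigned VF = Range.Start * 2; VF < Range.End; VF *= 2)
    if (Pred(VF) != AtStart) { Range.End = VF; break; }
  return AtStart;
}

int main() {
  // Pretend a single decision flips at VF == 8; watch buildVPlans-style
  // iteration carve {2,4,8,16} into sub-ranges of equal decisions.
  auto Pred = [](unsigned VF) { return VF >= 8; };
  unsigned MinVF = 2, MaxVFPlusOne = 17;
  for (unsigned VF = MinVF; VF < MaxVFPlusOne;) {
    SimpleRange Sub{VF, MaxVFPlusOne};
    bool Decision = clampRange(Pred, Sub);
    std::cout << "plan for [" << Sub.Start << "," << Sub.End
              << ") decision=" << Decision << "\n";
    VF = Sub.End; // the next plan starts where this one was clamped
  }
  // Prints: plan for [2,8) decision=0
  //         plan for [8,17) decision=1
}
```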
| 8588 | ||||
| 8589 | VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst, | |||
| 8590 | VPlanPtr &Plan) { | |||
| 8591 | assert(is_contained(predecessors(Dst), Src) && "Invalid edge"); | |||
| 8592 | ||||
| 8593 | // Look for cached value. | |||
| 8594 | std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); | |||
| 8595 | EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); | |||
| 8596 | if (ECEntryIt != EdgeMaskCache.end()) | |||
| 8597 | return ECEntryIt->second; | |||
| 8598 | ||||
| 8599 | VPValue *SrcMask = createBlockInMask(Src, Plan); | |||
| 8600 | ||||
| 8601 | // The terminator has to be a branch inst! | |||
| 8602 | BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); | |||
| 8603 | assert(BI && "Unexpected terminator found"); | |||
| 8604 | ||||
| 8605 | if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1)) | |||
| 8606 | return EdgeMaskCache[Edge] = SrcMask; | |||
| 8607 | ||||
| 8608 | // If source is an exiting block, we know the exit edge is dynamically dead | |||
| 8609 | // in the vector loop, and thus we don't need to restrict the mask. Avoid | |||
| 8610 | // adding uses of an otherwise potentially dead instruction. | |||
| 8611 | if (OrigLoop->isLoopExiting(Src)) | |||
| 8612 | return EdgeMaskCache[Edge] = SrcMask; | |||
| 8613 | ||||
| 8614 | VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition()); | |||
| 8615 | assert(EdgeMask && "No Edge Mask found for condition"); | |||
| 8616 | ||||
| 8617 | if (BI->getSuccessor(0) != Dst) | |||
| 8618 | EdgeMask = Builder.createNot(EdgeMask); | |||
| 8619 | ||||
| 8620 | if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND. | |||
| 8621 | // The condition is 'SrcMask && EdgeMask', which is equivalent to | |||
| 8622 | // 'select i1 SrcMask, i1 EdgeMask, i1 false'. | |||
| 8623 | // The select version does not introduce new UB if SrcMask is false and | |||
| 8624 | // EdgeMask is poison. Using 'and' here introduces undefined behavior. | |||
| 8625 | VPValue *False = Plan->getOrAddVPValue( | |||
| 8626 | ConstantInt::getFalse(BI->getCondition()->getType())); | |||
| 8627 | EdgeMask = Builder.createSelect(SrcMask, EdgeMask, False); | |||
| 8628 | } | |||
| 8629 | ||||
| 8630 | return EdgeMaskCache[Edge] = EdgeMask; | |||
| 8631 | } | |||
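To make the select-versus-and comment above concrete, here is a toy three-valued model in which poison is an empty std::optional (an analogy for the IR semantics only, not LLVM code; andMask and selectMask are hypothetical names). The select form never exposes a possibly-poison edge condition when the source mask is false:

```cpp
#include <iostream>
#include <optional>

// Model an i1 that may be poison as an empty optional (analogy only).
using MaybeBool = std::optional<bool>;

// 'and %src, %edge': poison in either operand propagates to the result.
MaybeBool andMask(MaybeBool Src, MaybeBool Edge) {
  if (!Src || !Edge)
    return std::nullopt; // poison propagates through 'and'
  return *Src && *Edge;
}

// 'select i1 %src, i1 %edge, i1 false': when Src is false, Edge is never
// chosen, so a poison edge condition cannot leak into the mask.
MaybeBool selectMask(MaybeBool Src, MaybeBool Edge) {
  if (!Src)
    return std::nullopt; // a poison selector is itself poison
  return *Src ? Edge : MaybeBool(false);
}

int main() {
  MaybeBool Src = false;         // lane's block-in mask is off
  MaybeBool Edge = std::nullopt; // edge condition is poison
  std::cout << (andMask(Src, Edge).has_value() ? "defined" : "poison")
            << "\n"; // poison
  std::cout << (selectMask(Src, Edge).has_value() ? "defined" : "poison")
            << "\n"; // defined (false)
}
```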
| 8632 | ||||
| 8633 | VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) { | |||
| 8634 | assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); | |||
| 8635 | ||||
| 8636 | // Look for cached value. | |||
| 8637 | BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); | |||
| 8638 | if (BCEntryIt != BlockMaskCache.end()) | |||
| 8639 | return BCEntryIt->second; | |||
| 8640 | ||||
| 8641 | // All-one mask is modelled as no-mask following the convention for masked | |||
| 8642 | // load/store/gather/scatter. Initialize BlockMask to no-mask. | |||
| 8643 | VPValue *BlockMask = nullptr; | |||
| 8644 | ||||
| 8645 | if (OrigLoop->getHeader() == BB) { | |||
| 8646 | if (!CM.blockNeedsPredication(BB)) | |||
| 8647 | return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one. | |||
| 8648 | ||||
| 8649 | // Create the block in mask as the first non-phi instruction in the block. | |||
| 8650 | VPBuilder::InsertPointGuard Guard(Builder); | |||
| 8651 | auto NewInsertionPoint = Builder.getInsertBlock()->getFirstNonPhi(); | |||
| 8652 | Builder.setInsertPoint(Builder.getInsertBlock(), NewInsertionPoint); | |||
| 8653 | ||||
| 8654 | // Introduce the early-exit compare IV <= BTC to form header block mask. | |||
| 8655 | // This is used instead of IV < TC because TC may wrap, unlike BTC. | |||
| 8656 | // Start by constructing the desired canonical IV. | |||
| 8657 | VPValue *IV = nullptr; | |||
| 8658 | if (Legal->getPrimaryInduction()) | |||
| 8659 | IV = Plan->getOrAddVPValue(Legal->getPrimaryInduction()); | |||
| 8660 | else { | |||
| 8661 | auto IVRecipe = new VPWidenCanonicalIVRecipe(); | |||
| 8662 | Builder.getInsertBlock()->insert(IVRecipe, NewInsertionPoint); | |||
| 8663 | IV = IVRecipe->getVPSingleValue(); | |||
| 8664 | } | |||
| 8665 | VPValue *BTC = Plan->getOrCreateBackedgeTakenCount(); | |||
| 8666 | bool TailFolded = !CM.isScalarEpilogueAllowed(); | |||
| 8667 | ||||
| 8668 | if (TailFolded && CM.TTI.emitGetActiveLaneMask()) { | |||
| 8669 | // While ActiveLaneMask is a binary op that consumes the loop tripcount | |||
| 8670 | // as a second argument, we only pass the IV here and extract the | |||
| 8671 | // tripcount from the transform state where codegen of the VP instructions | |||
| 8672 | // happens. | |||
| 8673 | BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV}); | |||
| 8674 | } else { | |||
| 8675 | BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC}); | |||
| 8676 | } | |||
| 8677 | return BlockMaskCache[BB] = BlockMask; | |||
| 8678 | } | |||
| 8679 | ||||
| 8680 | // This is the block mask. We OR all incoming edges. | |||
| 8681 | for (auto *Predecessor : predecessors(BB)) { | |||
| 8682 | VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan); | |||
| 8683 | if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too. | |||
| 8684 | return BlockMaskCache[BB] = EdgeMask; | |||
| 8685 | ||||
| 8686 | if (!BlockMask) { // BlockMask has its initialized nullptr value. | |||
| 8687 | BlockMask = EdgeMask; | |||
| 8688 | continue; | |||
| 8689 | } | |||
| 8690 | ||||
| 8691 | BlockMask = Builder.createOr(BlockMask, EdgeMask); | |||
| 8692 | } | |||
| 8693 | ||||
| 8694 | return BlockMaskCache[BB] = BlockMask; | |||
| 8695 | } | |||
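The header-mask comment above (compare IV <= BTC rather than IV < TC) is easiest to see with a trip count at the type's wrap boundary. A small 8-bit numeric illustration (plain C++, analogy only, not vectorizer code):

```cpp
#include <cstdint>
#include <iostream>

int main() {
  // A loop that runs 256 times with an 8-bit counter: the trip count 256
  // wraps to 0 in 8 bits, but the backedge-taken count 255 is exact.
  uint8_t TC = static_cast<uint8_t>(256); // wraps to 0
  uint8_t BTC = 255;                      // 256 - 1, representable
  for (unsigned IV : {0u, 1u, 255u}) {
    bool ltTC = IV < TC;   // always false: 'IV < 0' never fires
    bool leBTC = IV <= BTC; // true for every active lane
    std::cout << "IV=" << IV << " IV<TC=" << ltTC
              << " IV<=BTC=" << leBTC << "\n";
  }
}
```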
| 8696 | ||||
| 8697 | VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I, | |||
| 8698 | ArrayRef<VPValue *> Operands, | |||
| 8699 | VFRange &Range, | |||
| 8700 | VPlanPtr &Plan) { | |||
| 8701 | assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && | |||
| 8702 | "Must be called with either a load or store"); | |||
| 8703 | ||||
| 8704 | auto willWiden = [&](ElementCount VF) -> bool { | |||
| 8705 | if (VF.isScalar()) | |||
| 8706 | return false; | |||
| 8707 | LoopVectorizationCostModel::InstWidening Decision = | |||
| 8708 | CM.getWideningDecision(I, VF); | |||
| 8709 | assert(Decision != LoopVectorizationCostModel::CM_Unknown && | |||
| 8710 | "CM decision should be taken at this point."); | |||
| 8711 | if (Decision == LoopVectorizationCostModel::CM_Interleave) | |||
| 8712 | return true; | |||
| 8713 | if (CM.isScalarAfterVectorization(I, VF) || | |||
| 8714 | CM.isProfitableToScalarize(I, VF)) | |||
| 8715 | return false; | |||
| 8716 | return Decision != LoopVectorizationCostModel::CM_Scalarize; | |||
| 8717 | }; | |||
| 8718 | ||||
| 8719 | if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) | |||
| 8720 | return nullptr; | |||
| 8721 | ||||
| 8722 | VPValue *Mask = nullptr; | |||
| 8723 | if (Legal->isMaskRequired(I)) | |||
| 8724 | Mask = createBlockInMask(I->getParent(), Plan); | |||
| 8725 | ||||
| 8726 | if (LoadInst *Load = dyn_cast<LoadInst>(I)) | |||
| 8727 | return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask); | |||
| 8728 | ||||
| 8729 | StoreInst *Store = cast<StoreInst>(I); | |||
| 8730 | return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0], | |||
| 8731 | Mask); | |||
| 8732 | } | |||
| 8733 | ||||
| 8734 | VPWidenIntOrFpInductionRecipe * | |||
| 8735 | VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi, | |||
| 8736 | ArrayRef<VPValue *> Operands) const { | |||
| 8737 | // Check if this is an integer or fp induction. If so, build the recipe that | |||
| 8738 | // produces its scalar and vector values. | |||
| 8739 | InductionDescriptor II = Legal->getInductionVars().lookup(Phi); | |||
| 8740 | if (II.getKind() == InductionDescriptor::IK_IntInduction || | |||
| 8741 | II.getKind() == InductionDescriptor::IK_FpInduction) { | |||
| 8742 | assert(II.getStartValue() == | |||
| 8743 | Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())); | |||
| 8744 | const SmallVectorImpl<Instruction *> &Casts = II.getCastInsts(); | |||
| 8745 | return new VPWidenIntOrFpInductionRecipe( | |||
| 8746 | Phi, Operands[0], Casts.empty() ? nullptr : Casts.front()); | |||
| 8747 | } | |||
| 8748 | ||||
| 8749 | return nullptr; | |||
| 8750 | } | |||
| 8751 | ||||
| 8752 | VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate( | |||
| 8753 | TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range, | |||
| 8754 | VPlan &Plan) const { | |||
| 8755 | // Optimize the special case where the source is a constant integer | |||
| 8756 | // induction variable. Notice that we can only optimize the 'trunc' case | |||
| 8757 | // because (a) FP conversions lose precision, (b) sext/zext may wrap, and | |||
| 8758 | // (c) other casts depend on pointer size. | |||
| 8759 | ||||
| 8760 | // Determine whether \p K is a truncation based on an induction variable that | |||
| 8761 | // can be optimized. | |||
| 8762 | auto isOptimizableIVTruncate = | |||
| 8763 | [&](Instruction *K) -> std::function<bool(ElementCount)> { | |||
| 8764 | return [=](ElementCount VF) -> bool { | |||
| 8765 | return CM.isOptimizableIVTruncate(K, VF); | |||
| 8766 | }; | |||
| 8767 | }; | |||
| 8768 | ||||
| 8769 | if (LoopVectorizationPlanner::getDecisionAndClampRange( | |||
| 8770 | isOptimizableIVTruncate(I), Range)) { | |||
| 8771 | ||||
| 8772 | InductionDescriptor II = | |||
| 8773 | Legal->getInductionVars().lookup(cast<PHINode>(I->getOperand(0))); | |||
| 8774 | VPValue *Start = Plan.getOrAddVPValue(II.getStartValue()); | |||
| 8775 | return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)), | |||
| 8776 | Start, nullptr, I); | |||
| 8777 | } | |||
| 8778 | return nullptr; | |||
| 8779 | } | |||
| 8780 | ||||
| 8781 | VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi, | |||
| 8782 | ArrayRef<VPValue *> Operands, | |||
| 8783 | VPlanPtr &Plan) { | |||
| 8784 | // If all incoming values are equal, the incoming VPValue can be used directly | |||
| 8785 | // instead of creating a new VPBlendRecipe. | |||
| 8786 | VPValue *FirstIncoming = Operands[0]; | |||
| 8787 | if (all_of(Operands, [FirstIncoming](const VPValue *Inc) { | |||
| 8788 | return FirstIncoming == Inc; | |||
| 8789 | })) { | |||
| 8790 | return Operands[0]; | |||
| 8791 | } | |||
| 8792 | ||||
| 8793 | // We know that all PHIs in non-header blocks are converted into selects, so | |||
| 8794 | // we don't have to worry about the insertion order and we can just use the | |||
| 8795 | // builder. At this point we generate the predication tree. There may be | |||
| 8796 | // duplications since this is a simple recursive scan, but future | |||
| 8797 | // optimizations will clean it up. | |||
| 8798 | SmallVector<VPValue *, 2> OperandsWithMask; | |||
| 8799 | unsigned NumIncoming = Phi->getNumIncomingValues(); | |||
| 8800 | ||||
| 8801 | for (unsigned In = 0; In < NumIncoming; In++) { | |||
| 8802 | VPValue *EdgeMask = | |||
| 8803 | createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan); | |||
| 8804 | assert((EdgeMask || NumIncoming == 1) && | |||
| 8805 | "Multiple predecessors with one having a full mask"); | |||
| 8806 | OperandsWithMask.push_back(Operands[In]); | |||
| 8807 | if (EdgeMask) | |||
| 8808 | OperandsWithMask.push_back(EdgeMask); | |||
| 8809 | } | |||
| 8810 | return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask)); | |||
| 8811 | } | |||
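The operand list built here interleaves each incoming value with its edge mask, and VPBlendRecipe::execute (shown near the end of this section) lowers it to a nested select chain. A one-lane scalar sketch of that chain's semantics (blendLane is a hypothetical helper, not LLVM API):

```cpp
#include <iostream>
#include <utility>
#include <vector>

// One scalar lane of SELECT(m_{n-1}, in_{n-1}, ... SELECT(m1, in1, in0)).
// Applying masks in ascending order and letting later ones overwrite is
// equivalent to evaluating the nested selects outermost-first.
int blendLane(const std::vector<std::pair<int, bool>> &IncomingWithMask) {
  int Result = IncomingWithMask[0].first; // In0; Mask0 is never consulted
  for (size_t In = 1; In < IncomingWithMask.size(); ++In)
    if (IncomingWithMask[In].second)      // select(maskN, inN, Result)
      Result = IncomingWithMask[In].first;
  return Result;
}

int main() {
  // Three incoming values; this lane reached the phi along edge 2.
  std::cout << blendLane({{10, false}, {20, false}, {30, true}}) << "\n"; // 30
}
```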
| 8812 | ||||
| 8813 | VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI, | |||
| 8814 | ArrayRef<VPValue *> Operands, | |||
| 8815 | VFRange &Range) const { | |||
| 8816 | ||||
| 8817 | bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( | |||
| 8818 | [this, CI](ElementCount VF) { return CM.isScalarWithPredication(CI); }, | |||
| 8819 | Range); | |||
| 8820 | ||||
| 8821 | if (IsPredicated) | |||
| 8822 | return nullptr; | |||
| 8823 | ||||
| 8824 | Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); | |||
| 8825 | if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end || | |||
| 8826 | ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect || | |||
| 8827 | ID == Intrinsic::pseudoprobe || | |||
| 8828 | ID == Intrinsic::experimental_noalias_scope_decl)) | |||
| 8829 | return nullptr; | |||
| 8830 | ||||
| 8831 | auto willWiden = [&](ElementCount VF) -> bool { | |||
| 8832 | Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); | |||
| 8833 | // The following case may be scalarized depending on the VF. | |||
| 8834 | // The flag shows whether we use an intrinsic or a plain call for the | |||
| 8835 | // vectorized version of the instruction. | |||
| 8836 | // Is it beneficial to perform the intrinsic call rather than the lib call? | |||
| 8837 | bool NeedToScalarize = false; | |||
| 8838 | InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize); | |||
| 8839 | InstructionCost IntrinsicCost = ID ? CM.getVectorIntrinsicCost(CI, VF) : 0; | |||
| 8840 | bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; | |||
| 8841 | return UseVectorIntrinsic || !NeedToScalarize; | |||
| 8842 | }; | |||
| 8843 | ||||
| 8844 | if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) | |||
| 8845 | return nullptr; | |||
| 8846 | ||||
| 8847 | ArrayRef<VPValue *> Ops = Operands.take_front(CI->getNumArgOperands()); | |||
| 8848 | return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end())); | |||
| 8849 | } | |||
| 8850 | ||||
| 8851 | bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const { | |||
| 8852 | assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) && | |||
| 8853 | !isa<StoreInst>(I) && "Instruction should have been handled earlier"); | |||
| 8854 | // Instruction should be widened, unless it is scalar after vectorization, | |||
| 8855 | // scalarization is profitable or it is predicated. | |||
| 8856 | auto WillScalarize = [this, I](ElementCount VF) -> bool { | |||
| 8857 | return CM.isScalarAfterVectorization(I, VF) || | |||
| 8858 | CM.isProfitableToScalarize(I, VF) || CM.isScalarWithPredication(I); | |||
| 8859 | }; | |||
| 8860 | return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize, | |||
| 8861 | Range); | |||
| 8862 | } | |||
| 8863 | ||||
| 8864 | VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, | |||
| 8865 | ArrayRef<VPValue *> Operands) const { | |||
| 8866 | auto IsVectorizableOpcode = [](unsigned Opcode) { | |||
| 8867 | switch (Opcode) { | |||
| 8868 | case Instruction::Add: | |||
| 8869 | case Instruction::And: | |||
| 8870 | case Instruction::AShr: | |||
| 8871 | case Instruction::BitCast: | |||
| 8872 | case Instruction::FAdd: | |||
| 8873 | case Instruction::FCmp: | |||
| 8874 | case Instruction::FDiv: | |||
| 8875 | case Instruction::FMul: | |||
| 8876 | case Instruction::FNeg: | |||
| 8877 | case Instruction::FPExt: | |||
| 8878 | case Instruction::FPToSI: | |||
| 8879 | case Instruction::FPToUI: | |||
| 8880 | case Instruction::FPTrunc: | |||
| 8881 | case Instruction::FRem: | |||
| 8882 | case Instruction::FSub: | |||
| 8883 | case Instruction::ICmp: | |||
| 8884 | case Instruction::IntToPtr: | |||
| 8885 | case Instruction::LShr: | |||
| 8886 | case Instruction::Mul: | |||
| 8887 | case Instruction::Or: | |||
| 8888 | case Instruction::PtrToInt: | |||
| 8889 | case Instruction::SDiv: | |||
| 8890 | case Instruction::Select: | |||
| 8891 | case Instruction::SExt: | |||
| 8892 | case Instruction::Shl: | |||
| 8893 | case Instruction::SIToFP: | |||
| 8894 | case Instruction::SRem: | |||
| 8895 | case Instruction::Sub: | |||
| 8896 | case Instruction::Trunc: | |||
| 8897 | case Instruction::UDiv: | |||
| 8898 | case Instruction::UIToFP: | |||
| 8899 | case Instruction::URem: | |||
| 8900 | case Instruction::Xor: | |||
| 8901 | case Instruction::ZExt: | |||
| 8902 | return true; | |||
| 8903 | } | |||
| 8904 | return false; | |||
| 8905 | }; | |||
| 8906 | ||||
| 8907 | if (!IsVectorizableOpcode(I->getOpcode())) | |||
| 8908 | return nullptr; | |||
| 8909 | ||||
| 8910 | // Success: widen this instruction. | |||
| 8911 | return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end())); | |||
| 8912 | } | |||
| 8913 | ||||
| 8914 | void VPRecipeBuilder::fixHeaderPhis() { | |||
| 8915 | BasicBlock *OrigLatch = OrigLoop->getLoopLatch(); | |||
| 8916 | for (VPWidenPHIRecipe *R : PhisToFix) { | |||
| 8917 | auto *PN = cast<PHINode>(R->getUnderlyingValue()); | |||
| 8918 | VPRecipeBase *IncR = | |||
| 8919 | getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch))); | |||
| 8920 | R->addOperand(IncR->getVPSingleValue()); | |||
| 8921 | } | |||
| 8922 | } | |||
| 8923 | ||||
| 8924 | VPBasicBlock *VPRecipeBuilder::handleReplication( | |||
| 8925 | Instruction *I, VFRange &Range, VPBasicBlock *VPBB, | |||
| 8926 | VPlanPtr &Plan) { | |||
| 8927 | bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange( | |||
| 8928 | [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); }, | |||
| 8929 | Range); | |||
| 8930 | ||||
| 8931 | bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( | |||
| 8932 | [&](ElementCount VF) { return CM.isPredicatedInst(I); }, Range); | |||
| 8933 | ||||
| 8934 | // Even if the instruction is not marked as uniform, there are certain | |||
| 8935 | // intrinsic calls that can be effectively treated as such, so we check for | |||
| 8936 | // them here. Conservatively, we only do this for scalable vectors, since | |||
| 8937 | // for fixed-width VFs we can always fall back on full scalarization. | |||
| 8938 | if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) { | |||
| 8939 | switch (cast<IntrinsicInst>(I)->getIntrinsicID()) { | |||
| 8940 | case Intrinsic::assume: | |||
| 8941 | case Intrinsic::lifetime_start: | |||
| 8942 | case Intrinsic::lifetime_end: | |||
| 8943 | // For scalable vectors if one of the operands is variant then we still | |||
| 8944 | // want to mark as uniform, which will generate one instruction for just | |||
| 8945 | // the first lane of the vector. We can't scalarize the call in the same | |||
| 8946 | // way as for fixed-width vectors because we don't know how many lanes | |||
| 8947 | // there are. | |||
| 8948 | // | |||
| 8949 | // The reasons for doing it this way for scalable vectors are: | |||
| 8950 | // 1. For the assume intrinsic generating the instruction for the first | |||
| 8951 | // lane is still better than not generating any at all. For | |||
| 8952 | // example, the input may be a splat across all lanes. | |||
| 8953 | // 2. For the lifetime start/end intrinsics the pointer operand only | |||
| 8954 | // does anything useful when the input comes from a stack object, | |||
| 8955 | // which suggests it should always be uniform. For non-stack objects | |||
| 8956 | // the effect is to poison the object, which still allows us to | |||
| 8957 | // remove the call. | |||
| 8958 | IsUniform = true; | |||
| 8959 | break; | |||
| 8960 | default: | |||
| 8961 | break; | |||
| 8962 | } | |||
| 8963 | } | |||
| 8964 | ||||
| 8965 | auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()), | |||
| 8966 | IsUniform, IsPredicated); | |||
| 8967 | setRecipe(I, Recipe); | |||
| 8968 | Plan->addVPValue(I, Recipe); | |||
| 8969 | ||||
| 8970 | // Find if I uses a predicated instruction. If so, it will use its scalar | |||
| 8971 | // value. Avoid hoisting the insert-element which packs the scalar value into | |||
| 8972 | // a vector value, as that happens iff all users use the vector value. | |||
| 8973 | for (VPValue *Op : Recipe->operands()) { | |||
| 8974 | auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef()); | |||
| 8975 | if (!PredR) | |||
| 8976 | continue; | |||
| 8977 | auto *RepR = | |||
| 8978 | cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef()); | |||
| 8979 | assert(RepR->isPredicated() && | |||
| 8980 | "expected Replicate recipe to be predicated"); | |||
| 8981 | RepR->setAlsoPack(false); | |||
| 8982 | } | |||
| 8983 | ||||
| 8984 | // Finalize the recipe for Instr, first if it is not predicated. | |||
| 8985 | if (!IsPredicated) { | |||
| 8986 | LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n"); | |||
| 8987 | VPBB->appendRecipe(Recipe); | |||
| 8988 | return VPBB; | |||
| 8989 | } | |||
| 8990 | LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n"); | |||
| 8991 | assert(VPBB->getSuccessors().empty() && | |||
| 8992 | "VPBB has successors when handling predicated replication."); | |||
| 8993 | // Record predicated instructions for above packing optimizations. | |||
| 8994 | VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan); | |||
| 8995 | VPBlockUtils::insertBlockAfter(Region, VPBB); | |||
| 8996 | auto *RegSucc = new VPBasicBlock(); | |||
| 8997 | VPBlockUtils::insertBlockAfter(RegSucc, Region); | |||
| 8998 | return RegSucc; | |||
| 8999 | } | |||
| 9000 | ||||
| 9001 | VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr, | |||
| 9002 | VPRecipeBase *PredRecipe, | |||
| 9003 | VPlanPtr &Plan) { | |||
| 9004 | // Instructions marked for predication are replicated and placed under an | |||
| 9005 | // if-then construct to prevent side-effects. | |||
| 9006 | ||||
| 9007 | // Generate recipes to compute the block mask for this region. | |||
| 9008 | VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan); | |||
| 9009 | ||||
| 9010 | // Build the triangular if-then region. | |||
| 9011 | std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str(); | |||
| 9012 | assert(Instr->getParent() && "Predicated instruction not in any basic block"); | |||
| 9013 | auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask); | |||
| 9014 | auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe); | |||
| 9015 | auto *PHIRecipe = Instr->getType()->isVoidTy() | |||
| 9016 | ? nullptr | |||
| 9017 | : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr)); | |||
| 9018 | if (PHIRecipe) { | |||
| 9019 | Plan->removeVPValueFor(Instr); | |||
| 9020 | Plan->addVPValue(Instr, PHIRecipe); | |||
| 9021 | } | |||
| 9022 | auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe); | |||
| 9023 | auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe); | |||
| 9024 | VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true); | |||
| 9025 | ||||
| 9026 | // Note: first set Entry as region entry and then connect successors starting | |||
| 9027 | // from it in order, to propagate the "parent" of each VPBasicBlock. | |||
| 9028 | VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry); | |||
| 9029 | VPBlockUtils::connectBlocks(Pred, Exit); | |||
| 9030 | ||||
| 9031 | return Region; | |||
| 9032 | } | |||
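The region assembled here is a per-lane if-then triangle: pred.x.entry branches on the mask, pred.x.if holds the replicated instruction, and pred.x.continue merges the two paths. A scalar C++ sketch of those semantics (predicatedLane is a hypothetical illustration, not generated code; std::nullopt stands in for a disabled lane's undefined value):

```cpp
#include <iostream>
#include <optional>

// Per-lane semantics of the triangular pred.<opcode> region.
std::optional<int> predicatedLane(bool MaskLane, int (*Body)()) {
  if (MaskLane)          // BRANCH-ON-MASK in pred.x.entry
    return Body();       // replicated instruction in pred.x.if
  return std::nullopt;   // VPPredInstPHIRecipe merge in pred.x.continue
}

int main() {
  // E.g. a division that must not execute on masked-off lanes.
  auto GuardedDiv = [] { return 100 / 7; };
  std::cout << predicatedLane(true, GuardedDiv).value_or(-1) << "\n";  // 14
  std::cout << predicatedLane(false, GuardedDiv).value_or(-1) << "\n"; // -1
}
```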
| 9033 | ||||
| 9034 | VPRecipeOrVPValueTy | |||
| 9035 | VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr, | |||
| 9036 | ArrayRef<VPValue *> Operands, | |||
| 9037 | VFRange &Range, VPlanPtr &Plan) { | |||
| 9038 | // First, check for specific widening recipes that deal with calls, memory | |||
| 9039 | // operations, inductions and Phi nodes. | |||
| 9040 | if (auto *CI = dyn_cast<CallInst>(Instr)) | |||
| 9041 | return toVPRecipeResult(tryToWidenCall(CI, Operands, Range)); | |||
| 9042 | ||||
| 9043 | if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr)) | |||
| 9044 | return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan)); | |||
| 9045 | ||||
| 9046 | VPRecipeBase *Recipe; | |||
| 9047 | if (auto Phi = dyn_cast<PHINode>(Instr)) { | |||
| 9048 | if (Phi->getParent() != OrigLoop->getHeader()) | |||
| 9049 | return tryToBlend(Phi, Operands, Plan); | |||
| 9050 | if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands))) | |||
| 9051 | return toVPRecipeResult(Recipe); | |||
| 9052 | ||||
| 9053 | VPWidenPHIRecipe *PhiRecipe = nullptr; | |||
| 9054 | if (Legal->isReductionVariable(Phi) || Legal->isFirstOrderRecurrence(Phi)) { | |||
| 9055 | VPValue *StartV = Operands[0]; | |||
| 9056 | if (Legal->isReductionVariable(Phi)) { | |||
| 9057 | RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi]; | |||
| 9058 | assert(RdxDesc.getRecurrenceStartValue() == | |||
| 9059 | Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())); | |||
| 9060 | PhiRecipe = new VPReductionPHIRecipe(Phi, RdxDesc, *StartV, | |||
| 9061 | CM.isInLoopReduction(Phi), | |||
| 9062 | CM.useOrderedReductions(RdxDesc)); | |||
| 9063 | } else { | |||
| 9064 | PhiRecipe = new VPFirstOrderRecurrencePHIRecipe(Phi, *StartV); | |||
| 9065 | } | |||
| 9066 | ||||
| 9067 | // Record the incoming value from the backedge, so we can add the incoming | |||
| 9068 | // value from the backedge after all recipes have been created. | |||
| 9069 | recordRecipeOf(cast<Instruction>( | |||
| 9070 | Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch()))); | |||
| 9071 | PhisToFix.push_back(PhiRecipe); | |||
| 9072 | } else { | |||
| 9073 | // TODO: record start and backedge value for remaining pointer induction | |||
| 9074 | // phis. | |||
| 9075 | assert(Phi->getType()->isPointerTy() && | |||
| 9076 | "only pointer phis should be handled here"); | |||
| 9077 | PhiRecipe = new VPWidenPHIRecipe(Phi); | |||
| 9078 | } | |||
| 9079 | ||||
| 9080 | return toVPRecipeResult(PhiRecipe); | |||
| 9081 | } | |||
| 9082 | ||||
| 9083 | if (isa<TruncInst>(Instr) && | |||
| 9084 | (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands, | |||
| 9085 | Range, *Plan))) | |||
| 9086 | return toVPRecipeResult(Recipe); | |||
| 9087 | ||||
| 9088 | if (!shouldWiden(Instr, Range)) | |||
| 9089 | return nullptr; | |||
| 9090 | ||||
| 9091 | if (auto GEP = dyn_cast<GetElementPtrInst>(Instr)) | |||
| 9092 | return toVPRecipeResult(new VPWidenGEPRecipe( | |||
| 9093 | GEP, make_range(Operands.begin(), Operands.end()), OrigLoop)); | |||
| 9094 | ||||
| 9095 | if (auto *SI = dyn_cast<SelectInst>(Instr)) { | |||
| 9096 | bool InvariantCond = | |||
| 9097 | PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop); | |||
| 9098 | return toVPRecipeResult(new VPWidenSelectRecipe( | |||
| 9099 | *SI, make_range(Operands.begin(), Operands.end()), InvariantCond)); | |||
| 9100 | } | |||
| 9101 | ||||
| 9102 | return toVPRecipeResult(tryToWiden(Instr, Operands)); | |||
| 9103 | } | |||
| 9104 | ||||
| 9105 | void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF, | |||
| 9106 | ElementCount MaxVF) { | |||
| 9107 | assert(OrigLoop->isInnermost() && "Inner loop expected."); | |||
| 9108 | ||||
| 9109 | // Collect instructions from the original loop that will become trivially dead | |||
| 9110 | // in the vectorized loop. We don't need to vectorize these instructions. For | |||
| 9111 | // example, original induction update instructions can become dead because we | |||
| 9112 | // separately emit induction "steps" when generating code for the new loop. | |||
| 9113 | // Similarly, we create a new latch condition when setting up the structure | |||
| 9114 | // of the new loop, so the old one can become dead. | |||
| 9115 | SmallPtrSet<Instruction *, 4> DeadInstructions; | |||
| 9116 | collectTriviallyDeadInstructions(DeadInstructions); | |||
| 9117 | ||||
| 9118 | // Add assume instructions we need to drop to DeadInstructions, to prevent | |||
| 9119 | // them from being added to the VPlan. | |||
| 9120 | // TODO: We only need to drop assumes in blocks that get flattened. If the | |||
| 9121 | // control flow is preserved, we should keep them. | |||
| 9122 | auto &ConditionalAssumes = Legal->getConditionalAssumes(); | |||
| 9123 | DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end()); | |||
| 9124 | ||||
| 9125 | MapVector<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter(); | |||
| 9126 | // Dead instructions do not need sinking. Remove them from SinkAfter. | |||
| 9127 | for (Instruction *I : DeadInstructions) | |||
| 9128 | SinkAfter.erase(I); | |||
| 9129 | ||||
| 9130 | // Cannot sink instructions after dead instructions (there won't be any | |||
| 9131 | // recipes for them). Instead, find the first non-dead previous instruction. | |||
| 9132 | for (auto &P : Legal->getSinkAfter()) { | |||
| 9133 | Instruction *SinkTarget = P.second; | |||
| 9134 | Instruction *FirstInst = &*SinkTarget->getParent()->begin(); | |||
| 9135 | (void)FirstInst; | |||
| 9136 | while (DeadInstructions.contains(SinkTarget)) { | |||
| 9137 | assert( | |||
| 9138 | SinkTarget != FirstInst && | |||
| 9139 | "Must find a live instruction (at least the one feeding the " | |||
| 9140 | "first-order recurrence PHI) before reaching beginning of the block"); | |||
| 9141 | SinkTarget = SinkTarget->getPrevNode(); | |||
| 9142 | assert(SinkTarget != P.first && | |||
| 9143 | "sink source equals target, no sinking required"); | |||
| 9144 | } | |||
| 9145 | P.second = SinkTarget; | |||
| 9146 | } | |||
| 9147 | ||||
| 9148 | auto MaxVFPlusOne = MaxVF.getWithIncrement(1); | |||
| 9149 | for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) { | |||
| 9150 | VFRange SubRange = {VF, MaxVFPlusOne}; | |||
| 9151 | VPlans.push_back( | |||
| 9152 | buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter)); | |||
| 9153 | VF = SubRange.End; | |||
| 9154 | } | |||
| 9155 | } | |||
| 9156 | ||||
| 9157 | VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes( | |||
| 9158 | VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions, | |||
| 9159 | const MapVector<Instruction *, Instruction *> &SinkAfter) { | |||
| 9160 | ||||
| 9161 | SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups; | |||
| 9162 | ||||
| 9163 | VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder); | |||
| 9164 | ||||
| 9165 | // --------------------------------------------------------------------------- | |||
| 9166 | // Pre-construction: record ingredients whose recipes we'll need to further | |||
| 9167 | // process after constructing the initial VPlan. | |||
| 9168 | // --------------------------------------------------------------------------- | |||
| 9169 | ||||
| 9170 | // Mark instructions we'll need to sink later and their targets as | |||
| 9171 | // ingredients whose recipe we'll need to record. | |||
| 9172 | for (auto &Entry : SinkAfter) { | |||
| 9173 | RecipeBuilder.recordRecipeOf(Entry.first); | |||
| 9174 | RecipeBuilder.recordRecipeOf(Entry.second); | |||
| 9175 | } | |||
| 9176 | for (auto &Reduction : CM.getInLoopReductionChains()) { | |||
| 9177 | PHINode *Phi = Reduction.first; | |||
| 9178 | RecurKind Kind = Legal->getReductionVars()[Phi].getRecurrenceKind(); | |||
| 9179 | const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second; | |||
| 9180 | ||||
| 9181 | RecipeBuilder.recordRecipeOf(Phi); | |||
| 9182 | for (auto &R : ReductionOperations) { | |||
| 9183 | RecipeBuilder.recordRecipeOf(R); | |||
| 9184 | // For min/max reductions, where we have a pair of icmp/select, we also | |||
| 9185 | // need to record the ICmp recipe, so it can be removed later. | |||
| 9186 | if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) | |||
| 9187 | RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0))); | |||
| 9188 | } | |||
| 9189 | } | |||
| 9190 | ||||
| 9191 | // For each interleave group which is relevant for this (possibly trimmed) | |||
| 9192 | // Range, add it to the set of groups to be later applied to the VPlan and add | |||
| 9193 | // placeholders for its members' Recipes which we'll be replacing with a | |||
| 9194 | // single VPInterleaveRecipe. | |||
| 9195 | for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) { | |||
| 9196 | auto applyIG = [IG, this](ElementCount VF) -> bool { | |||
| 9197 | return (VF.isVector() && // Query is illegal for VF == 1 | |||
| 9198 | CM.getWideningDecision(IG->getInsertPos(), VF) == | |||
| 9199 | LoopVectorizationCostModel::CM_Interleave); | |||
| 9200 | }; | |||
| 9201 | if (!getDecisionAndClampRange(applyIG, Range)) | |||
| 9202 | continue; | |||
| 9203 | InterleaveGroups.insert(IG); | |||
| 9204 | for (unsigned i = 0; i < IG->getFactor(); i++) | |||
| 9205 | if (Instruction *Member = IG->getMember(i)) | |||
| 9206 | RecipeBuilder.recordRecipeOf(Member); | |||
| 9207 | }; | |||
| 9208 | ||||
| 9209 | // --------------------------------------------------------------------------- | |||
| 9210 | // Build initial VPlan: Scan the body of the loop in a topological order to | |||
| 9211 | // visit each basic block after having visited its predecessor basic blocks. | |||
| 9212 | // --------------------------------------------------------------------------- | |||
| 9213 | ||||
| 9214 | // Create a dummy pre-entry VPBasicBlock to start building the VPlan. | |||
| 9215 | auto Plan = std::make_unique<VPlan>(); | |||
| 9216 | VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry"); | |||
| 9217 | Plan->setEntry(VPBB); | |||
| 9218 | ||||
| 9219 | // Scan the body of the loop in a topological order to visit each basic block | |||
| 9220 | // after having visited its predecessor basic blocks. | |||
| 9221 | LoopBlocksDFS DFS(OrigLoop); | |||
| 9222 | DFS.perform(LI); | |||
| 9223 | ||||
| 9224 | for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { | |||
| 9225 | // Relevant instructions from basic block BB will be grouped into VPRecipe | |||
| 9226 | // ingredients and fill a new VPBasicBlock. | |||
| 9227 | unsigned VPBBsForBB = 0; | |||
| 9228 | auto *FirstVPBBForBB = new VPBasicBlock(BB->getName()); | |||
| 9229 | VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB); | |||
| 9230 | VPBB = FirstVPBBForBB; | |||
| 9231 | Builder.setInsertPoint(VPBB); | |||
| 9232 | ||||
| 9233 | // Introduce each ingredient into VPlan. | |||
| 9234 | // TODO: Model and preserve debug intrinsics in VPlan. | |||
| 9235 | for (Instruction &I : BB->instructionsWithoutDebug()) { | |||
| 9236 | Instruction *Instr = &I; | |||
| 9237 | ||||
| 9238 | // First filter out irrelevant instructions, to ensure no recipes are | |||
| 9239 | // built for them. | |||
| 9240 | if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr)) | |||
| 9241 | continue; | |||
| 9242 | ||||
| 9243 | SmallVector<VPValue *, 4> Operands; | |||
| 9244 | auto *Phi = dyn_cast<PHINode>(Instr); | |||
| 9245 | if (Phi && Phi->getParent() == OrigLoop->getHeader()) { | |||
| 9246 | Operands.push_back(Plan->getOrAddVPValue( | |||
| 9247 | Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()))); | |||
| 9248 | } else { | |||
| 9249 | auto OpRange = Plan->mapToVPValues(Instr->operands()); | |||
| 9250 | Operands = {OpRange.begin(), OpRange.end()}; | |||
| 9251 | } | |||
| 9252 | if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe( | |||
| 9253 | Instr, Operands, Range, Plan)) { | |||
| 9254 | // If Instr can be simplified to an existing VPValue, use it. | |||
| 9255 | if (RecipeOrValue.is<VPValue *>()) { | |||
| 9256 | auto *VPV = RecipeOrValue.get<VPValue *>(); | |||
| 9257 | Plan->addVPValue(Instr, VPV); | |||
| 9258 | // If the re-used value is a recipe, register the recipe for the | |||
| 9259 | // instruction, in case the recipe for Instr needs to be recorded. | |||
| 9260 | if (auto *R = dyn_cast_or_null<VPRecipeBase>(VPV->getDef())) | |||
| 9261 | RecipeBuilder.setRecipe(Instr, R); | |||
| 9262 | continue; | |||
| 9263 | } | |||
| 9264 | // Otherwise, add the new recipe. | |||
| 9265 | VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>(); | |||
| 9266 | for (auto *Def : Recipe->definedValues()) { | |||
| 9267 | auto *UV = Def->getUnderlyingValue(); | |||
| 9268 | Plan->addVPValue(UV, Def); | |||
| 9269 | } | |||
| 9270 | ||||
| 9271 | RecipeBuilder.setRecipe(Instr, Recipe); | |||
| 9272 | VPBB->appendRecipe(Recipe); | |||
| 9273 | continue; | |||
| 9274 | } | |||
| 9275 | ||||
| 9276 | // Otherwise, if all widening options failed, Instruction is to be | |||
| 9277 | // replicated. This may create a successor for VPBB. | |||
| 9278 | VPBasicBlock *NextVPBB = | |||
| 9279 | RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan); | |||
| 9280 | if (NextVPBB != VPBB) { | |||
| 9281 | VPBB = NextVPBB; | |||
| 9282 | VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++) | |||
| 9283 | : ""); | |||
| 9284 | } | |||
| 9285 | } | |||
| 9286 | } | |||
| 9287 | ||||
| 9288 | RecipeBuilder.fixHeaderPhis(); | |||
| 9289 | ||||
| 9290 | // Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks | |||
| 9291 | // may also be empty, such as the last one, VPBB, reflecting original | |||
| 9292 | // basic-blocks with no recipes. | |||
| 9293 | VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry()); | |||
| 9294 | assert(PreEntry->empty() && "Expecting empty pre-entry block."); | |||
| 9295 | VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor()); | |||
| 9296 | VPBlockUtils::disconnectBlocks(PreEntry, Entry); | |||
| 9297 | delete PreEntry; | |||
| 9298 | ||||
| 9299 | // --------------------------------------------------------------------------- | |||
| 9300 | // Transform initial VPlan: Apply previously taken decisions, in order, to | |||
| 9301 | // bring the VPlan to its final state. | |||
| 9302 | // --------------------------------------------------------------------------- | |||
| 9303 | ||||
| 9304 | // Apply Sink-After legal constraints. | |||
| 9305 | auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * { | |||
| 9306 | auto *Region = dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent()); | |||
| 9307 | if (Region && Region->isReplicator()) { | |||
| 9308 | assert(Region->getNumSuccessors() == 1 && | |||
| 9309 | Region->getNumPredecessors() == 1 && "Expected SESE region!"); | |||
| 9310 | assert(R->getParent()->size() == 1 && | |||
| 9311 | "A recipe in an original replicator region must be the only " | |||
| 9312 | "recipe in its block"); | |||
| 9313 | return Region; | |||
| 9314 | } | |||
| 9315 | return nullptr; | |||
| 9316 | }; | |||
| 9317 | for (auto &Entry : SinkAfter) { | |||
| 9318 | VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first); | |||
| 9319 | VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second); | |||
| 9320 | ||||
| 9321 | auto *TargetRegion = GetReplicateRegion(Target); | |||
| 9322 | auto *SinkRegion = GetReplicateRegion(Sink); | |||
| 9323 | if (!SinkRegion) { | |||
| 9324 | // If the sink source is not a replicate region, sink the recipe directly. | |||
| 9325 | if (TargetRegion) { | |||
| 9326 | // The target is in a replication region, make sure to move Sink to | |||
| 9327 | // the block after it, not into the replication region itself. | |||
| 9328 | VPBasicBlock *NextBlock = | |||
| 9329 | cast<VPBasicBlock>(TargetRegion->getSuccessors().front()); | |||
| 9330 | Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi()); | |||
| 9331 | } else | |||
| 9332 | Sink->moveAfter(Target); | |||
| 9333 | continue; | |||
| 9334 | } | |||
| 9335 | ||||
| 9336 | // The sink source is in a replicate region. Unhook the region from the CFG. | |||
| 9337 | auto *SinkPred = SinkRegion->getSinglePredecessor(); | |||
| 9338 | auto *SinkSucc = SinkRegion->getSingleSuccessor(); | |||
| 9339 | VPBlockUtils::disconnectBlocks(SinkPred, SinkRegion); | |||
| 9340 | VPBlockUtils::disconnectBlocks(SinkRegion, SinkSucc); | |||
| 9341 | VPBlockUtils::connectBlocks(SinkPred, SinkSucc); | |||
| 9342 | ||||
| 9343 | if (TargetRegion) { | |||
| 9344 | // The target recipe is also in a replicate region, move the sink region | |||
| 9345 | // after the target region. | |||
| 9346 | auto *TargetSucc = TargetRegion->getSingleSuccessor(); | |||
| 9347 | VPBlockUtils::disconnectBlocks(TargetRegion, TargetSucc); | |||
| 9348 | VPBlockUtils::connectBlocks(TargetRegion, SinkRegion); | |||
| 9349 | VPBlockUtils::connectBlocks(SinkRegion, TargetSucc); | |||
| 9350 | } else { | |||
| 9351 | // The sink source is in a replicate region, we need to move the whole | |||
| 9352 | // replicate region, which should only contain a single recipe in the | |||
| 9353 | // main block. | |||
| 9354 | auto *SplitBlock = | |||
| 9355 | Target->getParent()->splitAt(std::next(Target->getIterator())); | |||
| 9356 | ||||
| 9357 | auto *SplitPred = SplitBlock->getSinglePredecessor(); | |||
| 9358 | ||||
| 9359 | VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock); | |||
| 9360 | VPBlockUtils::connectBlocks(SplitPred, SinkRegion); | |||
| 9361 | VPBlockUtils::connectBlocks(SinkRegion, SplitBlock); | |||
| 9362 | if (VPBB == SplitPred) | |||
| 9363 | VPBB = SplitBlock; | |||
| 9364 | } | |||
| 9365 | } | |||
| 9366 | ||||
| 9367 | // Introduce a recipe to combine the incoming and previous values of a | |||
| 9368 | // first-order recurrence. | |||
| 9369 | for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) { | |||
| 9370 | auto *RecurPhi = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R); | |||
| 9371 | if (!RecurPhi) | |||
| 9372 | continue; | |||
| 9373 | ||||
| 9374 | auto *RecurSplice = cast<VPInstruction>( | |||
| 9375 | Builder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice, | |||
| 9376 | {RecurPhi, RecurPhi->getBackedgeValue()})); | |||
| 9377 | ||||
| 9378 | VPRecipeBase *PrevRecipe = RecurPhi->getBackedgeRecipe(); | |||
| 9379 | if (auto *Region = GetReplicateRegion(PrevRecipe)) { | |||
| 9380 | VPBasicBlock *Succ = cast<VPBasicBlock>(Region->getSingleSuccessor()); | |||
| 9381 | RecurSplice->moveBefore(*Succ, Succ->getFirstNonPhi()); | |||
| 9382 | } else | |||
| 9383 | RecurSplice->moveAfter(PrevRecipe); | |||
| 9384 | RecurPhi->replaceAllUsesWith(RecurSplice); | |||
| 9385 | // Set the first operand of RecurSplice to RecurPhi again, after replacing | |||
| 9386 | // all users. | |||
| 9387 | RecurSplice->setOperand(0, RecurPhi); | |||
| 9388 | } | |||
| 9389 | ||||
| 9390 | // Interleave memory: for each Interleave Group we marked earlier as relevant | |||
| 9391 | // for this VPlan, replace the Recipes widening its memory instructions with a | |||
| 9392 | // single VPInterleaveRecipe at its insertion point. | |||
| 9393 | for (auto IG : InterleaveGroups) { | |||
| 9394 | auto *Recipe = cast<VPWidenMemoryInstructionRecipe>( | |||
| 9395 | RecipeBuilder.getRecipe(IG->getInsertPos())); | |||
| 9396 | SmallVector<VPValue *, 4> StoredValues; | |||
| 9397 | for (unsigned i = 0; i < IG->getFactor(); ++i) | |||
| 9398 | if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) { | |||
| 9399 | auto *StoreR = | |||
| 9400 | cast<VPWidenMemoryInstructionRecipe>(RecipeBuilder.getRecipe(SI)); | |||
| 9401 | StoredValues.push_back(StoreR->getStoredValue()); | |||
| 9402 | } | |||
| 9403 | ||||
| 9404 | auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues, | |||
| 9405 | Recipe->getMask()); | |||
| 9406 | VPIG->insertBefore(Recipe); | |||
| 9407 | unsigned J = 0; | |||
| 9408 | for (unsigned i = 0; i < IG->getFactor(); ++i) | |||
| 9409 | if (Instruction *Member = IG->getMember(i)) { | |||
| 9410 | if (!Member->getType()->isVoidTy()) { | |||
| 9411 | VPValue *OriginalV = Plan->getVPValue(Member); | |||
| 9412 | Plan->removeVPValueFor(Member); | |||
| 9413 | Plan->addVPValue(Member, VPIG->getVPValue(J)); | |||
| 9414 | OriginalV->replaceAllUsesWith(VPIG->getVPValue(J)); | |||
| 9415 | J++; | |||
| 9416 | } | |||
| 9417 | RecipeBuilder.getRecipe(Member)->eraseFromParent(); | |||
| 9418 | } | |||
| 9419 | } | |||
| 9420 | ||||
| 9421 | // Adjust the recipes for any inloop reductions. | |||
| 9422 | adjustRecipesForInLoopReductions(Plan, RecipeBuilder, Range.Start); | |||
| 9423 | ||||
| 9424 | // Finally, if tail is folded by masking, introduce selects between the phi | |||
| 9425 | // and the live-out instruction of each reduction, at the end of the latch. | |||
| 9426 | if (CM.foldTailByMasking() && !Legal->getReductionVars().empty()) { | |||
| 9427 | Builder.setInsertPoint(VPBB); | |||
| 9428 | auto *Cond = RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan); | |||
| 9429 | for (auto &Reduction : Legal->getReductionVars()) { | |||
| 9430 | if (CM.isInLoopReduction(Reduction.first)) | |||
| 9431 | continue; | |||
| 9432 | VPValue *Phi = Plan->getOrAddVPValue(Reduction.first); | |||
| 9433 | VPValue *Red = Plan->getOrAddVPValue(Reduction.second.getLoopExitInstr()); | |||
| 9434 | Builder.createNaryOp(Instruction::Select, {Cond, Red, Phi}); | |||
| 9435 | } | |||
| 9436 | } | |||
| 9437 | ||||
| 9438 | VPlanTransforms::sinkScalarOperands(*Plan); | |||
| 9439 | VPlanTransforms::mergeReplicateRegions(*Plan); | |||
| 9440 | ||||
| 9441 | std::string PlanName; | |||
| 9442 | raw_string_ostream RSO(PlanName); | |||
| 9443 | ElementCount VF = Range.Start; | |||
| 9444 | Plan->addVF(VF); | |||
| 9445 | RSO << "Initial VPlan for VF={" << VF; | |||
| 9446 | for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) { | |||
| 9447 | Plan->addVF(VF); | |||
| 9448 | RSO << "," << VF; | |||
| 9449 | } | |||
| 9450 | RSO << "},UF>=1"; | |||
| 9451 | RSO.flush(); | |||
| 9452 | Plan->setName(PlanName); | |||
| 9453 | ||||
| 9454 | return Plan; | |||
| 9455 | } | |||
| 9456 | ||||
| 9457 | VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) { | |||
| 9458 | // Outer loop handling: They may require CFG and instruction level | |||
| 9459 | // transformations before even evaluating whether vectorization is profitable. | |||
| 9460 | // Since we cannot modify the incoming IR, we need to build VPlan upfront in | |||
| 9461 | // the vectorization pipeline. | |||
| 9462 | assert(!OrigLoop->isInnermost()); | |||
| 9463 | assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); | |||
| 9464 | ||||
| 9465 | // Create new empty VPlan | |||
| 9466 | auto Plan = std::make_unique<VPlan>(); | |||
| 9467 | ||||
| 9468 | // Build hierarchical CFG | |||
| 9469 | VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan); | |||
| 9470 | HCFGBuilder.buildHierarchicalCFG(); | |||
| 9471 | ||||
| 9472 | for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End); | |||
| 9473 | VF *= 2) | |||
| 9474 | Plan->addVF(VF); | |||
| 9475 | ||||
| 9476 | if (EnableVPlanPredication) { | |||
| 9477 | VPlanPredicator VPP(*Plan); | |||
| 9478 | VPP.predicate(); | |||
| 9479 | ||||
| 9480 | // Avoid running transformation to recipes until masked code generation in | |||
| 9481 | // VPlan-native path is in place. | |||
| 9482 | return Plan; | |||
| 9483 | } | |||
| 9484 | ||||
| 9485 | SmallPtrSet<Instruction *, 1> DeadInstructions; | |||
| 9486 | VPlanTransforms::VPInstructionsToVPRecipes(OrigLoop, Plan, | |||
| 9487 | Legal->getInductionVars(), | |||
| 9488 | DeadInstructions, *PSE.getSE()); | |||
| 9489 | return Plan; | |||
| 9490 | } | |||
| 9491 | ||||
| 9492 | // Adjust the recipes for any inloop reductions. The chain of instructions | |||
| 9493 | // leading from the loop exit instr to the phi need to be converted to | |||
| 9494 | // reductions, with one operand being vector and the other being the scalar | |||
| 9495 | // reduction chain. | |||
| 9496 | void LoopVectorizationPlanner::adjustRecipesForInLoopReductions( | |||
| 9497 | VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder, ElementCount MinVF) { | |||
| 9498 | for (auto &Reduction : CM.getInLoopReductionChains()) { | |||
| 9499 | PHINode *Phi = Reduction.first; | |||
| 9500 | RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi]; | |||
| 9501 | const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second; | |||
| 9502 | ||||
| 9503 | if (MinVF.isScalar() && !CM.useOrderedReductions(RdxDesc)) | |||
| 9504 | continue; | |||
| 9505 | ||||
| 9506 | // ReductionOperations are ordered top-down from the phi's use to the | |||
| 9507 | // LoopExitValue. We keep track of the previous item (the Chain) to tell | |||
| 9508 | // which of the two operands will remain scalar and which will be reduced. | |||
| 9509 | // For minmax the chain will be the select instructions. | |||
| 9510 | Instruction *Chain = Phi; | |||
| 9511 | for (Instruction *R : ReductionOperations) { | |||
| 9512 | VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R); | |||
| 9513 | RecurKind Kind = RdxDesc.getRecurrenceKind(); | |||
| 9514 | ||||
| 9515 | VPValue *ChainOp = Plan->getVPValue(Chain); | |||
| 9516 | unsigned FirstOpId; | |||
| 9517 | if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { | |||
| 9518 | assert(isa<VPWidenSelectRecipe>(WidenRecipe) && | |||
| 9519 | "Expected to replace a VPWidenSelectSC"); | |||
| 9520 | FirstOpId = 1; | |||
| 9521 | } else { | |||
| 9522 | assert((MinVF.isScalar() || isa<VPWidenRecipe>(WidenRecipe)) && | |||
| 9523 | "Expected to replace a VPWidenSC"); | |||
| 9524 | FirstOpId = 0; | |||
| 9525 | } | |||
| 9526 | unsigned VecOpId = | |||
| 9527 | R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId; | |||
| 9528 | VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId)); | |||
| 9529 | ||||
| 9530 | auto *CondOp = CM.foldTailByMasking() | |||
| 9531 | ? RecipeBuilder.createBlockInMask(R->getParent(), Plan) | |||
| 9532 | : nullptr; | |||
| 9533 | VPReductionRecipe *RedRecipe = new VPReductionRecipe( | |||
| 9534 | &RdxDesc, R, ChainOp, VecOp, CondOp, TTI); | |||
| 9535 | WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe); | |||
| 9536 | Plan->removeVPValueFor(R); | |||
| 9537 | Plan->addVPValue(R, RedRecipe); | |||
| 9538 | WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator()); | |||
| 9539 | WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe); | |||
| 9540 | WidenRecipe->eraseFromParent(); | |||
| 9541 | ||||
| 9542 | if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { | |||
| 9543 | VPRecipeBase *CompareRecipe = | |||
| 9544 | RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0))); | |||
| 9545 | assert(isa<VPWidenRecipe>(CompareRecipe) && | |||
| 9546 | "Expected to replace a VPWidenSC"); | |||
| 9547 | assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 && | |||
| 9548 | "Expected no remaining users"); | |||
| 9549 | CompareRecipe->eraseFromParent(); | |||
| 9550 | } | |||
| 9551 | Chain = R; | |||
| 9552 | } | |||
| 9553 | } | |||
| 9554 | } | |||
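| | ||||
| | // Editorial sketch (not part of the pass; IR names below are hypothetical): | |||
| | // for a simple in-loop sum reduction such as | |||
| | //   float s = 0.f; | |||
| | //   for (int i = 0; i < n; ++i) | |||
| | //     s += a[i]; | |||
| | // the reduction chain is { %add }, rooted at the header phi for s. The walk | |||
| | // above replaces the VPWidenRecipe for %add with a VPReductionRecipe whose | |||
| | // ChainOp is the scalar chain (initially the phi) and whose VecOp is the | |||
| | // widened load of a[i], so each vector iteration reduces its lanes into a | |||
| | // single scalar accumulator instead of keeping a vector of partial sums. | |||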
| 9555 | ||||
| 9556 | #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) | |||
| 9557 | void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent, | |||
| 9558 | VPSlotTracker &SlotTracker) const { | |||
| 9559 | O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at "; | |||
| 9560 | IG->getInsertPos()->printAsOperand(O, false); | |||
| 9561 | O << ", "; | |||
| 9562 | getAddr()->printAsOperand(O, SlotTracker); | |||
| 9563 | VPValue *Mask = getMask(); | |||
| 9564 | if (Mask) { | |||
| 9565 | O << ", "; | |||
| 9566 | Mask->printAsOperand(O, SlotTracker); | |||
| 9567 | } | |||
| 9568 | for (unsigned i = 0; i < IG->getFactor(); ++i) | |||
| 9569 | if (Instruction *I = IG->getMember(i)) | |||
| 9570 | O << "\n" << Indent << " " << VPlanIngredient(I) << " " << i; | |||
| 9571 | } | |||
| 9572 | #endif | |||
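| | ||||
| | // Editorial sketch: for a factor-2 interleave group, the printer above emits | |||
| | // output of roughly this shape (values hypothetical): | |||
| | //   INTERLEAVE-GROUP with factor 2 at %insert.pos, ir<%ptr> | |||
| | //     ir<%member0> 0 | |||
| | //     ir<%member1> 1 | |||
| | // i.e. the insert position, the address (and mask, if any), then one line | |||
| | // per group member with its index within the group. | |||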
| 9573 | ||||
| 9574 | void VPWidenCallRecipe::execute(VPTransformState &State) { | |||
| 9575 | State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this, | |||
| 9576 | *this, State); | |||
| 9577 | } | |||
| 9578 | ||||
| 9579 | void VPWidenSelectRecipe::execute(VPTransformState &State) { | |||
| 9580 | State.ILV->widenSelectInstruction(*cast<SelectInst>(getUnderlyingInstr()), | |||
| 9581 | this, *this, InvariantCond, State); | |||
| 9582 | } | |||
| 9583 | ||||
| 9584 | void VPWidenRecipe::execute(VPTransformState &State) { | |||
| 9585 | State.ILV->widenInstruction(*getUnderlyingInstr(), this, *this, State); | |||
| 9586 | } | |||
| 9587 | ||||
| 9588 | void VPWidenGEPRecipe::execute(VPTransformState &State) { | |||
| 9589 | State.ILV->widenGEP(cast<GetElementPtrInst>(getUnderlyingInstr()), this, | |||
| 9590 | *this, State.UF, State.VF, IsPtrLoopInvariant, | |||
| 9591 | IsIndexLoopInvariant, State); | |||
| 9592 | } | |||
| 9593 | ||||
| 9594 | void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) { | |||
| 9595 | assert(!State.Instance && "Int or FP induction being replicated."); | |||
| 9596 | State.ILV->widenIntOrFpInduction(IV, getStartValue()->getLiveInIRValue(), | |||
| 9597 | getTruncInst(), getVPValue(0), | |||
| 9598 | getCastValue(), State); | |||
| 9599 | } | |||
| 9600 | ||||
| 9601 | void VPWidenPHIRecipe::execute(VPTransformState &State) { | |||
| 9602 | State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), this, | |||
| 9603 | State); | |||
| 9604 | } | |||
| 9605 | ||||
| 9606 | void VPBlendRecipe::execute(VPTransformState &State) { | |||
| 9607 | State.ILV->setDebugLocFromInst(Phi, &State.Builder); | |||
| 9608 | // We know that all PHIs in non-header blocks are converted into | |||
| 9609 | // selects, so we don't have to worry about the insertion order and we | |||
| 9610 | // can just use the builder. | |||
| 9611 | // At this point we generate the predication tree. There may be | |||
| 9612 | // duplications since this is a simple recursive scan, but future | |||
| 9613 | // optimizations will clean it up. | |||
| 9614 | ||||
| 9615 | unsigned NumIncoming = getNumIncomingValues(); | |||
| 9616 | ||||
| 9617 | // Generate a sequence of selects of the form: | |||
| 9618 | // SELECT(Mask3, In3, | |||
| 9619 | // SELECT(Mask2, In2, | |||
| 9620 | // SELECT(Mask1, In1, | |||
| 9621 | // In0))) | |||
| 9622 | // Note that Mask0 is never used: lanes for which no path reaches this phi, | |||
| 9623 | // and which are essentially undef, are taken from In0. | |||
| 9624 | InnerLoopVectorizer::VectorParts Entry(State.UF); | |||
| 9625 | for (unsigned In = 0; In < NumIncoming; ++In) { | |||
| 9626 | for (unsigned Part = 0; Part < State.UF; ++Part) { | |||
| 9627 | // We might have single edge PHIs (blocks) - use an identity | |||
| 9628 | // 'select' for the first PHI operand. | |||
| 9629 | Value *In0 = State.get(getIncomingValue(In), Part); | |||
| 9630 | if (In == 0) | |||
| 9631 | Entry[Part] = In0; // Initialize with the first incoming value. | |||
| 9632 | else { | |||
| 9633 | // Select between the current value and the previous incoming edge | |||
| 9634 | // based on the incoming mask. | |||
| 9635 | Value *Cond = State.get(getMask(In), Part); | |||
| 9636 | Entry[Part] = | |||
| 9637 | State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi"); | |||
| 9638 | } | |||
| 9639 | } | |||
| 9640 | } | |||
| 9641 | for (unsigned Part = 0; Part < State.UF; ++Part) | |||
| 9642 | State.set(this, Entry[Part], Part); | |||
| 9643 | } | |||
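| | ||||
| | // Editorial sketch (hypothetical IR names): for a blend with three incoming | |||
| | // (value, mask) pairs at VF = 4, the loop above emits, per unroll part, | |||
| | //   %predphi  = select <4 x i1> %mask1, <4 x i32> %in1, <4 x i32> %in0 | |||
| | //   %predphi1 = select <4 x i1> %mask2, <4 x i32> %in2, <4 x i32> %predphi | |||
| | // so later incoming values take precedence and %mask0 is never tested. | |||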
| 9644 | ||||
| 9645 | void VPInterleaveRecipe::execute(VPTransformState &State) { | |||
| 9646 | assert(!State.Instance && "Interleave group being replicated."); | |||
| 9647 | State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(), | |||
| 9648 | getStoredValues(), getMask()); | |||
| 9649 | } | |||
| 9650 | ||||
| 9651 | void VPReductionRecipe::execute(VPTransformState &State) { | |||
| 9652 | assert(!State.Instance && "Reduction being replicated."); | |||
| 9653 | Value *PrevInChain = State.get(getChainOp(), 0); | |||
| 9654 | for (unsigned Part = 0; Part < State.UF; ++Part) { | |||
| 9655 | RecurKind Kind = RdxDesc->getRecurrenceKind(); | |||
| 9656 | bool IsOrdered = State.ILV->useOrderedReductions(*RdxDesc); | |||
| 9657 | Value *NewVecOp = State.get(getVecOp(), Part); | |||
| 9658 | if (VPValue *Cond = getCondOp()) { | |||
| 9659 | Value *NewCond = State.get(Cond, Part); | |||
| 9660 | VectorType *VecTy = cast<VectorType>(NewVecOp->getType()); | |||
| 9661 | Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity( | |||
| 9662 | Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags()); | |||
| 9663 | Constant *IdenVec = | |||
| 9664 | ConstantVector::getSplat(VecTy->getElementCount(), Iden); | |||
| 9665 | Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec); | |||
| 9666 | NewVecOp = Select; | |||
| 9667 | } | |||
| 9668 | Value *NewRed; | |||
| 9669 | Value *NextInChain; | |||
| 9670 | if (IsOrdered) { | |||
| 9671 | if (State.VF.isVector()) | |||
| 9672 | NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp, | |||
| 9673 | PrevInChain); | |||
| 9674 | else | |||
| 9675 | NewRed = State.Builder.CreateBinOp( | |||
| 9676 | (Instruction::BinaryOps)getUnderlyingInstr()->getOpcode(), | |||
| 9677 | PrevInChain, NewVecOp); | |||
| 9678 | PrevInChain = NewRed; | |||
| 9679 | } else { | |||
| 9680 | PrevInChain = State.get(getChainOp(), Part); | |||
| 9681 | NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp); | |||
| 9682 | } | |||
| 9683 | if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { | |||
| 9684 | NextInChain = | |||
| 9685 | createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(), | |||
| 9686 | NewRed, PrevInChain); | |||
| 9687 | } else if (IsOrdered) | |||
| 9688 | NextInChain = NewRed; | |||
| 9689 | else { | |||
| 9690 | NextInChain = State.Builder.CreateBinOp( | |||
| 9691 | (Instruction::BinaryOps)getUnderlyingInstr()->getOpcode(), NewRed, | |||
| 9692 | PrevInChain); | |||
| 9693 | } | |||
| 9694 | State.set(this, NextInChain, Part); | |||
| 9695 | } | |||
| 9696 | } | |||
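| | ||||
| | // Editorial sketch (hypothetical IR names): with tail folding, an unordered | |||
| | // fadd reduction at VF = 4 masks the inactive lanes with the recurrence | |||
| | // identity (-0.0 for fadd) before reducing, roughly: | |||
| | //   %sel  = select <4 x i1> %mask, <4 x float> %vec, | |||
| | //                  <4 x float> <float -0.0, ..., float -0.0> | |||
| | //   %rdx  = call fast float @llvm.vector.reduce.fadd.v4f32(float -0.0, | |||
| | //                                                          <4 x float> %sel) | |||
| | //   %next = fadd fast float %rdx, %prev.chain | |||
| | // so masked-off lanes cannot perturb the reduced value. | |||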
| 9697 | ||||
| 9698 | void VPReplicateRecipe::execute(VPTransformState &State) { | |||
| 9699 | if (State.Instance) { // Generate a single instance. | |||
| 9700 | assert(!State.VF.isScalable() && "Can't scalarize a scalable vector"); | |||
| 9701 | State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this, | |||
| 9702 | *State.Instance, IsPredicated, State); | |||
| 9703 | // Insert scalar instance packing it into a vector. | |||
| 9704 | if (AlsoPack && State.VF.isVector()) { | |||
| 9705 | // If we're constructing lane 0, initialize to start from poison. | |||
| 9706 | if (State.Instance->Lane.isFirstLane()) { | |||
| 9707 | assert(!State.VF.isScalable() && "VF is assumed to be non scalable."); | |||
| 9708 | Value *Poison = PoisonValue::get( | |||
| 9709 | VectorType::get(getUnderlyingValue()->getType(), State.VF)); | |||
| 9710 | State.set(this, Poison, State.Instance->Part); | |||
| 9711 | } | |||
| 9712 | State.ILV->packScalarIntoVectorValue(this, *State.Instance, State); | |||
| 9713 | } | |||
| 9714 | return; | |||
| 9715 | } | |||
| 9716 | ||||
| 9717 | // Generate scalar instances for all VF lanes of all UF parts, unless the | |||
| 9718 | // instruction is uniform, in which case generate only the first lane for each | |||
| 9719 | // of the UF parts. | |||
| 9720 | unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue(); | |||
| 9721 | assert((!State.VF.isScalable() || IsUniform) && | |||
| 9722 | "Can't scalarize a scalable vector"); | |||
| 9723 | for (unsigned Part = 0; Part < State.UF; ++Part) | |||
| 9724 | for (unsigned Lane = 0; Lane < EndLane; ++Lane) | |||
| 9725 | State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this, | |||
| 9726 | VPIteration(Part, Lane), IsPredicated, | |||
| 9727 | State); | |||
| 9728 | } | |||
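| | ||||
| | // Editorial note: with VF = 4 and UF = 2, a non-uniform replicated | |||
| | // instruction is cloned eight times above, once per (Part, Lane) pair, | |||
| | // while a uniform one is cloned only twice, for lane 0 of each part. | |||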
| 9729 | ||||
| 9730 | void VPBranchOnMaskRecipe::execute(VPTransformState &State) { | |||
| 9731 | assert(State.Instance && "Branch on Mask works only on single instance."); | |||
| 9732 | ||||
| 9733 | unsigned Part = State.Instance->Part; | |||
| 9734 | unsigned Lane = State.Instance->Lane.getKnownLane(); | |||
| 9735 | ||||
| 9736 | Value *ConditionBit = nullptr; | |||
| 9737 | VPValue *BlockInMask = getMask(); | |||
| 9738 | if (BlockInMask) { | |||
| 9739 | ConditionBit = State.get(BlockInMask, Part); | |||
| 9740 | if (ConditionBit->getType()->isVectorTy()) | |||
| 9741 | ConditionBit = State.Builder.CreateExtractElement( | |||
| 9742 | ConditionBit, State.Builder.getInt32(Lane)); | |||
| 9743 | } else // Block in mask is all-one. | |||
| 9744 | ConditionBit = State.Builder.getTrue(); | |||
| 9745 | ||||
| 9746 | // Replace the temporary unreachable terminator with a new conditional branch, | |||
| 9747 | // whose two destinations will be set later when they are created. | |||
| 9748 | auto *CurrentTerminator = State.CFG.PrevBB->getTerminator(); | |||
| 9749 | assert(isa<UnreachableInst>(CurrentTerminator) && | |||
| 9750 | "Expected to replace unreachable terminator with conditional branch."); | |||
| 9751 | auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit); | |||
| 9752 | CondBr->setSuccessor(0, nullptr); | |||
| 9753 | ReplaceInstWithInst(CurrentTerminator, CondBr); | |||
| 9754 | } | |||
| 9755 | ||||
| 9756 | void VPPredInstPHIRecipe::execute(VPTransformState &State) { | |||
| 9757 | assert(State.Instance && "Predicated instruction PHI works per instance."); | |||
| 9758 | Instruction *ScalarPredInst = | |||
| 9759 | cast<Instruction>(State.get(getOperand(0), *State.Instance)); | |||
| 9760 | BasicBlock *PredicatedBB = ScalarPredInst->getParent(); | |||
| 9761 | BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor(); | |||
| 9762 | assert(PredicatingBB && "Predicated block has no single predecessor."); | |||
| 9763 | assert(isa<VPReplicateRecipe>(getOperand(0)) && | |||
| 9764 | "operand must be VPReplicateRecipe"); | |||
| 9765 | ||||
| 9766 | // By current pack/unpack logic we need to generate only a single phi node: if | |||
| 9767 | // a vector value for the predicated instruction exists at this point it means | |||
| 9768 | // the instruction has vector users only, and a phi for the vector value is | |||
| 9769 | // needed. In this case the recipe of the predicated instruction is marked to | |||
| 9770 | // also do that packing, thereby "hoisting" the insert-element sequence. | |||
| 9771 | // Otherwise, a phi node for the scalar value is needed. | |||
| 9772 | unsigned Part = State.Instance->Part; | |||
| 9773 | if (State.hasVectorValue(getOperand(0), Part)) { | |||
| 9774 | Value *VectorValue = State.get(getOperand(0), Part); | |||
| 9775 | InsertElementInst *IEI = cast<InsertElementInst>(VectorValue); | |||
| 9776 | PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2); | |||
| 9777 | VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector. | |||
| 9778 | VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element. | |||
| 9779 | if (State.hasVectorValue(this, Part)) | |||
| 9780 | State.reset(this, VPhi, Part); | |||
| 9781 | else | |||
| 9782 | State.set(this, VPhi, Part); | |||
| 9783 | // NOTE: Currently we need to update the value of the operand, so the next | |||
| 9784 | // predicated iteration inserts its generated value in the correct vector. | |||
| 9785 | State.reset(getOperand(0), VPhi, Part); | |||
| 9786 | } else { | |||
| 9787 | Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType(); | |||
| 9788 | PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2); | |||
| 9789 | Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()), | |||
| 9790 | PredicatingBB); | |||
| 9791 | Phi->addIncoming(ScalarPredInst, PredicatedBB); | |||
| 9792 | if (State.hasScalarValue(this, *State.Instance)) | |||
| 9793 | State.reset(this, Phi, *State.Instance); | |||
| 9794 | else | |||
| 9795 | State.set(this, Phi, *State.Instance); | |||
| 9796 | // NOTE: Currently we need to update the value of the operand, so the next | |||
| 9797 | // predicated iteration inserts its generated value in the correct vector. | |||
| 9798 | State.reset(getOperand(0), Phi, *State.Instance); | |||
| 9799 | } | |||
| 9800 | } | |||
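| | ||||
| | // Editorial sketch (hypothetical IR names): for a predicated scalar %d that | |||
| | // is also packed into a vector, the vector-value branch above produces | |||
| | //   pred.block: | |||
| | //     %d    = sdiv i32 %x, %y | |||
| | //     %vec  = insertelement <4 x i32> %vec.prev, i32 %d, i32 %lane | |||
| | //   merge.block: | |||
| | //     %vphi = phi <4 x i32> [ %vec.prev, %predicating.bb ], | |||
| | //                           [ %vec, %pred.block ] | |||
| | // so lanes whose mask bit was false keep the previous vector contents. | |||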
| 9801 | ||||
| 9802 | void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { | |||
| 9803 | VPValue *StoredValue = isStore() ? getStoredValue() : nullptr; | |||
| 9804 | State.ILV->vectorizeMemoryInstruction( | |||
| 9805 | &Ingredient, State, StoredValue ? nullptr : getVPSingleValue(), getAddr(), | |||
| 9806 | StoredValue, getMask()); | |||
| 9807 | } | |||
| 9808 | ||||
| 9809 | // Determine how to lower the scalar epilogue, which depends on 1) optimizing | |||
| 9810 | // for minimum code-size, 2) predication compiler options, 3) loop hints forcing | |||
| 9811 | // predication, and 4) a TTI hook that analyzes whether the loop is suitable | |||
| 9812 | // for predication. | |||
| 9813 | static ScalarEpilogueLowering getScalarEpilogueLowering( | |||
| 9814 | Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI, | |||
| 9815 | BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, | |||
| 9816 | AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, | |||
| 9817 | LoopVectorizationLegality &LVL) { | |||
| 9818 | // 1) OptSize takes precedence over all other options, i.e. if this is set, | |||
| 9819 | // don't look at hints or options, and don't request a scalar epilogue. | |||
| 9820 | // (For PGSO, as shouldOptimizeForSize isn't currently accessible from | |||
| 9821 | // LoopAccessInfo (due to code dependency and not being able to reliably get | |||
| 9822 | // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection | |||
| 9823 | // of strides in LoopAccessInfo::analyzeLoop() and vectorize without | |||
| 9824 | // versioning when the vectorization is forced, unlike hasOptSize. So revert | |||
| 9825 | // back to the old way and vectorize with versioning when forced. See D81345.) | |||
| 9826 | if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI, | |||
| 9827 | PGSOQueryType::IRPass) && | |||
| 9828 | Hints.getForce() != LoopVectorizeHints::FK_Enabled)) | |||
| 9829 | return CM_ScalarEpilogueNotAllowedOptSize; | |||
| 9830 | ||||
| 9831 | // 2) If set, obey the directives | |||
| 9832 | if (PreferPredicateOverEpilogue.getNumOccurrences()) { | |||
| 9833 | switch (PreferPredicateOverEpilogue) { | |||
| 9834 | case PreferPredicateTy::ScalarEpilogue: | |||
| 9835 | return CM_ScalarEpilogueAllowed; | |||
| 9836 | case PreferPredicateTy::PredicateElseScalarEpilogue: | |||
| 9837 | return CM_ScalarEpilogueNotNeededUsePredicate; | |||
| 9838 | case PreferPredicateTy::PredicateOrDontVectorize: | |||
| 9839 | return CM_ScalarEpilogueNotAllowedUsePredicate; | |||
| 9840 | } | |||
| 9841 | } | |||
| 9842 | ||||
| 9843 | // 3) If set, obey the hints | |||
| 9844 | switch (Hints.getPredicate()) { | |||
| 9845 | case LoopVectorizeHints::FK_Enabled: | |||
| 9846 | return CM_ScalarEpilogueNotNeededUsePredicate; | |||
| 9847 | case LoopVectorizeHints::FK_Disabled: | |||
| 9848 | return CM_ScalarEpilogueAllowed; | |||
| 9849 | } | |||
| 9850 | ||||
| 9851 | // 4) If the TTI hook indicates this is profitable, request predication. | |||
| 9852 | if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT, | |||
| 9853 | LVL.getLAI())) | |||
| 9854 | return CM_ScalarEpilogueNotNeededUsePredicate; | |||
| 9855 | ||||
| 9856 | return CM_ScalarEpilogueAllowed; | |||
| 9857 | } | |||
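| | ||||
| | // Editorial note: the precedence implemented above is, from strongest to | |||
| | // weakest: | |||
| | //   1. optimizing for size (-Os or profile-guided size opts) forbids a | |||
| | //      scalar epilogue; | |||
| | //   2. the -prefer-predicate-over-epilogue command-line option; | |||
| | //   3. the predication loop hint (e.g. from | |||
| | //      '#pragma clang loop vectorize_predicate(enable)'); | |||
| | //   4. the TTI->preferPredicateOverEpilogue() target hook; | |||
| | // with CM_ScalarEpilogueAllowed as the default when none of these fire. | |||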
| 9858 | ||||
| 9859 | Value *VPTransformState::get(VPValue *Def, unsigned Part) { | |||
| 9860 | // If Values have been set for this Def return the one relevant for \p Part. | |||
| 9861 | if (hasVectorValue(Def, Part)) | |||
| 9862 | return Data.PerPartOutput[Def][Part]; | |||
| 9863 | ||||
| 9864 | if (!hasScalarValue(Def, {Part, 0})) { | |||
| 9865 | Value *IRV = Def->getLiveInIRValue(); | |||
| 9866 | Value *B = ILV->getBroadcastInstrs(IRV); | |||
| 9867 | set(Def, B, Part); | |||
| 9868 | return B; | |||
| 9869 | } | |||
| 9870 | ||||
| 9871 | Value *ScalarValue = get(Def, {Part, 0}); | |||
| 9872 | // If we aren't vectorizing, we can just copy the scalar map values over | |||
| 9873 | // to the vector map. | |||
| 9874 | if (VF.isScalar()) { | |||
| 9875 | set(Def, ScalarValue, Part); | |||
| 9876 | return ScalarValue; | |||
| 9877 | } | |||
| 9878 | ||||
| 9879 | auto *RepR = dyn_cast<VPReplicateRecipe>(Def); | |||
| 9880 | bool IsUniform = RepR && RepR->isUniform(); | |||
| 9881 | ||||
| 9882 | unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1; | |||
| 9883 | // Check if there is a scalar value for the selected lane. | |||
| 9884 | if (!hasScalarValue(Def, {Part, LastLane})) { | |||
| 9885 | // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform. | |||
| 9886 | assert(isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) && | |||
| 9887 | "unexpected recipe found to be invariant"); | |||
| 9888 | IsUniform = true; | |||
| 9889 | LastLane = 0; | |||
| 9890 | } | |||
| 9891 | ||||
| 9892 | auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane})); | |||
| 9893 | // Set the insert point after the last scalarized instruction or after the | |||
| 9894 | // last PHI, if LastInst is a PHI. This ensures the insertelement sequence | |||
| 9895 | // will directly follow the scalar definitions. | |||
| 9896 | auto OldIP = Builder.saveIP(); | |||
| 9897 | auto NewIP = | |||
| 9898 | isa<PHINode>(LastInst) | |||
| 9899 | ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI()) | |||
| 9900 | : std::next(BasicBlock::iterator(LastInst)); | |||
| 9901 | Builder.SetInsertPoint(&*NewIP); | |||
| 9902 | ||||
| 9903 | // However, if we are vectorizing, we need to construct the vector values. | |||
| 9904 | // If the value is known to be uniform after vectorization, we can just | |||
| 9905 | // broadcast the scalar value corresponding to lane zero for each unroll | |||
| 9906 | // iteration. Otherwise, we construct the vector values using | |||
| 9907 | // insertelement instructions. Since the resulting vectors are stored in | |||
| 9908 | // State, we will only generate the insertelements once. | |||
| 9909 | Value *VectorValue = nullptr; | |||
| 9910 | if (IsUniform) { | |||
| 9911 | VectorValue = ILV->getBroadcastInstrs(ScalarValue); | |||
| 9912 | set(Def, VectorValue, Part); | |||
| 9913 | } else { | |||
| 9914 | // Initialize packing with insertelements to start from poison. | |||
| 9915 | assert(!VF.isScalable() && "VF is assumed to be non scalable."); | |||
| 9916 | Value *Poison = PoisonValue::get(VectorType::get(LastInst->getType(), VF)); | |||
| 9917 | set(Def, Poison, Part); | |||
| 9918 | for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane) | |||
| 9919 | ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this); | |||
| 9920 | VectorValue = get(Def, Part); | |||
| 9921 | } | |||
| 9922 | Builder.restoreIP(OldIP); | |||
| 9923 | return VectorValue; | |||
| 9924 | } | |||
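| | ||||
| | // Editorial sketch (hypothetical IR names): when a vector value is requested | |||
| | // for a Def that only has per-lane scalars, the non-uniform path above packs | |||
| | //   %v0 = insertelement <4 x i32> poison, i32 %s0, i32 0 | |||
| | //   %v1 = insertelement <4 x i32> %v0, i32 %s1, i32 1 | |||
| | //   ... | |||
| | // directly after the last scalar definition, while the uniform path emits a | |||
| | // single broadcast (splat) of the lane-0 value instead. | |||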
| 9925 | ||||
| 9926 | // Process the loop in the VPlan-native vectorization path. This path builds | |||
| 9927 | // VPlan upfront in the vectorization pipeline, which allows applying | |||
| 9928 | // VPlan-to-VPlan transformations from the very beginning without modifying the | |||
| 9929 | // input LLVM IR. | |||
| 9930 | static bool processLoopInVPlanNativePath( | |||
| 9931 | Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, | |||
| 9932 | LoopVectorizationLegality *LVL, TargetTransformInfo *TTI, | |||
| 9933 | TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, | |||
| 9934 | OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI, | |||
| 9935 | ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints, | |||
| 9936 | LoopVectorizationRequirements &Requirements) { | |||
| 9937 | ||||
| 9938 | if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) { | |||
| 9939 | LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n"); | |||
| 9940 | return false; | |||
| 9941 | } | |||
| 9942 | assert(EnableVPlanNativePath && "VPlan-native path is disabled."); | |||
| 9943 | Function *F = L->getHeader()->getParent(); | |||
| 9944 | InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI()); | |||
| 9945 | ||||
| 9946 | ScalarEpilogueLowering SEL = getScalarEpilogueLowering( | |||
| 9947 | F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL); | |||
| 9948 | ||||
| 9949 | LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F, | |||
| 9950 | &Hints, IAI); | |||
| 9951 | // Use the planner for outer loop vectorization. | |||
| 9952 | // TODO: CM is not used at this point inside the planner. Turn CM into an | |||
| 9953 | // optional argument if we don't need it in the future. | |||
| 9954 | LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints, | |||
| 9955 | Requirements, ORE); | |||
| 9956 | ||||
| 9957 | // Get user vectorization factor. | |||
| 9958 | ElementCount UserVF = Hints.getWidth(); | |||
| 9959 | ||||
| 9960 | CM.collectElementTypesForWidening(); | |||
| 9961 | ||||
| 9962 | // Plan how to best vectorize, return the best VF and its cost. | |||
| 9963 | const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF); | |||
| 9964 | ||||
| 9965 | // If we are stress testing VPlan builds, do not attempt to generate vector | |||
| 9966 | // code. Masked vector code generation support will follow soon. | |||
| 9967 | // Also, do not attempt to vectorize if no vector code will be produced. | |||
| 9968 | if (VPlanBuildStressTest || EnableVPlanPredication || | |||
| 9969 | VectorizationFactor::Disabled() == VF) | |||
| 9970 | return false; | |||
| 9971 | ||||
| 9972 | LVP.setBestPlan(VF.Width, 1); | |||
| 9973 | ||||
| 9974 | { | |||
| 9975 | GeneratedRTChecks Checks(*PSE.getSE(), DT, LI, | |||
| 9976 | F->getParent()->getDataLayout()); | |||
| 9977 | InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL, | |||
| 9978 | &CM, BFI, PSI, Checks); | |||
| 9979 | LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \"" | |||
| 9980 | << L->getHeader()->getParent()->getName() << "\"\n"); | |||
| 9981 | LVP.executePlan(LB, DT); | |||
| 9982 | } | |||
| 9983 | ||||
| 9984 | // Mark the loop as already vectorized to avoid vectorizing again. | |||
| 9985 | Hints.setAlreadyVectorized(); | |||
| 9986 | assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs())); | |||
| 9987 | return true; | |||
| 9988 | } | |||
| 9989 | ||||
| 9990 | // Emit a remark if there are stores to floats that required a floating point | |||
| 9991 | // extension. If the vectorized loop contains floating point conversions, there | |||
| 9992 | // will be a performance penalty from the conversion overhead and the change in | |||
| 9993 | // the vector width. | |||
| 9994 | static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) { | |||
| 9995 | SmallVector<Instruction *, 4> Worklist; | |||
| 9996 | for (BasicBlock *BB : L->getBlocks()) { | |||
| 9997 | for (Instruction &Inst : *BB) { | |||
| 9998 | if (auto *S = dyn_cast<StoreInst>(&Inst)) { | |||
| 9999 | if (S->getValueOperand()->getType()->isFloatTy()) | |||
| 10000 | Worklist.push_back(S); | |||
| 10001 | } | |||
| 10002 | } | |||
| 10003 | } | |||
| 10004 | ||||
| 10005 | // Traverse upwards from the floating point stores, searching for floating | |||
| 10006 | // point conversions. | |||
| 10007 | SmallPtrSet<const Instruction *, 4> Visited; | |||
| 10008 | SmallPtrSet<const Instruction *, 4> EmittedRemark; | |||
| 10009 | while (!Worklist.empty()) { | |||
| 10010 | auto *I = Worklist.pop_back_val(); | |||
| 10011 | if (!L->contains(I)) | |||
| 10012 | continue; | |||
| 10013 | if (!Visited.insert(I).second) | |||
| 10014 | continue; | |||
| 10015 | ||||
| 10016 | // Emit a remark if the floating point store required a floating | |||
| 10017 | // point conversion. | |||
| 10018 | // TODO: More work could be done to identify the root cause such as a | |||
| 10019 | // constant or a function return type and point the user to it. | |||
| 10020 | if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second) | |||
| 10021 | ORE->emit([&]() { | |||
| 10022 | return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision", | |||
| 10023 | I->getDebugLoc(), L->getHeader()) | |||
| 10024 | << "floating point conversion changes vector width. " | |||
| 10025 | << "Mixed floating point precision requires an up/down " | |||
| 10026 | << "cast that will negatively impact performance."; | |||
| 10027 | }); | |||
| 10028 | ||||
| 10029 | for (Use &Op : I->operands()) | |||
| 10030 | if (auto *OpI = dyn_cast<Instruction>(Op)) | |||
| 10031 | Worklist.push_back(OpI); | |||
| 10032 | } | |||
| 10033 | } | |||
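| | ||||
| | // Editorial sketch: a loop mixing __fp16 loads with float arithmetic, e.g. | |||
| | //   void scale(__fp16 *h, float *f, int n) { | |||
| | //     for (int i = 0; i < n; ++i) | |||
| | //       f[i] = h[i] * 2.0f;  // load half, fpext to float, store float | |||
| | //   } | |||
| | // stores floats whose values pass through an FPExtInst, so the upward walk | |||
| | // above reaches the fpext and emits the VectorMixedPrecision remark. | |||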
| 10034 | ||||
| 10035 | LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts) | |||
| 10036 | : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced || | |||
| 10037 | !EnableLoopInterleaving), | |||
| 10038 | VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced || | |||
| 10039 | !EnableLoopVectorization) {} | |||
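| | ||||
| | // Editorial note: the constructor above folds the global -vectorize-loops | |||
| | // and -interleave-loops cl::opts into the per-pass parameters; e.g. passing | |||
| | // -vectorize-loops=false behaves like VectorizeOnlyWhenForced, so loops are | |||
| | // then vectorized only when a hint or pragma explicitly forces it. | |||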
| 10040 | ||||
| 10041 | bool LoopVectorizePass::processLoop(Loop *L) { | |||
| 10042 | assert((EnableVPlanNativePath || L->isInnermost()) && | |||
| 10043 | "VPlan-native path is not enabled. Only process inner loops."); | |||
| 10044 | ||||
| 10045 | #ifndef NDEBUG | |||
| 10046 | const std::string DebugLocStr = getDebugLocString(L); | |||
| 10047 | #endif /* NDEBUG */ | |||
| 10048 | ||||
| 10049 | LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \"" | |||
| 10050 | << L->getHeader()->getParent()->getName() << "\" from " | |||
| 10051 | << DebugLocStr << "\n"); | |||
| 10052 | ||||
| 10053 | LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE); | |||
| 10054 | ||||
| 10055 | LLVM_DEBUG( | |||
| 10056 | dbgs() << "LV: Loop hints:" | |||
| 10057 | << " force=" | |||
| 10058 | << (Hints.getForce() == LoopVectorizeHints::FK_Disabled | |||
| 10059 | ? "disabled" | |||
| 10060 | : (Hints.getForce() == LoopVectorizeHints::FK_Enabled | |||
| 10061 | ? "enabled" | |||
| 10062 | : "?")) | |||
| 10063 | << " width=" << Hints.getWidth() | |||
| 10064 | << " interleave=" << Hints.getInterleave() << "\n"); | |||
| 10065 | ||||
| 10066 | // Function containing loop | |||
| 10067 | Function *F = L->getHeader()->getParent(); | |||
| 10068 | ||||
| 10069 | // Looking at the diagnostic output is the only way to determine if a loop | |||
| 10070 | // was vectorized (other than looking at the IR or machine code), so it | |||
| 10071 | // is important to generate an optimization remark for each loop. Most of | |||
| 10072 | // these messages are generated as OptimizationRemarkAnalysis. Remarks | |||
| 10073 | // generated as OptimizationRemark and OptimizationRemarkMissed are | |||
| 10074 | // less verbose, reporting vectorized loops and unvectorized loops that may | |||
| 10075 | // benefit from vectorization, respectively. | |||
| 10076 | ||||
| 10077 | if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) { | |||
| 10078 | LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n"); | |||
| 10079 | return false; | |||
| 10080 | } | |||
| 10081 | ||||
| 10082 | PredicatedScalarEvolution PSE(*SE, *L); | |||
| 10083 | ||||
| 10084 | // Check if it is legal to vectorize the loop. | |||
| 10085 | LoopVectorizationRequirements Requirements; | |||
| 10086 | LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE, | |||
| 10087 | &Requirements, &Hints, DB, AC, BFI, PSI); | |||
| 10088 | if (!LVL.canVectorize(EnableVPlanNativePath)) { | |||
| 10089 | LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); | |||
| 10090 | Hints.emitRemarkWithHints(); | |||
| 10091 | return false; | |||
| 10092 | } | |||
| 10093 | ||||
| 10094 | // Check the function attributes and profiles to find out if this function | |||
| 10095 | // should be optimized for size. | |||
| 10096 | ScalarEpilogueLowering SEL = getScalarEpilogueLowering( | |||
| 10097 | F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL); | |||
| 10098 | ||||
| 10099 | // Entrance to the VPlan-native vectorization path. Outer loops are processed | |||
| 10100 | // here. They may require CFG and instruction level transformations before | |||
| 10101 | // even evaluating whether vectorization is profitable. Since we cannot modify | |||
| 10102 | // the incoming IR, we need to build VPlan upfront in the vectorization | |||
| 10103 | // pipeline. | |||
| 10104 | if (!L->isInnermost()) | |||
| 10105 | return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC, | |||
| 10106 | ORE, BFI, PSI, Hints, Requirements); | |||
| 10107 | ||||
| 10108 | assert(L->isInnermost() && "Inner loop expected."); | |||
| 10109 | ||||
| 10110 | // Check the loop for a trip count threshold: vectorize loops with a tiny trip | |||
| 10111 | // count by optimizing for size, to minimize overheads. | |||
| 10112 | auto ExpectedTC = getSmallBestKnownTC(*SE, L); | |||
| 10113 | if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) { | |||
| 10114 | LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. " | |||
| 10115 | << "This loop is worth vectorizing only if no scalar " | |||
| 10116 | << "iteration overheads are incurred."); | |||
| 10117 | if (Hints.getForce() == LoopVectorizeHints::FK_Enabled) | |||
| 10118 | LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n"); | |||
| 10119 | else { | |||
| 10120 | LLVM_DEBUG(dbgs() << "\n"); | |||
| 10121 | SEL = CM_ScalarEpilogueNotAllowedLowTripLoop; | |||
| 10122 | } | |||
| 10123 | } | |||
| 10124 | ||||
| 10125 | // Check the function attributes to see if implicit floats are allowed. | |||
| 10126 | // FIXME: This check doesn't seem possibly correct -- what if the loop is | |||
| 10127 | // an integer loop and the vector instructions selected are purely integer | |||
| 10128 | // vector instructions? | |||
| 10129 | if (F->hasFnAttribute(Attribute::NoImplicitFloat)) { | |||
| 10130 | reportVectorizationFailure( | |||
| 10131 | "Can't vectorize when the NoImplicitFloat attribute is used", | |||
| 10132 | "loop not vectorized due to NoImplicitFloat attribute", | |||
| 10133 | "NoImplicitFloat", ORE, L); | |||
| 10134 | Hints.emitRemarkWithHints(); | |||
| 10135 | return false; | |||
| 10136 | } | |||
| 10137 | ||||
| 10138 | // Check if the target supports potentially unsafe FP vectorization. | |||
| 10139 | // FIXME: Add a check for the type of safety issue (denormal, signaling) | |||
| 10140 | // for the target we're vectorizing for, to make sure none of the | |||
| 10141 | // additional fp-math flags can help. | |||
| 10142 | if (Hints.isPotentiallyUnsafe() && | |||
| 10143 | TTI->isFPVectorizationPotentiallyUnsafe()) { | |||
| 10144 | reportVectorizationFailure( | |||
| 10145 | "Potentially unsafe FP op prevents vectorization", | |||
| 10146 | "loop not vectorized due to unsafe FP support.", | |||
| 10147 | "UnsafeFP", ORE, L); | |||
| 10148 | Hints.emitRemarkWithHints(); | |||
| 10149 | return false; | |||
| 10150 | } | |||
| 10151 | ||||
| 10152 | if (!LVL.canVectorizeFPMath(EnableStrictReductions)) { | |||
| 10153 | ORE->emit([&]() { | |||
| 10154 | auto *ExactFPMathInst = Requirements.getExactFPInst(); | |||
| 10155 | return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps", | |||
| 10156 | ExactFPMathInst->getDebugLoc(), | |||
| 10157 | ExactFPMathInst->getParent()) | |||
| 10158 | << "loop not vectorized: cannot prove it is safe to reorder " | |||
| 10159 | "floating-point operations"; | |||
| 10160 | }); | |||
| 10161 | LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to " | |||
| 10162 | "reorder floating-point operations\n"); | |||
| 10163 | Hints.emitRemarkWithHints(); | |||
| 10164 | return false; | |||
| 10165 | } | |||
| 10166 | ||||
| 10167 | bool UseInterleaved = TTI->enableInterleavedAccessVectorization(); | |||
| 10168 | InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI()); | |||
| 10169 | ||||
| 10170 | // If an override option has been passed in for interleaved accesses, use it. | |||
| 10171 | if (EnableInterleavedMemAccesses.getNumOccurrences() > 0) | |||
| 10172 | UseInterleaved = EnableInterleavedMemAccesses; | |||
| 10173 | ||||
| 10174 | // Analyze interleaved memory accesses. | |||
| 10175 | if (UseInterleaved) { | |||
| 10176 | IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI)); | |||
| 10177 | } | |||
| 10178 | ||||
| 10179 | // Use the cost model. | |||
| 10180 | LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, | |||
| 10181 | F, &Hints, IAI); | |||
| 10182 | CM.collectValuesToIgnore(); | |||
| 10183 | CM.collectElementTypesForWidening(); | |||
| 10184 | ||||
| 10185 | // Use the planner for vectorization. | |||
| 10186 | LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints, | |||
| 10187 | Requirements, ORE); | |||
| 10188 | ||||
| 10189 | // Get user vectorization factor and interleave count. | |||
| 10190 | ElementCount UserVF = Hints.getWidth(); | |||
| 10191 | unsigned UserIC = Hints.getInterleave(); | |||
| 10192 | ||||
| 10193 | // Plan how to best vectorize, return the best VF and its cost. | |||
| 10194 | Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC); | |||
| 10195 | ||||
| 10196 | VectorizationFactor VF = VectorizationFactor::Disabled(); | |||
| 10197 | unsigned IC = 1; | |||
| 10198 | ||||
| 10199 | if (MaybeVF) { | |||
| 10200 | VF = *MaybeVF; | |||
| 10201 | // Select the interleave count. | |||
| 10202 | IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue()); | |||
| 10203 | } | |||
| 10204 | ||||
| 10205 | // Identify the diagnostic messages that should be produced. | |||
| 10206 | std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg; | |||
| 10207 | bool VectorizeLoop = true, InterleaveLoop = true; | |||
| 10208 | if (VF.Width.isScalar()) { | |||
| 10209 | LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n"); | |||
| 10210 | VecDiagMsg = std::make_pair( | |||
| 10211 | "VectorizationNotBeneficial", | |||
| 10212 | "the cost-model indicates that vectorization is not beneficial"); | |||
| 10213 | VectorizeLoop = false; | |||
| 10214 | } | |||
| 10215 | ||||
| 10216 | if (!MaybeVF && UserIC > 1) { | |||
| 10217 | // Tell the user interleaving was avoided up-front, despite being explicitly | |||
| 10218 | // requested. | |||
| 10219 | LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and " | |||
| 10220 | "interleaving should be avoided up front\n"); | |||
| 10221 | IntDiagMsg = std::make_pair( | |||
| 10222 | "InterleavingAvoided", | |||
| 10223 | "Ignoring UserIC, because interleaving was avoided up front"); | |||
| 10224 | InterleaveLoop = false; | |||
| 10225 | } else if (IC == 1 && UserIC <= 1) { | |||
| 10226 | // Tell the user interleaving is not beneficial. | |||
| 10227 | LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n"); | |||
| 10228 | IntDiagMsg = std::make_pair( | |||
| 10229 | "InterleavingNotBeneficial", | |||
| 10230 | "the cost-model indicates that interleaving is not beneficial"); | |||
| 10231 | InterleaveLoop = false; | |||
| 10232 | if (UserIC == 1) { | |||
| 10233 | IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled"; | |||
| 10234 | IntDiagMsg.second += | |||
| 10235 | " and is explicitly disabled or interleave count is set to 1"; | |||
| 10236 | } | |||
| 10237 | } else if (IC > 1 && UserIC == 1) { | |||
| 10238 | // Tell the user interleaving is beneficial, but it is explicitly disabled. | |||
| 10239 | LLVM_DEBUG( | |||
| 10240 | dbgs() << "LV: Interleaving is beneficial but is explicitly disabled."); | |||
| 10241 | IntDiagMsg = std::make_pair( | |||
| 10242 | "InterleavingBeneficialButDisabled", | |||
| 10243 | "the cost-model indicates that interleaving is beneficial " | |||
| 10244 | "but is explicitly disabled or interleave count is set to 1"); | |||
| 10245 | InterleaveLoop = false; | |||
| 10246 | } | |||
| 10247 | ||||
| 10248 | // Override IC if user provided an interleave count. | |||
| 10249 | IC = UserIC > 0 ? UserIC : IC; | |||
| 10250 | ||||
| 10251 | // Emit diagnostic messages, if any. | |||
| 10252 | const char *VAPassName = Hints.vectorizeAnalysisPassName(); | |||
| 10253 | if (!VectorizeLoop && !InterleaveLoop) { | |||
| 10254 | // Do not vectorize or interleave the loop. | |||
| 10255 | ORE->emit([&]() { | |||
| 10256 | return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first, | |||
| 10257 | L->getStartLoc(), L->getHeader()) | |||
| 10258 | << VecDiagMsg.second; | |||
| 10259 | }); | |||
| 10260 | ORE->emit([&]() { | |||
| 10261 | return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first, | |||
| 10262 | L->getStartLoc(), L->getHeader()) | |||
| 10263 | << IntDiagMsg.second; | |||
| 10264 | }); | |||
| 10265 | return false; | |||
| 10266 | } else if (!VectorizeLoop && InterleaveLoop) { | |||
| 10267 | LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); | |||
| 10268 | ORE->emit([&]() { | |||
| 10269 | return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first, | |||
| 10270 | L->getStartLoc(), L->getHeader()) | |||
| 10271 | << VecDiagMsg.second; | |||
| 10272 | }); | |||
| 10273 | } else if (VectorizeLoop && !InterleaveLoop) { | |||
| 10274 | LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width | |||
| 10275 | << ") in " << DebugLocStr << '\n'); | |||
| 10276 | ORE->emit([&]() { | |||
| 10277 | return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first, | |||
| 10278 | L->getStartLoc(), L->getHeader()) | |||
| 10279 | << IntDiagMsg.second; | |||
| 10280 | }); | |||
| 10281 | } else if (VectorizeLoop && InterleaveLoop) { | |||
| 10282 | LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width | |||
| 10283 | << ") in " << DebugLocStr << '\n'); | |||
| 10284 | LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); | |||
| 10285 | } | |||
| 10286 | ||||
| 10287 | bool DisableRuntimeUnroll = false; | |||
| 10288 | MDNode *OrigLoopID = L->getLoopID(); | |||
| 10289 | { | |||
| 10290 | // Optimistically generate runtime checks. Drop them if they turn out not to | |||
| 10291 | // be profitable. Limit the scope of Checks, so the cleanup happens | |||
| 10292 | // immediately after vector code generation is done. | |||
| 10293 | GeneratedRTChecks Checks(*PSE.getSE(), DT, LI, | |||
| 10294 | F->getParent()->getDataLayout()); | |||
| 10295 | if (!VF.Width.isScalar() || IC > 1) | |||
| 10296 | Checks.Create(L, *LVL.getLAI(), PSE.getUnionPredicate()); | |||
| 10297 | LVP.setBestPlan(VF.Width, IC); | |||
| 10298 | ||||
| 10299 | using namespace ore; | |||
| 10300 | if (!VectorizeLoop) { | |||
| 10301 | assert(IC > 1 && "interleave count should not be 1 or 0"); | |||
| 10302 | // If we decided that it is not legal to vectorize the loop, then | |||
| 10303 | // interleave it. | |||
| 10304 | InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL, | |||
| 10305 | &CM, BFI, PSI, Checks); | |||
| 10306 | LVP.executePlan(Unroller, DT); | |||
| 10307 | ||||
| 10308 | ORE->emit([&]() { | |||
| 10309 | return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(), | |||
| 10310 | L->getHeader()) | |||
| 10311 | << "interleaved loop (interleaved count: " | |||
| 10312 | << NV("InterleaveCount", IC) << ")"; | |||
| 10313 | }); | |||
| 10314 | } else { | |||
| 10315 | // If we decided that it is *legal* to vectorize the loop, then do it. | |||
| 10316 | ||||
| 10317 | // Consider vectorizing the epilogue too if it's profitable. | |||
| 10318 | VectorizationFactor EpilogueVF = | |||
| 10319 | CM.selectEpilogueVectorizationFactor(VF.Width, LVP); | |||
| 10320 | if (EpilogueVF.Width.isVector()) { | |||
| 10321 | ||||
| 10322 | // The first pass vectorizes the main loop and creates a scalar epilogue | |||
| 10323 | // to be vectorized by executing the plan (potentially with a different | |||
| 10324 | // factor) again shortly afterwards. | |||
| 10325 | EpilogueLoopVectorizationInfo EPI(VF.Width.getKnownMinValue(), IC, | |||
| 10326 | EpilogueVF.Width.getKnownMinValue(), | |||
| 10327 | 1); | |||
| 10328 | EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE, | |||
| 10329 | EPI, &LVL, &CM, BFI, PSI, Checks); | |||
| 10330 | ||||
| 10331 | LVP.setBestPlan(EPI.MainLoopVF, EPI.MainLoopUF); | |||
| 10332 | LVP.executePlan(MainILV, DT); | |||
| 10333 | ++LoopsVectorized; | |||
| 10334 | ||||
| 10335 | simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */); | |||
| 10336 | formLCSSARecursively(*L, *DT, LI, SE); | |||
| 10337 | ||||
| 10338 | // Second pass vectorizes the epilogue and adjusts the control flow | |||
| 10339 | // edges from the first pass. | |||
| 10340 | LVP.setBestPlan(EPI.EpilogueVF, EPI.EpilogueUF); | |||
| 10341 | EPI.MainLoopVF = EPI.EpilogueVF; | |||
| 10342 | EPI.MainLoopUF = EPI.EpilogueUF; | |||
| 10343 | EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC, | |||
| 10344 | ORE, EPI, &LVL, &CM, BFI, PSI, | |||
| 10345 | Checks); | |||
| 10346 | LVP.executePlan(EpilogILV, DT); | |||
| 10347 | ++LoopsEpilogueVectorized; | |||
| 10348 | ||||
| 10349 | if (!MainILV.areSafetyChecksAdded()) | |||
| 10350 | DisableRuntimeUnroll = true; | |||
| 10351 | } else { | |||
| 10352 | InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC, | |||
| 10353 | &LVL, &CM, BFI, PSI, Checks); | |||
| 10354 | LVP.executePlan(LB, DT); | |||
| 10355 | ++LoopsVectorized; | |||
| 10356 | ||||
| 10357 | // Add metadata to disable runtime unrolling a scalar loop when there | |||
| 10358 | // are no runtime checks about strides and memory. A scalar loop that is | |||
| 10359 | // rarely used is not worth unrolling. | |||
| 10360 | if (!LB.areSafetyChecksAdded()) | |||
| 10361 | DisableRuntimeUnroll = true; | |||
| 10362 | } | |||
| 10363 | // Report the vectorization decision. | |||
| 10364 | ORE->emit([&]() { | |||
| 10365 | return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(), | |||
| 10366 | L->getHeader()) | |||
| 10367 | << "vectorized loop (vectorization width: " | |||
| 10368 | << NV("VectorizationFactor", VF.Width) | |||
| 10369 | << ", interleaved count: " << NV("InterleaveCount", IC) << ")"; | |||
| 10370 | }); | |||
| 10371 | } | |||
| 10372 | ||||
| 10373 | if (ORE->allowExtraAnalysis(LV_NAME)) | |||
| 10374 | checkMixedPrecision(L, ORE); | |||
| 10375 | } | |||
| 10376 | ||||
| 10377 | Optional<MDNode *> RemainderLoopID = | |||
| 10378 | makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, | |||
| 10379 | LLVMLoopVectorizeFollowupEpilogue}); | |||
| 10380 | if (RemainderLoopID.hasValue()) { | |||
| 10381 | L->setLoopID(RemainderLoopID.getValue()); | |||
| 10382 | } else { | |||
| 10383 | if (DisableRuntimeUnroll) | |||
| 10384 | AddRuntimeUnrollDisableMetaData(L); | |||
| 10385 | ||||
| 10386 | // Mark the loop as already vectorized to avoid vectorizing again. | |||
| 10387 | Hints.setAlreadyVectorized(); | |||
| 10388 | } | |||
| 10389 | ||||
| 10390 | assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs())); | |||
| 10391 | return true; | |||
| 10392 | } | |||
| 10393 | ||||
| 10394 | LoopVectorizeResult LoopVectorizePass::runImpl( | |||
| 10395 | Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_, | |||
| 10396 | DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_, | |||
| 10397 | DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_, | |||
| 10398 | std::function<const LoopAccessInfo &(Loop &)> &GetLAA_, | |||
| 10399 | OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) { | |||
| 10400 | SE = &SE_; | |||
| 10401 | LI = &LI_; | |||
| 10402 | TTI = &TTI_; | |||
| 10403 | DT = &DT_; | |||
| 10404 | BFI = &BFI_; | |||
| 10405 | TLI = TLI_; | |||
| 10406 | AA = &AA_; | |||
| 10407 | AC = &AC_; | |||
| 10408 | GetLAA = &GetLAA_; | |||
| 10409 | DB = &DB_; | |||
| 10410 | ORE = &ORE_; | |||
| 10411 | PSI = PSI_; | |||
| 10412 | ||||
| 10413 | // Don't attempt if | |||
| 10414 | // 1. the target claims to have no vector registers, and | |||
| 10415 | // 2. interleaving won't help ILP. | |||
| 10416 | // | |||
| 10417 | // The second condition is necessary because, even if the target has no | |||
| 10418 | // vector registers, loop vectorization may still enable scalar | |||
| 10419 | // interleaving. | |||
| 10420 | if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) && | |||
| 10421 | TTI->getMaxInterleaveFactor(1) < 2) | |||
| 10422 | return LoopVectorizeResult(false, false); | |||
| 10423 | ||||
| 10424 | bool Changed = false, CFGChanged = false; | |||
| 10425 | ||||
| 10426 | // The vectorizer requires loops to be in simplified form. | |||
| 10427 | // Since simplification may add new inner loops, it has to run before the | |||
| 10428 | // legality and profitability checks. This means running the loop vectorizer | |||
| 10429 | // will simplify all loops, regardless of whether anything ends up being | |||
| 10430 | // vectorized. | |||
| 10431 | for (auto &L : *LI) | |||
| 10432 | Changed |= CFGChanged |= | |||
| 10433 | simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */); | |||
| 10434 | ||||
| 10435 | // Build up a worklist of inner-loops to vectorize. This is necessary as | |||
| 10436 | // the act of vectorizing or partially unrolling a loop creates new loops | |||
| 10437 | // and can invalidate iterators across the loops. | |||
| 10438 | SmallVector<Loop *, 8> Worklist; | |||
| 10439 | ||||
| 10440 | for (Loop *L : *LI) | |||
| 10441 | collectSupportedLoops(*L, LI, ORE, Worklist); | |||
| 10442 | ||||
| 10443 | LoopsAnalyzed += Worklist.size(); | |||
| 10444 | ||||
| 10445 | // Now walk the identified inner loops. | |||
| 10446 | while (!Worklist.empty()) { | |||
| 10447 | Loop *L = Worklist.pop_back_val(); | |||
| 10448 | ||||
| 10449 | // For the inner loops we actually process, form LCSSA to simplify the | |||
| 10450 | // transform. | |||
| 10451 | Changed |= formLCSSARecursively(*L, *DT, LI, SE); | |||
| 10452 | ||||
| 10453 | Changed |= CFGChanged |= processLoop(L); | |||
| 10454 | } | |||
| 10455 | ||||
| 10456 | // Process each loop nest in the function. | |||
| 10457 | return LoopVectorizeResult(Changed, CFGChanged); | |||
| 10458 | } | |||
| 10459 | ||||
| 10460 | PreservedAnalyses LoopVectorizePass::run(Function &F, | |||
| 10461 | FunctionAnalysisManager &AM) { | |||
| 10462 | auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F); | |||
| 10463 | auto &LI = AM.getResult<LoopAnalysis>(F); | |||
| 10464 | auto &TTI = AM.getResult<TargetIRAnalysis>(F); | |||
| 10465 | auto &DT = AM.getResult<DominatorTreeAnalysis>(F); | |||
| 10466 | auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F); | |||
| 10467 | auto &TLI = AM.getResult<TargetLibraryAnalysis>(F); | |||
| 10468 | auto &AA = AM.getResult<AAManager>(F); | |||
| 10469 | auto &AC = AM.getResult<AssumptionAnalysis>(F); | |||
| 10470 | auto &DB = AM.getResult<DemandedBitsAnalysis>(F); | |||
| 10471 | auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F); | |||
| 10472 | MemorySSA *MSSA = EnableMSSALoopDependency | |||
| 10473 | ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA() | |||
| 10474 | : nullptr; | |||
| 10475 | ||||
| 10476 | auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager(); | |||
| 10477 | std::function<const LoopAccessInfo &(Loop &)> GetLAA = | |||
| 10478 | [&](Loop &L) -> const LoopAccessInfo & { | |||
| 10479 | LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, | |||
| 10480 | TLI, TTI, nullptr, MSSA}; | |||
| 10481 | return LAM.getResult<LoopAccessAnalysis>(L, AR); | |||
| 10482 | }; | |||
| 10483 | auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F); | |||
| 10484 | ProfileSummaryInfo *PSI = | |||
| 10485 | MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent()); | |||
| 10486 | LoopVectorizeResult Result = | |||
| 10487 | runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI); | |||
| 10488 | if (!Result.MadeAnyChange) | |||
| 10489 | return PreservedAnalyses::all(); | |||
| 10490 | PreservedAnalyses PA; | |||
| 10491 | ||||
| 10492 | // We currently do not preserve loopinfo/dominator analyses with outer loop | |||
| 10493 | // vectorization. Until this is addressed, mark these analyses as preserved | |||
| 10494 | // only for non-VPlan-native path. | |||
| 10495 | // TODO: Preserve Loop and Dominator analyses for VPlan-native path. | |||
| 10496 | if (!EnableVPlanNativePath) { | |||
| 10497 | PA.preserve<LoopAnalysis>(); | |||
| 10498 | PA.preserve<DominatorTreeAnalysis>(); | |||
| 10499 | } | |||
| 10500 | if (!Result.MadeCFGChange) | |||
| 10501 | PA.preserveSet<CFGAnalyses>(); | |||
| 10502 | return PA; | |||
| 10503 | } |