File: src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
Warning: line 1150, column 10: Called C++ object pointer is null
1 | //===- SelectionDAG.cpp - Implement the SelectionDAG data structures ------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This implements the SelectionDAG class. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #include "llvm/CodeGen/SelectionDAG.h" |
14 | #include "SDNodeDbgValue.h" |
15 | #include "llvm/ADT/APFloat.h" |
16 | #include "llvm/ADT/APInt.h" |
17 | #include "llvm/ADT/APSInt.h" |
18 | #include "llvm/ADT/ArrayRef.h" |
19 | #include "llvm/ADT/BitVector.h" |
20 | #include "llvm/ADT/FoldingSet.h" |
21 | #include "llvm/ADT/None.h" |
22 | #include "llvm/ADT/STLExtras.h" |
23 | #include "llvm/ADT/SmallPtrSet.h" |
24 | #include "llvm/ADT/SmallVector.h" |
25 | #include "llvm/ADT/Triple.h" |
26 | #include "llvm/ADT/Twine.h" |
27 | #include "llvm/Analysis/BlockFrequencyInfo.h" |
28 | #include "llvm/Analysis/MemoryLocation.h" |
29 | #include "llvm/Analysis/ProfileSummaryInfo.h" |
30 | #include "llvm/Analysis/ValueTracking.h" |
31 | #include "llvm/CodeGen/FunctionLoweringInfo.h" |
32 | #include "llvm/CodeGen/ISDOpcodes.h" |
33 | #include "llvm/CodeGen/MachineBasicBlock.h" |
34 | #include "llvm/CodeGen/MachineConstantPool.h" |
35 | #include "llvm/CodeGen/MachineFrameInfo.h" |
36 | #include "llvm/CodeGen/MachineFunction.h" |
37 | #include "llvm/CodeGen/MachineMemOperand.h" |
38 | #include "llvm/CodeGen/RuntimeLibcalls.h" |
39 | #include "llvm/CodeGen/SelectionDAGAddressAnalysis.h" |
40 | #include "llvm/CodeGen/SelectionDAGNodes.h" |
41 | #include "llvm/CodeGen/SelectionDAGTargetInfo.h" |
42 | #include "llvm/CodeGen/TargetFrameLowering.h" |
43 | #include "llvm/CodeGen/TargetLowering.h" |
44 | #include "llvm/CodeGen/TargetRegisterInfo.h" |
45 | #include "llvm/CodeGen/TargetSubtargetInfo.h" |
46 | #include "llvm/CodeGen/ValueTypes.h" |
47 | #include "llvm/IR/Constant.h" |
48 | #include "llvm/IR/Constants.h" |
49 | #include "llvm/IR/DataLayout.h" |
50 | #include "llvm/IR/DebugInfoMetadata.h" |
51 | #include "llvm/IR/DebugLoc.h" |
52 | #include "llvm/IR/DerivedTypes.h" |
53 | #include "llvm/IR/Function.h" |
54 | #include "llvm/IR/GlobalValue.h" |
55 | #include "llvm/IR/Metadata.h" |
56 | #include "llvm/IR/Type.h" |
57 | #include "llvm/IR/Value.h" |
58 | #include "llvm/Support/Casting.h" |
59 | #include "llvm/Support/CodeGen.h" |
60 | #include "llvm/Support/Compiler.h" |
61 | #include "llvm/Support/Debug.h" |
62 | #include "llvm/Support/ErrorHandling.h" |
63 | #include "llvm/Support/KnownBits.h" |
64 | #include "llvm/Support/MachineValueType.h" |
65 | #include "llvm/Support/ManagedStatic.h" |
66 | #include "llvm/Support/MathExtras.h" |
67 | #include "llvm/Support/Mutex.h" |
68 | #include "llvm/Support/raw_ostream.h" |
69 | #include "llvm/Target/TargetMachine.h" |
70 | #include "llvm/Target/TargetOptions.h" |
71 | #include "llvm/Transforms/Utils/SizeOpts.h" |
72 | #include <algorithm> |
73 | #include <cassert> |
74 | #include <cstdint> |
75 | #include <cstdlib> |
76 | #include <limits> |
77 | #include <set> |
78 | #include <string> |
79 | #include <utility> |
80 | #include <vector> |
81 | |
82 | using namespace llvm; |
83 | |
84 | /// makeVTList - Return an instance of the SDVTList struct initialized with the |
85 | /// specified members. |
86 | static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) { |
87 | SDVTList Res = {VTs, NumVTs}; |
88 | return Res; |
89 | } |
90 | |
91 | // Default null implementations of the callbacks. |
92 | void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {} |
93 | void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {} |
94 | void SelectionDAG::DAGUpdateListener::NodeInserted(SDNode *) {} |
95 | |
96 | void SelectionDAG::DAGNodeDeletedListener::anchor() {} |
97 | |
98 | #define DEBUG_TYPE "selectiondag"
99 | |
100 | static cl::opt<bool> EnableMemCpyDAGOpt("enable-memcpy-dag-opt", |
101 | cl::Hidden, cl::init(true), |
102 | cl::desc("Gang up loads and stores generated by inlining of memcpy")); |
103 | |
104 | static cl::opt<int> MaxLdStGlue("ldstmemcpy-glue-max", |
105 | cl::desc("Number limit for gluing ld/st of memcpy."), |
106 | cl::Hidden, cl::init(0)); |
107 | |
108 | static void NewSDValueDbgMsg(SDValue V, StringRef Msg, SelectionDAG *G) { |
109 | LLVM_DEBUG(dbgs() << Msg; V.getNode()->dump(G););
110 | } |
111 | |
112 | //===----------------------------------------------------------------------===// |
113 | // ConstantFPSDNode Class |
114 | //===----------------------------------------------------------------------===// |
115 | |
116 | /// isExactlyValue - We don't rely on operator== working on double values, as |
117 | /// it returns true for things that are clearly not equal, like -0.0 and 0.0. |
118 | /// As such, this method can be used to do an exact bit-for-bit comparison of |
119 | /// two floating point values. |
120 | bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const { |
121 | return getValueAPF().bitwiseIsEqual(V); |
122 | } |
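
For illustration, a minimal standalone sketch (not part of this file) of the point made above: operator== on doubles treats -0.0 and 0.0 as equal, while their bit patterns differ, which is what bitwiseIsEqual detects.

#include <cassert>
#include <cstdint>
#include <cstring>

static bool bitwiseEqual(double A, double B) {
  uint64_t ABits, BBits;
  std::memcpy(&ABits, &A, sizeof(double)); // reinterpret the bits without UB
  std::memcpy(&BBits, &B, sizeof(double));
  return ABits == BBits;                   // exact bit-for-bit comparison
}

int main() {
  assert(0.0 == -0.0);              // operator== calls them equal...
  assert(!bitwiseEqual(0.0, -0.0)); // ...but the sign bit differs
}
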
123 | |
124 | bool ConstantFPSDNode::isValueValidForType(EVT VT, |
125 | const APFloat& Val) { |
126 | assert(VT.isFloatingPoint() && "Can only convert between FP types");
127 | |
128 | // convert modifies in place, so make a copy. |
129 | APFloat Val2 = APFloat(Val); |
130 | bool losesInfo; |
131 | (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT), |
132 | APFloat::rmNearestTiesToEven, |
133 | &losesInfo); |
134 | return !losesInfo; |
135 | } |
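
The same losesInfo round trip, sketched in isolation (illustrative only; assumes LLVM's APFloat header is available):

#include "llvm/ADT/APFloat.h"
using namespace llvm;

// True iff D converts to IEEE single precision without losing information.
static bool fitsInIEEESingle(double D) {
  bool losesInfo;
  APFloat Val(D);
  Val.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven, &losesInfo);
  return !losesInfo;
}
// fitsInIEEESingle(0.5) is true; fitsInIEEESingle(0.1) is false, because the
// double nearest to 0.1 is not exactly representable as a float.
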
136 | |
137 | //===----------------------------------------------------------------------===// |
138 | // ISD Namespace |
139 | //===----------------------------------------------------------------------===// |
140 | |
141 | bool ISD::isConstantSplatVector(const SDNode *N, APInt &SplatVal) { |
142 | if (N->getOpcode() == ISD::SPLAT_VECTOR) { |
143 | unsigned EltSize = |
144 | N->getValueType(0).getVectorElementType().getSizeInBits(); |
145 | if (auto *Op0 = dyn_cast<ConstantSDNode>(N->getOperand(0))) { |
146 | SplatVal = Op0->getAPIntValue().truncOrSelf(EltSize); |
147 | return true; |
148 | } |
149 | if (auto *Op0 = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) { |
150 | SplatVal = Op0->getValueAPF().bitcastToAPInt().truncOrSelf(EltSize); |
151 | return true; |
152 | } |
153 | } |
154 | |
155 | auto *BV = dyn_cast<BuildVectorSDNode>(N); |
156 | if (!BV) |
157 | return false; |
158 | |
159 | APInt SplatUndef; |
160 | unsigned SplatBitSize; |
161 | bool HasUndefs; |
162 | unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits(); |
163 | return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs, |
164 | EltSize) && |
165 | EltSize == SplatBitSize; |
166 | } |
167 | |
168 | // FIXME: AllOnes and AllZeros duplicate a lot of code. Could these be |
169 | // specializations of the more general isConstantSplatVector()? |
170 | |
171 | bool ISD::isConstantSplatVectorAllOnes(const SDNode *N, bool BuildVectorOnly) { |
172 | // Look through a bit convert. |
173 | while (N->getOpcode() == ISD::BITCAST) |
174 | N = N->getOperand(0).getNode(); |
175 | |
176 | if (!BuildVectorOnly && N->getOpcode() == ISD::SPLAT_VECTOR) { |
177 | APInt SplatVal; |
178 | return isConstantSplatVector(N, SplatVal) && SplatVal.isAllOnesValue(); |
179 | } |
180 | |
181 | if (N->getOpcode() != ISD::BUILD_VECTOR) return false; |
182 | |
183 | unsigned i = 0, e = N->getNumOperands(); |
184 | |
185 | // Skip over all of the undef values. |
186 | while (i != e && N->getOperand(i).isUndef()) |
187 | ++i; |
188 | |
189 | // Do not accept an all-undef vector. |
190 | if (i == e) return false; |
191 | |
192 | // Do not accept build_vectors that aren't all constants or which have non-~0 |
193 | // elements. We have to be a bit careful here, as the type of the constant |
194 | // may not be the same as the type of the vector elements due to type |
195 | // legalization (the elements are promoted to a legal type for the target and |
196 | // a vector of a type may be legal when the base element type is not). |
197 | // We only want to check enough bits to cover the vector elements, because |
198 | // we care if the resultant vector is all ones, not whether the individual |
199 | // constants are. |
200 | SDValue NotZero = N->getOperand(i); |
201 | unsigned EltSize = N->getValueType(0).getScalarSizeInBits(); |
202 | if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) { |
203 | if (CN->getAPIntValue().countTrailingOnes() < EltSize) |
204 | return false; |
205 | } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) { |
206 | if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize) |
207 | return false; |
208 | } else |
209 | return false; |
210 | |
211 | // Okay, we have at least one ~0 value, check to see if the rest match or are |
212 | // undefs. Even with the above element type twiddling, this should be OK, as |
213 | // the same type legalization should have applied to all the elements. |
214 | for (++i; i != e; ++i) |
215 | if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef()) |
216 | return false; |
217 | return true; |
218 | } |
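
A standalone sketch of the trailing-ones test above (the all-zeros variant that follows is symmetric, using countTrailingZeros): after promotion an all-ones i8 element may be carried in a wider constant, so only the low EltSize bits are required to be ones.

#include <cassert>
#include <cstdint>

static unsigned countTrailingOnes(uint32_t V) {
  unsigned N = 0;
  while (V & 1) { V >>= 1; ++N; } // count consecutive low one-bits
  return N;
}

int main() {
  const unsigned EltSize = 8;
  assert(countTrailingOnes(0x000000FFu) >= EltSize);    // promoted ~0 i8: accept
  assert(countTrailingOnes(0xFFFFFFFFu) >= EltSize);    // full ~0: accept
  assert(!(countTrailingOnes(0x0000007Fu) >= EltSize)); // 0x7F: reject
}
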
219 | |
220 | bool ISD::isConstantSplatVectorAllZeros(const SDNode *N, bool BuildVectorOnly) { |
221 | // Look through a bit convert. |
222 | while (N->getOpcode() == ISD::BITCAST) |
223 | N = N->getOperand(0).getNode(); |
224 | |
225 | if (!BuildVectorOnly && N->getOpcode() == ISD::SPLAT_VECTOR) { |
226 | APInt SplatVal; |
227 | return isConstantSplatVector(N, SplatVal) && SplatVal.isNullValue(); |
228 | } |
229 | |
230 | if (N->getOpcode() != ISD::BUILD_VECTOR) return false; |
231 | |
232 | bool IsAllUndef = true; |
233 | for (const SDValue &Op : N->op_values()) { |
234 | if (Op.isUndef()) |
235 | continue; |
236 | IsAllUndef = false; |
237 | // Do not accept build_vectors that aren't all constants or which have non-0 |
238 | // elements. We have to be a bit careful here, as the type of the constant |
239 | // may not be the same as the type of the vector elements due to type |
240 | // legalization (the elements are promoted to a legal type for the target |
241 | // and a vector of a type may be legal when the base element type is not). |
242 | // We only want to check enough bits to cover the vector elements, because |
243 | // we care if the resultant vector is all zeros, not whether the individual |
244 | // constants are. |
245 | unsigned EltSize = N->getValueType(0).getScalarSizeInBits(); |
246 | if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) { |
247 | if (CN->getAPIntValue().countTrailingZeros() < EltSize) |
248 | return false; |
249 | } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) { |
250 | if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize) |
251 | return false; |
252 | } else |
253 | return false; |
254 | } |
255 | |
256 | // Do not accept an all-undef vector. |
257 | if (IsAllUndef) |
258 | return false; |
259 | return true; |
260 | } |
261 | |
262 | bool ISD::isBuildVectorAllOnes(const SDNode *N) { |
263 | return isConstantSplatVectorAllOnes(N, /*BuildVectorOnly*/ true); |
264 | } |
265 | |
266 | bool ISD::isBuildVectorAllZeros(const SDNode *N) { |
267 | return isConstantSplatVectorAllZeros(N, /*BuildVectorOnly*/ true); |
268 | } |
269 | |
270 | bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) { |
271 | if (N->getOpcode() != ISD::BUILD_VECTOR) |
272 | return false; |
273 | |
274 | for (const SDValue &Op : N->op_values()) { |
275 | if (Op.isUndef()) |
276 | continue; |
277 | if (!isa<ConstantSDNode>(Op)) |
278 | return false; |
279 | } |
280 | return true; |
281 | } |
282 | |
283 | bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) { |
284 | if (N->getOpcode() != ISD::BUILD_VECTOR) |
285 | return false; |
286 | |
287 | for (const SDValue &Op : N->op_values()) { |
288 | if (Op.isUndef()) |
289 | continue; |
290 | if (!isa<ConstantFPSDNode>(Op)) |
291 | return false; |
292 | } |
293 | return true; |
294 | } |
295 | |
296 | bool ISD::allOperandsUndef(const SDNode *N) { |
297 | // Return false if the node has no operands. |
298 | // This is "logically inconsistent" with the definition of "all" but |
299 | // is probably the desired behavior. |
300 | if (N->getNumOperands() == 0) |
301 | return false; |
302 | return all_of(N->op_values(), [](SDValue Op) { return Op.isUndef(); }); |
303 | } |
304 | |
305 | bool ISD::matchUnaryPredicate(SDValue Op, |
306 | std::function<bool(ConstantSDNode *)> Match, |
307 | bool AllowUndefs) { |
308 | // FIXME: Add support for scalar UNDEF cases? |
309 | if (auto *Cst = dyn_cast<ConstantSDNode>(Op)) |
310 | return Match(Cst); |
311 | |
312 | // FIXME: Add support for vector UNDEF cases? |
313 | if (ISD::BUILD_VECTOR != Op.getOpcode() && |
314 | ISD::SPLAT_VECTOR != Op.getOpcode()) |
315 | return false; |
316 | |
317 | EVT SVT = Op.getValueType().getScalarType(); |
318 | for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) { |
319 | if (AllowUndefs && Op.getOperand(i).isUndef()) { |
320 | if (!Match(nullptr)) |
321 | return false; |
322 | continue; |
323 | } |
324 | |
325 | auto *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(i)); |
326 | if (!Cst || Cst->getValueType(0) != SVT || !Match(Cst)) |
327 | return false; |
328 | } |
329 | return true; |
330 | } |
331 | |
332 | bool ISD::matchBinaryPredicate( |
333 | SDValue LHS, SDValue RHS, |
334 | std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match, |
335 | bool AllowUndefs, bool AllowTypeMismatch) { |
336 | if (!AllowTypeMismatch && LHS.getValueType() != RHS.getValueType()) |
337 | return false; |
338 | |
339 | // TODO: Add support for scalar UNDEF cases? |
340 | if (auto *LHSCst = dyn_cast<ConstantSDNode>(LHS)) |
341 | if (auto *RHSCst = dyn_cast<ConstantSDNode>(RHS)) |
342 | return Match(LHSCst, RHSCst); |
343 | |
344 | // TODO: Add support for vector UNDEF cases? |
345 | if (LHS.getOpcode() != RHS.getOpcode() || |
346 | (LHS.getOpcode() != ISD::BUILD_VECTOR && |
347 | LHS.getOpcode() != ISD::SPLAT_VECTOR)) |
348 | return false; |
349 | |
350 | EVT SVT = LHS.getValueType().getScalarType(); |
351 | for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) { |
352 | SDValue LHSOp = LHS.getOperand(i); |
353 | SDValue RHSOp = RHS.getOperand(i); |
354 | bool LHSUndef = AllowUndefs && LHSOp.isUndef(); |
355 | bool RHSUndef = AllowUndefs && RHSOp.isUndef(); |
356 | auto *LHSCst = dyn_cast<ConstantSDNode>(LHSOp); |
357 | auto *RHSCst = dyn_cast<ConstantSDNode>(RHSOp); |
358 | if ((!LHSCst && !LHSUndef) || (!RHSCst && !RHSUndef)) |
359 | return false; |
360 | if (!AllowTypeMismatch && (LHSOp.getValueType() != SVT || |
361 | LHSOp.getValueType() != RHSOp.getValueType())) |
362 | return false; |
363 | if (!Match(LHSCst, RHSCst)) |
364 | return false; |
365 | } |
366 | return true; |
367 | } |
368 | |
369 | ISD::NodeType ISD::getVecReduceBaseOpcode(unsigned VecReduceOpcode) { |
370 | switch (VecReduceOpcode) { |
371 | default: |
372 | llvm_unreachable("Expected VECREDUCE opcode");
373 | case ISD::VECREDUCE_FADD: |
374 | case ISD::VECREDUCE_SEQ_FADD: |
375 | return ISD::FADD; |
376 | case ISD::VECREDUCE_FMUL: |
377 | case ISD::VECREDUCE_SEQ_FMUL: |
378 | return ISD::FMUL; |
379 | case ISD::VECREDUCE_ADD: |
380 | return ISD::ADD; |
381 | case ISD::VECREDUCE_MUL: |
382 | return ISD::MUL; |
383 | case ISD::VECREDUCE_AND: |
384 | return ISD::AND; |
385 | case ISD::VECREDUCE_OR: |
386 | return ISD::OR; |
387 | case ISD::VECREDUCE_XOR: |
388 | return ISD::XOR; |
389 | case ISD::VECREDUCE_SMAX: |
390 | return ISD::SMAX; |
391 | case ISD::VECREDUCE_SMIN: |
392 | return ISD::SMIN; |
393 | case ISD::VECREDUCE_UMAX: |
394 | return ISD::UMAX; |
395 | case ISD::VECREDUCE_UMIN: |
396 | return ISD::UMIN; |
397 | case ISD::VECREDUCE_FMAX: |
398 | return ISD::FMAXNUM; |
399 | case ISD::VECREDUCE_FMIN: |
400 | return ISD::FMINNUM; |
401 | } |
402 | } |
403 | |
404 | bool ISD::isVPOpcode(unsigned Opcode) { |
405 | switch (Opcode) { |
406 | default: |
407 | return false; |
408 | #define BEGIN_REGISTER_VP_SDNODE(SDOPC, ...) \ |
409 | case ISD::SDOPC: \ |
410 | return true; |
411 | #include "llvm/IR/VPIntrinsics.def" |
412 | } |
413 | } |
414 | |
415 | /// The operand position of the vector mask. |
416 | Optional<unsigned> ISD::getVPMaskIdx(unsigned Opcode) { |
417 | switch (Opcode) { |
418 | default: |
419 | return None; |
420 | #define BEGIN_REGISTER_VP_SDNODE(SDOPC, LEGALPOS, TDNAME, MASKPOS, ...) \ |
421 | case ISD::SDOPC: \ |
422 | return MASKPOS; |
423 | #include "llvm/IR/VPIntrinsics.def" |
424 | } |
425 | } |
426 | |
427 | /// The operand position of the explicit vector length parameter. |
428 | Optional<unsigned> ISD::getVPExplicitVectorLengthIdx(unsigned Opcode) { |
429 | switch (Opcode) { |
430 | default: |
431 | return None; |
432 | #define BEGIN_REGISTER_VP_SDNODE(SDOPC, LEGALPOS, TDNAME, MASKPOS, EVLPOS) \ |
433 | case ISD::SDOPC: \ |
434 | return EVLPOS; |
435 | #include "llvm/IR/VPIntrinsics.def" |
436 | } |
437 | } |
438 | |
439 | ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) { |
440 | switch (ExtType) { |
441 | case ISD::EXTLOAD: |
442 | return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND; |
443 | case ISD::SEXTLOAD: |
444 | return ISD::SIGN_EXTEND; |
445 | case ISD::ZEXTLOAD: |
446 | return ISD::ZERO_EXTEND; |
447 | default: |
448 | break; |
449 | } |
450 | |
451 | llvm_unreachable("Invalid LoadExtType");
452 | } |
453 | |
454 | ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) { |
455 | // To perform this operation, we just need to swap the L and G bits of the |
456 | // operation. |
457 | unsigned OldL = (Operation >> 2) & 1; |
458 | unsigned OldG = (Operation >> 1) & 1; |
459 | return ISD::CondCode((Operation & ~6) | // Keep the N, U, E bits |
460 | (OldL << 1) | // New G bit |
461 | (OldG << 2)); // New L bit. |
462 | } |
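
A standalone sketch of the bit trick, mirroring the ISD::CondCode encoding (bit 0 = E, bit 1 = G, bit 2 = L; the enum values below match the integer condition codes, but treat them as illustrative):

#include <cassert>

enum Cond { SETEQ = 0x11, SETGT = 0x12, SETGE = 0x13,
            SETLT = 0x14, SETLE = 0x15, SETNE = 0x16 };

static unsigned swapOperands(unsigned Op) {
  unsigned OldL = (Op >> 2) & 1;
  unsigned OldG = (Op >> 1) & 1;
  return (Op & ~6u) | (OldL << 1) | (OldG << 2); // exchange the L and G bits
}

int main() {
  assert(swapOperands(SETLT) == SETGT); // a < b   <=>  b > a
  assert(swapOperands(SETLE) == SETGE); // a <= b  <=>  b >= a
  assert(swapOperands(SETEQ) == SETEQ); // equality is symmetric
}
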
463 | |
464 | static ISD::CondCode getSetCCInverseImpl(ISD::CondCode Op, bool isIntegerLike) { |
465 | unsigned Operation = Op; |
466 | if (isIntegerLike) |
467 | Operation ^= 7; // Flip L, G, E bits, but not U. |
468 | else |
469 | Operation ^= 15; // Flip all of the condition bits. |
470 | |
471 | if (Operation > ISD::SETTRUE2) |
472 | Operation &= ~8; // Don't let N and U bits get set. |
473 | |
474 | return ISD::CondCode(Operation); |
475 | } |
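
Likewise for the inverse (same assumed encoding as the sketch above): XOR-ing the L, G, and E bits negates an integer comparison.

#include <cassert>

int main() {
  const unsigned SETEQ = 0x11, SETGE = 0x13, SETLT = 0x14, SETNE = 0x16;
  assert((SETLT ^ 7) == SETGE); // !(a < b)   ==  a >= b
  assert((SETEQ ^ 7) == SETNE); // !(a == b)  ==  a != b
}
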
476 | |
477 | ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, EVT Type) { |
478 | return getSetCCInverseImpl(Op, Type.isInteger()); |
479 | } |
480 | |
481 | ISD::CondCode ISD::GlobalISel::getSetCCInverse(ISD::CondCode Op, |
482 | bool isIntegerLike) { |
483 | return getSetCCInverseImpl(Op, isIntegerLike); |
484 | } |
485 | |
486 | /// For an integer comparison, return 1 if the comparison is a signed operation |
487 | /// and 2 if the result is an unsigned comparison. Return zero if the operation |
488 | /// does not depend on the sign of the input (setne and seteq). |
489 | static int isSignedOp(ISD::CondCode Opcode) { |
490 | switch (Opcode) { |
491 | default: llvm_unreachable("Illegal integer setcc operation!");
492 | case ISD::SETEQ: |
493 | case ISD::SETNE: return 0; |
494 | case ISD::SETLT: |
495 | case ISD::SETLE: |
496 | case ISD::SETGT: |
497 | case ISD::SETGE: return 1; |
498 | case ISD::SETULT: |
499 | case ISD::SETULE: |
500 | case ISD::SETUGT: |
501 | case ISD::SETUGE: return 2; |
502 | } |
503 | } |
504 | |
505 | ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2, |
506 | EVT Type) { |
507 | bool IsInteger = Type.isInteger(); |
508 | if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3) |
509 | // Cannot fold a signed integer setcc with an unsigned integer setcc. |
510 | return ISD::SETCC_INVALID; |
511 | |
512 | unsigned Op = Op1 | Op2; // Combine all of the condition bits. |
513 | |
514 | // If the N and U bits get set, then the resultant comparison DOES suddenly |
515 | // care about orderedness, and it is true when ordered. |
516 | if (Op > ISD::SETTRUE2) |
517 | Op &= ~16; // Clear the U bit if the N bit is set. |
518 | |
519 | // Canonicalize illegal integer setcc's. |
520 | if (IsInteger && Op == ISD::SETUNE) // e.g. SETUGT | SETULT |
521 | Op = ISD::SETNE; |
522 | |
523 | return ISD::CondCode(Op); |
524 | } |
525 | |
526 | ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2, |
527 | EVT Type) { |
528 | bool IsInteger = Type.isInteger(); |
529 | if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3) |
530 | // Cannot fold a signed setcc with an unsigned setcc. |
531 | return ISD::SETCC_INVALID; |
532 | |
533 | // Combine all of the condition bits. |
534 | ISD::CondCode Result = ISD::CondCode(Op1 & Op2); |
535 | |
536 | // Canonicalize illegal integer setcc's. |
537 | if (IsInteger) { |
538 | switch (Result) { |
539 | default: break; |
540 | case ISD::SETUO : Result = ISD::SETFALSE; break; // SETUGT & SETULT |
541 | case ISD::SETOEQ: // SETEQ & SETU[LG]E |
542 | case ISD::SETUEQ: Result = ISD::SETEQ ; break; // SETUGE & SETULE |
543 | case ISD::SETOLT: Result = ISD::SETULT ; break; // SETULT & SETNE |
544 | case ISD::SETOGT: Result = ISD::SETUGT ; break; // SETUGT & SETNE |
545 | } |
546 | } |
547 | |
548 | return Result; |
549 | } |
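
And a matching sketch for the two combiners above: OR-ing the condition bits unions the accepted outcomes, while AND-ing them intersects the outcomes (same assumed encoding as before).

#include <cassert>

int main() {
  const unsigned SETEQ = 0x11, SETGT = 0x12, SETGE = 0x13,
                 SETLT = 0x14, SETLE = 0x15, SETNE = 0x16;
  assert((SETLT | SETGT) == SETNE); // (a<b) || (a>b)   ==  a != b
  assert((SETLT | SETEQ) == SETLE); // (a<b) || (a==b)  ==  a <= b
  assert((SETLE & SETGE) == SETEQ); // (a<=b) && (a>=b) ==  a == b
  assert((SETLT & SETNE) == SETLT); // (a<b) && (a!=b)  ==  a < b
}
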
550 | |
551 | //===----------------------------------------------------------------------===// |
552 | // SDNode Profile Support |
553 | //===----------------------------------------------------------------------===// |
554 | |
555 | /// AddNodeIDOpcode - Add the node opcode to the NodeID data. |
556 | static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) { |
557 | ID.AddInteger(OpC); |
558 | } |
559 | |
560 | /// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them |
561 | /// solely with their pointer. |
562 | static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) { |
563 | ID.AddPointer(VTList.VTs); |
564 | } |
565 | |
566 | /// AddNodeIDOperands - Various routines for adding operands to the NodeID data. |
567 | static void AddNodeIDOperands(FoldingSetNodeID &ID, |
568 | ArrayRef<SDValue> Ops) { |
569 | for (auto& Op : Ops) { |
570 | ID.AddPointer(Op.getNode()); |
571 | ID.AddInteger(Op.getResNo()); |
572 | } |
573 | } |
574 | |
575 | /// AddNodeIDOperands - Various routines for adding operands to the NodeID data. |
576 | static void AddNodeIDOperands(FoldingSetNodeID &ID, |
577 | ArrayRef<SDUse> Ops) { |
578 | for (auto& Op : Ops) { |
579 | ID.AddPointer(Op.getNode()); |
580 | ID.AddInteger(Op.getResNo()); |
581 | } |
582 | } |
583 | |
584 | static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC, |
585 | SDVTList VTList, ArrayRef<SDValue> OpList) { |
586 | AddNodeIDOpcode(ID, OpC); |
587 | AddNodeIDValueTypes(ID, VTList); |
588 | AddNodeIDOperands(ID, OpList); |
589 | } |
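
The pattern these helpers implement, sketched in miniature (illustrative, not the file's actual code): structurally identical nodes serialize to identical FoldingSetNodeIDs, so the CSE map can unify them.

#include "llvm/ADT/FoldingSet.h"
using namespace llvm;

// Hypothetical helper: a node's identity is its opcode, its interned
// VT-list pointer, and each operand's <node pointer, result number> pair.
static unsigned hashNodeKey(unsigned Opc, const void *VTs,
                            const void *OpNode, unsigned ResNo) {
  FoldingSetNodeID ID;
  ID.AddInteger(Opc);    // AddNodeIDOpcode
  ID.AddPointer(VTs);    // AddNodeIDValueTypes: interned, so pointer suffices
  ID.AddPointer(OpNode); // AddNodeIDOperands: which node feeds the operand...
  ID.AddInteger(ResNo);  // ...and which of its results is consumed
  return ID.ComputeHash();
}
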
590 | |
591 | /// If this is an SDNode with special info, add this info to the NodeID data. |
592 | static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) { |
593 | switch (N->getOpcode()) { |
594 | case ISD::TargetExternalSymbol: |
595 | case ISD::ExternalSymbol: |
596 | case ISD::MCSymbol: |
597 | llvm_unreachable("Should only be used on nodes with operands");
598 | default: break; // Normal nodes don't need extra info. |
599 | case ISD::TargetConstant: |
600 | case ISD::Constant: { |
601 | const ConstantSDNode *C = cast<ConstantSDNode>(N); |
602 | ID.AddPointer(C->getConstantIntValue()); |
603 | ID.AddBoolean(C->isOpaque()); |
604 | break; |
605 | } |
606 | case ISD::TargetConstantFP: |
607 | case ISD::ConstantFP: |
608 | ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue()); |
609 | break; |
610 | case ISD::TargetGlobalAddress: |
611 | case ISD::GlobalAddress: |
612 | case ISD::TargetGlobalTLSAddress: |
613 | case ISD::GlobalTLSAddress: { |
614 | const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N); |
615 | ID.AddPointer(GA->getGlobal()); |
616 | ID.AddInteger(GA->getOffset()); |
617 | ID.AddInteger(GA->getTargetFlags()); |
618 | break; |
619 | } |
620 | case ISD::BasicBlock: |
621 | ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock()); |
622 | break; |
623 | case ISD::Register: |
624 | ID.AddInteger(cast<RegisterSDNode>(N)->getReg()); |
625 | break; |
626 | case ISD::RegisterMask: |
627 | ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask()); |
628 | break; |
629 | case ISD::SRCVALUE: |
630 | ID.AddPointer(cast<SrcValueSDNode>(N)->getValue()); |
631 | break; |
632 | case ISD::FrameIndex: |
633 | case ISD::TargetFrameIndex: |
634 | ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex()); |
635 | break; |
636 | case ISD::LIFETIME_START: |
637 | case ISD::LIFETIME_END: |
638 | if (cast<LifetimeSDNode>(N)->hasOffset()) { |
639 | ID.AddInteger(cast<LifetimeSDNode>(N)->getSize()); |
640 | ID.AddInteger(cast<LifetimeSDNode>(N)->getOffset()); |
641 | } |
642 | break; |
643 | case ISD::PSEUDO_PROBE: |
644 | ID.AddInteger(cast<PseudoProbeSDNode>(N)->getGuid()); |
645 | ID.AddInteger(cast<PseudoProbeSDNode>(N)->getIndex()); |
646 | ID.AddInteger(cast<PseudoProbeSDNode>(N)->getAttributes()); |
647 | break; |
648 | case ISD::JumpTable: |
649 | case ISD::TargetJumpTable: |
650 | ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex()); |
651 | ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags()); |
652 | break; |
653 | case ISD::ConstantPool: |
654 | case ISD::TargetConstantPool: { |
655 | const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N); |
656 | ID.AddInteger(CP->getAlign().value()); |
657 | ID.AddInteger(CP->getOffset()); |
658 | if (CP->isMachineConstantPoolEntry()) |
659 | CP->getMachineCPVal()->addSelectionDAGCSEId(ID); |
660 | else |
661 | ID.AddPointer(CP->getConstVal()); |
662 | ID.AddInteger(CP->getTargetFlags()); |
663 | break; |
664 | } |
665 | case ISD::TargetIndex: { |
666 | const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N); |
667 | ID.AddInteger(TI->getIndex()); |
668 | ID.AddInteger(TI->getOffset()); |
669 | ID.AddInteger(TI->getTargetFlags()); |
670 | break; |
671 | } |
672 | case ISD::LOAD: { |
673 | const LoadSDNode *LD = cast<LoadSDNode>(N); |
674 | ID.AddInteger(LD->getMemoryVT().getRawBits()); |
675 | ID.AddInteger(LD->getRawSubclassData()); |
676 | ID.AddInteger(LD->getPointerInfo().getAddrSpace()); |
677 | break; |
678 | } |
679 | case ISD::STORE: { |
680 | const StoreSDNode *ST = cast<StoreSDNode>(N); |
681 | ID.AddInteger(ST->getMemoryVT().getRawBits()); |
682 | ID.AddInteger(ST->getRawSubclassData()); |
683 | ID.AddInteger(ST->getPointerInfo().getAddrSpace()); |
684 | break; |
685 | } |
686 | case ISD::MLOAD: { |
687 | const MaskedLoadSDNode *MLD = cast<MaskedLoadSDNode>(N); |
688 | ID.AddInteger(MLD->getMemoryVT().getRawBits()); |
689 | ID.AddInteger(MLD->getRawSubclassData()); |
690 | ID.AddInteger(MLD->getPointerInfo().getAddrSpace()); |
691 | break; |
692 | } |
693 | case ISD::MSTORE: { |
694 | const MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N); |
695 | ID.AddInteger(MST->getMemoryVT().getRawBits()); |
696 | ID.AddInteger(MST->getRawSubclassData()); |
697 | ID.AddInteger(MST->getPointerInfo().getAddrSpace()); |
698 | break; |
699 | } |
700 | case ISD::MGATHER: { |
701 | const MaskedGatherSDNode *MG = cast<MaskedGatherSDNode>(N); |
702 | ID.AddInteger(MG->getMemoryVT().getRawBits()); |
703 | ID.AddInteger(MG->getRawSubclassData()); |
704 | ID.AddInteger(MG->getPointerInfo().getAddrSpace()); |
705 | break; |
706 | } |
707 | case ISD::MSCATTER: { |
708 | const MaskedScatterSDNode *MS = cast<MaskedScatterSDNode>(N); |
709 | ID.AddInteger(MS->getMemoryVT().getRawBits()); |
710 | ID.AddInteger(MS->getRawSubclassData()); |
711 | ID.AddInteger(MS->getPointerInfo().getAddrSpace()); |
712 | break; |
713 | } |
714 | case ISD::ATOMIC_CMP_SWAP: |
715 | case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: |
716 | case ISD::ATOMIC_SWAP: |
717 | case ISD::ATOMIC_LOAD_ADD: |
718 | case ISD::ATOMIC_LOAD_SUB: |
719 | case ISD::ATOMIC_LOAD_AND: |
720 | case ISD::ATOMIC_LOAD_CLR: |
721 | case ISD::ATOMIC_LOAD_OR: |
722 | case ISD::ATOMIC_LOAD_XOR: |
723 | case ISD::ATOMIC_LOAD_NAND: |
724 | case ISD::ATOMIC_LOAD_MIN: |
725 | case ISD::ATOMIC_LOAD_MAX: |
726 | case ISD::ATOMIC_LOAD_UMIN: |
727 | case ISD::ATOMIC_LOAD_UMAX: |
728 | case ISD::ATOMIC_LOAD: |
729 | case ISD::ATOMIC_STORE: { |
730 | const AtomicSDNode *AT = cast<AtomicSDNode>(N); |
731 | ID.AddInteger(AT->getMemoryVT().getRawBits()); |
732 | ID.AddInteger(AT->getRawSubclassData()); |
733 | ID.AddInteger(AT->getPointerInfo().getAddrSpace()); |
734 | break; |
735 | } |
736 | case ISD::PREFETCH: { |
737 | const MemSDNode *PF = cast<MemSDNode>(N); |
738 | ID.AddInteger(PF->getPointerInfo().getAddrSpace()); |
739 | break; |
740 | } |
741 | case ISD::VECTOR_SHUFFLE: { |
742 | const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N); |
743 | for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements(); |
744 | i != e; ++i) |
745 | ID.AddInteger(SVN->getMaskElt(i)); |
746 | break; |
747 | } |
748 | case ISD::TargetBlockAddress: |
749 | case ISD::BlockAddress: { |
750 | const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N); |
751 | ID.AddPointer(BA->getBlockAddress()); |
752 | ID.AddInteger(BA->getOffset()); |
753 | ID.AddInteger(BA->getTargetFlags()); |
754 | break; |
755 | } |
756 | } // end switch (N->getOpcode()) |
757 | |
758 | // Target specific memory nodes could also have address spaces to check. |
759 | if (N->isTargetMemoryOpcode()) |
760 | ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace()); |
761 | } |
762 | |
763 | /// AddNodeIDNode - Generic routine for adding a node's info to the NodeID
764 | /// data. |
765 | static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) { |
766 | AddNodeIDOpcode(ID, N->getOpcode()); |
767 | // Add the return value info. |
768 | AddNodeIDValueTypes(ID, N->getVTList()); |
769 | // Add the operand info. |
770 | AddNodeIDOperands(ID, N->ops()); |
771 | |
772 | // Handle SDNode leaves with special info.
773 | AddNodeIDCustom(ID, N); |
774 | } |
775 | |
776 | //===----------------------------------------------------------------------===// |
777 | // SelectionDAG Class |
778 | //===----------------------------------------------------------------------===// |
779 | |
780 | /// doNotCSE - Return true if CSE should not be performed for this node. |
781 | static bool doNotCSE(SDNode *N) { |
782 | if (N->getValueType(0) == MVT::Glue) |
783 | return true; // Never CSE anything that produces a flag. |
784 | |
785 | switch (N->getOpcode()) { |
786 | default: break; |
787 | case ISD::HANDLENODE: |
788 | case ISD::EH_LABEL: |
789 | return true; // Never CSE these nodes. |
790 | } |
791 | |
792 | // Check that remaining values produced are not flags. |
793 | for (unsigned i = 1, e = N->getNumValues(); i != e; ++i) |
794 | if (N->getValueType(i) == MVT::Glue) |
795 | return true; // Never CSE anything that produces a flag. |
796 | |
797 | return false; |
798 | } |
799 | |
800 | /// RemoveDeadNodes - This method deletes all unreachable nodes in the |
801 | /// SelectionDAG. |
802 | void SelectionDAG::RemoveDeadNodes() { |
803 | // Create a dummy node (which is not added to allnodes) that adds a reference
804 | // to the root node, preventing it from being deleted.
805 | HandleSDNode Dummy(getRoot()); |
806 | |
807 | SmallVector<SDNode*, 128> DeadNodes; |
808 | |
809 | // Add all obviously-dead nodes to the DeadNodes worklist. |
810 | for (SDNode &Node : allnodes()) |
811 | if (Node.use_empty()) |
812 | DeadNodes.push_back(&Node); |
813 | |
814 | RemoveDeadNodes(DeadNodes); |
815 | |
816 | // If the root changed (e.g. it was a dead load), update the root.
817 | setRoot(Dummy.getValue()); |
818 | } |
819 | |
820 | /// RemoveDeadNodes - This method deletes the unreachable nodes in the |
821 | /// given list, and any nodes that become unreachable as a result. |
822 | void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) { |
823 | |
824 | // Process the worklist, deleting the nodes and adding their uses to the |
825 | // worklist. |
826 | while (!DeadNodes.empty()) { |
827 | SDNode *N = DeadNodes.pop_back_val(); |
828 | // Skip to next node if we've already managed to delete the node. This could
829 | // happen if replacing a node causes a node previously added to the worklist
830 | // to be deleted.
831 | if (N->getOpcode() == ISD::DELETED_NODE) |
832 | continue; |
833 | |
834 | for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next) |
835 | DUL->NodeDeleted(N, nullptr); |
836 | |
837 | // Take the node out of the appropriate CSE map. |
838 | RemoveNodeFromCSEMaps(N); |
839 | |
840 | // Next, brutally remove the operand list. This is safe to do, as there are |
841 | // no cycles in the graph. |
842 | for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) { |
843 | SDUse &Use = *I++; |
844 | SDNode *Operand = Use.getNode(); |
845 | Use.set(SDValue()); |
846 | |
847 | // Now that we removed this operand, see if there are no uses of it left. |
848 | if (Operand->use_empty()) |
849 | DeadNodes.push_back(Operand); |
850 | } |
851 | |
852 | DeallocateNode(N); |
853 | } |
854 | } |
855 | |
856 | void SelectionDAG::RemoveDeadNode(SDNode *N) {
857 | SmallVector<SDNode*, 16> DeadNodes(1, N); |
858 | |
859 | // Create a dummy node that adds a reference to the root node, preventing |
860 | // it from being deleted. (This matters if the root is an operand of the |
861 | // dead node.) |
862 | HandleSDNode Dummy(getRoot()); |
863 | |
864 | RemoveDeadNodes(DeadNodes); |
865 | } |
866 | |
867 | void SelectionDAG::DeleteNode(SDNode *N) { |
868 | // First take this out of the appropriate CSE map. |
869 | RemoveNodeFromCSEMaps(N); |
870 | |
871 | // Finally, remove uses due to operands of this node, remove from the |
872 | // AllNodes list, and delete the node. |
873 | DeleteNodeNotInCSEMaps(N); |
874 | } |
875 | |
876 | void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) { |
877 | assert(N->getIterator() != AllNodes.begin() &&
878 | "Cannot delete the entry node!");
879 | assert(N->use_empty() && "Cannot delete a node that is not dead!");
880 | |
881 | // Drop all of the operands and decrement used node's use counts. |
882 | N->DropOperands(); |
883 | |
884 | DeallocateNode(N); |
885 | } |
886 | |
887 | void SDDbgInfo::add(SDDbgValue *V, bool isParameter) { |
888 | assert(!(V->isVariadic() && isParameter));
889 | if (isParameter) |
890 | ByvalParmDbgValues.push_back(V); |
891 | else |
892 | DbgValues.push_back(V); |
893 | for (const SDNode *Node : V->getSDNodes()) |
894 | if (Node) |
895 | DbgValMap[Node].push_back(V); |
896 | } |
897 | |
898 | void SDDbgInfo::erase(const SDNode *Node) { |
899 | DbgValMapType::iterator I = DbgValMap.find(Node); |
900 | if (I == DbgValMap.end()) |
901 | return; |
902 | for (auto &Val: I->second) |
903 | Val->setIsInvalidated(); |
904 | DbgValMap.erase(I); |
905 | } |
906 | |
907 | void SelectionDAG::DeallocateNode(SDNode *N) { |
908 | // If we have operands, deallocate them. |
909 | removeOperands(N); |
910 | |
911 | NodeAllocator.Deallocate(AllNodes.remove(N)); |
912 | |
913 | // Set the opcode to DELETED_NODE to help catch bugs when node |
914 | // memory is reallocated. |
915 | // FIXME: There are places in SDag that have grown a dependency on the opcode |
916 | // value in the released node. |
917 | __asan_unpoison_memory_region(&N->NodeType, sizeof(N->NodeType)); |
918 | N->NodeType = ISD::DELETED_NODE; |
919 | |
920 | // If any of the SDDbgValue nodes refer to this SDNode, invalidate |
921 | // them and forget about that node. |
922 | DbgInfo->erase(N); |
923 | } |
924 | |
925 | #ifndef NDEBUG
926 | /// VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid. |
927 | static void VerifySDNode(SDNode *N) { |
928 | switch (N->getOpcode()) { |
929 | default: |
930 | break; |
931 | case ISD::BUILD_PAIR: { |
932 | EVT VT = N->getValueType(0); |
933 | assert(N->getNumValues() == 1 && "Too many results!");
934 | assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
935 | "Wrong return type!");
936 | assert(N->getNumOperands() == 2 && "Wrong number of operands!");
937 | assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
938 | "Mismatched operand types!");
939 | assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
940 | "Wrong operand type!");
941 | assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
942 | "Wrong return type size");
943 | break; |
944 | } |
945 | case ISD::BUILD_VECTOR: { |
946 | assert(N->getNumValues() == 1 && "Too many results!");
947 | assert(N->getValueType(0).isVector() && "Wrong return type!");
948 | assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
949 | "Wrong number of operands!");
950 | EVT EltVT = N->getValueType(0).getVectorElementType(); |
951 | for (const SDUse &Op : N->ops()) { |
952 | assert((Op.getValueType() == EltVT ||
953 | (EltVT.isInteger() && Op.getValueType().isInteger() &&
954 | EltVT.bitsLE(Op.getValueType()))) &&
955 | "Wrong operand type!");
956 | assert(Op.getValueType() == N->getOperand(0).getValueType() &&
957 | "Operands must all have the same type");
958 | } |
959 | break; |
960 | } |
961 | } |
962 | } |
963 | #endif // NDEBUG |
964 | |
965 | /// Insert a newly allocated node into the DAG. |
966 | /// |
967 | /// Handles insertion into the all nodes list and CSE map, as well as |
968 | /// verification and other common operations when a new node is allocated. |
969 | void SelectionDAG::InsertNode(SDNode *N) { |
970 | AllNodes.push_back(N); |
971 | #ifndef NDEBUG
972 | N->PersistentId = NextPersistentId++; |
973 | VerifySDNode(N); |
974 | #endif |
975 | for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next) |
976 | DUL->NodeInserted(N); |
977 | } |
978 | |
979 | /// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that |
980 | /// correspond to it. This is useful when we're about to delete or repurpose |
981 | /// the node. We don't want future request for structurally identical nodes |
982 | /// to return N anymore. |
983 | bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) { |
984 | bool Erased = false; |
985 | switch (N->getOpcode()) { |
986 | case ISD::HANDLENODE: return false; // noop. |
987 | case ISD::CONDCODE: |
988 | assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
989 | "Cond code doesn't exist!");
990 | Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr; |
991 | CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr; |
992 | break; |
993 | case ISD::ExternalSymbol: |
994 | Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol()); |
995 | break; |
996 | case ISD::TargetExternalSymbol: { |
997 | ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N); |
998 | Erased = TargetExternalSymbols.erase(std::pair<std::string, unsigned>( |
999 | ESN->getSymbol(), ESN->getTargetFlags())); |
1000 | break; |
1001 | } |
1002 | case ISD::MCSymbol: { |
1003 | auto *MCSN = cast<MCSymbolSDNode>(N); |
1004 | Erased = MCSymbols.erase(MCSN->getMCSymbol()); |
1005 | break; |
1006 | } |
1007 | case ISD::VALUETYPE: { |
1008 | EVT VT = cast<VTSDNode>(N)->getVT(); |
1009 | if (VT.isExtended()) { |
1010 | Erased = ExtendedValueTypeNodes.erase(VT); |
1011 | } else { |
1012 | Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr; |
1013 | ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr; |
1014 | } |
1015 | break; |
1016 | } |
1017 | default: |
1018 | // Remove it from the CSE Map. |
1019 | assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
1020 | assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
1021 | Erased = CSEMap.RemoveNode(N); |
1022 | break; |
1023 | } |
1024 | #ifndef NDEBUG
1025 | // Verify that the node was actually in one of the CSE maps, unless it has a |
1026 | // flag result (which cannot be CSE'd) or is one of the special cases that are |
1027 | // not subject to CSE. |
1028 | if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue && |
1029 | !N->isMachineOpcode() && !doNotCSE(N)) { |
1030 | N->dump(this); |
1031 | dbgs() << "\n"; |
1032 | llvm_unreachable("Node is not in map!");
1033 | } |
1034 | #endif |
1035 | return Erased; |
1036 | } |
1037 | |
1038 | /// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE |
1039 | /// maps and modified in place. Add it back to the CSE maps, unless an identical |
1040 | /// node already exists, in which case transfer all its users to the existing |
1041 | /// node. This transfer can potentially trigger recursive merging. |
1042 | void |
1043 | SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) { |
1044 | // For node types that aren't CSE'd, just act as if no identical node |
1045 | // already exists. |
1046 | if (!doNotCSE(N)) { |
1047 | SDNode *Existing = CSEMap.GetOrInsertNode(N); |
1048 | if (Existing != N) { |
1049 | // If there was already an existing matching node, use ReplaceAllUsesWith |
1050 | // to replace the dead one with the existing one. This can cause |
1051 | // recursive merging of other unrelated nodes down the line. |
1052 | ReplaceAllUsesWith(N, Existing); |
1053 | |
1054 | // N is now dead. Inform the listeners and delete it. |
1055 | for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next) |
1056 | DUL->NodeDeleted(N, Existing); |
1057 | DeleteNodeNotInCSEMaps(N); |
1058 | return; |
1059 | } |
1060 | } |
1061 | |
1062 | // If the node doesn't already exist, we updated it. Inform listeners. |
1063 | for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next) |
1064 | DUL->NodeUpdated(N); |
1065 | } |
1066 | |
1067 | /// FindModifiedNodeSlot - Find a slot for the specified node if its operands |
1068 | /// were replaced with those specified. If this node is never memoized, |
1069 | /// return null, otherwise return a pointer to the slot it would take. If a |
1070 | /// node already exists with these operands, the slot will be non-null. |
1071 | SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op, |
1072 | void *&InsertPos) { |
1073 | if (doNotCSE(N)) |
1074 | return nullptr; |
1075 | |
1076 | SDValue Ops[] = { Op }; |
1077 | FoldingSetNodeID ID; |
1078 | AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops); |
1079 | AddNodeIDCustom(ID, N); |
1080 | SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos); |
1081 | if (Node) |
1082 | Node->intersectFlagsWith(N->getFlags()); |
1083 | return Node; |
1084 | } |
1085 | |
1086 | /// FindModifiedNodeSlot - Find a slot for the specified node if its operands |
1087 | /// were replaced with those specified. If this node is never memoized, |
1088 | /// return null, otherwise return a pointer to the slot it would take. If a |
1089 | /// node already exists with these operands, the slot will be non-null. |
1090 | SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, |
1091 | SDValue Op1, SDValue Op2, |
1092 | void *&InsertPos) { |
1093 | if (doNotCSE(N)) |
1094 | return nullptr; |
1095 | |
1096 | SDValue Ops[] = { Op1, Op2 }; |
1097 | FoldingSetNodeID ID; |
1098 | AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops); |
1099 | AddNodeIDCustom(ID, N); |
1100 | SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos); |
1101 | if (Node) |
1102 | Node->intersectFlagsWith(N->getFlags()); |
1103 | return Node; |
1104 | } |
1105 | |
1106 | /// FindModifiedNodeSlot - Find a slot for the specified node if its operands |
1107 | /// were replaced with those specified. If this node is never memoized, |
1108 | /// return null, otherwise return a pointer to the slot it would take. If a |
1109 | /// node already exists with these operands, the slot will be non-null. |
1110 | SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops, |
1111 | void *&InsertPos) { |
1112 | if (doNotCSE(N)) |
1113 | return nullptr; |
1114 | |
1115 | FoldingSetNodeID ID; |
1116 | AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops); |
1117 | AddNodeIDCustom(ID, N); |
1118 | SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos); |
1119 | if (Node) |
1120 | Node->intersectFlagsWith(N->getFlags()); |
1121 | return Node; |
1122 | } |
1123 | |
1124 | Align SelectionDAG::getEVTAlign(EVT VT) const { |
1125 | Type *Ty = VT == MVT::iPTR ? |
1126 | PointerType::get(Type::getInt8Ty(*getContext()), 0) : |
1127 | VT.getTypeForEVT(*getContext()); |
1128 | |
1129 | return getDataLayout().getABITypeAlign(Ty); |
1130 | } |
1131 | |
1132 | // EntryNode could meaningfully have debug info if we can find it... |
1133 | SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL) |
1134 | : TM(tm), OptLevel(OL), |
1135 | EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)), |
1136 | Root(getEntryNode()) { |
1137 | InsertNode(&EntryNode); |
1138 | DbgInfo = new SDDbgInfo(); |
1139 | } |
1140 | |
1141 | void SelectionDAG::init(MachineFunction &NewMF, |
1142 | OptimizationRemarkEmitter &NewORE, |
1143 | Pass *PassPtr, const TargetLibraryInfo *LibraryInfo, |
1144 | LegacyDivergenceAnalysis * Divergence, |
1145 | ProfileSummaryInfo *PSIin, |
1146 | BlockFrequencyInfo *BFIin) { |
1147 | MF = &NewMF; |
1148 | SDAGISelPass = PassPtr; |
1149 | ORE = &NewORE; |
1150 | TLI = getSubtarget().getTargetLowering(); |
1151 | TSI = getSubtarget().getSelectionDAGInfo(); |
1152 | LibInfo = LibraryInfo; |
1153 | Context = &MF->getFunction().getContext(); |
1154 | DA = Divergence; |
1155 | PSI = PSIin; |
1156 | BFI = BFIin; |
1157 | } |
1158 | |
1159 | SelectionDAG::~SelectionDAG() { |
1160 | assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
1161 | allnodes_clear(); |
1162 | OperandRecycler.clear(OperandAllocator); |
1163 | delete DbgInfo; |
1164 | } |
1165 | |
1166 | bool SelectionDAG::shouldOptForSize() const { |
1167 | return MF->getFunction().hasOptSize() || |
1168 | llvm::shouldOptimizeForSize(FLI->MBB->getBasicBlock(), PSI, BFI); |
1169 | } |
1170 | |
1171 | void SelectionDAG::allnodes_clear() { |
1172 | assert(&*AllNodes.begin() == &EntryNode);
1173 | AllNodes.remove(AllNodes.begin()); |
1174 | while (!AllNodes.empty()) |
1175 | DeallocateNode(&AllNodes.front()); |
1176 | #ifndef NDEBUG
1177 | NextPersistentId = 0; |
1178 | #endif |
1179 | } |
1180 | |
1181 | SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID, |
1182 | void *&InsertPos) { |
1183 | SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos); |
1184 | if (N) { |
1185 | switch (N->getOpcode()) { |
1186 | default: break; |
1187 | case ISD::Constant: |
1188 | case ISD::ConstantFP: |
1189 | llvm_unreachable("Querying for Constant and ConstantFP nodes requires "
1190 | "debug location. Use another overload.");
1191 | } |
1192 | } |
1193 | return N; |
1194 | } |
1195 | |
1196 | SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID, |
1197 | const SDLoc &DL, void *&InsertPos) { |
1198 | SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos); |
1199 | if (N) { |
1200 | switch (N->getOpcode()) { |
1201 | case ISD::Constant: |
1202 | case ISD::ConstantFP: |
1203 | // Erase debug location from the node if the node is used at several |
1204 | // different places. Do not propagate one location to all uses as it |
1205 | // will cause a worse single stepping debugging experience. |
1206 | if (N->getDebugLoc() != DL.getDebugLoc()) |
1207 | N->setDebugLoc(DebugLoc()); |
1208 | break; |
1209 | default: |
1210 | // When the node's point of use is located earlier in the instruction |
1211 | // sequence than its prior point of use, update its debug info to the |
1212 | // earlier location. |
1213 | if (DL.getIROrder() && DL.getIROrder() < N->getIROrder()) |
1214 | N->setDebugLoc(DL.getDebugLoc()); |
1215 | break; |
1216 | } |
1217 | } |
1218 | return N; |
1219 | } |
1220 | |
1221 | void SelectionDAG::clear() { |
1222 | allnodes_clear(); |
1223 | OperandRecycler.clear(OperandAllocator); |
1224 | OperandAllocator.Reset(); |
1225 | CSEMap.clear(); |
1226 | |
1227 | ExtendedValueTypeNodes.clear(); |
1228 | ExternalSymbols.clear(); |
1229 | TargetExternalSymbols.clear(); |
1230 | MCSymbols.clear(); |
1231 | SDCallSiteDbgInfo.clear(); |
1232 | std::fill(CondCodeNodes.begin(), CondCodeNodes.end(), |
1233 | static_cast<CondCodeSDNode*>(nullptr)); |
1234 | std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(), |
1235 | static_cast<SDNode*>(nullptr)); |
1236 | |
1237 | EntryNode.UseList = nullptr; |
1238 | InsertNode(&EntryNode); |
1239 | Root = getEntryNode(); |
1240 | DbgInfo->clear(); |
1241 | } |
1242 | |
1243 | SDValue SelectionDAG::getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT) { |
1244 | return VT.bitsGT(Op.getValueType()) |
1245 | ? getNode(ISD::FP_EXTEND, DL, VT, Op) |
1246 | : getNode(ISD::FP_ROUND, DL, VT, Op, getIntPtrConstant(0, DL)); |
1247 | } |
1248 | |
1249 | std::pair<SDValue, SDValue> |
1250 | SelectionDAG::getStrictFPExtendOrRound(SDValue Op, SDValue Chain, |
1251 | const SDLoc &DL, EVT VT) { |
1252 | assert(!VT.bitsEq(Op.getValueType()) &&
1253 | "Strict no-op FP extend/round not allowed.");
1254 | SDValue Res = |
1255 | VT.bitsGT(Op.getValueType()) |
1256 | ? getNode(ISD::STRICT_FP_EXTEND, DL, {VT, MVT::Other}, {Chain, Op}) |
1257 | : getNode(ISD::STRICT_FP_ROUND, DL, {VT, MVT::Other}, |
1258 | {Chain, Op, getIntPtrConstant(0, DL)}); |
1259 | |
1260 | return std::pair<SDValue, SDValue>(Res, SDValue(Res.getNode(), 1)); |
1261 | } |
1262 | |
1263 | SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) { |
1264 | return VT.bitsGT(Op.getValueType()) ? |
1265 | getNode(ISD::ANY_EXTEND, DL, VT, Op) : |
1266 | getNode(ISD::TRUNCATE, DL, VT, Op); |
1267 | } |
1268 | |
1269 | SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) { |
1270 | return VT.bitsGT(Op.getValueType()) ? |
1271 | getNode(ISD::SIGN_EXTEND, DL, VT, Op) : |
1272 | getNode(ISD::TRUNCATE, DL, VT, Op); |
1273 | } |
1274 | |
1275 | SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) { |
1276 | return VT.bitsGT(Op.getValueType()) ? |
1277 | getNode(ISD::ZERO_EXTEND, DL, VT, Op) : |
1278 | getNode(ISD::TRUNCATE, DL, VT, Op); |
1279 | } |
1280 | |
1281 | SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT, |
1282 | EVT OpVT) { |
1283 | if (VT.bitsLE(Op.getValueType())) |
1284 | return getNode(ISD::TRUNCATE, SL, VT, Op); |
1285 | |
1286 | TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT); |
1287 | return getNode(TLI->getExtendForContent(BType), SL, VT, Op); |
1288 | } |
1289 | |
1290 | SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) { |
1291 | EVT OpVT = Op.getValueType(); |
1292 | assert(VT.isInteger() && OpVT.isInteger() &&
1293 | "Cannot getZeroExtendInReg FP types");
1294 | assert(VT.isVector() == OpVT.isVector() &&
1295 | "getZeroExtendInReg type should be vector iff the operand "
1296 | "type is vector!");
1297 | assert((!VT.isVector() ||
1298 | VT.getVectorElementCount() == OpVT.getVectorElementCount()) &&
1299 | "Vector element counts must match in getZeroExtendInReg");
1300 | assert(VT.bitsLE(OpVT) && "Not extending!");
1301 | if (OpVT == VT) |
1302 | return Op; |
1303 | APInt Imm = APInt::getLowBitsSet(OpVT.getScalarSizeInBits(), |
1304 | VT.getScalarSizeInBits()); |
1305 | return getNode(ISD::AND, DL, OpVT, Op, getConstant(Imm, DL, OpVT)); |
1306 | } |
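
A standalone sketch of the transform at the end of this function: zero-extending the low 8 bits "in register" within a 32-bit value is a single AND with the low-bits mask.

#include <cassert>
#include <cstdint>

int main() {
  uint32_t Op = 0xFFFFFFABu;     // an i8 value (0xAB) living in an i32
  uint32_t Mask = (1u << 8) - 1; // APInt::getLowBitsSet(32, 8) == 0xFF
  assert((Op & Mask) == 0xABu);  // the ISD::AND performs the zext-in-reg
}
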
1307 | |
1308 | SDValue SelectionDAG::getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) { |
1309 | // Only unsigned pointer semantics are supported right now. In the future this |
1310 | // might delegate to TLI to check pointer signedness. |
1311 | return getZExtOrTrunc(Op, DL, VT); |
1312 | } |
1313 | |
1314 | SDValue SelectionDAG::getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) { |
1315 | // Only unsigned pointer semantics are supported right now. In the future this |
1316 | // might delegate to TLI to check pointer signedness. |
1317 | return getZeroExtendInReg(Op, DL, VT); |
1318 | } |
1319 | |
1320 | /// getNOT - Create a bitwise NOT operation as (XOR Val, -1). |
1321 | SDValue SelectionDAG::getNOT(const SDLoc &DL, SDValue Val, EVT VT) { |
1322 | EVT EltVT = VT.getScalarType(); |
1323 | SDValue NegOne = |
1324 | getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL, VT); |
1325 | return getNode(ISD::XOR, DL, VT, Val, NegOne); |
1326 | } |
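
A standalone check of the identity used here: XOR with an all-ones constant is bitwise NOT.

#include <cassert>
#include <cstdint>

int main() {
  uint8_t Val = 0xB1;
  assert(uint8_t(Val ^ 0xFF) == uint8_t(~Val)); // (XOR Val, -1) == NOT Val
}
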
1327 | |
1328 | SDValue SelectionDAG::getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT) { |
1329 | SDValue TrueValue = getBoolConstant(true, DL, VT, VT); |
1330 | return getNode(ISD::XOR, DL, VT, Val, TrueValue); |
1331 | } |
1332 | |
1333 | SDValue SelectionDAG::getBoolConstant(bool V, const SDLoc &DL, EVT VT, |
1334 | EVT OpVT) { |
1335 | if (!V) |
1336 | return getConstant(0, DL, VT); |
1337 | |
1338 | switch (TLI->getBooleanContents(OpVT)) { |
1339 | case TargetLowering::ZeroOrOneBooleanContent: |
1340 | case TargetLowering::UndefinedBooleanContent: |
1341 | return getConstant(1, DL, VT); |
1342 | case TargetLowering::ZeroOrNegativeOneBooleanContent: |
1343 | return getAllOnesConstant(DL, VT); |
1344 | } |
1345 | llvm_unreachable("Unexpected boolean content enum!");
1346 | } |
1347 | |
1348 | SDValue SelectionDAG::getConstant(uint64_t Val, const SDLoc &DL, EVT VT, |
1349 | bool isT, bool isO) { |
1350 | EVT EltVT = VT.getScalarType(); |
1351 | assert((EltVT.getSizeInBits() >= 64 ||
1352 | (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
1353 | "getConstant with a uint64_t value that doesn't fit in the type!");
1354 | return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO); |
1355 | } |
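
The fit check in the assert above, sketched standalone: arithmetically shifting the value right by the bit width must leave 0 (a zero-extended value) or -1 (a sign-extended negative); the width >= 64 guard in the real assert avoids an out-of-range shift.

#include <cassert>
#include <cstdint>

static bool fitsIn(uint64_t Val, unsigned Bits) { // assumes Bits < 64
  return (uint64_t)((int64_t)Val >> Bits) + 1 < 2;
}

int main() {
  assert(fitsIn(0xFFu, 8));               // 255: a valid i8 bit pattern
  assert(fitsIn(0xFFFFFFFFFFFFFF80u, 8)); // -128 sign-extended: also valid
  assert(!fitsIn(0x100u, 8));             // 256 needs more than 8 bits
}
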
1356 | |
1357 | SDValue SelectionDAG::getConstant(const APInt &Val, const SDLoc &DL, EVT VT, |
1358 | bool isT, bool isO) { |
1359 | return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO); |
1360 | } |
1361 | |
1362 | SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL, |
1363 | EVT VT, bool isT, bool isO) { |
1364 | assert(VT.isInteger() && "Cannot create FP integer constant!");
1365 | |
1366 | EVT EltVT = VT.getScalarType(); |
1367 | const ConstantInt *Elt = &Val; |
1368 | |
1369 | // In some cases the vector type is legal but the element type is illegal and |
1370 | // needs to be promoted, for example v8i8 on ARM. In this case, promote the |
1371 | // inserted value (the type does not need to match the vector element type). |
1372 | // Any extra bits introduced will be truncated away. |
1373 | if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) == |
1374 | TargetLowering::TypePromoteInteger) { |
1375 | EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT); |
1376 | APInt NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits()); |
1377 | Elt = ConstantInt::get(*getContext(), NewVal); |
1378 | } |
1379 | // In other cases the element type is illegal and needs to be expanded, for |
1380 | // example v2i64 on MIPS32. In this case, find the nearest legal type, split |
1381 | // the value into n parts and use a vector type with n-times the elements. |
1382 | // Then bitcast to the type requested. |
1383 | // Legalizing constants too early makes the DAGCombiner's job harder so we |
1384 | // only legalize if the DAG tells us we must produce legal types. |
1385 | else if (NewNodesMustHaveLegalTypes && VT.isVector() && |
1386 | TLI->getTypeAction(*getContext(), EltVT) == |
1387 | TargetLowering::TypeExpandInteger) { |
1388 | const APInt &NewVal = Elt->getValue(); |
1389 | EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT); |
1390 | unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits(); |
1391 | |
1392 | // For scalable vectors, try to use a SPLAT_VECTOR_PARTS node. |
1393 | if (VT.isScalableVector()) { |
1394 | assert(EltVT.getSizeInBits() % ViaEltSizeInBits == 0 &&
1395 | "Can only handle an even split!");
1396 | unsigned Parts = EltVT.getSizeInBits() / ViaEltSizeInBits; |
1397 | |
1398 | SmallVector<SDValue, 2> ScalarParts; |
1399 | for (unsigned i = 0; i != Parts; ++i) |
1400 | ScalarParts.push_back(getConstant( |
1401 | NewVal.extractBits(ViaEltSizeInBits, i * ViaEltSizeInBits), DL, |
1402 | ViaEltVT, isT, isO)); |
1403 | |
1404 | return getNode(ISD::SPLAT_VECTOR_PARTS, DL, VT, ScalarParts); |
1405 | } |
1406 | |
1407 | unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits; |
1408 | EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts); |
1409 | |
1410 | // Check the temporary vector is the correct size. If this fails then |
1411 | // getTypeToTransformTo() probably returned a type whose size (in bits) |
1412 | // isn't a power-of-2 factor of the requested type size. |
1413 | assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());
1414 | |
1415 | SmallVector<SDValue, 2> EltParts; |
1416 | for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) |
1417 | EltParts.push_back(getConstant( |
1418 | NewVal.extractBits(ViaEltSizeInBits, i * ViaEltSizeInBits), DL, |
1419 | ViaEltVT, isT, isO)); |
1420 | |
1421 | // EltParts is currently in little endian order. If we actually want |
1422 | // big-endian order then reverse it now. |
1423 | if (getDataLayout().isBigEndian()) |
1424 | std::reverse(EltParts.begin(), EltParts.end()); |
1425 | |
1426 | // The elements must be reversed when the element order is different |
1427 | // to the endianness of the elements (because the BITCAST is itself a |
1428 | // vector shuffle in this situation). However, we do not need any code to |
1429 | // perform this reversal because getConstant() is producing a vector |
1430 | // splat. |
1431 | // This situation occurs in MIPS MSA. |
1432 | |
1433 | SmallVector<SDValue, 8> Ops; |
1434 | for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) |
1435 | llvm::append_range(Ops, EltParts); |
1436 | |
1437 | SDValue V = |
1438 | getNode(ISD::BITCAST, DL, VT, getBuildVector(ViaVecVT, DL, Ops)); |
1439 | return V; |
1440 | } |
1441 | |
1442 | assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
1443 |        "APInt size does not match type size!");
1444 | unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant; |
1445 | FoldingSetNodeID ID; |
1446 | AddNodeIDNode(ID, Opc, getVTList(EltVT), None); |
1447 | ID.AddPointer(Elt); |
1448 | ID.AddBoolean(isO); |
1449 | void *IP = nullptr; |
1450 | SDNode *N = nullptr; |
1451 | if ((N = FindNodeOrInsertPos(ID, DL, IP))) |
1452 | if (!VT.isVector()) |
1453 | return SDValue(N, 0); |
1454 | |
1455 | if (!N) { |
1456 | N = newSDNode<ConstantSDNode>(isT, isO, Elt, EltVT); |
1457 | CSEMap.InsertNode(N, IP); |
1458 | InsertNode(N); |
1459 | NewSDValueDbgMsg(SDValue(N, 0), "Creating constant: ", this); |
1460 | } |
1461 | |
1462 | SDValue Result(N, 0); |
1463 | if (VT.isScalableVector()) |
1464 | Result = getSplatVector(VT, DL, Result); |
1465 | else if (VT.isVector()) |
1466 | Result = getSplatBuildVector(VT, DL, Result); |
1467 | |
1468 | return Result; |
1469 | } |
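     |
     | // A minimal usage sketch, assuming a SelectionDAG &DAG and SDLoc DL in
     | // scope: requesting a vector type yields a splat of the scalar constant.
     | // \code
     | //   SDValue C   = DAG.getConstant(42, DL, MVT::i32);   // i32 42
     | //   SDValue Vec = DAG.getConstant(42, DL, MVT::v4i32); // <42, 42, 42, 42>
     | // \endcode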
1470 | |
1471 | SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, const SDLoc &DL, |
1472 | bool isTarget) { |
1473 | return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget); |
1474 | } |
1475 | |
1476 | SDValue SelectionDAG::getShiftAmountConstant(uint64_t Val, EVT VT, |
1477 | const SDLoc &DL, bool LegalTypes) { |
1478 | assert(VT.isInteger() && "Shift amount is not an integer type!");
1479 | EVT ShiftVT = TLI->getShiftAmountTy(VT, getDataLayout(), LegalTypes); |
1480 | return getConstant(Val, DL, ShiftVT); |
1481 | } |
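     |
     | // Sketch under the same DAG/DL assumptions: the constant is produced in
     | // the target's preferred shift-amount type for VT rather than in VT
     | // itself (X below is an assumed i64 value).
     | // \code
     | //   SDValue Amt = DAG.getShiftAmountConstant(3, MVT::i64, DL);
     | //   SDValue Shl = DAG.getNode(ISD::SHL, DL, MVT::i64, X, Amt);
     | // \endcode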
1482 | |
1483 | SDValue SelectionDAG::getVectorIdxConstant(uint64_t Val, const SDLoc &DL, |
1484 | bool isTarget) { |
1485 | return getConstant(Val, DL, TLI->getVectorIdxTy(getDataLayout()), isTarget); |
1486 | } |
1487 | |
1488 | SDValue SelectionDAG::getConstantFP(const APFloat &V, const SDLoc &DL, EVT VT, |
1489 | bool isTarget) { |
1490 | return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget); |
1491 | } |
1492 | |
1493 | SDValue SelectionDAG::getConstantFP(const ConstantFP &V, const SDLoc &DL, |
1494 | EVT VT, bool isTarget) { |
1495 | assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");
1496 | |
1497 | EVT EltVT = VT.getScalarType(); |
1498 | |
1499 | // Do the map lookup using the actual bit pattern for the floating point |
1500 | // value, so that we don't have problems with 0.0 comparing equal to -0.0, and |
1501 | // we don't have issues with SNANs. |
1502 | unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP; |
1503 | FoldingSetNodeID ID; |
1504 | AddNodeIDNode(ID, Opc, getVTList(EltVT), None); |
1505 | ID.AddPointer(&V); |
1506 | void *IP = nullptr; |
1507 | SDNode *N = nullptr; |
1508 | if ((N = FindNodeOrInsertPos(ID, DL, IP))) |
1509 | if (!VT.isVector()) |
1510 | return SDValue(N, 0); |
1511 | |
1512 | if (!N) { |
1513 | N = newSDNode<ConstantFPSDNode>(isTarget, &V, EltVT); |
1514 | CSEMap.InsertNode(N, IP); |
1515 | InsertNode(N); |
1516 | } |
1517 | |
1518 | SDValue Result(N, 0); |
1519 | if (VT.isScalableVector()) |
1520 | Result = getSplatVector(VT, DL, Result); |
1521 | else if (VT.isVector()) |
1522 | Result = getSplatBuildVector(VT, DL, Result); |
1523 | NewSDValueDbgMsg(Result, "Creating fp constant: ", this); |
1524 | return Result; |
1525 | } |
1526 | |
1527 | SDValue SelectionDAG::getConstantFP(double Val, const SDLoc &DL, EVT VT, |
1528 | bool isTarget) { |
1529 | EVT EltVT = VT.getScalarType(); |
1530 | if (EltVT == MVT::f32) |
1531 | return getConstantFP(APFloat((float)Val), DL, VT, isTarget); |
1532 | if (EltVT == MVT::f64) |
1533 | return getConstantFP(APFloat(Val), DL, VT, isTarget); |
1534 | if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 || |
1535 | EltVT == MVT::f16 || EltVT == MVT::bf16) { |
1536 | bool Ignored; |
1537 | APFloat APF = APFloat(Val); |
1538 | APF.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven, |
1539 | &Ignored); |
1540 | return getConstantFP(APF, DL, VT, isTarget); |
1541 | } |
1542 | llvm_unreachable("Unsupported type in getConstantFP")__builtin_unreachable(); |
1543 | } |
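     |
     | // Sketch (DAG/DL assumed): for element types other than f32/f64 the
     | // double is first rounded to the target semantics, so e.g.
     | // \code
     | //   SDValue H = DAG.getConstantFP(1.0, DL, MVT::f16);
     | //   // APFloat(1.0) converted to IEEE half with rmNearestTiesToEven
     | // \endcode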
1544 | |
1545 | SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, |
1546 | EVT VT, int64_t Offset, bool isTargetGA, |
1547 | unsigned TargetFlags) { |
1548 | assert((TargetFlags == 0 || isTargetGA) &&
1549 |        "Cannot set target flags on target-independent globals");
1550 | |
1551 | // Truncate (with sign-extension) the offset value to the pointer size. |
1552 | unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType()); |
1553 | if (BitWidth < 64) |
1554 | Offset = SignExtend64(Offset, BitWidth); |
1555 | |
1556 | unsigned Opc; |
1557 | if (GV->isThreadLocal()) |
1558 | Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress; |
1559 | else |
1560 | Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress; |
1561 | |
1562 | FoldingSetNodeID ID; |
1563 | AddNodeIDNode(ID, Opc, getVTList(VT), None); |
1564 | ID.AddPointer(GV); |
1565 | ID.AddInteger(Offset); |
1566 | ID.AddInteger(TargetFlags); |
1567 | void *IP = nullptr; |
1568 | if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) |
1569 | return SDValue(E, 0); |
1570 | |
1571 | auto *N = newSDNode<GlobalAddressSDNode>( |
1572 | Opc, DL.getIROrder(), DL.getDebugLoc(), GV, VT, Offset, TargetFlags); |
1573 | CSEMap.InsertNode(N, IP); |
1574 | InsertNode(N); |
1575 | return SDValue(N, 0); |
1576 | } |
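     |
     | // Worked example of the offset truncation above: with 32-bit pointers
     | // (BitWidth == 32), an Offset of 0xFFFFFFFF becomes
     | // SignExtend64(0xFFFFFFFF, 32) == -1, so equivalent offsets always hash
     | // and CSE to the same node however the caller computed them.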
1577 | |
1578 | SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) { |
1579 | unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex; |
1580 | FoldingSetNodeID ID; |
1581 | AddNodeIDNode(ID, Opc, getVTList(VT), None); |
1582 | ID.AddInteger(FI); |
1583 | void *IP = nullptr; |
1584 | if (SDNode *E = FindNodeOrInsertPos(ID, IP)) |
1585 | return SDValue(E, 0); |
1586 | |
1587 | auto *N = newSDNode<FrameIndexSDNode>(FI, VT, isTarget); |
1588 | CSEMap.InsertNode(N, IP); |
1589 | InsertNode(N); |
1590 | return SDValue(N, 0); |
1591 | } |
1592 | |
1593 | SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget, |
1594 | unsigned TargetFlags) { |
1595 | assert((TargetFlags == 0 || isTarget) &&
1596 |        "Cannot set target flags on target-independent jump tables");
1597 | unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable; |
1598 | FoldingSetNodeID ID; |
1599 | AddNodeIDNode(ID, Opc, getVTList(VT), None); |
1600 | ID.AddInteger(JTI); |
1601 | ID.AddInteger(TargetFlags); |
1602 | void *IP = nullptr; |
1603 | if (SDNode *E = FindNodeOrInsertPos(ID, IP)) |
1604 | return SDValue(E, 0); |
1605 | |
1606 | auto *N = newSDNode<JumpTableSDNode>(JTI, VT, isTarget, TargetFlags); |
1607 | CSEMap.InsertNode(N, IP); |
1608 | InsertNode(N); |
1609 | return SDValue(N, 0); |
1610 | } |
1611 | |
1612 | SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT, |
1613 | MaybeAlign Alignment, int Offset, |
1614 | bool isTarget, unsigned TargetFlags) { |
1615 | assert((TargetFlags == 0 || isTarget) &&
1616 |        "Cannot set target flags on target-independent constant pools");
1617 | if (!Alignment) |
1618 | Alignment = shouldOptForSize() |
1619 | ? getDataLayout().getABITypeAlign(C->getType()) |
1620 | : getDataLayout().getPrefTypeAlign(C->getType()); |
1621 | unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool; |
1622 | FoldingSetNodeID ID; |
1623 | AddNodeIDNode(ID, Opc, getVTList(VT), None); |
1624 | ID.AddInteger(Alignment->value()); |
1625 | ID.AddInteger(Offset); |
1626 | ID.AddPointer(C); |
1627 | ID.AddInteger(TargetFlags); |
1628 | void *IP = nullptr; |
1629 | if (SDNode *E = FindNodeOrInsertPos(ID, IP)) |
1630 | return SDValue(E, 0); |
1631 | |
1632 | auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, *Alignment, |
1633 | TargetFlags); |
1634 | CSEMap.InsertNode(N, IP); |
1635 | InsertNode(N); |
1636 | SDValue V = SDValue(N, 0); |
1637 | NewSDValueDbgMsg(V, "Creating new constant pool: ", this); |
1638 | return V; |
1639 | } |
1640 | |
1641 | SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT, |
1642 | MaybeAlign Alignment, int Offset, |
1643 | bool isTarget, unsigned TargetFlags) { |
1644 | assert((TargetFlags == 0 || isTarget) &&
1645 |        "Cannot set target flags on target-independent constant pools");
1646 | if (!Alignment) |
1647 | Alignment = getDataLayout().getPrefTypeAlign(C->getType()); |
1648 | unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool; |
1649 | FoldingSetNodeID ID; |
1650 | AddNodeIDNode(ID, Opc, getVTList(VT), None); |
1651 | ID.AddInteger(Alignment->value()); |
1652 | ID.AddInteger(Offset); |
1653 | C->addSelectionDAGCSEId(ID); |
1654 | ID.AddInteger(TargetFlags); |
1655 | void *IP = nullptr; |
1656 | if (SDNode *E = FindNodeOrInsertPos(ID, IP)) |
1657 | return SDValue(E, 0); |
1658 | |
1659 | auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, *Alignment, |
1660 | TargetFlags); |
1661 | CSEMap.InsertNode(N, IP); |
1662 | InsertNode(N); |
1663 | return SDValue(N, 0); |
1664 | } |
1665 | |
1666 | SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset, |
1667 | unsigned TargetFlags) { |
1668 | FoldingSetNodeID ID; |
1669 | AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None); |
1670 | ID.AddInteger(Index); |
1671 | ID.AddInteger(Offset); |
1672 | ID.AddInteger(TargetFlags); |
1673 | void *IP = nullptr; |
1674 | if (SDNode *E = FindNodeOrInsertPos(ID, IP)) |
1675 | return SDValue(E, 0); |
1676 | |
1677 | auto *N = newSDNode<TargetIndexSDNode>(Index, VT, Offset, TargetFlags); |
1678 | CSEMap.InsertNode(N, IP); |
1679 | InsertNode(N); |
1680 | return SDValue(N, 0); |
1681 | } |
1682 | |
1683 | SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) { |
1684 | FoldingSetNodeID ID; |
1685 | AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None); |
1686 | ID.AddPointer(MBB); |
1687 | void *IP = nullptr; |
1688 | if (SDNode *E = FindNodeOrInsertPos(ID, IP)) |
1689 | return SDValue(E, 0); |
1690 | |
1691 | auto *N = newSDNode<BasicBlockSDNode>(MBB); |
1692 | CSEMap.InsertNode(N, IP); |
1693 | InsertNode(N); |
1694 | return SDValue(N, 0); |
1695 | } |
1696 | |
1697 | SDValue SelectionDAG::getValueType(EVT VT) { |
1698 | if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >= |
1699 | ValueTypeNodes.size()) |
1700 | ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1); |
1701 | |
1702 | SDNode *&N = VT.isExtended() ? |
1703 | ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy]; |
1704 | |
1705 | if (N) return SDValue(N, 0); |
1706 | N = newSDNode<VTSDNode>(VT); |
1707 | InsertNode(N); |
1708 | return SDValue(N, 0); |
1709 | } |
1710 | |
1711 | SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) { |
1712 | SDNode *&N = ExternalSymbols[Sym]; |
1713 | if (N) return SDValue(N, 0); |
1714 | N = newSDNode<ExternalSymbolSDNode>(false, Sym, 0, VT); |
1715 | InsertNode(N); |
1716 | return SDValue(N, 0); |
1717 | } |
1718 | |
1719 | SDValue SelectionDAG::getMCSymbol(MCSymbol *Sym, EVT VT) { |
1720 | SDNode *&N = MCSymbols[Sym]; |
1721 | if (N) |
1722 | return SDValue(N, 0); |
1723 | N = newSDNode<MCSymbolSDNode>(Sym, VT); |
1724 | InsertNode(N); |
1725 | return SDValue(N, 0); |
1726 | } |
1727 | |
1728 | SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT, |
1729 | unsigned TargetFlags) { |
1730 | SDNode *&N = |
1731 | TargetExternalSymbols[std::pair<std::string, unsigned>(Sym, TargetFlags)]; |
1732 | if (N) return SDValue(N, 0); |
1733 | N = newSDNode<ExternalSymbolSDNode>(true, Sym, TargetFlags, VT); |
1734 | InsertNode(N); |
1735 | return SDValue(N, 0); |
1736 | } |
1737 | |
1738 | SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) { |
1739 | if ((unsigned)Cond >= CondCodeNodes.size()) |
1740 | CondCodeNodes.resize(Cond+1); |
1741 | |
1742 | if (!CondCodeNodes[Cond]) { |
1743 | auto *N = newSDNode<CondCodeSDNode>(Cond); |
1744 | CondCodeNodes[Cond] = N; |
1745 | InsertNode(N); |
1746 | } |
1747 | |
1748 | return SDValue(CondCodeNodes[Cond], 0); |
1749 | } |
1750 | |
1751 | SDValue SelectionDAG::getStepVector(const SDLoc &DL, EVT ResVT) { |
1752 | APInt One(ResVT.getScalarSizeInBits(), 1); |
1753 | return getStepVector(DL, ResVT, One); |
1754 | } |
1755 | |
1756 | SDValue SelectionDAG::getStepVector(const SDLoc &DL, EVT ResVT, APInt StepVal) { |
1757 | assert(ResVT.getScalarSizeInBits() == StepVal.getBitWidth());
1758 | if (ResVT.isScalableVector()) |
1759 | return getNode( |
1760 | ISD::STEP_VECTOR, DL, ResVT, |
1761 | getTargetConstant(StepVal, DL, ResVT.getVectorElementType())); |
1762 | |
1763 | SmallVector<SDValue, 16> OpsStepConstants; |
1764 | for (uint64_t i = 0; i < ResVT.getVectorNumElements(); i++) |
1765 | OpsStepConstants.push_back( |
1766 | getConstant(StepVal * i, DL, ResVT.getVectorElementType())); |
1767 | return getBuildVector(ResVT, DL, OpsStepConstants); |
1768 | } |
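     |
     | // Sketch (DAG/DL assumed): for a fixed-length result the sequence
     | // <0, Step, 2*Step, ...> is built directly.
     | // \code
     | //   APInt Step(32, 2);
     | //   SDValue SV = DAG.getStepVector(DL, MVT::v4i32, Step); // <0, 2, 4, 6>
     | // \endcode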
1769 | |
1770 | /// Swaps the values of N1 and N2. Swaps all indices in the shuffle mask M that |
1771 | /// point at N1 to point at N2 and indices that point at N2 to point at N1. |
1772 | static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef<int> M) { |
1773 | std::swap(N1, N2); |
1774 | ShuffleVectorSDNode::commuteMask(M); |
1775 | } |
1776 | |
1777 | SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, |
1778 | SDValue N2, ArrayRef<int> Mask) { |
1779 | assert(VT.getVectorNumElements() == Mask.size() &&
1780 |        "Must have the same number of vector elements as mask elements!");
1781 | assert(VT == N1.getValueType() && VT == N2.getValueType() &&
1782 |        "Invalid VECTOR_SHUFFLE");
1783 | |
1784 | // Canonicalize shuffle undef, undef -> undef |
1785 | if (N1.isUndef() && N2.isUndef()) |
1786 | return getUNDEF(VT); |
1787 | |
1788 | // Validate that all indices in Mask are within the range of the elements |
1789 | // input to the shuffle. |
1790 | int NElts = Mask.size(); |
1791 | assert(llvm::all_of(Mask,
1792 |                     [&](int M) { return M < (NElts * 2) && M >= -1; }) &&
1793 |        "Index out of range");
1794 | |
1795 | // Copy the mask so we can do any needed cleanup. |
1796 | SmallVector<int, 8> MaskVec(Mask.begin(), Mask.end()); |
1797 | |
1798 | // Canonicalize shuffle v, v -> v, undef |
1799 | if (N1 == N2) { |
1800 | N2 = getUNDEF(VT); |
1801 | for (int i = 0; i != NElts; ++i) |
1802 | if (MaskVec[i] >= NElts) MaskVec[i] -= NElts; |
1803 | } |
1804 | |
1805 | // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask. |
1806 | if (N1.isUndef()) |
1807 | commuteShuffle(N1, N2, MaskVec); |
1808 | |
1809 | if (TLI->hasVectorBlend()) { |
1810 | // If shuffling a splat, try to blend the splat instead. We do this here so |
1811 | // that even when this arises during lowering we don't have to re-handle it. |
1812 | auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) { |
1813 | BitVector UndefElements; |
1814 | SDValue Splat = BV->getSplatValue(&UndefElements); |
1815 | if (!Splat) |
1816 | return; |
1817 | |
1818 | for (int i = 0; i < NElts; ++i) { |
1819 | if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + NElts)) |
1820 | continue; |
1821 | |
1822 | // If this input comes from undef, mark it as such. |
1823 | if (UndefElements[MaskVec[i] - Offset]) { |
1824 | MaskVec[i] = -1; |
1825 | continue; |
1826 | } |
1827 | |
1828 | // If we can blend a non-undef lane, use that instead. |
1829 | if (!UndefElements[i]) |
1830 | MaskVec[i] = i + Offset; |
1831 | } |
1832 | }; |
1833 | if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1)) |
1834 | BlendSplat(N1BV, 0); |
1835 | if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2)) |
1836 | BlendSplat(N2BV, NElts); |
1837 | } |
1838 | |
1839 | // Canonicalize all indices into lhs -> shuffle lhs, undef
1840 | // Canonicalize all indices into rhs -> shuffle rhs, undef
1841 | bool AllLHS = true, AllRHS = true; |
1842 | bool N2Undef = N2.isUndef(); |
1843 | for (int i = 0; i != NElts; ++i) { |
1844 | if (MaskVec[i] >= NElts) { |
1845 | if (N2Undef) |
1846 | MaskVec[i] = -1; |
1847 | else |
1848 | AllLHS = false; |
1849 | } else if (MaskVec[i] >= 0) { |
1850 | AllRHS = false; |
1851 | } |
1852 | } |
1853 | if (AllLHS && AllRHS) |
1854 | return getUNDEF(VT); |
1855 | if (AllLHS && !N2Undef) |
1856 | N2 = getUNDEF(VT); |
1857 | if (AllRHS) { |
1858 | N1 = getUNDEF(VT); |
1859 | commuteShuffle(N1, N2, MaskVec); |
1860 | } |
1861 | // Reset our undef status after accounting for the mask. |
1862 | N2Undef = N2.isUndef(); |
1863 | // Re-check whether both sides ended up undef. |
1864 | if (N1.isUndef() && N2Undef) |
1865 | return getUNDEF(VT); |
1866 | |
1867 | // If this is an identity shuffle, return the LHS operand.
1868 | bool Identity = true, AllSame = true; |
1869 | for (int i = 0; i != NElts; ++i) { |
1870 | if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity = false; |
1871 | if (MaskVec[i] != MaskVec[0]) AllSame = false; |
1872 | } |
1873 | if (Identity && NElts) |
1874 | return N1; |
1875 | |
1876 | // Shuffling a constant splat doesn't change the result. |
1877 | if (N2Undef) { |
1878 | SDValue V = N1; |
1879 | |
1880 | // Look through any bitcasts. We check that these don't change the number
1881 | // (and size) of elements and just change their types.
1882 | while (V.getOpcode() == ISD::BITCAST) |
1883 | V = V->getOperand(0); |
1884 | |
1885 | // A splat should always show up as a build vector node. |
1886 | if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) { |
1887 | BitVector UndefElements; |
1888 | SDValue Splat = BV->getSplatValue(&UndefElements); |
1889 | // If this is a splat of an undef, shuffling it is also undef. |
1890 | if (Splat && Splat.isUndef()) |
1891 | return getUNDEF(VT); |
1892 | |
1893 | bool SameNumElts = |
1894 | V.getValueType().getVectorNumElements() == VT.getVectorNumElements(); |
1895 | |
1896 | // We only have a splat which can skip shuffles if there is a splatted |
1897 | // value and no undef lanes rearranged by the shuffle. |
1898 | if (Splat && UndefElements.none()) { |
1899 | // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the |
1900 | // number of elements match or the value splatted is a zero constant. |
1901 | if (SameNumElts) |
1902 | return N1; |
1903 | if (auto *C = dyn_cast<ConstantSDNode>(Splat)) |
1904 | if (C->isNullValue()) |
1905 | return N1; |
1906 | } |
1907 | |
1908 | // If the shuffle itself creates a splat, build the vector directly. |
1909 | if (AllSame && SameNumElts) { |
1910 | EVT BuildVT = BV->getValueType(0); |
1911 | const SDValue &Splatted = BV->getOperand(MaskVec[0]); |
1912 | SDValue NewBV = getSplatBuildVector(BuildVT, dl, Splatted); |
1913 | |
1914 | // We may have jumped through bitcasts, so the type of the |
1915 | // BUILD_VECTOR may not match the type of the shuffle. |
1916 | if (BuildVT != VT) |
1917 | NewBV = getNode(ISD::BITCAST, dl, VT, NewBV); |
1918 | return NewBV; |
1919 | } |
1920 | } |
1921 | } |
1922 | |
1923 | FoldingSetNodeID ID; |
1924 | SDValue Ops[2] = { N1, N2 }; |
1925 | AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops); |
1926 | for (int i = 0; i != NElts; ++i) |
1927 | ID.AddInteger(MaskVec[i]); |
1928 | |
1929 | void* IP = nullptr; |
1930 | if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) |
1931 | return SDValue(E, 0); |
1932 | |
1933 | // Allocate the mask array for the node out of the BumpPtrAllocator, since |
1934 | // SDNode doesn't have access to it. This memory will be "leaked" when |
1935 | // the node is deallocated, but recovered when the NodeAllocator is released. |
1936 | int *MaskAlloc = OperandAllocator.Allocate<int>(NElts); |
1937 | llvm::copy(MaskVec, MaskAlloc); |
1938 | |
1939 | auto *N = newSDNode<ShuffleVectorSDNode>(VT, dl.getIROrder(), |
1940 | dl.getDebugLoc(), MaskAlloc); |
1941 | createOperands(N, Ops); |
1942 | |
1943 | CSEMap.InsertNode(N, IP); |
1944 | InsertNode(N); |
1945 | SDValue V = SDValue(N, 0); |
1946 | NewSDValueDbgMsg(V, "Creating new node: ", this); |
1947 | return V; |
1948 | } |
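     |
     | // Example of the canonicalizations above (DAG/DL assumed): a self-shuffle
     | // folds away entirely.
     | // \code
     | //   // getVectorShuffle(v4i32, DL, A, A, {4, 1, 6, 3})
     | //   //   -> shuffle(A, undef, {0, 1, 2, 3})  (N1 == N2 rewrite)
     | //   //   -> A                                (identity mask fold)
     | // \endcode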
1949 | |
1950 | SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) { |
1951 | EVT VT = SV.getValueType(0); |
1952 | SmallVector<int, 8> MaskVec(SV.getMask().begin(), SV.getMask().end()); |
1953 | ShuffleVectorSDNode::commuteMask(MaskVec); |
1954 | |
1955 | SDValue Op0 = SV.getOperand(0); |
1956 | SDValue Op1 = SV.getOperand(1); |
1957 | return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, MaskVec); |
1958 | } |
1959 | |
1960 | SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) { |
1961 | FoldingSetNodeID ID; |
1962 | AddNodeIDNode(ID, ISD::Register, getVTList(VT), None); |
1963 | ID.AddInteger(RegNo); |
1964 | void *IP = nullptr; |
1965 | if (SDNode *E = FindNodeOrInsertPos(ID, IP)) |
1966 | return SDValue(E, 0); |
1967 | |
1968 | auto *N = newSDNode<RegisterSDNode>(RegNo, VT); |
1969 | N->SDNodeBits.IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, DA); |
1970 | CSEMap.InsertNode(N, IP); |
1971 | InsertNode(N); |
1972 | return SDValue(N, 0); |
1973 | } |
1974 | |
1975 | SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) { |
1976 | FoldingSetNodeID ID; |
1977 | AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None); |
1978 | ID.AddPointer(RegMask); |
1979 | void *IP = nullptr; |
1980 | if (SDNode *E = FindNodeOrInsertPos(ID, IP)) |
1981 | return SDValue(E, 0); |
1982 | |
1983 | auto *N = newSDNode<RegisterMaskSDNode>(RegMask); |
1984 | CSEMap.InsertNode(N, IP); |
1985 | InsertNode(N); |
1986 | return SDValue(N, 0); |
1987 | } |
1988 | |
1989 | SDValue SelectionDAG::getEHLabel(const SDLoc &dl, SDValue Root, |
1990 | MCSymbol *Label) { |
1991 | return getLabelNode(ISD::EH_LABEL, dl, Root, Label); |
1992 | } |
1993 | |
1994 | SDValue SelectionDAG::getLabelNode(unsigned Opcode, const SDLoc &dl, |
1995 | SDValue Root, MCSymbol *Label) { |
1996 | FoldingSetNodeID ID; |
1997 | SDValue Ops[] = { Root }; |
1998 | AddNodeIDNode(ID, Opcode, getVTList(MVT::Other), Ops); |
1999 | ID.AddPointer(Label); |
2000 | void *IP = nullptr; |
2001 | if (SDNode *E = FindNodeOrInsertPos(ID, IP)) |
2002 | return SDValue(E, 0); |
2003 | |
2004 | auto *N = |
2005 | newSDNode<LabelSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), Label); |
2006 | createOperands(N, Ops); |
2007 | |
2008 | CSEMap.InsertNode(N, IP); |
2009 | InsertNode(N); |
2010 | return SDValue(N, 0); |
2011 | } |
2012 | |
2013 | SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT, |
2014 | int64_t Offset, bool isTarget, |
2015 | unsigned TargetFlags) { |
2016 | unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress; |
2017 | |
2018 | FoldingSetNodeID ID; |
2019 | AddNodeIDNode(ID, Opc, getVTList(VT), None); |
2020 | ID.AddPointer(BA); |
2021 | ID.AddInteger(Offset); |
2022 | ID.AddInteger(TargetFlags); |
2023 | void *IP = nullptr; |
2024 | if (SDNode *E = FindNodeOrInsertPos(ID, IP)) |
2025 | return SDValue(E, 0); |
2026 | |
2027 | auto *N = newSDNode<BlockAddressSDNode>(Opc, VT, BA, Offset, TargetFlags); |
2028 | CSEMap.InsertNode(N, IP); |
2029 | InsertNode(N); |
2030 | return SDValue(N, 0); |
2031 | } |
2032 | |
2033 | SDValue SelectionDAG::getSrcValue(const Value *V) { |
2034 | FoldingSetNodeID ID; |
2035 | AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None); |
2036 | ID.AddPointer(V); |
2037 | |
2038 | void *IP = nullptr; |
2039 | if (SDNode *E = FindNodeOrInsertPos(ID, IP)) |
2040 | return SDValue(E, 0); |
2041 | |
2042 | auto *N = newSDNode<SrcValueSDNode>(V); |
2043 | CSEMap.InsertNode(N, IP); |
2044 | InsertNode(N); |
2045 | return SDValue(N, 0); |
2046 | } |
2047 | |
2048 | SDValue SelectionDAG::getMDNode(const MDNode *MD) { |
2049 | FoldingSetNodeID ID; |
2050 | AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None); |
2051 | ID.AddPointer(MD); |
2052 | |
2053 | void *IP = nullptr; |
2054 | if (SDNode *E = FindNodeOrInsertPos(ID, IP)) |
2055 | return SDValue(E, 0); |
2056 | |
2057 | auto *N = newSDNode<MDNodeSDNode>(MD); |
2058 | CSEMap.InsertNode(N, IP); |
2059 | InsertNode(N); |
2060 | return SDValue(N, 0); |
2061 | } |
2062 | |
2063 | SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) { |
2064 | if (VT == V.getValueType()) |
2065 | return V; |
2066 | |
2067 | return getNode(ISD::BITCAST, SDLoc(V), VT, V); |
2068 | } |
2069 | |
2070 | SDValue SelectionDAG::getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, |
2071 | unsigned SrcAS, unsigned DestAS) { |
2072 | SDValue Ops[] = {Ptr}; |
2073 | FoldingSetNodeID ID; |
2074 | AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops); |
2075 | ID.AddInteger(SrcAS); |
2076 | ID.AddInteger(DestAS); |
2077 | |
2078 | void *IP = nullptr; |
2079 | if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) |
2080 | return SDValue(E, 0); |
2081 | |
2082 | auto *N = newSDNode<AddrSpaceCastSDNode>(dl.getIROrder(), dl.getDebugLoc(), |
2083 | VT, SrcAS, DestAS); |
2084 | createOperands(N, Ops); |
2085 | |
2086 | CSEMap.InsertNode(N, IP); |
2087 | InsertNode(N); |
2088 | return SDValue(N, 0); |
2089 | } |
2090 | |
2091 | SDValue SelectionDAG::getFreeze(SDValue V) { |
2092 | return getNode(ISD::FREEZE, SDLoc(V), V.getValueType(), V); |
2093 | } |
2094 | |
2095 | /// getShiftAmountOperand - Return the specified value cast to
2096 | /// the target's desired shift amount type.
2097 | SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) { |
2098 | EVT OpTy = Op.getValueType(); |
2099 | EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout()); |
2100 | if (OpTy == ShTy || OpTy.isVector()) return Op; |
2101 | |
2102 | return getZExtOrTrunc(Op, SDLoc(Op), ShTy); |
2103 | } |
2104 | |
2105 | SDValue SelectionDAG::expandVAArg(SDNode *Node) { |
2106 | SDLoc dl(Node); |
2107 | const TargetLowering &TLI = getTargetLoweringInfo(); |
2108 | const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); |
2109 | EVT VT = Node->getValueType(0); |
2110 | SDValue Tmp1 = Node->getOperand(0); |
2111 | SDValue Tmp2 = Node->getOperand(1); |
2112 | const MaybeAlign MA(Node->getConstantOperandVal(3)); |
2113 | |
2114 | SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1, |
2115 | Tmp2, MachinePointerInfo(V)); |
2116 | SDValue VAList = VAListLoad; |
2117 | |
2118 | if (MA && *MA > TLI.getMinStackArgumentAlignment()) { |
2119 | VAList = getNode(ISD::ADD, dl, VAList.getValueType(), VAList, |
2120 | getConstant(MA->value() - 1, dl, VAList.getValueType())); |
2121 | |
2122 | VAList = |
2123 | getNode(ISD::AND, dl, VAList.getValueType(), VAList, |
2124 | getConstant(-(int64_t)MA->value(), dl, VAList.getValueType())); |
2125 | } |
2126 | |
2127 | // Increment the pointer, VAList, to the next vaarg |
2128 | Tmp1 = getNode(ISD::ADD, dl, VAList.getValueType(), VAList, |
2129 | getConstant(getDataLayout().getTypeAllocSize( |
2130 | VT.getTypeForEVT(*getContext())), |
2131 | dl, VAList.getValueType())); |
2132 | // Store the incremented VAList to the legalized pointer |
2133 | Tmp1 = |
2134 | getStore(VAListLoad.getValue(1), dl, Tmp1, Tmp2, MachinePointerInfo(V)); |
2135 | // Load the actual argument out of the pointer VAList |
2136 | return getLoad(VT, dl, Tmp1, VAList, MachinePointerInfo()); |
2137 | } |
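     |
     | // Worked example of the alignment step above: with MA == Align(16) and a
     | // loaded VAList of 0x1004, (0x1004 + 15) & ~15 == 0x1010, i.e. the list
     | // pointer is rounded up to the next 16-byte boundary before the argument
     | // is loaded.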
2138 | |
2139 | SDValue SelectionDAG::expandVACopy(SDNode *Node) { |
2140 | SDLoc dl(Node); |
2141 | const TargetLowering &TLI = getTargetLoweringInfo(); |
2142 | // This defaults to loading a pointer from the input and storing it to the |
2143 | // output, returning the chain. |
2144 | const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue(); |
2145 | const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue(); |
2146 | SDValue Tmp1 = |
2147 | getLoad(TLI.getPointerTy(getDataLayout()), dl, Node->getOperand(0), |
2148 | Node->getOperand(2), MachinePointerInfo(VS)); |
2149 | return getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1), |
2150 | MachinePointerInfo(VD)); |
2151 | } |
2152 | |
2153 | Align SelectionDAG::getReducedAlign(EVT VT, bool UseABI) { |
2154 | const DataLayout &DL = getDataLayout(); |
2155 | Type *Ty = VT.getTypeForEVT(*getContext()); |
2156 | Align RedAlign = UseABI ? DL.getABITypeAlign(Ty) : DL.getPrefTypeAlign(Ty); |
2157 | |
2158 | if (TLI->isTypeLegal(VT) || !VT.isVector()) |
2159 | return RedAlign; |
2160 | |
2161 | const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering(); |
2162 | const Align StackAlign = TFI->getStackAlign(); |
2163 | |
2164 | // See if we can choose a smaller ABI alignment in cases where it's an |
2165 | // illegal vector type that will get broken down. |
2166 | if (RedAlign > StackAlign) { |
2167 | EVT IntermediateVT; |
2168 | MVT RegisterVT; |
2169 | unsigned NumIntermediates; |
2170 | TLI->getVectorTypeBreakdown(*getContext(), VT, IntermediateVT, |
2171 | NumIntermediates, RegisterVT); |
2172 | Ty = IntermediateVT.getTypeForEVT(*getContext()); |
2173 | Align RedAlign2 = UseABI ? DL.getABITypeAlign(Ty) : DL.getPrefTypeAlign(Ty); |
2174 | if (RedAlign2 < RedAlign) |
2175 | RedAlign = RedAlign2; |
2176 | } |
2177 | |
2178 | return RedAlign; |
2179 | } |
2180 | |
2181 | SDValue SelectionDAG::CreateStackTemporary(TypeSize Bytes, Align Alignment) { |
2182 | MachineFrameInfo &MFI = MF->getFrameInfo(); |
2183 | const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering(); |
2184 | int StackID = 0; |
2185 | if (Bytes.isScalable()) |
2186 | StackID = TFI->getStackIDForScalableVectors(); |
2187 | // The stack id gives an indication of whether the object is scalable or |
2188 | // not, so it's safe to pass in the minimum size here. |
2189 | int FrameIdx = MFI.CreateStackObject(Bytes.getKnownMinSize(), Alignment, |
2190 | false, nullptr, StackID); |
2191 | return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout())); |
2192 | } |
2193 | |
2194 | SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) { |
2195 | Type *Ty = VT.getTypeForEVT(*getContext()); |
2196 | Align StackAlign = |
2197 | std::max(getDataLayout().getPrefTypeAlign(Ty), Align(minAlign)); |
2198 | return CreateStackTemporary(VT.getStoreSize(), StackAlign); |
2199 | } |
2200 | |
2201 | SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) { |
2202 | TypeSize VT1Size = VT1.getStoreSize(); |
2203 | TypeSize VT2Size = VT2.getStoreSize(); |
2204 | assert(VT1Size.isScalable() == VT2Size.isScalable() &&
2205 |        "Don't know how to choose the maximum size when creating a stack "
2206 |        "temporary");
2207 | TypeSize Bytes = |
2208 | VT1Size.getKnownMinSize() > VT2Size.getKnownMinSize() ? VT1Size : VT2Size; |
2209 | |
2210 | Type *Ty1 = VT1.getTypeForEVT(*getContext()); |
2211 | Type *Ty2 = VT2.getTypeForEVT(*getContext()); |
2212 | const DataLayout &DL = getDataLayout(); |
2213 | Align Align = std::max(DL.getPrefTypeAlign(Ty1), DL.getPrefTypeAlign(Ty2)); |
2214 | return CreateStackTemporary(Bytes, Align); |
2215 | } |
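     |
     | // Sketch (DAG assumed): the slot is sized for the larger of the two types
     | // and aligned for both.
     | // \code
     | //   SDValue Slot = DAG.CreateStackTemporary(MVT::v4i32, MVT::f64);
     | //   // 16-byte object (v4i32 store size), alignment
     | //   // max(PrefAlign(v4i32), PrefAlign(f64)) for the current target
     | // \endcode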
2216 | |
2217 | SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2, |
2218 | ISD::CondCode Cond, const SDLoc &dl) { |
2219 | EVT OpVT = N1.getValueType(); |
2220 | |
2221 | // These setcc operations always fold. |
2222 | switch (Cond) { |
2223 | default: break; |
2224 | case ISD::SETFALSE: |
2225 | case ISD::SETFALSE2: return getBoolConstant(false, dl, VT, OpVT); |
2226 | case ISD::SETTRUE: |
2227 | case ISD::SETTRUE2: return getBoolConstant(true, dl, VT, OpVT); |
2228 | |
2229 | case ISD::SETOEQ: |
2230 | case ISD::SETOGT: |
2231 | case ISD::SETOGE: |
2232 | case ISD::SETOLT: |
2233 | case ISD::SETOLE: |
2234 | case ISD::SETONE: |
2235 | case ISD::SETO: |
2236 | case ISD::SETUO: |
2237 | case ISD::SETUEQ: |
2238 | case ISD::SETUNE: |
2239 | assert(!OpVT.isInteger() && "Illegal setcc for integer!");
2240 | break; |
2241 | } |
2242 | |
2243 | if (OpVT.isInteger()) { |
2244 | // For EQ and NE, we can always pick a value for the undef to make the |
2245 | // predicate pass or fail, so we can return undef. |
2246 | // Matches behavior in llvm::ConstantFoldCompareInstruction. |
2247 | // icmp eq/ne X, undef -> undef. |
2248 | if ((N1.isUndef() || N2.isUndef()) && |
2249 | (Cond == ISD::SETEQ || Cond == ISD::SETNE)) |
2250 | return getUNDEF(VT); |
2251 | |
2252 | // If both operands are undef, we can return undef for int comparison. |
2253 | // icmp undef, undef -> undef. |
2254 | if (N1.isUndef() && N2.isUndef()) |
2255 | return getUNDEF(VT); |
2256 | |
2257 | // icmp X, X -> true/false |
2258 | // icmp X, undef -> true/false because undef could be X. |
2259 | if (N1 == N2) |
2260 | return getBoolConstant(ISD::isTrueWhenEqual(Cond), dl, VT, OpVT); |
2261 | } |
2262 | |
2263 | if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) { |
2264 | const APInt &C2 = N2C->getAPIntValue(); |
2265 | if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) { |
2266 | const APInt &C1 = N1C->getAPIntValue(); |
2267 | |
2268 | switch (Cond) { |
2269 | default: llvm_unreachable("Unknown integer setcc!");
2270 | case ISD::SETEQ: return getBoolConstant(C1 == C2, dl, VT, OpVT); |
2271 | case ISD::SETNE: return getBoolConstant(C1 != C2, dl, VT, OpVT); |
2272 | case ISD::SETULT: return getBoolConstant(C1.ult(C2), dl, VT, OpVT); |
2273 | case ISD::SETUGT: return getBoolConstant(C1.ugt(C2), dl, VT, OpVT); |
2274 | case ISD::SETULE: return getBoolConstant(C1.ule(C2), dl, VT, OpVT); |
2275 | case ISD::SETUGE: return getBoolConstant(C1.uge(C2), dl, VT, OpVT); |
2276 | case ISD::SETLT: return getBoolConstant(C1.slt(C2), dl, VT, OpVT); |
2277 | case ISD::SETGT: return getBoolConstant(C1.sgt(C2), dl, VT, OpVT); |
2278 | case ISD::SETLE: return getBoolConstant(C1.sle(C2), dl, VT, OpVT); |
2279 | case ISD::SETGE: return getBoolConstant(C1.sge(C2), dl, VT, OpVT); |
2280 | } |
2281 | } |
2282 | } |
2283 | |
2284 | auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1); |
2285 | auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2); |
2286 | |
2287 | if (N1CFP && N2CFP) { |
2288 | APFloat::cmpResult R = N1CFP->getValueAPF().compare(N2CFP->getValueAPF()); |
2289 | switch (Cond) { |
2290 | default: break; |
2291 | case ISD::SETEQ: if (R==APFloat::cmpUnordered) |
2292 | return getUNDEF(VT); |
2293 | LLVM_FALLTHROUGH;
2294 | case ISD::SETOEQ: return getBoolConstant(R==APFloat::cmpEqual, dl, VT, |
2295 | OpVT); |
2296 | case ISD::SETNE: if (R==APFloat::cmpUnordered) |
2297 | return getUNDEF(VT); |
2298 | LLVM_FALLTHROUGH;
2299 | case ISD::SETONE: return getBoolConstant(R==APFloat::cmpGreaterThan || |
2300 | R==APFloat::cmpLessThan, dl, VT, |
2301 | OpVT); |
2302 | case ISD::SETLT: if (R==APFloat::cmpUnordered) |
2303 | return getUNDEF(VT); |
2304 | LLVM_FALLTHROUGH;
2305 | case ISD::SETOLT: return getBoolConstant(R==APFloat::cmpLessThan, dl, VT, |
2306 | OpVT); |
2307 | case ISD::SETGT: if (R==APFloat::cmpUnordered) |
2308 | return getUNDEF(VT); |
2309 | LLVM_FALLTHROUGH;
2310 | case ISD::SETOGT: return getBoolConstant(R==APFloat::cmpGreaterThan, dl, |
2311 | VT, OpVT); |
2312 | case ISD::SETLE: if (R==APFloat::cmpUnordered) |
2313 | return getUNDEF(VT); |
2314 | LLVM_FALLTHROUGH;
2315 | case ISD::SETOLE: return getBoolConstant(R==APFloat::cmpLessThan || |
2316 | R==APFloat::cmpEqual, dl, VT, |
2317 | OpVT); |
2318 | case ISD::SETGE: if (R==APFloat::cmpUnordered) |
2319 | return getUNDEF(VT); |
2320 | LLVM_FALLTHROUGH;
2321 | case ISD::SETOGE: return getBoolConstant(R==APFloat::cmpGreaterThan || |
2322 | R==APFloat::cmpEqual, dl, VT, OpVT); |
2323 | case ISD::SETO: return getBoolConstant(R!=APFloat::cmpUnordered, dl, VT, |
2324 | OpVT); |
2325 | case ISD::SETUO: return getBoolConstant(R==APFloat::cmpUnordered, dl, VT, |
2326 | OpVT); |
2327 | case ISD::SETUEQ: return getBoolConstant(R==APFloat::cmpUnordered || |
2328 | R==APFloat::cmpEqual, dl, VT, |
2329 | OpVT); |
2330 | case ISD::SETUNE: return getBoolConstant(R!=APFloat::cmpEqual, dl, VT, |
2331 | OpVT); |
2332 | case ISD::SETULT: return getBoolConstant(R==APFloat::cmpUnordered || |
2333 | R==APFloat::cmpLessThan, dl, VT, |
2334 | OpVT); |
2335 | case ISD::SETUGT: return getBoolConstant(R==APFloat::cmpGreaterThan || |
2336 | R==APFloat::cmpUnordered, dl, VT, |
2337 | OpVT); |
2338 | case ISD::SETULE: return getBoolConstant(R!=APFloat::cmpGreaterThan, dl, |
2339 | VT, OpVT); |
2340 | case ISD::SETUGE: return getBoolConstant(R!=APFloat::cmpLessThan, dl, VT, |
2341 | OpVT); |
2342 | } |
2343 | } else if (N1CFP && OpVT.isSimple() && !N2.isUndef()) { |
2344 | // Ensure that the constant occurs on the RHS. |
2345 | ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond); |
2346 | if (!TLI->isCondCodeLegal(SwappedCond, OpVT.getSimpleVT())) |
2347 | return SDValue(); |
2348 | return getSetCC(dl, VT, N2, N1, SwappedCond); |
2349 | } else if ((N2CFP && N2CFP->getValueAPF().isNaN()) || |
2350 | (OpVT.isFloatingPoint() && (N1.isUndef() || N2.isUndef()))) { |
2351 | // If an operand is known to be a nan (or undef that could be a nan), we can |
2352 | // fold it. |
2353 | // Choosing NaN for the undef will always make unordered comparison succeed |
2354 | // and ordered comparison fails. |
2355 | // Matches behavior in llvm::ConstantFoldCompareInstruction. |
2356 | switch (ISD::getUnorderedFlavor(Cond)) { |
2357 | default: |
2358 | llvm_unreachable("Unknown flavor!")__builtin_unreachable(); |
2359 | case 0: // Known false. |
2360 | return getBoolConstant(false, dl, VT, OpVT); |
2361 | case 1: // Known true. |
2362 | return getBoolConstant(true, dl, VT, OpVT); |
2363 | case 2: // Undefined. |
2364 | return getUNDEF(VT); |
2365 | } |
2366 | } |
2367 | |
2368 | // Could not fold it. |
2369 | return SDValue(); |
2370 | } |
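     |
     | // Example folds (DAG/dl assumed, VT an i1 result type):
     | // \code
     | //   // FoldSetCC(VT, getConstant(3, ...), getConstant(5, ...),
     | //   //           ISD::SETULT, dl)          -> constant true
     | //   // FoldSetCC(VT, X, X, ISD::SETEQ, dl) -> constant true (integer X)
     | //   // FoldSetCC(VT, X, Y, ISD::SETLT, dl) -> SDValue() if not foldable
     | // \endcode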
2371 | |
2372 | /// See if the specified operand can be simplified with the knowledge that only |
2373 | /// the bits specified by DemandedBits are used. |
2374 | /// TODO: really we should be making this into the DAG equivalent of |
2375 | /// SimplifyMultipleUseDemandedBits and not generate any new nodes. |
2376 | SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits) { |
2377 | EVT VT = V.getValueType(); |
2378 | |
2379 | if (VT.isScalableVector()) |
2380 | return SDValue(); |
2381 | |
2382 | APInt DemandedElts = VT.isVector() |
2383 | ? APInt::getAllOnesValue(VT.getVectorNumElements()) |
2384 | : APInt(1, 1); |
2385 | return GetDemandedBits(V, DemandedBits, DemandedElts); |
2386 | } |
2387 | |
2388 | /// See if the specified operand can be simplified with the knowledge that only |
2389 | /// the bits specified by DemandedBits are used in the elements specified by |
2390 | /// DemandedElts. |
2391 | /// TODO: really we should be making this into the DAG equivalent of |
2392 | /// SimplifyMultipleUseDemandedBits and not generate any new nodes. |
2393 | SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits, |
2394 | const APInt &DemandedElts) { |
2395 | switch (V.getOpcode()) { |
2396 | default: |
2397 | return TLI->SimplifyMultipleUseDemandedBits(V, DemandedBits, DemandedElts, |
2398 | *this, 0); |
2399 | case ISD::Constant: { |
2400 | const APInt &CVal = cast<ConstantSDNode>(V)->getAPIntValue(); |
2401 | APInt NewVal = CVal & DemandedBits; |
2402 | if (NewVal != CVal) |
2403 | return getConstant(NewVal, SDLoc(V), V.getValueType()); |
2404 | break; |
2405 | } |
2406 | case ISD::SRL: |
2407 | // Only look at single-use SRLs. |
2408 | if (!V.getNode()->hasOneUse()) |
2409 | break; |
2410 | if (auto *RHSC = dyn_cast<ConstantSDNode>(V.getOperand(1))) { |
2411 | // See if we can recursively simplify the LHS. |
2412 | unsigned Amt = RHSC->getZExtValue(); |
2413 | |
2414 | // Watch out for shift count overflow though. |
2415 | if (Amt >= DemandedBits.getBitWidth()) |
2416 | break; |
2417 | APInt SrcDemandedBits = DemandedBits << Amt; |
2418 | if (SDValue SimplifyLHS = |
2419 | GetDemandedBits(V.getOperand(0), SrcDemandedBits)) |
2420 | return getNode(ISD::SRL, SDLoc(V), V.getValueType(), SimplifyLHS, |
2421 | V.getOperand(1)); |
2422 | } |
2423 | break; |
2424 | } |
2425 | return SDValue(); |
2426 | } |
2427 | |
2428 | /// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We |
2429 | /// use this predicate to simplify operations downstream. |
2430 | bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const { |
2431 | unsigned BitWidth = Op.getScalarValueSizeInBits(); |
2432 | return MaskedValueIsZero(Op, APInt::getSignMask(BitWidth), Depth); |
2433 | } |
2434 | |
2435 | /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use |
2436 | /// this predicate to simplify operations downstream. Mask is known to be zero |
2437 | /// for bits that V cannot have. |
2438 | bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask, |
2439 | unsigned Depth) const { |
2440 | return Mask.isSubsetOf(computeKnownBits(V, Depth).Zero); |
2441 | } |
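     |
     | // Worked example: if computeKnownBits(V) proves Known.Zero == 0xFFFF0000
     | // (upper half known clear), then MaskedValueIsZero(V, APInt(32,
     | // 0xFF000000)) is true, because the mask is a subset of the known-zero
     | // bits.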
2442 | |
2443 | /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero in |
2444 | /// DemandedElts. We use this predicate to simplify operations downstream. |
2445 | /// Mask is known to be zero for bits that V cannot have. |
2446 | bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask, |
2447 | const APInt &DemandedElts, |
2448 | unsigned Depth) const { |
2449 | return Mask.isSubsetOf(computeKnownBits(V, DemandedElts, Depth).Zero); |
2450 | } |
2451 | |
2452 | /// MaskedValueIsAllOnes - Return true if '(Op & Mask) == Mask'. |
2453 | bool SelectionDAG::MaskedValueIsAllOnes(SDValue V, const APInt &Mask, |
2454 | unsigned Depth) const { |
2455 | return Mask.isSubsetOf(computeKnownBits(V, Depth).One); |
2456 | } |
2457 | |
2458 | /// isSplatValue - Return true if the vector V has the same value |
2459 | /// across all DemandedElts. For scalable vectors it does not make |
2460 | /// sense to specify which elements are demanded or undefined, therefore |
2461 | /// they are simply ignored. |
2462 | bool SelectionDAG::isSplatValue(SDValue V, const APInt &DemandedElts, |
2463 | APInt &UndefElts, unsigned Depth) { |
2464 | EVT VT = V.getValueType(); |
2465 | assert(VT.isVector() && "Vector type expected");
2466 | |
2467 | if (!VT.isScalableVector() && !DemandedElts) |
2468 | return false; // No demanded elts, better to assume we don't know anything. |
2469 | |
2470 | if (Depth >= MaxRecursionDepth) |
2471 | return false; // Limit search depth. |
2472 | |
2473 | // Deal with some common cases here that work for both fixed and scalable |
2474 | // vector types. |
2475 | switch (V.getOpcode()) { |
2476 | case ISD::SPLAT_VECTOR: |
2477 | UndefElts = V.getOperand(0).isUndef() |
2478 | ? APInt::getAllOnesValue(DemandedElts.getBitWidth()) |
2479 | : APInt(DemandedElts.getBitWidth(), 0); |
2480 | return true; |
2481 | case ISD::ADD: |
2482 | case ISD::SUB: |
2483 | case ISD::AND: |
2484 | case ISD::XOR: |
2485 | case ISD::OR: { |
2486 | APInt UndefLHS, UndefRHS; |
2487 | SDValue LHS = V.getOperand(0); |
2488 | SDValue RHS = V.getOperand(1); |
2489 | if (isSplatValue(LHS, DemandedElts, UndefLHS, Depth + 1) && |
2490 | isSplatValue(RHS, DemandedElts, UndefRHS, Depth + 1)) { |
2491 | UndefElts = UndefLHS | UndefRHS; |
2492 | return true; |
2493 | } |
2494 | return false; |
2495 | } |
2496 | case ISD::ABS: |
2497 | case ISD::TRUNCATE: |
2498 | case ISD::SIGN_EXTEND: |
2499 | case ISD::ZERO_EXTEND: |
2500 | return isSplatValue(V.getOperand(0), DemandedElts, UndefElts, Depth + 1); |
2501 | } |
2502 | |
2503 | // We don't support other cases than those above for scalable vectors at |
2504 | // the moment. |
2505 | if (VT.isScalableVector()) |
2506 | return false; |
2507 | |
2508 | unsigned NumElts = VT.getVectorNumElements(); |
2509 | assert(NumElts == DemandedElts.getBitWidth() && "Vector size mismatch");
2510 | UndefElts = APInt::getNullValue(NumElts); |
2511 | |
2512 | switch (V.getOpcode()) { |
2513 | case ISD::BUILD_VECTOR: { |
2514 | SDValue Scl; |
2515 | for (unsigned i = 0; i != NumElts; ++i) { |
2516 | SDValue Op = V.getOperand(i); |
2517 | if (Op.isUndef()) { |
2518 | UndefElts.setBit(i); |
2519 | continue; |
2520 | } |
2521 | if (!DemandedElts[i]) |
2522 | continue; |
2523 | if (Scl && Scl != Op) |
2524 | return false; |
2525 | Scl = Op; |
2526 | } |
2527 | return true; |
2528 | } |
2529 | case ISD::VECTOR_SHUFFLE: { |
2530 | // Check if this is a shuffle node doing a splat. |
2531 | // TODO: Do we need to handle shuffle(splat, undef, mask)? |
2532 | int SplatIndex = -1; |
2533 | ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(V)->getMask(); |
2534 | for (int i = 0; i != (int)NumElts; ++i) { |
2535 | int M = Mask[i]; |
2536 | if (M < 0) { |
2537 | UndefElts.setBit(i); |
2538 | continue; |
2539 | } |
2540 | if (!DemandedElts[i]) |
2541 | continue; |
2542 | if (0 <= SplatIndex && SplatIndex != M) |
2543 | return false; |
2544 | SplatIndex = M; |
2545 | } |
2546 | return true; |
2547 | } |
2548 | case ISD::EXTRACT_SUBVECTOR: { |
2549 | // Offset the demanded elts by the subvector index. |
2550 | SDValue Src = V.getOperand(0); |
2551 | // We don't support scalable vectors at the moment. |
2552 | if (Src.getValueType().isScalableVector()) |
2553 | return false; |
2554 | uint64_t Idx = V.getConstantOperandVal(1); |
2555 | unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); |
2556 | APInt UndefSrcElts; |
2557 | APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx); |
2558 | if (isSplatValue(Src, DemandedSrcElts, UndefSrcElts, Depth + 1)) { |
2559 | UndefElts = UndefSrcElts.extractBits(NumElts, Idx); |
2560 | return true; |
2561 | } |
2562 | break; |
2563 | } |
2564 | } |
2565 | |
2566 | return false; |
2567 | } |
2568 | |
2569 | /// Helper wrapper to main isSplatValue function. |
2570 | bool SelectionDAG::isSplatValue(SDValue V, bool AllowUndefs) { |
2571 | EVT VT = V.getValueType(); |
2572 | assert(VT.isVector() && "Vector type expected");
2573 | |
2574 | APInt UndefElts; |
2575 | APInt DemandedElts; |
2576 | |
2577 | // For now we don't support this with scalable vectors. |
2578 | if (!VT.isScalableVector()) |
2579 | DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements()); |
2580 | return isSplatValue(V, DemandedElts, UndefElts) && |
2581 | (AllowUndefs || !UndefElts); |
2582 | } |
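     |
     | // Sketch (DAG assumed): for a fixed v4i32 BUILD_VECTOR <x, x, undef, x>,
     | // isSplatValue(V, /*AllowUndefs=*/true) is true while
     | // isSplatValue(V, /*AllowUndefs=*/false) is false, since the undef lane
     | // is reported back in UndefElts.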
2583 | |
2584 | SDValue SelectionDAG::getSplatSourceVector(SDValue V, int &SplatIdx) { |
2585 | V = peekThroughExtractSubvectors(V); |
2586 | |
2587 | EVT VT = V.getValueType(); |
2588 | unsigned Opcode = V.getOpcode(); |
2589 | switch (Opcode) { |
2590 | default: { |
2591 | APInt UndefElts; |
2592 | APInt DemandedElts; |
2593 | |
2594 | if (!VT.isScalableVector()) |
2595 | DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements()); |
2596 | |
2597 | if (isSplatValue(V, DemandedElts, UndefElts)) { |
2598 | if (VT.isScalableVector()) { |
2599 | // DemandedElts and UndefElts are ignored for scalable vectors, since |
2600 | // the only supported cases are SPLAT_VECTOR nodes. |
2601 | SplatIdx = 0; |
2602 | } else { |
2603 | // Handle case where all demanded elements are UNDEF. |
2604 | if (DemandedElts.isSubsetOf(UndefElts)) { |
2605 | SplatIdx = 0; |
2606 | return getUNDEF(VT); |
2607 | } |
2608 | SplatIdx = (UndefElts & DemandedElts).countTrailingOnes(); |
2609 | } |
2610 | return V; |
2611 | } |
2612 | break; |
2613 | } |
2614 | case ISD::SPLAT_VECTOR: |
2615 | SplatIdx = 0; |
2616 | return V; |
2617 | case ISD::VECTOR_SHUFFLE: { |
2618 | if (VT.isScalableVector()) |
2619 | return SDValue(); |
2620 | |
2621 | // Check if this is a shuffle node doing a splat. |
2622 | // TODO - remove this and rely purely on SelectionDAG::isSplatValue, |
2623 | // getTargetVShiftNode currently struggles without the splat source. |
2624 | auto *SVN = cast<ShuffleVectorSDNode>(V); |
2625 | if (!SVN->isSplat()) |
2626 | break; |
2627 | int Idx = SVN->getSplatIndex(); |
2628 | int NumElts = V.getValueType().getVectorNumElements(); |
2629 | SplatIdx = Idx % NumElts; |
2630 | return V.getOperand(Idx / NumElts); |
2631 | } |
2632 | } |
2633 | |
2634 | return SDValue(); |
2635 | } |
2636 | |
2637 | SDValue SelectionDAG::getSplatValue(SDValue V, bool LegalTypes) { |
2638 | int SplatIdx; |
2639 | if (SDValue SrcVector = getSplatSourceVector(V, SplatIdx)) { |
2640 | EVT SVT = SrcVector.getValueType().getScalarType(); |
2641 | EVT LegalSVT = SVT; |
2642 | if (LegalTypes && !TLI->isTypeLegal(SVT)) { |
2643 | if (!SVT.isInteger()) |
2644 | return SDValue(); |
2645 | LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT); |
2646 | if (LegalSVT.bitsLT(SVT)) |
2647 | return SDValue(); |
2648 | } |
2649 | return getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V), LegalSVT, SrcVector, |
2650 | getVectorIdxConstant(SplatIdx, SDLoc(V))); |
2651 | } |
2652 | return SDValue(); |
2653 | } |
2654 | |
2655 | const APInt * |
2656 | SelectionDAG::getValidShiftAmountConstant(SDValue V, |
2657 | const APInt &DemandedElts) const { |
2658 | assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
2659 |         V.getOpcode() == ISD::SRA) &&
2660 |        "Unknown shift node");
2661 | unsigned BitWidth = V.getScalarValueSizeInBits(); |
2662 | if (ConstantSDNode *SA = isConstOrConstSplat(V.getOperand(1), DemandedElts)) { |
2663 | // Shifting more than the bitwidth is not valid. |
2664 | const APInt &ShAmt = SA->getAPIntValue(); |
2665 | if (ShAmt.ult(BitWidth)) |
2666 | return &ShAmt; |
2667 | } |
2668 | return nullptr; |
2669 | } |
2670 | |
2671 | const APInt *SelectionDAG::getValidMinimumShiftAmountConstant( |
2672 | SDValue V, const APInt &DemandedElts) const { |
2673 | assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
2674 |         V.getOpcode() == ISD::SRA) &&
2675 |        "Unknown shift node");
2676 | if (const APInt *ValidAmt = getValidShiftAmountConstant(V, DemandedElts)) |
2677 | return ValidAmt; |
2678 | unsigned BitWidth = V.getScalarValueSizeInBits(); |
2679 | auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1)); |
2680 | if (!BV) |
2681 | return nullptr; |
2682 | const APInt *MinShAmt = nullptr; |
2683 | for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) { |
2684 | if (!DemandedElts[i]) |
2685 | continue; |
2686 | auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i)); |
2687 | if (!SA) |
2688 | return nullptr; |
2689 | // Shifting more than the bitwidth is not valid. |
2690 | const APInt &ShAmt = SA->getAPIntValue(); |
2691 | if (ShAmt.uge(BitWidth)) |
2692 | return nullptr; |
2693 | if (MinShAmt && MinShAmt->ule(ShAmt)) |
2694 | continue; |
2695 | MinShAmt = &ShAmt; |
2696 | } |
2697 | return MinShAmt; |
2698 | } |
2699 | |
2700 | const APInt *SelectionDAG::getValidMaximumShiftAmountConstant( |
2701 | SDValue V, const APInt &DemandedElts) const { |
2702 | assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
2703 |         V.getOpcode() == ISD::SRA) &&
2704 |        "Unknown shift node");
2705 | if (const APInt *ValidAmt = getValidShiftAmountConstant(V, DemandedElts)) |
2706 | return ValidAmt; |
2707 | unsigned BitWidth = V.getScalarValueSizeInBits(); |
2708 | auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1)); |
2709 | if (!BV) |
2710 | return nullptr; |
2711 | const APInt *MaxShAmt = nullptr; |
2712 | for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) { |
2713 | if (!DemandedElts[i]) |
2714 | continue; |
2715 | auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i)); |
2716 | if (!SA) |
2717 | return nullptr; |
2718 | // Shifting more than the bitwidth is not valid. |
2719 | const APInt &ShAmt = SA->getAPIntValue(); |
2720 | if (ShAmt.uge(BitWidth)) |
2721 | return nullptr; |
2722 | if (MaxShAmt && MaxShAmt->uge(ShAmt)) |
2723 | continue; |
2724 | MaxShAmt = &ShAmt; |
2725 | } |
2726 | return MaxShAmt; |
2727 | } |
2728 | |
2729 | /// Determine which bits of Op are known to be either zero or one and return |
2730 | /// them in Known. For vectors, the known bits are those that are shared by |
2731 | /// every vector element. |
2732 | KnownBits SelectionDAG::computeKnownBits(SDValue Op, unsigned Depth) const { |
2733 | EVT VT = Op.getValueType(); |
2734 | |
2735 | // TODO: Until we have a plan for how to represent demanded elements for
2736 | // scalable vectors, we can just bail out for now. |
2737 | if (Op.getValueType().isScalableVector()) { |
2738 | unsigned BitWidth = Op.getScalarValueSizeInBits(); |
2739 | return KnownBits(BitWidth); |
2740 | } |
2741 | |
2742 | APInt DemandedElts = VT.isVector() |
2743 | ? APInt::getAllOnesValue(VT.getVectorNumElements()) |
2744 | : APInt(1, 1); |
2745 | return computeKnownBits(Op, DemandedElts, Depth); |
2746 | } |
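     |
     | // Sketch: for a fixed-width vector this wrapper demands every lane, e.g.
     | // for v4i32 it passes APInt::getAllOnesValue(4), so the returned
     | // KnownBits only reports bits common to all four lanes.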
2747 | |
2748 | /// Determine which bits of Op are known to be either zero or one and return |
2749 | /// them in Known. The DemandedElts argument allows us to only collect the known |
2750 | /// bits that are shared by the requested vector elements. |
2751 | KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts, |
2752 | unsigned Depth) const { |
2753 | unsigned BitWidth = Op.getScalarValueSizeInBits(); |
2754 | |
2755 | KnownBits Known(BitWidth); // Don't know anything. |
2756 | |
2757 | // TODO: Until we have a plan for how to represent demanded elements for
2758 | // scalable vectors, we can just bail out for now. |
2759 | if (Op.getValueType().isScalableVector()) |
2760 | return Known; |
2761 | |
2762 | if (auto *C = dyn_cast<ConstantSDNode>(Op)) { |
2763 | // We know all of the bits for a constant! |
2764 | return KnownBits::makeConstant(C->getAPIntValue()); |
2765 | } |
2766 | if (auto *C = dyn_cast<ConstantFPSDNode>(Op)) { |
2767 | // We know all of the bits for a constant fp! |
2768 | return KnownBits::makeConstant(C->getValueAPF().bitcastToAPInt()); |
2769 | } |
2770 | |
2771 | if (Depth >= MaxRecursionDepth) |
2772 | return Known; // Limit search depth. |
2773 | |
2774 | KnownBits Known2; |
2775 | unsigned NumElts = DemandedElts.getBitWidth(); |
2776 | assert((!Op.getValueType().isVector() ||
2777 |         NumElts == Op.getValueType().getVectorNumElements()) &&
2778 |        "Unexpected vector size");
2779 | |
2780 | if (!DemandedElts) |
2781 | return Known; // No demanded elts, better to assume we don't know anything. |
2782 | |
2783 | unsigned Opcode = Op.getOpcode(); |
2784 | switch (Opcode) { |
2785 | case ISD::BUILD_VECTOR: |
2786 | // Collect the known bits that are shared by every demanded vector element. |
2787 | Known.Zero.setAllBits(); Known.One.setAllBits(); |
2788 | for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) { |
2789 | if (!DemandedElts[i]) |
2790 | continue; |
2791 | |
2792 | SDValue SrcOp = Op.getOperand(i); |
2793 | Known2 = computeKnownBits(SrcOp, Depth + 1); |
2794 | |
2795 | // BUILD_VECTOR can implicitly truncate sources, we must handle this. |
2796 | if (SrcOp.getValueSizeInBits() != BitWidth) { |
2797 | assert(SrcOp.getValueSizeInBits() > BitWidth &&
2798 |        "Expected BUILD_VECTOR implicit truncation");
2799 | Known2 = Known2.trunc(BitWidth); |
2800 | } |
2801 | |
2802 | // Known bits are the values that are shared by every demanded element. |
2803 | Known = KnownBits::commonBits(Known, Known2); |
2804 | |
2805 | // If we don't know any bits, early out. |
2806 | if (Known.isUnknown()) |
2807 | break; |
2808 | } |
2809 | break; |
2810 | case ISD::VECTOR_SHUFFLE: { |
2811 | // Collect the known bits that are shared by every vector element referenced |
2812 | // by the shuffle. |
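    // e.g. for a v4i32 shuffle with mask <0, 5, 2, 7>, demanding result
    // elements {0, 2} demands only elements {0, 2} of the LHS and nothing
    // from the RHS.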
2813 | APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0); |
2814 | Known.Zero.setAllBits(); Known.One.setAllBits(); |
2815 | const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op); |
2816 |     assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
2817 | for (unsigned i = 0; i != NumElts; ++i) { |
2818 | if (!DemandedElts[i]) |
2819 | continue; |
2820 | |
2821 | int M = SVN->getMaskElt(i); |
2822 | if (M < 0) { |
2823 | // For UNDEF elements, we don't know anything about the common state of |
2824 | // the shuffle result. |
2825 | Known.resetAll(); |
2826 | DemandedLHS.clearAllBits(); |
2827 | DemandedRHS.clearAllBits(); |
2828 | break; |
2829 | } |
2830 | |
2831 | if ((unsigned)M < NumElts) |
2832 | DemandedLHS.setBit((unsigned)M % NumElts); |
2833 | else |
2834 | DemandedRHS.setBit((unsigned)M % NumElts); |
2835 | } |
2836 | // Known bits are the values that are shared by every demanded element. |
2837 | if (!!DemandedLHS) { |
2838 | SDValue LHS = Op.getOperand(0); |
2839 | Known2 = computeKnownBits(LHS, DemandedLHS, Depth + 1); |
2840 | Known = KnownBits::commonBits(Known, Known2); |
2841 | } |
2842 | // If we don't know any bits, early out. |
2843 | if (Known.isUnknown()) |
2844 | break; |
2845 | if (!!DemandedRHS) { |
2846 | SDValue RHS = Op.getOperand(1); |
2847 | Known2 = computeKnownBits(RHS, DemandedRHS, Depth + 1); |
2848 | Known = KnownBits::commonBits(Known, Known2); |
2849 | } |
2850 | break; |
2851 | } |
2852 | case ISD::CONCAT_VECTORS: { |
2853 | // Split DemandedElts and test each of the demanded subvectors. |
2854 | Known.Zero.setAllBits(); Known.One.setAllBits(); |
2855 | EVT SubVectorVT = Op.getOperand(0).getValueType(); |
2856 | unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements(); |
2857 | unsigned NumSubVectors = Op.getNumOperands(); |
2858 | for (unsigned i = 0; i != NumSubVectors; ++i) { |
2859 | APInt DemandedSub = |
2860 | DemandedElts.extractBits(NumSubVectorElts, i * NumSubVectorElts); |
2861 | if (!!DemandedSub) { |
2862 | SDValue Sub = Op.getOperand(i); |
2863 | Known2 = computeKnownBits(Sub, DemandedSub, Depth + 1); |
2864 | Known = KnownBits::commonBits(Known, Known2); |
2865 | } |
2866 | // If we don't know any bits, early out. |
2867 | if (Known.isUnknown()) |
2868 | break; |
2869 | } |
2870 | break; |
2871 | } |
2872 | case ISD::INSERT_SUBVECTOR: { |
2873 |     // Demand any elements from the subvector and the remainder from the src
2874 |     // it is inserted into.
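    // e.g. inserting a v2i32 subvector at index 2 of a v8i32: demanded
    // elements 2-3 map to subvector elements 0-1, and the rest stay with Src.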
2875 | SDValue Src = Op.getOperand(0); |
2876 | SDValue Sub = Op.getOperand(1); |
2877 | uint64_t Idx = Op.getConstantOperandVal(2); |
2878 | unsigned NumSubElts = Sub.getValueType().getVectorNumElements(); |
2879 | APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx); |
2880 | APInt DemandedSrcElts = DemandedElts; |
2881 | DemandedSrcElts.insertBits(APInt::getNullValue(NumSubElts), Idx); |
2882 | |
2883 | Known.One.setAllBits(); |
2884 | Known.Zero.setAllBits(); |
2885 | if (!!DemandedSubElts) { |
2886 | Known = computeKnownBits(Sub, DemandedSubElts, Depth + 1); |
2887 | if (Known.isUnknown()) |
2888 | break; // early-out. |
2889 | } |
2890 | if (!!DemandedSrcElts) { |
2891 | Known2 = computeKnownBits(Src, DemandedSrcElts, Depth + 1); |
2892 | Known = KnownBits::commonBits(Known, Known2); |
2893 | } |
2894 | break; |
2895 | } |
2896 | case ISD::EXTRACT_SUBVECTOR: { |
2897 | // Offset the demanded elts by the subvector index. |
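    // e.g. extracting a v2i32 at index 4 of a v8i32 source: demanded result
    // elements {0, 1} become demanded source elements {4, 5}.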
2898 | SDValue Src = Op.getOperand(0); |
2899 | // Bail until we can represent demanded elements for scalable vectors. |
2900 | if (Src.getValueType().isScalableVector()) |
2901 | break; |
2902 | uint64_t Idx = Op.getConstantOperandVal(1); |
2903 | unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); |
2904 | APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx); |
2905 | Known = computeKnownBits(Src, DemandedSrcElts, Depth + 1); |
2906 | break; |
2907 | } |
2908 | case ISD::SCALAR_TO_VECTOR: { |
2909 |     // We know as much about scalar_to_vector as we know about its source,
2910 |     // which becomes the first element of an otherwise unknown vector.
2911 | if (DemandedElts != 1) |
2912 | break; |
2913 | |
2914 | SDValue N0 = Op.getOperand(0); |
2915 | Known = computeKnownBits(N0, Depth + 1); |
2916 | if (N0.getValueSizeInBits() != BitWidth) |
2917 | Known = Known.trunc(BitWidth); |
2918 | |
2919 | break; |
2920 | } |
2921 | case ISD::BITCAST: { |
2922 | SDValue N0 = Op.getOperand(0); |
2923 | EVT SubVT = N0.getValueType(); |
2924 | unsigned SubBitWidth = SubVT.getScalarSizeInBits(); |
2925 | |
2926 | // Ignore bitcasts from unsupported types. |
2927 | if (!(SubVT.isInteger() || SubVT.isFloatingPoint())) |
2928 | break; |
2929 | |
2930 | // Fast handling of 'identity' bitcasts. |
2931 | if (BitWidth == SubBitWidth) { |
2932 | Known = computeKnownBits(N0, DemandedElts, Depth + 1); |
2933 | break; |
2934 | } |
2935 | |
2936 | bool IsLE = getDataLayout().isLittleEndian(); |
2937 | |
2938 | // Bitcast 'small element' vector to 'large element' scalar/vector. |
2939 | if ((BitWidth % SubBitWidth) == 0) { |
2940 |       assert(N0.getValueType().isVector() && "Expected bitcast from vector");
2941 | |
2942 | // Collect known bits for the (larger) output by collecting the known |
2943 | // bits from each set of sub elements and shift these into place. |
2944 | // We need to separately call computeKnownBits for each set of |
2945 | // sub elements as the knownbits for each is likely to be different. |
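      // e.g. for v4i8 -> v2i16 on a little-endian target, SubScale = 2: the
      // even i8 elements supply bits 0-7 of each i16 lane and the odd i8
      // elements supply bits 8-15.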
2946 | unsigned SubScale = BitWidth / SubBitWidth; |
2947 | APInt SubDemandedElts(NumElts * SubScale, 0); |
2948 | for (unsigned i = 0; i != NumElts; ++i) |
2949 | if (DemandedElts[i]) |
2950 | SubDemandedElts.setBit(i * SubScale); |
2951 | |
2952 | for (unsigned i = 0; i != SubScale; ++i) { |
2953 | Known2 = computeKnownBits(N0, SubDemandedElts.shl(i), |
2954 | Depth + 1); |
2955 | unsigned Shifts = IsLE ? i : SubScale - 1 - i; |
2956 | Known.insertBits(Known2, SubBitWidth * Shifts); |
2957 | } |
2958 | } |
2959 | |
2960 | // Bitcast 'large element' scalar/vector to 'small element' vector. |
2961 | if ((SubBitWidth % BitWidth) == 0) { |
2962 |       assert(Op.getValueType().isVector() && "Expected bitcast to vector");
2963 | |
2964 | // Collect known bits for the (smaller) output by collecting the known |
2965 | // bits from the overlapping larger input elements and extracting the |
2966 | // sub sections we actually care about. |
2967 | unsigned SubScale = SubBitWidth / BitWidth; |
2968 | APInt SubDemandedElts(NumElts / SubScale, 0); |
2969 | for (unsigned i = 0; i != NumElts; ++i) |
2970 | if (DemandedElts[i]) |
2971 | SubDemandedElts.setBit(i / SubScale); |
2972 | |
2973 | Known2 = computeKnownBits(N0, SubDemandedElts, Depth + 1); |
2974 | |
2975 | Known.Zero.setAllBits(); Known.One.setAllBits(); |
2976 | for (unsigned i = 0; i != NumElts; ++i) |
2977 | if (DemandedElts[i]) { |
2978 | unsigned Shifts = IsLE ? i : NumElts - 1 - i; |
2979 | unsigned Offset = (Shifts % SubScale) * BitWidth; |
2980 | Known = KnownBits::commonBits(Known, |
2981 | Known2.extractBits(BitWidth, Offset)); |
2982 | // If we don't know any bits, early out. |
2983 | if (Known.isUnknown()) |
2984 | break; |
2985 | } |
2986 | } |
2987 | break; |
2988 | } |
2989 | case ISD::AND: |
2990 | Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); |
2991 | Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); |
2992 | |
2993 | Known &= Known2; |
2994 | break; |
2995 | case ISD::OR: |
2996 | Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); |
2997 | Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); |
2998 | |
2999 | Known |= Known2; |
3000 | break; |
3001 | case ISD::XOR: |
3002 | Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); |
3003 | Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); |
3004 | |
3005 | Known ^= Known2; |
3006 | break; |
3007 | case ISD::MUL: { |
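    // KnownBits::mul is conservative but does track trailing zeros; e.g. if
    // both operands have their lowest bit known zero, the product has at
    // least its low two bits known zero (a multiple of 4).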
3008 | Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); |
3009 | Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); |
3010 | Known = KnownBits::mul(Known, Known2); |
3011 | break; |
3012 | } |
3013 | case ISD::MULHU: { |
3014 | Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); |
3015 | Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); |
3016 | Known = KnownBits::mulhu(Known, Known2); |
3017 | break; |
3018 | } |
3019 | case ISD::MULHS: { |
3020 | Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); |
3021 | Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); |
3022 | Known = KnownBits::mulhs(Known, Known2); |
3023 | break; |
3024 | } |
3025 | case ISD::UMUL_LOHI: { |
3026 |     assert((Op.getResNo() == 0 || Op.getResNo() == 1) && "Unknown result");
3027 | Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); |
3028 | Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); |
3029 | if (Op.getResNo() == 0) |
3030 | Known = KnownBits::mul(Known, Known2); |
3031 | else |
3032 | Known = KnownBits::mulhu(Known, Known2); |
3033 | break; |
3034 | } |
3035 | case ISD::SMUL_LOHI: { |
3036 |     assert((Op.getResNo() == 0 || Op.getResNo() == 1) && "Unknown result");
3037 | Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); |
3038 | Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); |
3039 | if (Op.getResNo() == 0) |
3040 | Known = KnownBits::mul(Known, Known2); |
3041 | else |
3042 | Known = KnownBits::mulhs(Known, Known2); |
3043 | break; |
3044 | } |
3045 | case ISD::UDIV: { |
3046 | Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); |
3047 | Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); |
3048 | Known = KnownBits::udiv(Known, Known2); |
3049 | break; |
3050 | } |
3051 | case ISD::SELECT: |
3052 | case ISD::VSELECT: |
3053 | Known = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1); |
3054 | // If we don't know any bits, early out. |
3055 | if (Known.isUnknown()) |
3056 | break; |
3057 | Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth+1); |
3058 | |
3059 | // Only known if known in both the LHS and RHS. |
3060 | Known = KnownBits::commonBits(Known, Known2); |
3061 | break; |
3062 | case ISD::SELECT_CC: |
3063 | Known = computeKnownBits(Op.getOperand(3), DemandedElts, Depth+1); |
3064 | // If we don't know any bits, early out. |
3065 | if (Known.isUnknown()) |
3066 | break; |
3067 | Known2 = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1); |
3068 | |
3069 | // Only known if known in both the LHS and RHS. |
3070 | Known = KnownBits::commonBits(Known, Known2); |
3071 | break; |
3072 | case ISD::SMULO: |
3073 | case ISD::UMULO: |
3074 | if (Op.getResNo() != 1) |
3075 | break; |
3076 | // The boolean result conforms to getBooleanContents. |
3077 | // If we know the result of a setcc has the top bits zero, use this info. |
3078 | // We know that we have an integer-based boolean since these operations |
3079 | // are only available for integer. |
3080 | if (TLI->getBooleanContents(Op.getValueType().isVector(), false) == |
3081 | TargetLowering::ZeroOrOneBooleanContent && |
3082 | BitWidth > 1) |
3083 | Known.Zero.setBitsFrom(1); |
3084 | break; |
3085 | case ISD::SETCC: |
3086 | case ISD::STRICT_FSETCC: |
3087 | case ISD::STRICT_FSETCCS: { |
3088 | unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0; |
3089 | // If we know the result of a setcc has the top bits zero, use this info. |
3090 | if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) == |
3091 | TargetLowering::ZeroOrOneBooleanContent && |
3092 | BitWidth > 1) |
3093 | Known.Zero.setBitsFrom(1); |
3094 | break; |
3095 | } |
3096 | case ISD::SHL: |
3097 | Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); |
3098 | Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); |
3099 | Known = KnownBits::shl(Known, Known2); |
3100 | |
3101 | // Minimum shift low bits are known zero. |
3102 | if (const APInt *ShMinAmt = |
3103 | getValidMinimumShiftAmountConstant(Op, DemandedElts)) |
3104 | Known.Zero.setLowBits(ShMinAmt->getZExtValue()); |
3105 | break; |
3106 | case ISD::SRL: |
3107 | Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); |
3108 | Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); |
3109 | Known = KnownBits::lshr(Known, Known2); |
3110 | |
3111 | // Minimum shift high bits are known zero. |
3112 | if (const APInt *ShMinAmt = |
3113 | getValidMinimumShiftAmountConstant(Op, DemandedElts)) |
3114 | Known.Zero.setHighBits(ShMinAmt->getZExtValue()); |
3115 | break; |
3116 | case ISD::SRA: |
3117 | Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); |
3118 | Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); |
3119 | Known = KnownBits::ashr(Known, Known2); |
3120 | // TODO: Add minimum shift high known sign bits. |
3121 | break; |
3122 | case ISD::FSHL: |
3123 | case ISD::FSHR: |
3124 | if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(2), DemandedElts)) { |
3125 | unsigned Amt = C->getAPIntValue().urem(BitWidth); |
3126 | |
3127 | // For fshl, 0-shift returns the 1st arg. |
3128 | // For fshr, 0-shift returns the 2nd arg. |
3129 | if (Amt == 0) { |
3130 | Known = computeKnownBits(Op.getOperand(Opcode == ISD::FSHL ? 0 : 1), |
3131 | DemandedElts, Depth + 1); |
3132 | break; |
3133 | } |
3134 | |
3135 | // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW))) |
3136 | // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW)) |
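      // e.g. for i8 fshl(X, Y, 3): X << 3 supplies the top five bits and
      // Y >> 5 supplies the low three bits of the result.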
3137 | Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); |
3138 | Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); |
3139 | if (Opcode == ISD::FSHL) { |
3140 | Known.One <<= Amt; |
3141 | Known.Zero <<= Amt; |
3142 | Known2.One.lshrInPlace(BitWidth - Amt); |
3143 | Known2.Zero.lshrInPlace(BitWidth - Amt); |
3144 | } else { |
3145 | Known.One <<= BitWidth - Amt; |
3146 | Known.Zero <<= BitWidth - Amt; |
3147 | Known2.One.lshrInPlace(Amt); |
3148 | Known2.Zero.lshrInPlace(Amt); |
3149 | } |
3150 | Known.One |= Known2.One; |
3151 | Known.Zero |= Known2.Zero; |
3152 | } |
3153 | break; |
3154 | case ISD::SIGN_EXTEND_INREG: { |
3155 | Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); |
3156 | EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); |
3157 | Known = Known.sextInReg(EVT.getScalarSizeInBits()); |
3158 | break; |
3159 | } |
3160 | case ISD::CTTZ: |
3161 | case ISD::CTTZ_ZERO_UNDEF: { |
3162 | Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); |
3163 | // If we have a known 1, its position is our upper bound. |
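    // e.g. if the lowest known-one bit is bit 6, then cttz <= 6, which fits
    // in Log2_32(6) + 1 = 3 bits, so all higher result bits are known zero.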
3164 | unsigned PossibleTZ = Known2.countMaxTrailingZeros(); |
3165 | unsigned LowBits = Log2_32(PossibleTZ) + 1; |
3166 | Known.Zero.setBitsFrom(LowBits); |
3167 | break; |
3168 | } |
3169 | case ISD::CTLZ: |
3170 | case ISD::CTLZ_ZERO_UNDEF: { |
3171 | Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); |
3172 | // If we have a known 1, its position is our upper bound. |
3173 | unsigned PossibleLZ = Known2.countMaxLeadingZeros(); |
3174 | unsigned LowBits = Log2_32(PossibleLZ) + 1; |
3175 | Known.Zero.setBitsFrom(LowBits); |
3176 | break; |
3177 | } |
3178 | case ISD::CTPOP: { |
3179 | Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); |
3180 | // If we know some of the bits are zero, they can't be one. |
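    // e.g. with at most five possibly-set bits, the population count is at
    // most 5, which fits in Log2_32(5) + 1 = 3 bits, so bits [3, BitWidth)
    // of the result are known zero.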
3181 | unsigned PossibleOnes = Known2.countMaxPopulation(); |
3182 | Known.Zero.setBitsFrom(Log2_32(PossibleOnes) + 1); |
3183 | break; |
3184 | } |
3185 | case ISD::PARITY: { |
3186 | // Parity returns 0 everywhere but the LSB. |
3187 | Known.Zero.setBitsFrom(1); |
3188 | break; |
3189 | } |
3190 | case ISD::LOAD: { |
3191 | LoadSDNode *LD = cast<LoadSDNode>(Op); |
3192 | const Constant *Cst = TLI->getTargetConstantFromLoad(LD); |
3193 | if (ISD::isNON_EXTLoad(LD) && Cst) { |
3194 | // Determine any common known bits from the loaded constant pool value. |
3195 | Type *CstTy = Cst->getType(); |
3196 | if ((NumElts * BitWidth) == CstTy->getPrimitiveSizeInBits()) { |
3197 |         // If it's a vector splat, then we can (quickly) reuse the scalar path.
3198 | // NOTE: We assume all elements match and none are UNDEF. |
3199 | if (CstTy->isVectorTy()) { |
3200 | if (const Constant *Splat = Cst->getSplatValue()) { |
3201 | Cst = Splat; |
3202 | CstTy = Cst->getType(); |
3203 | } |
3204 | } |
3205 | // TODO - do we need to handle different bitwidths? |
3206 | if (CstTy->isVectorTy() && BitWidth == CstTy->getScalarSizeInBits()) { |
3207 | // Iterate across all vector elements finding common known bits. |
3208 | Known.One.setAllBits(); |
3209 | Known.Zero.setAllBits(); |
3210 | for (unsigned i = 0; i != NumElts; ++i) { |
3211 | if (!DemandedElts[i]) |
3212 | continue; |
3213 | if (Constant *Elt = Cst->getAggregateElement(i)) { |
3214 | if (auto *CInt = dyn_cast<ConstantInt>(Elt)) { |
3215 | const APInt &Value = CInt->getValue(); |
3216 | Known.One &= Value; |
3217 | Known.Zero &= ~Value; |
3218 | continue; |
3219 | } |
3220 | if (auto *CFP = dyn_cast<ConstantFP>(Elt)) { |
3221 | APInt Value = CFP->getValueAPF().bitcastToAPInt(); |
3222 | Known.One &= Value; |
3223 | Known.Zero &= ~Value; |
3224 | continue; |
3225 | } |
3226 | } |
3227 | Known.One.clearAllBits(); |
3228 | Known.Zero.clearAllBits(); |
3229 | break; |
3230 | } |
3231 | } else if (BitWidth == CstTy->getPrimitiveSizeInBits()) { |
3232 | if (auto *CInt = dyn_cast<ConstantInt>(Cst)) { |
3233 | Known = KnownBits::makeConstant(CInt->getValue()); |
3234 | } else if (auto *CFP = dyn_cast<ConstantFP>(Cst)) { |
3235 | Known = |
3236 | KnownBits::makeConstant(CFP->getValueAPF().bitcastToAPInt()); |
3237 | } |
3238 | } |
3239 | } |
3240 | } else if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) { |
3241 | // If this is a ZEXTLoad and we are looking at the loaded value. |
3242 | EVT VT = LD->getMemoryVT(); |
3243 | unsigned MemBits = VT.getScalarSizeInBits(); |
3244 | Known.Zero.setBitsFrom(MemBits); |
3245 | } else if (const MDNode *Ranges = LD->getRanges()) { |
3246 | if (LD->getExtensionType() == ISD::NON_EXTLOAD) |
3247 | computeKnownBitsFromRangeMetadata(*Ranges, Known); |
3248 | } |
3249 | break; |
3250 | } |
3251 | case ISD::ZERO_EXTEND_VECTOR_INREG: { |
3252 | EVT InVT = Op.getOperand(0).getValueType(); |
3253 | APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements()); |
3254 | Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1); |
3255 | Known = Known.zext(BitWidth); |
3256 | break; |
3257 | } |
3258 | case ISD::ZERO_EXTEND: { |
3259 | Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); |
3260 | Known = Known.zext(BitWidth); |
3261 | break; |
3262 | } |
3263 | case ISD::SIGN_EXTEND_VECTOR_INREG: { |
3264 | EVT InVT = Op.getOperand(0).getValueType(); |
3265 | APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements()); |
3266 | Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1); |
3267 | // If the sign bit is known to be zero or one, then sext will extend |
3268 | // it to the top bits, else it will just zext. |
3269 | Known = Known.sext(BitWidth); |
3270 | break; |
3271 | } |
3272 | case ISD::SIGN_EXTEND: { |
3273 | Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); |
3274 | // If the sign bit is known to be zero or one, then sext will extend |
3275 | // it to the top bits, else it will just zext. |
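    // e.g. sign-extending an i8 whose sign bit is known zero to i16 makes
    // the top nine bits (bit 7 plus the eight new bits) known zero.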
3276 | Known = Known.sext(BitWidth); |
3277 | break; |
3278 | } |
3279 | case ISD::ANY_EXTEND_VECTOR_INREG: { |
3280 | EVT InVT = Op.getOperand(0).getValueType(); |
3281 | APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements()); |
3282 | Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1); |
3283 | Known = Known.anyext(BitWidth); |
3284 | break; |
3285 | } |
3286 | case ISD::ANY_EXTEND: { |
3287 | Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); |
3288 | Known = Known.anyext(BitWidth); |
3289 | break; |
3290 | } |
3291 | case ISD::TRUNCATE: { |
3292 | Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); |
3293 | Known = Known.trunc(BitWidth); |
3294 | break; |
3295 | } |
3296 | case ISD::AssertZext: { |
3297 | EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT(); |
3298 | APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits()); |
3299 | Known = computeKnownBits(Op.getOperand(0), Depth+1); |
3300 | Known.Zero |= (~InMask); |
3301 | Known.One &= (~Known.Zero); |
3302 | break; |
3303 | } |
3304 | case ISD::AssertAlign: { |
3305 | unsigned LogOfAlign = Log2(cast<AssertAlignSDNode>(Op)->getAlign()); |
3306 |     assert(LogOfAlign != 0);
3307 | // If a node is guaranteed to be aligned, set low zero bits accordingly as |
3308 | // well as clearing one bits. |
3309 | Known.Zero.setLowBits(LogOfAlign); |
3310 | Known.One.clearLowBits(LogOfAlign); |
3311 | break; |
3312 | } |
3313 | case ISD::FGETSIGN: |
3314 | // All bits are zero except the low bit. |
3315 | Known.Zero.setBitsFrom(1); |
3316 | break; |
3317 | case ISD::USUBO: |
3318 | case ISD::SSUBO: |
3319 | if (Op.getResNo() == 1) { |
3320 | // If we know the result of a setcc has the top bits zero, use this info. |
3321 | if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == |
3322 | TargetLowering::ZeroOrOneBooleanContent && |
3323 | BitWidth > 1) |
3324 | Known.Zero.setBitsFrom(1); |
3325 | break; |
3326 | } |
3327 |     LLVM_FALLTHROUGH;
3328 | case ISD::SUB: |
3329 | case ISD::SUBC: { |
3330 |     assert(Op.getResNo() == 0 &&
3331 |            "We only compute knownbits for the difference here.");
3332 | |
3333 | Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); |
3334 | Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); |
3335 | Known = KnownBits::computeForAddSub(/* Add */ false, /* NSW */ false, |
3336 | Known, Known2); |
3337 | break; |
3338 | } |
3339 | case ISD::UADDO: |
3340 | case ISD::SADDO: |
3341 | case ISD::ADDCARRY: |
3342 | if (Op.getResNo() == 1) { |
3343 | // If we know the result of a setcc has the top bits zero, use this info. |
3344 | if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == |
3345 | TargetLowering::ZeroOrOneBooleanContent && |
3346 | BitWidth > 1) |
3347 | Known.Zero.setBitsFrom(1); |
3348 | break; |
3349 | } |
3350 |     LLVM_FALLTHROUGH;
3351 | case ISD::ADD: |
3352 | case ISD::ADDC: |
3353 | case ISD::ADDE: { |
3354 |     assert(Op.getResNo() == 0 && "We only compute knownbits for the sum here.");
3355 | |
3356 | // With ADDE and ADDCARRY, a carry bit may be added in. |
3357 | KnownBits Carry(1); |
3358 | if (Opcode == ISD::ADDE) |
3359 | // Can't track carry from glue, set carry to unknown. |
3360 | Carry.resetAll(); |
3361 | else if (Opcode == ISD::ADDCARRY) |
3362 | // TODO: Compute known bits for the carry operand. Not sure if it is worth |
3363 | // the trouble (how often will we find a known carry bit). And I haven't |
3364 | // tested this very much yet, but something like this might work: |
3365 | // Carry = computeKnownBits(Op.getOperand(2), DemandedElts, Depth + 1); |
3366 | // Carry = Carry.zextOrTrunc(1, false); |
3367 | Carry.resetAll(); |
3368 | else |
3369 | Carry.setAllZero(); |
3370 | |
3371 | Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); |
3372 | Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); |
3373 | Known = KnownBits::computeForAddCarry(Known, Known2, Carry); |
3374 | break; |
3375 | } |
3376 | case ISD::SREM: { |
3377 | Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); |
3378 | Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); |
3379 | Known = KnownBits::srem(Known, Known2); |
3380 | break; |
3381 | } |
3382 | case ISD::UREM: { |
3383 | Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); |
3384 | Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); |
3385 | Known = KnownBits::urem(Known, Known2); |
3386 | break; |
3387 | } |
3388 | case ISD::EXTRACT_ELEMENT: { |
3389 | Known = computeKnownBits(Op.getOperand(0), Depth+1); |
3390 | const unsigned Index = Op.getConstantOperandVal(1); |
3391 | const unsigned EltBitWidth = Op.getValueSizeInBits(); |
3392 | |
3393 | // Remove low part of known bits mask |
3394 | Known.Zero = Known.Zero.getHiBits(Known.getBitWidth() - Index * EltBitWidth); |
3395 | Known.One = Known.One.getHiBits(Known.getBitWidth() - Index * EltBitWidth); |
3396 | |
3397 | // Remove high part of known bit mask |
3398 | Known = Known.trunc(EltBitWidth); |
3399 | break; |
3400 | } |
3401 | case ISD::EXTRACT_VECTOR_ELT: { |
3402 | SDValue InVec = Op.getOperand(0); |
3403 | SDValue EltNo = Op.getOperand(1); |
3404 | EVT VecVT = InVec.getValueType(); |
3405 | // computeKnownBits not yet implemented for scalable vectors. |
3406 | if (VecVT.isScalableVector()) |
3407 | break; |
3408 | const unsigned EltBitWidth = VecVT.getScalarSizeInBits(); |
3409 | const unsigned NumSrcElts = VecVT.getVectorNumElements(); |
3410 | |
3411 |     // If BitWidth > EltBitWidth the value is any-extended, so we do not know
3412 | // anything about the extended bits. |
3413 | if (BitWidth > EltBitWidth) |
3414 | Known = Known.trunc(EltBitWidth); |
3415 | |
3416 | // If we know the element index, just demand that vector element, else for |
3417 | // an unknown element index, ignore DemandedElts and demand them all. |
3418 | APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts); |
3419 | auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo); |
3420 | if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)) |
3421 | DemandedSrcElts = |
3422 | APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue()); |
3423 | |
3424 | Known = computeKnownBits(InVec, DemandedSrcElts, Depth + 1); |
3425 | if (BitWidth > EltBitWidth) |
3426 | Known = Known.anyext(BitWidth); |
3427 | break; |
3428 | } |
3429 | case ISD::INSERT_VECTOR_ELT: { |
3430 | // If we know the element index, split the demand between the |
3431 | // source vector and the inserted element, otherwise assume we need |
3432 | // the original demanded vector elements and the value. |
3433 | SDValue InVec = Op.getOperand(0); |
3434 | SDValue InVal = Op.getOperand(1); |
3435 | SDValue EltNo = Op.getOperand(2); |
3436 | bool DemandedVal = true; |
3437 | APInt DemandedVecElts = DemandedElts; |
3438 | auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo); |
3439 | if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) { |
3440 | unsigned EltIdx = CEltNo->getZExtValue(); |
3441 | DemandedVal = !!DemandedElts[EltIdx]; |
3442 | DemandedVecElts.clearBit(EltIdx); |
3443 | } |
3444 | Known.One.setAllBits(); |
3445 | Known.Zero.setAllBits(); |
3446 | if (DemandedVal) { |
3447 | Known2 = computeKnownBits(InVal, Depth + 1); |
3448 | Known = KnownBits::commonBits(Known, Known2.zextOrTrunc(BitWidth)); |
3449 | } |
3450 | if (!!DemandedVecElts) { |
3451 | Known2 = computeKnownBits(InVec, DemandedVecElts, Depth + 1); |
3452 | Known = KnownBits::commonBits(Known, Known2); |
3453 | } |
3454 | break; |
3455 | } |
3456 | case ISD::BITREVERSE: { |
3457 | Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); |
3458 | Known = Known2.reverseBits(); |
3459 | break; |
3460 | } |
3461 | case ISD::BSWAP: { |
3462 | Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); |
3463 | Known = Known2.byteSwap(); |
3464 | break; |
3465 | } |
3466 | case ISD::ABS: { |
3467 | Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); |
3468 | Known = Known2.abs(); |
3469 | break; |
3470 | } |
3471 | case ISD::USUBSAT: { |
3472 | // The result of usubsat will never be larger than the LHS. |
3473 | Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); |
3474 | Known.Zero.setHighBits(Known2.countMinLeadingZeros()); |
3475 | break; |
3476 | } |
3477 | case ISD::UMIN: { |
3478 | Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); |
3479 | Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); |
3480 | Known = KnownBits::umin(Known, Known2); |
3481 | break; |
3482 | } |
3483 | case ISD::UMAX: { |
3484 | Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); |
3485 | Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); |
3486 | Known = KnownBits::umax(Known, Known2); |
3487 | break; |
3488 | } |
3489 | case ISD::SMIN: |
3490 | case ISD::SMAX: { |
3491 | // If we have a clamp pattern, we know that the number of sign bits will be |
3492 | // the minimum of the clamp min/max range. |
3493 | bool IsMax = (Opcode == ISD::SMAX); |
3494 | ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr; |
3495 | if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts))) |
3496 | if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX)) |
3497 | CstHigh = |
3498 | isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts); |
3499 | if (CstLow && CstHigh) { |
3500 | if (!IsMax) |
3501 | std::swap(CstLow, CstHigh); |
3502 | |
3503 | const APInt &ValueLow = CstLow->getAPIntValue(); |
3504 | const APInt &ValueHigh = CstHigh->getAPIntValue(); |
3505 | if (ValueLow.sle(ValueHigh)) { |
3506 | unsigned LowSignBits = ValueLow.getNumSignBits(); |
3507 | unsigned HighSignBits = ValueHigh.getNumSignBits(); |
3508 | unsigned MinSignBits = std::min(LowSignBits, HighSignBits); |
3509 | if (ValueLow.isNegative() && ValueHigh.isNegative()) { |
3510 | Known.One.setHighBits(MinSignBits); |
3511 | break; |
3512 | } |
3513 | if (ValueLow.isNonNegative() && ValueHigh.isNonNegative()) { |
3514 | Known.Zero.setHighBits(MinSignBits); |
3515 | break; |
3516 | } |
3517 | } |
3518 | } |
3519 | |
3520 | Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); |
3521 | Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); |
3522 | if (IsMax) |
3523 | Known = KnownBits::smax(Known, Known2); |
3524 | else |
3525 | Known = KnownBits::smin(Known, Known2); |
3526 | break; |
3527 | } |
3528 | case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: |
3529 | if (Op.getResNo() == 1) { |
3530 | // The boolean result conforms to getBooleanContents. |
3531 | // If we know the result of a setcc has the top bits zero, use this info. |
3532 | // We know that we have an integer-based boolean since these operations |
3533 | // are only available for integer. |
3534 | if (TLI->getBooleanContents(Op.getValueType().isVector(), false) == |
3535 | TargetLowering::ZeroOrOneBooleanContent && |
3536 | BitWidth > 1) |
3537 | Known.Zero.setBitsFrom(1); |
3538 | break; |
3539 | } |
3540 |     LLVM_FALLTHROUGH;
3541 | case ISD::ATOMIC_CMP_SWAP: |
3542 | case ISD::ATOMIC_SWAP: |
3543 | case ISD::ATOMIC_LOAD_ADD: |
3544 | case ISD::ATOMIC_LOAD_SUB: |
3545 | case ISD::ATOMIC_LOAD_AND: |
3546 | case ISD::ATOMIC_LOAD_CLR: |
3547 | case ISD::ATOMIC_LOAD_OR: |
3548 | case ISD::ATOMIC_LOAD_XOR: |
3549 | case ISD::ATOMIC_LOAD_NAND: |
3550 | case ISD::ATOMIC_LOAD_MIN: |
3551 | case ISD::ATOMIC_LOAD_MAX: |
3552 | case ISD::ATOMIC_LOAD_UMIN: |
3553 | case ISD::ATOMIC_LOAD_UMAX: |
3554 | case ISD::ATOMIC_LOAD: { |
3555 | unsigned MemBits = |
3556 | cast<AtomicSDNode>(Op)->getMemoryVT().getScalarSizeInBits(); |
3557 | // If we are looking at the loaded value. |
3558 | if (Op.getResNo() == 0) { |
3559 | if (TLI->getExtendForAtomicOps() == ISD::ZERO_EXTEND) |
3560 | Known.Zero.setBitsFrom(MemBits); |
3561 | } |
3562 | break; |
3563 | } |
3564 | case ISD::FrameIndex: |
3565 | case ISD::TargetFrameIndex: |
3566 | TLI->computeKnownBitsForFrameIndex(cast<FrameIndexSDNode>(Op)->getIndex(), |
3567 | Known, getMachineFunction()); |
3568 | break; |
3569 | |
3570 | default: |
3571 | if (Opcode < ISD::BUILTIN_OP_END) |
3572 | break; |
3573 |     LLVM_FALLTHROUGH;
3574 | case ISD::INTRINSIC_WO_CHAIN: |
3575 | case ISD::INTRINSIC_W_CHAIN: |
3576 | case ISD::INTRINSIC_VOID: |
3577 | // Allow the target to implement this method for its nodes. |
3578 | TLI->computeKnownBitsForTargetNode(Op, Known, DemandedElts, *this, Depth); |
3579 | break; |
3580 | } |
3581 | |
3582 |   assert(!Known.hasConflict() && "Bits known to be one AND zero?");
3583 | return Known; |
3584 | } |
3585 | |
3586 | SelectionDAG::OverflowKind SelectionDAG::computeOverflowKind(SDValue N0, |
3587 | SDValue N1) const { |
3588 |   // X + 0 never overflows.
3589 | if (isNullConstant(N1)) |
3590 | return OFK_Never; |
3591 | |
3592 | KnownBits N1Known = computeKnownBits(N1); |
3593 | if (N1Known.Zero.getBoolValue()) { |
3594 | KnownBits N0Known = computeKnownBits(N0); |
3595 | |
3596 | bool overflow; |
3597 | (void)N0Known.getMaxValue().uadd_ov(N1Known.getMaxValue(), overflow); |
3598 | if (!overflow) |
3599 | return OFK_Never; |
3600 | } |
3601 | |
3602 |   // mulhi + 1 never overflows.
3603 | if (N0.getOpcode() == ISD::UMUL_LOHI && N0.getResNo() == 1 && |
3604 | (N1Known.getMaxValue() & 0x01) == N1Known.getMaxValue()) |
3605 | return OFK_Never; |
3606 | |
3607 | if (N1.getOpcode() == ISD::UMUL_LOHI && N1.getResNo() == 1) { |
3608 | KnownBits N0Known = computeKnownBits(N0); |
3609 | |
3610 | if ((N0Known.getMaxValue() & 0x01) == N0Known.getMaxValue()) |
3611 | return OFK_Never; |
3612 | } |
3613 | |
3614 | return OFK_Sometime; |
3615 | } |
3616 | |
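// Illustrative sketch (assumed caller context, not from this file): a
// combine can drop an overflow check once overflow is provably absent.
//
//   if (DAG.computeOverflowKind(X, Y) == SelectionDAG::OFK_Never)
//     Result = DAG.getNode(ISD::ADD, DL, VT, X, Y); // no uaddo needed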
3617 | bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val) const { |
3618 | EVT OpVT = Val.getValueType(); |
3619 | unsigned BitWidth = OpVT.getScalarSizeInBits(); |
3620 | |
3621 | // Is the constant a known power of 2? |
3622 | if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Val)) |
3623 | return Const->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2(); |
3624 | |
3625 | // A left-shift of a constant one will have exactly one bit set because |
3626 | // shifting the bit off the end is undefined. |
3627 | if (Val.getOpcode() == ISD::SHL) { |
3628 | auto *C = isConstOrConstSplat(Val.getOperand(0)); |
3629 | if (C && C->getAPIntValue() == 1) |
3630 | return true; |
3631 | } |
3632 | |
3633 | // Similarly, a logical right-shift of a constant sign-bit will have exactly |
3634 | // one bit set. |
3635 | if (Val.getOpcode() == ISD::SRL) { |
3636 | auto *C = isConstOrConstSplat(Val.getOperand(0)); |
3637 | if (C && C->getAPIntValue().isSignMask()) |
3638 | return true; |
3639 | } |
3640 | |
3641 | // Are all operands of a build vector constant powers of two? |
3642 | if (Val.getOpcode() == ISD::BUILD_VECTOR) |
3643 | if (llvm::all_of(Val->ops(), [BitWidth](SDValue E) { |
3644 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E)) |
3645 | return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2(); |
3646 | return false; |
3647 | })) |
3648 | return true; |
3649 | |
3650 | // More could be done here, though the above checks are enough |
3651 | // to handle some common cases. |
3652 | |
3653 | // Fall back to computeKnownBits to catch other known cases. |
3654 | KnownBits Known = computeKnownBits(Val); |
3655 | return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1); |
3656 | } |
3657 | |
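// Illustrative sketch (assumed caller context): a power-of-two divisor
// lets an unsigned divide become a logical shift right.
//
//   if (DAG.isKnownToBeAPowerOfTwo(Divisor))
//     // udiv X, Divisor can be rewritten as srl X, cttz(Divisor).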
3658 | unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const { |
3659 | EVT VT = Op.getValueType(); |
3660 | |
3661 | // TODO: Assume we don't know anything for now. |
3662 | if (VT.isScalableVector()) |
3663 | return 1; |
3664 | |
3665 | APInt DemandedElts = VT.isVector() |
3666 | ? APInt::getAllOnesValue(VT.getVectorNumElements()) |
3667 | : APInt(1, 1); |
3668 | return ComputeNumSignBits(Op, DemandedElts, Depth); |
3669 | } |
3670 | |
3671 | unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts, |
3672 | unsigned Depth) const { |
3673 | EVT VT = Op.getValueType(); |
3674 |   assert((VT.isInteger() || VT.isFloatingPoint()) && "Invalid VT!");
3675 | unsigned VTBits = VT.getScalarSizeInBits(); |
3676 | unsigned NumElts = DemandedElts.getBitWidth(); |
3677 | unsigned Tmp, Tmp2; |
3678 | unsigned FirstAnswer = 1; |
3679 | |
3680 | if (auto *C = dyn_cast<ConstantSDNode>(Op)) { |
3681 | const APInt &Val = C->getAPIntValue(); |
3682 | return Val.getNumSignBits(); |
3683 | } |
3684 | |
3685 | if (Depth >= MaxRecursionDepth) |
3686 | return 1; // Limit search depth. |
3687 | |
3688 | if (!DemandedElts || VT.isScalableVector()) |
3689 | return 1; // No demanded elts, better to assume we don't know anything. |
3690 | |
3691 | unsigned Opcode = Op.getOpcode(); |
3692 | switch (Opcode) { |
3693 | default: break; |
3694 | case ISD::AssertSext: |
3695 | Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); |
3696 | return VTBits-Tmp+1; |
3697 | case ISD::AssertZext: |
3698 | Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); |
3699 | return VTBits-Tmp; |
3700 | |
3701 | case ISD::BUILD_VECTOR: |
3702 | Tmp = VTBits; |
3703 | for (unsigned i = 0, e = Op.getNumOperands(); (i < e) && (Tmp > 1); ++i) { |
3704 | if (!DemandedElts[i]) |
3705 | continue; |
3706 | |
3707 | SDValue SrcOp = Op.getOperand(i); |
3708 | Tmp2 = ComputeNumSignBits(SrcOp, Depth + 1); |
3709 | |
3710 | // BUILD_VECTOR can implicitly truncate sources, we must handle this. |
3711 | if (SrcOp.getValueSizeInBits() != VTBits) { |
3712 |       assert(SrcOp.getValueSizeInBits() > VTBits &&
3713 |              "Expected BUILD_VECTOR implicit truncation");
3714 | unsigned ExtraBits = SrcOp.getValueSizeInBits() - VTBits; |
3715 | Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1); |
3716 | } |
3717 | Tmp = std::min(Tmp, Tmp2); |
3718 | } |
3719 | return Tmp; |
3720 | |
3721 | case ISD::VECTOR_SHUFFLE: { |
3722 | // Collect the minimum number of sign bits that are shared by every vector |
3723 | // element referenced by the shuffle. |
3724 | APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0); |
3725 | const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op); |
3726 |     assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
3727 | for (unsigned i = 0; i != NumElts; ++i) { |
3728 | int M = SVN->getMaskElt(i); |
3729 | if (!DemandedElts[i]) |
3730 | continue; |
3731 | // For UNDEF elements, we don't know anything about the common state of |
3732 | // the shuffle result. |
3733 | if (M < 0) |
3734 | return 1; |
3735 | if ((unsigned)M < NumElts) |
3736 | DemandedLHS.setBit((unsigned)M % NumElts); |
3737 | else |
3738 | DemandedRHS.setBit((unsigned)M % NumElts); |
3739 | } |
3740 | Tmp = std::numeric_limits<unsigned>::max(); |
3741 | if (!!DemandedLHS) |
3742 | Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1); |
3743 | if (!!DemandedRHS) { |
3744 | Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1); |
3745 | Tmp = std::min(Tmp, Tmp2); |
3746 | } |
3747 | // If we don't know anything, early out and try computeKnownBits fall-back. |
3748 | if (Tmp == 1) |
3749 | break; |
3750 |     assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3751 | return Tmp; |
3752 | } |
3753 | |
3754 | case ISD::BITCAST: { |
3755 | SDValue N0 = Op.getOperand(0); |
3756 | EVT SrcVT = N0.getValueType(); |
3757 | unsigned SrcBits = SrcVT.getScalarSizeInBits(); |
3758 | |
3759 |     // Ignore bitcasts from unsupported types.
3760 | if (!(SrcVT.isInteger() || SrcVT.isFloatingPoint())) |
3761 | break; |
3762 | |
3763 | // Fast handling of 'identity' bitcasts. |
3764 | if (VTBits == SrcBits) |
3765 | return ComputeNumSignBits(N0, DemandedElts, Depth + 1); |
3766 | |
3767 | bool IsLE = getDataLayout().isLittleEndian(); |
3768 | |
3769 | // Bitcast 'large element' scalar/vector to 'small element' vector. |
3770 | if ((SrcBits % VTBits) == 0) { |
3771 |       assert(VT.isVector() && "Expected bitcast to vector");
3772 | |
3773 | unsigned Scale = SrcBits / VTBits; |
3774 | APInt SrcDemandedElts(NumElts / Scale, 0); |
3775 | for (unsigned i = 0; i != NumElts; ++i) |
3776 | if (DemandedElts[i]) |
3777 | SrcDemandedElts.setBit(i / Scale); |
3778 | |
3779 | // Fast case - sign splat can be simply split across the small elements. |
3780 | Tmp = ComputeNumSignBits(N0, SrcDemandedElts, Depth + 1); |
3781 | if (Tmp == SrcBits) |
3782 | return VTBits; |
3783 | |
3784 | // Slow case - determine how far the sign extends into each sub-element. |
3785 | Tmp2 = VTBits; |
3786 | for (unsigned i = 0; i != NumElts; ++i) |
3787 | if (DemandedElts[i]) { |
3788 | unsigned SubOffset = i % Scale; |
3789 | SubOffset = (IsLE ? ((Scale - 1) - SubOffset) : SubOffset); |
3790 | SubOffset = SubOffset * VTBits; |
3791 | if (Tmp <= SubOffset) |
3792 | return 1; |
3793 | Tmp2 = std::min(Tmp2, Tmp - SubOffset); |
3794 | } |
3795 | return Tmp2; |
3796 | } |
3797 | break; |
3798 | } |
3799 | |
3800 | case ISD::SIGN_EXTEND: |
3801 | Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits(); |
3802 | return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1) + Tmp; |
3803 | case ISD::SIGN_EXTEND_INREG: |
3804 | // Max of the input and what this extends. |
3805 | Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits(); |
3806 | Tmp = VTBits-Tmp+1; |
3807 | Tmp2 = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); |
3808 | return std::max(Tmp, Tmp2); |
3809 | case ISD::SIGN_EXTEND_VECTOR_INREG: { |
3810 | SDValue Src = Op.getOperand(0); |
3811 | EVT SrcVT = Src.getValueType(); |
3812 | APInt DemandedSrcElts = DemandedElts.zextOrSelf(SrcVT.getVectorNumElements()); |
3813 | Tmp = VTBits - SrcVT.getScalarSizeInBits(); |
3814 | return ComputeNumSignBits(Src, DemandedSrcElts, Depth+1) + Tmp; |
3815 | } |
3816 | case ISD::SRA: |
3817 | Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); |
3818 | // SRA X, C -> adds C sign bits. |
3819 | if (const APInt *ShAmt = |
3820 | getValidMinimumShiftAmountConstant(Op, DemandedElts)) |
3821 | Tmp = std::min<uint64_t>(Tmp + ShAmt->getZExtValue(), VTBits); |
3822 | return Tmp; |
3823 | case ISD::SHL: |
3824 | if (const APInt *ShAmt = |
3825 | getValidMaximumShiftAmountConstant(Op, DemandedElts)) { |
3826 | // shl destroys sign bits, ensure it doesn't shift out all sign bits. |
3827 | Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); |
3828 | if (ShAmt->ult(Tmp)) |
3829 | return Tmp - ShAmt->getZExtValue(); |
3830 | } |
3831 | break; |
3832 | case ISD::AND: |
3833 | case ISD::OR: |
3834 | case ISD::XOR: // NOT is handled here. |
3835 | // Logical binary ops preserve the number of sign bits at the worst. |
3836 | Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); |
3837 | if (Tmp != 1) { |
3838 | Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1); |
3839 | FirstAnswer = std::min(Tmp, Tmp2); |
3840 | // We computed what we know about the sign bits as our first |
3841 | // answer. Now proceed to the generic code that uses |
3842 | // computeKnownBits, and pick whichever answer is better. |
3843 | } |
3844 | break; |
3845 | |
3846 | case ISD::SELECT: |
3847 | case ISD::VSELECT: |
3848 | Tmp = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1); |
3849 | if (Tmp == 1) return 1; // Early out. |
3850 | Tmp2 = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1); |
3851 | return std::min(Tmp, Tmp2); |
3852 | case ISD::SELECT_CC: |
3853 | Tmp = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1); |
3854 | if (Tmp == 1) return 1; // Early out. |
3855 | Tmp2 = ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth+1); |
3856 | return std::min(Tmp, Tmp2); |
3857 | |
3858 | case ISD::SMIN: |
3859 | case ISD::SMAX: { |
3860 | // If we have a clamp pattern, we know that the number of sign bits will be |
3861 | // the minimum of the clamp min/max range. |
3862 | bool IsMax = (Opcode == ISD::SMAX); |
3863 | ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr; |
3864 | if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts))) |
3865 | if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX)) |
3866 | CstHigh = |
3867 | isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts); |
3868 | if (CstLow && CstHigh) { |
3869 | if (!IsMax) |
3870 | std::swap(CstLow, CstHigh); |
3871 | if (CstLow->getAPIntValue().sle(CstHigh->getAPIntValue())) { |
3872 | Tmp = CstLow->getAPIntValue().getNumSignBits(); |
3873 | Tmp2 = CstHigh->getAPIntValue().getNumSignBits(); |
3874 | return std::min(Tmp, Tmp2); |
3875 | } |
3876 | } |
3877 | |
3878 | // Fallback - just get the minimum number of sign bits of the operands. |
3879 | Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); |
3880 | if (Tmp == 1) |
3881 | return 1; // Early out. |
3882 | Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1); |
3883 | return std::min(Tmp, Tmp2); |
3884 | } |
3885 | case ISD::UMIN: |
3886 | case ISD::UMAX: |
3887 | Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); |
3888 | if (Tmp == 1) |
3889 | return 1; // Early out. |
3890 | Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1); |
3891 | return std::min(Tmp, Tmp2); |
3892 | case ISD::SADDO: |
3893 | case ISD::UADDO: |
3894 | case ISD::SSUBO: |
3895 | case ISD::USUBO: |
3896 | case ISD::SMULO: |
3897 | case ISD::UMULO: |
3898 | if (Op.getResNo() != 1) |
3899 | break; |
3900 | // The boolean result conforms to getBooleanContents. Fall through. |
3901 | // If setcc returns 0/-1, all bits are sign bits. |
3902 | // We know that we have an integer-based boolean since these operations |
3903 | // are only available for integer. |
3904 | if (TLI->getBooleanContents(VT.isVector(), false) == |
3905 | TargetLowering::ZeroOrNegativeOneBooleanContent) |
3906 | return VTBits; |
3907 | break; |
3908 | case ISD::SETCC: |
3909 | case ISD::STRICT_FSETCC: |
3910 | case ISD::STRICT_FSETCCS: { |
3911 | unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0; |
3912 | // If setcc returns 0/-1, all bits are sign bits. |
3913 | if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) == |
3914 | TargetLowering::ZeroOrNegativeOneBooleanContent) |
3915 | return VTBits; |
3916 | break; |
3917 | } |
3918 | case ISD::ROTL: |
3919 | case ISD::ROTR: |
3920 | Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); |
3921 | |
3922 |     // If we're rotating a 0/-1 value, then it stays a 0/-1 value.
3923 | if (Tmp == VTBits) |
3924 | return VTBits; |
3925 | |
3926 | if (ConstantSDNode *C = |
3927 | isConstOrConstSplat(Op.getOperand(1), DemandedElts)) { |
3928 | unsigned RotAmt = C->getAPIntValue().urem(VTBits); |
3929 | |
3930 |       // Handle rotate right by N like a rotate left by VTBits-N.
3931 | if (Opcode == ISD::ROTR) |
3932 | RotAmt = (VTBits - RotAmt) % VTBits; |
3933 | |
3934 | // If we aren't rotating out all of the known-in sign bits, return the |
3935 | // number that are left. This handles rotl(sext(x), 1) for example. |
3936 | if (Tmp > (RotAmt + 1)) return (Tmp - RotAmt); |
3937 | } |
3938 | break; |
3939 | case ISD::ADD: |
3940 | case ISD::ADDC: |
3941 | // Add can have at most one carry bit. Thus we know that the output |
3942 | // is, at worst, one more bit than the inputs. |
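    // e.g. two i16 values with 3 sign bits each lie in [-2^13, 2^13); their
    // sum lies in [-2^14, 2^14), which still guarantees 2 sign bits; hence
    // the min(Tmp, Tmp2) - 1 below.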
3943 | Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); |
3944 | if (Tmp == 1) return 1; // Early out. |
3945 | |
3946 | // Special case decrementing a value (ADD X, -1): |
3947 | if (ConstantSDNode *CRHS = |
3948 | isConstOrConstSplat(Op.getOperand(1), DemandedElts)) |
3949 | if (CRHS->isAllOnesValue()) { |
3950 | KnownBits Known = |
3951 | computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); |
3952 | |
3953 | // If the input is known to be 0 or 1, the output is 0/-1, which is all |
3954 | // sign bits set. |
3955 | if ((Known.Zero | 1).isAllOnesValue()) |
3956 | return VTBits; |
3957 | |
3958 | // If we are subtracting one from a positive number, there is no carry |
3959 | // out of the result. |
3960 | if (Known.isNonNegative()) |
3961 | return Tmp; |
3962 | } |
3963 | |
3964 | Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1); |
3965 | if (Tmp2 == 1) return 1; // Early out. |
3966 | return std::min(Tmp, Tmp2) - 1; |
3967 | case ISD::SUB: |
3968 | Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1); |
3969 | if (Tmp2 == 1) return 1; // Early out. |
3970 | |
3971 | // Handle NEG. |
3972 | if (ConstantSDNode *CLHS = |
3973 | isConstOrConstSplat(Op.getOperand(0), DemandedElts)) |
3974 | if (CLHS->isNullValue()) { |
3975 | KnownBits Known = |
3976 | computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); |
3977 | // If the input is known to be 0 or 1, the output is 0/-1, which is all |
3978 | // sign bits set. |
3979 | if ((Known.Zero | 1).isAllOnesValue()) |
3980 | return VTBits; |
3981 | |
3982 | // If the input is known to be positive (the sign bit is known clear), |
3983 | // the output of the NEG has the same number of sign bits as the input. |
3984 | if (Known.isNonNegative()) |
3985 | return Tmp2; |
3986 | |
3987 | // Otherwise, we treat this like a SUB. |
3988 | } |
3989 | |
3990 | // Sub can have at most one carry bit. Thus we know that the output |
3991 | // is, at worst, one more bit than the inputs. |
3992 | Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); |
3993 | if (Tmp == 1) return 1; // Early out. |
3994 | return std::min(Tmp, Tmp2) - 1; |
3995 | case ISD::MUL: { |
3996 | // The output of the Mul can be at most twice the valid bits in the inputs. |
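    // e.g. for i32 operands with 20 sign bits each, the inputs occupy 13
    // valid bits, the product occupies at most 26, leaving
    // 32 - 26 + 1 = 7 known sign bits.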
3997 | unsigned SignBitsOp0 = ComputeNumSignBits(Op.getOperand(0), Depth + 1); |
3998 | if (SignBitsOp0 == 1) |
3999 | break; |
4000 | unsigned SignBitsOp1 = ComputeNumSignBits(Op.getOperand(1), Depth + 1); |
4001 | if (SignBitsOp1 == 1) |
4002 | break; |
4003 | unsigned OutValidBits = |
4004 | (VTBits - SignBitsOp0 + 1) + (VTBits - SignBitsOp1 + 1); |
4005 | return OutValidBits > VTBits ? 1 : VTBits - OutValidBits + 1; |
4006 | } |
4007 | case ISD::SREM: |
4008 | // The sign bit is the LHS's sign bit, except when the result of the |
4009 | // remainder is zero. The magnitude of the result should be less than or |
4010 | // equal to the magnitude of the LHS. Therefore, the result should have |
4011 | // at least as many sign bits as the left hand side. |
4012 | return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); |
4013 | case ISD::TRUNCATE: { |
4014 | // Check if the sign bits of source go down as far as the truncated value. |
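    // e.g. truncating an i64 with 40 sign bits to i32: 40 > (64 - 32), so
    // the result keeps 40 - (64 - 32) = 8 sign bits.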
4015 | unsigned NumSrcBits = Op.getOperand(0).getScalarValueSizeInBits(); |
4016 | unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1); |
4017 | if (NumSrcSignBits > (NumSrcBits - VTBits)) |
4018 | return NumSrcSignBits - (NumSrcBits - VTBits); |
4019 | break; |
4020 | } |
4021 | case ISD::EXTRACT_ELEMENT: { |
4022 | const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1); |
4023 | const int BitWidth = Op.getValueSizeInBits(); |
4024 | const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth; |
4025 | |
4026 |     // Get the reverse index: Op1 indexes elements from the little end,
4027 |     // while the sign bit sits at the big end.
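    // e.g. for an i64 with 40 sign bits, extracting the high i32 gives
    // rIndex = 0 and min(40, 32) = 32 sign bits, while the low i32 gives
    // rIndex = 1 and max(40 - 32, 0) = 8 sign bits.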
4028 | const int rIndex = Items - 1 - Op.getConstantOperandVal(1); |
4029 | |
4030 |     // If the sign portion ends in our element, the subtraction gives the
4031 |     // correct result; otherwise it is clamped to the [0, BitWidth] range.
4032 | return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0); |
4033 | } |
4034 | case ISD::INSERT_VECTOR_ELT: { |
4035 | // If we know the element index, split the demand between the |
4036 | // source vector and the inserted element, otherwise assume we need |
4037 | // the original demanded vector elements and the value. |
4038 | SDValue InVec = Op.getOperand(0); |
4039 | SDValue InVal = Op.getOperand(1); |
4040 | SDValue EltNo = Op.getOperand(2); |
4041 | bool DemandedVal = true; |
4042 | APInt DemandedVecElts = DemandedElts; |
4043 | auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo); |
4044 | if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) { |
4045 | unsigned EltIdx = CEltNo->getZExtValue(); |
4046 | DemandedVal = !!DemandedElts[EltIdx]; |
4047 | DemandedVecElts.clearBit(EltIdx); |
4048 | } |
4049 | Tmp = std::numeric_limits<unsigned>::max(); |
4050 | if (DemandedVal) { |
4051 | // TODO - handle implicit truncation of inserted elements. |
4052 | if (InVal.getScalarValueSizeInBits() != VTBits) |
4053 | break; |
4054 | Tmp2 = ComputeNumSignBits(InVal, Depth + 1); |
4055 | Tmp = std::min(Tmp, Tmp2); |
4056 | } |
4057 | if (!!DemandedVecElts) { |
4058 | Tmp2 = ComputeNumSignBits(InVec, DemandedVecElts, Depth + 1); |
4059 | Tmp = std::min(Tmp, Tmp2); |
4060 | } |
4061 |     assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
4062 | return Tmp; |
4063 | } |
4064 | case ISD::EXTRACT_VECTOR_ELT: { |
4065 | SDValue InVec = Op.getOperand(0); |
4066 | SDValue EltNo = Op.getOperand(1); |
4067 | EVT VecVT = InVec.getValueType(); |
4068 | // ComputeNumSignBits not yet implemented for scalable vectors. |
4069 | if (VecVT.isScalableVector()) |
4070 | break; |
4071 | const unsigned BitWidth = Op.getValueSizeInBits(); |
4072 | const unsigned EltBitWidth = Op.getOperand(0).getScalarValueSizeInBits(); |
4073 | const unsigned NumSrcElts = VecVT.getVectorNumElements(); |
4074 | |
4075 |     // If BitWidth > EltBitWidth the value is any-extended, and we do not know
4076 | // anything about sign bits. But if the sizes match we can derive knowledge |
4077 | // about sign bits from the vector operand. |
4078 | if (BitWidth != EltBitWidth) |
4079 | break; |
4080 | |
4081 | // If we know the element index, just demand that vector element, else for |
4082 | // an unknown element index, ignore DemandedElts and demand them all. |
4083 | APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts); |
4084 | auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo); |
4085 | if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)) |
4086 | DemandedSrcElts = |
4087 | APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue()); |
4088 | |
4089 | return ComputeNumSignBits(InVec, DemandedSrcElts, Depth + 1); |
4090 | } |
4091 | case ISD::EXTRACT_SUBVECTOR: { |
4092 | // Offset the demanded elts by the subvector index. |
4093 | SDValue Src = Op.getOperand(0); |
4094 | // Bail until we can represent demanded elements for scalable vectors. |
4095 | if (Src.getValueType().isScalableVector()) |
4096 | break; |
4097 | uint64_t Idx = Op.getConstantOperandVal(1); |
4098 | unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); |
4099 | APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx); |
4100 | return ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1); |
4101 | } |
4102 | case ISD::CONCAT_VECTORS: { |
4103 | // Determine the minimum number of sign bits across all demanded |
4104 | // elts of the input vectors. Early out if the result is already 1. |
4105 | Tmp = std::numeric_limits<unsigned>::max(); |
4106 | EVT SubVectorVT = Op.getOperand(0).getValueType(); |
4107 | unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements(); |
4108 | unsigned NumSubVectors = Op.getNumOperands(); |
4109 | for (unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) { |
4110 | APInt DemandedSub = |
4111 | DemandedElts.extractBits(NumSubVectorElts, i * NumSubVectorElts); |
4112 | if (!DemandedSub) |
4113 | continue; |
4114 | Tmp2 = ComputeNumSignBits(Op.getOperand(i), DemandedSub, Depth + 1); |
4115 | Tmp = std::min(Tmp, Tmp2); |
4116 | } |
4117 |     assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
4118 | return Tmp; |
4119 | } |
4120 | case ISD::INSERT_SUBVECTOR: { |
4121 |     // Demand any elements from the subvector and the remainder from the src
4122 |     // it is inserted into.
4123 | SDValue Src = Op.getOperand(0); |
4124 | SDValue Sub = Op.getOperand(1); |
4125 | uint64_t Idx = Op.getConstantOperandVal(2); |
4126 | unsigned NumSubElts = Sub.getValueType().getVectorNumElements(); |
4127 | APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx); |
4128 | APInt DemandedSrcElts = DemandedElts; |
4129 | DemandedSrcElts.insertBits(APInt::getNullValue(NumSubElts), Idx); |
4130 | |
4131 | Tmp = std::numeric_limits<unsigned>::max(); |
4132 | if (!!DemandedSubElts) { |
4133 | Tmp = ComputeNumSignBits(Sub, DemandedSubElts, Depth + 1); |
4134 | if (Tmp == 1) |
4135 | return 1; // early-out |
4136 | } |
4137 | if (!!DemandedSrcElts) { |
4138 | Tmp2 = ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1); |
4139 | Tmp = std::min(Tmp, Tmp2); |
4140 | } |
4141 |     assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
4142 | return Tmp; |
4143 | } |
4144 | case ISD::ATOMIC_CMP_SWAP: |
4145 | case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: |
4146 | case ISD::ATOMIC_SWAP: |
4147 | case ISD::ATOMIC_LOAD_ADD: |
4148 | case ISD::ATOMIC_LOAD_SUB: |
4149 | case ISD::ATOMIC_LOAD_AND: |
4150 | case ISD::ATOMIC_LOAD_CLR: |
4151 | case ISD::ATOMIC_LOAD_OR: |
4152 | case ISD::ATOMIC_LOAD_XOR: |
4153 | case ISD::ATOMIC_LOAD_NAND: |
4154 | case ISD::ATOMIC_LOAD_MIN: |
4155 | case ISD::ATOMIC_LOAD_MAX: |
4156 | case ISD::ATOMIC_LOAD_UMIN: |
4157 | case ISD::ATOMIC_LOAD_UMAX: |
4158 | case ISD::ATOMIC_LOAD: { |
4159 | Tmp = cast<AtomicSDNode>(Op)->getMemoryVT().getScalarSizeInBits(); |
4160 | // If we are looking at the loaded value. |
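     | // e.g. an i8 atomic load that the target sign-extends to i32 yields
     | // 32 - 8 + 1 = 25 known sign bits; zero-extension yields 32 - 8 = 24.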
4161 | if (Op.getResNo() == 0) { |
4162 | if (Tmp == VTBits) |
4163 | return 1; // early-out |
4164 | if (TLI->getExtendForAtomicOps() == ISD::SIGN_EXTEND) |
4165 | return VTBits - Tmp + 1; |
4166 | if (TLI->getExtendForAtomicOps() == ISD::ZERO_EXTEND) |
4167 | return VTBits - Tmp; |
4168 | } |
4169 | break; |
4170 | } |
4171 | } |
4172 | |
4173 | // If we are looking at the loaded value of the SDNode. |
4174 | if (Op.getResNo() == 0) { |
4175 | // Handle LOADX separately here. The EXTLOAD case will fall through.
4176 | if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) { |
4177 | unsigned ExtType = LD->getExtensionType(); |
4178 | switch (ExtType) { |
4179 | default: break; |
4180 | case ISD::SEXTLOAD: // e.g. i16->i32 = '17' bits known. |
4181 | Tmp = LD->getMemoryVT().getScalarSizeInBits(); |
4182 | return VTBits - Tmp + 1; |
4183 | case ISD::ZEXTLOAD: // e.g. i16->i32 = '16' bits known. |
4184 | Tmp = LD->getMemoryVT().getScalarSizeInBits(); |
4185 | return VTBits - Tmp; |
4186 | case ISD::NON_EXTLOAD: |
4187 | if (const Constant *Cst = TLI->getTargetConstantFromLoad(LD)) { |
4188 | // We only need to handle vectors - computeKnownBits should handle |
4189 | // scalar cases. |
4190 | Type *CstTy = Cst->getType(); |
4191 | if (CstTy->isVectorTy() && |
4192 | (NumElts * VTBits) == CstTy->getPrimitiveSizeInBits()) { |
4193 | Tmp = VTBits; |
4194 | for (unsigned i = 0; i != NumElts; ++i) { |
4195 | if (!DemandedElts[i]) |
4196 | continue; |
4197 | if (Constant *Elt = Cst->getAggregateElement(i)) { |
4198 | if (auto *CInt = dyn_cast<ConstantInt>(Elt)) { |
4199 | const APInt &Value = CInt->getValue(); |
4200 | Tmp = std::min(Tmp, Value.getNumSignBits()); |
4201 | continue; |
4202 | } |
4203 | if (auto *CFP = dyn_cast<ConstantFP>(Elt)) { |
4204 | APInt Value = CFP->getValueAPF().bitcastToAPInt(); |
4205 | Tmp = std::min(Tmp, Value.getNumSignBits()); |
4206 | continue; |
4207 | } |
4208 | } |
4209 | // Unknown type. Conservatively assume no bits match sign bit. |
4210 | return 1; |
4211 | } |
4212 | return Tmp; |
4213 | } |
4214 | } |
4215 | break; |
4216 | } |
4217 | } |
4218 | } |
4219 | |
4220 | // Allow the target to implement this method for its nodes. |
4221 | if (Opcode >= ISD::BUILTIN_OP_END || |
4222 | Opcode == ISD::INTRINSIC_WO_CHAIN || |
4223 | Opcode == ISD::INTRINSIC_W_CHAIN || |
4224 | Opcode == ISD::INTRINSIC_VOID) { |
4225 | unsigned NumBits = |
4226 | TLI->ComputeNumSignBitsForTargetNode(Op, DemandedElts, *this, Depth); |
4227 | if (NumBits > 1) |
4228 | FirstAnswer = std::max(FirstAnswer, NumBits); |
4229 | } |
4230 | |
4231 | // Finally, if we can prove that the top bits of the result are 0's or 1's, |
4232 | // use this information. |
4233 | KnownBits Known = computeKnownBits(Op, DemandedElts, Depth); |
4234 | |
4235 | APInt Mask; |
4236 | if (Known.isNonNegative()) { // sign bit is 0 |
4237 | Mask = Known.Zero; |
4238 | } else if (Known.isNegative()) { // sign bit is 1
4239 | Mask = Known.One; |
4240 | } else { |
4241 | // Nothing known. |
4242 | return FirstAnswer; |
4243 | } |
4244 | |
4245 | // Okay, we know that the sign bit in Mask is set. Use CLO to determine |
4246 | // the number of identical bits in the top of the input value. |
4247 | Mask <<= Mask.getBitWidth()-VTBits; |
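     | // e.g. with VTBits == 8 and Known.Zero == 0b11100000, countLeadingOnes()
     | // reports 3: the top three bits are all known to equal the (zero) sign bit.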
4248 | return std::max(FirstAnswer, Mask.countLeadingOnes()); |
4249 | } |
4250 | |
4251 | bool SelectionDAG::isGuaranteedNotToBeUndefOrPoison(SDValue Op, bool PoisonOnly, |
4252 | unsigned Depth) const { |
4253 | // Early out for FREEZE. |
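     | // (FREEZE is defined to yield a fixed, well-defined value even when its
     | // operand is undef or poison, so its result can never be either.)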
4254 | if (Op.getOpcode() == ISD::FREEZE) |
4255 | return true; |
4256 | |
4257 | // TODO: Assume we don't know anything for now. |
4258 | EVT VT = Op.getValueType(); |
4259 | if (VT.isScalableVector()) |
4260 | return false; |
4261 | |
4262 | APInt DemandedElts = VT.isVector() |
4263 | ? APInt::getAllOnesValue(VT.getVectorNumElements()) |
4264 | : APInt(1, 1); |
4265 | return isGuaranteedNotToBeUndefOrPoison(Op, DemandedElts, PoisonOnly, Depth); |
4266 | } |
4267 | |
4268 | bool SelectionDAG::isGuaranteedNotToBeUndefOrPoison(SDValue Op, |
4269 | const APInt &DemandedElts, |
4270 | bool PoisonOnly, |
4271 | unsigned Depth) const { |
4272 | unsigned Opcode = Op.getOpcode(); |
4273 | |
4274 | // Early out for FREEZE. |
4275 | if (Opcode == ISD::FREEZE) |
4276 | return true; |
4277 | |
4278 | if (Depth >= MaxRecursionDepth) |
4279 | return false; // Limit search depth. |
4280 | |
4281 | if (isIntOrFPConstant(Op)) |
4282 | return true; |
4283 | |
4284 | switch (Opcode) { |
4285 | case ISD::UNDEF: |
4286 | return PoisonOnly; |
4287 | |
4288 | // TODO: ISD::BUILD_VECTOR handling |
4289 | |
4290 | // TODO: Search for noundef attributes from library functions. |
4291 | |
4292 | // TODO: Pointers dereferenced by ISD::LOAD/STORE ops are noundef. |
4293 | |
4294 | default: |
4295 | // Allow the target to implement this method for its nodes. |
4296 | if (Opcode >= ISD::BUILTIN_OP_END || Opcode == ISD::INTRINSIC_WO_CHAIN || |
4297 | Opcode == ISD::INTRINSIC_W_CHAIN || Opcode == ISD::INTRINSIC_VOID) |
4298 | return TLI->isGuaranteedNotToBeUndefOrPoisonForTargetNode( |
4299 | Op, DemandedElts, *this, PoisonOnly, Depth); |
4300 | break; |
4301 | } |
4302 | |
4303 | return false; |
4304 | } |
4305 | |
4306 | bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const { |
4307 | if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) || |
4308 | !isa<ConstantSDNode>(Op.getOperand(1))) |
4309 | return false; |
4310 | |
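     | // An OR only acts like an ADD of a constant offset when the constant's set
     | // bits are known to be zero in the other operand, so no carries can occur.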
4311 | if (Op.getOpcode() == ISD::OR && |
4312 | !MaskedValueIsZero(Op.getOperand(0), Op.getConstantOperandAPInt(1))) |
4313 | return false; |
4314 | |
4315 | return true; |
4316 | } |
4317 | |
4318 | bool SelectionDAG::isKnownNeverNaN(SDValue Op, bool SNaN, unsigned Depth) const { |
4319 | // If we're told that NaNs won't happen, assume they won't. |
4320 | if (getTarget().Options.NoNaNsFPMath || Op->getFlags().hasNoNaNs()) |
4321 | return true; |
4322 | |
4323 | if (Depth >= MaxRecursionDepth) |
4324 | return false; // Limit search depth. |
4325 | |
4326 | // TODO: Handle vectors. |
4327 | // If the value is a constant, we can obviously see if it is a NaN or not. |
4328 | if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) { |
4329 | return !C->getValueAPF().isNaN() || |
4330 | (SNaN && !C->getValueAPF().isSignaling()); |
4331 | } |
4332 | |
4333 | unsigned Opcode = Op.getOpcode(); |
4334 | switch (Opcode) { |
4335 | case ISD::FADD: |
4336 | case ISD::FSUB: |
4337 | case ISD::FMUL: |
4338 | case ISD::FDIV: |
4339 | case ISD::FREM: |
4340 | case ISD::FSIN: |
4341 | case ISD::FCOS: { |
4342 | if (SNaN) |
4343 | return true; |
4344 | // TODO: Need isKnownNeverInfinity |
4345 | return false; |
4346 | } |
4347 | case ISD::FCANONICALIZE: |
4348 | case ISD::FEXP: |
4349 | case ISD::FEXP2: |
4350 | case ISD::FTRUNC: |
4351 | case ISD::FFLOOR: |
4352 | case ISD::FCEIL: |
4353 | case ISD::FROUND: |
4354 | case ISD::FROUNDEVEN: |
4355 | case ISD::FRINT: |
4356 | case ISD::FNEARBYINT: { |
4357 | if (SNaN) |
4358 | return true; |
4359 | return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1); |
4360 | } |
4361 | case ISD::FABS: |
4362 | case ISD::FNEG: |
4363 | case ISD::FCOPYSIGN: { |
4364 | return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1); |
4365 | } |
4366 | case ISD::SELECT: |
4367 | return isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) && |
4368 | isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1); |
4369 | case ISD::FP_EXTEND: |
4370 | case ISD::FP_ROUND: { |
4371 | if (SNaN) |
4372 | return true; |
4373 | return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1); |
4374 | } |
4375 | case ISD::SINT_TO_FP: |
4376 | case ISD::UINT_TO_FP: |
4377 | return true; |
4378 | case ISD::FMA: |
4379 | case ISD::FMAD: { |
4380 | if (SNaN) |
4381 | return true; |
4382 | return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) && |
4383 | isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) && |
4384 | isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1); |
4385 | } |
4386 | case ISD::FSQRT: // Needs the operand known positive
4387 | case ISD::FLOG: |
4388 | case ISD::FLOG2: |
4389 | case ISD::FLOG10: |
4390 | case ISD::FPOWI: |
4391 | case ISD::FPOW: { |
4392 | if (SNaN) |
4393 | return true; |
4394 | // TODO: Refine on operand |
4395 | return false; |
4396 | } |
4397 | case ISD::FMINNUM: |
4398 | case ISD::FMAXNUM: { |
4399 | // Only one operand needs to be known not-NaN, since it will be returned
4400 | // if the other ends up being a NaN.
4401 | return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) || |
4402 | isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1); |
4403 | } |
4404 | case ISD::FMINNUM_IEEE: |
4405 | case ISD::FMAXNUM_IEEE: { |
4406 | if (SNaN) |
4407 | return true; |
4408 | // This can return a NaN if either operand is an sNaN, or if both operands |
4409 | // are NaN. |
4410 | return (isKnownNeverNaN(Op.getOperand(0), false, Depth + 1) && |
4411 | isKnownNeverSNaN(Op.getOperand(1), Depth + 1)) || |
4412 | (isKnownNeverNaN(Op.getOperand(1), false, Depth + 1) && |
4413 | isKnownNeverSNaN(Op.getOperand(0), Depth + 1)); |
4414 | } |
4415 | case ISD::FMINIMUM: |
4416 | case ISD::FMAXIMUM: { |
4417 | // TODO: Does this quiet or return the original NaN as-is?
4418 | return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) && |
4419 | isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1); |
4420 | } |
4421 | case ISD::EXTRACT_VECTOR_ELT: { |
4422 | return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1); |
4423 | } |
4424 | default: |
4425 | if (Opcode >= ISD::BUILTIN_OP_END || |
4426 | Opcode == ISD::INTRINSIC_WO_CHAIN || |
4427 | Opcode == ISD::INTRINSIC_W_CHAIN || |
4428 | Opcode == ISD::INTRINSIC_VOID) { |
4429 | return TLI->isKnownNeverNaNForTargetNode(Op, *this, SNaN, Depth); |
4430 | } |
4431 | |
4432 | return false; |
4433 | } |
4434 | } |
4435 | |
4436 | bool SelectionDAG::isKnownNeverZeroFloat(SDValue Op) const { |
4437 | assert(Op.getValueType().isFloatingPoint() &&
4438 | "Floating point type expected");
4439 | |
4440 | // If the value is a constant, we can obviously see if it is a zero or not. |
4441 | // TODO: Add BuildVector support. |
4442 | if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) |
4443 | return !C->isZero(); |
4444 | return false; |
4445 | } |
4446 | |
4447 | bool SelectionDAG::isKnownNeverZero(SDValue Op) const { |
4448 | assert(!Op.getValueType().isFloatingPoint() &&
4449 | "Floating point types unsupported - use isKnownNeverZeroFloat");
4450 | |
4451 | // If the value is a constant, we can obviously see if it is a zero or not. |
4452 | if (ISD::matchUnaryPredicate( |
4453 | Op, [](ConstantSDNode *C) { return !C->isNullValue(); })) |
4454 | return true; |
4455 | |
4456 | // TODO: Recognize more cases here. |
4457 | switch (Op.getOpcode()) { |
4458 | default: break; |
4459 | case ISD::OR: |
4460 | if (isKnownNeverZero(Op.getOperand(1)) || |
4461 | isKnownNeverZero(Op.getOperand(0))) |
4462 | return true; |
4463 | break; |
4464 | } |
4465 | |
4466 | return false; |
4467 | } |
4468 | |
4469 | bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const { |
4470 | // Check the obvious case. |
4471 | if (A == B) return true; |
4472 | |
4473 | // Check for negative and positive zero.
4474 | if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A)) |
4475 | if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B)) |
4476 | if (CA->isZero() && CB->isZero()) return true; |
4477 | |
4478 | // Otherwise they may not be equal. |
4479 | return false; |
4480 | } |
4481 | |
4482 | // FIXME: unify with llvm::haveNoCommonBitsSet. |
4483 | // FIXME: could also handle masked merge pattern (X & ~M) op (Y & M) |
4484 | bool SelectionDAG::haveNoCommonBitsSet(SDValue A, SDValue B) const { |
4485 | assert(A.getValueType() == B.getValueType() &&
4486 | "Values must have the same type");
4487 | return KnownBits::haveNoCommonBitsSet(computeKnownBits(A), |
4488 | computeKnownBits(B)); |
4489 | } |
4490 | |
4491 | static SDValue FoldSTEP_VECTOR(const SDLoc &DL, EVT VT, SDValue Step, |
4492 | SelectionDAG &DAG) { |
4493 | if (cast<ConstantSDNode>(Step)->isNullValue()) |
4494 | return DAG.getConstant(0, DL, VT); |
4495 | |
4496 | return SDValue(); |
4497 | } |
4498 | |
4499 | static SDValue FoldBUILD_VECTOR(const SDLoc &DL, EVT VT, |
4500 | ArrayRef<SDValue> Ops, |
4501 | SelectionDAG &DAG) { |
4502 | int NumOps = Ops.size(); |
4503 | assert(NumOps != 0 && "Can't build an empty vector!");
4504 | assert(!VT.isScalableVector() &&
4505 | "BUILD_VECTOR cannot be used with scalable types");
4506 | assert(VT.getVectorNumElements() == (unsigned)NumOps &&
4507 | "Incorrect element count in BUILD_VECTOR!");
4508 | |
4509 | // BUILD_VECTOR of UNDEFs is UNDEF. |
4510 | if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); })) |
4511 | return DAG.getUNDEF(VT); |
4512 | |
4513 | // BUILD_VECTOR of sequential EXTRACT_VECTOR_ELTs from the same vector of the same type is an identity.
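     | // e.g. (build_vector (extract_vector_elt V, 0), ...,
     | // (extract_vector_elt V, N-1)) folds to V itself when V already has type VT.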
4514 | SDValue IdentitySrc; |
4515 | bool IsIdentity = true; |
4516 | for (int i = 0; i != NumOps; ++i) { |
4517 | if (Ops[i].getOpcode() != ISD::EXTRACT_VECTOR_ELT || |
4518 | Ops[i].getOperand(0).getValueType() != VT || |
4519 | (IdentitySrc && Ops[i].getOperand(0) != IdentitySrc) || |
4520 | !isa<ConstantSDNode>(Ops[i].getOperand(1)) || |
4521 | cast<ConstantSDNode>(Ops[i].getOperand(1))->getAPIntValue() != i) { |
4522 | IsIdentity = false; |
4523 | break; |
4524 | } |
4525 | IdentitySrc = Ops[i].getOperand(0); |
4526 | } |
4527 | if (IsIdentity) |
4528 | return IdentitySrc; |
4529 | |
4530 | return SDValue(); |
4531 | } |
4532 | |
4533 | /// Try to simplify vector concatenation to an input value, undef, or build |
4534 | /// vector. |
4535 | static SDValue foldCONCAT_VECTORS(const SDLoc &DL, EVT VT, |
4536 | ArrayRef<SDValue> Ops, |
4537 | SelectionDAG &DAG) { |
4538 | assert(!Ops.empty() && "Can't concatenate an empty list of vectors!");
4539 | assert(llvm::all_of(Ops,
4540 | [Ops](SDValue Op) {
4541 | return Ops[0].getValueType() == Op.getValueType();
4542 | }) &&
4543 | "Concatenation of vectors with inconsistent value types!");
4544 | assert((Ops[0].getValueType().getVectorElementCount() * Ops.size()) ==
4545 | VT.getVectorElementCount() &&
4546 | "Incorrect element count in vector concatenation!");
4547 | |
4548 | if (Ops.size() == 1) |
4549 | return Ops[0]; |
4550 | |
4551 | // Concat of UNDEFs is UNDEF. |
4552 | if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); })) |
4553 | return DAG.getUNDEF(VT); |
4554 | |
4555 | // Scan the operands and look for extract operations from a single source |
4556 | // that correspond to insertion at the same location via this concatenation: |
4557 | // concat (extract X, 0*subvec_elts), (extract X, 1*subvec_elts), ... |
4558 | SDValue IdentitySrc; |
4559 | bool IsIdentity = true; |
4560 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) { |
4561 | SDValue Op = Ops[i]; |
4562 | unsigned IdentityIndex = i * Op.getValueType().getVectorMinNumElements(); |
4563 | if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR || |
4564 | Op.getOperand(0).getValueType() != VT || |
4565 | (IdentitySrc && Op.getOperand(0) != IdentitySrc) || |
4566 | Op.getConstantOperandVal(1) != IdentityIndex) { |
4567 | IsIdentity = false; |
4568 | break; |
4569 | } |
4570 | assert((!IdentitySrc || IdentitySrc == Op.getOperand(0)) &&
4571 | "Unexpected identity source vector for concat of extracts");
4572 | IdentitySrc = Op.getOperand(0); |
4573 | } |
4574 | if (IsIdentity) { |
4575 | assert(IdentitySrc && "Failed to set source vector of extracts");
4576 | return IdentitySrc; |
4577 | } |
4578 | |
4579 | // The code below this point is only designed to work for fixed width |
4580 | // vectors, so we bail out for now. |
4581 | if (VT.isScalableVector()) |
4582 | return SDValue(); |
4583 | |
4584 | // A CONCAT_VECTOR with all UNDEF/BUILD_VECTOR operands can be |
4585 | // simplified to one big BUILD_VECTOR. |
4586 | // FIXME: Add support for SCALAR_TO_VECTOR as well. |
4587 | EVT SVT = VT.getScalarType(); |
4588 | SmallVector<SDValue, 16> Elts; |
4589 | for (SDValue Op : Ops) { |
4590 | EVT OpVT = Op.getValueType(); |
4591 | if (Op.isUndef()) |
4592 | Elts.append(OpVT.getVectorNumElements(), DAG.getUNDEF(SVT)); |
4593 | else if (Op.getOpcode() == ISD::BUILD_VECTOR) |
4594 | Elts.append(Op->op_begin(), Op->op_end()); |
4595 | else |
4596 | return SDValue(); |
4597 | } |
4598 | |
4599 | // BUILD_VECTOR requires all inputs to be of the same type; find the
4600 | // maximum type and extend them all.
4601 | for (SDValue Op : Elts) |
4602 | SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT); |
4603 | |
4604 | if (SVT.bitsGT(VT.getScalarType())) { |
4605 | for (SDValue &Op : Elts) { |
4606 | if (Op.isUndef()) |
4607 | Op = DAG.getUNDEF(SVT); |
4608 | else |
4609 | Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT) |
4610 | ? DAG.getZExtOrTrunc(Op, DL, SVT) |
4611 | : DAG.getSExtOrTrunc(Op, DL, SVT); |
4612 | } |
4613 | } |
4614 | |
4615 | SDValue V = DAG.getBuildVector(VT, DL, Elts); |
4616 | NewSDValueDbgMsg(V, "New node fold concat vectors: ", &DAG); |
4617 | return V; |
4618 | } |
4619 | |
4620 | /// Gets or creates the specified node. |
4621 | SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) { |
4622 | FoldingSetNodeID ID; |
4623 | AddNodeIDNode(ID, Opcode, getVTList(VT), None); |
4624 | void *IP = nullptr; |
4625 | if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) |
4626 | return SDValue(E, 0); |
4627 | |
4628 | auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), |
4629 | getVTList(VT)); |
4630 | CSEMap.InsertNode(N, IP); |
4631 | |
4632 | InsertNode(N); |
4633 | SDValue V = SDValue(N, 0); |
4634 | NewSDValueDbgMsg(V, "Creating new node: ", this); |
4635 | return V; |
4636 | } |
4637 | |
4638 | SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, |
4639 | SDValue Operand) { |
4640 | SDNodeFlags Flags; |
4641 | if (Inserter) |
4642 | Flags = Inserter->getFlags(); |
4643 | return getNode(Opcode, DL, VT, Operand, Flags); |
4644 | } |
4645 | |
4646 | SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, |
4647 | SDValue Operand, const SDNodeFlags Flags) { |
4648 | assert(Operand.getOpcode() != ISD::DELETED_NODE &&
4649 | "Operand is DELETED_NODE!");
4650 | // Constant fold unary operations with an integer constant operand. Even |
4651 | // opaque constant will be folded, because the folding of unary operations |
4652 | // doesn't create new constants with different values. Nevertheless, the |
4653 | // opaque flag is preserved during folding to prevent future folding with |
4654 | // other constants. |
4655 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand)) { |
4656 | const APInt &Val = C->getAPIntValue(); |
4657 | switch (Opcode) { |
4658 | default: break; |
4659 | case ISD::SIGN_EXTEND: |
4660 | return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT, |
4661 | C->isTargetOpcode(), C->isOpaque()); |
4662 | case ISD::TRUNCATE: |
4663 | if (C->isOpaque()) |
4664 | break; |
4665 | LLVM_FALLTHROUGH;
4666 | case ISD::ZERO_EXTEND: |
4667 | return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT, |
4668 | C->isTargetOpcode(), C->isOpaque()); |
4669 | case ISD::ANY_EXTEND: |
4670 | // Some targets like RISCV prefer to sign extend some types. |
4671 | if (TLI->isSExtCheaperThanZExt(Operand.getValueType(), VT)) |
4672 | return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT, |
4673 | C->isTargetOpcode(), C->isOpaque()); |
4674 | return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT, |
4675 | C->isTargetOpcode(), C->isOpaque()); |
4676 | case ISD::UINT_TO_FP: |
4677 | case ISD::SINT_TO_FP: { |
4678 | APFloat apf(EVTToAPFloatSemantics(VT), |
4679 | APInt::getNullValue(VT.getSizeInBits())); |
4680 | (void)apf.convertFromAPInt(Val, |
4681 | Opcode==ISD::SINT_TO_FP, |
4682 | APFloat::rmNearestTiesToEven); |
4683 | return getConstantFP(apf, DL, VT); |
4684 | } |
4685 | case ISD::BITCAST: |
4686 | if (VT == MVT::f16 && C->getValueType(0) == MVT::i16) |
4687 | return getConstantFP(APFloat(APFloat::IEEEhalf(), Val), DL, VT); |
4688 | if (VT == MVT::f32 && C->getValueType(0) == MVT::i32) |
4689 | return getConstantFP(APFloat(APFloat::IEEEsingle(), Val), DL, VT); |
4690 | if (VT == MVT::f64 && C->getValueType(0) == MVT::i64) |
4691 | return getConstantFP(APFloat(APFloat::IEEEdouble(), Val), DL, VT); |
4692 | if (VT == MVT::f128 && C->getValueType(0) == MVT::i128) |
4693 | return getConstantFP(APFloat(APFloat::IEEEquad(), Val), DL, VT); |
4694 | break; |
4695 | case ISD::ABS: |
4696 | return getConstant(Val.abs(), DL, VT, C->isTargetOpcode(), |
4697 | C->isOpaque()); |
4698 | case ISD::BITREVERSE: |
4699 | return getConstant(Val.reverseBits(), DL, VT, C->isTargetOpcode(), |
4700 | C->isOpaque()); |
4701 | case ISD::BSWAP: |
4702 | return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(), |
4703 | C->isOpaque()); |
4704 | case ISD::CTPOP: |
4705 | return getConstant(Val.countPopulation(), DL, VT, C->isTargetOpcode(), |
4706 | C->isOpaque()); |
4707 | case ISD::CTLZ: |
4708 | case ISD::CTLZ_ZERO_UNDEF: |
4709 | return getConstant(Val.countLeadingZeros(), DL, VT, C->isTargetOpcode(), |
4710 | C->isOpaque()); |
4711 | case ISD::CTTZ: |
4712 | case ISD::CTTZ_ZERO_UNDEF: |
4713 | return getConstant(Val.countTrailingZeros(), DL, VT, C->isTargetOpcode(), |
4714 | C->isOpaque()); |
4715 | case ISD::FP16_TO_FP: { |
4716 | bool Ignored; |
4717 | APFloat FPV(APFloat::IEEEhalf(), |
4718 | (Val.getBitWidth() == 16) ? Val : Val.trunc(16)); |
4719 | |
4720 | // This can return overflow, underflow, or inexact; we don't care. |
4721 | // FIXME need to be more flexible about rounding mode. |
4722 | (void)FPV.convert(EVTToAPFloatSemantics(VT), |
4723 | APFloat::rmNearestTiesToEven, &Ignored); |
4724 | return getConstantFP(FPV, DL, VT); |
4725 | } |
4726 | case ISD::STEP_VECTOR: { |
4727 | if (SDValue V = FoldSTEP_VECTOR(DL, VT, Operand, *this)) |
4728 | return V; |
4729 | break; |
4730 | } |
4731 | } |
4732 | } |
4733 | |
4734 | // Constant fold unary operations with a floating point constant operand. |
4735 | if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand)) { |
4736 | APFloat V = C->getValueAPF(); // make copy |
4737 | switch (Opcode) { |
4738 | case ISD::FNEG: |
4739 | V.changeSign(); |
4740 | return getConstantFP(V, DL, VT); |
4741 | case ISD::FABS: |
4742 | V.clearSign(); |
4743 | return getConstantFP(V, DL, VT); |
4744 | case ISD::FCEIL: { |
4745 | APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive); |
4746 | if (fs == APFloat::opOK || fs == APFloat::opInexact) |
4747 | return getConstantFP(V, DL, VT); |
4748 | break; |
4749 | } |
4750 | case ISD::FTRUNC: { |
4751 | APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero); |
4752 | if (fs == APFloat::opOK || fs == APFloat::opInexact) |
4753 | return getConstantFP(V, DL, VT); |
4754 | break; |
4755 | } |
4756 | case ISD::FFLOOR: { |
4757 | APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative); |
4758 | if (fs == APFloat::opOK || fs == APFloat::opInexact) |
4759 | return getConstantFP(V, DL, VT); |
4760 | break; |
4761 | } |
4762 | case ISD::FP_EXTEND: { |
4763 | bool ignored; |
4764 | // This can return overflow, underflow, or inexact; we don't care. |
4765 | // FIXME need to be more flexible about rounding mode. |
4766 | (void)V.convert(EVTToAPFloatSemantics(VT), |
4767 | APFloat::rmNearestTiesToEven, &ignored); |
4768 | return getConstantFP(V, DL, VT); |
4769 | } |
4770 | case ISD::FP_TO_SINT: |
4771 | case ISD::FP_TO_UINT: { |
4772 | bool ignored; |
4773 | APSInt IntVal(VT.getSizeInBits(), Opcode == ISD::FP_TO_UINT); |
4774 | // FIXME need to be more flexible about rounding mode. |
4775 | APFloat::opStatus s = |
4776 | V.convertToInteger(IntVal, APFloat::rmTowardZero, &ignored); |
4777 | if (s == APFloat::opInvalidOp) // inexact is OK, in fact usual |
4778 | break; |
4779 | return getConstant(IntVal, DL, VT); |
4780 | } |
4781 | case ISD::BITCAST: |
4782 | if (VT == MVT::i16 && C->getValueType(0) == MVT::f16) |
4783 | return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT); |
4784 | if (VT == MVT::i16 && C->getValueType(0) == MVT::bf16) |
4785 | return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT); |
4786 | if (VT == MVT::i32 && C->getValueType(0) == MVT::f32) |
4787 | return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL, VT); |
4788 | if (VT == MVT::i64 && C->getValueType(0) == MVT::f64) |
4789 | return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT); |
4790 | break; |
4791 | case ISD::FP_TO_FP16: { |
4792 | bool Ignored; |
4793 | // This can return overflow, underflow, or inexact; we don't care. |
4794 | // FIXME need to be more flexible about rounding mode. |
4795 | (void)V.convert(APFloat::IEEEhalf(), |
4796 | APFloat::rmNearestTiesToEven, &Ignored); |
4797 | return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT); |
4798 | } |
4799 | } |
4800 | } |
4801 | |
4802 | // Constant fold unary operations with a vector integer or float operand. |
4803 | switch (Opcode) { |
4804 | default: |
4805 | // FIXME: Entirely reasonable to perform folding of other unary |
4806 | // operations here as the need arises. |
4807 | break; |
4808 | case ISD::FNEG: |
4809 | case ISD::FABS: |
4810 | case ISD::FCEIL: |
4811 | case ISD::FTRUNC: |
4812 | case ISD::FFLOOR: |
4813 | case ISD::FP_EXTEND: |
4814 | case ISD::FP_TO_SINT: |
4815 | case ISD::FP_TO_UINT: |
4816 | case ISD::TRUNCATE: |
4817 | case ISD::ANY_EXTEND: |
4818 | case ISD::ZERO_EXTEND: |
4819 | case ISD::SIGN_EXTEND: |
4820 | case ISD::UINT_TO_FP: |
4821 | case ISD::SINT_TO_FP: |
4822 | case ISD::ABS: |
4823 | case ISD::BITREVERSE: |
4824 | case ISD::BSWAP: |
4825 | case ISD::CTLZ: |
4826 | case ISD::CTLZ_ZERO_UNDEF: |
4827 | case ISD::CTTZ: |
4828 | case ISD::CTTZ_ZERO_UNDEF: |
4829 | case ISD::CTPOP: { |
4830 | SDValue Ops = {Operand}; |
4831 | if (SDValue Fold = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops)) |
4832 | return Fold; |
4833 | } |
4834 | } |
4835 | |
4836 | unsigned OpOpcode = Operand.getNode()->getOpcode(); |
4837 | switch (Opcode) { |
4838 | case ISD::STEP_VECTOR: |
4839 | assert(VT.isScalableVector() &&
4840 | "STEP_VECTOR can only be used with scalable types");
4841 | assert(OpOpcode == ISD::TargetConstant &&
4842 | VT.getVectorElementType() == Operand.getValueType() &&
4843 | "Unexpected step operand");
4844 | break; |
4845 | case ISD::FREEZE: |
4846 | assert(VT == Operand.getValueType() && "Unexpected VT!");
4847 | break; |
4848 | case ISD::TokenFactor: |
4849 | case ISD::MERGE_VALUES: |
4850 | case ISD::CONCAT_VECTORS: |
4851 | return Operand; // Factor, merge or concat of one node? No need. |
4852 | case ISD::BUILD_VECTOR: { |
4853 | // Attempt to simplify BUILD_VECTOR. |
4854 | SDValue Ops[] = {Operand}; |
4855 | if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this)) |
4856 | return V; |
4857 | break; |
4858 | } |
4859 | case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
4860 | case ISD::FP_EXTEND: |
4861 | assert(VT.isFloatingPoint() &&
4862 | Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
4863 | if (Operand.getValueType() == VT) return Operand; // noop conversion.
4864 | assert((!VT.isVector() ||
4865 | VT.getVectorElementCount() ==
4866 | Operand.getValueType().getVectorElementCount()) &&
4867 | "Vector element count mismatch!");
4868 | assert(Operand.getValueType().bitsLT(VT) &&
4869 | "Invalid fpext node, dst < src!");
4870 | if (Operand.isUndef()) |
4871 | return getUNDEF(VT); |
4872 | break; |
4873 | case ISD::FP_TO_SINT: |
4874 | case ISD::FP_TO_UINT: |
4875 | if (Operand.isUndef()) |
4876 | return getUNDEF(VT); |
4877 | break; |
4878 | case ISD::SINT_TO_FP: |
4879 | case ISD::UINT_TO_FP: |
4880 | // [us]itofp(undef) = 0, because the result value is bounded. |
4881 | if (Operand.isUndef()) |
4882 | return getConstantFP(0.0, DL, VT); |
4883 | break; |
4884 | case ISD::SIGN_EXTEND: |
4885 | assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4886 | "Invalid SIGN_EXTEND!");
4887 | assert(VT.isVector() == Operand.getValueType().isVector() &&
4888 | "SIGN_EXTEND result type should be vector iff the operand "
4889 | "type is vector!");
4890 | if (Operand.getValueType() == VT) return Operand; // noop extension
4891 | assert((!VT.isVector() ||
4892 | VT.getVectorElementCount() ==
4893 | Operand.getValueType().getVectorElementCount()) &&
4894 | "Vector element count mismatch!");
4895 | assert(Operand.getValueType().bitsLT(VT) &&
4896 | "Invalid sext node, dst < src!");
4897 | if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND) |
4898 | return getNode(OpOpcode, DL, VT, Operand.getOperand(0)); |
4899 | if (OpOpcode == ISD::UNDEF) |
4900 | // sext(undef) = 0, because the top bits will all be the same. |
4901 | return getConstant(0, DL, VT); |
4902 | break; |
4903 | case ISD::ZERO_EXTEND: |
4904 | assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4905 | "Invalid ZERO_EXTEND!");
4906 | assert(VT.isVector() == Operand.getValueType().isVector() &&
4907 | "ZERO_EXTEND result type should be vector iff the operand "
4908 | "type is vector!");
4909 | if (Operand.getValueType() == VT) return Operand; // noop extension
4910 | assert((!VT.isVector() ||
4911 | VT.getVectorElementCount() ==
4912 | Operand.getValueType().getVectorElementCount()) &&
4913 | "Vector element count mismatch!");
4914 | assert(Operand.getValueType().bitsLT(VT) &&
4915 | "Invalid zext node, dst < src!");
4916 | if (OpOpcode == ISD::ZERO_EXTEND) // (zext (zext x)) -> (zext x) |
4917 | return getNode(ISD::ZERO_EXTEND, DL, VT, Operand.getOperand(0)); |
4918 | if (OpOpcode == ISD::UNDEF) |
4919 | // zext(undef) = 0, because the top bits will be zero. |
4920 | return getConstant(0, DL, VT); |
4921 | break; |
4922 | case ISD::ANY_EXTEND: |
4923 | assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4924 | "Invalid ANY_EXTEND!");
4925 | assert(VT.isVector() == Operand.getValueType().isVector() &&
4926 | "ANY_EXTEND result type should be vector iff the operand "
4927 | "type is vector!");
4928 | if (Operand.getValueType() == VT) return Operand; // noop extension
4929 | assert((!VT.isVector() ||
4930 | VT.getVectorElementCount() ==
4931 | Operand.getValueType().getVectorElementCount()) &&
4932 | "Vector element count mismatch!");
4933 | assert(Operand.getValueType().bitsLT(VT) &&
4934 | "Invalid anyext node, dst < src!");
4935 | |
4936 | if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND || |
4937 | OpOpcode == ISD::ANY_EXTEND) |
4938 | // (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x) |
4939 | return getNode(OpOpcode, DL, VT, Operand.getOperand(0)); |
4940 | if (OpOpcode == ISD::UNDEF) |
4941 | return getUNDEF(VT); |
4942 | |
4943 | // (ext (trunc x)) -> x |
4944 | if (OpOpcode == ISD::TRUNCATE) { |
4945 | SDValue OpOp = Operand.getOperand(0); |
4946 | if (OpOp.getValueType() == VT) { |
4947 | transferDbgValues(Operand, OpOp); |
4948 | return OpOp; |
4949 | } |
4950 | } |
4951 | break; |
4952 | case ISD::TRUNCATE: |
4953 | assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4954 | "Invalid TRUNCATE!");
4955 | assert(VT.isVector() == Operand.getValueType().isVector() &&
4956 | "TRUNCATE result type should be vector iff the operand "
4957 | "type is vector!");
4958 | if (Operand.getValueType() == VT) return Operand; // noop truncate
4959 | assert((!VT.isVector() ||
4960 | VT.getVectorElementCount() ==
4961 | Operand.getValueType().getVectorElementCount()) &&
4962 | "Vector element count mismatch!");
4963 | assert(Operand.getValueType().bitsGT(VT) &&
4964 | "Invalid truncate node, src < dst!");
4965 | if (OpOpcode == ISD::TRUNCATE) |
4966 | return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0)); |
4967 | if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND || |
4968 | OpOpcode == ISD::ANY_EXTEND) { |
4969 | // If the source is smaller than the dest, we still need an extend. |
4970 | if (Operand.getOperand(0).getValueType().getScalarType() |
4971 | .bitsLT(VT.getScalarType())) |
4972 | return getNode(OpOpcode, DL, VT, Operand.getOperand(0)); |
4973 | if (Operand.getOperand(0).getValueType().bitsGT(VT)) |
4974 | return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0)); |
4975 | return Operand.getOperand(0); |
4976 | } |
4977 | if (OpOpcode == ISD::UNDEF) |
4978 | return getUNDEF(VT); |
4979 | break; |
4980 | case ISD::ANY_EXTEND_VECTOR_INREG: |
4981 | case ISD::ZERO_EXTEND_VECTOR_INREG: |
4982 | case ISD::SIGN_EXTEND_VECTOR_INREG: |
4983 | assert(VT.isVector() && "This DAG node is restricted to vector types.");
4984 | assert(Operand.getValueType().bitsLE(VT) &&
4985 | "The input must be the same size or smaller than the result.");
4986 | assert(VT.getVectorMinNumElements() <
4987 | Operand.getValueType().getVectorMinNumElements() &&
4988 | "The destination vector type must have fewer lanes than the input.");
4989 | break; |
4990 | case ISD::ABS: |
4991 | assert(VT.isInteger() && VT == Operand.getValueType() &&
4992 | "Invalid ABS!");
4993 | if (OpOpcode == ISD::UNDEF) |
4994 | return getUNDEF(VT); |
4995 | break; |
4996 | case ISD::BSWAP: |
4997 | assert(VT.isInteger() && VT == Operand.getValueType() &&
4998 | "Invalid BSWAP!");
4999 | assert((VT.getScalarSizeInBits() % 16 == 0) &&
5000 | "BSWAP types must be a multiple of 16 bits!");
5001 | if (OpOpcode == ISD::UNDEF) |
5002 | return getUNDEF(VT); |
5003 | break; |
5004 | case ISD::BITREVERSE: |
5005 | assert(VT.isInteger() && VT == Operand.getValueType() &&
5006 | "Invalid BITREVERSE!");
5007 | if (OpOpcode == ISD::UNDEF) |
5008 | return getUNDEF(VT); |
5009 | break; |
5010 | case ISD::BITCAST: |
5011 | // Basic sanity checking. |
5012 | assert(VT.getSizeInBits() == Operand.getValueSizeInBits() &&
5013 | "Cannot BITCAST between types of different sizes!");
5014 | if (VT == Operand.getValueType()) return Operand; // noop conversion. |
5015 | if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x) |
5016 | return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0)); |
5017 | if (OpOpcode == ISD::UNDEF) |
5018 | return getUNDEF(VT); |
5019 | break; |
5020 | case ISD::SCALAR_TO_VECTOR: |
5021 | assert(VT.isVector() && !Operand.getValueType().isVector() &&
5022 | (VT.getVectorElementType() == Operand.getValueType() ||
5023 | (VT.getVectorElementType().isInteger() &&
5024 | Operand.getValueType().isInteger() &&
5025 | VT.getVectorElementType().bitsLE(Operand.getValueType()))) &&
5026 | "Illegal SCALAR_TO_VECTOR node!");
5027 | if (OpOpcode == ISD::UNDEF) |
5028 | return getUNDEF(VT); |
5029 | // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined. |
5030 | if (OpOpcode == ISD::EXTRACT_VECTOR_ELT && |
5031 | isa<ConstantSDNode>(Operand.getOperand(1)) && |
5032 | Operand.getConstantOperandVal(1) == 0 && |
5033 | Operand.getOperand(0).getValueType() == VT) |
5034 | return Operand.getOperand(0); |
5035 | break; |
5036 | case ISD::FNEG: |
5037 | // Negation of an unknown bag of bits is still completely undefined. |
5038 | if (OpOpcode == ISD::UNDEF) |
5039 | return getUNDEF(VT); |
5040 | |
5041 | if (OpOpcode == ISD::FNEG) // --X -> X |
5042 | return Operand.getOperand(0); |
5043 | break; |
5044 | case ISD::FABS: |
5045 | if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X) |
5046 | return getNode(ISD::FABS, DL, VT, Operand.getOperand(0)); |
5047 | break; |
5048 | case ISD::VSCALE: |
5049 | assert(VT == Operand.getValueType() && "Unexpected VT!");
5050 | break; |
5051 | case ISD::CTPOP: |
5052 | if (Operand.getValueType().getScalarType() == MVT::i1) |
5053 | return Operand; |
5054 | break; |
5055 | case ISD::CTLZ: |
5056 | case ISD::CTTZ: |
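     | // On i1, ctlz(x) == cttz(x) == !x: the count is 1 when x == 0 and 0 when
     | // x == 1, so the node folds to a NOT.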
5057 | if (Operand.getValueType().getScalarType() == MVT::i1) |
5058 | return getNOT(DL, Operand, Operand.getValueType()); |
5059 | break; |
5060 | case ISD::VECREDUCE_SMIN: |
5061 | case ISD::VECREDUCE_UMAX: |
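     | // On i1 lanes the value 1 is both the unsigned max and the signed min (-1),
     | // so either reduction is just "is any lane set", i.e. VECREDUCE_OR.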
5062 | if (Operand.getValueType().getScalarType() == MVT::i1) |
5063 | return getNode(ISD::VECREDUCE_OR, DL, VT, Operand); |
5064 | break; |
5065 | case ISD::VECREDUCE_SMAX: |
5066 | case ISD::VECREDUCE_UMIN: |
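     | // Dually, 0 is the unsigned min and the signed max on i1, so these reduce
     | // to "are all lanes set", i.e. VECREDUCE_AND.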
5067 | if (Operand.getValueType().getScalarType() == MVT::i1) |
5068 | return getNode(ISD::VECREDUCE_AND, DL, VT, Operand); |
5069 | break; |
5070 | } |
5071 | |
5072 | SDNode *N; |
5073 | SDVTList VTs = getVTList(VT); |
5074 | SDValue Ops[] = {Operand}; |
5075 | if (VT != MVT::Glue) { // Don't CSE flag producing nodes |
5076 | FoldingSetNodeID ID; |
5077 | AddNodeIDNode(ID, Opcode, VTs, Ops); |
5078 | void *IP = nullptr; |
5079 | if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { |
5080 | E->intersectFlagsWith(Flags); |
5081 | return SDValue(E, 0); |
5082 | } |
5083 | |
5084 | N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); |
5085 | N->setFlags(Flags); |
5086 | createOperands(N, Ops); |
5087 | CSEMap.InsertNode(N, IP); |
5088 | } else { |
5089 | N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); |
5090 | createOperands(N, Ops); |
5091 | } |
5092 | |
5093 | InsertNode(N); |
5094 | SDValue V = SDValue(N, 0); |
5095 | NewSDValueDbgMsg(V, "Creating new node: ", this); |
5096 | return V; |
5097 | } |
5098 | |
5099 | static llvm::Optional<APInt> FoldValue(unsigned Opcode, const APInt &C1, |
5100 | const APInt &C2) { |
5101 | switch (Opcode) { |
5102 | case ISD::ADD: return C1 + C2; |
5103 | case ISD::SUB: return C1 - C2; |
5104 | case ISD::MUL: return C1 * C2; |
5105 | case ISD::AND: return C1 & C2; |
5106 | case ISD::OR: return C1 | C2; |
5107 | case ISD::XOR: return C1 ^ C2; |
5108 | case ISD::SHL: return C1 << C2; |
5109 | case ISD::SRL: return C1.lshr(C2); |
5110 | case ISD::SRA: return C1.ashr(C2); |
5111 | case ISD::ROTL: return C1.rotl(C2); |
5112 | case ISD::ROTR: return C1.rotr(C2); |
5113 | case ISD::SMIN: return C1.sle(C2) ? C1 : C2; |
5114 | case ISD::SMAX: return C1.sge(C2) ? C1 : C2; |
5115 | case ISD::UMIN: return C1.ule(C2) ? C1 : C2; |
5116 | case ISD::UMAX: return C1.uge(C2) ? C1 : C2; |
5117 | case ISD::SADDSAT: return C1.sadd_sat(C2); |
5118 | case ISD::UADDSAT: return C1.uadd_sat(C2); |
5119 | case ISD::SSUBSAT: return C1.ssub_sat(C2); |
5120 | case ISD::USUBSAT: return C1.usub_sat(C2); |
5121 | case ISD::UDIV: |
5122 | if (!C2.getBoolValue()) |
5123 | break; |
5124 | return C1.udiv(C2); |
5125 | case ISD::UREM: |
5126 | if (!C2.getBoolValue()) |
5127 | break; |
5128 | return C1.urem(C2); |
5129 | case ISD::SDIV: |
5130 | if (!C2.getBoolValue()) |
5131 | break; |
5132 | return C1.sdiv(C2); |
5133 | case ISD::SREM: |
5134 | if (!C2.getBoolValue()) |
5135 | break; |
5136 | return C1.srem(C2); |
5137 | case ISD::MULHS: { |
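     | // MULHS/MULHU produce the high half of the double-width product,
     | // e.g. for i8: MULHS(-128, 2) = high byte of -256 (0xFF00) = 0xFF (-1).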
5138 | unsigned FullWidth = C1.getBitWidth() * 2; |
5139 | APInt C1Ext = C1.sext(FullWidth); |
5140 | APInt C2Ext = C2.sext(FullWidth); |
5141 | return (C1Ext * C2Ext).extractBits(C1.getBitWidth(), C1.getBitWidth()); |
5142 | } |
5143 | case ISD::MULHU: { |
5144 | unsigned FullWidth = C1.getBitWidth() * 2; |
5145 | APInt C1Ext = C1.zext(FullWidth); |
5146 | APInt C2Ext = C2.zext(FullWidth); |
5147 | return (C1Ext * C2Ext).extractBits(C1.getBitWidth(), C1.getBitWidth()); |
5148 | } |
5149 | } |
5150 | return llvm::None; |
5151 | } |
5152 | |
5153 | SDValue SelectionDAG::FoldSymbolOffset(unsigned Opcode, EVT VT, |
5154 | const GlobalAddressSDNode *GA, |
5155 | const SDNode *N2) { |
5156 | if (GA->getOpcode() != ISD::GlobalAddress) |
5157 | return SDValue(); |
5158 | if (!TLI->isOffsetFoldingLegal(GA)) |
5159 | return SDValue(); |
5160 | auto *C2 = dyn_cast<ConstantSDNode>(N2); |
5161 | if (!C2) |
5162 | return SDValue(); |
5163 | int64_t Offset = C2->getSExtValue(); |
5164 | switch (Opcode) { |
5165 | case ISD::ADD: break; |
5166 | case ISD::SUB: Offset = -uint64_t(Offset); break; |
5167 | default: return SDValue(); |
5168 | } |
5169 | return getGlobalAddress(GA->getGlobal(), SDLoc(C2), VT, |
5170 | GA->getOffset() + uint64_t(Offset)); |
5171 | } |
5172 | |
5173 | bool SelectionDAG::isUndef(unsigned Opcode, ArrayRef<SDValue> Ops) { |
5174 | switch (Opcode) { |
5175 | case ISD::SDIV: |
5176 | case ISD::UDIV: |
5177 | case ISD::SREM: |
5178 | case ISD::UREM: { |
5179 | // If a divisor is zero/undef or any element of a divisor vector is |
5180 | // zero/undef, the whole op is undef. |
5181 | assert(Ops.size() == 2 && "Div/rem should have 2 operands");
5182 | SDValue Divisor = Ops[1]; |
5183 | if (Divisor.isUndef() || isNullConstant(Divisor)) |
5184 | return true; |
5185 | |
5186 | return ISD::isBuildVectorOfConstantSDNodes(Divisor.getNode()) && |
5187 | llvm::any_of(Divisor->op_values(), |
5188 | [](SDValue V) { return V.isUndef() || |
5189 | isNullConstant(V); }); |
5190 | // TODO: Handle signed overflow. |
5191 | } |
5192 | // TODO: Handle oversized shifts. |
5193 | default: |
5194 | return false; |
5195 | } |
5196 | } |
5197 | |
5198 | SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, |
5199 | EVT VT, ArrayRef<SDValue> Ops) { |
5200 | // If the opcode is a target-specific ISD node, there's nothing we can |
5201 | // do here and the operand rules may not line up with the below, so |
5202 | // bail early. |
5203 | // We can't create a scalar CONCAT_VECTORS so skip it. It will break |
5204 | // for concats involving SPLAT_VECTOR. Concats of BUILD_VECTORS are handled by |
5205 | // foldCONCAT_VECTORS in getNode before this is called. |
5206 | if (Opcode >= ISD::BUILTIN_OP_END || Opcode == ISD::CONCAT_VECTORS) |
5207 | return SDValue(); |
5208 | |
5209 | // For now, the array Ops should only contain two values. |
5210 | // This enforcement will be removed once this function is merged with |
5211 | // FoldConstantVectorArithmetic.
5212 | if (Ops.size() != 2) |
5213 | return SDValue(); |
5214 | |
5215 | if (isUndef(Opcode, Ops)) |
5216 | return getUNDEF(VT); |
5217 | |
5218 | SDNode *N1 = Ops[0].getNode(); |
5219 | SDNode *N2 = Ops[1].getNode(); |
5220 | |
5221 | // Handle the case of two scalars. |
5222 | if (auto *C1 = dyn_cast<ConstantSDNode>(N1)) { |
5223 | if (auto *C2 = dyn_cast<ConstantSDNode>(N2)) { |
5224 | if (C1->isOpaque() || C2->isOpaque()) |
5225 | return SDValue(); |
5226 | |
5227 | Optional<APInt> FoldAttempt = |
5228 | FoldValue(Opcode, C1->getAPIntValue(), C2->getAPIntValue()); |
5229 | if (!FoldAttempt) |
5230 | return SDValue(); |
5231 | |
5232 | SDValue Folded = getConstant(FoldAttempt.getValue(), DL, VT); |
5233 | assert((!Folded || !VT.isVector()) &&
5234 | "Can't fold vector ops with scalar operands");
5235 | return Folded; |
5236 | } |
5237 | } |
5238 | |
5239 | // fold (add Sym, c) -> Sym+c |
5240 | if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N1)) |
5241 | return FoldSymbolOffset(Opcode, VT, GA, N2); |
5242 | if (TLI->isCommutativeBinOp(Opcode)) |
5243 | if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N2)) |
5244 | return FoldSymbolOffset(Opcode, VT, GA, N1); |
5245 | |
5246 | // For fixed width vectors, extract each constant element and fold them |
5247 | // individually. Either input may be an undef value. |
5248 | bool IsBVOrSV1 = N1->getOpcode() == ISD::BUILD_VECTOR || |
5249 | N1->getOpcode() == ISD::SPLAT_VECTOR; |
5250 | if (!IsBVOrSV1 && !N1->isUndef()) |
5251 | return SDValue(); |
5252 | bool IsBVOrSV2 = N2->getOpcode() == ISD::BUILD_VECTOR || |
5253 | N2->getOpcode() == ISD::SPLAT_VECTOR; |
5254 | if (!IsBVOrSV2 && !N2->isUndef()) |
5255 | return SDValue(); |
5256 | // If both operands are undef, that's handled the same way as scalars. |
5257 | if (!IsBVOrSV1 && !IsBVOrSV2) |
5258 | return SDValue(); |
5259 | |
5260 | EVT SVT = VT.getScalarType(); |
5261 | EVT LegalSVT = SVT; |
5262 | if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) { |
5263 | LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT); |
5264 | if (LegalSVT.bitsLT(SVT)) |
5265 | return SDValue(); |
5266 | } |
5267 | |
5268 | SmallVector<SDValue, 4> Outputs; |
5269 | unsigned NumOps = 0; |
5270 | if (IsBVOrSV1) |
5271 | NumOps = std::max(NumOps, N1->getNumOperands()); |
5272 | if (IsBVOrSV2) |
5273 | NumOps = std::max(NumOps, N2->getNumOperands()); |
5274 | assert(NumOps != 0 && "Expected non-zero operands");
5275 | // Scalable vectors should only be SPLAT_VECTOR or UNDEF here. We only need |
5276 | // one iteration for that. |
5277 | assert((!VT.isScalableVector() || NumOps == 1) &&
5278 | "Scalable vector should only have one scalar");
5279 | |
5280 | for (unsigned I = 0; I != NumOps; ++I) { |
5281 | // We can have a fixed length SPLAT_VECTOR and a BUILD_VECTOR so we need |
5282 | // to use operand 0 of the SPLAT_VECTOR for each fixed element. |
5283 | SDValue V1; |
5284 | if (N1->getOpcode() == ISD::BUILD_VECTOR) |
5285 | V1 = N1->getOperand(I); |
5286 | else if (N1->getOpcode() == ISD::SPLAT_VECTOR) |
5287 | V1 = N1->getOperand(0); |
5288 | else |
5289 | V1 = getUNDEF(SVT); |
5290 | |
5291 | SDValue V2; |
5292 | if (N2->getOpcode() == ISD::BUILD_VECTOR) |
5293 | V2 = N2->getOperand(I); |
5294 | else if (N2->getOpcode() == ISD::SPLAT_VECTOR) |
5295 | V2 = N2->getOperand(0); |
5296 | else |
5297 | V2 = getUNDEF(SVT); |
5298 | |
5299 | if (SVT.isInteger()) { |
5300 | if (V1.getValueType().bitsGT(SVT)) |
5301 | V1 = getNode(ISD::TRUNCATE, DL, SVT, V1); |
5302 | if (V2.getValueType().bitsGT(SVT)) |
5303 | V2 = getNode(ISD::TRUNCATE, DL, SVT, V2); |
5304 | } |
5305 | |
5306 | if (V1.getValueType() != SVT || V2.getValueType() != SVT) |
5307 | return SDValue(); |
5308 | |
5309 | // Fold one vector element. |
5310 | SDValue ScalarResult = getNode(Opcode, DL, SVT, V1, V2); |
5311 | if (LegalSVT != SVT) |
5312 | ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult); |
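     | // e.g. when i8 is illegal and i32 is the legal scalar type, an i8 fold
     | // result is sign-extended to i32 before being placed in the build vector.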
5313 | |
5314 | // Scalar folding only succeeded if the result is a constant or UNDEF. |
5315 | if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant && |
5316 | ScalarResult.getOpcode() != ISD::ConstantFP) |
5317 | return SDValue(); |
5318 | Outputs.push_back(ScalarResult); |
5319 | } |
5320 | |
5321 | if (N1->getOpcode() == ISD::BUILD_VECTOR || |
5322 | N2->getOpcode() == ISD::BUILD_VECTOR) { |
5323 | assert(VT.getVectorNumElements() == Outputs.size() &&
5324 | "Vector size mismatch!");
5325 | |
5326 | // Build a big vector out of the scalar elements we generated. |
5327 | return getBuildVector(VT, SDLoc(), Outputs); |
5328 | } |
5329 | |
5330 | assert((N1->getOpcode() == ISD::SPLAT_VECTOR ||
5331 | N2->getOpcode() == ISD::SPLAT_VECTOR) &&
5332 | "One operand should be a splat vector");
5333 |
5334 | assert(Outputs.size() == 1 && "Vector size mismatch!");
5335 | return getSplatVector(VT, SDLoc(), Outputs[0]); |
5336 | } |
5337 | |
5338 | // TODO: Merge with FoldConstantArithmetic |
5339 | SDValue SelectionDAG::FoldConstantVectorArithmetic(unsigned Opcode, |
5340 | const SDLoc &DL, EVT VT, |
5341 | ArrayRef<SDValue> Ops, |
5342 | const SDNodeFlags Flags) { |
5343 | // If the opcode is a target-specific ISD node, there's nothing we can |
5344 | // do here and the operand rules may not line up with the below, so |
5345 | // bail early. |
5346 | if (Opcode >= ISD::BUILTIN_OP_END) |
5347 | return SDValue(); |
5348 | |
5349 | if (isUndef(Opcode, Ops)) |
5350 | return getUNDEF(VT); |
5351 | |
5352 | // We can only fold vectors - maybe merge with FoldConstantArithmetic someday? |
5353 | if (!VT.isVector()) |
5354 | return SDValue(); |
5355 | |
5356 | ElementCount NumElts = VT.getVectorElementCount(); |
5357 | |
5358 | auto IsScalarOrSameVectorSize = [NumElts](const SDValue &Op) { |
5359 | return !Op.getValueType().isVector() || |
5360 | Op.getValueType().getVectorElementCount() == NumElts; |
5361 | }; |
5362 | |
5363 | auto IsConstantBuildVectorSplatVectorOrUndef = [](const SDValue &Op) { |
5364 | APInt SplatVal; |
5365 | BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op); |
5366 | return Op.isUndef() || Op.getOpcode() == ISD::CONDCODE || |
5367 | (BV && BV->isConstant()) || |
5368 | (Op.getOpcode() == ISD::SPLAT_VECTOR && |
5369 | ISD::isConstantSplatVector(Op.getNode(), SplatVal)); |
5370 | }; |
5371 | |
5372 | // All operands must be vector types with the same number of elements as |
5373 | // the result type and must be either UNDEF or a build vector of constant |
5374 | // or UNDEF scalars. |
5375 | if (!llvm::all_of(Ops, IsConstantBuildVectorSplatVectorOrUndef) || |
5376 | !llvm::all_of(Ops, IsScalarOrSameVectorSize)) |
5377 | return SDValue(); |
5378 | |
5379 | // If we are comparing vectors, then the result needs to be an i1 boolean
5380 | // that is then sign-extended back to the legal result type. |
5381 | EVT SVT = (Opcode == ISD::SETCC ? MVT::i1 : VT.getScalarType()); |
5382 | |
5383 | // Find legal integer scalar type for constant promotion and |
5384 | // ensure that its scalar size is at least as large as source. |
5385 | EVT LegalSVT = VT.getScalarType(); |
5386 | if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) { |
5387 | LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT); |
5388 | if (LegalSVT.bitsLT(VT.getScalarType())) |
5389 | return SDValue(); |
5390 | } |
5391 | |
5392 | // For scalable vector types we know we're dealing with SPLAT_VECTORs. We |
5393 | // only have one operand to check. For fixed-length vector types we may have |
5394 | // a combination of BUILD_VECTOR and SPLAT_VECTOR. |
5395 | unsigned NumOperands = NumElts.isScalable() ? 1 : NumElts.getFixedValue(); |
5396 | |
5397 | // Constant fold each scalar lane separately. |
5398 | SmallVector<SDValue, 4> ScalarResults; |
5399 | for (unsigned I = 0; I != NumOperands; I++) { |
5400 | SmallVector<SDValue, 4> ScalarOps; |
5401 | for (SDValue Op : Ops) { |
5402 | EVT InSVT = Op.getValueType().getScalarType(); |
5403 | if (Op.getOpcode() != ISD::BUILD_VECTOR && |
5404 | Op.getOpcode() != ISD::SPLAT_VECTOR) { |
5405 | // We've checked that this is UNDEF or a constant of some kind. |
5406 | if (Op.isUndef()) |
5407 | ScalarOps.push_back(getUNDEF(InSVT)); |
5408 | else |
5409 | ScalarOps.push_back(Op); |
5410 | continue; |
5411 | } |
5412 | |
5413 | SDValue ScalarOp = |
5414 | Op.getOperand(Op.getOpcode() == ISD::SPLAT_VECTOR ? 0 : I); |
5415 | EVT ScalarVT = ScalarOp.getValueType(); |
5416 | |
5417 | // Build vector (integer) scalar operands may need implicit |
5418 | // truncation - do this before constant folding. |
5419 | if (ScalarVT.isInteger() && ScalarVT.bitsGT(InSVT)) |
5420 | ScalarOp = getNode(ISD::TRUNCATE, DL, InSVT, ScalarOp); |
5421 | |
5422 | ScalarOps.push_back(ScalarOp); |
5423 | } |
5424 | |
5425 | // Constant fold the scalar operands. |
5426 | SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps, Flags); |
5427 | |
5428 | // Legalize the (integer) scalar constant if necessary. |
5429 | if (LegalSVT != SVT) |
5430 | ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult); |
5431 | |
5432 | // Scalar folding only succeeded if the result is a constant or UNDEF. |
5433 | if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant && |
5434 | ScalarResult.getOpcode() != ISD::ConstantFP) |
5435 | return SDValue(); |
5436 | ScalarResults.push_back(ScalarResult); |
5437 | } |
5438 | |
5439 | SDValue V = NumElts.isScalable() ? getSplatVector(VT, DL, ScalarResults[0]) |
5440 | : getBuildVector(VT, DL, ScalarResults); |
5441 | NewSDValueDbgMsg(V, "New node fold constant vector: ", this); |
5442 | return V; |
5443 | } |
5444 | |
5445 | SDValue SelectionDAG::foldConstantFPMath(unsigned Opcode, const SDLoc &DL, |
5446 | EVT VT, SDValue N1, SDValue N2) { |
5447 | // TODO: We don't do any constant folding for strict FP opcodes here, but we |
5448 | // should. That will require dealing with a potentially non-default |
5449 | // rounding mode, checking the "opStatus" return value from the APFloat |
5450 | // math calculations, and possibly other variations. |
5451 | auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1.getNode()); |
5452 | auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2.getNode()); |
5453 | if (N1CFP && N2CFP) { |
5454 | APFloat C1 = N1CFP->getValueAPF(), C2 = N2CFP->getValueAPF(); |
5455 | switch (Opcode) { |
5456 | case ISD::FADD: |
5457 | C1.add(C2, APFloat::rmNearestTiesToEven); |
5458 | return getConstantFP(C1, DL, VT); |
5459 | case ISD::FSUB: |
5460 | C1.subtract(C2, APFloat::rmNearestTiesToEven); |
5461 | return getConstantFP(C1, DL, VT); |
5462 | case ISD::FMUL: |
5463 | C1.multiply(C2, APFloat::rmNearestTiesToEven); |
5464 | return getConstantFP(C1, DL, VT); |
5465 | case ISD::FDIV: |
5466 | C1.divide(C2, APFloat::rmNearestTiesToEven); |
5467 | return getConstantFP(C1, DL, VT); |
5468 | case ISD::FREM: |
5469 | C1.mod(C2); |
5470 | return getConstantFP(C1, DL, VT); |
5471 | case ISD::FCOPYSIGN: |
5472 | C1.copySign(C2); |
5473 | return getConstantFP(C1, DL, VT); |
5474 | default: break; |
5475 | } |
5476 | } |
5477 | if (N1CFP && Opcode == ISD::FP_ROUND) { |
5478 | APFloat C1 = N1CFP->getValueAPF(); // make copy |
5479 | bool Unused; |
5480 | // This can return overflow, underflow, or inexact; we don't care. |
5481 | // FIXME need to be more flexible about rounding mode. |
5482 | (void) C1.convert(EVTToAPFloatSemantics(VT), APFloat::rmNearestTiesToEven, |
5483 | &Unused); |
5484 | return getConstantFP(C1, DL, VT); |
5485 | } |
5486 | |
5487 | switch (Opcode) { |
5488 | case ISD::FSUB: |
5489 | // -0.0 - undef --> undef (consistent with "fneg undef") |
5490 | if (N1CFP && N1CFP->getValueAPF().isNegZero() && N2.isUndef()) |
5491 | return getUNDEF(VT); |
5492 | LLVM_FALLTHROUGH;
5493 | |
5494 | case ISD::FADD: |
5495 | case ISD::FMUL: |
5496 | case ISD::FDIV: |
5497 | case ISD::FREM: |
5498 | // If both operands are undef, the result is undef. If 1 operand is undef, |
5499 | // the result is NaN. This should match the behavior of the IR optimizer. |
5500 | if (N1.isUndef() && N2.isUndef()) |
5501 | return getUNDEF(VT); |
5502 | if (N1.isUndef() || N2.isUndef()) |
5503 | return getConstantFP(APFloat::getNaN(EVTToAPFloatSemantics(VT)), DL, VT); |
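     | // (undef may be chosen to be NaN, and NaN op x == NaN for these opcodes,
     | // so a single undef operand always permits a NaN result.)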
5504 | } |
5505 | return SDValue(); |
5506 | } |
5507 | |
5508 | SDValue SelectionDAG::getAssertAlign(const SDLoc &DL, SDValue Val, Align A) { |
5509 | assert(Val.getValueType().isInteger() && "Invalid AssertAlign!");
5510 | |
5511 | // There's no need to assert on a byte-aligned pointer. All pointers are at |
5512 | // least byte aligned. |
5513 | if (A == Align(1)) |
5514 | return Val; |
5515 | |
5516 | FoldingSetNodeID ID; |
5517 | AddNodeIDNode(ID, ISD::AssertAlign, getVTList(Val.getValueType()), {Val}); |
5518 | ID.AddInteger(A.value()); |
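     | // The alignment participates in the FoldingSet profile, so AssertAlign
     | // nodes with different alignments on the same value are not CSE'd together.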
5519 | |
5520 | void *IP = nullptr; |
5521 | if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) |
5522 | return SDValue(E, 0); |
5523 | |
5524 | auto *N = newSDNode<AssertAlignSDNode>(DL.getIROrder(), DL.getDebugLoc(), |
5525 | Val.getValueType(), A); |
5526 | createOperands(N, {Val}); |
5527 | |
5528 | CSEMap.InsertNode(N, IP); |
5529 | InsertNode(N); |
5530 | |
5531 | SDValue V(N, 0); |
5532 | NewSDValueDbgMsg(V, "Creating new node: ", this); |
5533 | return V; |
5534 | } |
5535 | |
5536 | SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, |
5537 | SDValue N1, SDValue N2) { |
5538 | SDNodeFlags Flags; |
5539 | if (Inserter) |
5540 | Flags = Inserter->getFlags(); |
5541 | return getNode(Opcode, DL, VT, N1, N2, Flags); |
5542 | } |
5543 | |
5544 | SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, |
5545 | SDValue N1, SDValue N2, const SDNodeFlags Flags) { |
5546 | assert(N1.getOpcode() != ISD::DELETED_NODE &&
5547 | N2.getOpcode() != ISD::DELETED_NODE &&
5548 | "Operand is DELETED_NODE!");
5549 | ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); |
5550 | ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2); |
5551 | ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); |
5552 | ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2); |
5553 | |
5554 | // Canonicalize constant to RHS if commutative. |
5555 | if (TLI->isCommutativeBinOp(Opcode)) { |
5556 | if (N1C && !N2C) { |
5557 | std::swap(N1C, N2C); |
5558 | std::swap(N1, N2); |
5559 | } else if (N1CFP && !N2CFP) { |
5560 | std::swap(N1CFP, N2CFP); |
5561 | std::swap(N1, N2); |
5562 | } |
5563 | } |
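     | // Keeping constants on the RHS means the identity/absorption folds below
     | // only ever have to inspect N2C, e.g. the "N2C && N2C->isNullValue()"
     | // checks in the AND/OR/ADD cases.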
5564 | |
5565 | switch (Opcode) { |
5566 | default: break; |
5567 | case ISD::TokenFactor: |
5568 | assert(VT == MVT::Other && N1.getValueType() == MVT::Other &&
5569 | N2.getValueType() == MVT::Other && "Invalid token factor!");
5570 | // Fold trivial token factors. |
5571 | if (N1.getOpcode() == ISD::EntryToken) return N2; |
5572 | if (N2.getOpcode() == ISD::EntryToken) return N1; |
5573 | if (N1 == N2) return N1; |
5574 | break; |
5575 | case ISD::BUILD_VECTOR: { |
5576 | // Attempt to simplify BUILD_VECTOR. |
5577 | SDValue Ops[] = {N1, N2}; |
5578 | if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this)) |
5579 | return V; |
5580 | break; |
5581 | } |
5582 | case ISD::CONCAT_VECTORS: { |
5583 | SDValue Ops[] = {N1, N2}; |
5584 | if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this)) |
5585 | return V; |
5586 | break; |
5587 | } |
5588 | case ISD::AND: |
5589 | assert(VT.isInteger() && "This operator does not apply to FP types!");
5590 | assert(N1.getValueType() == N2.getValueType() &&
5591 | N1.getValueType() == VT && "Binary operator types must match!");
5592 | // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's |
5593 | // worth handling here. |
5594 | if (N2C && N2C->isNullValue()) |
5595 | return N2; |
5596 | if (N2C && N2C->isAllOnesValue()) // X & -1 -> X |
5597 | return N1; |
5598 | break; |
5599 | case ISD::OR: |
5600 | case ISD::XOR: |
5601 | case ISD::ADD: |
5602 | case ISD::SUB: |
5603 | assert(VT.isInteger() && "This operator does not apply to FP types!");
5604 | assert(N1.getValueType() == N2.getValueType() &&
5605 | N1.getValueType() == VT && "Binary operator types must match!");
5606 | // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so |
5607 | // it's worth handling here. |
5608 | if (N2C && N2C->isNullValue()) |
5609 | return N1; |
5610 | if ((Opcode == ISD::ADD || Opcode == ISD::SUB) && VT.isVector() && |
5611 | VT.getVectorElementType() == MVT::i1) |
5612 | return getNode(ISD::XOR, DL, VT, N1, N2); |
5613 | break; |
5614 | case ISD::MUL: |
5615 | assert(VT.isInteger() && "This operator does not apply to FP types!");
5616 | assert(N1.getValueType() == N2.getValueType() &&
5617 | N1.getValueType() == VT && "Binary operator types must match!");
5618 | if (VT.isVector() && VT.getVectorElementType() == MVT::i1) |
5619 | return getNode(ISD::AND, DL, VT, N1, N2); |
5620 | if (N2C && (N1.getOpcode() == ISD::VSCALE) && Flags.hasNoSignedWrap()) { |
5621 | const APInt &MulImm = N1->getConstantOperandAPInt(0); |
5622 | const APInt &N2CImm = N2C->getAPIntValue(); |
5623 | return getVScale(DL, VT, MulImm * N2CImm); |
5624 | } |
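     | // e.g. (vscale * 4) * 2 is re-folded to vscale * 8, keeping scalable
     | // sizing expressions in a single canonical (vscale * C) form; the NSW
     | // requirement is there so the combined immediate cannot have wrapped.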
5625 | break; |
5626 | case ISD::UDIV: |
5627 | case ISD::UREM: |
5628 | case ISD::MULHU: |
5629 | case ISD::MULHS: |
5630 | case ISD::SDIV: |
5631 | case ISD::SREM: |
5632 | case ISD::SADDSAT: |
5633 | case ISD::SSUBSAT: |
5634 | case ISD::UADDSAT: |
5635 | case ISD::USUBSAT: |
5636 | assert(VT.isInteger() && "This operator does not apply to FP types!");
5637 | assert(N1.getValueType() == N2.getValueType() &&
5638 | N1.getValueType() == VT && "Binary operator types must match!");
5639 | if (VT.isVector() && VT.getVectorElementType() == MVT::i1) { |
5640 | // fold (add_sat x, y) -> (or x, y) for bool types. |
5641 | if (Opcode == ISD::SADDSAT || Opcode == ISD::UADDSAT) |
5642 | return getNode(ISD::OR, DL, VT, N1, N2); |
5643 | // fold (sub_sat x, y) -> (and x, ~y) for bool types. |
5644 | if (Opcode == ISD::SSUBSAT || Opcode == ISD::USUBSAT) |
5645 | return getNode(ISD::AND, DL, VT, N1, getNOT(DL, N2, VT)); |
5646 | } |
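     | // Sanity check for the bool folds above (illustrative): reading i1 as
     | // 0/1, usubsat(1,0)=1, usubsat(1,1)=0, usubsat(0,y)=0, which is exactly
     | // x & ~y; similarly uaddsat saturates to 1 whenever either input is 1,
     | // which is x | y.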
5647 | break; |
5648 | case ISD::SMIN: |
5649 | case ISD::UMAX: |
5650 | assert(VT.isInteger() && "This operator does not apply to FP types!");
5651 | assert(N1.getValueType() == N2.getValueType() &&
5652 | N1.getValueType() == VT && "Binary operator types must match!");
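     | // With i1 booleans (signed values 0 and -1), SMIN yields true iff either
     | // input is true, and UMAX likewise, so both reduce to OR; dually, the
     | // SMAX/UMIN case below reduces to AND.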
5653 | if (VT.isVector() && VT.getVectorElementType() == MVT::i1) |
5654 | return getNode(ISD::OR, DL, VT, N1, N2); |
5655 | break; |
5656 | case ISD::SMAX: |
5657 | case ISD::UMIN: |
5658 | assert(VT.isInteger() && "This operator does not apply to FP types!");
5659 | assert(N1.getValueType() == N2.getValueType() &&
5660 | N1.getValueType() == VT && "Binary operator types must match!");
5661 | if (VT.isVector() && VT.getVectorElementType() == MVT::i1) |
5662 | return getNode(ISD::AND, DL, VT, N1, N2); |
5663 | break; |
5664 | case ISD::FADD: |
5665 | case ISD::FSUB: |
5666 | case ISD::FMUL: |
5667 | case ISD::FDIV: |
5668 | case ISD::FREM: |
5669 | assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
5670 | assert(N1.getValueType() == N2.getValueType() &&
5671 | N1.getValueType() == VT && "Binary operator types must match!");
5672 | if (SDValue V = simplifyFPBinop(Opcode, N1, N2, Flags)) |
5673 | return V; |
5674 | break; |
5675 | case ISD::FCOPYSIGN: // N1 and result must match. N1/N2 need not match. |
5676 | assert(N1.getValueType() == VT &&
5677 | N1.getValueType().isFloatingPoint() &&
5678 | N2.getValueType().isFloatingPoint() &&
5679 | "Invalid FCOPYSIGN!");
5680 | break; |
5681 | case ISD::SHL: |
5682 | if (N2C && (N1.getOpcode() == ISD::VSCALE) && Flags.hasNoSignedWrap()) { |
5683 | const APInt &MulImm = N1->getConstantOperandAPInt(0); |
5684 | const APInt &ShiftImm = N2C->getAPIntValue(); |
5685 | return getVScale(DL, VT, MulImm << ShiftImm); |
5686 | } |
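     | // Same canonicalization as the MUL case above, with the shift amount
     | // folded into the immediate: e.g. (vscale * 3) << 2 -> vscale * 12.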
5687 | LLVM_FALLTHROUGH;
5688 | case ISD::SRA: |
5689 | case ISD::SRL: |
5690 | if (SDValue V = simplifyShift(N1, N2)) |
5691 | return V; |
5692 | LLVM_FALLTHROUGH;
5693 | case ISD::ROTL: |
5694 | case ISD::ROTR: |
5695 | assert(VT == N1.getValueType() &&
5696 | "Shift operators return type must be the same as their first arg");
5697 | assert(VT.isInteger() && N2.getValueType().isInteger() &&
5698 | "Shifts only work on integers");
5699 | assert((!VT.isVector() || VT == N2.getValueType()) &&
5700 | "Vector shift amounts must have the same type as their first arg");
5701 | // Verify that the shift amount VT is big enough to hold valid shift |
5702 | // amounts. This catches things like trying to shift an i1024 value by an |
5703 | // i8, which is easy to fall into in generic code that uses |
5704 | // TLI.getShiftAmount(). |
5705 | assert(N2.getValueType().getScalarSizeInBits() >=
5706 | Log2_32_Ceil(VT.getScalarSizeInBits()) &&
5707 | "Invalid use of small shift amount with oversized value!");
5708 | |
5709 | // Always fold shifts of i1 values so the code generator doesn't need to |
5710 | // handle them. Since we know the size of the shift has to be less than the |
5711 | // size of the value, the shift/rotate count is guaranteed to be zero. |
5712 | if (VT == MVT::i1) |
5713 | return N1; |
5714 | if (N2C && N2C->isNullValue()) |
5715 | return N1; |
5716 | break; |
5717 | case ISD::FP_ROUND: |
5718 | assert(VT.isFloatingPoint() &&
5719 | N1.getValueType().isFloatingPoint() &&
5720 | VT.bitsLE(N1.getValueType()) &&
5721 | N2C && (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) &&
5722 | "Invalid FP_ROUND!");
5723 | if (N1.getValueType() == VT) return N1; // noop conversion. |
5724 | break; |
5725 | case ISD::AssertSext: |
5726 | case ISD::AssertZext: { |
5727 | EVT EVT = cast<VTSDNode>(N2)->getVT(); |
5728 | assert(VT == N1.getValueType() && "Not an inreg extend!");
5729 | assert(VT.isInteger() && EVT.isInteger() &&
5730 | "Cannot *_EXTEND_INREG FP types");
5731 | assert(!EVT.isVector() &&
5732 | "AssertSExt/AssertZExt type should be the vector element type "
5733 | "rather than the vector type!");
5734 | assert(EVT.bitsLE(VT.getScalarType()) && "Not extending!");
5735 | if (VT.getScalarType() == EVT) return N1; // noop assertion. |
5736 | break; |
5737 | } |
5738 | case ISD::SIGN_EXTEND_INREG: { |
5739 | EVT EVT = cast<VTSDNode>(N2)->getVT(); |
5740 | assert(VT == N1.getValueType() && "Not an inreg extend!");
5741 | assert(VT.isInteger() && EVT.isInteger() &&
5742 | "Cannot *_EXTEND_INREG FP types");
5743 | assert(EVT.isVector() == VT.isVector() &&
5744 | "SIGN_EXTEND_INREG type should be vector iff the operand "
5745 | "type is vector!");
5746 | assert((!EVT.isVector() ||
5747 | EVT.getVectorElementCount() == VT.getVectorElementCount()) &&
5748 | "Vector element counts must match in SIGN_EXTEND_INREG");
5749 | assert(EVT.bitsLE(VT) && "Not extending!");
5750 | if (EVT == VT) return N1; // Not actually extending |
5751 | |
5752 | auto SignExtendInReg = [&](APInt Val, llvm::EVT ConstantVT) { |
5753 | unsigned FromBits = EVT.getScalarSizeInBits(); |
5754 | Val <<= Val.getBitWidth() - FromBits; |
5755 | Val.ashrInPlace(Val.getBitWidth() - FromBits); |
5756 | return getConstant(Val, DL, ConstantVT); |
5757 | }; |
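     | // Worked example for the lambda above (illustrative values): with an i8
     | // EVT inside a 32-bit constant, Val = 0x000000B5 becomes
     | //   Val <<= 24            -> 0xB5000000
     | //   Val.ashrInPlace(24)   -> 0xFFFFFFB5
     | // i.e. bit 7 of the i8 payload is replicated through bit 31.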
5758 | |
5759 | if (N1C) { |
5760 | const APInt &Val = N1C->getAPIntValue(); |
5761 | return SignExtendInReg(Val, VT); |
5762 | } |
5763 | |
5764 | if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) { |
5765 | SmallVector<SDValue, 8> Ops; |
5766 | llvm::EVT OpVT = N1.getOperand(0).getValueType(); |
5767 | for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) { |
5768 | SDValue Op = N1.getOperand(i); |
5769 | if (Op.isUndef()) { |
5770 | Ops.push_back(getUNDEF(OpVT)); |
5771 | continue; |
5772 | } |
5773 | ConstantSDNode *C = cast<ConstantSDNode>(Op); |
5774 | APInt Val = C->getAPIntValue(); |
5775 | Ops.push_back(SignExtendInReg(Val, OpVT)); |
5776 | } |
5777 | return getBuildVector(VT, DL, Ops); |
5778 | } |
5779 | break; |
5780 | } |
5781 | case ISD::FP_TO_SINT_SAT: |
5782 | case ISD::FP_TO_UINT_SAT: { |
5783 | assert(VT.isInteger() && cast<VTSDNode>(N2)->getVT().isInteger() &&
5784 | N1.getValueType().isFloatingPoint() && "Invalid FP_TO_*INT_SAT");
5785 | assert(N1.getValueType().isVector() == VT.isVector() &&
5786 | "FP_TO_*INT_SAT type should be vector iff the operand type is "
5787 | "vector!");
5788 | assert((!VT.isVector() || VT.getVectorNumElements() ==
5789 | N1.getValueType().getVectorNumElements()) &&
5790 | "Vector element counts must match in FP_TO_*INT_SAT");
5791 | assert(!cast<VTSDNode>(N2)->getVT().isVector() &&
5792 | "Type to saturate to must be a scalar.");
5793 | assert(cast<VTSDNode>(N2)->getVT().bitsLE(VT.getScalarType()) &&
5794 | "Not extending!");
5795 | break; |
5796 | } |
5797 | case ISD::EXTRACT_VECTOR_ELT: |
5798 | assert(VT.getSizeInBits() >= N1.getValueType().getScalarSizeInBits() &&
5799 | "The result of EXTRACT_VECTOR_ELT must be at least as wide as the \
5800 | element type of the vector.");
5801 | |
5802 | // Extract from an undefined value or using an undefined index is undefined. |
5803 | if (N1.isUndef() || N2.isUndef()) |
5804 | return getUNDEF(VT); |
5805 | |
5806 | // EXTRACT_VECTOR_ELT of out-of-bounds element is an UNDEF for fixed length |
5807 | // vectors. For scalable vectors we will provide appropriate support for |
5808 | // dealing with arbitrary indices. |
5809 | if (N2C && N1.getValueType().isFixedLengthVector() && |
5810 | N2C->getAPIntValue().uge(N1.getValueType().getVectorNumElements())) |
5811 | return getUNDEF(VT); |
5812 | |
5813 | // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is |
5814 | // expanding copies of large vectors from registers. This only works for |
5815 | // fixed length vectors, since we need to know the exact number of |
5816 | // elements. |
5817 | if (N2C && N1.getOpcode() == ISD::CONCAT_VECTORS && N1.getNumOperands() > 0 &&
5818 | N1.getOperand(0).getValueType().isFixedLengthVector()) {
5819 | unsigned Factor = |
5820 | N1.getOperand(0).getValueType().getVectorNumElements(); |
5821 | return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, |
5822 | N1.getOperand(N2C->getZExtValue() / Factor), |
5823 | getVectorIdxConstant(N2C->getZExtValue() % Factor, DL)); |
5824 | } |
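     | // For instance: extracting element 5 from concat_vectors(v4i32 A,
     | // v4i32 B) has Factor == 4, so it becomes
     | // extract_vector_elt(B, 5 % 4) == extract_vector_elt(B, 1).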
5825 | |
5826 | // EXTRACT_VECTOR_ELT of BUILD_VECTOR or SPLAT_VECTOR is often formed while |
5827 | // lowering is expanding large vector constants. |
5828 | if (N2C && (N1.getOpcode() == ISD::BUILD_VECTOR || |
5829 | N1.getOpcode() == ISD::SPLAT_VECTOR)) { |
5830 | assert((N1.getOpcode() != ISD::BUILD_VECTOR ||
5831 | N1.getValueType().isFixedLengthVector()) &&
5832 | "BUILD_VECTOR used for scalable vectors");
5833 | unsigned Index = |
5834 | N1.getOpcode() == ISD::BUILD_VECTOR ? N2C->getZExtValue() : 0; |
5835 | SDValue Elt = N1.getOperand(Index); |
5836 | |
5837 | if (VT != Elt.getValueType()) |
5838 | // If the vector element type is not legal, the BUILD_VECTOR operands |
5839 | // are promoted and implicitly truncated, and the result implicitly |
5840 | // extended. Make that explicit here. |
5841 | Elt = getAnyExtOrTrunc(Elt, DL, VT); |
5842 | |
5843 | return Elt; |
5844 | } |
5845 | |
5846 | // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector |
5847 | // operations are lowered to scalars. |
5848 | if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) { |
5849 | // If the indices are the same, return the inserted element else |
5850 | // if the indices are known different, extract the element from |
5851 | // the original vector. |
5852 | SDValue N1Op2 = N1.getOperand(2); |
5853 | ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2); |
5854 | |
5855 | if (N1Op2C && N2C) { |
5856 | if (N1Op2C->getZExtValue() == N2C->getZExtValue()) { |
5857 | if (VT == N1.getOperand(1).getValueType()) |
5858 | return N1.getOperand(1); |
5859 | return getSExtOrTrunc(N1.getOperand(1), DL, VT); |
5860 | } |
5861 | return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2); |
5862 | } |
5863 | } |
5864 | |
5865 | // EXTRACT_VECTOR_ELT of v1iX EXTRACT_SUBVECTOR could be formed |
5866 | // when vector types are scalarized and v1iX is legal. |
5867 | // vextract (v1iX extract_subvector(vNiX, Idx)) -> vextract(vNiX,Idx). |
5868 | // Here we are completely ignoring the extract element index (N2), |
5869 | // which is fine for fixed width vectors, since any index other than 0 |
5870 | // is undefined anyway. However, this cannot be ignored for scalable |
5871 | // vectors - in theory we could support this, but we don't want to do this |
5872 | // without a profitability check. |
5873 | if (N1.getOpcode() == ISD::EXTRACT_SUBVECTOR && |
5874 | N1.getValueType().isFixedLengthVector() && |
5875 | N1.getValueType().getVectorNumElements() == 1) { |
5876 | return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), |
5877 | N1.getOperand(1)); |
5878 | } |
5879 | break; |
5880 | case ISD::EXTRACT_ELEMENT: |
5881 | assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
5882 | assert(!N1.getValueType().isVector() && !VT.isVector() &&
5883 | (N1.getValueType().isInteger() == VT.isInteger()) &&
5884 | N1.getValueType() != VT &&
5885 | "Wrong types for EXTRACT_ELEMENT!");
5886 | |
5887 | // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding |
5888 | // 64-bit integers into 32-bit parts. Instead of building the extract of |
5889 | // the BUILD_PAIR, only to have legalize rip it apart, just do it now. |
5890 | if (N1.getOpcode() == ISD::BUILD_PAIR) |
5891 | return N1.getOperand(N2C->getZExtValue()); |
5892 | |
5893 | // EXTRACT_ELEMENT of a constant int is also very common. |
5894 | if (N1C) { |
5895 | unsigned ElementSize = VT.getSizeInBits(); |
5896 | unsigned Shift = ElementSize * N2C->getZExtValue(); |
5897 | const APInt &Val = N1C->getAPIntValue(); |
5898 | return getConstant(Val.extractBits(ElementSize, Shift), DL, VT); |
5899 | } |
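     | // e.g. EXTRACT_ELEMENT of the i64 constant 0x1122334455667788 with
     | // index 1 and VT == i32 takes bits [63:32], giving 0x11223344; index 0
     | // would give the low half 0x55667788.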
5900 | break; |
5901 | case ISD::EXTRACT_SUBVECTOR: { |
5902 | EVT N1VT = N1.getValueType(); |
5903 | assert(VT.isVector() && N1VT.isVector() &&
5904 | "Extract subvector VTs must be vectors!");
5905 | assert(VT.getVectorElementType() == N1VT.getVectorElementType() &&
5906 | "Extract subvector VTs must have the same element type!");
5907 | assert((VT.isFixedLengthVector() || N1VT.isScalableVector()) &&
5908 | "Cannot extract a scalable vector from a fixed length vector!");
5909 | assert((VT.isScalableVector() != N1VT.isScalableVector() ||
5910 | VT.getVectorMinNumElements() <= N1VT.getVectorMinNumElements()) &&
5911 | "Extract subvector must be from larger vector to smaller vector!");
5912 | assert(N2C && "Extract subvector index must be a constant");
5913 | assert((VT.isScalableVector() != N1VT.isScalableVector() ||
5914 | (VT.getVectorMinNumElements() + N2C->getZExtValue()) <=
5915 | N1VT.getVectorMinNumElements()) &&
5916 | "Extract subvector overflow!");
5917 | assert(N2C->getAPIntValue().getBitWidth() ==
5918 | TLI->getVectorIdxTy(getDataLayout()).getFixedSizeInBits() &&
5919 | "Constant index for EXTRACT_SUBVECTOR has an invalid size");
5920 | |
5921 | // Trivial extraction. |
5922 | if (VT == N1VT) |
5923 | return N1; |
5924 | |
5925 | // EXTRACT_SUBVECTOR of an UNDEF is an UNDEF. |
5926 | if (N1.isUndef()) |
5927 | return getUNDEF(VT); |
5928 | |
5929 | // EXTRACT_SUBVECTOR of CONCAT_VECTOR can be simplified if the pieces of |
5930 | // the concat have the same type as the extract. |
5931 | if (N1.getOpcode() == ISD::CONCAT_VECTORS && N1.getNumOperands() > 0 && |
5932 | VT == N1.getOperand(0).getValueType()) { |
5933 | unsigned Factor = VT.getVectorMinNumElements(); |
5934 | return N1.getOperand(N2C->getZExtValue() / Factor); |
5935 | } |
5936 | |
5937 | // EXTRACT_SUBVECTOR of INSERT_SUBVECTOR is often created |
5938 | // during shuffle legalization. |
5939 | if (N1.getOpcode() == ISD::INSERT_SUBVECTOR && N2 == N1.getOperand(2) && |
5940 | VT == N1.getOperand(1).getValueType()) |
5941 | return N1.getOperand(1); |
5942 | break; |
5943 | } |
5944 | } |
5945 | |
5946 | // Perform trivial constant folding. |
5947 | if (SDValue SV = FoldConstantArithmetic(Opcode, DL, VT, {N1, N2})) |
5948 | return SV; |
5949 | |
5950 | if (SDValue V = foldConstantFPMath(Opcode, DL, VT, N1, N2)) |
5951 | return V; |
5952 | |
5953 | // Canonicalize an UNDEF to the RHS, even over a constant. |
5954 | if (N1.isUndef()) { |
5955 | if (TLI->isCommutativeBinOp(Opcode)) { |
5956 | std::swap(N1, N2); |
5957 | } else { |
5958 | switch (Opcode) { |
5959 | case ISD::SIGN_EXTEND_INREG: |
5960 | case ISD::SUB: |
5961 | return getUNDEF(VT); // fold op(undef, arg2) -> undef |
5962 | case ISD::UDIV: |
5963 | case ISD::SDIV: |
5964 | case ISD::UREM: |
5965 | case ISD::SREM: |
5966 | case ISD::SSUBSAT: |
5967 | case ISD::USUBSAT: |
5968 | return getConstant(0, DL, VT); // fold op(undef, arg2) -> 0 |
5969 | } |
5970 | } |
5971 | } |
5972 | |
5973 | // Fold a bunch of operators when the RHS is undef. |
5974 | if (N2.isUndef()) { |
5975 | switch (Opcode) { |
5976 | case ISD::XOR: |
5977 | if (N1.isUndef()) |
5978 | // Handle undef ^ undef -> 0 special case. This is a common |
5979 | // idiom (misuse). |
5980 | return getConstant(0, DL, VT); |
5981 | LLVM_FALLTHROUGH;
5982 | case ISD::ADD: |
5983 | case ISD::SUB: |
5984 | case ISD::UDIV: |
5985 | case ISD::SDIV: |
5986 | case ISD::UREM: |
5987 | case ISD::SREM: |
5988 | return getUNDEF(VT); // fold op(arg1, undef) -> undef |
5989 | case ISD::MUL: |
5990 | case ISD::AND: |
5991 | case ISD::SSUBSAT: |
5992 | case ISD::USUBSAT: |
5993 | return getConstant(0, DL, VT); // fold op(arg1, undef) -> 0 |
5994 | case ISD::OR: |
5995 | case ISD::SADDSAT: |
5996 | case ISD::UADDSAT: |
5997 | return getAllOnesConstant(DL, VT); |
5998 | } |
5999 | } |
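     | // (These cases pick the value the undef operand could always have taken
     | // to force a known result: 0 absorbs MUL/AND, all-ones absorbs OR and
     | // saturates the saturating adds, so the folded constant is a legal
     | // refinement of the original value.)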
6000 | |
6001 | // Memoize this node if possible. |
6002 | SDNode *N; |
6003 | SDVTList VTs = getVTList(VT); |
6004 | SDValue Ops[] = {N1, N2}; |
6005 | if (VT != MVT::Glue) { |
6006 | FoldingSetNodeID ID; |
6007 | AddNodeIDNode(ID, Opcode, VTs, Ops); |
6008 | void *IP = nullptr; |
6009 | if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { |
6010 | E->intersectFlagsWith(Flags); |
6011 | return SDValue(E, 0); |
6012 | } |
6013 | |
6014 | N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); |
6015 | N->setFlags(Flags); |
6016 | createOperands(N, Ops); |
6017 | CSEMap.InsertNode(N, IP); |
6018 | } else { |
6019 | N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); |
6020 | createOperands(N, Ops); |
6021 | } |
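     | // (The FoldingSet lookup above is what makes two identical
     | // getNode(ISD::ADD, DL, VT, X, Y) calls return the same SDNode. Nodes
     | // producing MVT::Glue are deliberately left un-CSE'd, since glue results
     | // are tied to a specific consumer and must stay distinct.)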
6022 | |
6023 | InsertNode(N); |
6024 | SDValue V = SDValue(N, 0); |
6025 | NewSDValueDbgMsg(V, "Creating new node: ", this); |
6026 | return V; |
6027 | } |
6028 | |
6029 | SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, |
6030 | SDValue N1, SDValue N2, SDValue N3) { |
6031 | SDNodeFlags Flags; |
6032 | if (Inserter) |
6033 | Flags = Inserter->getFlags(); |
6034 | return getNode(Opcode, DL, VT, N1, N2, N3, Flags); |
6035 | } |
6036 | |
6037 | SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, |
6038 | SDValue N1, SDValue N2, SDValue N3, |
6039 | const SDNodeFlags Flags) { |
6040 | assert(N1.getOpcode() != ISD::DELETED_NODE &&
6041 | N2.getOpcode() != ISD::DELETED_NODE &&
6042 | N3.getOpcode() != ISD::DELETED_NODE &&
6043 | "Operand is DELETED_NODE!");
6044 | // Perform various simplifications. |
6045 | switch (Opcode) { |
6046 | case ISD::FMA: { |
6047 | assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
6048 | assert(N1.getValueType() == VT && N2.getValueType() == VT &&
6049 | N3.getValueType() == VT && "FMA types must match!");
6050 | ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); |
6051 | ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2); |
6052 | ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3); |
6053 | if (N1CFP && N2CFP && N3CFP) { |
6054 | APFloat V1 = N1CFP->getValueAPF(); |
6055 | const APFloat &V2 = N2CFP->getValueAPF(); |
6056 | const APFloat &V3 = N3CFP->getValueAPF(); |
6057 | V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven); |
6058 | return getConstantFP(V1, DL, VT); |
6059 | } |
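     | // e.g. fma(2.0, 3.0, 0.5) folds to the constant 6.5; fusedMultiplyAdd
     | // rounds only once, which can differ from separate FMUL+FADD when the
     | // intermediate product is not exactly representable.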
6060 | break; |
6061 | } |
6062 | case ISD::BUILD_VECTOR: { |
6063 | // Attempt to simplify BUILD_VECTOR. |
6064 | SDValue Ops[] = {N1, N2, N3}; |
6065 | if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this)) |
6066 | return V; |
6067 | break; |
6068 | } |