clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name ConstantFolding.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 1 -fhalf-no-semantic-interposition -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/gnu/usr.bin/clang/libLLVM/obj -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Analysis -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ASMParser -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/BinaryFormat -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitcode -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitcode -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitstream -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /include/llvm/CodeGen -I /include/llvm/CodeGen/PBQP -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/IR -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/IR -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Coroutines -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ProfileData/Coverage -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/CodeView -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/DWARF -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/MSF -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/PDB -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Demangle -I 
/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine/JITLink -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine/Orc -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend/OpenACC -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend/OpenMP -I /include/llvm/CodeGen/GlobalISel -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/IRReader -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/InstCombine -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/Transforms/InstCombine -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/LTO -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Linker -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/MC -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/MC/MCParser -I /include/llvm/CodeGen/MIRParser -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Object -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Option -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Passes -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ProfileData -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Scalar -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ADT -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Support -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/Symbolize -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Target -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Utils -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Vectorize -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/IPO -I 
/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include -I /usr/src/gnu/usr.bin/clang/libLLVM/../include -I /usr/src/gnu/usr.bin/clang/libLLVM/obj -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include -D NDEBUG -D __STDC_LIMIT_MACROS -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D LLVM_PREFIX="/usr" -D PIC -internal-isystem /usr/include/c++/v1 -internal-isystem /usr/local/lib/clang/13.0.0/include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/usr/src/gnu/usr.bin/clang/libLLVM/obj -ferror-limit 19 -fvisibility-inlines-hidden -fwrapv -D_RET_PROTECTOR -ret-protector -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/vmm/scan-build/2022-01-12-194120-40624-1 -x c++ /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Analysis/ConstantFolding.cpp
| 1 | //===-- ConstantFolding.cpp - Fold instructions into constants -----------===// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | // |
| 9 | // This file defines routines for folding instructions into constants. |
| 10 | // |
| 11 | // Also, to supplement the basic IR ConstantExpr simplifications, this file |
| 12 | // defines some additional folding routines that can make use of DataLayout |
| 13 | // information. These functions cannot go in IR due to library dependency |
| 14 | // issues. |
| 15 | // |
| 16 | //===----------------------------------------------------------------------===// |
| 17 | |
| 18 | #include "llvm/Analysis/ConstantFolding.h" |
| 19 | #include "llvm/ADT/APFloat.h" |
| 20 | #include "llvm/ADT/APInt.h" |
| 21 | #include "llvm/ADT/APSInt.h" |
| 22 | #include "llvm/ADT/ArrayRef.h" |
| 23 | #include "llvm/ADT/DenseMap.h" |
| 24 | #include "llvm/ADT/STLExtras.h" |
| 25 | #include "llvm/ADT/SmallVector.h" |
| 26 | #include "llvm/ADT/StringRef.h" |
| 27 | #include "llvm/Analysis/TargetFolder.h" |
| 28 | #include "llvm/Analysis/TargetLibraryInfo.h" |
| 29 | #include "llvm/Analysis/ValueTracking.h" |
| 30 | #include "llvm/Analysis/VectorUtils.h" |
| 31 | #include "llvm/Config/config.h" |
| 32 | #include "llvm/IR/Constant.h" |
| 33 | #include "llvm/IR/Constants.h" |
| 34 | #include "llvm/IR/DataLayout.h" |
| 35 | #include "llvm/IR/DerivedTypes.h" |
| 36 | #include "llvm/IR/Function.h" |
| 37 | #include "llvm/IR/GlobalValue.h" |
| 38 | #include "llvm/IR/GlobalVariable.h" |
| 39 | #include "llvm/IR/InstrTypes.h" |
| 40 | #include "llvm/IR/Instruction.h" |
| 41 | #include "llvm/IR/Instructions.h" |
| 42 | #include "llvm/IR/IntrinsicInst.h" |
| 43 | #include "llvm/IR/Intrinsics.h" |
| 44 | #include "llvm/IR/IntrinsicsAArch64.h" |
| 45 | #include "llvm/IR/IntrinsicsAMDGPU.h" |
| 46 | #include "llvm/IR/IntrinsicsARM.h" |
| 47 | #include "llvm/IR/IntrinsicsWebAssembly.h" |
| 48 | #include "llvm/IR/IntrinsicsX86.h" |
| 49 | #include "llvm/IR/Operator.h" |
| 50 | #include "llvm/IR/Type.h" |
| 51 | #include "llvm/IR/Value.h" |
| 52 | #include "llvm/Support/Casting.h" |
| 53 | #include "llvm/Support/ErrorHandling.h" |
| 54 | #include "llvm/Support/KnownBits.h" |
| 55 | #include "llvm/Support/MathExtras.h" |
| 56 | #include <cassert> |
| 57 | #include <cerrno> |
| 58 | #include <cfenv> |
| 59 | #include <cmath> |
| 60 | #include <cstddef> |
| 61 | #include <cstdint> |
| 62 | |
| 63 | using namespace llvm; |
| 64 | |
| 65 | namespace { |
| 66 | Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP, |
| 67 | ArrayRef<Constant *> Ops, |
| 68 | const DataLayout &DL, |
| 69 | const TargetLibraryInfo *TLI, |
| 70 | bool ForLoadOperand); |
| 71 | |
| 72 | //===----------------------------------------------------------------------===// |
| 73 | // Constant Folding internal helpers |
| 74 | //===----------------------------------------------------------------------===// |
| 75 | |
| 76 | static Constant *foldConstVectorToAPInt(APInt &Result, Type *DestTy, |
| 77 | Constant *C, Type *SrcEltTy, |
| 78 | unsigned NumSrcElts, |
| 79 | const DataLayout &DL) { |
| 80 | // Shift each source element into Result in the order dictated by the |
| 81 | // target's endianness; undef elements contribute zero bits. |
| 82 | unsigned BitShift = DL.getTypeSizeInBits(SrcEltTy); |
| 83 | for (unsigned i = 0; i != NumSrcElts; ++i) { |
| 84 | Constant *Element; |
| 85 | if (DL.isLittleEndian()) |
| 86 | Element = C->getAggregateElement(NumSrcElts - i - 1); |
| 87 | else |
| 88 | Element = C->getAggregateElement(i); |
| 89 | |
| 90 | if (Element && isa<UndefValue>(Element)) { |
| 91 | Result <<= BitShift; |
| 92 | continue; |
| 93 | } |
| 94 | |
| 95 | auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element); |
| 96 | if (!ElementCI) |
| 97 | return ConstantExpr::getBitCast(C, DestTy); |
| 98 | |
| 99 | Result <<= BitShift; |
| 100 | Result |= ElementCI->getValue().zextOrSelf(Result.getBitWidth()); |
| 101 | } |
| 102 | |
| 103 | return nullptr; |
| 104 | } |
| 105 | |
| 106 | /// Constant fold a bitcast of C to DestTy, evaluating it with the help of |
| 107 | /// DataLayout. This always returns a non-null Constant, though the result may |
| 108 | /// be a plain ConstantExpr bitcast when the cast cannot be folded further. |
| 109 | Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) { |
| 110 | assert(CastInst::castIsValid(Instruction::BitCast, C, DestTy) && |
| 111 | "Invalid constantexpr bitcast!"); |
| 112 | |
| 113 | |
| 114 | if (C->isNullValue() && !DestTy->isX86_MMXTy() && !DestTy->isX86_AMXTy()) |
| 115 | return Constant::getNullValue(DestTy); |
| 116 | if (C->isAllOnesValue() && !DestTy->isX86_MMXTy() && !DestTy->isX86_AMXTy() && |
| 117 | !DestTy->isPtrOrPtrVectorTy()) |
| 118 | return Constant::getAllOnesValue(DestTy); |
| 119 | |
| 120 | if (auto *VTy = dyn_cast<VectorType>(C->getType())) { |
| 121 | |
| 122 | if (isa<IntegerType>(DestTy) || DestTy->isFloatingPointTy()) { |
| 123 | unsigned NumSrcElts = cast<FixedVectorType>(VTy)->getNumElements(); |
| 124 | Type *SrcEltTy = VTy->getElementType(); |
| 125 | |
| 126 | |
| 127 | |
| 128 | if (SrcEltTy->isFloatingPointTy()) { |
| 129 | unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits(); |
| 130 | auto *SrcIVTy = FixedVectorType::get( |
| 131 | IntegerType::get(C->getContext(), FPWidth), NumSrcElts); |
| 132 | |
| 133 | C = ConstantExpr::getBitCast(C, SrcIVTy); |
| 134 | } |
| 135 | |
| 136 | APInt Result(DL.getTypeSizeInBits(DestTy), 0); |
| 137 | if (Constant *CE = foldConstVectorToAPInt(Result, DestTy, C, |
| 138 | SrcEltTy, NumSrcElts, DL)) |
| 139 | return CE; |
| 140 | |
| 141 | if (isa<IntegerType>(DestTy)) |
| 142 | return ConstantInt::get(DestTy, Result); |
| 143 | |
| 144 | APFloat FP(DestTy->getFltSemantics(), Result); |
| 145 | return ConstantFP::get(DestTy->getContext(), FP); |
| 146 | } |
| 147 | } |
| 148 | |
| 149 | |
| 150 | auto *DestVTy = dyn_cast<VectorType>(DestTy); |
| 151 | if (!DestVTy) |
| 152 | return ConstantExpr::getBitCast(C, DestTy); |
| 153 | |
| 154 | |
| 155 | |
| 156 | if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) { |
| 157 | Constant *Ops = C; |
| 158 | return FoldBitCast(ConstantVector::get(Ops), DestTy, DL); |
| 159 | } |
| 160 | |
| 161 | |
| 162 | if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C)) |
| 163 | return ConstantExpr::getBitCast(C, DestTy); |
| 164 | |
| 165 | |
| 166 | unsigned NumDstElt = cast<FixedVectorType>(DestVTy)->getNumElements(); |
| 167 | unsigned NumSrcElt = cast<FixedVectorType>(C->getType())->getNumElements(); |
| 168 | if (NumDstElt == NumSrcElt) |
| 169 | return ConstantExpr::getBitCast(C, DestTy); |
| 170 | |
| 171 | Type *SrcEltTy = cast<VectorType>(C->getType())->getElementType(); |
| 172 | Type *DstEltTy = DestVTy->getElementType(); |
| 173 | |
| 174 | |
| 175 | |
| 176 | |
| 177 | |
| 178 | |
| 179 | |
| 180 | |
| 181 | |
| 182 | |
| 183 | |
| 184 | if (DstEltTy->isFloatingPointTy()) { |
| 185 | |
| 186 | unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits(); |
| 187 | auto *DestIVTy = FixedVectorType::get( |
| 188 | IntegerType::get(C->getContext(), FPWidth), NumDstElt); |
| 189 | |
| 190 | C = FoldBitCast(C, DestIVTy, DL); |
| 191 | |
| 192 | |
| 193 | return ConstantExpr::getBitCast(C, DestTy); |
| 194 | } |
| 195 | |
| 196 | |
| 197 | |
| 198 | if (SrcEltTy->isFloatingPointTy()) { |
| 199 | unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits(); |
| 200 | auto *SrcIVTy = FixedVectorType::get( |
| 201 | IntegerType::get(C->getContext(), FPWidth), NumSrcElt); |
| 202 | |
| 203 | C = ConstantExpr::getBitCast(C, SrcIVTy); |
| 204 | |
| 205 | if (!isa<ConstantVector>(C) && |
| 206 | !isa<ConstantDataVector>(C)) |
| 207 | return C; |
| 208 | } |
| 209 | |
| 210 | |
| 211 | |
| 212 | |
| 213 | |
| 214 | bool isLittleEndian = DL.isLittleEndian(); |
| 215 | |
| 216 | SmallVector<Constant*, 32> Result; |
| 217 | if (NumDstElt < NumSrcElt) { |
| 218 | |
| 219 | Constant *Zero = Constant::getNullValue(DstEltTy); |
| 220 | unsigned Ratio = NumSrcElt/NumDstElt; |
| 221 | unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits(); |
| 222 | unsigned SrcElt = 0; |
| 223 | for (unsigned i = 0; i != NumDstElt; ++i) { |
| 224 | |
| 225 | Constant *Elt = Zero; |
| 226 | unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1); |
| 227 | for (unsigned j = 0; j != Ratio; ++j) { |
| 228 | Constant *Src = C->getAggregateElement(SrcElt++); |
| 229 | if (Src && isa<UndefValue>(Src)) |
| 230 | Src = Constant::getNullValue( |
| 231 | cast<VectorType>(C->getType())->getElementType()); |
| 232 | else |
| 233 | Src = dyn_cast_or_null<ConstantInt>(Src); |
| 234 | if (!Src) |
| 235 | return ConstantExpr::getBitCast(C, DestTy); |
| 236 | |
| 237 | |
| 238 | Src = ConstantExpr::getZExt(Src, Elt->getType()); |
| 239 | |
| 240 | |
| 241 | Src = ConstantExpr::getShl(Src, |
| 242 | ConstantInt::get(Src->getType(), ShiftAmt)); |
| 243 | ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize; |
| 244 | |
| 245 | |
| 246 | Elt = ConstantExpr::getOr(Elt, Src); |
| 247 | } |
| 248 | Result.push_back(Elt); |
| 249 | } |
| 250 | return ConstantVector::get(Result); |
| 251 | } |
| 252 | |
| 253 | |
| 254 | unsigned Ratio = NumDstElt/NumSrcElt; |
| 255 | unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy); |
| 256 | |
| 257 | |
| 258 | for (unsigned i = 0; i != NumSrcElt; ++i) { |
| 259 | auto *Element = C->getAggregateElement(i); |
| 260 | |
| 261 | if (!Element) |
| 262 | return ConstantExpr::getBitCast(C, DestTy); |
| 263 | |
| 264 | if (isa<UndefValue>(Element)) { |
| 265 | |
| 266 | Result.append(Ratio, UndefValue::get(DstEltTy)); |
| 267 | continue; |
| 268 | } |
| 269 | |
| 270 | auto *Src = dyn_cast<ConstantInt>(Element); |
| 271 | if (!Src) |
| 272 | return ConstantExpr::getBitCast(C, DestTy); |
| 273 | |
| 274 | unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1); |
| 275 | for (unsigned j = 0; j != Ratio; ++j) { |
| 276 | |
| 277 | |
| 278 | Constant *Elt = ConstantExpr::getLShr(Src, |
| 279 | ConstantInt::get(Src->getType(), ShiftAmt)); |
| 280 | ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize; |
| 281 | |
| 282 | |
| 283 | |
| 284 | if (DstEltTy->isPointerTy()) { |
| 285 | IntegerType *DstIntTy = Type::getIntNTy(C->getContext(), DstBitSize); |
| 286 | Constant *CE = ConstantExpr::getTrunc(Elt, DstIntTy); |
| 287 | Result.push_back(ConstantExpr::getIntToPtr(CE, DstEltTy)); |
| 288 | continue; |
| 289 | } |
| 290 | |
| 291 | |
| 292 | Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy)); |
| 293 | } |
| 294 | } |
| 295 | |
| 296 | return ConstantVector::get(Result); |
| 297 | } |
| 298 | |
| 299 | } |
| 300 | |
| 301 | /// If this constant is a constant offset from a global value, return the |
| 302 | /// global and the offset. Because of ConstantExprs, this function is recursive. |
| 303 | bool llvm::IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV, |
| 304 | APInt &Offset, const DataLayout &DL, |
| 305 | DSOLocalEquivalent **DSOEquiv) { |
| 306 | if (DSOEquiv) |
| 307 | *DSOEquiv = nullptr; |
| 308 | |
| 309 | |
| 310 | if ((GV = dyn_cast<GlobalValue>(C))) { |
| 311 | unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType()); |
| 312 | Offset = APInt(BitWidth, 0); |
| 313 | return true; |
| 314 | } |
| 315 | |
| 316 | if (auto *FoundDSOEquiv = dyn_cast<DSOLocalEquivalent>(C)) { |
| 317 | if (DSOEquiv) |
| 318 | *DSOEquiv = FoundDSOEquiv; |
| 319 | GV = FoundDSOEquiv->getGlobalValue(); |
| 320 | unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType()); |
| 321 | Offset = APInt(BitWidth, 0); |
| 322 | return true; |
| 323 | } |
| 324 | |
| 325 | |
| 326 | auto *CE = dyn_cast<ConstantExpr>(C); |
| 327 | if (!CE) return false; |
| 328 | |
| 329 | |
| 330 | if (CE->getOpcode() == Instruction::PtrToInt || |
| 331 | CE->getOpcode() == Instruction::BitCast) |
| 332 | return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, DL, |
| 333 | DSOEquiv); |
| 334 | |
| 335 | |
| 336 | auto *GEP = dyn_cast<GEPOperator>(CE); |
| 337 | if (!GEP) |
| 338 | return false; |
| 339 | |
| 340 | unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType()); |
| 341 | APInt TmpOffset(BitWidth, 0); |
| 342 | |
| 343 | |
| 344 | if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, TmpOffset, DL, |
| 345 | DSOEquiv)) |
| 346 | return false; |
| 347 | |
| 348 | |
| 349 | if (!GEP->accumulateConstantOffset(DL, TmpOffset)) |
| 350 | return false; |
| 351 | |
| 352 | Offset = TmpOffset; |
| 353 | return true; |
| 354 | } |
| 355 | |
| 356 | Constant *llvm::ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy, |
| 357 | const DataLayout &DL) { |
| 358 | do { |
| 359 | Type *SrcTy = C->getType(); |
| 360 | uint64_t DestSize = DL.getTypeSizeInBits(DestTy); |
| 361 | uint64_t SrcSize = DL.getTypeSizeInBits(SrcTy); |
| 362 | if (SrcSize < DestSize) |
| 363 | return nullptr; |
| 364 | |
| 365 | |
| 366 | |
| 367 | if (C->isNullValue() && !DestTy->isX86_MMXTy() && !DestTy->isX86_AMXTy()) |
| 368 | return Constant::getNullValue(DestTy); |
| 369 | if (C->isAllOnesValue() && |
| 370 | (DestTy->isIntegerTy() || DestTy->isFloatingPointTy() || |
| 371 | DestTy->isVectorTy()) && |
| 372 | !DestTy->isX86_AMXTy() && !DestTy->isX86_MMXTy() && |
| 373 | !DestTy->isPtrOrPtrVectorTy()) |
| 374 | |
| 375 | |
| 376 | return Constant::getAllOnesValue(DestTy); |
| 377 | |
| 378 | |
| 379 | |
| 380 | |
| 381 | if (SrcSize == DestSize && |
| 382 | DL.isNonIntegralPointerType(SrcTy->getScalarType()) == |
| 383 | DL.isNonIntegralPointerType(DestTy->getScalarType())) { |
| 384 | Instruction::CastOps Cast = Instruction::BitCast; |
| 385 | |
| 386 | |
| 387 | if (SrcTy->isIntegerTy() && DestTy->isPointerTy()) |
| 388 | Cast = Instruction::IntToPtr; |
| 389 | else if (SrcTy->isPointerTy() && DestTy->isIntegerTy()) |
| 390 | Cast = Instruction::PtrToInt; |
| 391 | |
| 392 | if (CastInst::castIsValid(Cast, C, DestTy)) |
| 393 | return ConstantExpr::getCast(Cast, C, DestTy); |
| 394 | } |
| 395 | |
| 396 | |
| 397 | |
| 398 | if (!SrcTy->isAggregateType() && !SrcTy->isVectorTy()) |
| 399 | return nullptr; |
| 400 | |
| 401 | |
| 402 | |
| 403 | |
| 404 | |
| 405 | if (SrcTy->isStructTy()) { |
| 406 | |
| 407 | |
| 408 | unsigned Elem = 0; |
| 409 | Constant *ElemC; |
| 410 | do { |
| 411 | ElemC = C->getAggregateElement(Elem++); |
| 412 | } while (ElemC && DL.getTypeSizeInBits(ElemC->getType()).isZero()); |
| 413 | C = ElemC; |
| 414 | } else { |
| 415 | C = C->getAggregateElement(0u); |
| 416 | } |
| 417 | } while (C); |
| 418 | |
| 419 | return nullptr; |
| 420 | } |
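
A quick illustration of the entry point defined above: the hypothetical client program below (not part of ConstantFolding.cpp; it assumes LLVM 13 headers and libraries are available) asks ConstantFoldLoadThroughBitcast to coerce a <4 x i8> constant to i32, the same coercion a folded load through a bitcasted pointer would perform.

    #include "llvm/Analysis/ConstantFolding.h"
    #include "llvm/IR/Constants.h"
    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/Support/raw_ostream.h"

    int main() {
      llvm::LLVMContext Ctx;
      llvm::DataLayout DL("e");                 // little-endian, default sizes
      auto *I8 = llvm::Type::getInt8Ty(Ctx);
      llvm::Constant *Elts[] = {llvm::ConstantInt::get(I8, 4),
                                llvm::ConstantInt::get(I8, 3),
                                llvm::ConstantInt::get(I8, 2),
                                llvm::ConstantInt::get(I8, 1)};
      llvm::Constant *Vec = llvm::ConstantVector::get(Elts);

      // Reinterpret the <4 x i8> constant as a same-sized i32; the result is a
      // folded ConstantInt or, at worst, a plain bitcast ConstantExpr.
      if (llvm::Constant *Folded = llvm::ConstantFoldLoadThroughBitcast(
              Vec, llvm::Type::getInt32Ty(Ctx), DL))
        Folded->print(llvm::outs());
      return 0;
    }
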
| 421 | |
| 422 | namespace { |
| 423 | |
| 424 | /// Recursive helper to read bits out of a constant global. C is the constant |
| 425 | /// being copied out of, ByteOffset is an offset into C, CurPtr points at the |
| 426 | /// buffer to copy the bytes into, and BytesLeft is the space remaining in that |
| 427 | /// buffer. Bytes are written in the target's memory (endianness) order. |
| 428 | bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr, |
| 429 | unsigned BytesLeft, const DataLayout &DL) { |
| 430 | assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) && |
| 431 | "Out of range access"); |
| 432 | |
| 433 | |
| 434 | |
| 435 | if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) |
| 436 | return true; |
| 437 | |
| 438 | if (auto *CI = dyn_cast<ConstantInt>(C)) { |
| 439 | if (CI->getBitWidth() > 64 || |
| 440 | (CI->getBitWidth() & 7) != 0) |
| 441 | return false; |
| 442 | |
| 443 | uint64_t Val = CI->getZExtValue(); |
| 444 | unsigned IntBytes = unsigned(CI->getBitWidth()/8); |
| 445 | |
| 446 | for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) { |
| 447 | int n = ByteOffset; |
| 448 | if (!DL.isLittleEndian()) |
| 449 | n = IntBytes - n - 1; |
| 450 | CurPtr[i] = (unsigned char)(Val >> (n * 8)); |
| 451 | ++ByteOffset; |
| 452 | } |
| 453 | return true; |
| 454 | } |
| 455 | |
| 456 | if (auto *CFP = dyn_cast<ConstantFP>(C)) { |
| 457 | if (CFP->getType()->isDoubleTy()) { |
| 458 | C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), DL); |
| 459 | return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL); |
| 460 | } |
| 461 | if (CFP->getType()->isFloatTy()){ |
| 462 | C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), DL); |
| 463 | return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL); |
| 464 | } |
| 465 | if (CFP->getType()->isHalfTy()){ |
| 466 | C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), DL); |
| 467 | return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL); |
| 468 | } |
| 469 | return false; |
| 470 | } |
| 471 | |
| 472 | if (auto *CS = dyn_cast<ConstantStruct>(C)) { |
| 473 | const StructLayout *SL = DL.getStructLayout(CS->getType()); |
| 474 | unsigned Index = SL->getElementContainingOffset(ByteOffset); |
| 475 | uint64_t CurEltOffset = SL->getElementOffset(Index); |
| 476 | ByteOffset -= CurEltOffset; |
| 477 | |
| 478 | while (true) { |
| 479 | |
| 480 | |
| 481 | uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType()); |
| 482 | |
| 483 | if (ByteOffset < EltSize && |
| 484 | !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr, |
| 485 | BytesLeft, DL)) |
| 486 | return false; |
| 487 | |
| 488 | ++Index; |
| 489 | |
| 490 | |
| 491 | if (Index == CS->getType()->getNumElements()) |
| 492 | return true; |
| 493 | |
| 494 | |
| 495 | uint64_t NextEltOffset = SL->getElementOffset(Index); |
| 496 | |
| 497 | if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset) |
| 498 | return true; |
| 499 | |
| 500 | |
| 501 | CurPtr += NextEltOffset - CurEltOffset - ByteOffset; |
| 502 | BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset; |
| 503 | ByteOffset = 0; |
| 504 | CurEltOffset = NextEltOffset; |
| 505 | } |
| 506 | |
| 507 | } |
| 508 | |
| 509 | if (isa<ConstantArray>(C) || isa<ConstantVector>(C) || |
| 510 | isa<ConstantDataSequential>(C)) { |
| 511 | uint64_t NumElts; |
| 512 | Type *EltTy; |
| 513 | if (auto *AT = dyn_cast<ArrayType>(C->getType())) { |
| 514 | NumElts = AT->getNumElements(); |
| 515 | EltTy = AT->getElementType(); |
| 516 | } else { |
| 517 | NumElts = cast<FixedVectorType>(C->getType())->getNumElements(); |
| 518 | EltTy = cast<FixedVectorType>(C->getType())->getElementType(); |
| 519 | } |
| 520 | uint64_t EltSize = DL.getTypeAllocSize(EltTy); |
| 521 | uint64_t Index = ByteOffset / EltSize; |
| 522 | uint64_t Offset = ByteOffset - Index * EltSize; |
| 523 | |
| 524 | for (; Index != NumElts; ++Index) { |
| 525 | if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr, |
| 526 | BytesLeft, DL)) |
| 527 | return false; |
| 528 | |
| 529 | uint64_t BytesWritten = EltSize - Offset; |
| 530 | assert(BytesWritten <= EltSize && "Not indexing into this element?"); |
| 531 | if (BytesWritten >= BytesLeft) |
| 532 | return true; |
| 533 | |
| 534 | Offset = 0; |
| 535 | BytesLeft -= BytesWritten; |
| 536 | CurPtr += BytesWritten; |
| 537 | } |
| 538 | return true; |
| 539 | } |
| 540 | |
| 541 | if (auto *CE = dyn_cast<ConstantExpr>(C)) { |
| 542 | if (CE->getOpcode() == Instruction::IntToPtr && |
| 543 | CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) { |
| 544 | return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr, |
| 545 | BytesLeft, DL); |
| 546 | } |
| 547 | } |
| 548 | |
| 549 | |
| 550 | return false; |
| 551 | } |
| 552 | /// Fold a load of LoadTy from constant pointer C by reading the underlying bytes. |
| 553 | Constant *FoldReinterpretLoadFromConstPtr(Constant *C, Type *LoadTy, |
| 554 | const DataLayout &DL) { |
| 555 | |
| 556 | if (isa<ScalableVectorType>(LoadTy)) |
| 557 | return nullptr; |
| 558 | |
| 559 | auto *PTy = cast<PointerType>(C->getType()); |
| 560 | auto *IntType = dyn_cast<IntegerType>(LoadTy); |
| 561 | |
| 562 | |
| 563 | if (!IntType) { |
| 564 | unsigned AS = PTy->getAddressSpace(); |
| 565 | |
| 566 | |
| 567 | |
| 568 | |
| 569 | |
| 570 | Type *MapTy; |
| 571 | if (LoadTy->isHalfTy()) |
| 572 | MapTy = Type::getInt16Ty(C->getContext()); |
| 573 | else if (LoadTy->isFloatTy()) |
| 574 | MapTy = Type::getInt32Ty(C->getContext()); |
| 575 | else if (LoadTy->isDoubleTy()) |
| 576 | MapTy = Type::getInt64Ty(C->getContext()); |
| 577 | else if (LoadTy->isVectorTy()) { |
| 578 | MapTy = PointerType::getIntNTy( |
| 579 | C->getContext(), DL.getTypeSizeInBits(LoadTy).getFixedSize()); |
| 580 | } else |
| 581 | return nullptr; |
| 582 | |
| 583 | C = FoldBitCast(C, MapTy->getPointerTo(AS), DL); |
| 584 | if (Constant *Res = FoldReinterpretLoadFromConstPtr(C, MapTy, DL)) { |
| 585 | if (Res->isNullValue() && !LoadTy->isX86_MMXTy() && |
| 586 | !LoadTy->isX86_AMXTy()) |
| 587 | |
| 588 | return Constant::getNullValue(LoadTy); |
| 589 | Type *CastTy = LoadTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(LoadTy) : LoadTy; |
| 590 | Res = FoldBitCast(Res, CastTy, DL); |
| 591 | if (LoadTy->isPtrOrPtrVectorTy()) { |
| 592 | |
| 593 | if (Res->isNullValue() && !LoadTy->isX86_MMXTy() && |
| 594 | !LoadTy->isX86_AMXTy()) |
| 595 | return Constant::getNullValue(LoadTy); |
| 596 | if (DL.isNonIntegralPointerType(LoadTy->getScalarType())) |
| 597 | |
| 598 | return nullptr; |
| 599 | Res = ConstantExpr::getCast(Instruction::IntToPtr, Res, LoadTy); |
| 600 | } |
| 601 | return Res; |
| 602 | } |
| 603 | return nullptr; |
| 604 | } |
| 605 | |
| 606 | unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8; |
| 607 | if (BytesLoaded > 32 || BytesLoaded == 0) |
| 608 | return nullptr; |
| 609 | |
| 610 | GlobalValue *GVal; |
| 611 | APInt OffsetAI; |
| 612 | if (!IsConstantOffsetFromGlobal(C, GVal, OffsetAI, DL)) |
| 613 | return nullptr; |
| 614 | |
| 615 | auto *GV = dyn_cast<GlobalVariable>(GVal); |
| 616 | if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() || |
| 617 | !GV->getInitializer()->getType()->isSized()) |
| 618 | return nullptr; |
| 619 | |
| 620 | int64_t Offset = OffsetAI.getSExtValue(); |
| 621 | int64_t InitializerSize = |
| 622 | DL.getTypeAllocSize(GV->getInitializer()->getType()).getFixedSize(); |
| 623 | |
| 624 | |
| 625 | if (Offset <= -1 * static_cast<int64_t>(BytesLoaded)) |
| 626 | return UndefValue::get(IntType); |
| 627 | |
| 628 | |
| 629 | if (Offset >= InitializerSize) |
| 630 | return UndefValue::get(IntType); |
| 631 | |
| 632 | unsigned char RawBytes[32] = {0}; |
| 633 | unsigned char *CurPtr = RawBytes; |
| 634 | unsigned BytesLeft = BytesLoaded; |
| 635 | |
| 636 | |
| 637 | if (Offset < 0) { |
| 638 | CurPtr += -Offset; |
| 639 | BytesLeft += Offset; |
| 640 | Offset = 0; |
| 641 | } |
| 642 | |
| 643 | if (!ReadDataFromGlobal(GV->getInitializer(), Offset, CurPtr, BytesLeft, DL)) |
| 644 | return nullptr; |
| 645 | |
| 646 | APInt ResultVal = APInt(IntType->getBitWidth(), 0); |
| 647 | if (DL.isLittleEndian()) { |
| 648 | ResultVal = RawBytes[BytesLoaded - 1]; |
| 649 | for (unsigned i = 1; i != BytesLoaded; ++i) { |
| 650 | ResultVal <<= 8; |
| 651 | ResultVal |= RawBytes[BytesLoaded - 1 - i]; |
| 652 | } |
| 653 | } else { |
| 654 | ResultVal = RawBytes[0]; |
| 655 | for (unsigned i = 1; i != BytesLoaded; ++i) { |
| 656 | ResultVal <<= 8; |
| 657 | ResultVal |= RawBytes[i]; |
| 658 | } |
| 659 | } |
| 660 | |
| 661 | return ConstantInt::get(IntType->getContext(), ResultVal); |
| 662 | } |
| 663 | |
| 664 | Constant *ConstantFoldLoadThroughBitcastExpr(ConstantExpr *CE, Type *DestTy, |
| 665 | const DataLayout &DL) { |
| 666 | auto *SrcPtr = CE->getOperand(0); |
| 667 | if (!SrcPtr->getType()->isPointerTy()) |
| 668 | return nullptr; |
| 669 | |
| 670 | return ConstantFoldLoadFromConstPtr(SrcPtr, DestTy, DL); |
| 671 | } |
| 672 | |
| 673 | } |
| 674 | |
| 675 | Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, |
| 676 | const DataLayout &DL) { |
| 677 | |
| 678 | if (auto *GV = dyn_cast<GlobalVariable>(C)) |
| 679 | if (GV->isConstant() && GV->hasDefinitiveInitializer()) |
| 680 | return ConstantFoldLoadThroughBitcast(GV->getInitializer(), Ty, DL); |
| 681 | |
| 682 | if (auto *GA = dyn_cast<GlobalAlias>(C)) |
| 683 | if (GA->getAliasee() && !GA->isInterposable()) |
| 684 | return ConstantFoldLoadFromConstPtr(GA->getAliasee(), Ty, DL); |
| 685 | |
| 686 | |
| 687 | auto *CE = dyn_cast<ConstantExpr>(C); |
| 688 | if (!CE) |
| 689 | return nullptr; |
| 690 | |
| 691 | if (CE->getOpcode() == Instruction::GetElementPtr) { |
| 692 | if (auto *GV = dyn_cast<GlobalVariable>(CE->getOperand(0))) { |
| 693 | if (GV->isConstant() && GV->hasDefinitiveInitializer()) { |
| 694 | if (Constant *V = ConstantFoldLoadThroughGEPConstantExpr( |
| 695 | GV->getInitializer(), CE, Ty, DL)) |
| 696 | return V; |
| 697 | } |
| 698 | } else { |
| 699 | |
| 700 | |
| 701 | |
| 702 | |
| 703 | SmallVector<Constant *> Ops; |
| 704 | for (unsigned I = 0, E = CE->getNumOperands(); I != E; ++I) |
| 705 | Ops.push_back(cast<Constant>(CE->getOperand(I))); |
| 706 | if (auto *Simplified = dyn_cast_or_null<ConstantExpr>( |
| 707 | SymbolicallyEvaluateGEP(cast<GEPOperator>(CE), Ops, DL, nullptr, |
| 708 | true))) { |
| 709 | |
| 710 | |
| 711 | |
| 712 | |
| 713 | if (isa<GEPOperator>(Simplified)) { |
| 714 | if (auto *GV = dyn_cast<GlobalVariable>(Simplified->getOperand(0))) { |
| 715 | if (GV->isConstant() && GV->hasDefinitiveInitializer()) { |
| 716 | if (Constant *V = ConstantFoldLoadThroughGEPConstantExpr( |
| 717 | GV->getInitializer(), Simplified, Ty, DL)) |
| 718 | return V; |
| 719 | } |
| 720 | } |
| 721 | } else { |
| 722 | return ConstantFoldLoadFromConstPtr(Simplified, Ty, DL); |
| 723 | } |
| 724 | } |
| 725 | } |
| 726 | } |
| 727 | |
| 728 | if (CE->getOpcode() == Instruction::BitCast) |
| 729 | if (Constant *LoadedC = ConstantFoldLoadThroughBitcastExpr(CE, Ty, DL)) |
| 730 | return LoadedC; |
| 731 | |
| 732 | |
| 733 | |
| 734 | StringRef Str; |
| 735 | if (getConstantStringInfo(CE, Str) && !Str.empty()) { |
| 736 | size_t StrLen = Str.size(); |
| 737 | unsigned NumBits = Ty->getPrimitiveSizeInBits(); |
| 738 | |
| 739 | |
| 740 | if ((NumBits >> 3) == StrLen + 1 && (NumBits & 7) == 0 && |
| 741 | (isa<IntegerType>(Ty) || Ty->isFloatingPointTy())) { |
| 742 | APInt StrVal(NumBits, 0); |
| 743 | APInt SingleChar(NumBits, 0); |
| 744 | if (DL.isLittleEndian()) { |
| 745 | for (unsigned char C : reverse(Str.bytes())) { |
| 746 | SingleChar = static_cast<uint64_t>(C); |
| 747 | StrVal = (StrVal << 8) | SingleChar; |
| 748 | } |
| 749 | } else { |
| 750 | for (unsigned char C : Str.bytes()) { |
| 751 | SingleChar = static_cast<uint64_t>(C); |
| 752 | StrVal = (StrVal << 8) | SingleChar; |
| 753 | } |
| 754 | |
| 755 | SingleChar = 0; |
| 756 | StrVal = (StrVal << 8) | SingleChar; |
| 757 | } |
| 758 | |
| 759 | Constant *Res = ConstantInt::get(CE->getContext(), StrVal); |
| 760 | if (Ty->isFloatingPointTy()) |
| 761 | Res = ConstantExpr::getBitCast(Res, Ty); |
| 762 | return Res; |
| 763 | } |
| 764 | } |
| 765 | |
| 766 | |
| 767 | |
| 768 | if (auto *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(CE))) { |
| 769 | if (GV->isConstant() && GV->hasDefinitiveInitializer()) { |
| 770 | if (GV->getInitializer()->isNullValue()) |
| 771 | return Constant::getNullValue(Ty); |
| 772 | if (isa<UndefValue>(GV->getInitializer())) |
| 773 | return UndefValue::get(Ty); |
| 774 | } |
| 775 | } |
| 776 | |
| 777 | |
| 778 | return FoldReinterpretLoadFromConstPtr(CE, Ty, DL); |
| 779 | } |
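
As a usage sketch for the public API just defined (hypothetical client code, not part of the analyzed file; it assumes an LLVM 13 development setup), the program below folds a load from a constant global straight to the global's initializer:

    #include "llvm/Analysis/ConstantFolding.h"
    #include "llvm/IR/Constants.h"
    #include "llvm/IR/GlobalVariable.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Module.h"
    #include "llvm/Support/raw_ostream.h"

    int main() {
      llvm::LLVMContext Ctx;
      llvm::Module M("demo", Ctx);
      auto *I32 = llvm::Type::getInt32Ty(Ctx);

      // Equivalent of the IR: @g = internal constant i32 42
      auto *GV = new llvm::GlobalVariable(M, I32, /*isConstant=*/true,
                                          llvm::GlobalValue::InternalLinkage,
                                          llvm::ConstantInt::get(I32, 42), "g");

      // @g has a definitive constant initializer, so a load of i32 from it
      // folds to i32 42.
      if (llvm::Constant *Folded =
              llvm::ConstantFoldLoadFromConstPtr(GV, I32, M.getDataLayout()))
        Folded->print(llvm::outs());
      return 0;
    }
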
| 780 | |
| 781 | namespace { |
| 782 | |
| 783 | |
| 784 | /// One of Op0/Op1 is a ConstantExpr. Try to symbolically evaluate the result |
| 785 | /// of the binary operation without building it, using known bits for 'and' |
| 786 | /// and constant-offset-from-global reasoning for pointer subtraction. |
| 787 | Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1, |
| 788 | const DataLayout &DL) { |
| 789 | |
| 790 | |
| 791 | |
| 792 | |
| 793 | |
| 794 | |
| 795 | if (Opc == Instruction::And) { |
| 796 | KnownBits Known0 = computeKnownBits(Op0, DL); |
| 797 | KnownBits Known1 = computeKnownBits(Op1, DL); |
| 798 | if ((Known1.One | Known0.Zero).isAllOnesValue()) { |
| 799 | |
| 800 | return Op0; |
| 801 | } |
| 802 | if ((Known0.One | Known1.Zero).isAllOnesValue()) { |
| 803 | |
| 804 | return Op1; |
| 805 | } |
| 806 | |
| 807 | Known0 &= Known1; |
| 808 | if (Known0.isConstant()) |
| 809 | return ConstantInt::get(Op0->getType(), Known0.getConstant()); |
| 810 | } |
| 811 | |
| 812 | |
| 813 | |
| 814 | if (Opc == Instruction::Sub) { |
| 815 | GlobalValue *GV1, *GV2; |
| 816 | APInt Offs1, Offs2; |
| 817 | |
| 818 | if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, DL)) |
| 819 | if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, DL) && GV1 == GV2) { |
| 820 | unsigned OpSize = DL.getTypeSizeInBits(Op0->getType()); |
| 821 | |
| 822 | |
| 823 | |
| 824 | |
| 825 | return ConstantInt::get(Op0->getType(), Offs1.zextOrTrunc(OpSize) - |
| 826 | Offs2.zextOrTrunc(OpSize)); |
| 827 | } |
| 828 | } |
| 829 | |
| 830 | return nullptr; |
| 831 | } |
| 832 | |
| 833 | /// If the GEP indices are not pointer-sized integers, explicitly cast them to |
| 834 | /// the index type so they are not implicitly converted by the getelementptr. |
| 835 | Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops, |
| 836 | Type *ResultTy, Optional<unsigned> InRangeIndex, |
| 837 | const DataLayout &DL, const TargetLibraryInfo *TLI) { |
| 838 | Type *IntIdxTy = DL.getIndexType(ResultTy); |
| 839 | Type *IntIdxScalarTy = IntIdxTy->getScalarType(); |
| 840 | |
| 841 | bool Any = false; |
| 842 | SmallVector<Constant*, 32> NewIdxs; |
| 843 | for (unsigned i = 1, e = Ops.size(); i != e; ++i) { |
| 844 | if ((i == 1 || |
| 845 | !isa<StructType>(GetElementPtrInst::getIndexedType( |
| 846 | SrcElemTy, Ops.slice(1, i - 1)))) && |
| 847 | Ops[i]->getType()->getScalarType() != IntIdxScalarTy) { |
| 848 | Any = true; |
| 849 | Type *NewType = Ops[i]->getType()->isVectorTy() |
| 850 | ? IntIdxTy |
| 851 | : IntIdxScalarTy; |
| 852 | NewIdxs.push_back(ConstantExpr::getCast(CastInst::getCastOpcode(Ops[i], |
| 853 | true, |
| 854 | NewType, |
| 855 | true), |
| 856 | Ops[i], NewType)); |
| 857 | } else |
| 858 | NewIdxs.push_back(Ops[i]); |
| 859 | } |
| 860 | |
| 861 | if (!Any) |
| 862 | return nullptr; |
| 863 | |
| 864 | Constant *C = ConstantExpr::getGetElementPtr( |
| 865 | SrcElemTy, Ops[0], NewIdxs, false, InRangeIndex); |
| 866 | return ConstantFoldConstant(C, DL, TLI); |
| 867 | } |
| 868 | |
| 869 | /// Strip pointer casts (and, for load operands, non-interposable aliases), but preserve the address space. |
| 870 | Constant *StripPtrCastKeepAS(Constant *Ptr, bool ForLoadOperand) { |
| 871 | assert(Ptr->getType()->isPointerTy() && "Not a pointer type"); |
| 872 | auto *OldPtrTy = cast<PointerType>(Ptr->getType()); |
| 873 | Ptr = cast<Constant>(Ptr->stripPointerCasts()); |
| 874 | if (ForLoadOperand) { |
| 875 | while (isa<GlobalAlias>(Ptr) && !cast<GlobalAlias>(Ptr)->isInterposable() && |
| 876 | !cast<GlobalAlias>(Ptr)->getBaseObject()->isInterposable()) { |
| 877 | Ptr = cast<GlobalAlias>(Ptr)->getAliasee(); |
| 878 | } |
| 879 | } |
| 880 | |
| 881 | auto *NewPtrTy = cast<PointerType>(Ptr->getType()); |
| 882 | |
| 883 | |
| 884 | if (NewPtrTy->getAddressSpace() != OldPtrTy->getAddressSpace()) { |
| 885 | Ptr = ConstantExpr::getPointerCast( |
| 886 | Ptr, PointerType::getWithSamePointeeType(NewPtrTy, |
| 887 | OldPtrTy->getAddressSpace())); |
| 888 | } |
| 889 | return Ptr; |
| 890 | } |
| 891 | |
| 892 | /// If we can symbolically evaluate the GEP constant expression, do so. |
| 893 | Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP, |
| 894 | ArrayRef<Constant *> Ops, |
| 895 | const DataLayout &DL, |
| 896 | const TargetLibraryInfo *TLI, |
| 897 | bool ForLoadOperand) { |
| 898 | const GEPOperator *InnermostGEP = GEP; |
| 899 | bool InBounds = GEP->isInBounds(); |
| 900 | |
| 901 | Type *SrcElemTy = GEP->getSourceElementType(); |
| 902 | Type *ResElemTy = GEP->getResultElementType(); |
| 903 | Type *ResTy = GEP->getType(); |
| 904 | if (!SrcElemTy->isSized() || isa<ScalableVectorType>(SrcElemTy)) |
| 905 | return nullptr; |
| 906 | |
| 907 | if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy, |
| 908 | GEP->getInRangeIndex(), DL, TLI)) |
| 909 | return C; |
| 910 | |
| 911 | Constant *Ptr = Ops[0]; |
| 912 | if (!Ptr->getType()->isPointerTy()) |
| 913 | return nullptr; |
| 914 | |
| 915 | Type *IntIdxTy = DL.getIndexType(Ptr->getType()); |
| 916 | |
| 917 | |
| 918 | |
| 919 | if (Ops.size() == 2 && ResElemTy->isIntegerTy(8)) { |
| 920 | auto *CE = dyn_cast<ConstantExpr>(Ops[1]); |
| 921 | assert((!CE || CE->getType() == IntIdxTy) && |
| 922 | "CastGEPIndices didn't canonicalize index types!"); |
| 923 | if (CE && CE->getOpcode() == Instruction::Sub && |
| 924 | CE->getOperand(0)->isNullValue()) { |
| 925 | Constant *Res = ConstantExpr::getPtrToInt(Ptr, CE->getType()); |
| 926 | Res = ConstantExpr::getSub(Res, CE->getOperand(1)); |
| 927 | Res = ConstantExpr::getIntToPtr(Res, ResTy); |
| 928 | return ConstantFoldConstant(Res, DL, TLI); |
| 929 | } |
| 930 | } |
| 931 | |
| 932 | for (unsigned i = 1, e = Ops.size(); i != e; ++i) |
| 933 | if (!isa<ConstantInt>(Ops[i])) |
| 934 | return nullptr; |
| 935 | |
| 936 | unsigned BitWidth = DL.getTypeSizeInBits(IntIdxTy); |
| 937 | APInt Offset = |
| 938 | APInt(BitWidth, |
| 939 | DL.getIndexedOffsetInType( |
| 940 | SrcElemTy, |
| 941 | makeArrayRef((Value * const *)Ops.data() + 1, Ops.size() - 1))); |
| 942 | Ptr = StripPtrCastKeepAS(Ptr, ForLoadOperand); |
| 943 | |
| 944 | |
| 945 | while (auto *GEP = dyn_cast<GEPOperator>(Ptr)) { |
| 946 | InnermostGEP = GEP; |
| 947 | InBounds &= GEP->isInBounds(); |
| 948 | |
| 949 | SmallVector<Value *, 4> NestedOps(GEP->op_begin() + 1, GEP->op_end()); |
| 950 | |
| 951 | |
| 952 | bool AllConstantInt = true; |
| 953 | for (Value *NestedOp : NestedOps) |
| 954 | if (!isa<ConstantInt>(NestedOp)) { |
| 955 | AllConstantInt = false; |
| 956 | break; |
| 957 | } |
| 958 | if (!AllConstantInt) |
| 959 | break; |
| 960 | |
| 961 | Ptr = cast<Constant>(GEP->getOperand(0)); |
| 962 | SrcElemTy = GEP->getSourceElementType(); |
| 963 | Offset += APInt(BitWidth, DL.getIndexedOffsetInType(SrcElemTy, NestedOps)); |
| 964 | Ptr = StripPtrCastKeepAS(Ptr, ForLoadOperand); |
| 965 | } |
| 966 | |
| 967 | |
| 968 | |
| 969 | APInt BasePtr(BitWidth, 0); |
| 970 | if (auto *CE = dyn_cast<ConstantExpr>(Ptr)) { |
| 971 | if (CE->getOpcode() == Instruction::IntToPtr) { |
| 972 | if (auto *Base = dyn_cast<ConstantInt>(CE->getOperand(0))) |
| 973 | BasePtr = Base->getValue().zextOrTrunc(BitWidth); |
| 974 | } |
| 975 | } |
| 976 | |
| 977 | auto *PTy = cast<PointerType>(Ptr->getType()); |
| 978 | if ((Ptr->isNullValue() || BasePtr != 0) && |
| 979 | !DL.isNonIntegralPointerType(PTy)) { |
| 980 | Constant *C = ConstantInt::get(Ptr->getContext(), Offset + BasePtr); |
| 981 | return ConstantExpr::getIntToPtr(C, ResTy); |
| 982 | } |
| 983 | |
| 984 | |
| 985 | |
| 986 | |
| 987 | |
| 988 | SmallVector<Constant *, 32> NewIdxs; |
| 989 | Type *Ty = PTy; |
| 990 | SrcElemTy = PTy->getElementType(); |
| 991 | |
| 992 | do { |
| 993 | if (!Ty->isStructTy()) { |
| 994 | if (Ty->isPointerTy()) { |
| 995 | |
| 996 | if (!NewIdxs.empty()) |
| 997 | break; |
| 998 | |
| 999 | Ty = SrcElemTy; |
| 1000 | |
| 1001 | |
| 1002 | if (!Ty->isSized()) |
| 1003 | return nullptr; |
| 1004 | } else { |
| 1005 | Type *NextTy = GetElementPtrInst::getTypeAtIndex(Ty, (uint64_t)0); |
| 1006 | if (!NextTy) |
| 1007 | break; |
| 1008 | Ty = NextTy; |
| 1009 | } |
| 1010 | |
| 1011 | |
| 1012 | APInt ElemSize(BitWidth, DL.getTypeAllocSize(Ty)); |
| 1013 | if (ElemSize == 0) { |
| 1014 | |
| 1015 | |
| 1016 | |
| 1017 | NewIdxs.push_back(ConstantInt::get(IntIdxTy, 0)); |
| 1018 | } else { |
| 1019 | |
| 1020 | |
| 1021 | bool Overflow; |
| 1022 | APInt NewIdx = Offset.sdiv_ov(ElemSize, Overflow); |
| 1023 | if (Overflow) |
| 1024 | break; |
| 1025 | Offset -= NewIdx * ElemSize; |
| 1026 | NewIdxs.push_back(ConstantInt::get(IntIdxTy, NewIdx)); |
| 1027 | } |
| 1028 | } else { |
| 1029 | auto *STy = cast<StructType>(Ty); |
| 1030 | |
| 1031 | |
| 1032 | |
| 1033 | |
| 1034 | const StructLayout &SL = *DL.getStructLayout(STy); |
| 1035 | if (Offset.isNegative() || Offset.uge(SL.getSizeInBytes())) |
| 1036 | break; |
| 1037 | |
| 1038 | |
| 1039 | |
| 1040 | |
| 1041 | unsigned ElIdx = SL.getElementContainingOffset(Offset.getZExtValue()); |
| 1042 | NewIdxs.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()), |
| 1043 | ElIdx)); |
| 1044 | Offset -= APInt(BitWidth, SL.getElementOffset(ElIdx)); |
| 1045 | Ty = STy->getTypeAtIndex(ElIdx); |
| 1046 | } |
| 1047 | } while (Ty != ResElemTy); |
| 1048 | |
| 1049 | |
| 1050 | |
| 1051 | |
| 1052 | if (Offset != 0) |
| 1053 | return nullptr; |
| 1054 | |
| 1055 | |
| 1056 | |
| 1057 | Optional<unsigned> InRangeIndex; |
| 1058 | if (Optional<unsigned> LastIRIndex = InnermostGEP->getInRangeIndex()) |
| 1059 | if (SrcElemTy == InnermostGEP->getSourceElementType() && |
| 1060 | NewIdxs.size() > *LastIRIndex) { |
| 1061 | InRangeIndex = LastIRIndex; |
| 1062 | for (unsigned I = 0; I <= *LastIRIndex; ++I) |
| 1063 | if (NewIdxs[I] != InnermostGEP->getOperand(I + 1)) |
| 1064 | return nullptr; |
| 1065 | } |
| 1066 | |
| 1067 | |
| 1068 | Constant *C = ConstantExpr::getGetElementPtr(SrcElemTy, Ptr, NewIdxs, |
| 1069 | InBounds, InRangeIndex); |
| 1070 | assert(C->getType()->getPointerElementType() == Ty && |
| 1071 | "Computed GetElementPtr has unexpected type!"); |
| 1072 | |
| 1073 | |
| 1074 | |
| 1075 | if (C->getType() != ResTy) |
| 1076 | C = FoldBitCast(C, ResTy, DL); |
| 1077 | |
| 1078 | return C; |
| 1079 | } |
| 1080 | |
| 1081 | |
| 1082 | /// Attempt to fold an instruction or an instruction-like constant expression |
| 1083 | /// (InstOrCE) with the given opcode, using the already-folded operands Ops. |
| 1084 | /// Shared by ConstantFoldInstOperands and ConstantFoldConstant below. |
| 1085 | |
| 1086 | Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode, |
| 1087 | ArrayRef<Constant *> Ops, |
| 1088 | const DataLayout &DL, |
| 1089 | const TargetLibraryInfo *TLI) { |
| 1090 | Type *DestTy = InstOrCE->getType(); |
| 1091 | |
| 1092 | if (Instruction::isUnaryOp(Opcode)) |
| 1093 | return ConstantFoldUnaryOpOperand(Opcode, Ops[0], DL); |
| 1094 | |
| 1095 | if (Instruction::isBinaryOp(Opcode)) |
| 1096 | return ConstantFoldBinaryOpOperands(Opcode, Ops[0], Ops[1], DL); |
| 1097 | |
| 1098 | if (Instruction::isCast(Opcode)) |
| 1099 | return ConstantFoldCastOperand(Opcode, Ops[0], DestTy, DL); |
| 1100 | |
| 1101 | if (auto *GEP = dyn_cast<GEPOperator>(InstOrCE)) { |
| 1102 | if (Constant *C = SymbolicallyEvaluateGEP(GEP, Ops, DL, TLI, |
| 1103 | false)) |
| 1104 | return C; |
| 1105 | |
| 1106 | return ConstantExpr::getGetElementPtr(GEP->getSourceElementType(), Ops[0], |
| 1107 | Ops.slice(1), GEP->isInBounds(), |
| 1108 | GEP->getInRangeIndex()); |
| 1109 | } |
| 1110 | |
| 1111 | if (auto *CE = dyn_cast<ConstantExpr>(InstOrCE)) |
| 1112 | return CE->getWithOperands(Ops); |
| 1113 | |
| 1114 | switch (Opcode) { |
| 1115 | default: return nullptr; |
| 1116 | case Instruction::ICmp: |
| 1117 | case Instruction::FCmp: llvm_unreachable("Invalid for compares"); |
| 1118 | case Instruction::Freeze: |
| 1119 | return isGuaranteedNotToBeUndefOrPoison(Ops[0]) ? Ops[0] : nullptr; |
| 1120 | case Instruction::Call: |
| 1121 | if (auto *F = dyn_cast<Function>(Ops.back())) { |
| 1122 | const auto *Call = cast<CallBase>(InstOrCE); |
| 1123 | if (canConstantFoldCallTo(Call, F)) |
| 1124 | return ConstantFoldCall(Call, F, Ops.slice(0, Ops.size() - 1), TLI); |
| 1125 | } |
| 1126 | return nullptr; |
| 1127 | case Instruction::Select: |
| 1128 | return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2]); |
| 1129 | case Instruction::ExtractElement: |
| 1130 | return ConstantExpr::getExtractElement(Ops[0], Ops[1]); |
| 1131 | case Instruction::ExtractValue: |
| 1132 | return ConstantExpr::getExtractValue( |
| 1133 | Ops[0], cast<ExtractValueInst>(InstOrCE)->getIndices()); |
| 1134 | case Instruction::InsertElement: |
| 1135 | return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]); |
| 1136 | case Instruction::ShuffleVector: |
| 1137 | return ConstantExpr::getShuffleVector( |
| 1138 | Ops[0], Ops[1], cast<ShuffleVectorInst>(InstOrCE)->getShuffleMask()); |
| 1139 | } |
| 1140 | } |
| 1141 | |
| 1142 | } |
| 1143 | |
| 1144 | //===----------------------------------------------------------------------===// |
| 1145 | // Constant Folding public APIs |
| 1146 | //===----------------------------------------------------------------------===// |
| 1147 | |
| 1148 | namespace { |
| 1149 | |
| 1150 | Constant * |
| 1151 | ConstantFoldConstantImpl(const Constant *C, const DataLayout &DL, |
| 1152 | const TargetLibraryInfo *TLI, |
| 1153 | SmallDenseMap<Constant *, Constant *> &FoldedOps) { |
| 1154 | if (!isa<ConstantVector>(C) && !isa<ConstantExpr>(C)) |
| 1155 | return const_cast<Constant *>(C); |
| 1156 | |
| 1157 | SmallVector<Constant *, 8> Ops; |
| 1158 | for (const Use &OldU : C->operands()) { |
| 1159 | Constant *OldC = cast<Constant>(&OldU); |
| 1160 | Constant *NewC = OldC; |
| 1161 | |
| 1162 | |
| 1163 | if (isa<ConstantVector>(OldC) || isa<ConstantExpr>(OldC)) { |
| 1164 | auto It = FoldedOps.find(OldC); |
| 1165 | if (It == FoldedOps.end()) { |
| 1166 | NewC = ConstantFoldConstantImpl(OldC, DL, TLI, FoldedOps); |
| 1167 | FoldedOps.insert({OldC, NewC}); |
| 1168 | } else { |
| 1169 | NewC = It->second; |
| 1170 | } |
| 1171 | } |
| 1172 | Ops.push_back(NewC); |
| 1173 | } |
| 1174 | |
| 1175 | if (auto *CE = dyn_cast<ConstantExpr>(C)) { |
| 1176 | if (CE->isCompare()) |
| 1177 | return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0], Ops[1], |
| 1178 | DL, TLI); |
| 1179 | |
| 1180 | return ConstantFoldInstOperandsImpl(CE, CE->getOpcode(), Ops, DL, TLI); |
| 1181 | } |
| 1182 | |
| 1183 | assert(isa<ConstantVector>(C)); |
| 1184 | return ConstantVector::get(Ops); |
| 1185 | } |
| 1186 | |
| 1187 | } |
| 1188 | |
| 1189 | Constant *llvm::ConstantFoldInstruction(Instruction *I, const DataLayout &DL, |
| 1190 | const TargetLibraryInfo *TLI) { |
| 1191 | |
| 1192 | if (auto *PN = dyn_cast<PHINode>(I)) { |
| 1193 | Constant *CommonValue = nullptr; |
| 1194 | |
| 1195 | SmallDenseMap<Constant *, Constant *> FoldedOps; |
| 1196 | for (Value *Incoming : PN->incoming_values()) { |
| 1197 | |
| 1198 | |
| 1199 | |
| 1200 | |
| 1201 | if (isa<UndefValue>(Incoming)) |
| 1202 | continue; |
| 1203 | |
| 1204 | auto *C = dyn_cast<Constant>(Incoming); |
| 1205 | if (!C) |
| 1206 | return nullptr; |
| 1207 | |
| 1208 | C = ConstantFoldConstantImpl(C, DL, TLI, FoldedOps); |
| 1209 | |
| 1210 | |
| 1211 | if (CommonValue && C != CommonValue) |
| 1212 | return nullptr; |
| 1213 | CommonValue = C; |
| 1214 | } |
| 1215 | |
| 1216 | |
| 1217 | return CommonValue ? CommonValue : UndefValue::get(PN->getType()); |
| 1218 | } |
| 1219 | |
| 1220 | |
| 1221 | |
| 1222 | if (!all_of(I->operands(), [](Use &U) { return isa<Constant>(U); })) |
| 1223 | return nullptr; |
| 1224 | |
| 1225 | SmallDenseMap<Constant *, Constant *> FoldedOps; |
| 1226 | SmallVector<Constant *, 8> Ops; |
| 1227 | for (const Use &OpU : I->operands()) { |
| 1228 | auto *Op = cast<Constant>(&OpU); |
| 1229 | |
| 1230 | Op = ConstantFoldConstantImpl(Op, DL, TLI, FoldedOps); |
| 1231 | Ops.push_back(Op); |
| 1232 | } |
| 1233 | |
| 1234 | if (const auto *CI = dyn_cast<CmpInst>(I)) |
| 1235 | return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1], |
| 1236 | DL, TLI); |
| 1237 | |
| 1238 | if (const auto *LI = dyn_cast<LoadInst>(I)) { |
| 1239 | if (LI->isVolatile()) |
| 1240 | return nullptr; |
| 1241 | return ConstantFoldLoadFromConstPtr(Ops[0], LI->getType(), DL); |
| 1242 | } |
| 1243 | |
| 1244 | if (auto *IVI = dyn_cast<InsertValueInst>(I)) |
| 1245 | return ConstantExpr::getInsertValue(Ops[0], Ops[1], IVI->getIndices()); |
| 1246 | |
| 1247 | if (auto *EVI = dyn_cast<ExtractValueInst>(I)) |
| 1248 | return ConstantExpr::getExtractValue(Ops[0], EVI->getIndices()); |
| 1249 | |
| 1250 | return ConstantFoldInstOperands(I, Ops, DL, TLI); |
| 1251 | } |
| 1252 | |
| 1253 | Constant *llvm::ConstantFoldConstant(const Constant *C, const DataLayout &DL, |
| 1254 | const TargetLibraryInfo *TLI) { |
| 1255 | SmallDenseMap<Constant *, Constant *> FoldedOps; |
| 1256 | return ConstantFoldConstantImpl(C, DL, TLI, FoldedOps); |
| 1257 | } |
| 1258 | |
| 1259 | Constant *llvm::ConstantFoldInstOperands(Instruction *I, |
| 1260 | ArrayRef<Constant *> Ops, |
| 1261 | const DataLayout &DL, |
| 1262 | const TargetLibraryInfo *TLI) { |
| 1263 | return ConstantFoldInstOperandsImpl(I, I->getOpcode(), Ops, DL, TLI); |
| 1264 | } |
| 1265 | |
| 1266 | Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate, |
| 1267 | Constant *Ops0, Constant *Ops1, |
| 1268 | const DataLayout &DL, |
| 1269 | const TargetLibraryInfo *TLI) { |
| 1270 | |
| 1271 | |
| 1272 | |
| 1273 | |
| 1274 | |
| 1275 | |
| 1276 | |
| 1277 | |
| 1278 | |
| 1279 | |
| 1280 | if (auto *CE0 = dyn_cast<ConstantExpr>(Ops0)) { |
| 1281 | if (Ops1->isNullValue()) { |
| 1282 | if (CE0->getOpcode() == Instruction::IntToPtr) { |
| 1283 | Type *IntPtrTy = DL.getIntPtrType(CE0->getType()); |
| 1284 | |
| 1285 | |
| 1286 | Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0), |
| 1287 | IntPtrTy, false); |
| 1288 | Constant *Null = Constant::getNullValue(C->getType()); |
| 1289 | return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI); |
| 1290 | } |
| 1291 | |
| 1292 | |
| 1293 | |
| 1294 | if (CE0->getOpcode() == Instruction::PtrToInt) { |
| 1295 | Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType()); |
| 1296 | if (CE0->getType() == IntPtrTy) { |
| 1297 | Constant *C = CE0->getOperand(0); |
| 1298 | Constant *Null = Constant::getNullValue(C->getType()); |
| 1299 | return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI); |
| 1300 | } |
| 1301 | } |
| 1302 | } |
| 1303 | |
| 1304 | if (auto *CE1 = dyn_cast<ConstantExpr>(Ops1)) { |
| 1305 | if (CE0->getOpcode() == CE1->getOpcode()) { |
| 1306 | if (CE0->getOpcode() == Instruction::IntToPtr) { |
| 1307 | Type *IntPtrTy = DL.getIntPtrType(CE0->getType()); |
| 1308 | |
| 1309 | |
| 1310 | |
| 1311 | Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0), |
| 1312 | IntPtrTy, false); |
| 1313 | Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0), |
| 1314 | IntPtrTy, false); |
| 1315 | return ConstantFoldCompareInstOperands(Predicate, C0, C1, DL, TLI); |
| 1316 | } |
| 1317 | |
| 1318 | |
| 1319 | |
| 1320 | if (CE0->getOpcode() == Instruction::PtrToInt) { |
| 1321 | Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType()); |
| 1322 | if (CE0->getType() == IntPtrTy && |
| 1323 | CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) { |
| 1324 | return ConstantFoldCompareInstOperands( |
| 1325 | Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI); |
| 1326 | } |
| 1327 | } |
| 1328 | } |
| 1329 | } |
| 1330 | |
| 1331 | |
| 1332 | |
| 1333 | if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) && |
| 1334 | CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) { |
| 1335 | Constant *LHS = ConstantFoldCompareInstOperands( |
| 1336 | Predicate, CE0->getOperand(0), Ops1, DL, TLI); |
| 1337 | Constant *RHS = ConstantFoldCompareInstOperands( |
| 1338 | Predicate, CE0->getOperand(1), Ops1, DL, TLI); |
| 1339 | unsigned OpC = |
| 1340 | Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or; |
| 1341 | return ConstantFoldBinaryOpOperands(OpC, LHS, RHS, DL); |
| 1342 | } |
| 1343 | } else if (isa<ConstantExpr>(Ops1)) { |
| 1344 | |
| 1345 | |
| 1346 | Predicate = ICmpInst::getSwappedPredicate((ICmpInst::Predicate)Predicate); |
| 1347 | return ConstantFoldCompareInstOperands(Predicate, Ops1, Ops0, DL, TLI); |
| 1348 | } |
| 1349 | |
| 1350 | return ConstantExpr::getCompare(Predicate, Ops0, Ops1); |
| 1351 | } |
| 1352 | |
| 1353 | Constant *llvm::ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op, |
| 1354 | const DataLayout &DL) { |
| 1355 | assert(Instruction::isUnaryOp(Opcode)); |
| 1356 | |
| 1357 | return ConstantExpr::get(Opcode, Op); |
| 1358 | } |
| 1359 | |
| 1360 | Constant *llvm::ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, |
| 1361 | Constant *RHS, |
| 1362 | const DataLayout &DL) { |
| 1363 | assert(Instruction::isBinaryOp(Opcode)); |
| 1364 | if (isa<ConstantExpr>(LHS) || isa<ConstantExpr>(RHS)) |
| 1365 | if (Constant *C = SymbolicallyEvaluateBinop(Opcode, LHS, RHS, DL)) |
| 1366 | return C; |
| 1367 | |
| 1368 | return ConstantExpr::get(Opcode, LHS, RHS); |
| 1369 | } |
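
Similarly, a minimal sketch (hypothetical code assuming LLVM 13, not part of this file) of the scalar entry points defined above, ConstantFoldBinaryOpOperands and ConstantFoldCompareInstOperands, folding plain integer arithmetic:

    #include "llvm/Analysis/ConstantFolding.h"
    #include "llvm/IR/Constants.h"
    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/InstrTypes.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/Support/raw_ostream.h"

    int main() {
      llvm::LLVMContext Ctx;
      llvm::DataLayout DL("");                   // default layout is enough here
      auto *I32 = llvm::Type::getInt32Ty(Ctx);
      llvm::Constant *Two = llvm::ConstantInt::get(I32, 2);
      llvm::Constant *Three = llvm::ConstantInt::get(I32, 3);

      // add i32 2, 3 folds to i32 5.
      llvm::Constant *Sum = llvm::ConstantFoldBinaryOpOperands(
          llvm::Instruction::Add, Two, Three, DL);

      // icmp ult i32 2, 3 folds to i1 true.
      llvm::Constant *Cmp = llvm::ConstantFoldCompareInstOperands(
          llvm::CmpInst::ICMP_ULT, Two, Three, DL);

      Sum->print(llvm::outs());
      Cmp->print(llvm::outs());
      return 0;
    }
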
| 1370 | |
| 1371 | Constant *llvm::ConstantFoldCastOperand(unsigned Opcode, Constant *C, |
| 1372 | Type *DestTy, const DataLayout &DL) { |
| 1373 | assert(Instruction::isCast(Opcode)); |
| 1374 | switch (Opcode) { |
| 1375 | default: |
| 1376 | llvm_unreachable("Missing case"); |
| 1377 | case Instruction::PtrToInt: |
| 1378 | |
| 1379 | |
| 1380 | if (auto *CE = dyn_cast<ConstantExpr>(C)) { |
| 1381 | if (CE->getOpcode() == Instruction::IntToPtr) { |
| 1382 | Constant *Input = CE->getOperand(0); |
| 1383 | unsigned InWidth = Input->getType()->getScalarSizeInBits(); |
| 1384 | unsigned PtrWidth = DL.getPointerTypeSizeInBits(CE->getType()); |
| 1385 | if (PtrWidth < InWidth) { |
| 1386 | Constant *Mask = |
| 1387 | ConstantInt::get(CE->getContext(), |
| 1388 | APInt::getLowBitsSet(InWidth, PtrWidth)); |
| 1389 | Input = ConstantExpr::getAnd(Input, Mask); |
| 1390 | } |
| 1391 | |
| 1392 | return ConstantExpr::getIntegerCast(Input, DestTy, false); |
| 1393 | } |
| 1394 | } |
| 1395 | return ConstantExpr::getCast(Opcode, C, DestTy); |
| 1396 | case Instruction::IntToPtr: |
| 1397 | |
| 1398 | |
| 1399 | |
| 1400 | |
| 1401 | if (auto *CE = dyn_cast<ConstantExpr>(C)) { |
| 1402 | if (CE->getOpcode() == Instruction::PtrToInt) { |
| 1403 | Constant *SrcPtr = CE->getOperand(0); |
| 1404 | unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType()); |
| 1405 | unsigned MidIntSize = CE->getType()->getScalarSizeInBits(); |
| 1406 | |
| 1407 | if (MidIntSize >= SrcPtrSize) { |
| 1408 | unsigned SrcAS = SrcPtr->getType()->getPointerAddressSpace(); |
| 1409 | if (SrcAS == DestTy->getPointerAddressSpace()) |
| 1410 | return FoldBitCast(CE->getOperand(0), DestTy, DL); |
| 1411 | } |
| 1412 | } |
| 1413 | } |
| 1414 | |
| 1415 | return ConstantExpr::getCast(Opcode, C, DestTy); |
| 1416 | case Instruction::Trunc: |
| 1417 | case Instruction::ZExt: |
| 1418 | case Instruction::SExt: |
| 1419 | case Instruction::FPTrunc: |
| 1420 | case Instruction::FPExt: |
| 1421 | case Instruction::UIToFP: |
| 1422 | case Instruction::SIToFP: |
| 1423 | case Instruction::FPToUI: |
| 1424 | case Instruction::FPToSI: |
| 1425 | case Instruction::AddrSpaceCast: |
| 1426 | return ConstantExpr::getCast(Opcode, C, DestTy); |
| 1427 | case Instruction::BitCast: |
| 1428 | return FoldBitCast(C, DestTy, DL); |
| 1429 | } |
| 1430 | } |
| 1431 | |
| 1432 | Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C, |
| 1433 | ConstantExpr *CE, |
| 1434 | Type *Ty, |
| 1435 | const DataLayout &DL) { |
| 1436 | if (!CE->getOperand(1)->isNullValue()) |
| 1437 | return nullptr; |
| 1438 | |
| 1439 | |
| 1440 | |
| 1441 | for (unsigned i = 2, e = CE->getNumOperands(); i != e; ++i) { |
| 1442 | C = C->getAggregateElement(CE->getOperand(i)); |
| 1443 | if (!C) |
| 1444 | return nullptr; |
| 1445 | } |
| 1446 | return ConstantFoldLoadThroughBitcast(C, Ty, DL); |
| 1447 | } |
| 1448 | |
| 1449 | Constant * |
| 1450 | llvm::ConstantFoldLoadThroughGEPIndices(Constant *C, |
| 1451 | ArrayRef<Constant *> Indices) { |
| 1452 | |
| 1453 | |
| 1454 | for (Constant *Index : Indices) { |
| 1455 | C = C->getAggregateElement(Index); |
| 1456 | if (!C) |
| 1457 | return nullptr; |
| 1458 | } |
| 1459 | return C; |
| 1460 | } |
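
Both GEP helpers above walk the constant aggregate one index at a time with getAggregateElement, giving up as soon as an element cannot be produced. The same walk with plain data (toy code, not the LLVM API):

    #include <array>
    #include <cstdio>

    int main() {
      // A constant [2 x [3 x i32]] stand-in; the index list {1, 2} peels one
      // nesting level per index, like the loop over the trailing GEP operands.
      std::array<std::array<int, 3>, 2> Agg{{{1, 2, 3}, {4, 5, 6}}};
      unsigned Indices[] = {1, 2};
      std::printf("%d\n", Agg[Indices[0]][Indices[1]]); // 6
    }
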
| 1461 | |
| 1462 | |
| 1463 | |
| 1464 | |
| 1465 | |
| 1466 | bool llvm::canConstantFoldCallTo(const CallBase *Call, const Function *F) { |
| 1467 | if (Call->isNoBuiltin()) |
| 1468 | return false; |
| 1469 | switch (F->getIntrinsicID()) { |
| 1470 | |
| 1471 | |
| 1472 | case Intrinsic::bswap: |
| 1473 | case Intrinsic::ctpop: |
| 1474 | case Intrinsic::ctlz: |
| 1475 | case Intrinsic::cttz: |
| 1476 | case Intrinsic::fshl: |
| 1477 | case Intrinsic::fshr: |
| 1478 | case Intrinsic::launder_invariant_group: |
| 1479 | case Intrinsic::strip_invariant_group: |
| 1480 | case Intrinsic::masked_load: |
| 1481 | case Intrinsic::get_active_lane_mask: |
| 1482 | case Intrinsic::abs: |
| 1483 | case Intrinsic::smax: |
| 1484 | case Intrinsic::smin: |
| 1485 | case Intrinsic::umax: |
| 1486 | case Intrinsic::umin: |
| 1487 | case Intrinsic::sadd_with_overflow: |
| 1488 | case Intrinsic::uadd_with_overflow: |
| 1489 | case Intrinsic::ssub_with_overflow: |
| 1490 | case Intrinsic::usub_with_overflow: |
| 1491 | case Intrinsic::smul_with_overflow: |
| 1492 | case Intrinsic::umul_with_overflow: |
| 1493 | case Intrinsic::sadd_sat: |
| 1494 | case Intrinsic::uadd_sat: |
| 1495 | case Intrinsic::ssub_sat: |
| 1496 | case Intrinsic::usub_sat: |
| 1497 | case Intrinsic::smul_fix: |
| 1498 | case Intrinsic::smul_fix_sat: |
| 1499 | case Intrinsic::bitreverse: |
| 1500 | case Intrinsic::is_constant: |
| 1501 | case Intrinsic::vector_reduce_add: |
| 1502 | case Intrinsic::vector_reduce_mul: |
| 1503 | case Intrinsic::vector_reduce_and: |
| 1504 | case Intrinsic::vector_reduce_or: |
| 1505 | case Intrinsic::vector_reduce_xor: |
| 1506 | case Intrinsic::vector_reduce_smin: |
| 1507 | case Intrinsic::vector_reduce_smax: |
| 1508 | case Intrinsic::vector_reduce_umin: |
| 1509 | case Intrinsic::vector_reduce_umax: |
| 1510 | |
| 1511 | case Intrinsic::amdgcn_perm: |
| 1512 | case Intrinsic::arm_mve_vctp8: |
| 1513 | case Intrinsic::arm_mve_vctp16: |
| 1514 | case Intrinsic::arm_mve_vctp32: |
| 1515 | case Intrinsic::arm_mve_vctp64: |
| 1516 | case Intrinsic::aarch64_sve_convert_from_svbool: |
| 1517 | |
| 1518 | case Intrinsic::wasm_trunc_signed: |
| 1519 | case Intrinsic::wasm_trunc_unsigned: |
| 1520 | return true; |
| 1521 | |
| 1522 | |
| 1523 | |
| 1524 | case Intrinsic::minnum: |
| 1525 | case Intrinsic::maxnum: |
| 1526 | case Intrinsic::minimum: |
| 1527 | case Intrinsic::maximum: |
| 1528 | case Intrinsic::log: |
| 1529 | case Intrinsic::log2: |
| 1530 | case Intrinsic::log10: |
| 1531 | case Intrinsic::exp: |
| 1532 | case Intrinsic::exp2: |
| 1533 | case Intrinsic::sqrt: |
| 1534 | case Intrinsic::sin: |
| 1535 | case Intrinsic::cos: |
| 1536 | case Intrinsic::pow: |
| 1537 | case Intrinsic::powi: |
| 1538 | case Intrinsic::fma: |
| 1539 | case Intrinsic::fmuladd: |
| 1540 | case Intrinsic::fptoui_sat: |
| 1541 | case Intrinsic::fptosi_sat: |
| 1542 | case Intrinsic::convert_from_fp16: |
| 1543 | case Intrinsic::convert_to_fp16: |
| 1544 | case Intrinsic::amdgcn_cos: |
| 1545 | case Intrinsic::amdgcn_cubeid: |
| 1546 | case Intrinsic::amdgcn_cubema: |
| 1547 | case Intrinsic::amdgcn_cubesc: |
| 1548 | case Intrinsic::amdgcn_cubetc: |
| 1549 | case Intrinsic::amdgcn_fmul_legacy: |
| 1550 | case Intrinsic::amdgcn_fma_legacy: |
| 1551 | case Intrinsic::amdgcn_fract: |
| 1552 | case Intrinsic::amdgcn_ldexp: |
| 1553 | case Intrinsic::amdgcn_sin: |
| 1554 | |
| 1555 | case Intrinsic::x86_sse_cvtss2si: |
| 1556 | case Intrinsic::x86_sse_cvtss2si64: |
| 1557 | case Intrinsic::x86_sse_cvttss2si: |
| 1558 | case Intrinsic::x86_sse_cvttss2si64: |
| 1559 | case Intrinsic::x86_sse2_cvtsd2si: |
| 1560 | case Intrinsic::x86_sse2_cvtsd2si64: |
| 1561 | case Intrinsic::x86_sse2_cvttsd2si: |
| 1562 | case Intrinsic::x86_sse2_cvttsd2si64: |
| 1563 | case Intrinsic::x86_avx512_vcvtss2si32: |
| 1564 | case Intrinsic::x86_avx512_vcvtss2si64: |
| 1565 | case Intrinsic::x86_avx512_cvttss2si: |
| 1566 | case Intrinsic::x86_avx512_cvttss2si64: |
| 1567 | case Intrinsic::x86_avx512_vcvtsd2si32: |
| 1568 | case Intrinsic::x86_avx512_vcvtsd2si64: |
| 1569 | case Intrinsic::x86_avx512_cvttsd2si: |
| 1570 | case Intrinsic::x86_avx512_cvttsd2si64: |
| 1571 | case Intrinsic::x86_avx512_vcvtss2usi32: |
| 1572 | case Intrinsic::x86_avx512_vcvtss2usi64: |
| 1573 | case Intrinsic::x86_avx512_cvttss2usi: |
| 1574 | case Intrinsic::x86_avx512_cvttss2usi64: |
| 1575 | case Intrinsic::x86_avx512_vcvtsd2usi32: |
| 1576 | case Intrinsic::x86_avx512_vcvtsd2usi64: |
| 1577 | case Intrinsic::x86_avx512_cvttsd2usi: |
| 1578 | case Intrinsic::x86_avx512_cvttsd2usi64: |
| 1579 | return !Call->isStrictFP(); |
| 1580 | |
| 1581 | |
| 1582 | |
| 1583 | case Intrinsic::fabs: |
| 1584 | case Intrinsic::copysign: |
| 1585 | |
| 1586 | |
| 1587 | case Intrinsic::ceil: |
| 1588 | case Intrinsic::floor: |
| 1589 | case Intrinsic::round: |
| 1590 | case Intrinsic::roundeven: |
| 1591 | case Intrinsic::trunc: |
| 1592 | case Intrinsic::nearbyint: |
| 1593 | case Intrinsic::rint: |
| 1594 | |
| 1595 | |
| 1596 | case Intrinsic::experimental_constrained_fma: |
| 1597 | case Intrinsic::experimental_constrained_fmuladd: |
| 1598 | case Intrinsic::experimental_constrained_fadd: |
| 1599 | case Intrinsic::experimental_constrained_fsub: |
| 1600 | case Intrinsic::experimental_constrained_fmul: |
| 1601 | case Intrinsic::experimental_constrained_fdiv: |
| 1602 | case Intrinsic::experimental_constrained_frem: |
| 1603 | case Intrinsic::experimental_constrained_ceil: |
| 1604 | case Intrinsic::experimental_constrained_floor: |
| 1605 | case Intrinsic::experimental_constrained_round: |
| 1606 | case Intrinsic::experimental_constrained_roundeven: |
| 1607 | case Intrinsic::experimental_constrained_trunc: |
| 1608 | case Intrinsic::experimental_constrained_nearbyint: |
| 1609 | case Intrinsic::experimental_constrained_rint: |
| 1610 | return true; |
| 1611 | default: |
| 1612 | return false; |
| 1613 | case Intrinsic::not_intrinsic: break; |
| 1614 | } |
| 1615 | |
| 1616 | if (!F->hasName() || Call->isStrictFP()) |
| 1617 | return false; |
| 1618 | |
| 1619 | |
| 1620 | |
| 1621 | |
| 1622 | StringRef Name = F->getName(); |
| 1623 | switch (Name[0]) { |
| 1624 | default: |
| 1625 | return false; |
| 1626 | case 'a': |
| 1627 | return Name == "acos" || Name == "acosf" || |
| 1628 | Name == "asin" || Name == "asinf" || |
| 1629 | Name == "atan" || Name == "atanf" || |
| 1630 | Name == "atan2" || Name == "atan2f"; |
| 1631 | case 'c': |
| 1632 | return Name == "ceil" || Name == "ceilf" || |
| 1633 | Name == "cos" || Name == "cosf" || |
| 1634 | Name == "cosh" || Name == "coshf"; |
| 1635 | case 'e': |
| 1636 | return Name == "exp" || Name == "expf" || |
| 1637 | Name == "exp2" || Name == "exp2f"; |
| 1638 | case 'f': |
| 1639 | return Name == "fabs" || Name == "fabsf" || |
| 1640 | Name == "floor" || Name == "floorf" || |
| 1641 | Name == "fmod" || Name == "fmodf"; |
| 1642 | case 'l': |
| 1643 | return Name == "log" || Name == "logf" || |
| 1644 | Name == "log2" || Name == "log2f" || |
| 1645 | Name == "log10" || Name == "log10f"; |
| 1646 | case 'n': |
| 1647 | return Name == "nearbyint" || Name == "nearbyintf"; |
| 1648 | case 'p': |
| 1649 | return Name == "pow" || Name == "powf"; |
| 1650 | case 'r': |
| 1651 | return Name == "remainder" || Name == "remainderf" || |
| 1652 | Name == "rint" || Name == "rintf" || |
| 1653 | Name == "round" || Name == "roundf"; |
| 1654 | case 's': |
| 1655 | return Name == "sin" || Name == "sinf" || |
| 1656 | Name == "sinh" || Name == "sinhf" || |
| 1657 | Name == "sqrt" || Name == "sqrtf"; |
| 1658 | case 't': |
| 1659 | return Name == "tan" || Name == "tanf" || |
| 1660 | Name == "tanh" || Name == "tanhf" || |
| 1661 | Name == "trunc" || Name == "truncf"; |
| 1662 | case '_': |
| 1663 | |
| 1664 | |
| 1665 | |
| 1666 | |
| 1667 | |
| 1668 | |
| 1669 | if (Name.size() < 12 || Name[1] != '_') |
| 1670 | return false; |
| 1671 | switch (Name[2]) { |
| 1672 | default: |
| 1673 | return false; |
| 1674 | case 'a': |
| 1675 | return Name == "__acos_finite" || Name == "__acosf_finite" || |
| 1676 | Name == "__asin_finite" || Name == "__asinf_finite" || |
| 1677 | Name == "__atan2_finite" || Name == "__atan2f_finite"; |
| 1678 | case 'c': |
| 1679 | return Name == "__cosh_finite" || Name == "__coshf_finite"; |
| 1680 | case 'e': |
| 1681 | return Name == "__exp_finite" || Name == "__expf_finite" || |
| 1682 | Name == "__exp2_finite" || Name == "__exp2f_finite"; |
| 1683 | case 'l': |
| 1684 | return Name == "__log_finite" || Name == "__logf_finite" || |
| 1685 | Name == "__log10_finite" || Name == "__log10f_finite"; |
| 1686 | case 'p': |
| 1687 | return Name == "__pow_finite" || Name == "__powf_finite"; |
| 1688 | case 's': |
| 1689 | return Name == "__sinh_finite" || Name == "__sinhf_finite"; |
| 1690 | } |
| 1691 | } |
| 1692 | } |
| 1693 | |
| 1694 | namespace { |
| 1695 | |
| 1696 | Constant *GetConstantFoldFPValue(double V, Type *Ty) { |
| 1697 | if (Ty->isHalfTy() || Ty->isFloatTy()) { |
| 1698 | APFloat APF(V); |
| 1699 | bool unused; |
| 1700 | APF.convert(Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &unused); |
| 1701 | return ConstantFP::get(Ty->getContext(), APF); |
| 1702 | } |
| 1703 | if (Ty->isDoubleTy()) |
| 1704 | return ConstantFP::get(Ty->getContext(), APFloat(V)); |
| 1705 | llvm_unreachable("Can only constant fold half/float/double"); |
| 1706 | } |
| 1707 | |
| 1708 | |
| 1709 | inline void llvm_fenv_clearexcept() { |
| 1710 | #if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT |
| 1711 | feclearexcept(FE_ALL_EXCEPT); |
| 1712 | #endif |
| 1713 | errno = 0; |
| 1714 | } |
| 1715 | |
| 1716 | |
| 1717 | inline bool llvm_fenv_testexcept() { |
| 1718 | int errno_val = errno; |
| 1719 | if (errno_val == ERANGE || errno_val == EDOM) |
| 1720 | return true; |
| 1721 | #if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT |
| 1722 | if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT)) |
| 1723 | return true; |
| 1724 | #endif |
| 1725 | return false; |
| 1726 | } |
| 1727 | |
| 1728 | Constant *ConstantFoldFP(double (*NativeFP)(double), const APFloat &V, |
| 1729 | Type *Ty) { |
| 1730 | llvm_fenv_clearexcept(); |
| 1731 | double Result = NativeFP(V.convertToDouble()); |
| 1732 | if (llvm_fenv_testexcept()) { |
| 1733 | llvm_fenv_clearexcept(); |
| 1734 | return nullptr; |
| 1735 | } |
| 1736 | |
| 1737 | return GetConstantFoldFPValue(Result, Ty); |
| 1738 | } |
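
ConstantFoldFP above (and ConstantFoldBinaryFP just below) evaluate the call with the host math library and only accept the result if neither errno nor a floating-point exception other than inexact was raised between llvm_fenv_clearexcept and llvm_fenv_testexcept. A self-contained sketch of that guard pattern using the standard <cerrno>/<cfenv> facilities directly (helper name is mine, not LLVM's):

    #include <cerrno>
    #include <cfenv>
    #include <cmath>
    #include <cstdio>

    // Returns true and sets Out only if evaluating log(In) on the host raised
    // neither errno nor an FP exception other than FE_INEXACT.
    static bool tryFoldLog(double In, double &Out) {
      std::feclearexcept(FE_ALL_EXCEPT);
      errno = 0;
      Out = std::log(In);
      if (errno == ERANGE || errno == EDOM)
        return false;
      return std::fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT) == 0;
    }

    int main() {
      double R;
      std::printf("log(2.0):  ok=%d\n", tryFoldLog(2.0, R));  // accepted
      std::printf("log(-1.0): ok=%d\n", tryFoldLog(-1.0, R)); // rejected (domain error)
    }
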
| 1739 | |
| 1740 | Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double), |
| 1741 | const APFloat &V, const APFloat &W, Type *Ty) { |
| 1742 | llvm_fenv_clearexcept(); |
| 1743 | double Result = NativeFP(V.convertToDouble(), W.convertToDouble()); |
| 1744 | if (llvm_fenv_testexcept()) { |
| 1745 | llvm_fenv_clearexcept(); |
| 1746 | return nullptr; |
| 1747 | } |
| 1748 | |
| 1749 | return GetConstantFoldFPValue(Result, Ty); |
| 1750 | } |
| 1751 | |
| 1752 | Constant *constantFoldVectorReduce(Intrinsic::ID IID, Constant *Op) { |
| 1753 | FixedVectorType *VT = dyn_cast<FixedVectorType>(Op->getType()); |
| 1754 | if (!VT) |
| 1755 | return nullptr; |
| 1756 | |
| 1757 | |
| 1758 | |
| 1759 | if (isa<ConstantAggregateZero>(Op)) |
| 1760 | return ConstantInt::get(VT->getElementType(), 0); |
| 1761 | |
| 1762 | |
| 1763 | if (isa<PoisonValue>(Op) || Op->containsPoisonElement()) |
| 1764 | return PoisonValue::get(VT->getElementType()); |
| 1765 | |
| 1766 | |
| 1767 | if (!isa<ConstantVector>(Op) && !isa<ConstantDataVector>(Op)) |
| 1768 | return nullptr; |
| 1769 | |
| 1770 | auto *EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(0U)); |
| 1771 | if (!EltC) |
| 1772 | return nullptr; |
| 1773 | |
| 1774 | APInt Acc = EltC->getValue(); |
| 1775 | for (unsigned I = 1, E = VT->getNumElements(); I != E; I++) { |
| 1776 | if (!(EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(I)))) |
| 1777 | return nullptr; |
| 1778 | const APInt &X = EltC->getValue(); |
| 1779 | switch (IID) { |
| 1780 | case Intrinsic::vector_reduce_add: |
| 1781 | Acc = Acc + X; |
| 1782 | break; |
| 1783 | case Intrinsic::vector_reduce_mul: |
| 1784 | Acc = Acc * X; |
| 1785 | break; |
| 1786 | case Intrinsic::vector_reduce_and: |
| 1787 | Acc = Acc & X; |
| 1788 | break; |
| 1789 | case Intrinsic::vector_reduce_or: |
| 1790 | Acc = Acc | X; |
| 1791 | break; |
| 1792 | case Intrinsic::vector_reduce_xor: |
| 1793 | Acc = Acc ^ X; |
| 1794 | break; |
| 1795 | case Intrinsic::vector_reduce_smin: |
| 1796 | Acc = APIntOps::smin(Acc, X); |
| 1797 | break; |
| 1798 | case Intrinsic::vector_reduce_smax: |
| 1799 | Acc = APIntOps::smax(Acc, X); |
| 1800 | break; |
| 1801 | case Intrinsic::vector_reduce_umin: |
| 1802 | Acc = APIntOps::umin(Acc, X); |
| 1803 | break; |
| 1804 | case Intrinsic::vector_reduce_umax: |
| 1805 | Acc = APIntOps::umax(Acc, X); |
| 1806 | break; |
| 1807 | } |
| 1808 | } |
| 1809 | |
| 1810 | return ConstantInt::get(Op->getContext(), Acc); |
| 1811 | } |
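
The integer reductions above all follow the same shape: seed the accumulator with element 0, then combine each remaining element with the operation selected by the intrinsic ID. A scalar model of two of those combines (toy code, not the LLVM API):

    #include <algorithm>
    #include <array>
    #include <cstdio>

    int main() {
      std::array<unsigned, 4> Elts{7, 3, 9, 5};
      unsigned Add = Elts[0];  // vector_reduce_add accumulator
      unsigned UMin = Elts[0]; // vector_reduce_umin accumulator
      for (size_t I = 1; I < Elts.size(); ++I) {
        Add += Elts[I];
        UMin = std::min(UMin, Elts[I]);
      }
      std::printf("add=%u umin=%u\n", Add, UMin); // add=24 umin=3
    }
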
| 1812 | |
| 1813 | |
| 1814 | |
| 1815 | |
| 1816 | |
| 1817 | |
| 1818 | |
| 1819 | |
| 1820 | Constant *ConstantFoldSSEConvertToInt(const APFloat &Val, bool roundTowardZero, |
| 1821 | Type *Ty, bool IsSigned) { |
| 1822 | |
| 1823 | unsigned ResultWidth = Ty->getIntegerBitWidth(); |
| 1824 | assert(ResultWidth <= 64 && |
| 1825 | "Can only constant fold conversions to 64 and 32 bit ints"); |
| 1826 | |
| 1827 | uint64_t UIntVal; |
| 1828 | bool isExact = false; |
| 1829 | APFloat::roundingMode mode = roundTowardZero? APFloat::rmTowardZero |
| 1830 | : APFloat::rmNearestTiesToEven; |
| 1831 | APFloat::opStatus status = |
| 1832 | Val.convertToInteger(makeMutableArrayRef(UIntVal), ResultWidth, |
| 1833 | IsSigned, mode, &isExact); |
| 1834 | if (status != APFloat::opOK && |
| 1835 | (!roundTowardZero || status != APFloat::opInexact)) |
| 1836 | return nullptr; |
| 1837 | return ConstantInt::get(Ty, UIntVal, IsSigned); |
| 1838 | } |
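
ConstantFoldSSEConvertToInt distinguishes the truncating conversions (cvtt*, round toward zero, where an inexact result is still acceptable) from the non-truncating ones (cvt*, round to nearest with ties to even, where any status other than opOK aborts the fold). A sketch of the two rounding behaviours in standard C++, assuming the default round-to-nearest environment:

    #include <cfenv>
    #include <cmath>
    #include <cstdio>

    int main() {
      std::fesetround(FE_TONEAREST);
      double V = 2.7;
      long long Truncating = (long long)std::trunc(V);  // cvtt*: 2
      long long Nearest = (long long)std::nearbyint(V); // cvt*:  3
      std::printf("trunc=%lld nearest=%lld\n", Truncating, Nearest);
    }
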
| 1839 | |
| 1840 | double getValueAsDouble(ConstantFP *Op) { |
| 1841 | Type *Ty = Op->getType(); |
| 1842 | |
| 1843 | if (Ty->isBFloatTy() || Ty->isHalfTy() || Ty->isFloatTy() || Ty->isDoubleTy()) |
| 1844 | return Op->getValueAPF().convertToDouble(); |
| 1845 | |
| 1846 | bool unused; |
| 1847 | APFloat APF = Op->getValueAPF(); |
| 1848 | APF.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &unused); |
| 1849 | return APF.convertToDouble(); |
| 1850 | } |
| 1851 | |
| 1852 | static bool getConstIntOrUndef(Value *Op, const APInt *&C) { |
| 1853 | if (auto *CI = dyn_cast<ConstantInt>(Op)) { |
| 24 | | Assuming 'Op' is not a 'ConstantInt' | |
| 32 | | Assuming 'Op' is a 'ConstantInt' | |
| 1854 | C = &CI->getValue(); |
| 1855 | return true; |
| 34 | | Returning the value 1, which participates in a condition later | |
| 1856 | } |
| 1857 | if (isa<UndefValue>(Op)) { |
| 26 | | Assuming 'Op' is a 'UndefValue' | |
| 1858 | C = nullptr; |
| 28 | | Null pointer value stored to 'C0' | |
| 1859 | return true; |
| 29 | | Returning the value 1, which participates in a condition later | |
| 1860 | } |
| 1861 | return false; |
| 1862 | } |
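
getConstIntOrUndef is the helper the later path notes revolve around: it returns true both for a real ConstantInt (out-pointer set) and for an UndefValue (out-pointer deliberately left null), so each caller has to null-check the pointer before every dereference. A toy model of that contract, with my own names:

    #include <cstdio>
    #include <optional>

    // Models only the constant/undef split: a present optional is a constant,
    // an empty one stands in for undef, and in both cases the call "succeeds".
    static bool getIntOrUndef(const std::optional<int> &Op, const int *&C) {
      if (Op) {
        C = &*Op;    // constant: out-pointer is valid
        return true;
      }
      C = nullptr;   // undef: out-pointer is intentionally null
      return true;
    }

    int main() {
      std::optional<int> UndefOperand; // plays the role of an UndefValue
      const int *C0 = nullptr;
      if (getIntOrUndef(UndefOperand, C0) && !C0)
        std::printf("folded as undef; C0 must not be dereferenced\n");
    }
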
| 1863 | |
| 1864 | |
| 1865 | |
| 1866 | |
| 1867 | |
| 1868 | |
| 1869 | static bool mayFoldConstrained(ConstrainedFPIntrinsic *CI, |
| 1870 | APFloat::opStatus St) { |
| 1871 | Optional<RoundingMode> ORM = CI->getRoundingMode(); |
| 1872 | Optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior(); |
| 1873 | |
| 1874 | |
| 1875 | |
| 1876 | if (St == APFloat::opStatus::opOK) { |
| 1877 | |
| 1878 | |
| 1879 | |
| 1880 | |
| 1881 | if (EB && *EB != fp::ExceptionBehavior::ebIgnore) |
| 1882 | CI->addAttribute(AttributeList::FunctionIndex, Attribute::ReadNone); |
| 1883 | return true; |
| 1884 | } |
| 1885 | |
| 1886 | |
| 1887 | |
| 1888 | if (!ORM || *ORM == RoundingMode::Dynamic) |
| 1889 | return false; |
| 1890 | |
| 1891 | |
| 1892 | |
| 1893 | if (!EB || *EB != fp::ExceptionBehavior::ebStrict) |
| 1894 | return true; |
| 1895 | |
| 1896 | |
| 1897 | |
| 1898 | return false; |
| 1899 | } |
| 1900 | |
| 1901 | |
| 1902 | static RoundingMode |
| 1903 | getEvaluationRoundingMode(const ConstrainedFPIntrinsic *CI) { |
| 1904 | Optional<RoundingMode> ORM = CI->getRoundingMode(); |
| 1905 | if (!ORM || *ORM == RoundingMode::Dynamic) |
| 1906 | |
| 1907 | |
| 1908 | |
| 1909 | |
| 1910 | return RoundingMode::NearestTiesToEven; |
| 1911 | return *ORM; |
| 1912 | } |
| 1913 | |
| 1914 | static Constant *ConstantFoldScalarCall1(StringRef Name, |
| 1915 | Intrinsic::ID IntrinsicID, |
| 1916 | Type *Ty, |
| 1917 | ArrayRef<Constant *> Operands, |
| 1918 | const TargetLibraryInfo *TLI, |
| 1919 | const CallBase *Call) { |
| 1920 | assert(Operands.size() == 1 && "Wrong number of operands."); |
| 1921 | |
| 1922 | if (IntrinsicID == Intrinsic::is_constant) { |
| 1923 | |
| 1924 | |
| 1925 | |
| 1926 | if (Operands[0]->isManifestConstant()) |
| 1927 | return ConstantInt::getTrue(Ty->getContext()); |
| 1928 | return nullptr; |
| 1929 | } |
| 1930 | if (isa<UndefValue>(Operands[0])) { |
| 1931 | |
| 1932 | |
| 1933 | |
| 1934 | if (IntrinsicID == Intrinsic::cos || |
| 1935 | IntrinsicID == Intrinsic::ctpop || |
| 1936 | IntrinsicID == Intrinsic::fptoui_sat || |
| 1937 | IntrinsicID == Intrinsic::fptosi_sat) |
| 1938 | return Constant::getNullValue(Ty); |
| 1939 | if (IntrinsicID == Intrinsic::bswap || |
| 1940 | IntrinsicID == Intrinsic::bitreverse || |
| 1941 | IntrinsicID == Intrinsic::launder_invariant_group || |
| 1942 | IntrinsicID == Intrinsic::strip_invariant_group) |
| 1943 | return Operands[0]; |
| 1944 | } |
| 1945 | |
| 1946 | if (isa<ConstantPointerNull>(Operands[0])) { |
| 1947 | |
| 1948 | if (IntrinsicID == Intrinsic::launder_invariant_group || |
| 1949 | IntrinsicID == Intrinsic::strip_invariant_group) { |
| 1950 | |
| 1951 | |
| 1952 | |
| 1953 | const Function *Caller = |
| 1954 | Call->getParent() ? Call->getCaller() : nullptr; |
| 1955 | if (Caller && |
| 1956 | !NullPointerIsDefined( |
| 1957 | Caller, Operands[0]->getType()->getPointerAddressSpace())) { |
| 1958 | return Operands[0]; |
| 1959 | } |
| 1960 | return nullptr; |
| 1961 | } |
| 1962 | } |
| 1963 | |
| 1964 | if (auto *Op = dyn_cast<ConstantFP>(Operands[0])) { |
| 1965 | if (IntrinsicID == Intrinsic::convert_to_fp16) { |
| 1966 | APFloat Val(Op->getValueAPF()); |
| 1967 | |
| 1968 | bool lost = false; |
| 1969 | Val.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &lost); |
| 1970 | |
| 1971 | return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt()); |
| 1972 | } |
| 1973 | |
| 1974 | APFloat U = Op->getValueAPF(); |
| 1975 | |
| 1976 | if (IntrinsicID == Intrinsic::wasm_trunc_signed || |
| 1977 | IntrinsicID == Intrinsic::wasm_trunc_unsigned) { |
| 1978 | bool Signed = IntrinsicID == Intrinsic::wasm_trunc_signed; |
| 1979 | |
| 1980 | if (U.isNaN()) |
| 1981 | return nullptr; |
| 1982 | |
| 1983 | unsigned Width = Ty->getIntegerBitWidth(); |
| 1984 | APSInt Int(Width, !Signed); |
| 1985 | bool IsExact = false; |
| 1986 | APFloat::opStatus Status = |
| 1987 | U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact); |
| 1988 | |
| 1989 | if (Status == APFloat::opOK || Status == APFloat::opInexact) |
| 1990 | return ConstantInt::get(Ty, Int); |
| 1991 | |
| 1992 | return nullptr; |
| 1993 | } |
| 1994 | |
| 1995 | if (IntrinsicID == Intrinsic::fptoui_sat || |
| 1996 | IntrinsicID == Intrinsic::fptosi_sat) { |
| 1997 | |
| 1998 | APSInt Int(Ty->getIntegerBitWidth(), |
| 1999 | IntrinsicID == Intrinsic::fptoui_sat); |
| 2000 | bool IsExact; |
| 2001 | U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact); |
| 2002 | return ConstantInt::get(Ty, Int); |
| 2003 | } |
| 2004 | |
| 2005 | if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy()) |
| 2006 | return nullptr; |
| 2007 | |
| 2008 | |
| 2009 | |
| 2010 | if (IntrinsicID == Intrinsic::nearbyint || IntrinsicID == Intrinsic::rint) { |
| 2011 | U.roundToIntegral(APFloat::rmNearestTiesToEven); |
| 2012 | return ConstantFP::get(Ty->getContext(), U); |
| 2013 | } |
| 2014 | |
| 2015 | if (IntrinsicID == Intrinsic::round) { |
| 2016 | U.roundToIntegral(APFloat::rmNearestTiesToAway); |
| 2017 | return ConstantFP::get(Ty->getContext(), U); |
| 2018 | } |
| 2019 | |
| 2020 | if (IntrinsicID == Intrinsic::roundeven) { |
| 2021 | U.roundToIntegral(APFloat::rmNearestTiesToEven); |
| 2022 | return ConstantFP::get(Ty->getContext(), U); |
| 2023 | } |
| 2024 | |
| 2025 | if (IntrinsicID == Intrinsic::ceil) { |
| 2026 | U.roundToIntegral(APFloat::rmTowardPositive); |
| 2027 | return ConstantFP::get(Ty->getContext(), U); |
| 2028 | } |
| 2029 | |
| 2030 | if (IntrinsicID == Intrinsic::floor) { |
| 2031 | U.roundToIntegral(APFloat::rmTowardNegative); |
| 2032 | return ConstantFP::get(Ty->getContext(), U); |
| 2033 | } |
| 2034 | |
| 2035 | if (IntrinsicID == Intrinsic::trunc) { |
| 2036 | U.roundToIntegral(APFloat::rmTowardZero); |
| 2037 | return ConstantFP::get(Ty->getContext(), U); |
| 2038 | } |
| 2039 | |
| 2040 | if (IntrinsicID == Intrinsic::fabs) { |
| 2041 | U.clearSign(); |
| 2042 | return ConstantFP::get(Ty->getContext(), U); |
| 2043 | } |
| 2044 | |
| 2045 | if (IntrinsicID == Intrinsic::amdgcn_fract) { |
| 2046 | |
| 2047 | |
| 2048 | |
| 2049 | |
| 2050 | APFloat FloorU(U); |
| 2051 | FloorU.roundToIntegral(APFloat::rmTowardNegative); |
| 2052 | APFloat FractU(U - FloorU); |
| 2053 | APFloat AlmostOne(U.getSemantics(), 1); |
| 2054 | AlmostOne.next( true); |
| 2055 | return ConstantFP::get(Ty->getContext(), minimum(FractU, AlmostOne)); |
| 2056 | } |
| 2057 | |
| 2058 | |
| 2059 | |
| 2060 | |
| 2061 | Optional<APFloat::roundingMode> RM; |
| 2062 | switch (IntrinsicID) { |
| 2063 | default: |
| 2064 | break; |
| 2065 | case Intrinsic::experimental_constrained_nearbyint: |
| 2066 | case Intrinsic::experimental_constrained_rint: { |
| 2067 | auto CI = cast<ConstrainedFPIntrinsic>(Call); |
| 2068 | RM = CI->getRoundingMode(); |
| 2069 | if (!RM || RM.getValue() == RoundingMode::Dynamic) |
| 2070 | return nullptr; |
| 2071 | break; |
| 2072 | } |
| 2073 | case Intrinsic::experimental_constrained_round: |
| 2074 | RM = APFloat::rmNearestTiesToAway; |
| 2075 | break; |
| 2076 | case Intrinsic::experimental_constrained_ceil: |
| 2077 | RM = APFloat::rmTowardPositive; |
| 2078 | break; |
| 2079 | case Intrinsic::experimental_constrained_floor: |
| 2080 | RM = APFloat::rmTowardNegative; |
| 2081 | break; |
| 2082 | case Intrinsic::experimental_constrained_trunc: |
| 2083 | RM = APFloat::rmTowardZero; |
| 2084 | break; |
| 2085 | } |
| 2086 | if (RM) { |
| 2087 | auto CI = cast<ConstrainedFPIntrinsic>(Call); |
| 2088 | if (U.isFinite()) { |
| 2089 | APFloat::opStatus St = U.roundToIntegral(*RM); |
| 2090 | if (IntrinsicID == Intrinsic::experimental_constrained_rint && |
| 2091 | St == APFloat::opInexact) { |
| 2092 | Optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior(); |
| 2093 | if (EB && *EB == fp::ebStrict) |
| 2094 | return nullptr; |
| 2095 | } |
| 2096 | } else if (U.isSignaling()) { |
| 2097 | Optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior(); |
| 2098 | if (EB && *EB != fp::ebIgnore) |
| 2099 | return nullptr; |
| 2100 | U = APFloat::getQNaN(U.getSemantics()); |
| 2101 | } |
| 2102 | return ConstantFP::get(Ty->getContext(), U); |
| 2103 | } |
| 2104 | |
| 2105 | |
| 2106 | |
| 2107 | |
| 2108 | if (!U.isFinite()) |
| 2109 | return nullptr; |
| 2110 | |
| 2111 | |
| 2112 | |
| 2113 | |
| 2114 | |
| 2115 | APFloat APF = Op->getValueAPF(); |
| 2116 | |
| 2117 | switch (IntrinsicID) { |
| 2118 | default: break; |
| 2119 | case Intrinsic::log: |
| 2120 | return ConstantFoldFP(log, APF, Ty); |
| 2121 | case Intrinsic::log2: |
| 2122 | |
| 2123 | return ConstantFoldFP(Log2, APF, Ty); |
| 2124 | case Intrinsic::log10: |
| 2125 | |
| 2126 | return ConstantFoldFP(log10, APF, Ty); |
| 2127 | case Intrinsic::exp: |
| 2128 | return ConstantFoldFP(exp, APF, Ty); |
| 2129 | case Intrinsic::exp2: |
| 2130 | |
| 2131 | return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty); |
| 2132 | case Intrinsic::sin: |
| 2133 | return ConstantFoldFP(sin, APF, Ty); |
| 2134 | case Intrinsic::cos: |
| 2135 | return ConstantFoldFP(cos, APF, Ty); |
| 2136 | case Intrinsic::sqrt: |
| 2137 | return ConstantFoldFP(sqrt, APF, Ty); |
| 2138 | case Intrinsic::amdgcn_cos: |
| 2139 | case Intrinsic::amdgcn_sin: { |
| 2140 | double V = getValueAsDouble(Op); |
| 2141 | if (V < -256.0 || V > 256.0) |
| 2142 | |
| 2143 | |
| 2144 | |
| 2145 | return nullptr; |
| 2146 | bool IsCos = IntrinsicID == Intrinsic::amdgcn_cos; |
| 2147 | double V4 = V * 4.0; |
| 2148 | if (V4 == floor(V4)) { |
| 2149 | |
| 2150 | const double SinVals[4] = { 0.0, 1.0, 0.0, -1.0 }; |
| 2151 | V = SinVals[((int)V4 + (IsCos ? 1 : 0)) & 3]; |
| 2152 | } else { |
| 2153 | if (IsCos) |
| 2154 | V = cos(V * 2.0 * numbers::pi); |
| 2155 | else |
| 2156 | V = sin(V * 2.0 * numbers::pi); |
| 2157 | } |
| 2158 | return GetConstantFoldFPValue(V, Ty); |
| 2159 | } |
| 2160 | } |
| 2161 | |
| 2162 | if (!TLI) |
| 2163 | return nullptr; |
| 2164 | |
| 2165 | LibFunc Func = NotLibFunc; |
| 2166 | TLI->getLibFunc(Name, Func); |
| 2167 | switch (Func) { |
| 2168 | default: |
| 2169 | break; |
| 2170 | case LibFunc_acos: |
| 2171 | case LibFunc_acosf: |
| 2172 | case LibFunc_acos_finite: |
| 2173 | case LibFunc_acosf_finite: |
| 2174 | if (TLI->has(Func)) |
| 2175 | return ConstantFoldFP(acos, APF, Ty); |
| 2176 | break; |
| 2177 | case LibFunc_asin: |
| 2178 | case LibFunc_asinf: |
| 2179 | case LibFunc_asin_finite: |
| 2180 | case LibFunc_asinf_finite: |
| 2181 | if (TLI->has(Func)) |
| 2182 | return ConstantFoldFP(asin, APF, Ty); |
| 2183 | break; |
| 2184 | case LibFunc_atan: |
| 2185 | case LibFunc_atanf: |
| 2186 | if (TLI->has(Func)) |
| 2187 | return ConstantFoldFP(atan, APF, Ty); |
| 2188 | break; |
| 2189 | case LibFunc_ceil: |
| 2190 | case LibFunc_ceilf: |
| 2191 | if (TLI->has(Func)) { |
| 2192 | U.roundToIntegral(APFloat::rmTowardPositive); |
| 2193 | return ConstantFP::get(Ty->getContext(), U); |
| 2194 | } |
| 2195 | break; |
| 2196 | case LibFunc_cos: |
| 2197 | case LibFunc_cosf: |
| 2198 | if (TLI->has(Func)) |
| 2199 | return ConstantFoldFP(cos, APF, Ty); |
| 2200 | break; |
| 2201 | case LibFunc_cosh: |
| 2202 | case LibFunc_coshf: |
| 2203 | case LibFunc_cosh_finite: |
| 2204 | case LibFunc_coshf_finite: |
| 2205 | if (TLI->has(Func)) |
| 2206 | return ConstantFoldFP(cosh, APF, Ty); |
| 2207 | break; |
| 2208 | case LibFunc_exp: |
| 2209 | case LibFunc_expf: |
| 2210 | case LibFunc_exp_finite: |
| 2211 | case LibFunc_expf_finite: |
| 2212 | if (TLI->has(Func)) |
| 2213 | return ConstantFoldFP(exp, APF, Ty); |
| 2214 | break; |
| 2215 | case LibFunc_exp2: |
| 2216 | case LibFunc_exp2f: |
| 2217 | case LibFunc_exp2_finite: |
| 2218 | case LibFunc_exp2f_finite: |
| 2219 | if (TLI->has(Func)) |
| 2220 | |
| 2221 | return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty); |
| 2222 | break; |
| 2223 | case LibFunc_fabs: |
| 2224 | case LibFunc_fabsf: |
| 2225 | if (TLI->has(Func)) { |
| 2226 | U.clearSign(); |
| 2227 | return ConstantFP::get(Ty->getContext(), U); |
| 2228 | } |
| 2229 | break; |
| 2230 | case LibFunc_floor: |
| 2231 | case LibFunc_floorf: |
| 2232 | if (TLI->has(Func)) { |
| 2233 | U.roundToIntegral(APFloat::rmTowardNegative); |
| 2234 | return ConstantFP::get(Ty->getContext(), U); |
| 2235 | } |
| 2236 | break; |
| 2237 | case LibFunc_log: |
| 2238 | case LibFunc_logf: |
| 2239 | case LibFunc_log_finite: |
| 2240 | case LibFunc_logf_finite: |
| 2241 | if (!APF.isNegative() && !APF.isZero() && TLI->has(Func)) |
| 2242 | return ConstantFoldFP(log, APF, Ty); |
| 2243 | break; |
| 2244 | case LibFunc_log2: |
| 2245 | case LibFunc_log2f: |
| 2246 | case LibFunc_log2_finite: |
| 2247 | case LibFunc_log2f_finite: |
| 2248 | if (!APF.isNegative() && !APF.isZero() && TLI->has(Func)) |
| 2249 | |
| 2250 | return ConstantFoldFP(Log2, APF, Ty); |
| 2251 | break; |
| 2252 | case LibFunc_log10: |
| 2253 | case LibFunc_log10f: |
| 2254 | case LibFunc_log10_finite: |
| 2255 | case LibFunc_log10f_finite: |
| 2256 | if (!APF.isNegative() && !APF.isZero() && TLI->has(Func)) |
| 2257 | |
| 2258 | return ConstantFoldFP(log10, APF, Ty); |
| 2259 | break; |
| 2260 | case LibFunc_nearbyint: |
| 2261 | case LibFunc_nearbyintf: |
| 2262 | case LibFunc_rint: |
| 2263 | case LibFunc_rintf: |
| 2264 | if (TLI->has(Func)) { |
| 2265 | U.roundToIntegral(APFloat::rmNearestTiesToEven); |
| 2266 | return ConstantFP::get(Ty->getContext(), U); |
| 2267 | } |
| 2268 | break; |
| 2269 | case LibFunc_round: |
| 2270 | case LibFunc_roundf: |
| 2271 | if (TLI->has(Func)) { |
| 2272 | U.roundToIntegral(APFloat::rmNearestTiesToAway); |
| 2273 | return ConstantFP::get(Ty->getContext(), U); |
| 2274 | } |
| 2275 | break; |
| 2276 | case LibFunc_sin: |
| 2277 | case LibFunc_sinf: |
| 2278 | if (TLI->has(Func)) |
| 2279 | return ConstantFoldFP(sin, APF, Ty); |
| 2280 | break; |
| 2281 | case LibFunc_sinh: |
| 2282 | case LibFunc_sinhf: |
| 2283 | case LibFunc_sinh_finite: |
| 2284 | case LibFunc_sinhf_finite: |
| 2285 | if (TLI->has(Func)) |
| 2286 | return ConstantFoldFP(sinh, APF, Ty); |
| 2287 | break; |
| 2288 | case LibFunc_sqrt: |
| 2289 | case LibFunc_sqrtf: |
| 2290 | if (!APF.isNegative() && TLI->has(Func)) |
| 2291 | return ConstantFoldFP(sqrt, APF, Ty); |
| 2292 | break; |
| 2293 | case LibFunc_tan: |
| 2294 | case LibFunc_tanf: |
| 2295 | if (TLI->has(Func)) |
| 2296 | return ConstantFoldFP(tan, APF, Ty); |
| 2297 | break; |
| 2298 | case LibFunc_tanh: |
| 2299 | case LibFunc_tanhf: |
| 2300 | if (TLI->has(Func)) |
| 2301 | return ConstantFoldFP(tanh, APF, Ty); |
| 2302 | break; |
| 2303 | case LibFunc_trunc: |
| 2304 | case LibFunc_truncf: |
| 2305 | if (TLI->has(Func)) { |
| 2306 | U.roundToIntegral(APFloat::rmTowardZero); |
| 2307 | return ConstantFP::get(Ty->getContext(), U); |
| 2308 | } |
| 2309 | break; |
| 2310 | } |
| 2311 | return nullptr; |
| 2312 | } |
| 2313 | |
| 2314 | if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) { |
| 2315 | switch (IntrinsicID) { |
| 2316 | case Intrinsic::bswap: |
| 2317 | return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap()); |
| 2318 | case Intrinsic::ctpop: |
| 2319 | return ConstantInt::get(Ty, Op->getValue().countPopulation()); |
| 2320 | case Intrinsic::bitreverse: |
| 2321 | return ConstantInt::get(Ty->getContext(), Op->getValue().reverseBits()); |
| 2322 | case Intrinsic::convert_from_fp16: { |
| 2323 | APFloat Val(APFloat::IEEEhalf(), Op->getValue()); |
| 2324 | |
| 2325 | bool lost = false; |
| 2326 | APFloat::opStatus status = Val.convert( |
| 2327 | Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &lost); |
| 2328 | |
| 2329 | |
| 2330 | (void)status; |
| 2331 | assert(status == APFloat::opOK && !lost && |
| 2332 | "Precision lost during fp16 constfolding"); |
| 2333 | |
| 2334 | return ConstantFP::get(Ty->getContext(), Val); |
| 2335 | } |
| 2336 | default: |
| 2337 | return nullptr; |
| 2338 | } |
| 2339 | } |
| 2340 | |
| 2341 | switch (IntrinsicID) { |
| 2342 | default: break; |
| 2343 | case Intrinsic::vector_reduce_add: |
| 2344 | case Intrinsic::vector_reduce_mul: |
| 2345 | case Intrinsic::vector_reduce_and: |
| 2346 | case Intrinsic::vector_reduce_or: |
| 2347 | case Intrinsic::vector_reduce_xor: |
| 2348 | case Intrinsic::vector_reduce_smin: |
| 2349 | case Intrinsic::vector_reduce_smax: |
| 2350 | case Intrinsic::vector_reduce_umin: |
| 2351 | case Intrinsic::vector_reduce_umax: |
| 2352 | if (Constant *C = constantFoldVectorReduce(IntrinsicID, Operands[0])) |
| 2353 | return C; |
| 2354 | break; |
| 2355 | } |
| 2356 | |
| 2357 | |
| 2358 | if (isa<ConstantVector>(Operands[0]) || |
| 2359 | isa<ConstantDataVector>(Operands[0])) { |
| 2360 | auto *Op = cast<Constant>(Operands[0]); |
| 2361 | switch (IntrinsicID) { |
| 2362 | default: break; |
| 2363 | case Intrinsic::x86_sse_cvtss2si: |
| 2364 | case Intrinsic::x86_sse_cvtss2si64: |
| 2365 | case Intrinsic::x86_sse2_cvtsd2si: |
| 2366 | case Intrinsic::x86_sse2_cvtsd2si64: |
| 2367 | if (ConstantFP *FPOp = |
| 2368 | dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U))) |
| 2369 | return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(), |
| 2370 | false, Ty, |
| 2371 | true); |
| 2372 | break; |
| 2373 | case Intrinsic::x86_sse_cvttss2si: |
| 2374 | case Intrinsic::x86_sse_cvttss2si64: |
| 2375 | case Intrinsic::x86_sse2_cvttsd2si: |
| 2376 | case Intrinsic::x86_sse2_cvttsd2si64: |
| 2377 | if (ConstantFP *FPOp = |
| 2378 | dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U))) |
| 2379 | return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(), |
| 2380 | true, Ty, |
| 2381 | true); |
| 2382 | break; |
| 2383 | } |
| 2384 | } |
| 2385 | |
| 2386 | return nullptr; |
| 2387 | } |
| 2388 | |
| 2389 | static Constant *ConstantFoldScalarCall2(StringRef Name, |
| 2390 | Intrinsic::ID IntrinsicID, |
| 2391 | Type *Ty, |
| 2392 | ArrayRef<Constant *> Operands, |
| 2393 | const TargetLibraryInfo *TLI, |
| 2394 | const CallBase *Call) { |
| 2395 | assert(Operands.size() == 2 && "Wrong number of operands."); |
| 2396 | |
| 2397 | if (Ty->isFloatingPointTy()) { |
| 1 | | Calling 'Type::isFloatingPointTy' | |
| 10 | | Returning from 'Type::isFloatingPointTy' | |
| 2398 | |
| 2399 | |
| 2400 | bool IsOp0Undef = isa<UndefValue>(Operands[0]); |
| 2401 | bool IsOp1Undef = isa<UndefValue>(Operands[1]); |
| 2402 | switch (IntrinsicID) { |
| 2403 | case Intrinsic::maxnum: |
| 2404 | case Intrinsic::minnum: |
| 2405 | case Intrinsic::maximum: |
| 2406 | case Intrinsic::minimum: |
| 2407 | |
| 2408 | if (IsOp0Undef) |
| 2409 | return Operands[1]; |
| 2410 | if (IsOp1Undef) |
| 2411 | return Operands[0]; |
| 2412 | break; |
| 2413 | } |
| 2414 | } |
| 2415 | |
| 2416 | if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) { |
| 12 | | Assuming the object is not a 'ConstantFP' | |
| 2417 | if (!Ty->isFloatingPointTy()) |
| 2418 | return nullptr; |
| 2419 | APFloat Op1V = Op1->getValueAPF(); |
| 2420 | |
| 2421 | if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) { |
| 2422 | if (Op2->getType() != Op1->getType()) |
| 2423 | return nullptr; |
| 2424 | APFloat Op2V = Op2->getValueAPF(); |
| 2425 | |
| 2426 | if (const auto *ConstrIntr = dyn_cast<ConstrainedFPIntrinsic>(Call)) { |
| 2427 | RoundingMode RM = getEvaluationRoundingMode(ConstrIntr); |
| 2428 | APFloat Res = Op1V; |
| 2429 | APFloat::opStatus St; |
| 2430 | switch (IntrinsicID) { |
| 2431 | default: |
| 2432 | return nullptr; |
| 2433 | case Intrinsic::experimental_constrained_fadd: |
| 2434 | St = Res.add(Op2V, RM); |
| 2435 | break; |
| 2436 | case Intrinsic::experimental_constrained_fsub: |
| 2437 | St = Res.subtract(Op2V, RM); |
| 2438 | break; |
| 2439 | case Intrinsic::experimental_constrained_fmul: |
| 2440 | St = Res.multiply(Op2V, RM); |
| 2441 | break; |
| 2442 | case Intrinsic::experimental_constrained_fdiv: |
| 2443 | St = Res.divide(Op2V, RM); |
| 2444 | break; |
| 2445 | case Intrinsic::experimental_constrained_frem: |
| 2446 | St = Res.mod(Op2V); |
| 2447 | break; |
| 2448 | } |
| 2449 | if (mayFoldConstrained(const_cast<ConstrainedFPIntrinsic *>(ConstrIntr), |
| 2450 | St)) |
| 2451 | return ConstantFP::get(Ty->getContext(), Res); |
| 2452 | return nullptr; |
| 2453 | } |
| 2454 | |
| 2455 | switch (IntrinsicID) { |
| 2456 | default: |
| 2457 | break; |
| 2458 | case Intrinsic::copysign: |
| 2459 | return ConstantFP::get(Ty->getContext(), APFloat::copySign(Op1V, Op2V)); |
| 2460 | case Intrinsic::minnum: |
| 2461 | return ConstantFP::get(Ty->getContext(), minnum(Op1V, Op2V)); |
| 2462 | case Intrinsic::maxnum: |
| 2463 | return ConstantFP::get(Ty->getContext(), maxnum(Op1V, Op2V)); |
| 2464 | case Intrinsic::minimum: |
| 2465 | return ConstantFP::get(Ty->getContext(), minimum(Op1V, Op2V)); |
| 2466 | case Intrinsic::maximum: |
| 2467 | return ConstantFP::get(Ty->getContext(), maximum(Op1V, Op2V)); |
| 2468 | } |
| 2469 | |
| 2470 | if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy()) |
| 2471 | return nullptr; |
| 2472 | |
| 2473 | switch (IntrinsicID) { |
| 2474 | default: |
| 2475 | break; |
| 2476 | case Intrinsic::pow: |
| 2477 | return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty); |
| 2478 | case Intrinsic::amdgcn_fmul_legacy: |
| 2479 | |
| 2480 | |
| 2481 | if (Op1V.isZero() || Op2V.isZero()) |
| 2482 | return ConstantFP::getNullValue(Ty); |
| 2483 | return ConstantFP::get(Ty->getContext(), Op1V * Op2V); |
| 2484 | } |
| 2485 | |
| 2486 | if (!TLI) |
| 2487 | return nullptr; |
| 2488 | |
| 2489 | LibFunc Func = NotLibFunc; |
| 2490 | TLI->getLibFunc(Name, Func); |
| 2491 | switch (Func) { |
| 2492 | default: |
| 2493 | break; |
| 2494 | case LibFunc_pow: |
| 2495 | case LibFunc_powf: |
| 2496 | case LibFunc_pow_finite: |
| 2497 | case LibFunc_powf_finite: |
| 2498 | if (TLI->has(Func)) |
| 2499 | return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty); |
| 2500 | break; |
| 2501 | case LibFunc_fmod: |
| 2502 | case LibFunc_fmodf: |
| 2503 | if (TLI->has(Func)) { |
| 2504 | APFloat V = Op1->getValueAPF(); |
| 2505 | if (APFloat::opStatus::opOK == V.mod(Op2->getValueAPF())) |
| 2506 | return ConstantFP::get(Ty->getContext(), V); |
| 2507 | } |
| 2508 | break; |
| 2509 | case LibFunc_remainder: |
| 2510 | case LibFunc_remainderf: |
| 2511 | if (TLI->has(Func)) { |
| 2512 | APFloat V = Op1->getValueAPF(); |
| 2513 | if (APFloat::opStatus::opOK == V.remainder(Op2->getValueAPF())) |
| 2514 | return ConstantFP::get(Ty->getContext(), V); |
| 2515 | } |
| 2516 | break; |
| 2517 | case LibFunc_atan2: |
| 2518 | case LibFunc_atan2f: |
| 2519 | case LibFunc_atan2_finite: |
| 2520 | case LibFunc_atan2f_finite: |
| 2521 | if (TLI->has(Func)) |
| 2522 | return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty); |
| 2523 | break; |
| 2524 | } |
| 2525 | } else if (auto *Op2C = dyn_cast<ConstantInt>(Operands[1])) { |
| 2526 | if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy()) |
| 2527 | return nullptr; |
| 2528 | if (IntrinsicID == Intrinsic::powi && Ty->isHalfTy()) |
| 2529 | return ConstantFP::get( |
| 2530 | Ty->getContext(), |
| 2531 | APFloat((float)std::pow((float)Op1V.convertToDouble(), |
| 2532 | (int)Op2C->getZExtValue()))); |
| 2533 | if (IntrinsicID == Intrinsic::powi && Ty->isFloatTy()) |
| 2534 | return ConstantFP::get( |
| 2535 | Ty->getContext(), |
| 2536 | APFloat((float)std::pow((float)Op1V.convertToDouble(), |
| 2537 | (int)Op2C->getZExtValue()))); |
| 2538 | if (IntrinsicID == Intrinsic::powi && Ty->isDoubleTy()) |
| 2539 | return ConstantFP::get( |
| 2540 | Ty->getContext(), |
| 2541 | APFloat((double)std::pow(Op1V.convertToDouble(), |
| 2542 | (int)Op2C->getZExtValue()))); |
| 2543 | |
| 2544 | if (IntrinsicID == Intrinsic::amdgcn_ldexp) { |
| 2545 | |
| 2546 | |
| 2547 | |
| 2548 | |
| 2549 | APFloat Result = scalbn(Op1->getValueAPF(), Op2C->getSExtValue(), |
| 2550 | APFloat::rmNearestTiesToEven); |
| 2551 | return ConstantFP::get(Ty->getContext(), Result); |
| 2552 | } |
| 2553 | } |
| 2554 | return nullptr; |
| 2555 | } |
| 2556 | |
| 2557 | if (Operands[0]->getType()->isIntegerTy() && |
| 14 | | Calling 'Type::isIntegerTy' | |
| 17 | | Returning from 'Type::isIntegerTy' | |
| 2558 | Operands[1]->getType()->isIntegerTy()) { |
| 18 | | Calling 'Type::isIntegerTy' | |
| 21 | | Returning from 'Type::isIntegerTy' | |
| 2559 | const APInt *C0, *C1; |
| 2560 | if (!getConstIntOrUndef(Operands[0], C0) || |
| 23 | | Calling 'getConstIntOrUndef' | |
| 30 | | Returning from 'getConstIntOrUndef' | |
| 2561 | !getConstIntOrUndef(Operands[1], C1)) |
| 31 | | Calling 'getConstIntOrUndef' | |
| 35 | | Returning from 'getConstIntOrUndef' | |
| 2562 | return nullptr; |
| 2563 | |
| 2564 | unsigned BitWidth = Ty->getScalarSizeInBits(); |
| 2565 | switch (IntrinsicID) { |
| 37 | | Control jumps to 'case abs:' at line 2683 | |
| 2566 | default: break; |
| 2567 | case Intrinsic::smax: |
| 2568 | if (!C0 && !C1) |
| 2569 | return UndefValue::get(Ty); |
| 2570 | if (!C0 || !C1) |
| 2571 | return ConstantInt::get(Ty, APInt::getSignedMaxValue(BitWidth)); |
| 2572 | return ConstantInt::get(Ty, C0->sgt(*C1) ? *C0 : *C1); |
| 2573 | |
| 2574 | case Intrinsic::smin: |
| 2575 | if (!C0 && !C1) |
| 2576 | return UndefValue::get(Ty); |
| 2577 | if (!C0 || !C1) |
| 2578 | return ConstantInt::get(Ty, APInt::getSignedMinValue(BitWidth)); |
| 2579 | return ConstantInt::get(Ty, C0->slt(*C1) ? *C0 : *C1); |
| 2580 | |
| 2581 | case Intrinsic::umax: |
| 2582 | if (!C0 && !C1) |
| 2583 | return UndefValue::get(Ty); |
| 2584 | if (!C0 || !C1) |
| 2585 | return ConstantInt::get(Ty, APInt::getMaxValue(BitWidth)); |
| 2586 | return ConstantInt::get(Ty, C0->ugt(*C1) ? *C0 : *C1); |
| 2587 | |
| 2588 | case Intrinsic::umin: |
| 2589 | if (!C0 && !C1) |
| 2590 | return UndefValue::get(Ty); |
| 2591 | if (!C0 || !C1) |
| 2592 | return ConstantInt::get(Ty, APInt::getMinValue(BitWidth)); |
| 2593 | return ConstantInt::get(Ty, C0->ult(*C1) ? *C0 : *C1); |
| 2594 | |
| 2595 | case Intrinsic::usub_with_overflow: |
| 2596 | case Intrinsic::ssub_with_overflow: |
| 2597 | |
| 2598 | |
| 2599 | if (!C0 || !C1) |
| 2600 | return Constant::getNullValue(Ty); |
| 2601 | LLVM_FALLTHROUGH; |
| 2602 | case Intrinsic::uadd_with_overflow: |
| 2603 | case Intrinsic::sadd_with_overflow: |
| 2604 | |
| 2605 | |
| 2606 | if (!C0 || !C1) { |
| 2607 | return ConstantStruct::get( |
| 2608 | cast<StructType>(Ty), |
| 2609 | {Constant::getAllOnesValue(Ty->getStructElementType(0)), |
| 2610 | Constant::getNullValue(Ty->getStructElementType(1))}); |
| 2611 | } |
| 2612 | LLVM_FALLTHROUGH; |
| 2613 | case Intrinsic::smul_with_overflow: |
| 2614 | case Intrinsic::umul_with_overflow: { |
| 2615 | |
| 2616 | |
| 2617 | if (!C0 || !C1) |
| 2618 | return Constant::getNullValue(Ty); |
| 2619 | |
| 2620 | APInt Res; |
| 2621 | bool Overflow; |
| 2622 | switch (IntrinsicID) { |
| 2623 | default: llvm_unreachable("Invalid case"); |
| 2624 | case Intrinsic::sadd_with_overflow: |
| 2625 | Res = C0->sadd_ov(*C1, Overflow); |
| 2626 | break; |
| 2627 | case Intrinsic::uadd_with_overflow: |
| 2628 | Res = C0->uadd_ov(*C1, Overflow); |
| 2629 | break; |
| 2630 | case Intrinsic::ssub_with_overflow: |
| 2631 | Res = C0->ssub_ov(*C1, Overflow); |
| 2632 | break; |
| 2633 | case Intrinsic::usub_with_overflow: |
| 2634 | Res = C0->usub_ov(*C1, Overflow); |
| 2635 | break; |
| 2636 | case Intrinsic::smul_with_overflow: |
| 2637 | Res = C0->smul_ov(*C1, Overflow); |
| 2638 | break; |
| 2639 | case Intrinsic::umul_with_overflow: |
| 2640 | Res = C0->umul_ov(*C1, Overflow); |
| 2641 | break; |
| 2642 | } |
| 2643 | Constant *Ops[] = { |
| 2644 | ConstantInt::get(Ty->getContext(), Res), |
| 2645 | ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow) |
| 2646 | }; |
| 2647 | return ConstantStruct::get(cast<StructType>(Ty), Ops); |
| 2648 | } |
| 2649 | case Intrinsic::uadd_sat: |
| 2650 | case Intrinsic::sadd_sat: |
| 2651 | if (!C0 && !C1) |
| 2652 | return UndefValue::get(Ty); |
| 2653 | if (!C0 || !C1) |
| 2654 | return Constant::getAllOnesValue(Ty); |
| 2655 | if (IntrinsicID == Intrinsic::uadd_sat) |
| 2656 | return ConstantInt::get(Ty, C0->uadd_sat(*C1)); |
| 2657 | else |
| 2658 | return ConstantInt::get(Ty, C0->sadd_sat(*C1)); |
| 2659 | case Intrinsic::usub_sat: |
| 2660 | case Intrinsic::ssub_sat: |
| 2661 | if (!C0 && !C1) |
| 2662 | return UndefValue::get(Ty); |
| 2663 | if (!C0 || !C1) |
| 2664 | return Constant::getNullValue(Ty); |
| 2665 | if (IntrinsicID == Intrinsic::usub_sat) |
| 2666 | return ConstantInt::get(Ty, C0->usub_sat(*C1)); |
| 2667 | else |
| 2668 | return ConstantInt::get(Ty, C0->ssub_sat(*C1)); |
| 2669 | case Intrinsic::cttz: |
| 2670 | case Intrinsic::ctlz: |
| 2671 | assert(C1 && "Must be constant int"); |
| 2672 | |
| 2673 | |
| 2674 | if (C1->isOneValue() && (!C0 || C0->isNullValue())) |
| 2675 | return UndefValue::get(Ty); |
| 2676 | if (!C0) |
| 2677 | return Constant::getNullValue(Ty); |
| 2678 | if (IntrinsicID == Intrinsic::cttz) |
| 2679 | return ConstantInt::get(Ty, C0->countTrailingZeros()); |
| 2680 | else |
| 2681 | return ConstantInt::get(Ty, C0->countLeadingZeros()); |
| 2682 | |
| 2683 | case Intrinsic::abs: |
| 2684 | |
| 2685 | assert(C1 && "Must be constant int"); |
| 2686 | if (C1->isOneValue() && (!C0 || C0->isMinSignedValue())) |
| 38 | | Calling 'APInt::isOneValue' | |
| 46 | | Returning from 'APInt::isOneValue' | |
| 2687 | return UndefValue::get(Ty); |
| 2688 | |
| 2689 | |
| 2690 | if (C1->isNullValue() && !C0) |
| 47 | | Calling 'APInt::isNullValue' | |
| 57 | | Returning from 'APInt::isNullValue' | |
| 2691 | return Constant::getNullValue(Ty); |
| 2692 | |
| 2693 | return ConstantInt::get(Ty, C0->abs()); |
| 58 | | Called C++ object pointer is null | |
| 2694 | } |
| 2695 | |
| 2696 | return nullptr; |
| 2697 | } |
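
The warning at line 2693 is where the path traced by the notes ends: Operands[0] is an UndefValue, so getConstIntOrUndef leaves C0 null (note 28), and on the reported path both the isOneValue check (notes 38/46) and the isNullValue check (notes 47/57) come out false, so control reaches C0->abs() with a null C0. In IR the second operand of llvm.abs is an i1 flag that can only be 0 or 1, which suggests the path may not be reachable in practice, but the analyzer does not model that. The rewrite below is only a hypothetical illustration of where that path would be cut off, not a change taken from the report:

    // Hypothetical reordering of the Intrinsic::abs case (illustration only):
    // handling a null C0 up front makes the final dereference unreachable on
    // the analyzer's path, without changing behaviour for an i1 C1.
    case Intrinsic::abs:
      assert(C1 && "Must be constant int");
      if (!C0)
        return C1->isOneValue() ? UndefValue::get(Ty)         // abs(undef, true)
                                : Constant::getNullValue(Ty); // abs(undef, false)
      if (C1->isOneValue() && C0->isMinSignedValue())
        return UndefValue::get(Ty); // INT_MIN with the poison flag set
      return ConstantInt::get(Ty, C0->abs());
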
| 2698 | |
| 2699 | |
| 2700 | if ((isa<ConstantVector>(Operands[0]) || |
| 2701 | isa<ConstantDataVector>(Operands[0])) && |
| 2702 | |
| 2703 | |
| 2704 | isa<ConstantInt>(Operands[1]) && |
| 2705 | cast<ConstantInt>(Operands[1])->getValue() == 4) { |
| 2706 | auto *Op = cast<Constant>(Operands[0]); |
| 2707 | switch (IntrinsicID) { |
| 2708 | default: break; |
| 2709 | case Intrinsic::x86_avx512_vcvtss2si32: |
| 2710 | case Intrinsic::x86_avx512_vcvtss2si64: |
| 2711 | case Intrinsic::x86_avx512_vcvtsd2si32: |
| 2712 | case Intrinsic::x86_avx512_vcvtsd2si64: |
| 2713 | if (ConstantFP *FPOp = |
| 2714 | dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U))) |
| 2715 | return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(), |
| 2716 | false, Ty, |
| 2717 | true); |
| 2718 | break; |
| 2719 | case Intrinsic::x86_avx512_vcvtss2usi32: |
| 2720 | case Intrinsic::x86_avx512_vcvtss2usi64: |
| 2721 | case Intrinsic::x86_avx512_vcvtsd2usi32: |
| 2722 | case Intrinsic::x86_avx512_vcvtsd2usi64: |
| 2723 | if (ConstantFP *FPOp = |
| 2724 | dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U))) |
| 2725 | return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(), |
| 2726 | false, Ty, |
| 2727 | false); |
| 2728 | break; |
| 2729 | case Intrinsic::x86_avx512_cvttss2si: |
| 2730 | case Intrinsic::x86_avx512_cvttss2si64: |
| 2731 | case Intrinsic::x86_avx512_cvttsd2si: |
| 2732 | case Intrinsic::x86_avx512_cvttsd2si64: |
| 2733 | if (ConstantFP *FPOp = |
| 2734 | dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U))) |
| 2735 | return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(), |
| 2736 | true, Ty, |
| 2737 | true); |
| 2738 | break; |
| 2739 | case Intrinsic::x86_avx512_cvttss2usi: |
| 2740 | case Intrinsic::x86_avx512_cvttss2usi64: |
| 2741 | case Intrinsic::x86_avx512_cvttsd2usi: |
| 2742 | case Intrinsic::x86_avx512_cvttsd2usi64: |
| 2743 | if (ConstantFP *FPOp = |
| 2744 | dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U))) |
| 2745 | return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(), |
| 2746 | true, Ty, |
| 2747 | false); |
| 2748 | break; |
| 2749 | } |
| 2750 | } |
| 2751 | return nullptr; |
| 2752 | } |
| 2753 | |
| 2754 | static APFloat ConstantFoldAMDGCNCubeIntrinsic(Intrinsic::ID IntrinsicID, |
| 2755 | const APFloat &S0, |
| 2756 | const APFloat &S1, |
| 2757 | const APFloat &S2) { |
| 2758 | unsigned ID; |
| 2759 | const fltSemantics &Sem = S0.getSemantics(); |
| 2760 | APFloat MA(Sem), SC(Sem), TC(Sem); |
| 2761 | if (abs(S2) >= abs(S0) && abs(S2) >= abs(S1)) { |
| 2762 | if (S2.isNegative() && S2.isNonZero() && !S2.isNaN()) { |
| 2763 | |
| 2764 | ID = 5; |
| 2765 | SC = -S0; |
| 2766 | } else { |
| 2767 | ID = 4; |
| 2768 | SC = S0; |
| 2769 | } |
| 2770 | MA = S2; |
| 2771 | TC = -S1; |
| 2772 | } else if (abs(S1) >= abs(S0)) { |
| 2773 | if (S1.isNegative() && S1.isNonZero() && !S1.isNaN()) { |
| 2774 | |
| 2775 | ID = 3; |
| 2776 | TC = -S2; |
| 2777 | } else { |
| 2778 | ID = 2; |
| 2779 | TC = S2; |
| 2780 | } |
| 2781 | MA = S1; |
| 2782 | SC = S0; |
| 2783 | } else { |
| 2784 | if (S0.isNegative() && S0.isNonZero() && !S0.isNaN()) { |
| 2785 | |
| 2786 | ID = 1; |
| 2787 | SC = S2; |
| 2788 | } else { |
| 2789 | ID = 0; |
| 2790 | SC = -S2; |
| 2791 | } |
| 2792 | MA = S0; |
| 2793 | TC = -S1; |
| 2794 | } |
| 2795 | switch (IntrinsicID) { |
| 2796 | default: |
| 2797 | llvm_unreachable("unhandled amdgcn cube intrinsic"); |
| 2798 | case Intrinsic::amdgcn_cubeid: |
| 2799 | return APFloat(Sem, ID); |
| 2800 | case Intrinsic::amdgcn_cubema: |
| 2801 | return MA + MA; |
| 2802 | case Intrinsic::amdgcn_cubesc: |
| 2803 | return SC; |
| 2804 | case Intrinsic::amdgcn_cubetc: |
| 2805 | return TC; |
| 2806 | } |
| 2807 | } |
| 2808 | |
| 2809 | static Constant *ConstantFoldAMDGCNPermIntrinsic(ArrayRef<Constant *> Operands, |
| 2810 | Type *Ty) { |
| 2811 | const APInt *C0, *C1, *C2; |
| 2812 | if (!getConstIntOrUndef(Operands[0], C0) || |
| 2813 | !getConstIntOrUndef(Operands[1], C1) || |
| 2814 | !getConstIntOrUndef(Operands[2], C2)) |
| 2815 | return nullptr; |
| 2816 | |
| 2817 | if (!C2) |
| 2818 | return UndefValue::get(Ty); |
| 2819 | |
| 2820 | APInt Val(32, 0); |
| 2821 | unsigned NumUndefBytes = 0; |
| 2822 | for (unsigned I = 0; I < 32; I += 8) { |
| 2823 | unsigned Sel = C2->extractBitsAsZExtValue(8, I); |
| 2824 | unsigned B = 0; |
| 2825 | |
| 2826 | if (Sel >= 13) |
| 2827 | B = 0xff; |
| 2828 | else if (Sel == 12) |
| 2829 | B = 0x00; |
| 2830 | else { |
| 2831 | const APInt *Src = ((Sel & 10) == 10 || (Sel & 12) == 4) ? C0 : C1; |
| 2832 | if (!Src) |
| 2833 | ++NumUndefBytes; |
| 2834 | else if (Sel < 8) |
| 2835 | B = Src->extractBitsAsZExtValue(8, (Sel & 3) * 8); |
| 2836 | else |
| 2837 | B = Src->extractBitsAsZExtValue(1, (Sel & 1) ? 31 : 15) * 0xff; |
| 2838 | } |
| 2839 | |
| 2840 | Val.insertBits(B, I, 8); |
| 2841 | } |
| 2842 | |
| 2843 | if (NumUndefBytes == 4) |
| 2844 | return UndefValue::get(Ty); |
| 2845 | |
| 2846 | return ConstantInt::get(Ty, Val); |
| 2847 | } |
| 2848 | |
| 2849 | static Constant *ConstantFoldScalarCall3(StringRef Name, |
| 2850 | Intrinsic::ID IntrinsicID, |
| 2851 | Type *Ty, |
| 2852 | ArrayRef<Constant *> Operands, |
| 2853 | const TargetLibraryInfo *TLI, |
| 2854 | const CallBase *Call) { |
| 2855 | assert(Operands.size() == 3 && "Wrong number of operands."); |
| 2856 | |
| 2857 | if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) { |
| 2858 | if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) { |
| 2859 | if (const auto *Op3 = dyn_cast<ConstantFP>(Operands[2])) { |
| 2860 | const APFloat &C1 = Op1->getValueAPF(); |
| 2861 | const APFloat &C2 = Op2->getValueAPF(); |
| 2862 | const APFloat &C3 = Op3->getValueAPF(); |
| 2863 | |
| 2864 | if (const auto *ConstrIntr = dyn_cast<ConstrainedFPIntrinsic>(Call)) { |
| 2865 | RoundingMode RM = getEvaluationRoundingMode(ConstrIntr); |
| 2866 | APFloat Res = C1; |
| 2867 | APFloat::opStatus St; |
| 2868 | switch (IntrinsicID) { |
| 2869 | default: |
| 2870 | return nullptr; |
| 2871 | case Intrinsic::experimental_constrained_fma: |
| 2872 | case Intrinsic::experimental_constrained_fmuladd: |
| 2873 | St = Res.fusedMultiplyAdd(C2, C3, RM); |
| 2874 | break; |
| 2875 | } |
| 2876 | if (mayFoldConstrained( |
| 2877 | const_cast<ConstrainedFPIntrinsic *>(ConstrIntr), St)) |
| 2878 | return ConstantFP::get(Ty->getContext(), Res); |
| 2879 | return nullptr; |
| 2880 | } |
| 2881 | |
| 2882 | switch (IntrinsicID) { |
| 2883 | default: break; |
| 2884 | case Intrinsic::amdgcn_fma_legacy: { |
| 2885 | |
| 2886 | |
| 2887 | if (C1.isZero() || C2.isZero()) { |
| 2888 | |
| 2889 | |
| 2890 | return ConstantFP::get(Ty->getContext(), APFloat(0.0f) + C3); |
| 2891 | } |
| 2892 | LLVM_FALLTHROUGH; |
| 2893 | } |
| 2894 | case Intrinsic::fma: |
| 2895 | case Intrinsic::fmuladd: { |
| 2896 | APFloat V = C1; |
| 2897 | V.fusedMultiplyAdd(C2, C3, APFloat::rmNearestTiesToEven); |
| 2898 | return ConstantFP::get(Ty->getContext(), V); |
| 2899 | } |
| 2900 | case Intrinsic::amdgcn_cubeid: |
| 2901 | case Intrinsic::amdgcn_cubema: |
| 2902 | case Intrinsic::amdgcn_cubesc: |
| 2903 | case Intrinsic::amdgcn_cubetc: { |
| 2904 | APFloat V = ConstantFoldAMDGCNCubeIntrinsic(IntrinsicID, C1, C2, C3); |
| 2905 | return ConstantFP::get(Ty->getContext(), V); |
| 2906 | } |
| 2907 | } |
| 2908 | } |
| 2909 | } |
| 2910 | } |
| 2911 | |
| 2912 | if (IntrinsicID == Intrinsic::smul_fix || |
| 2913 | IntrinsicID == Intrinsic::smul_fix_sat) { |
| 2914 | |
| 2915 | |
| 2916 | if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1])) |
| 2917 | return PoisonValue::get(Ty); |
| 2918 | |
| 2919 | const APInt *C0, *C1; |
| 2920 | if (!getConstIntOrUndef(Operands[0], C0) || |
| 2921 | !getConstIntOrUndef(Operands[1], C1)) |
| 2922 | return nullptr; |
| 2923 | |
| 2924 | |
| 2925 | |
| 2926 | if (!C0 || !C1) |
| 2927 | return Constant::getNullValue(Ty); |
| 2928 | |
| 2929 | |
| 2930 | |
| 2931 | |
| 2932 | |
| 2933 | |
| 2934 | |
| 2935 | unsigned Scale = cast<ConstantInt>(Operands[2])->getZExtValue(); |
| 2936 | unsigned Width = C0->getBitWidth(); |
| 2937 | assert(Scale < Width && "Illegal scale."); |
| 2938 | unsigned ExtendedWidth = Width * 2; |
| 2939 | APInt Product = (C0->sextOrSelf(ExtendedWidth) * |
| 2940 | C1->sextOrSelf(ExtendedWidth)).ashr(Scale); |
| 2941 | if (IntrinsicID == Intrinsic::smul_fix_sat) { |
| 2942 | APInt Max = APInt::getSignedMaxValue(Width).sextOrSelf(ExtendedWidth); |
| 2943 | APInt Min = APInt::getSignedMinValue(Width).sextOrSelf(ExtendedWidth); |
| 2944 | Product = APIntOps::smin(Product, Max); |
| 2945 | Product = APIntOps::smax(Product, Min); |
| 2946 | } |
| 2947 | return ConstantInt::get(Ty->getContext(), Product.sextOrTrunc(Width)); |
| 2948 | } |
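
The smul_fix fold widens both operands, multiplies, and arithmetic-shifts the product right by the scale, saturating to the signed range of the original width for smul_fix_sat. A minimal sketch of the unsaturated arithmetic for 32-bit operands, using plain integers rather than APInt (helper name is mine):

    #include <cstdint>
    #include <cstdio>

    // Q16.16 example: widen to 64 bits, multiply, shift right by the scale.
    static int32_t smulFix32(int32_t A, int32_t B, unsigned Scale) {
      int64_t Product = (int64_t)A * (int64_t)B;
      return (int32_t)(Product >> Scale); // arithmetic shift; may wrap like smul.fix
    }

    int main() {
      int32_t OneAndHalf = 0x18000;    // 1.5 in Q16.16
      int32_t TwoAndQuarter = 0x24000; // 2.25 in Q16.16
      // 1.5 * 2.25 = 3.375 -> 0x36000 in Q16.16
      std::printf("0x%x\n", (unsigned)smulFix32(OneAndHalf, TwoAndQuarter, 16));
    }
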
| 2949 | |
| 2950 | if (IntrinsicID == Intrinsic::fshl || IntrinsicID == Intrinsic::fshr) { |
| 2951 | const APInt *C0, *C1, *C2; |
| 2952 | if (!getConstIntOrUndef(Operands[0], C0) || |
| 2953 | !getConstIntOrUndef(Operands[1], C1) || |
| 2954 | !getConstIntOrUndef(Operands[2], C2)) |
| 2955 | return nullptr; |
| 2956 | |
| 2957 | bool IsRight = IntrinsicID == Intrinsic::fshr; |
| 2958 | if (!C2) |
| 2959 | return Operands[IsRight ? 1 : 0]; |
| 2960 | if (!C0 && !C1) |
| 2961 | return UndefValue::get(Ty); |
| 2962 | |
| 2963 | |
| 2964 | |
| 2965 | unsigned BitWidth = C2->getBitWidth(); |
| 2966 | unsigned ShAmt = C2->urem(BitWidth); |
| 2967 | if (!ShAmt) |
| 2968 | return Operands[IsRight ? 1 : 0]; |
| 2969 | |
| 2970 | |
| 2971 | unsigned LshrAmt = IsRight ? ShAmt : BitWidth - ShAmt; |
| 2972 | unsigned ShlAmt = !IsRight ? ShAmt : BitWidth - ShAmt; |
| 2973 | if (!C0) |
| 2974 | return ConstantInt::get(Ty, C1->lshr(LshrAmt)); |
| 2975 | if (!C1) |
| 2976 | return ConstantInt::get(Ty, C0->shl(ShlAmt)); |
| 2977 | return ConstantInt::get(Ty, C0->shl(ShlAmt) | C1->lshr(LshrAmt)); |
| 2978 | } |
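
The funnel-shift fold reduces to: take the shift amount modulo the bit width, then OR together a left shift of the first operand and a right shift of the second (which operand supplies which bits depends on fshl vs. fshr). A standalone 32-bit sketch of the fshl direction (helper name is mine):

    #include <cstdint>
    #include <cstdio>

    // fshl on 32 bits: the high word of the 64-bit concatenation A:B after
    // shifting left by Amt modulo 32.
    static uint32_t fshl32(uint32_t A, uint32_t B, uint32_t Amt) {
      unsigned S = Amt % 32;
      if (S == 0)
        return A;                       // avoid the out-of-range shift of B
      return (A << S) | (B >> (32 - S));
    }

    int main() {
      // The top 8 bits of B are funneled into the low 8 bits of the result.
      std::printf("0x%08x\n", fshl32(0x12345678u, 0x9abcdef0u, 8)); // 0x3456789a
    }
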
| 2979 | |
| 2980 | if (IntrinsicID == Intrinsic::amdgcn_perm) |
| 2981 | return ConstantFoldAMDGCNPermIntrinsic(Operands, Ty); |
| 2982 | |
| 2983 | return nullptr; |
| 2984 | } |
| 2985 | |
| 2986 | static Constant *ConstantFoldScalarCall(StringRef Name, |
| 2987 | Intrinsic::ID IntrinsicID, |
| 2988 | Type *Ty, |
| 2989 | ArrayRef<Constant *> Operands, |
| 2990 | const TargetLibraryInfo *TLI, |
| 2991 | const CallBase *Call) { |
| 2992 | if (Operands.size() == 1) |
| 2993 | return ConstantFoldScalarCall1(Name, IntrinsicID, Ty, Operands, TLI, Call); |
| 2994 | |
| 2995 | if (Operands.size() == 2) |
| 2996 | return ConstantFoldScalarCall2(Name, IntrinsicID, Ty, Operands, TLI, Call); |
| 2997 | |
| 2998 | if (Operands.size() == 3) |
| 2999 | return ConstantFoldScalarCall3(Name, IntrinsicID, Ty, Operands, TLI, Call); |
| 3000 | |
| 3001 | return nullptr; |
| 3002 | } |
| 3003 | |
| 3004 | static Constant *ConstantFoldFixedVectorCall( |
| 3005 | StringRef Name, Intrinsic::ID IntrinsicID, FixedVectorType *FVTy, |
| 3006 | ArrayRef<Constant *> Operands, const DataLayout &DL, |
| 3007 | const TargetLibraryInfo *TLI, const CallBase *Call) { |
| 3008 | SmallVector<Constant *, 4> Result(FVTy->getNumElements()); |
| 3009 | SmallVector<Constant *, 4> Lane(Operands.size()); |
| 3010 | Type *Ty = FVTy->getElementType(); |
| 3011 | |
| 3012 | switch (IntrinsicID) { |
| 3013 | case Intrinsic::masked_load: { |
| 3014 | auto *SrcPtr = Operands[0]; |
| 3015 | auto *Mask = Operands[2]; |
| 3016 | auto *Passthru = Operands[3]; |
| 3017 | |
| 3018 | Constant *VecData = ConstantFoldLoadFromConstPtr(SrcPtr, FVTy, DL); |
| 3019 | |
| 3020 | SmallVector<Constant *, 32> NewElements; |
| 3021 | for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) { |
| 3022 | auto *MaskElt = Mask->getAggregateElement(I); |
| 3023 | if (!MaskElt) |
| 3024 | break; |
| 3025 | auto *PassthruElt = Passthru->getAggregateElement(I); |
| 3026 | auto *VecElt = VecData ? VecData->getAggregateElement(I) : nullptr; |
| 3027 | if (isa<UndefValue>(MaskElt)) { |
| 3028 | if (PassthruElt) |
| 3029 | NewElements.push_back(PassthruElt); |
| 3030 | else if (VecElt) |
| 3031 | NewElements.push_back(VecElt); |
| 3032 | else |
| 3033 | return nullptr; |
| 3034 | } |
| 3035 | if (MaskElt->isNullValue()) { |
| 3036 | if (!PassthruElt) |
| 3037 | return nullptr; |
| 3038 | NewElements.push_back(PassthruElt); |
| 3039 | } else if (MaskElt->isOneValue()) { |
| 3040 | if (!VecElt) |
| 3041 | return nullptr; |
| 3042 | NewElements.push_back(VecElt); |
| 3043 | } else { |
| 3044 | return nullptr; |
| 3045 | } |
| 3046 | } |
| 3047 | if (NewElements.size() != FVTy->getNumElements()) |
| 3048 | return nullptr; |
| 3049 | return ConstantVector::get(NewElements); |
| 3050 | } |
| 3051 | case Intrinsic::arm_mve_vctp8: |
| 3052 | case Intrinsic::arm_mve_vctp16: |
| 3053 | case Intrinsic::arm_mve_vctp32: |
| 3054 | case Intrinsic::arm_mve_vctp64: { |
| 3055 | if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) { |
| 3056 | unsigned Lanes = FVTy->getNumElements(); |
| 3057 | uint64_t Limit = Op->getZExtValue(); |
| 3058 | |
| 3059 | |
| 3060 | if (IntrinsicID == Intrinsic::arm_mve_vctp64) |
| 3061 | Limit *= 2; |
| 3062 | |
| 3063 | SmallVector<Constant *, 16> NCs; |
| 3064 | for (unsigned i = 0; i < Lanes; i++) { |
| 3065 | if (i < Limit) |
| 3066 | NCs.push_back(ConstantInt::getTrue(Ty)); |
| 3067 | else |
| 3068 | NCs.push_back(ConstantInt::getFalse(Ty)); |
| 3069 | } |
| 3070 | return ConstantVector::get(NCs); |
| 3071 | } |
| 3072 | break; |
| 3073 | } |
| 3074 | case Intrinsic::get_active_lane_mask: { |
| 3075 | auto *Op0 = dyn_cast<ConstantInt>(Operands[0]); |
| 3076 | auto *Op1 = dyn_cast<ConstantInt>(Operands[1]); |
| 3077 | if (Op0 && Op1) { |
| 3078 | unsigned Lanes = FVTy->getNumElements(); |
| 3079 | uint64_t Base = Op0->getZExtValue(); |
| 3080 | uint64_t Limit = Op1->getZExtValue(); |
| 3081 | |
| 3082 | SmallVector<Constant *, 16> NCs; |
| 3083 | for (unsigned i = 0; i < Lanes; i++) { |
| 3084 | if (Base + i < Limit) |
| 3085 | NCs.push_back(ConstantInt::getTrue(Ty)); |
| 3086 | else |
| 3087 | NCs.push_back(ConstantInt::getFalse(Ty)); |
| 3088 | } |
| 3089 | return ConstantVector::get(NCs); |
| 3090 | } |
| 3091 | break; |
| 3092 | } |
| 3093 | default: |
| 3094 | break; |
| 3095 | } |
| 3096 | |
| 3097 | for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) { |
| 3098 | |
| 3099 | for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) { |
| 3100 | |
| 3101 | if (hasVectorInstrinsicScalarOpd(IntrinsicID, J)) { |
| 3102 | Lane[J] = Operands[J]; |
| 3103 | continue; |
| 3104 | } |
| 3105 | |
| 3106 | Constant *Agg = Operands[J]->getAggregateElement(I); |
| 3107 | if (!Agg) |
| 3108 | return nullptr; |
| 3109 | |
| 3110 | Lane[J] = Agg; |
| 3111 | } |
| 3112 | |
| 3113 | |
| 3114 | Constant *Folded = |
| 3115 | ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI, Call); |
| 3116 | if (!Folded) |
| 3117 | return nullptr; |
| 3118 | Result[I] = Folded; |
| 3119 | } |
| 3120 | |
| 3121 | return ConstantVector::get(Result); |
| 3122 | } |
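// A minimal standalone sketch of the lane pattern the get_active_lane_mask case above
// folds to, not taken from the analyzed sources; the 4-lane width and values are mine.
#include <array>
#include <cstdint>

std::array<bool, 4> activeLaneMask4(uint64_t Base, uint64_t Limit) {
  std::array<bool, 4> Lanes{};
  for (unsigned I = 0; I < 4; ++I)
    Lanes[I] = (Base + I < Limit);   // lane I is true while Base + I stays below Limit
  return Lanes;
}
// activeLaneMask4(2, 5) == {true, true, true, false}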
| 3123 | |
| 3124 | static Constant *ConstantFoldScalableVectorCall( |
| 3125 | StringRef Name, Intrinsic::ID IntrinsicID, ScalableVectorType *SVTy, |
| 3126 | ArrayRef<Constant *> Operands, const DataLayout &DL, |
| 3127 | const TargetLibraryInfo *TLI, const CallBase *Call) { |
| 3128 | switch (IntrinsicID) { |
| 3129 | case Intrinsic::aarch64_sve_convert_from_svbool: { |
| 3130 | auto *Src = dyn_cast<Constant>(Operands[0]); |
| 3131 | if (!Src || !Src->isNullValue()) |
| 3132 | break; |
| 3133 | |
| 3134 | return ConstantInt::getFalse(SVTy); |
| 3135 | } |
| 3136 | default: |
| 3137 | break; |
| 3138 | } |
| 3139 | return nullptr; |
| 3140 | } |
| 3141 | |
| 3142 | } |
| 3143 | |
| 3144 | Constant *llvm::ConstantFoldCall(const CallBase *Call, Function *F, |
| 3145 | ArrayRef<Constant *> Operands, |
| 3146 | const TargetLibraryInfo *TLI) { |
| 3147 | if (Call->isNoBuiltin()) |
| 3148 | return nullptr; |
| 3149 | if (!F->hasName()) |
| 3150 | return nullptr; |
| 3151 | |
| 3152 | |
| 3153 | if (F->getIntrinsicID() == Intrinsic::not_intrinsic) { |
| 3154 | if (!TLI) |
| 3155 | return nullptr; |
| 3156 | LibFunc LibF; |
| 3157 | if (!TLI->getLibFunc(*F, LibF)) |
| 3158 | return nullptr; |
| 3159 | } |
| 3160 | |
| 3161 | StringRef Name = F->getName(); |
| 3162 | Type *Ty = F->getReturnType(); |
| 3163 | if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) |
| 3164 | return ConstantFoldFixedVectorCall( |
| 3165 | Name, F->getIntrinsicID(), FVTy, Operands, |
| 3166 | F->getParent()->getDataLayout(), TLI, Call); |
| 3167 | |
| 3168 | if (auto *SVTy = dyn_cast<ScalableVectorType>(Ty)) |
| 3169 | return ConstantFoldScalableVectorCall( |
| 3170 | Name, F->getIntrinsicID(), SVTy, Operands, |
| 3171 | F->getParent()->getDataLayout(), TLI, Call); |
| 3172 | |
| 3173 | |
| 3174 | |
| 3175 | |
| 3176 | return ConstantFoldScalarCall(Name, F->getIntrinsicID(), Ty, Operands, TLI, |
| 3177 | Call); |
| 3178 | } |
| 3179 | |
| 3180 | bool llvm::isMathLibCallNoop(const CallBase *Call, |
| 3181 | const TargetLibraryInfo *TLI) { |
| 3182 | |
| 3183 | |
| 3184 | if (Call->isNoBuiltin() || Call->isStrictFP()) |
| 3185 | return false; |
| 3186 | Function *F = Call->getCalledFunction(); |
| 3187 | if (!F) |
| 3188 | return false; |
| 3189 | |
| 3190 | LibFunc Func; |
| 3191 | if (!TLI || !TLI->getLibFunc(*F, Func)) |
| 3192 | return false; |
| 3193 | |
| 3194 | if (Call->getNumArgOperands() == 1) { |
| 3195 | if (ConstantFP *OpC = dyn_cast<ConstantFP>(Call->getArgOperand(0))) { |
| 3196 | const APFloat &Op = OpC->getValueAPF(); |
| 3197 | switch (Func) { |
| 3198 | case LibFunc_logl: |
| 3199 | case LibFunc_log: |
| 3200 | case LibFunc_logf: |
| 3201 | case LibFunc_log2l: |
| 3202 | case LibFunc_log2: |
| 3203 | case LibFunc_log2f: |
| 3204 | case LibFunc_log10l: |
| 3205 | case LibFunc_log10: |
| 3206 | case LibFunc_log10f: |
| 3207 | return Op.isNaN() || (!Op.isZero() && !Op.isNegative()); |
| 3208 | |
| 3209 | case LibFunc_expl: |
| 3210 | case LibFunc_exp: |
| 3211 | case LibFunc_expf: |
| 3212 | |
| 3213 | if (OpC->getType()->isDoubleTy()) |
| 3214 | return !(Op < APFloat(-745.0) || Op > APFloat(709.0)); |
| 3215 | if (OpC->getType()->isFloatTy()) |
| 3216 | return !(Op < APFloat(-103.0f) || Op > APFloat(88.0f)); |
| 3217 | break; |
| 3218 | |
| 3219 | case LibFunc_exp2l: |
| 3220 | case LibFunc_exp2: |
| 3221 | case LibFunc_exp2f: |
| 3222 | |
| 3223 | if (OpC->getType()->isDoubleTy()) |
| 3224 | return !(Op < APFloat(-1074.0) || Op > APFloat(1023.0)); |
| 3225 | if (OpC->getType()->isFloatTy()) |
| 3226 | return !(Op < APFloat(-149.0f) || Op > APFloat(127.0f)); |
| 3227 | break; |
| 3228 | |
| 3229 | case LibFunc_sinl: |
| 3230 | case LibFunc_sin: |
| 3231 | case LibFunc_sinf: |
| 3232 | case LibFunc_cosl: |
| 3233 | case LibFunc_cos: |
| 3234 | case LibFunc_cosf: |
| 3235 | return !Op.isInfinity(); |
| 3236 | |
| 3237 | case LibFunc_tanl: |
| 3238 | case LibFunc_tan: |
| 3239 | case LibFunc_tanf: { |
| 3240 | |
| 3241 | |
| 3242 | Type *Ty = OpC->getType(); |
| 3243 | if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) |
| 3244 | return ConstantFoldFP(tan, OpC->getValueAPF(), Ty) != nullptr; |
| 3245 | break; |
| 3246 | } |
| 3247 | |
| 3248 | case LibFunc_asinl: |
| 3249 | case LibFunc_asin: |
| 3250 | case LibFunc_asinf: |
| 3251 | case LibFunc_acosl: |
| 3252 | case LibFunc_acos: |
| 3253 | case LibFunc_acosf: |
| 3254 | return !(Op < APFloat(Op.getSemantics(), "-1") || |
| 3255 | Op > APFloat(Op.getSemantics(), "1")); |
| 3256 | |
| 3257 | case LibFunc_sinh: |
| 3258 | case LibFunc_cosh: |
| 3259 | case LibFunc_sinhf: |
| 3260 | case LibFunc_coshf: |
| 3261 | case LibFunc_sinhl: |
| 3262 | case LibFunc_coshl: |
| 3263 | |
| 3264 | if (OpC->getType()->isDoubleTy()) |
| 3265 | return !(Op < APFloat(-710.0) || Op > APFloat(710.0)); |
| 3266 | if (OpC->getType()->isFloatTy()) |
| 3267 | return !(Op < APFloat(-89.0f) || Op > APFloat(89.0f)); |
| 3268 | break; |
| 3269 | |
| 3270 | case LibFunc_sqrtl: |
| 3271 | case LibFunc_sqrt: |
| 3272 | case LibFunc_sqrtf: |
| 3273 | return Op.isNaN() || Op.isZero() || !Op.isNegative(); |
| 3274 | |
| 3275 | |
| 3276 | |
| 3277 | default: |
| 3278 | break; |
| 3279 | } |
| 3280 | } |
| 3281 | } |
| 3282 | |
| 3283 | if (Call->getNumArgOperands() == 2) { |
| 3284 | ConstantFP *Op0C = dyn_cast<ConstantFP>(Call->getArgOperand(0)); |
| 3285 | ConstantFP *Op1C = dyn_cast<ConstantFP>(Call->getArgOperand(1)); |
| 3286 | if (Op0C && Op1C) { |
| 3287 | const APFloat &Op0 = Op0C->getValueAPF(); |
| 3288 | const APFloat &Op1 = Op1C->getValueAPF(); |
| 3289 | |
| 3290 | switch (Func) { |
| 3291 | case LibFunc_powl: |
| 3292 | case LibFunc_pow: |
| 3293 | case LibFunc_powf: { |
| 3294 | |
| 3295 | |
| 3296 | Type *Ty = Op0C->getType(); |
| 3297 | if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) { |
| 3298 | if (Ty == Op1C->getType()) |
| 3299 | return ConstantFoldBinaryFP(pow, Op0, Op1, Ty) != nullptr; |
| 3300 | } |
| 3301 | break; |
| 3302 | } |
| 3303 | |
| 3304 | case LibFunc_fmodl: |
| 3305 | case LibFunc_fmod: |
| 3306 | case LibFunc_fmodf: |
| 3307 | case LibFunc_remainderl: |
| 3308 | case LibFunc_remainder: |
| 3309 | case LibFunc_remainderf: |
| 3310 | return Op0.isNaN() || Op1.isNaN() || |
| 3311 | (!Op0.isInfinity() && !Op1.isZero()); |
| 3312 | |
| 3313 | default: |
| 3314 | break; |
| 3315 | } |
| 3316 | } |
| 3317 | } |
| 3318 | |
| 3319 | return false; |
| 3320 | } |
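// A minimal standalone sketch of the double-precision range tests used above, not taken
// from the analyzed sources; only the bounds that appear in the listing are reused.
bool expFoldIsNoop(double X)      { return !(X < -745.0  || X > 709.0);  }  // exp()
bool exp2FoldIsNoop(double X)     { return !(X < -1074.0 || X > 1023.0); }  // exp2()
bool sinhCoshFoldIsNoop(double X) { return !(X < -710.0  || X > 710.0);  }  // sinh()/cosh()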
| 3321 | |
| 3322 | void TargetFolder::anchor() {} |
| 1 | |
| 2 | |
| 3 | |
| 4 | |
| 5 | |
| 6 | |
| 7 | |
| 8 | |
| 9 | |
| 10 | |
| 11 | |
| 12 | |
| 13 | |
| 14 | |
| 15 | #ifndef LLVM_ADT_APINT_H |
| 16 | #define LLVM_ADT_APINT_H |
| 17 | |
| 18 | #include "llvm/Support/Compiler.h" |
| 19 | #include "llvm/Support/MathExtras.h" |
| 20 | #include <cassert> |
| 21 | #include <climits> |
| 22 | #include <cstring> |
| 23 | #include <utility> |
| 24 | |
| 25 | namespace llvm { |
| 26 | class FoldingSetNodeID; |
| 27 | class StringRef; |
| 28 | class hash_code; |
| 29 | class raw_ostream; |
| 30 | |
| 31 | template <typename T> class SmallVectorImpl; |
| 32 | template <typename T> class ArrayRef; |
| 33 | template <typename T> class Optional; |
| 34 | template <typename T> struct DenseMapInfo; |
| 35 | |
| 36 | class APInt; |
| 37 | |
| 38 | inline APInt operator-(APInt); |
| 39 | |
| 40 | |
| 41 | |
| 42 | |
| 43 | |
| 44 | |
| 45 | |
| 46 | |
| 47 | |
| 48 | |
| 49 | |
| 50 | |
| 51 | |
| 52 | |
| 53 | |
| 54 | |
| 55 | |
| 56 | |
| 57 | |
| 58 | |
| 59 | |
| 60 | |
| 61 | |
| 62 | |
| 63 | |
| 64 | |
| 65 | |
| 66 | |
| 67 | |
| 68 | |
| 69 | |
| 70 | class LLVM_NODISCARD APInt { |
| 71 | public: |
| 72 | typedef uint64_t WordType; |
| 73 | |
| 74 | |
| 75 | enum : unsigned { |
| 76 | |
| 77 | APINT_WORD_SIZE = sizeof(WordType), |
| 78 | |
| 79 | APINT_BITS_PER_WORD = APINT_WORD_SIZE * CHAR_BIT |
| 80 | }; |
| 81 | |
| 82 | enum class Rounding { |
| 83 | DOWN, |
| 84 | TOWARD_ZERO, |
| 85 | UP, |
| 86 | }; |
| 87 | |
| 88 | static constexpr WordType WORDTYPE_MAX = ~WordType(0); |
| 89 | |
| 90 | private: |
| 91 | |
| 92 | |
| 93 | union { |
| 94 | uint64_t VAL; |
| 95 | uint64_t *pVal; |
| 96 | } U; |
| 97 | |
| 98 | unsigned BitWidth; |
| 99 | |
| 100 | friend struct DenseMapInfo<APInt>; |
| 101 | |
| 102 | friend class APSInt; |
| 103 | |
| 104 | |
| 105 | |
| 106 | |
| 107 | |
| 108 | APInt(uint64_t *val, unsigned bits) : BitWidth(bits) { |
| 109 | U.pVal = val; |
| 110 | } |
| 111 | |
| 112 | |
| 113 | |
| 114 | |
| 115 | static unsigned whichWord(unsigned bitPosition) { |
| 116 | return bitPosition / APINT_BITS_PER_WORD; |
| 117 | } |
| 118 | |
| 119 | |
| 120 | |
| 121 | |
| 122 | |
| 123 | static unsigned whichBit(unsigned bitPosition) { |
| 124 | return bitPosition % APINT_BITS_PER_WORD; |
| 125 | } |
| 126 | |
| 127 | |
| 128 | |
| 129 | |
| 130 | |
| 131 | |
| 132 | |
| 133 | static uint64_t maskBit(unsigned bitPosition) { |
| 134 | return 1ULL << whichBit(bitPosition); |
| 135 | } |
| 136 | |
| 137 | |
| 138 | |
| 139 | |
| 140 | |
| 141 | |
| 142 | |
| 143 | APInt &clearUnusedBits() { |
| 144 | |
| 145 | unsigned WordBits = ((BitWidth-1) % APINT_BITS_PER_WORD) + 1; |
| 146 | |
| 147 | |
| 148 | uint64_t mask = WORDTYPE_MAX >> (APINT_BITS_PER_WORD - WordBits); |
| 149 | if (isSingleWord()) |
| 150 | U.VAL &= mask; |
| 151 | else |
| 152 | U.pVal[getNumWords() - 1] &= mask; |
| 153 | return *this; |
| 154 | } |
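// A minimal standalone sketch of the top-word mask clearUnusedBits() builds, not taken
// from the analyzed sources; the hard-coded 64-bit word size is an assumption about the host.
#include <cstdint>

uint64_t unusedBitsMask(unsigned BitWidth) {
  const unsigned BitsPerWord = 64;                          // stands in for APINT_BITS_PER_WORD
  unsigned WordBits = ((BitWidth - 1) % BitsPerWord) + 1;   // bits occupied in the top word
  return ~uint64_t(0) >> (BitsPerWord - WordBits);          // e.g. unusedBitsMask(20) == 0xFFFFF
}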
| 155 | |
| 156 | |
| 157 | |
| 158 | uint64_t getWord(unsigned bitPosition) const { |
| 159 | return isSingleWord() ? U.VAL : U.pVal[whichWord(bitPosition)]; |
| 160 | } |
| 161 | |
| 162 | |
| 163 | |
| 164 | |
| 165 | void reallocate(unsigned NewBitWidth); |
| 166 | |
| 167 | |
| 168 | |
| 169 | |
| 170 | |
| 171 | |
| 172 | |
| 173 | |
| 174 | |
| 175 | |
| 176 | |
| 177 | |
| 178 | |
| 179 | void fromString(unsigned numBits, StringRef str, uint8_t radix); |
| 180 | |
| 181 | |
| 182 | |
| 183 | |
| 184 | |
| 185 | |
| 186 | |
| 187 | static void divide(const WordType *LHS, unsigned lhsWords, |
| 188 | const WordType *RHS, unsigned rhsWords, WordType *Quotient, |
| 189 | WordType *Remainder); |
| 190 | |
| 191 | |
| 192 | void initSlowCase(uint64_t val, bool isSigned); |
| 193 | |
| 194 | |
| 195 | void initFromArray(ArrayRef<uint64_t> array); |
| 196 | |
| 197 | |
| 198 | void initSlowCase(const APInt &that); |
| 199 | |
| 200 | |
| 201 | void shlSlowCase(unsigned ShiftAmt); |
| 202 | |
| 203 | |
| 204 | void lshrSlowCase(unsigned ShiftAmt); |
| 205 | |
| 206 | |
| 207 | void ashrSlowCase(unsigned ShiftAmt); |
| 208 | |
| 209 | |
| 210 | void AssignSlowCase(const APInt &RHS); |
| 211 | |
| 212 | |
| 213 | bool EqualSlowCase(const APInt &RHS) const LLVM_READONLY; |
| 214 | |
| 215 | |
| 216 | unsigned countLeadingZerosSlowCase() const LLVM_READONLY; |
| 217 | |
| 218 | |
| 219 | unsigned countLeadingOnesSlowCase() const LLVM_READONLY; |
| 220 | |
| 221 | |
| 222 | unsigned countTrailingZerosSlowCase() const LLVM_READONLY; |
| 223 | |
| 224 | |
| 225 | unsigned countTrailingOnesSlowCase() const LLVM_READONLY; |
| 226 | |
| 227 | |
| 228 | unsigned countPopulationSlowCase() const LLVM_READONLY; |
| 229 | |
| 230 | |
| 231 | bool intersectsSlowCase(const APInt &RHS) const LLVM_READONLY; |
| 232 | |
| 233 | |
| 234 | bool isSubsetOfSlowCase(const APInt &RHS) const LLVM_READONLY; |
| 235 | |
| 236 | |
| 237 | void setBitsSlowCase(unsigned loBit, unsigned hiBit); |
| 238 | |
| 239 | |
| 240 | void flipAllBitsSlowCase(); |
| 241 | |
| 242 | |
| 243 | void AndAssignSlowCase(const APInt& RHS); |
| 244 | |
| 245 | |
| 246 | void OrAssignSlowCase(const APInt& RHS); |
| 247 | |
| 248 | |
| 249 | void XorAssignSlowCase(const APInt& RHS); |
| 250 | |
| 251 | |
| 252 | |
| 253 | int compare(const APInt &RHS) const LLVM_READONLY; |
| 254 | |
| 255 | |
| 256 | |
| 257 | int compareSigned(const APInt &RHS) const LLVM_READONLY; |
| 258 | |
| 259 | public: |
| 260 | |
| 261 | |
| 262 | |
| 263 | |
| 264 | |
| 265 | |
| 266 | |
| 267 | |
| 268 | |
| 269 | |
| 270 | |
| 271 | |
| 272 | |
| 273 | APInt(unsigned numBits, uint64_t val, bool isSigned = false) |
| 274 | : BitWidth(numBits) { |
| 275 | assert(BitWidth && "bitwidth too small"); |
| 276 | if (isSingleWord()) { |
| 277 | U.VAL = val; |
| 278 | clearUnusedBits(); |
| 279 | } else { |
| 280 | initSlowCase(val, isSigned); |
| 281 | } |
| 282 | } |
| 283 | |
| 284 | |
| 285 | |
| 286 | |
| 287 | |
| 288 | |
| 289 | |
| 290 | |
| 291 | APInt(unsigned numBits, ArrayRef<uint64_t> bigVal); |
| 292 | |
| 293 | |
| 294 | |
| 295 | |
| 296 | |
| 297 | |
| 298 | |
| 299 | |
| 300 | APInt(unsigned numBits, unsigned numWords, const uint64_t bigVal[]); |
| 301 | |
| 302 | |
| 303 | |
| 304 | |
| 305 | |
| 306 | |
| 307 | |
| 308 | |
| 309 | |
| 310 | |
| 311 | |
| 312 | |
| 313 | APInt(unsigned numBits, StringRef str, uint8_t radix); |
| 314 | |
| 315 | |
| 316 | |
| 317 | APInt(const APInt &that) : BitWidth(that.BitWidth) { |
| 318 | if (isSingleWord()) |
| 319 | U.VAL = that.U.VAL; |
| 320 | else |
| 321 | initSlowCase(that); |
| 322 | } |
| 323 | |
| 324 | |
| 325 | APInt(APInt &&that) : BitWidth(that.BitWidth) { |
| 326 | memcpy(&U, &that.U, sizeof(U)); |
| 327 | that.BitWidth = 0; |
| 328 | } |
| 329 | |
| 330 | |
| 331 | ~APInt() { |
| 332 | if (needsCleanup()) |
| 333 | delete[] U.pVal; |
| 334 | } |
| 335 | |
| 336 | |
| 337 | |
| 338 | |
| 339 | |
| 340 | |
| 341 | explicit APInt() : BitWidth(1) { U.VAL = 0; } |
| 342 | |
| 343 | |
| 344 | bool needsCleanup() const { return !isSingleWord(); } |
| 345 | |
| 346 | |
| 347 | |
| 348 | void Profile(FoldingSetNodeID &id) const; |
| 349 | |
| 350 | |
| 351 | |
| 352 | |
| 353 | |
| 354 | |
| 355 | |
| 356 | |
| 357 | bool isSingleWord() const { return BitWidth <= APINT_BITS_PER_WORD; } |
| 40 | | Assuming field 'BitWidth' is > APINT_BITS_PER_WORD | |
| 41 | | Returning zero, which participates in a condition later | |
| 50 | | Returning zero, which participates in a condition later | |

| 358 | |
| 359 | |
| 360 | |
| 361 | |
| 362 | |
| 363 | |
| 364 | bool isNegative() const { return (*this)[BitWidth - 1]; } |
| 365 | |
| 366 | |
| 367 | |
| 368 | |
| 369 | bool isNonNegative() const { return !isNegative(); } |
| 370 | |
| 371 | |
| 372 | |
| 373 | |
| 374 | |
| 375 | |
| 376 | bool isSignBitSet() const { return (*this)[BitWidth-1]; } |
| 377 | |
| 378 | |
| 379 | |
| 380 | |
| 381 | |
| 382 | |
| 383 | bool isSignBitClear() const { return !isSignBitSet(); } |
| 384 | |
| 385 | |
| 386 | |
| 387 | |
| 388 | |
| 389 | |
| 390 | |
| 391 | bool isStrictlyPositive() const { return isNonNegative() && !isNullValue(); } |
| 392 | |
| 393 | |
| 394 | |
| 395 | |
| 396 | bool isNonPositive() const { return !isStrictlyPositive(); } |
| 397 | |
| 398 | |
| 399 | |
| 400 | |
| 401 | bool isAllOnesValue() const { |
| 402 | if (isSingleWord()) |
| 403 | return U.VAL == WORDTYPE_MAX >> (APINT_BITS_PER_WORD - BitWidth); |
| 404 | return countTrailingOnesSlowCase() == BitWidth; |
| 405 | } |
| 406 | |
| 407 | |
| 408 | |
| 409 | |
| 410 | |
| 411 | bool isNullValue() const { return !*this; } |
| 48 | | Calling 'APInt::operator!' | |
| 55 | | Returning from 'APInt::operator!' | |
| 56 | | Returning zero, which participates in a condition later | |
| 412 | |
| 413 | |
| 414 | |
| 415 | |
| 416 | bool isOneValue() const { |
| 417 | if (isSingleWord()) |
| 39 | | Calling 'APInt::isSingleWord' | |
| 42 | | Returning from 'APInt::isSingleWord' | |
| 418 | return U.VAL == 1; |
| 419 | return countLeadingZerosSlowCase() == BitWidth - 1; |
| 44 | | Assuming the condition is false | |
| 45 | | Returning zero, which participates in a condition later | |
| 420 | } |
| 421 | |
| 422 | |
| 423 | |
| 424 | |
| 425 | |
| 426 | bool isMaxValue() const { return isAllOnesValue(); } |
| 427 | |
| 428 | |
| 429 | |
| 430 | |
| 431 | |
| 432 | bool isMaxSignedValue() const { |
| 433 | if (isSingleWord()) |
| 434 | return U.VAL == ((WordType(1) << (BitWidth - 1)) - 1); |
| 435 | return !isNegative() && countTrailingOnesSlowCase() == BitWidth - 1; |
| 436 | } |
| 437 | |
| 438 | |
| 439 | |
| 440 | |
| 441 | |
| 442 | bool isMinValue() const { return isNullValue(); } |
| 443 | |
| 444 | |
| 445 | |
| 446 | |
| 447 | |
| 448 | bool isMinSignedValue() const { |
| 449 | if (isSingleWord()) |
| 450 | return U.VAL == (WordType(1) << (BitWidth - 1)); |
| 451 | return isNegative() && countTrailingZerosSlowCase() == BitWidth - 1; |
| 452 | } |
| 453 | |
| 454 | |
| 455 | bool isIntN(unsigned N) const { |
| 456 | assert(N && "N == 0 ???"); |
| 457 | return getActiveBits() <= N; |
| 458 | } |
| 459 | |
| 460 | |
| 461 | bool isSignedIntN(unsigned N) const { |
| 462 | assert(N && "N == 0 ???"); |
| 463 | return getMinSignedBits() <= N; |
| 464 | } |
| 465 | |
| 466 | |
| 467 | |
| 468 | |
| 469 | bool isPowerOf2() const { |
| 470 | if (isSingleWord()) |
| 471 | return isPowerOf2_64(U.VAL); |
| 472 | return countPopulationSlowCase() == 1; |
| 473 | } |
| 474 | |
| 475 | |
| 476 | |
| 477 | |
| 478 | bool isSignMask() const { return isMinSignedValue(); } |
| 479 | |
| 480 | |
| 481 | |
| 482 | |
| 483 | bool getBoolValue() const { return !!*this; } |
| 484 | |
| 485 | |
| 486 | |
| 487 | uint64_t getLimitedValue(uint64_t Limit = UINT64_MAX) const { |
| 488 | return ugt(Limit) ? Limit : getZExtValue(); |
| 489 | } |
| 490 | |
| 491 | |
| 492 | |
| 493 | |
| 494 | |
| 495 | |
| 496 | bool isSplat(unsigned SplatSizeInBits) const; |
| 497 | |
| 498 | |
| 499 | |
| 500 | bool isMask(unsigned numBits) const { |
| 501 | assert(numBits != 0 && "numBits must be non-zero"); |
| 502 | assert(numBits <= BitWidth && "numBits out of range"); |
| 503 | if (isSingleWord()) |
| 504 | return U.VAL == (WORDTYPE_MAX >> (APINT_BITS_PER_WORD - numBits)); |
| 505 | unsigned Ones = countTrailingOnesSlowCase(); |
| 506 | return (numBits == Ones) && |
| 507 | ((Ones + countLeadingZerosSlowCase()) == BitWidth); |
| 508 | } |
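// A minimal standalone sketch of the single-word test isMask(numBits) reduces to, not taken
// from the analyzed sources; it assumes a 64-bit word and 1 <= NumBits <= 64 as asserted above.
#include <cstdint>

bool isLowBitMask64(uint64_t V, unsigned NumBits) {
  return V == (~uint64_t(0) >> (64 - NumBits));   // e.g. NumBits == 12 matches only 0xFFF
}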
| 509 | |
| 510 | |
| 511 | |
| 512 | |
| 513 | bool isMask() const { |
| 514 | if (isSingleWord()) |
| 515 | return isMask_64(U.VAL); |
| 516 | unsigned Ones = countTrailingOnesSlowCase(); |
| 517 | return (Ones > 0) && ((Ones + countLeadingZerosSlowCase()) == BitWidth); |
| 518 | } |
| 519 | |
| 520 | |
| 521 | |
| 522 | bool isShiftedMask() const { |
| 523 | if (isSingleWord()) |
| 524 | return isShiftedMask_64(U.VAL); |
| 525 | unsigned Ones = countPopulationSlowCase(); |
| 526 | unsigned LeadZ = countLeadingZerosSlowCase(); |
| 527 | return (Ones + LeadZ + countTrailingZeros()) == BitWidth; |
| 528 | } |
| 529 | |
| 530 | |
| 531 | |
| 532 | |
| 533 | |
| 534 | |
| 535 | static APInt getMaxValue(unsigned numBits) { |
| 536 | return getAllOnesValue(numBits); |
| 537 | } |
| 538 | |
| 539 | |
| 540 | static APInt getSignedMaxValue(unsigned numBits) { |
| 541 | APInt API = getAllOnesValue(numBits); |
| 542 | API.clearBit(numBits - 1); |
| 543 | return API; |
| 544 | } |
| 545 | |
| 546 | |
| 547 | static APInt getMinValue(unsigned numBits) { return APInt(numBits, 0); } |
| 548 | |
| 549 | |
| 550 | static APInt getSignedMinValue(unsigned numBits) { |
| 551 | APInt API(numBits, 0); |
| 552 | API.setBit(numBits - 1); |
| 553 | return API; |
| 554 | } |
| 555 | |
| 556 | |
| 557 | |
| 558 | |
| 559 | |
| 560 | static APInt getSignMask(unsigned BitWidth) { |
| 561 | return getSignedMinValue(BitWidth); |
| 562 | } |
| 563 | |
| 564 | |
| 565 | |
| 566 | |
| 567 | static APInt getAllOnesValue(unsigned numBits) { |
| 568 | return APInt(numBits, WORDTYPE_MAX, true); |
| 569 | } |
| 570 | |
| 571 | |
| 572 | |
| 573 | |
| 574 | static APInt getNullValue(unsigned numBits) { return APInt(numBits, 0); } |
| 575 | |
| 576 | |
| 577 | |
| 578 | |
| 579 | |
| 580 | |
| 581 | |
| 582 | APInt getHiBits(unsigned numBits) const; |
| 583 | |
| 584 | |
| 585 | |
| 586 | |
| 587 | |
| 588 | |
| 589 | |
| 590 | APInt getLoBits(unsigned numBits) const; |
| 591 | |
| 592 | |
| 593 | static APInt getOneBitSet(unsigned numBits, unsigned BitNo) { |
| 594 | APInt Res(numBits, 0); |
| 595 | Res.setBit(BitNo); |
| 596 | return Res; |
| 597 | } |
| 598 | |
| 599 | |
| 600 | |
| 601 | |
| 602 | |
| 603 | |
| 604 | |
| 605 | |
| 606 | |
| 607 | |
| 608 | |
| 609 | |
| 610 | |
| 611 | |
| 612 | static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit) { |
| 613 | assert(loBit <= hiBit && "loBit greater than hiBit"); |
| 614 | APInt Res(numBits, 0); |
| 615 | Res.setBits(loBit, hiBit); |
| 616 | return Res; |
| 617 | } |
| 618 | |
| 619 | |
| 620 | |
| 621 | |
| 622 | |
| 623 | |
| 624 | |
| 625 | static APInt getBitsSetWithWrap(unsigned numBits, unsigned loBit, |
| 626 | unsigned hiBit) { |
| 627 | APInt Res(numBits, 0); |
| 628 | Res.setBitsWithWrap(loBit, hiBit); |
| 629 | return Res; |
| 630 | } |
| 631 | |
| 632 | |
| 633 | |
| 634 | |
| 635 | |
| 636 | |
| 637 | |
| 638 | |
| 639 | |
| 640 | |
| 641 | |
| 642 | |
| 643 | static APInt getBitsSetFrom(unsigned numBits, unsigned loBit) { |
| 644 | APInt Res(numBits, 0); |
| 645 | Res.setBitsFrom(loBit); |
| 646 | return Res; |
| 647 | } |
| 648 | |
| 649 | |
| 650 | |
| 651 | |
| 652 | |
| 653 | |
| 654 | |
| 655 | static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet) { |
| 656 | APInt Res(numBits, 0); |
| 657 | Res.setHighBits(hiBitsSet); |
| 658 | return Res; |
| 659 | } |
| 660 | |
| 661 | |
| 662 | |
| 663 | |
| 664 | |
| 665 | |
| 666 | |
| 667 | static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet) { |
| 668 | APInt Res(numBits, 0); |
| 669 | Res.setLowBits(loBitsSet); |
| 670 | return Res; |
| 671 | } |
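// A minimal standalone sketch of sample values the bit-set factories above produce, not taken
// from the analyzed sources; the 16-bit width and the bit positions are my own choices.
#include "llvm/ADT/APInt.h"
using namespace llvm;

void bitSetFactoryExamples() {
  APInt A = APInt::getOneBitSet(16, 3);      // 0x0008
  APInt B = APInt::getBitsSet(16, 4, 8);     // bits [4, 8) -> 0x00F0
  APInt C = APInt::getHighBitsSet(16, 4);    // 0xF000
  APInt D = APInt::getLowBitsSet(16, 4);     // 0x000F
  (void)A; (void)B; (void)C; (void)D;
}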
| 672 | |
| 673 | |
| 674 | static APInt getSplat(unsigned NewLen, const APInt &V); |
| 675 | |
| 676 | |
| 677 | |
| 678 | static bool isSameValue(const APInt &I1, const APInt &I2) { |
| 679 | if (I1.getBitWidth() == I2.getBitWidth()) |
| 680 | return I1 == I2; |
| 681 | |
| 682 | if (I1.getBitWidth() > I2.getBitWidth()) |
| 683 | return I1 == I2.zext(I1.getBitWidth()); |
| 684 | |
| 685 | return I1.zext(I2.getBitWidth()) == I2; |
| 686 | } |
| 687 | |
| 688 | |
| 689 | friend hash_code hash_value(const APInt &Arg); |
| 690 | |
| 691 | |
| 692 | |
| 693 | |
| 694 | const uint64_t *getRawData() const { |
| 695 | if (isSingleWord()) |
| 696 | return &U.VAL; |
| 697 | return &U.pVal[0]; |
| 698 | } |
| 699 | |
| 700 | |
| 701 | |
| 702 | |
| 703 | |
| 704 | |
| 705 | |
| 706 | |
| 707 | |
| 708 | |
| 709 | const APInt operator++(int) { |
| 710 | APInt API(*this); |
| 711 | ++(*this); |
| 712 | return API; |
| 713 | } |
| 714 | |
| 715 | |
| 716 | |
| 717 | |
| 718 | APInt &operator++(); |
| 719 | |
| 720 | |
| 721 | |
| 722 | |
| 723 | |
| 724 | |
| 725 | const APInt operator--(int) { |
| 726 | APInt API(*this); |
| 727 | --(*this); |
| 728 | return API; |
| 729 | } |
| 730 | |
| 731 | |
| 732 | |
| 733 | |
| 734 | APInt &operator--(); |
| 735 | |
| 736 | |
| 737 | |
| 738 | |
| 739 | |
| 740 | |
| 741 | bool operator!() const { |
| 742 | if (isSingleWord()) |
| 49 | | Calling 'APInt::isSingleWord' | |
| 51 | | Returning from 'APInt::isSingleWord' | |
| 743 | return U.VAL == 0; |
| 744 | return countLeadingZerosSlowCase() == BitWidth; |
| 53 | | Assuming the condition is false | |
| 54 | | Returning zero, which participates in a condition later | |
| 745 | } |
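// A minimal standalone sketch of the zero test the analyzer path above walks through,
// not taken from the analyzed sources; it restates operator! using the public interface:
// a value is null exactly when every one of its bits is a leading zero.
#include "llvm/ADT/APInt.h"

bool isZeroWide(const llvm::APInt &V) {
  return V.countLeadingZeros() == V.getBitWidth();
}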
| 746 | |
| 747 | |
| 748 | |
| 749 | |
| 750 | |
| 751 | |
| 752 | |
| 753 | |
| 754 | APInt &operator=(const APInt &RHS) { |
| 755 | |
| 756 | if (isSingleWord() && RHS.isSingleWord()) { |
| 757 | U.VAL = RHS.U.VAL; |
| 758 | BitWidth = RHS.BitWidth; |
| 759 | return clearUnusedBits(); |
| 760 | } |
| 761 | |
| 762 | AssignSlowCase(RHS); |
| 763 | return *this; |
| 764 | } |
| 765 | |
| 766 | |
| 767 | APInt &operator=(APInt &&that) { |
| 768 | #ifdef EXPENSIVE_CHECKS |
| 769 | |
| 770 | if (this == &that) |
| 771 | return *this; |
| 772 | #endif |
| 773 | assert(this != &that && "Self-move not supported"); |
| 774 | if (!isSingleWord()) |
| 775 | delete[] U.pVal; |
| 776 | |
| 777 | |
| 778 | |
| 779 | memcpy(&U, &that.U, sizeof(U)); |
| 780 | |
| 781 | BitWidth = that.BitWidth; |
| 782 | that.BitWidth = 0; |
| 783 | |
| 784 | return *this; |
| 785 | } |
| 786 | |
| 787 | |
| 788 | |
| 789 | |
| 790 | |
| 791 | |
| 792 | |
| 793 | |
| 794 | APInt &operator=(uint64_t RHS) { |
| 795 | if (isSingleWord()) { |
| 796 | U.VAL = RHS; |
| 797 | return clearUnusedBits(); |
| 798 | } |
| 799 | U.pVal[0] = RHS; |
| 800 | memset(U.pVal + 1, 0, (getNumWords() - 1) * APINT_WORD_SIZE); |
| 801 | return *this; |
| 802 | } |
| 803 | |
| 804 | |
| 805 | |
| 806 | |
| 807 | |
| 808 | |
| 809 | |
| 810 | APInt &operator&=(const APInt &RHS) { |
| 811 | assert(BitWidth == RHS.BitWidth && "Bit widths must be the same"); |
| 812 | if (isSingleWord()) |
| 813 | U.VAL &= RHS.U.VAL; |
| 814 | else |
| 815 | AndAssignSlowCase(RHS); |
| 816 | return *this; |
| 817 | } |
| 818 | |
| 819 | |
| 820 | |
| 821 | |
| 822 | |
| 823 | |
| 824 | APInt &operator&=(uint64_t RHS) { |
| 825 | if (isSingleWord()) { |
| 826 | U.VAL &= RHS; |
| 827 | return *this; |
| 828 | } |
| 829 | U.pVal[0] &= RHS; |
| 830 | memset(U.pVal+1, 0, (getNumWords() - 1) * APINT_WORD_SIZE); |
| 831 | return *this; |
| 832 | } |
| 833 | |
| 834 | |
| 835 | |
| 836 | |
| 837 | |
| 838 | |
| 839 | |
| 840 | APInt &operator|=(const APInt &RHS) { |
| 841 | assert(BitWidth == RHS.BitWidth && "Bit widths must be the same"); |
| 842 | if (isSingleWord()) |
| 843 | U.VAL |= RHS.U.VAL; |
| 844 | else |
| 845 | OrAssignSlowCase(RHS); |
| 846 | return *this; |
| 847 | } |
| 848 | |
| 849 | |
| 850 | |
| 851 | |
| 852 | |
| 853 | |
| 854 | APInt &operator|=(uint64_t RHS) { |
| 855 | if (isSingleWord()) { |
| 856 | U.VAL |= RHS; |
| 857 | return clearUnusedBits(); |
| 858 | } |
| 859 | U.pVal[0] |= RHS; |
| 860 | return *this; |
| 861 | } |
| 862 | |
| 863 | |
| 864 | |
| 865 | |
| 866 | |
| 867 | |
| 868 | |
| 869 | APInt &operator^=(const APInt &RHS) { |
| 870 | assert(BitWidth == RHS.BitWidth && "Bit widths must be the same"); |
| 871 | if (isSingleWord()) |
| 872 | U.VAL ^= RHS.U.VAL; |
| 873 | else |
| 874 | XorAssignSlowCase(RHS); |
| 875 | return *this; |
| 876 | } |
| 877 | |
| 878 | |
| 879 | |
| 880 | |
| 881 | |
| 882 | |
| 883 | APInt &operator^=(uint64_t RHS) { |
| 884 | if (isSingleWord()) { |
| 885 | U.VAL ^= RHS; |
| 886 | return clearUnusedBits(); |
| 887 | } |
| 888 | U.pVal[0] ^= RHS; |
| 889 | return *this; |
| 890 | } |
| 891 | |
| 892 | |
| 893 | |
| 894 | |
| 895 | |
| 896 | |
| 897 | APInt &operator*=(const APInt &RHS); |
| 898 | APInt &operator*=(uint64_t RHS); |
| 899 | |
| 900 | |
| 901 | |
| 902 | |
| 903 | |
| 904 | |
| 905 | APInt &operator+=(const APInt &RHS); |
| 906 | APInt &operator+=(uint64_t RHS); |
| 907 | |
| 908 | |
| 909 | |
| 910 | |
| 911 | |
| 912 | |
| 913 | APInt &operator-=(const APInt &RHS); |
| 914 | APInt &operator-=(uint64_t RHS); |
| 915 | |
| 916 | |
| 917 | |
| 918 | |
| 919 | |
| 920 | |
| 921 | APInt &operator<<=(unsigned ShiftAmt) { |
| 922 | assert(ShiftAmt <= BitWidth && "Invalid shift amount"); |
| 923 | if (isSingleWord()) { |
| 924 | if (ShiftAmt == BitWidth) |
| 925 | U.VAL = 0; |
| 926 | else |
| 927 | U.VAL <<= ShiftAmt; |
| 928 | return clearUnusedBits(); |
| 929 | } |
| 930 | shlSlowCase(ShiftAmt); |
| 931 | return *this; |
| 932 | } |
| 933 | |
| 934 | |
| 935 | |
| 936 | |
| 937 | |
| 938 | |
| 939 | APInt &operator<<=(const APInt &ShiftAmt); |
| 940 | |
| 941 | |
| 942 | |
| 943 | |
| 944 | |
| 945 | |
| 946 | |
| 947 | |
| 948 | APInt operator*(const APInt &RHS) const; |
| 949 | |
| 950 | |
| 951 | |
| 952 | |
| 953 | APInt operator<<(unsigned Bits) const { return shl(Bits); } |
| 954 | |
| 955 | |
| 956 | |
| 957 | |
| 958 | APInt operator<<(const APInt &Bits) const { return shl(Bits); } |
| 959 | |
| 960 | |
| 961 | |
| 962 | |
| 963 | APInt ashr(unsigned ShiftAmt) const { |
| 964 | APInt R(*this); |
| 965 | R.ashrInPlace(ShiftAmt); |
| 966 | return R; |
| 967 | } |
| 968 | |
| 969 | |
| 970 | void ashrInPlace(unsigned ShiftAmt) { |
| 971 | assert(ShiftAmt <= BitWidth && "Invalid shift amount"); |
| 972 | if (isSingleWord()) { |
| 973 | int64_t SExtVAL = SignExtend64(U.VAL, BitWidth); |
| 974 | if (ShiftAmt == BitWidth) |
| 975 | U.VAL = SExtVAL >> (APINT_BITS_PER_WORD - 1); |
| 976 | else |
| 977 | U.VAL = SExtVAL >> ShiftAmt; |
| 978 | clearUnusedBits(); |
| 979 | return; |
| 980 | } |
| 981 | ashrSlowCase(ShiftAmt); |
| 982 | } |
| 983 | |
| 984 | |
| 985 | |
| 986 | |
| 987 | APInt lshr(unsigned shiftAmt) const { |
| 988 | APInt R(*this); |
| 989 | R.lshrInPlace(shiftAmt); |
| 990 | return R; |
| 991 | } |
| 992 | |
| 993 | |
| 994 | void lshrInPlace(unsigned ShiftAmt) { |
| 995 | assert(ShiftAmt <= BitWidth && "Invalid shift amount"); |
| 996 | if (isSingleWord()) { |
| 997 | if (ShiftAmt == BitWidth) |
| 998 | U.VAL = 0; |
| 999 | else |
| 1000 | U.VAL >>= ShiftAmt; |
| 1001 | return; |
| 1002 | } |
| 1003 | lshrSlowCase(ShiftAmt); |
| 1004 | } |
| 1005 | |
| 1006 | |
| 1007 | |
| 1008 | |
| 1009 | APInt shl(unsigned shiftAmt) const { |
| 1010 | APInt R(*this); |
| 1011 | R <<= shiftAmt; |
| 1012 | return R; |
| 1013 | } |
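// A minimal standalone sketch contrasting the logical and arithmetic right shifts declared
// above, not taken from the analyzed sources; the 8-bit value is chosen for illustration.
#include "llvm/ADT/APInt.h"
using namespace llvm;

void shiftExample() {
  APInt V(8, 0x90);        // 1001'0000, negative when read as signed
  APInt L = V.lshr(4);     // 0000'1001 == 0x09, zero-filled
  APInt A = V.ashr(4);     // 1111'1001 == 0xF9, sign-filled
  (void)L; (void)A;
}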
| 1014 | |
| 1015 | |
| 1016 | APInt rotl(unsigned rotateAmt) const; |
| 1017 | |
| 1018 | |
| 1019 | APInt rotr(unsigned rotateAmt) const; |
| 1020 | |
| 1021 | |
| 1022 | |
| 1023 | |
| 1024 | APInt ashr(const APInt &ShiftAmt) const { |
| 1025 | APInt R(*this); |
| 1026 | R.ashrInPlace(ShiftAmt); |
| 1027 | return R; |
| 1028 | } |
| 1029 | |
| 1030 | |
| 1031 | void ashrInPlace(const APInt &shiftAmt); |
| 1032 | |
| 1033 | |
| 1034 | |
| 1035 | |
| 1036 | APInt lshr(const APInt &ShiftAmt) const { |
| 1037 | APInt R(*this); |
| 1038 | R.lshrInPlace(ShiftAmt); |
| 1039 | return R; |
| 1040 | } |
| 1041 | |
| 1042 | |
| 1043 | void lshrInPlace(const APInt &ShiftAmt); |
| 1044 | |
| 1045 | |
| 1046 | |
| 1047 | |
| 1048 | APInt shl(const APInt &ShiftAmt) const { |
| 1049 | APInt R(*this); |
| 1050 | R <<= ShiftAmt; |
| 1051 | return R; |
| 1052 | } |
| 1053 | |
| 1054 | |
| 1055 | APInt rotl(const APInt &rotateAmt) const; |
| 1056 | |
| 1057 | |
| 1058 | APInt rotr(const APInt &rotateAmt) const; |
| 1059 | |
| 1060 | |
| 1061 | |
| 1062 | |
| 1063 | |
| 1064 | |
| 1065 | |
| 1066 | |
| 1067 | APInt udiv(const APInt &RHS) const; |
| 1068 | APInt udiv(uint64_t RHS) const; |
| 1069 | |
| 1070 | |
| 1071 | |
| 1072 | |
| 1073 | |
| 1074 | |
| 1075 | APInt sdiv(const APInt &RHS) const; |
| 1076 | APInt sdiv(int64_t RHS) const; |
| 1077 | |
| 1078 | |
| 1079 | |
| 1080 | |
| 1081 | |
| 1082 | |
| 1083 | |
| 1084 | |
| 1085 | |
| 1086 | |
| 1087 | APInt urem(const APInt &RHS) const; |
| 1088 | uint64_t urem(uint64_t RHS) const; |
| 1089 | |
| 1090 | |
| 1091 | |
| 1092 | |
| 1093 | APInt srem(const APInt &RHS) const; |
| 1094 | int64_t srem(int64_t RHS) const; |
| 1095 | |
| 1096 | |
| 1097 | |
| 1098 | |
| 1099 | |
| 1100 | |
| 1101 | |
| 1102 | |
| 1103 | static void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, |
| 1104 | APInt &Remainder); |
| 1105 | static void udivrem(const APInt &LHS, uint64_t RHS, APInt &Quotient, |
| 1106 | uint64_t &Remainder); |
| 1107 | |
| 1108 | static void sdivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, |
| 1109 | APInt &Remainder); |
| 1110 | static void sdivrem(const APInt &LHS, int64_t RHS, APInt &Quotient, |
| 1111 | int64_t &Remainder); |
| 1112 | |
| 1113 | |
| 1114 | APInt sadd_ov(const APInt &RHS, bool &Overflow) const; |
| 1115 | APInt uadd_ov(const APInt &RHS, bool &Overflow) const; |
| 1116 | APInt ssub_ov(const APInt &RHS, bool &Overflow) const; |
| 1117 | APInt usub_ov(const APInt &RHS, bool &Overflow) const; |
| 1118 | APInt sdiv_ov(const APInt &RHS, bool &Overflow) const; |
| 1119 | APInt smul_ov(const APInt &RHS, bool &Overflow) const; |
| 1120 | APInt umul_ov(const APInt &RHS, bool &Overflow) const; |
| 1121 | APInt sshl_ov(const APInt &Amt, bool &Overflow) const; |
| 1122 | APInt ushl_ov(const APInt &Amt, bool &Overflow) const; |
| 1123 | |
| 1124 | |
| 1125 | APInt sadd_sat(const APInt &RHS) const; |
| 1126 | APInt uadd_sat(const APInt &RHS) const; |
| 1127 | APInt ssub_sat(const APInt &RHS) const; |
| 1128 | APInt usub_sat(const APInt &RHS) const; |
| 1129 | APInt smul_sat(const APInt &RHS) const; |
| 1130 | APInt umul_sat(const APInt &RHS) const; |
| 1131 | APInt sshl_sat(const APInt &RHS) const; |
| 1132 | APInt ushl_sat(const APInt &RHS) const; |
| 1133 | |
| 1134 | |
| 1135 | |
| 1136 | |
| 1137 | bool operator[](unsigned bitPosition) const { |
| 1138 | assert(bitPosition < getBitWidth() && "Bit position out of bounds!"); |
| 1139 | return (maskBit(bitPosition) & getWord(bitPosition)) != 0; |
| 1140 | } |
| 1141 | |
| 1142 | |
| 1143 | |
| 1144 | |
| 1145 | |
| 1146 | |
| 1147 | |
| 1148 | |
| 1149 | |
| 1150 | bool operator==(const APInt &RHS) const { |
| 1151 | assert(BitWidth == RHS.BitWidth && "Comparison requires equal bit widths"); |
| 1152 | if (isSingleWord()) |
| 1153 | return U.VAL == RHS.U.VAL; |
| 1154 | return EqualSlowCase(RHS); |
| 1155 | } |
| 1156 | |
| 1157 | |
| 1158 | |
| 1159 | |
| 1160 | |
| 1161 | |
| 1162 | |
| 1163 | bool operator==(uint64_t Val) const { |
| 1164 | return (isSingleWord() || getActiveBits() <= 64) && getZExtValue() == Val; |
| 1165 | } |
| 1166 | |
| 1167 | |
| 1168 | |
| 1169 | |
| 1170 | |
| 1171 | |
| 1172 | |
| 1173 | bool eq(const APInt &RHS) const { return (*this) == RHS; } |
| 1174 | |
| 1175 | |
| 1176 | |
| 1177 | |
| 1178 | |
| 1179 | |
| 1180 | |
| 1181 | bool operator!=(const APInt &RHS) const { return !((*this) == RHS); } |
| 1182 | |
| 1183 | |
| 1184 | |
| 1185 | |
| 1186 | |
| 1187 | |
| 1188 | |
| 1189 | bool operator!=(uint64_t Val) const { return !((*this) == Val); } |
| 1190 | |
| 1191 | |
| 1192 | |
| 1193 | |
| 1194 | |
| 1195 | |
| 1196 | |
| 1197 | bool ne(const APInt &RHS) const { return !((*this) == RHS); } |
| 1198 | |
| 1199 | |
| 1200 | |
| 1201 | |
| 1202 | |
| 1203 | |
| 1204 | |
| 1205 | bool ult(const APInt &RHS) const { return compare(RHS) < 0; } |
| 1206 | |
| 1207 | |
| 1208 | |
| 1209 | |
| 1210 | |
| 1211 | |
| 1212 | |
| 1213 | bool ult(uint64_t RHS) const { |
| 1214 | |
| 1215 | return (isSingleWord() || getActiveBits() <= 64) && getZExtValue() < RHS; |
| 1216 | } |
| 1217 | |
| 1218 | |
| 1219 | |
| 1220 | |
| 1221 | |
| 1222 | |
| 1223 | |
| 1224 | bool slt(const APInt &RHS) const { return compareSigned(RHS) < 0; } |
| 1225 | |
| 1226 | |
| 1227 | |
| 1228 | |
| 1229 | |
| 1230 | |
| 1231 | |
| 1232 | bool slt(int64_t RHS) const { |
| 1233 | return (!isSingleWord() && getMinSignedBits() > 64) ? isNegative() |
| 1234 | : getSExtValue() < RHS; |
| 1235 | } |
| 1236 | |
| 1237 | |
| 1238 | |
| 1239 | |
| 1240 | |
| 1241 | |
| 1242 | |
| 1243 | bool ule(const APInt &RHS) const { return compare(RHS) <= 0; } |
| 1244 | |
| 1245 | |
| 1246 | |
| 1247 | |
| 1248 | |
| 1249 | |
| 1250 | |
| 1251 | bool ule(uint64_t RHS) const { return !ugt(RHS); } |
| 1252 | |
| 1253 | |
| 1254 | |
| 1255 | |
| 1256 | |
| 1257 | |
| 1258 | |
| 1259 | bool sle(const APInt &RHS) const { return compareSigned(RHS) <= 0; } |
| 1260 | |
| 1261 | |
| 1262 | |
| 1263 | |
| 1264 | |
| 1265 | |
| 1266 | |
| 1267 | bool sle(uint64_t RHS) const { return !sgt(RHS); } |
| 1268 | |
| 1269 | |
| 1270 | |
| 1271 | |
| 1272 | |
| 1273 | |
| 1274 | |
| 1275 | bool ugt(const APInt &RHS) const { return !ule(RHS); } |
| 1276 | |
| 1277 | |
| 1278 | |
| 1279 | |
| 1280 | |
| 1281 | |
| 1282 | |
| 1283 | bool ugt(uint64_t RHS) const { |
| 1284 | |
| 1285 | return (!isSingleWord() && getActiveBits() > 64) || getZExtValue() > RHS; |
| 1286 | } |
| 1287 | |
| 1288 | |
| 1289 | |
| 1290 | |
| 1291 | |
| 1292 | |
| 1293 | |
| 1294 | bool sgt(const APInt &RHS) const { return !sle(RHS); } |
| 1295 | |
| 1296 | |
| 1297 | |
| 1298 | |
| 1299 | |
| 1300 | |
| 1301 | |
| 1302 | bool sgt(int64_t RHS) const { |
| 1303 | return (!isSingleWord() && getMinSignedBits() > 64) ? !isNegative() |
| 1304 | : getSExtValue() > RHS; |
| 1305 | } |
| 1306 | |
| 1307 | |
| 1308 | |
| 1309 | |
| 1310 | |
| 1311 | |
| 1312 | |
| 1313 | bool uge(const APInt &RHS) const { return !ult(RHS); } |
| 1314 | |
| 1315 | |
| 1316 | |
| 1317 | |
| 1318 | |
| 1319 | |
| 1320 | |
| 1321 | bool uge(uint64_t RHS) const { return !ult(RHS); } |
| 1322 | |
| 1323 | |
| 1324 | |
| 1325 | |
| 1326 | |
| 1327 | |
| 1328 | |
| 1329 | bool sge(const APInt &RHS) const { return !slt(RHS); } |
| 1330 | |
| 1331 | |
| 1332 | |
| 1333 | |
| 1334 | |
| 1335 | |
| 1336 | |
| 1337 | bool sge(int64_t RHS) const { return !slt(RHS); } |
| 1338 | |
| 1339 | |
| 1340 | |
| 1341 | bool intersects(const APInt &RHS) const { |
| 1342 | assert(BitWidth == RHS.BitWidth && "Bit widths must be the same"); |
| 1343 | if (isSingleWord()) |
| 1344 | return (U.VAL & RHS.U.VAL) != 0; |
| 1345 | return intersectsSlowCase(RHS); |
| 1346 | } |
| 1347 | |
| 1348 | |
| 1349 | bool isSubsetOf(const APInt &RHS) const { |
| 1350 | assert(BitWidth == RHS.BitWidth && "Bit widths must be the same"); |
| 1351 | if (isSingleWord()) |
| 1352 | return (U.VAL & ~RHS.U.VAL) == 0; |
| 1353 | return isSubsetOfSlowCase(RHS); |
| 1354 | } |
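// A minimal standalone sketch of the single-word forms of intersects() and isSubsetOf(),
// not taken from the analyzed sources; written out on plain 64-bit masks.
#include <cstdint>

bool masksIntersect(uint64_t A, uint64_t B) { return (A & B) != 0; }
bool maskIsSubsetOf(uint64_t A, uint64_t B) { return (A & ~B) == 0; }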
| 1355 | |
| 1356 | |
| 1357 | |
| 1358 | |
| 1359 | |
| 1360 | |
| 1361 | |
| 1362 | |
| 1363 | |
| 1364 | APInt trunc(unsigned width) const; |
| 1365 | |
| 1366 | |
| 1367 | |
| 1368 | |
| 1369 | |
| 1370 | APInt truncUSat(unsigned width) const; |
| 1371 | |
| 1372 | |
| 1373 | |
| 1374 | |
| 1375 | |
| 1376 | |
| 1377 | APInt truncSSat(unsigned width) const; |
| 1378 | |
| 1379 | |
| 1380 | |
| 1381 | |
| 1382 | |
| 1383 | |
| 1384 | |
| 1385 | APInt sext(unsigned width) const; |
| 1386 | |
| 1387 | |
| 1388 | |
| 1389 | |
| 1390 | |
| 1391 | |
| 1392 | APInt zext(unsigned width) const; |
| 1393 | |
| 1394 | |
| 1395 | |
| 1396 | |
| 1397 | |
| 1398 | APInt sextOrTrunc(unsigned width) const; |
| 1399 | |
| 1400 | |
| 1401 | |
| 1402 | |
| 1403 | |
| 1404 | APInt zextOrTrunc(unsigned width) const; |
| 1405 | |
| 1406 | |
| 1407 | |
| 1408 | |
| 1409 | |
| 1410 | APInt truncOrSelf(unsigned width) const; |
| 1411 | |
| 1412 | |
| 1413 | |
| 1414 | |
| 1415 | |
| 1416 | APInt sextOrSelf(unsigned width) const; |
| 1417 | |
| 1418 | |
| 1419 | |
| 1420 | |
| 1421 | |
| 1422 | APInt zextOrSelf(unsigned width) const; |
| 1423 | |
| 1424 | |
| 1425 | |
| 1426 | |
| 1427 | |
| 1428 | |
| 1429 | void setAllBits() { |
| 1430 | if (isSingleWord()) |
| 1431 | U.VAL = WORDTYPE_MAX; |
| 1432 | else |
| 1433 | |
| 1434 | memset(U.pVal, -1, getNumWords() * APINT_WORD_SIZE); |
| 1435 | |
| 1436 | clearUnusedBits(); |
| 1437 | } |
| 1438 | |
| 1439 | |
| 1440 | |
| 1441 | |
| 1442 | void setBit(unsigned BitPosition) { |
| 1443 | assert(BitPosition < BitWidth && "BitPosition out of range"); |
| 1444 | WordType Mask = maskBit(BitPosition); |
| 1445 | if (isSingleWord()) |
| 1446 | U.VAL |= Mask; |
| 1447 | else |
| 1448 | U.pVal[whichWord(BitPosition)] |= Mask; |
| 1449 | } |
| 1450 | |
| 1451 | |
| 1452 | void setSignBit() { |
| 1453 | setBit(BitWidth - 1); |
| 1454 | } |
| 1455 | |
| 1456 | |
| 1457 | void setBitVal(unsigned BitPosition, bool BitValue) { |
| 1458 | if (BitValue) |
| 1459 | setBit(BitPosition); |
| 1460 | else |
| 1461 | clearBit(BitPosition); |
| 1462 | } |
| 1463 | |
| 1464 | |
| 1465 | |
| 1466 | |
| 1467 | |
| 1468 | void setBitsWithWrap(unsigned loBit, unsigned hiBit) { |
| 1469 | assert(hiBit <= BitWidth && "hiBit out of range"); |
| 1470 | assert(loBit <= BitWidth && "loBit out of range"); |
| 1471 | if (loBit < hiBit) { |
| 1472 | setBits(loBit, hiBit); |
| 1473 | return; |
| 1474 | } |
| 1475 | setLowBits(hiBit); |
| 1476 | setHighBits(BitWidth - loBit); |
| 1477 | } |
| 1478 | |
| 1479 | |
| 1480 | |
| 1481 | void setBits(unsigned loBit, unsigned hiBit) { |
| 1482 | assert(hiBit <= BitWidth && "hiBit out of range"); |
| 1483 | assert(loBit <= BitWidth && "loBit out of range"); |
| 1484 | assert(loBit <= hiBit && "loBit greater than hiBit"); |
| 1485 | if (loBit == hiBit) |
| 1486 | return; |
| 1487 | if (loBit < APINT_BITS_PER_WORD && hiBit <= APINT_BITS_PER_WORD) { |
| 1488 | uint64_t mask = WORDTYPE_MAX >> (APINT_BITS_PER_WORD - (hiBit - loBit)); |
| 1489 | mask <<= loBit; |
| 1490 | if (isSingleWord()) |
| 1491 | U.VAL |= mask; |
| 1492 | else |
| 1493 | U.pVal[0] |= mask; |
| 1494 | } else { |
| 1495 | setBitsSlowCase(loBit, hiBit); |
| 1496 | } |
| 1497 | } |
| 1498 | |
| 1499 | |
| 1500 | void setBitsFrom(unsigned loBit) { |
| 1501 | return setBits(loBit, BitWidth); |
| 1502 | } |
| 1503 | |
| 1504 | |
| 1505 | void setLowBits(unsigned loBits) { |
| 1506 | return setBits(0, loBits); |
| 1507 | } |
| 1508 | |
| 1509 | |
| 1510 | void setHighBits(unsigned hiBits) { |
| 1511 | return setBits(BitWidth - hiBits, BitWidth); |
| 1512 | } |
| 1513 | |
| 1514 | |
| 1515 | void clearAllBits() { |
| 1516 | if (isSingleWord()) |
| 1517 | U.VAL = 0; |
| 1518 | else |
| 1519 | memset(U.pVal, 0, getNumWords() * APINT_WORD_SIZE); |
| 1520 | } |
| 1521 | |
| 1522 | |
| 1523 | |
| 1524 | |
| 1525 | void clearBit(unsigned BitPosition) { |
| 1526 | assert(BitPosition < BitWidth && "BitPosition out of range"); |
| 1527 | WordType Mask = ~maskBit(BitPosition); |
| 1528 | if (isSingleWord()) |
| 1529 | U.VAL &= Mask; |
| 1530 | else |
| 1531 | U.pVal[whichWord(BitPosition)] &= Mask; |
| 1532 | } |
| 1533 | |
| 1534 | |
| 1535 | void clearLowBits(unsigned loBits) { |
| 1536 | assert(loBits <= BitWidth && "More bits than bitwidth"); |
| 1537 | APInt Keep = getHighBitsSet(BitWidth, BitWidth - loBits); |
| 1538 | *this &= Keep; |
| 1539 | } |
| 1540 | |
| 1541 | |
| 1542 | void clearSignBit() { |
| 1543 | clearBit(BitWidth - 1); |
| 1544 | } |
| 1545 | |
| 1546 | |
| 1547 | void flipAllBits() { |
| 1548 | if (isSingleWord()) { |
| 1549 | U.VAL ^= WORDTYPE_MAX; |
| 1550 | clearUnusedBits(); |
| 1551 | } else { |
| 1552 | flipAllBitsSlowCase(); |
| 1553 | } |
| 1554 | } |
| 1555 | |
| 1556 | |
| 1557 | |
| 1558 | |
| 1559 | |
| 1560 | void flipBit(unsigned bitPosition); |
| 1561 | |
| 1562 | |
| 1563 | void negate() { |
| 1564 | flipAllBits(); |
| 1565 | ++(*this); |
| 1566 | } |
| 1567 | |
| 1568 | |
| 1569 | void insertBits(const APInt &SubBits, unsigned bitPosition); |
| 1570 | void insertBits(uint64_t SubBits, unsigned bitPosition, unsigned numBits); |
| 1571 | |
| 1572 | |
| 1573 | APInt extractBits(unsigned numBits, unsigned bitPosition) const; |
| 1574 | uint64_t extractBitsAsZExtValue(unsigned numBits, unsigned bitPosition) const; |
| 1575 | |
| 1576 | |
| 1577 | |
| 1578 | |
| 1579 | |
| 1580 | |
| 1581 | unsigned getBitWidth() const { return BitWidth; } |
| 1582 | |
| 1583 | |
| 1584 | |
| 1585 | |
| 1586 | |
| 1587 | |
| 1588 | unsigned getNumWords() const { return getNumWords(BitWidth); } |
| 1589 | |
| 1590 | |
| 1591 | |
| 1592 | |
| 1593 | |
| 1594 | |
| 1595 | |
| 1596 | static unsigned getNumWords(unsigned BitWidth) { |
| 1597 | return ((uint64_t)BitWidth + APINT_BITS_PER_WORD - 1) / APINT_BITS_PER_WORD; |
| 1598 | } |
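// A minimal standalone sketch of the ceiling division getNumWords() performs, not taken
// from the analyzed sources; the 64-bit word size is an assumption standing in for
// APINT_BITS_PER_WORD.
#include <cstdint>

unsigned numWordsFor(unsigned BitWidth) {
  const unsigned BitsPerWord = 64;
  return ((uint64_t)BitWidth + BitsPerWord - 1) / BitsPerWord;   // numWordsFor(65) == 2
}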
| 1599 | |
| 1600 | |
| 1601 | |
| 1602 | |
| 1603 | |
| 1604 | |
| 1605 | unsigned getActiveBits() const { return BitWidth - countLeadingZeros(); } |
| 1606 | |
| 1607 | |
| 1608 | |
| 1609 | |
| 1610 | |
| 1611 | unsigned getActiveWords() const { |
| 1612 | unsigned numActiveBits = getActiveBits(); |
| 1613 | return numActiveBits ? whichWord(numActiveBits - 1) + 1 : 1; |
| 1614 | } |
| 1615 | |
| 1616 | |
| 1617 | |
| 1618 | |
| 1619 | |
| 1620 | |
| 1621 | |
| 1622 | |
| 1623 | |
| 1624 | unsigned getMinSignedBits() const { return BitWidth - getNumSignBits() + 1; } |
| 1625 | |
| 1626 | |
| 1627 | |
| 1628 | |
| 1629 | |
| 1630 | |
| 1631 | uint64_t getZExtValue() const { |
| 1632 | if (isSingleWord()) |
| 1633 | return U.VAL; |
| 1634 | assert(getActiveBits() <= 64 && "Too many bits for uint64_t"); |
| 1635 | return U.pVal[0]; |
| 1636 | } |
| 1637 | |
| 1638 | |
| 1639 | |
| 1640 | |
| 1641 | |
| 1642 | |
| 1643 | int64_t getSExtValue() const { |
| 1644 | if (isSingleWord()) |
| 1645 | return SignExtend64(U.VAL, BitWidth); |
| 1646 | assert(getMinSignedBits() <= 64 && "Too many bits for int64_t"); |
| 1647 | return int64_t(U.pVal[0]); |
| 1648 | } |
| 1649 | |
| 1650 | |
| 1651 | |
| 1652 | |
| 1653 | |
| 1654 | static unsigned getBitsNeeded(StringRef str, uint8_t radix); |
| 1655 | |
| 1656 | |
| 1657 | |
| 1658 | |
| 1659 | |
| 1660 | |
| 1661 | |
| 1662 | |
| 1663 | |
| 1664 | unsigned countLeadingZeros() const { |
| 1665 | if (isSingleWord()) { |
| 1666 | unsigned unusedBits = APINT_BITS_PER_WORD - BitWidth; |
| 1667 | return llvm::countLeadingZeros(U.VAL) - unusedBits; |
| 1668 | } |
| 1669 | return countLeadingZerosSlowCase(); |
| 1670 | } |
| 1671 | |
| 1672 | |
| 1673 | |
| 1674 | |
| 1675 | |
| 1676 | |
| 1677 | |
| 1678 | |
| 1679 | |
| 1680 | unsigned countLeadingOnes() const { |
| 1681 | if (isSingleWord()) |
| 1682 | return llvm::countLeadingOnes(U.VAL << (APINT_BITS_PER_WORD - BitWidth)); |
| 1683 | return countLeadingOnesSlowCase(); |
| 1684 | } |
| 1685 | |
| 1686 | |
| 1687 | |
| 1688 | unsigned getNumSignBits() const { |
| 1689 | return isNegative() ? countLeadingOnes() : countLeadingZeros(); |
| 1690 | } |
| 1691 | |
| 1692 | |
| 1693 | |
| 1694 | |
| 1695 | |
| 1696 | |
| 1697 | |
| 1698 | |
| 1699 | |
| 1700 | unsigned countTrailingZeros() const { |
| 1701 | if (isSingleWord()) { |
| 1702 | unsigned TrailingZeros = llvm::countTrailingZeros(U.VAL); |
| 1703 | return (TrailingZeros > BitWidth ? BitWidth : TrailingZeros); |
| 1704 | } |
| 1705 | return countTrailingZerosSlowCase(); |
| 1706 | } |
| 1707 | |
| 1708 | |
| 1709 | |
| 1710 | |
| 1711 | |
| 1712 | |
| 1713 | |
| 1714 | |
| 1715 | |
| 1716 | unsigned countTrailingOnes() const { |
| 1717 | if (isSingleWord()) |
| 1718 | return llvm::countTrailingOnes(U.VAL); |
| 1719 | return countTrailingOnesSlowCase(); |
| 1720 | } |
| 1721 | |
| 1722 | |
| 1723 | |
| 1724 | |
| 1725 | |
| 1726 | |
| 1727 | |
| 1728 | unsigned countPopulation() const { |
| 1729 | if (isSingleWord()) |
| 1730 | return llvm::countPopulation(U.VAL); |
| 1731 | return countPopulationSlowCase(); |
| 1732 | } |
| 1733 | |
| 1734 | |
| 1735 | |
| 1736 | |
| 1737 | void print(raw_ostream &OS, bool isSigned) const; |
| 1738 | |
| 1739 | |
| 1740 | |
| 1741 | void toString(SmallVectorImpl<char> &Str, unsigned Radix, bool Signed, |
| 1742 | bool formatAsCLiteral = false) const; |
| 1743 | |
| 1744 | |
| 1745 | |
| 1746 | void toStringUnsigned(SmallVectorImpl<char> &Str, unsigned Radix = 10) const { |
| 1747 | toString(Str, Radix, false, false); |
| 1748 | } |
| 1749 | |
| 1750 | |
| 1751 | |
| 1752 | void toStringSigned(SmallVectorImpl<char> &Str, unsigned Radix = 10) const { |
| 1753 | toString(Str, Radix, true, false); |
| 1754 | } |
| 1755 | |
| 1756 | |
| 1757 | APInt byteSwap() const; |
| 1758 | |
| 1759 | |
| 1760 | |
| 1761 | APInt reverseBits() const; |
| 1762 | |
| 1763 | |
| 1764 | double roundToDouble(bool isSigned) const; |
| 1765 | |
| 1766 | |
| 1767 | double roundToDouble() const { return roundToDouble(false); } |
| 1768 | |
| 1769 | |
| 1770 | double signedRoundToDouble() const { return roundToDouble(true); } |
| 1771 | |
| 1772 | |
| 1773 | |
| 1774 | |
| 1775 | |
| 1776 | |
| 1777 | double bitsToDouble() const { |
| 1778 | return BitsToDouble(getWord(0)); |
| 1779 | } |
| 1780 | |
| 1781 | |
| 1782 | |
| 1783 | |
| 1784 | |
| 1785 | |
| 1786 | float bitsToFloat() const { |
| 1787 | return BitsToFloat(static_cast<uint32_t>(getWord(0))); |
| 1788 | } |
| 1789 | |
| 1790 | |
| 1791 | |
| 1792 | |
| 1793 | |
| 1794 | static APInt doubleToBits(double V) { |
| 1795 | return APInt(sizeof(double) * CHAR_BIT, DoubleToBits(V)); |
| 1796 | } |
| 1797 | |
| 1798 | |
| 1799 | |
| 1800 | |
| 1801 | |
| 1802 | static APInt floatToBits(float V) { |
| 1803 | return APInt(sizeof(float) * CHAR_BIT, FloatToBits(V)); |
| 1804 | } |
| 1805 | |
| 1806 | |
| 1807 | |
| 1808 | |
| 1809 | |
| 1810 | |
| 1811 | unsigned logBase2() const { return getActiveBits() - 1; } |
| 1812 | |
| 1813 | |
| 1814 | unsigned ceilLogBase2() const { |
| 1815 | APInt temp(*this); |
| 1816 | --temp; |
| 1817 | return temp.getActiveBits(); |
| 1818 | } |
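// A minimal standalone sketch contrasting logBase2() and ceilLogBase2() on a
// non-power-of-two, not taken from the analyzed sources; the example value is mine.
#include "llvm/ADT/APInt.h"
using namespace llvm;

void logExample() {
  APInt V(32, 20);                     // 10100b, five active bits
  unsigned Floor = V.logBase2();       // 4 (activeBits - 1)
  unsigned Ceil  = V.ceilLogBase2();   // 5 (activeBits of V - 1)
  (void)Floor; (void)Ceil;
}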
| 1819 | |
| 1820 | |
| 1821 | |
| 1822 | |
| 1823 | |
| 1824 | |
| 1825 | |
| 1826 | |
| 1827 | |
| 1828 | |
| 1829 | unsigned nearestLogBase2() const { |
| 1830 | |
| 1831 | |
| 1832 | |
| 1833 | if (BitWidth == 1) |
| 1834 | return U.VAL - 1; |
| 1835 | |
| 1836 | |
| 1837 | if (isNullValue()) |
| 1838 | return UINT32_MAX; |
| 1839 | |
| 1840 | |
| 1841 | |
| 1842 | |
| 1843 | |
| 1844 | |
| 1845 | unsigned lg = logBase2(); |
| 1846 | return lg + unsigned((*this)[lg - 1]); |
| 1847 | } |
| 1848 | |
| 1849 | |
| 1850 | |
| 1851 | int32_t exactLogBase2() const { |
| 1852 | if (!isPowerOf2()) |
| 1853 | return -1; |
| 1854 | return logBase2(); |
| 1855 | } |
| 1856 | |
| 1857 | |
| 1858 | APInt sqrt() const; |
| 1859 | |
| 1860 | |
| 1861 | |
| 1862 | |
| 1863 | APInt abs() const { |
| 1864 | if (isNegative()) |
| 1865 | return -(*this); |
| 1866 | return *this; |
| 1867 | } |
| 1868 | |
| 1869 | |
| 1870 | APInt multiplicativeInverse(const APInt &modulo) const; |
| 1871 | |
| 1872 | |
| 1873 | |
| 1874 | |
| 1875 | |
| 1876 | |
| 1877 | struct ms; |
| 1878 | ms magic() const; |
| 1879 | |
| 1880 | |
| 1881 | struct mu; |
| 1882 | mu magicu(unsigned LeadingZeros = 0) const; |
| 1883 | |
| 1884 | |
| 1885 | |
| 1886 | |
| 1887 | |
| 1888 | |
| 1889 | |
| 1890 | |
| 1891 | |
| 1892 | |
| 1893 | |
| 1894 | |
| 1895 | |
| 1896 | static void tcSet(WordType *, WordType, unsigned); |
| 1897 | |
| 1898 | |
| 1899 | static void tcAssign(WordType *, const WordType *, unsigned); |
| 1900 | |
| 1901 | |
| 1902 | static bool tcIsZero(const WordType *, unsigned); |
| 1903 | |
| 1904 | |
| 1905 | static int tcExtractBit(const WordType *, unsigned bit); |
| 1906 | |
| 1907 | |
| 1908 | |
| 1909 | |
| 1910 | |
| 1911 | static void tcExtract(WordType *, unsigned dstCount, |
| 1912 | const WordType *, unsigned srcBits, |
| 1913 | unsigned srcLSB); |
| 1914 | |
| 1915 | |
| 1916 | static void tcSetBit(WordType *, unsigned bit); |
| 1917 | |
| 1918 | |
| 1919 | static void tcClearBit(WordType *, unsigned bit); |
| 1920 | |
| 1921 | |
| 1922 | |
| 1923 | static unsigned tcLSB(const WordType *, unsigned n); |
| 1924 | static unsigned tcMSB(const WordType *parts, unsigned n); |
| 1925 | |
| 1926 | |
| 1927 | static void tcNegate(WordType *, unsigned); |
| 1928 | |
| 1929 | |
| 1930 | static WordType tcAdd(WordType *, const WordType *, |
| 1931 | WordType carry, unsigned); |
| 1932 | |
| 1933 | static WordType tcAddPart(WordType *, WordType, unsigned); |
| 1934 | |
| 1935 | |
| 1936 | static WordType tcSubtract(WordType *, const WordType *, |
| 1937 | WordType carry, unsigned); |
| 1938 | |
| 1939 | static WordType tcSubtractPart(WordType *, WordType, unsigned); |
| 1940 | |
| 1941 | |
| 1942 | |
| 1943 | |
| 1944 | |
| 1945 | |
| 1946 | |
| 1947 | |
| 1948 | |
| 1949 | |
| 1950 | |
| 1951 | static int tcMultiplyPart(WordType *dst, const WordType *src, |
| 1952 | WordType multiplier, WordType carry, |
| 1953 | unsigned srcParts, unsigned dstParts, |
| 1954 | bool add); |
| 1955 | |
| 1956 | |
| 1957 | |
| 1958 | |
| 1959 | |
| 1960 | static int tcMultiply(WordType *, const WordType *, const WordType *, |
| 1961 | unsigned); |
| 1962 | |
| 1963 | |
| 1964 | |
| 1965 | static void tcFullMultiply(WordType *, const WordType *, |
| 1966 | const WordType *, unsigned, unsigned); |
| 1967 | |
| 1968 | |
| 1969 | |
| 1970 | |
| 1971 | |
| 1972 | |
| 1973 | |
| 1974 | |
| 1975 | |
| 1976 | |
| 1977 | static int tcDivide(WordType *lhs, const WordType *rhs, |
| 1978 | WordType *remainder, WordType *scratch, |
| 1979 | unsigned parts); |
| 1980 | |
| 1981 | |
| 1982 | |
| 1983 | static void tcShiftLeft(WordType *, unsigned Words, unsigned Count); |
| 1984 | |
| 1985 | |
| 1986 | |
| 1987 | static void tcShiftRight(WordType *, unsigned Words, unsigned Count); |
| 1988 | |
| 1989 | |
| 1990 | static void tcAnd(WordType *, const WordType *, unsigned); |
| 1991 | static void tcOr(WordType *, const WordType *, unsigned); |
| 1992 | static void tcXor(WordType *, const WordType *, unsigned); |
| 1993 | static void tcComplement(WordType *, unsigned); |
| 1994 | |
| 1995 | |
| 1996 | static int tcCompare(const WordType *, const WordType *, unsigned); |
| 1997 | |
| 1998 | |
| 1999 | static WordType tcIncrement(WordType *dst, unsigned parts) { |
| 2000 | return tcAddPart(dst, 1, parts); |
| 2001 | } |
| 2002 | |
| 2003 | |
| 2004 | static WordType tcDecrement(WordType *dst, unsigned parts) { |
| 2005 | return tcSubtractPart(dst, 1, parts); |
| 2006 | } |
| 2007 | |
| 2008 | |
| 2009 | static void tcSetLeastSignificantBits(WordType *, unsigned, unsigned bits); |
| 2010 | |
| 2011 | |
| 2012 | void dump() const; |
| 2013 | |
| 2014 | |
| 2015 | }; |
| 2016 | |
| 2017 | |
| 2018 | struct APInt::ms { |
| 2019 | APInt m; |
| 2020 | unsigned s; |
| 2021 | }; |
| 2022 | |
| 2023 | |
| 2024 | struct APInt::mu { |
| 2025 | APInt m; |
| 2026 | bool a; |
| 2027 | unsigned s; |
| 2028 | }; |
| 2029 | |
| 2030 | inline bool operator==(uint64_t V1, const APInt &V2) { return V2 == V1; } |
| 2031 | |
| 2032 | inline bool operator!=(uint64_t V1, const APInt &V2) { return V2 != V1; } |
| 2033 | |
| 2034 | |
| 2035 | |
| 2036 | |
inline APInt operator~(APInt v) {
  v.flipAllBits();
  return v;
}

inline APInt operator&(APInt a, const APInt &b) {
  a &= b;
  return a;
}

inline APInt operator&(const APInt &a, APInt &&b) {
  b &= a;
  return std::move(b);
}

inline APInt operator&(APInt a, uint64_t RHS) {
  a &= RHS;
  return a;
}

inline APInt operator&(uint64_t LHS, APInt b) {
  b &= LHS;
  return b;
}

inline APInt operator|(APInt a, const APInt &b) {
  a |= b;
  return a;
}

inline APInt operator|(const APInt &a, APInt &&b) {
  b |= a;
  return std::move(b);
}

inline APInt operator|(APInt a, uint64_t RHS) {
  a |= RHS;
  return a;
}

inline APInt operator|(uint64_t LHS, APInt b) {
  b |= LHS;
  return b;
}

inline APInt operator^(APInt a, const APInt &b) {
  a ^= b;
  return a;
}

inline APInt operator^(const APInt &a, APInt &&b) {
  b ^= a;
  return std::move(b);
}

inline APInt operator^(APInt a, uint64_t RHS) {
  a ^= RHS;
  return a;
}

inline APInt operator^(uint64_t LHS, APInt b) {
  b ^= LHS;
  return b;
}

inline raw_ostream &operator<<(raw_ostream &OS, const APInt &I) {
  I.print(OS, true);
  return OS;
}

inline APInt operator-(APInt v) {
  v.negate();
  return v;
}

inline APInt operator+(APInt a, const APInt &b) {
  a += b;
  return a;
}

inline APInt operator+(const APInt &a, APInt &&b) {
  b += a;
  return std::move(b);
}

inline APInt operator+(APInt a, uint64_t RHS) {
  a += RHS;
  return a;
}

inline APInt operator+(uint64_t LHS, APInt b) {
  b += LHS;
  return b;
}

inline APInt operator-(APInt a, const APInt &b) {
  a -= b;
  return a;
}

inline APInt operator-(const APInt &a, APInt &&b) {
  b.negate();
  b += a;
  return std::move(b);
}

inline APInt operator-(APInt a, uint64_t RHS) {
  a -= RHS;
  return a;
}

inline APInt operator-(uint64_t LHS, APInt b) {
  b.negate();
  b += LHS;
  return b;
}

inline APInt operator*(APInt a, uint64_t RHS) {
  a *= RHS;
  return a;
}

inline APInt operator*(uint64_t LHS, APInt b) {
  b *= LHS;
  return b;
}
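
// Illustrative usage sketch (not part of APInt.h itself): the free operators
// above take one APInt operand by value (or by rvalue reference) so the
// result can reuse that operand's storage, and the uint64_t forms operate at
// the APInt operand's bit width.  The helper name below is hypothetical.
inline APInt exampleOperatorUse(const APInt &X) {
  APInt Y = (X + 1) & ~X;   // add, complement and AND, all at X's bit width
  return 2 * Y - X;         // the uint64_t operand adopts Y's bit width
}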

namespace APIntOps {

/// Determine the smaller of two APInts considered to be signed.
inline const APInt &smin(const APInt &A, const APInt &B) {
  return A.slt(B) ? A : B;
}

/// Determine the larger of two APInts considered to be signed.
inline const APInt &smax(const APInt &A, const APInt &B) {
  return A.sgt(B) ? A : B;
}

/// Determine the smaller of two APInts considered to be unsigned.
inline const APInt &umin(const APInt &A, const APInt &B) {
  return A.ult(B) ? A : B;
}

/// Determine the larger of two APInts considered to be unsigned.
inline const APInt &umax(const APInt &A, const APInt &B) {
  return A.ugt(B) ? A : B;
}

/// Compute the greatest common divisor of two APInt values, treated as
/// unsigned.
APInt GreatestCommonDivisor(APInt A, APInt B);

/// Convert the given APInt to a double value, treating it as unsigned.
inline double RoundAPIntToDouble(const APInt &APIVal) {
  return APIVal.roundToDouble();
}

/// Convert the given APInt to a double value, treating it as signed.
inline double RoundSignedAPIntToDouble(const APInt &APIVal) {
  return APIVal.signedRoundToDouble();
}

/// Convert the given APInt to a float value, treating it as unsigned.
inline float RoundAPIntToFloat(const APInt &APIVal) {
  return float(RoundAPIntToDouble(APIVal));
}

/// Convert the given APInt to a float value, treating it as signed.
inline float RoundSignedAPIntToFloat(const APInt &APIVal) {
  return float(APIVal.signedRoundToDouble());
}

/// Convert the given double value to an APInt of the given bit width.
APInt RoundDoubleToAPInt(double Double, unsigned width);

/// Convert the given float value to an APInt of the given bit width.
inline APInt RoundFloatToAPInt(float Float, unsigned width) {
  return RoundDoubleToAPInt(double(Float), width);
}

/// Return A divided by B (treated as unsigned), rounded according to the
/// given rounding mode.
APInt RoundingUDiv(const APInt &A, const APInt &B, APInt::Rounding RM);

/// Return A divided by B (treated as signed), rounded according to the
/// given rounding mode.
APInt RoundingSDiv(const APInt &A, const APInt &B, APInt::Rounding RM);

/// Solve the quadratic A*n^2 + B*n + C in RangeWidth-bit wrapping
/// arithmetic: return the smallest non-negative n at which the polynomial
/// either evaluates to zero or first wraps around the RangeWidth-bit range,
/// or None if no such n exists.
Optional<APInt> SolveQuadraticEquationWrap(APInt A, APInt B, APInt C,
                                           unsigned RangeWidth);

/// Compare two values, and if they differ, return the position of the most
/// significant bit at which they differ; return None if they are equal.
Optional<unsigned> GetMostSignificantDifferentBit(const APInt &A,
                                                  const APInt &B);

} // namespace APIntOps
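
// Illustrative usage sketch (not part of APInt.h itself): the APIntOps
// min/max helpers return a reference to one of their arguments and require
// both arguments to share a bit width.  The helper name below is
// hypothetical.
inline APInt exampleUnsignedClamp(const APInt &V, const APInt &Lo,
                                  const APInt &Hi) {
  // Clamp V into [Lo, Hi] under unsigned comparison; V, Lo and Hi must all
  // have the same bit width.
  return APIntOps::umin(APIntOps::umax(V, Lo), Hi);
}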

/// Provide a hash_value for APInt so it can be used with hashing utilities.
hash_code hash_value(const APInt &Arg);

/// Fill the StoreBytes bytes of memory starting at Dst with the integer held
/// in IntVal.
void StoreIntToMemory(const APInt &IntVal, uint8_t *Dst, unsigned StoreBytes);

/// Load the integer stored in the LoadBytes bytes starting at Src into
/// IntVal, which is assumed to already have enough bits to hold it.
void LoadIntFromMemory(APInt &IntVal, const uint8_t *Src, unsigned LoadBytes);

/// Provide DenseMapInfo for APInt so it can be used as a DenseMap/DenseSet
/// key.  The empty and tombstone keys are zero-bit-width sentinel values.
template <> struct DenseMapInfo<APInt> {
  static inline APInt getEmptyKey() {
    APInt V(nullptr, 0);
    V.U.VAL = 0;
    return V;
  }

  static inline APInt getTombstoneKey() {
    APInt V(nullptr, 0);
    V.U.VAL = 1;
    return V;
  }

  static unsigned getHashValue(const APInt &Key);

  static bool isEqual(const APInt &LHS, const APInt &RHS) {
    return LHS.getBitWidth() == RHS.getBitWidth() && LHS == RHS;
  }
};
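
// Illustrative usage sketch (not part of APInt.h itself): with the
// DenseMapInfo specialization above, APInt can be used directly as a
// DenseMap/DenseSet key.  Shown as a comment because this header does not
// include llvm/ADT/DenseMap.h; the map name is hypothetical.
//
//   #include "llvm/ADT/DenseMap.h"
//
//   llvm::DenseMap<llvm::APInt, unsigned> Counts;
//   ++Counts[llvm::APInt(32, 7)];   // keys compare by bit width and value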

} // namespace llvm

#endif