Bug Summary

File: src/gnu/usr.bin/clang/libclangCodeGen/../../../llvm/llvm/include/llvm/IR/Instructions.h
Warning: line 1259, column 33
Diagnostic: "Called C++ object pointer is null"

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name CGExprScalar.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/gnu/usr.bin/clang/libclangCodeGen/obj -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/gnu/usr.bin/clang/libclangCodeGen/../../../llvm/clang/include -I /usr/src/gnu/usr.bin/clang/libclangCodeGen/../../../llvm/llvm/include -I /usr/src/gnu/usr.bin/clang/libclangCodeGen/../include -I /usr/src/gnu/usr.bin/clang/libclangCodeGen/obj -I /usr/src/gnu/usr.bin/clang/libclangCodeGen/obj/../include -D NDEBUG -D __STDC_LIMIT_MACROS -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D LLVM_PREFIX="/usr" -internal-isystem /usr/include/c++/v1 -internal-isystem /usr/local/lib/clang/13.0.0/include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/usr/src/gnu/usr.bin/clang/libclangCodeGen/obj -ferror-limit 19 -fvisibility-inlines-hidden -fwrapv -stack-protector 2 -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc 
-fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/vmm/scan-build/2022-01-12-194120-40624-1 -x c++ /usr/src/gnu/usr.bin/clang/libclangCodeGen/../../../llvm/clang/lib/CodeGen/CGExprScalar.cpp

/usr/src/gnu/usr.bin/clang/libclangCodeGen/../../../llvm/clang/lib/CodeGen/CGExprScalar.cpp

1//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGCXXABI.h"
14#include "CGCleanup.h"
15#include "CGDebugInfo.h"
16#include "CGObjCRuntime.h"
17#include "CGOpenMPRuntime.h"
18#include "CodeGenFunction.h"
19#include "CodeGenModule.h"
20#include "ConstantEmitter.h"
21#include "TargetInfo.h"
22#include "clang/AST/ASTContext.h"
23#include "clang/AST/Attr.h"
24#include "clang/AST/DeclObjC.h"
25#include "clang/AST/Expr.h"
26#include "clang/AST/RecordLayout.h"
27#include "clang/AST/StmtVisitor.h"
28#include "clang/Basic/CodeGenOptions.h"
29#include "clang/Basic/TargetInfo.h"
30#include "llvm/ADT/APFixedPoint.h"
31#include "llvm/ADT/Optional.h"
32#include "llvm/IR/CFG.h"
33#include "llvm/IR/Constants.h"
34#include "llvm/IR/DataLayout.h"
35#include "llvm/IR/FixedPointBuilder.h"
36#include "llvm/IR/Function.h"
37#include "llvm/IR/GetElementPtrTypeIterator.h"
38#include "llvm/IR/GlobalVariable.h"
39#include "llvm/IR/Intrinsics.h"
40#include "llvm/IR/IntrinsicsPowerPC.h"
41#include "llvm/IR/MatrixBuilder.h"
42#include "llvm/IR/Module.h"
43#include <cstdarg>
44
45using namespace clang;
46using namespace CodeGen;
47using llvm::Value;
48
49//===----------------------------------------------------------------------===//
50// Scalar Expression Emitter
51//===----------------------------------------------------------------------===//
52
53namespace {
54
55/// Determine whether the given binary operation may overflow.
56/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
57/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
58/// the returned overflow check is precise. The returned value is 'true' for
59/// all other opcodes, to be conservative.
60bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
61 BinaryOperator::Opcode Opcode, bool Signed,
62 llvm::APInt &Result) {
63 // Assume overflow is possible, unless we can prove otherwise.
64 bool Overflow = true;
65 const auto &LHSAP = LHS->getValue();
66 const auto &RHSAP = RHS->getValue();
67 if (Opcode == BO_Add) {
68 if (Signed)
69 Result = LHSAP.sadd_ov(RHSAP, Overflow);
70 else
71 Result = LHSAP.uadd_ov(RHSAP, Overflow);
72 } else if (Opcode == BO_Sub) {
73 if (Signed)
74 Result = LHSAP.ssub_ov(RHSAP, Overflow);
75 else
76 Result = LHSAP.usub_ov(RHSAP, Overflow);
77 } else if (Opcode == BO_Mul) {
78 if (Signed)
79 Result = LHSAP.smul_ov(RHSAP, Overflow);
80 else
81 Result = LHSAP.umul_ov(RHSAP, Overflow);
82 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
83 if (Signed && !RHS->isZero())
84 Result = LHSAP.sdiv_ov(RHSAP, Overflow);
85 else
86 return false;
87 }
88 return Overflow;
89}
90
91struct BinOpInfo {
92 Value *LHS;
93 Value *RHS;
94 QualType Ty; // Computation Type.
95 BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
96 FPOptions FPFeatures;
97 const Expr *E; // Entire expr, for error unsupported. May not be binop.
98
99 /// Check if the binop can result in integer overflow.
100 bool mayHaveIntegerOverflow() const {
101 // Without constant input, we can't rule out overflow.
102 auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
103 auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
104 if (!LHSCI || !RHSCI)
105 return true;
106
107 llvm::APInt Result;
108 return ::mayHaveIntegerOverflow(
109 LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
110 }
111
112 /// Check if the binop computes a division or a remainder.
113 bool isDivremOp() const {
114 return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
115 Opcode == BO_RemAssign;
116 }
117
118 /// Check if the binop can result in an integer division by zero.
119 bool mayHaveIntegerDivisionByZero() const {
120 if (isDivremOp())
121 if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
122 return CI->isZero();
123 return true;
124 }
125
126 /// Check if the binop can result in a float division by zero.
127 bool mayHaveFloatDivisionByZero() const {
128 if (isDivremOp())
129 if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
130 return CFP->isZero();
131 return true;
132 }
133
134 /// Check if at least one operand is a fixed point type. In such cases, this
135 /// operation did not follow usual arithmetic conversion and both operands
136 /// might not be of the same type.
137 bool isFixedPointOp() const {
138 // We cannot simply check the result type since comparison operations return
139 // an int.
140 if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
141 QualType LHSType = BinOp->getLHS()->getType();
142 QualType RHSType = BinOp->getRHS()->getType();
143 return LHSType->isFixedPointType() || RHSType->isFixedPointType();
144 }
145 if (const auto *UnOp = dyn_cast<UnaryOperator>(E))
146 return UnOp->getSubExpr()->getType()->isFixedPointType();
147 return false;
148 }
149};
150
151static bool MustVisitNullValue(const Expr *E) {
152 // If a null pointer expression's type is the C++0x nullptr_t, then
153 // it's not necessarily a simple constant and it must be evaluated
154 // for its potential side effects.
155 return E->getType()->isNullPtrType();
156}
157
158/// If \p E is a widened promoted integer, get its base (unpromoted) type.
159static llvm::Optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
160 const Expr *E) {
161 const Expr *Base = E->IgnoreImpCasts();
162 if (E == Base)
163 return llvm::None;
164
165 QualType BaseTy = Base->getType();
166 if (!BaseTy->isPromotableIntegerType() ||
167 Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
168 return llvm::None;
169
170 return BaseTy;
171}
172
173/// Check if \p E is a widened promoted integer.
174static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
175 return getUnwidenedIntegerType(Ctx, E).hasValue();
176}
177
178/// Check if we can skip the overflow check for \p Op.
179static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
180 assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&((void)0)
181 "Expected a unary or binary operator")((void)0);
182
183 // If the binop has constant inputs and we can prove there is no overflow,
184 // we can elide the overflow check.
185 if (!Op.mayHaveIntegerOverflow())
186 return true;
187
188 // If a unary op has a widened operand, the op cannot overflow.
189 if (const auto *UO = dyn_cast<UnaryOperator>(Op.E))
190 return !UO->canOverflow();
191
192 // We usually don't need overflow checks for binops with widened operands.
193 // Multiplication with promoted unsigned operands is a special case.
194 const auto *BO = cast<BinaryOperator>(Op.E);
195 auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
196 if (!OptionalLHSTy)
197 return false;
198
199 auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
200 if (!OptionalRHSTy)
201 return false;
202
203 QualType LHSTy = *OptionalLHSTy;
204 QualType RHSTy = *OptionalRHSTy;
205
206 // This is the simple case: binops without unsigned multiplication, and with
207 // widened operands. No overflow check is needed here.
208 if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
209 !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
210 return true;
211
212 // For unsigned multiplication the overflow check can be elided if either one
213 // of the unpromoted types are less than half the size of the promoted type.
214 unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
215 return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
216 (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
217}
218
219class ScalarExprEmitter
220 : public StmtVisitor<ScalarExprEmitter, Value*> {
221 CodeGenFunction &CGF;
222 CGBuilderTy &Builder;
223 bool IgnoreResultAssign;
224 llvm::LLVMContext &VMContext;
225public:
226
227 ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
228 : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
229 VMContext(cgf.getLLVMContext()) {
230 }
231
232 //===--------------------------------------------------------------------===//
233 // Utilities
234 //===--------------------------------------------------------------------===//
235
236 bool TestAndClearIgnoreResultAssign() {
237 bool I = IgnoreResultAssign;
238 IgnoreResultAssign = false;
239 return I;
240 }
241
242 llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
243 LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
244 LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
245 return CGF.EmitCheckedLValue(E, TCK);
246 }
247
248 void EmitBinOpCheck(ArrayRef<std::pair<Value *, SanitizerMask>> Checks,
249 const BinOpInfo &Info);
250
251 Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
252 return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
253 }
254
255 void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
256 const AlignValueAttr *AVAttr = nullptr;
257 if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
258 const ValueDecl *VD = DRE->getDecl();
259
260 if (VD->getType()->isReferenceType()) {
261 if (const auto *TTy =
262 dyn_cast<TypedefType>(VD->getType().getNonReferenceType()))
263 AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
264 } else {
265 // Assumptions for function parameters are emitted at the start of the
266 // function, so there is no need to repeat that here,
267 // unless the alignment-assumption sanitizer is enabled,
268 // then we prefer the assumption over alignment attribute
269 // on IR function param.
270 if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
271 return;
272
273 AVAttr = VD->getAttr<AlignValueAttr>();
274 }
275 }
276
277 if (!AVAttr)
278 if (const auto *TTy =
279 dyn_cast<TypedefType>(E->getType()))
280 AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
281
282 if (!AVAttr)
283 return;
284
285 Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
286 llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
287 CGF.emitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
288 }
289
290 /// EmitLoadOfLValue - Given an expression with complex type that represents a
291 /// value l-value, this method emits the address of the l-value, then loads
292 /// and returns the result.
293 Value *EmitLoadOfLValue(const Expr *E) {
294 Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
295 E->getExprLoc());
296
297 EmitLValueAlignmentAssumption(E, V);
298 return V;
299 }
300
301 /// EmitConversionToBool - Convert the specified expression value to a
302 /// boolean (i1) truth value. This is equivalent to "Val != 0".
303 Value *EmitConversionToBool(Value *Src, QualType DstTy);
304
305 /// Emit a check that a conversion from a floating-point type does not
306 /// overflow.
307 void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
308 Value *Src, QualType SrcType, QualType DstType,
309 llvm::Type *DstTy, SourceLocation Loc);
310
311 /// Known implicit conversion check kinds.
312 /// Keep in sync with the enum of the same name in ubsan_handlers.h
313 enum ImplicitConversionCheckKind : unsigned char {
314 ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
315 ICCK_UnsignedIntegerTruncation = 1,
316 ICCK_SignedIntegerTruncation = 2,
317 ICCK_IntegerSignChange = 3,
318 ICCK_SignedIntegerTruncationOrSignChange = 4,
319 };
320
321 /// Emit a check that an [implicit] truncation of an integer does not
322 /// discard any bits. It is not UB, so we use the value after truncation.
323 void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
324 QualType DstType, SourceLocation Loc);
325
326 /// Emit a check that an [implicit] conversion of an integer does not change
327 /// the sign of the value. It is not UB, so we use the value after conversion.
328 /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
329 void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
330 QualType DstType, SourceLocation Loc);
331
332 /// Emit a conversion from the specified type to the specified destination
333 /// type, both of which are LLVM scalar types.
334 struct ScalarConversionOpts {
335 bool TreatBooleanAsSigned;
336 bool EmitImplicitIntegerTruncationChecks;
337 bool EmitImplicitIntegerSignChangeChecks;
338
339 ScalarConversionOpts()
340 : TreatBooleanAsSigned(false),
341 EmitImplicitIntegerTruncationChecks(false),
342 EmitImplicitIntegerSignChangeChecks(false) {}
343
344 ScalarConversionOpts(clang::SanitizerSet SanOpts)
345 : TreatBooleanAsSigned(false),
346 EmitImplicitIntegerTruncationChecks(
347 SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
348 EmitImplicitIntegerSignChangeChecks(
349 SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
350 };
351 Value *EmitScalarCast(Value *Src, QualType SrcType, QualType DstType,
352 llvm::Type *SrcTy, llvm::Type *DstTy,
353 ScalarConversionOpts Opts);
354 Value *
355 EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
356 SourceLocation Loc,
357 ScalarConversionOpts Opts = ScalarConversionOpts());
358
359 /// Convert between either a fixed point and other fixed point or fixed point
360 /// and an integer.
361 Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
362 SourceLocation Loc);
363
364 /// Emit a conversion from the specified complex type to the specified
365 /// destination type, where the destination type is an LLVM scalar type.
366 Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
367 QualType SrcTy, QualType DstTy,
368 SourceLocation Loc);
369
370 /// EmitNullValue - Emit a value that corresponds to null for the given type.
371 Value *EmitNullValue(QualType Ty);
372
373 /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
374 Value *EmitFloatToBoolConversion(Value *V) {
375 // Compare against 0.0 for fp scalars.
376 llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
377 return Builder.CreateFCmpUNE(V, Zero, "tobool");
378 }
379
380 /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
381 Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
382 Value *Zero = CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()), QT);
383
384 return Builder.CreateICmpNE(V, Zero, "tobool");
385 }
386
387 Value *EmitIntToBoolConversion(Value *V) {
388 // Because of the type rules of C, we often end up computing a
389 // logical value, then zero extending it to int, then wanting it
390 // as a logical value again. Optimize this common case.
391 if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
392 if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
393 Value *Result = ZI->getOperand(0);
394 // If there aren't any more uses, zap the instruction to save space.
395 // Note that there can be more uses, for example if this
396 // is the result of an assignment.
397 if (ZI->use_empty())
398 ZI->eraseFromParent();
399 return Result;
400 }
401 }
402
403 return Builder.CreateIsNotNull(V, "tobool");
404 }
405
406 //===--------------------------------------------------------------------===//
407 // Visitor Methods
408 //===--------------------------------------------------------------------===//
409
410 Value *Visit(Expr *E) {
411 ApplyDebugLocation DL(CGF, E);
412 return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
413 }
414
415 Value *VisitStmt(Stmt *S) {
416 S->dump(llvm::errs(), CGF.getContext());
417 llvm_unreachable("Stmt can't have complex result type!")__builtin_unreachable();
418 }
419 Value *VisitExpr(Expr *S);
420
421 Value *VisitConstantExpr(ConstantExpr *E) {
422 if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
423 if (E->isGLValue())
424 return CGF.Builder.CreateLoad(Address(
425 Result, CGF.getContext().getTypeAlignInChars(E->getType())));
426 return Result;
427 }
428 return Visit(E->getSubExpr());
429 }
430 Value *VisitParenExpr(ParenExpr *PE) {
431 return Visit(PE->getSubExpr());
432 }
433 Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
434 return Visit(E->getReplacement());
435 }
436 Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
437 return Visit(GE->getResultExpr());
438 }
439 Value *VisitCoawaitExpr(CoawaitExpr *S) {
440 return CGF.EmitCoawaitExpr(*S).getScalarVal();
441 }
442 Value *VisitCoyieldExpr(CoyieldExpr *S) {
443 return CGF.EmitCoyieldExpr(*S).getScalarVal();
444 }
445 Value *VisitUnaryCoawait(const UnaryOperator *E) {
446 return Visit(E->getSubExpr());
447 }
448
449 // Leaves.
450 Value *VisitIntegerLiteral(const IntegerLiteral *E) {
451 return Builder.getInt(E->getValue());
452 }
453 Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
454 return Builder.getInt(E->getValue());
455 }
456 Value *VisitFloatingLiteral(const FloatingLiteral *E) {
457 return llvm::ConstantFP::get(VMContext, E->getValue());
458 }
459 Value *VisitCharacterLiteral(const CharacterLiteral *E) {
460 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
461 }
462 Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
463 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
464 }
465 Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
466 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
467 }
468 Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
469 return EmitNullValue(E->getType());
470 }
471 Value *VisitGNUNullExpr(const GNUNullExpr *E) {
472 return EmitNullValue(E->getType());
473 }
474 Value *VisitOffsetOfExpr(OffsetOfExpr *E);
475 Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
476 Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
477 llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
478 return Builder.CreateBitCast(V, ConvertType(E->getType()));
479 }
480
481 Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
482 return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength());
483 }
484
485 Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
486 return CGF.EmitPseudoObjectRValue(E).getScalarVal();
487 }
488
489 Value *VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E);
490
491 Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
492 if (E->isGLValue())
493 return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
494 E->getExprLoc());
495
496 // Otherwise, assume the mapping is the scalar directly.
497 return CGF.getOrCreateOpaqueRValueMapping(E).getScalarVal();
498 }
499
500 // l-values.
501 Value *VisitDeclRefExpr(DeclRefExpr *E) {
502 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
503 return CGF.emitScalarConstant(Constant, E);
504 return EmitLoadOfLValue(E);
505 }
506
507 Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
508 return CGF.EmitObjCSelectorExpr(E);
509 }
510 Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
511 return CGF.EmitObjCProtocolExpr(E);
512 }
513 Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
514 return EmitLoadOfLValue(E);
515 }
516 Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
517 if (E->getMethodDecl() &&
518 E->getMethodDecl()->getReturnType()->isReferenceType())
519 return EmitLoadOfLValue(E);
520 return CGF.EmitObjCMessageExpr(E).getScalarVal();
521 }
522
523 Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
524 LValue LV = CGF.EmitObjCIsaExpr(E);
525 Value *V = CGF.EmitLoadOfLValue(LV, E->getExprLoc()).getScalarVal();
526 return V;
527 }
528
529 Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
530 VersionTuple Version = E->getVersion();
531
532 // If we're checking for a platform older than our minimum deployment
533 // target, we can fold the check away.
534 if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
535 return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);
536
537 return CGF.EmitBuiltinAvailable(Version);
538 }
539
540 Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
541 Value *VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E);
542 Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
543 Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
544 Value *VisitMemberExpr(MemberExpr *E);
545 Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
546 Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
547 // Strictly speaking, we shouldn't be calling EmitLoadOfLValue, which
548 // transitively calls EmitCompoundLiteralLValue, here in C++ since compound
549 // literals aren't l-values in C++. We do so simply because that's the
550 // cleanest way to handle compound literals in C++.
551 // See the discussion here: https://reviews.llvm.org/D64464
552 return EmitLoadOfLValue(E);
553 }
554
555 Value *VisitInitListExpr(InitListExpr *E);
556
557 Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
558 assert(CGF.getArrayInitIndex() &&((void)0)
559 "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?")((void)0);
560 return CGF.getArrayInitIndex();
561 }
562
563 Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
564 return EmitNullValue(E->getType());
565 }
566 Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
567 CGF.CGM.EmitExplicitCastExprType(E, &CGF);
568 return VisitCastExpr(E);
569 }
570 Value *VisitCastExpr(CastExpr *E);
571
572 Value *VisitCallExpr(const CallExpr *E) {
573 if (E->getCallReturnType(CGF.getContext())->isReferenceType())
574 return EmitLoadOfLValue(E);
575
576 Value *V = CGF.EmitCallExpr(E).getScalarVal();
577
578 EmitLValueAlignmentAssumption(E, V);
579 return V;
580 }
581
582 Value *VisitStmtExpr(const StmtExpr *E);
583
584 // Unary Operators.
585 Value *VisitUnaryPostDec(const UnaryOperator *E) {
586 LValue LV = EmitLValue(E->getSubExpr());
587 return EmitScalarPrePostIncDec(E, LV, false, false);
588 }
589 Value *VisitUnaryPostInc(const UnaryOperator *E) {
590 LValue LV = EmitLValue(E->getSubExpr());
591 return EmitScalarPrePostIncDec(E, LV, true, false);
592 }
593 Value *VisitUnaryPreDec(const UnaryOperator *E) {
594 LValue LV = EmitLValue(E->getSubExpr());
595 return EmitScalarPrePostIncDec(E, LV, false, true);
596 }
597 Value *VisitUnaryPreInc(const UnaryOperator *E) {
598 LValue LV = EmitLValue(E->getSubExpr());
599 return EmitScalarPrePostIncDec(E, LV, true, true);
600 }
601
602 llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
603 llvm::Value *InVal,
604 bool IsInc);
605
606 llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
607 bool isInc, bool isPre);
608
609
610 Value *VisitUnaryAddrOf(const UnaryOperator *E) {
611 if (isa<MemberPointerType>(E->getType())) // never sugared
612 return CGF.CGM.getMemberPointerConstant(E);
613
614 return EmitLValue(E->getSubExpr()).getPointer(CGF);
615 }
616 Value *VisitUnaryDeref(const UnaryOperator *E) {
617 if (E->getType()->isVoidType())
618 return Visit(E->getSubExpr()); // the actual value should be unused
619 return EmitLoadOfLValue(E);
620 }
621 Value *VisitUnaryPlus(const UnaryOperator *E) {
622 // This differs from gcc, though, most likely due to a bug in gcc.
623 TestAndClearIgnoreResultAssign();
624 return Visit(E->getSubExpr());
625 }
626 Value *VisitUnaryMinus (const UnaryOperator *E);
627 Value *VisitUnaryNot (const UnaryOperator *E);
628 Value *VisitUnaryLNot (const UnaryOperator *E);
629 Value *VisitUnaryReal (const UnaryOperator *E);
630 Value *VisitUnaryImag (const UnaryOperator *E);
631 Value *VisitUnaryExtension(const UnaryOperator *E) {
632 return Visit(E->getSubExpr());
633 }
634
635 // C++
636 Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
637 return EmitLoadOfLValue(E);
638 }
639 Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
640 auto &Ctx = CGF.getContext();
641 APValue Evaluated =
642 SLE->EvaluateInContext(Ctx, CGF.CurSourceLocExprScope.getDefaultExpr());
643 return ConstantEmitter(CGF).emitAbstract(SLE->getLocation(), Evaluated,
644 SLE->getType());
645 }
646
647 Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
648 CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
649 return Visit(DAE->getExpr());
650 }
651 Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
652 CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
653 return Visit(DIE->getExpr());
654 }
655 Value *VisitCXXThisExpr(CXXThisExpr *TE) {
656 return CGF.LoadCXXThis();
657 }
658
659 Value *VisitExprWithCleanups(ExprWithCleanups *E);
660 Value *VisitCXXNewExpr(const CXXNewExpr *E) {
661 return CGF.EmitCXXNewExpr(E);
662 }
663 Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
664 CGF.EmitCXXDeleteExpr(E);
665 return nullptr;
666 }
667
668 Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
669 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
670 }
671
672 Value *VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) {
673 return Builder.getInt1(E->isSatisfied());
674 }
675
676 Value *VisitRequiresExpr(const RequiresExpr *E) {
677 return Builder.getInt1(E->isSatisfied());
678 }
679
680 Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
681 return llvm::ConstantInt::get(Builder.getInt32Ty(), E->getValue());
682 }
683
684 Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
685 return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
686 }
687
688 Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
689 // C++ [expr.pseudo]p1:
690 // The result shall only be used as the operand for the function call
691 // operator (), and the result of such a call has type void. The only
692 // effect is the evaluation of the postfix-expression before the dot or
693 // arrow.
694 CGF.EmitScalarExpr(E->getBase());
695 return nullptr;
696 }
697
698 Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
699 return EmitNullValue(E->getType());
700 }
701
702 Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
703 CGF.EmitCXXThrowExpr(E);
704 return nullptr;
705 }
706
707 Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
708 return Builder.getInt1(E->getValue());
709 }
710
711 // Binary Operators.
712 Value *EmitMul(const BinOpInfo &Ops) {
713 if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
714 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
715 case LangOptions::SOB_Defined:
716 return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
717 case LangOptions::SOB_Undefined:
718 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
719 return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
720 LLVM_FALLTHROUGH[[gnu::fallthrough]];
721 case LangOptions::SOB_Trapping:
722 if (CanElideOverflowCheck(CGF.getContext(), Ops))
723 return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
724 return EmitOverflowCheckedBinOp(Ops);
725 }
726 }
727
728 if (Ops.Ty->isConstantMatrixType()) {
729 llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
730 // We need to check the types of the operands of the operator to get the
731 // correct matrix dimensions.
732 auto *BO = cast<BinaryOperator>(Ops.E);
733 auto *LHSMatTy = dyn_cast<ConstantMatrixType>(
734 BO->getLHS()->getType().getCanonicalType());
735 auto *RHSMatTy = dyn_cast<ConstantMatrixType>(
736 BO->getRHS()->getType().getCanonicalType());
737 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
738 if (LHSMatTy && RHSMatTy)
739 return MB.CreateMatrixMultiply(Ops.LHS, Ops.RHS, LHSMatTy->getNumRows(),
740 LHSMatTy->getNumColumns(),
741 RHSMatTy->getNumColumns());
742 return MB.CreateScalarMultiply(Ops.LHS, Ops.RHS);
743 }
744
745 if (Ops.Ty->isUnsignedIntegerType() &&
746 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
747 !CanElideOverflowCheck(CGF.getContext(), Ops))
748 return EmitOverflowCheckedBinOp(Ops);
749
750 if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
751 // Preserve the old values
752 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
753 return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
754 }
755 if (Ops.isFixedPointOp())
756 return EmitFixedPointBinOp(Ops);
757 return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
758 }
759 /// Create a binary op that checks for overflow.
760 /// Currently only supports +, - and *.
761 Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);
762
763 // Check for undefined division and modulus behaviors.
764 void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
765 llvm::Value *Zero,bool isDiv);
766 // Common helper for getting how wide LHS of shift is.
767 static Value *GetWidthMinusOneValue(Value* LHS,Value* RHS);
768
769 // Used for shifting constraints for OpenCL, do mask for powers of 2, URem for
770 // non powers of two.
771 Value *ConstrainShiftValue(Value *LHS, Value *RHS, const Twine &Name);
772
773 Value *EmitDiv(const BinOpInfo &Ops);
774 Value *EmitRem(const BinOpInfo &Ops);
775 Value *EmitAdd(const BinOpInfo &Ops);
776 Value *EmitSub(const BinOpInfo &Ops);
777 Value *EmitShl(const BinOpInfo &Ops);
778 Value *EmitShr(const BinOpInfo &Ops);
779 Value *EmitAnd(const BinOpInfo &Ops) {
780 return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
781 }
782 Value *EmitXor(const BinOpInfo &Ops) {
783 return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
784 }
785 Value *EmitOr (const BinOpInfo &Ops) {
786 return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
787 }
788
789 // Helper functions for fixed point binary operations.
790 Value *EmitFixedPointBinOp(const BinOpInfo &Ops);
791
792 BinOpInfo EmitBinOps(const BinaryOperator *E);
793 LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
794 Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
795 Value *&Result);
796
797 Value *EmitCompoundAssign(const CompoundAssignOperator *E,
798 Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
799
800 // Binary operators and binary compound assignment operators.
// Expands to the two visitor entry points for each arithmetic/bitwise binary
// operator OP: VisitBin<OP> evaluates both operands via EmitBinOps and
// forwards to Emit<OP>, while VisitBin<OP>Assign lowers "a OP= b" through
// EmitCompoundAssign using the same Emit<OP> member as the emitter callback.
// (Comments cannot be placed inside the macro's continuation lines.)
801#define HANDLEBINOP(OP) \
802 Value *VisitBin ## OP(const BinaryOperator *E) { \
803 return Emit ## OP(EmitBinOps(E)); \
804 } \
805 Value *VisitBin ## OP ## Assign(const CompoundAssignOperator *E) { \
806 return EmitCompoundAssign(E, &ScalarExprEmitter::Emit ## OP); \
807 }
808 HANDLEBINOP(Mul)
809 HANDLEBINOP(Div)
810 HANDLEBINOP(Rem)
811 HANDLEBINOP(Add)
812 HANDLEBINOP(Sub)
813 HANDLEBINOP(Shl)
814 HANDLEBINOP(Shr)
815 HANDLEBINOP(And)
816 HANDLEBINOP(Xor)
817 HANDLEBINOP(Or)
818#undef HANDLEBINOP
819
820 // Comparisons.
821 Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
822 llvm::CmpInst::Predicate SICmpOpc,
823 llvm::CmpInst::Predicate FCmpOpc, bool IsSignaling);
// Expands to one VisitBin<CODE> per comparison operator, dispatching to
// EmitCompare with the unsigned-integer, signed-integer and floating-point
// predicates for that operator. The final SIG argument feeds EmitCompare's
// IsSignaling parameter: true for the relational operators (LT/GT/LE/GE),
// false for equality (EQ/NE).
824#define VISITCOMP(CODE, UI, SI, FP, SIG) \
825 Value *VisitBin##CODE(const BinaryOperator *E) { \
826 return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
827 llvm::FCmpInst::FP, SIG); }
828 VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT, true)
829 VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT, true)
830 VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE, true)
831 VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE, true)
832 VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ, false)
833 VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE, false)
834#undef VISITCOMP
835
836 Value *VisitBinAssign (const BinaryOperator *E);
837
838 Value *VisitBinLAnd (const BinaryOperator *E);
839 Value *VisitBinLOr (const BinaryOperator *E);
840 Value *VisitBinComma (const BinaryOperator *E);
841
// Pointer-to-member access (".*" for direct, "->*" for indirect): both are
// lowered as an ordinary scalar load of the l-value they denote.
842 Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
843 Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }
844
// A rewritten binary operator (e.g. a comparison synthesized from
// operator<=>) is emitted simply by visiting its semantic form.
845 Value *VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
846 return Visit(E->getSemanticForm());
847 }
848
849 // Other Operators.
850 Value *VisitBlockExpr(const BlockExpr *BE);
851 Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
852 Value *VisitChooseExpr(ChooseExpr *CE);
853 Value *VisitVAArgExpr(VAArgExpr *VE);
// Objective-C literal expressions: each visitor simply delegates to the
// matching CodeGenFunction emitter.
854 Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
855 return CGF.EmitObjCStringLiteral(E);
856 }
// Boxed expression, e.g. @(x).
857 Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
858 return CGF.EmitObjCBoxedExpr(E);
859 }
// Array literal, e.g. @[a, b].
860 Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
861 return CGF.EmitObjCArrayLiteral(E);
862 }
// Dictionary literal, e.g. @{k : v}.
863 Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
864 return CGF.EmitObjCDictionaryLiteral(E);
865 }
866 Value *VisitAsTypeExpr(AsTypeExpr *CE);
867 Value *VisitAtomicExpr(AtomicExpr *AE);
868};
869} // end anonymous namespace.
870
871//===----------------------------------------------------------------------===//
872// Utilities
873//===----------------------------------------------------------------------===//
874
875/// EmitConversionToBool - Convert the specified expression value to a
876/// boolean (i1) truth value. This is equivalent to "Val != 0".
877Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
878 assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs")((void)0);
879
880 if (SrcType->isRealFloatingType())
881 return EmitFloatToBoolConversion(Src);
882
883 if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
884 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);
885
886 assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&((void)0)
887 "Unknown scalar type to convert")((void)0);
888
889 if (isa<llvm::IntegerType>(Src->getType()))
890 return EmitIntToBoolConversion(Src);
891
892 assert(isa<llvm::PointerType>(Src->getType()))((void)0);
893 return EmitPointerToBoolConversion(Src, SrcType);
894}
895
// Emit a ubsan float-cast-overflow check for a floating-point -> integer
// conversion: verifies at runtime that the (pre-conversion) source value
// lies strictly inside the destination integer's representable range.
// Non-integer destinations need no check and return immediately.
// OrigSrc/OrigSrcType are the value/type before any half->float widening;
// Src/SrcType are the operands actually being converted.
896 void ScalarExprEmitter::EmitFloatConversionCheck(
897 Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
898 QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
899 assert(SrcType->isFloatingType() && "not a conversion from floating point")((void)0);
900 if (!isa<llvm::IntegerType>(DstTy))
901 return;
902
903 CodeGenFunction::SanitizerScope SanScope(&CGF);
904 using llvm::APFloat;
905 using llvm::APSInt;
906
907 llvm::Value *Check = nullptr;
908 const llvm::fltSemantics &SrcSema =
909 CGF.getContext().getFloatTypeSemantics(OrigSrcType);
910
911 // Floating-point to integer. This has undefined behavior if the source is
912 // +-Inf, NaN, or doesn't fit into the destination type (after truncation
913 // to an integer).
914 unsigned Width = CGF.getContext().getIntWidth(DstType);
915 bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();
916
917 APSInt Min = APSInt::getMinValue(Width, Unsigned);
918 APFloat MinSrc(SrcSema, APFloat::uninitialized);
919 if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
920 APFloat::opOverflow)
921 // Don't need an overflow check for lower bound. Just check for
922 // -Inf/NaN.
923 MinSrc = APFloat::getInf(SrcSema, true);
924 else
925 // Find the largest value which is too small to represent (before
926 // truncation toward zero).
927 MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);
928
929 APSInt Max = APSInt::getMaxValue(Width, Unsigned);
930 APFloat MaxSrc(SrcSema, APFloat::uninitialized);
931 if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
932 APFloat::opOverflow)
933 // Don't need an overflow check for upper bound. Just check for
934 // +Inf/NaN.
935 MaxSrc = APFloat::getInf(SrcSema, false);
936 else
937 // Find the smallest value which is too large to represent (before
938 // truncation toward zero).
939 MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);
940
941 // If we're converting from __half, convert the range to float to match
942 // the type of src.
943 if (OrigSrcType->isHalfType()) {
944 const llvm::fltSemantics &Sema =
945 CGF.getContext().getFloatTypeSemantics(SrcType);
946 bool IsInexact;
947 MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
948 MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
949 }
950
// Strict ordered comparisons: NaN makes both FCmpOGT and FCmpOLT yield
// false, so NaN inputs fail the check without a separate test.
951 llvm::Value *GE =
952 Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
953 llvm::Value *LE =
954 Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
955 Check = Builder.CreateAnd(GE, LE);
956
957 llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
958 CGF.EmitCheckTypeDescriptor(OrigSrcType),
959 CGF.EmitCheckTypeDescriptor(DstType)};
960 CGF.EmitCheck(std::make_pair(Check, SanitizerKind::FloatCastOverflow),
961 SanitizerHandler::FloatCastOverflow, StaticArgs, OrigSrc);
962}
963
964// Should be called within CodeGenFunction::SanitizerScope RAII scope.
965// Returns 'i1 false' when the truncation Src -> Dst was lossy.
966static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
967 std::pair<llvm::Value *, SanitizerMask>>
968EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
969 QualType DstType, CGBuilderTy &Builder) {
970 llvm::Type *SrcTy = Src->getType();
971 llvm::Type *DstTy = Dst->getType();
972 (void)DstTy; // Only used in assert()
973
974 // This should be truncation of integral types.
975 assert(Src != Dst)((void)0);
976 assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits())((void)0);
977 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&((void)0)
978 "non-integer llvm type")((void)0);
979
980 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
981 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
982
983 // If both (src and dst) types are unsigned, then it's an unsigned truncation.
984 // Else, it is a signed truncation.
985 ScalarExprEmitter::ImplicitConversionCheckKind Kind;
986 SanitizerMask Mask;
987 if (!SrcSigned && !DstSigned) {
988 Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
989 Mask = SanitizerKind::ImplicitUnsignedIntegerTruncation;
990 } else {
991 Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
992 Mask = SanitizerKind::ImplicitSignedIntegerTruncation;
993 }
994
995 llvm::Value *Check = nullptr;
996 // 1. Extend the truncated value back to the same width as the Src.
997 Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
998 // 2. Equality-compare with the original source value
999 Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
1000 // If the comparison result is 'i1 false', then the truncation was lossy.
1001 return std::make_pair(Kind, std::make_pair(Check, Mask));
1002}
1003
1004static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
1005 QualType SrcType, QualType DstType) {
1006 return SrcType->isIntegerType() && DstType->isIntegerType();
1007}
1008
// Emit a ubsan implicit-integer-truncation check for the conversion
// Src (SrcType) -> Dst (DstType), if any truncation sanitizer is enabled
// and the conversion is an actual int->int narrowing.
1009 void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
1010 Value *Dst, QualType DstType,
1011 SourceLocation Loc) {
1012 if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
1013 return;
1014
1015 // We only care about int->int conversions here.
1016 // We ignore conversions to/from pointer and/or bool.
1017 if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
1018 DstType))
1019 return;
1020
1021 unsigned SrcBits = Src->getType()->getScalarSizeInBits();
1022 unsigned DstBits = Dst->getType()->getScalarSizeInBits();
1023 // This must be truncation. Else we do not care.
1024 if (SrcBits <= DstBits)
1025 return;
1026
1027 assert(!DstType->isBooleanType() && "we should not get here with booleans.")((void)0);
1028
1029 // If the integer sign change sanitizer is enabled,
1030 // and we are truncating from larger unsigned type to smaller signed type,
1031 // let that next sanitizer deal with it.
1032 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1033 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1034 if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
1035 (!SrcSigned && DstSigned))
1036 return;
1037
1038 CodeGenFunction::SanitizerScope SanScope(&CGF);
1039
// NOTE: the helper emits the widen+compare IR before we know whether this
// particular truncation kind is sanitized; if the mask test below fails we
// return and leave those instructions behind (presumably cleaned up by
// later optimization — confirm against upstream intent).
1040 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1041 std::pair<llvm::Value *, SanitizerMask>>
1042 Check =
1043 EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1044 // If the comparison result is 'i1 false', then the truncation was lossy.
1045
1046 // Do we care about this type of truncation?
1047 if (!CGF.SanOpts.has(Check.second.second))
1048 return;
1049
1050 llvm::Constant *StaticArgs[] = {
1051 CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
1052 CGF.EmitCheckTypeDescriptor(DstType),
1053 llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first)};
1054 CGF.EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
1055 {Src, Dst});
1056}
1057
1058// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1059// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
// Builds the sign-change predicate for Src -> Dst: computes "was negative"
// for each side (a constant false for unsigned types) and equality-compares
// the two negativity bits. Returns the check kind plus (predicate, mask);
// the predicate is 'i1 false' exactly when the conversion changed the sign.
1060 static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1061 std::pair<llvm::Value *, SanitizerMask>>
1062 EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
1063 QualType DstType, CGBuilderTy &Builder) {
1064 llvm::Type *SrcTy = Src->getType();
1065 llvm::Type *DstTy = Dst->getType();
1066
1067 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&((void)0)
1068 "non-integer llvm type")((void)0);
1069
1070 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1071 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1072 (void)SrcSigned; // Only used in assert()
1073 (void)DstSigned; // Only used in assert()
1074 unsigned SrcBits = SrcTy->getScalarSizeInBits();
1075 unsigned DstBits = DstTy->getScalarSizeInBits();
1076 (void)SrcBits; // Only used in assert()
1077 (void)DstBits; // Only used in assert()
1078
1079 assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&((void)0)
1080 "either the widths should be different, or the signednesses.")((void)0);
1081
1082 // NOTE: zero value is considered to be non-negative.
1083 auto EmitIsNegativeTest = [&Builder](Value *V, QualType VType,
1084 const char *Name) -> Value * {
1085 // Is this value a signed type?
1086 bool VSigned = VType->isSignedIntegerOrEnumerationType();
1087 llvm::Type *VTy = V->getType();
1088 if (!VSigned) {
1089 // If the value is unsigned, then it is never negative.
1090 // FIXME: can we encounter non-scalar VTy here?
1091 return llvm::ConstantInt::getFalse(VTy->getContext());
1092 }
1093 // Get the zero of the same type with which we will be comparing.
1094 llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
1095 // %V.isnegative = icmp slt %V, 0
1096 // I.e is %V *strictly* less than zero, does it have negative value?
1097 return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
1098 llvm::Twine(Name) + "." + V->getName() +
1099 ".negativitycheck");
1100 };
1101
1102 // 1. Was the old Value negative?
1103 llvm::Value *SrcIsNegative = EmitIsNegativeTest(Src, SrcType, "src");
1104 // 2. Is the new Value negative?
1105 llvm::Value *DstIsNegative = EmitIsNegativeTest(Dst, DstType, "dst");
1106 // 3. Now, was the 'negativity status' preserved during the conversion?
1107 // NOTE: conversion from negative to zero is considered to change the sign.
1108 // (We want to get 'false' when the conversion changed the sign)
1109 // So we should just equality-compare the negativity statuses.
1110 llvm::Value *Check = nullptr;
1111 Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
1112 // If the comparison result is 'false', then the conversion changed the sign.
1113 return std::make_pair(
1114 ScalarExprEmitter::ICCK_IntegerSignChange,
1115 std::make_pair(Check, SanitizerKind::ImplicitIntegerSignChange));
1116}
1117
// Emit a ubsan implicit-integer-sign-change check for Src -> Dst, first
// pruning every case where the check is provably redundant (same type,
// both unsigned, widening to signed, or already covered by the signed
// truncation sanitizer). May additionally fold in a truncation check for
// the unsigned->smaller-signed case that EmitIntegerTruncationCheck skips.
1118 void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
1119 Value *Dst, QualType DstType,
1120 SourceLocation Loc) {
1121 if (!CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange))
1122 return;
1123
1124 llvm::Type *SrcTy = Src->getType();
1125 llvm::Type *DstTy = Dst->getType();
1126
1127 // We only care about int->int conversions here.
1128 // We ignore conversions to/from pointer and/or bool.
1129 if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
1130 DstType))
1131 return;
1132
1133 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1134 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1135 unsigned SrcBits = SrcTy->getScalarSizeInBits();
1136 unsigned DstBits = DstTy->getScalarSizeInBits();
1137
1138 // Now, we do not need to emit the check in *all* of the cases.
1139 // We can avoid emitting it in some obvious cases where it would have been
1140 // dropped by the opt passes (instcombine) always anyways.
1141 // If it's a cast between effectively the same type, no check.
1142 // NOTE: this is *not* equivalent to checking the canonical types.
1143 if (SrcSigned == DstSigned && SrcBits == DstBits)
1144 return;
1145 // At least one of the values needs to have signed type.
1146 // If both are unsigned, then obviously, neither of them can be negative.
1147 if (!SrcSigned && !DstSigned)
1148 return;
1149 // If the conversion is to *larger* *signed* type, then no check is needed.
1150 // Because either sign-extension happens (so the sign will remain),
1151 // or zero-extension will happen (the sign bit will be zero.)
1152 if ((DstBits > SrcBits) && DstSigned)
1153 return;
1154 if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1155 (SrcBits > DstBits) && SrcSigned) {
1156 // If the signed integer truncation sanitizer is enabled,
1157 // and this is a truncation from signed type, then no check is needed.
1158 // Because here sign change check is interchangeable with truncation check.
1159 return;
1160 }
1161 // That's it. We can't rule out any more cases with the data we have.
1162
1163 CodeGenFunction::SanitizerScope SanScope(&CGF);
1164
1165 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1166 std::pair<llvm::Value *, SanitizerMask>>
1167 Check;
1168
1169 // Each of these checks needs to return 'false' when an issue was detected.
1170 ImplicitConversionCheckKind CheckKind;
1171 llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
1172 // So we can 'and' all the checks together, and still get 'false',
1173 // if at least one of the checks detected an issue.
1174
1175 Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
1176 CheckKind = Check.first;
1177 Checks.emplace_back(Check.second);
1178
1179 if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1180 (SrcBits > DstBits) && !SrcSigned && DstSigned) {
1181 // If the signed integer truncation sanitizer was enabled,
1182 // and we are truncating from larger unsigned type to smaller signed type,
1183 // let's handle the case we skipped in that check.
1184 Check =
1185 EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1186 CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
1187 Checks.emplace_back(Check.second);
1188 // If the comparison result is 'i1 false', then the truncation was lossy.
1189 }
1190
1191 llvm::Constant *StaticArgs[] = {
1192 CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
1193 CGF.EmitCheckTypeDescriptor(DstType),
1194 llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind)};
1195 // EmitCheck() will 'and' all the checks together.
1196 CGF.EmitCheck(Checks, SanitizerHandler::ImplicitConversion, StaticArgs,
1197 {Src, Dst});
1198}
1199
// Emit the arithmetic cast Src (SrcTy/SrcType) -> DstTy/DstType, choosing
// among int<->int, int<->fp, and fp<->fp casts based on the *element* types
// (so matrix-of-T converts the same way T does). Both types must be matrix
// types or neither.
1200 Value *ScalarExprEmitter::EmitScalarCast(Value *Src, QualType SrcType,
1201 QualType DstType, llvm::Type *SrcTy,
1202 llvm::Type *DstTy,
1203 ScalarConversionOpts Opts) {
1204 // The Element types determine the type of cast to perform.
1205 llvm::Type *SrcElementTy;
1206 llvm::Type *DstElementTy;
1207 QualType SrcElementType;
1208 QualType DstElementType;
1209 if (SrcType->isMatrixType() && DstType->isMatrixType()) {
1210 SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
1211 DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
1212 SrcElementType = SrcType->castAs<MatrixType>()->getElementType();
1213 DstElementType = DstType->castAs<MatrixType>()->getElementType();
1214 } else {
1215 assert(!SrcType->isMatrixType() && !DstType->isMatrixType() &&((void)0)
1216 "cannot cast between matrix and non-matrix types")((void)0);
1217 SrcElementTy = SrcTy;
1218 DstElementTy = DstTy;
1219 SrcElementType = SrcType;
1220 DstElementType = DstType;
1221 }
1222
1223 if (isa<llvm::IntegerType>(SrcElementTy)) {
// The source's signedness controls whether int widening/int->fp uses a
// signed or unsigned conversion; bool is forced signed when requested.
1224 bool InputSigned = SrcElementType->isSignedIntegerOrEnumerationType();
1225 if (SrcElementType->isBooleanType() && Opts.TreatBooleanAsSigned) {
1226 InputSigned = true;
1227 }
1228
1229 if (isa<llvm::IntegerType>(DstElementTy))
1230 return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
1231 if (InputSigned)
1232 return Builder.CreateSIToFP(Src, DstTy, "conv");
1233 return Builder.CreateUIToFP(Src, DstTy, "conv");
1234 }
1235
1236 if (isa<llvm::IntegerType>(DstElementTy)) {
1237 assert(SrcElementTy->isFloatingPointTy() && "Unknown real conversion")((void)0);
1238 if (DstElementType->isSignedIntegerOrEnumerationType())
1239 return Builder.CreateFPToSI(Src, DstTy, "conv");
1240 return Builder.CreateFPToUI(Src, DstTy, "conv");
1241 }
1242
// fp -> fp: compares llvm::Type::TypeID values to decide trunc vs ext —
// this presumably relies on the FP TypeID enumerators being ordered from
// narrower to wider types (NOTE(review): confirm against llvm/IR/Type.h).
1243 if (DstElementTy->getTypeID() < SrcElementTy->getTypeID())
1244 return Builder.CreateFPTrunc(Src, DstTy, "conv");
1245 return Builder.CreateFPExt(Src, DstTy, "conv");
1246}
1247
1248/// Emit a conversion from the specified type to the specified destination type,
1249/// both of which are LLVM scalar types.
// Central scalar-conversion dispatcher: lowers Src (SrcType) to DstType,
// routing fixed-point, bool, half, pointer, vector-splat, matrix, and
// vector cases before falling through to plain arithmetic casts via
// EmitScalarCast. Returns nullptr when DstType is void. Opts controls the
// optional ubsan implicit-conversion checks and bool-as-signed handling.
1250 Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
1251 QualType DstType,
1252 SourceLocation Loc,
1253 ScalarConversionOpts Opts) {
1254 // All conversions involving fixed point types should be handled by the
1255 // EmitFixedPoint family functions. This is done to prevent bloating up this
1256 // function more, and although fixed point numbers are represented by
1257 // integers, we do not want to follow any logic that assumes they should be
1258 // treated as integers.
1259 // TODO(leonardchan): When necessary, add another if statement checking for
1260 // conversions to fixed point types from other types.
1261 if (SrcType->isFixedPointType()) {
1262 if (DstType->isBooleanType())
1263 // It is important that we check this before checking if the dest type is
1264 // an integer because booleans are technically integer types.
1265 // We do not need to check the padding bit on unsigned types if unsigned
1266 // padding is enabled because overflow into this bit is undefined
1267 // behavior.
1268 return Builder.CreateIsNotNull(Src, "tobool");
1269 if (DstType->isFixedPointType() || DstType->isIntegerType() ||
1270 DstType->isRealFloatingType())
1271 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1272
1273 llvm_unreachable(__builtin_unreachable()
1274 "Unhandled scalar conversion from a fixed point type to another type.")__builtin_unreachable();
1275 } else if (DstType->isFixedPointType()) {
1276 if (SrcType->isIntegerType() || SrcType->isRealFloatingType())
1277 // This also includes converting booleans and enums to fixed point types.
1278 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1279
1280 llvm_unreachable(__builtin_unreachable()
1281 "Unhandled scalar conversion to a fixed point type from another type.")__builtin_unreachable();
1282 }
1283
// The sanitizer checks below want the user-visible (non-canonical) types
// for diagnostics, so remember them before canonicalizing.
1284 QualType NoncanonicalSrcType = SrcType;
1285 QualType NoncanonicalDstType = DstType;
1286
1287 SrcType = CGF.getContext().getCanonicalType(SrcType);
1288 DstType = CGF.getContext().getCanonicalType(DstType);
1289 if (SrcType == DstType) return Src;
1290
1291 if (DstType->isVoidType()) return nullptr;
1292
1293 llvm::Value *OrigSrc = Src;
1294 QualType OrigSrcType = SrcType;
1295 llvm::Type *SrcTy = Src->getType();
1296
1297 // Handle conversions to bool first, they are special: comparisons against 0.
1298 if (DstType->isBooleanType())
1299 return EmitConversionToBool(Src, SrcType);
1300
1301 llvm::Type *DstTy = ConvertType(DstType);
1302
1303 // Cast from half through float if half isn't a native type.
1304 if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1305 // Cast to FP using the intrinsic if the half type itself isn't supported.
1306 if (DstTy->isFloatingPointTy()) {
1307 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
1308 return Builder.CreateCall(
1309 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy),
1310 Src);
1311 } else {
1312 // Cast to other types through float, using either the intrinsic or FPExt,
1313 // depending on whether the half type itself is supported
1314 // (as opposed to operations on half, available with NativeHalfType).
1315 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
1316 Src = Builder.CreateCall(
1317 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
1318 CGF.CGM.FloatTy),
1319 Src);
1320 } else {
1321 Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
1322 }
1323 SrcType = CGF.getContext().FloatTy;
1324 SrcTy = CGF.FloatTy;
1325 }
1326 }
1327
1328 // Ignore conversions like int -> uint.
1329 if (SrcTy == DstTy) {
1330 if (Opts.EmitImplicitIntegerSignChangeChecks)
1331 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Src,
1332 NoncanonicalDstType, Loc);
1333
1334 return Src;
1335 }
1336
1337 // Handle pointer conversions next: pointers can only be converted to/from
1338 // other pointers and integers. Check for pointer types in terms of LLVM, as
1339 // some native types (like Obj-C id) may map to a pointer type.
1340 if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
1341 // The source value may be an integer, or a pointer.
1342 if (isa<llvm::PointerType>(SrcTy))
1343 return Builder.CreateBitCast(Src, DstTy, "conv");
1344
1345 assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?")((void)0);
1346 // First, convert to the correct width so that we control the kind of
1347 // extension.
1348 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
1349 bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
1350 llvm::Value* IntResult =
1351 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
1352 // Then, cast to pointer.
1353 return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
1354 }
1355
1356 if (isa<llvm::PointerType>(SrcTy)) {
1357 // Must be an ptr to int cast.
1358 assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?")((void)0);
1359 return Builder.CreatePtrToInt(Src, DstTy, "conv");
1360 }
1361
1362 // A scalar can be splatted to an extended vector of the same element type
1363 if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
1364 // Sema should add casts to make sure that the source expression's type is
1365 // the same as the vector's element type (sans qualifiers)
1366 assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==((void)0)
1367 SrcType.getTypePtr() &&((void)0)
1368 "Splatted expr doesn't match with vector element type?")((void)0);
1369
1370 // Splat the element across to all elements
1371 unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
1372 return Builder.CreateVectorSplat(NumElements, Src, "splat");
1373 }
1374
1375 if (SrcType->isMatrixType() && DstType->isMatrixType())
1376 return EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1377
1378 if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
1379 // Allow bitcast from vector to integer/fp of the same size.
1380 unsigned SrcSize = SrcTy->getPrimitiveSizeInBits();
1381 unsigned DstSize = DstTy->getPrimitiveSizeInBits();
1382 if (SrcSize == DstSize)
1383 return Builder.CreateBitCast(Src, DstTy, "conv");
1384
1385 // Conversions between vectors of different sizes are not allowed except
1386 // when vectors of half are involved. Operations on storage-only half
1387 // vectors require promoting half vector operands to float vectors and
1388 // truncating the result, which is either an int or float vector, to a
1389 // short or half vector.
1390
1391 // Source and destination are both expected to be vectors.
1392 llvm::Type *SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
1393 llvm::Type *DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
1394 (void)DstElementTy;
1395
1396 assert(((SrcElementTy->isIntegerTy() &&((void)0)
1397 DstElementTy->isIntegerTy()) ||((void)0)
1398 (SrcElementTy->isFloatingPointTy() &&((void)0)
1399 DstElementTy->isFloatingPointTy())) &&((void)0)
1400 "unexpected conversion between a floating-point vector and an "((void)0)
1401 "integer vector")((void)0);
1402
1403 // Truncate an i32 vector to an i16 vector.
1404 if (SrcElementTy->isIntegerTy())
1405 return Builder.CreateIntCast(Src, DstTy, false, "conv");
1406
1407 // Truncate a float vector to a half vector.
1408 if (SrcSize > DstSize)
1409 return Builder.CreateFPTrunc(Src, DstTy, "conv");
1410
1411 // Promote a half vector to a float vector.
1412 return Builder.CreateFPExt(Src, DstTy, "conv");
1413 }
1414
1415 // Finally, we have the arithmetic types: real int/float.
1416 Value *Res = nullptr;
1417 llvm::Type *ResTy = DstTy;
1418
1419 // An overflowing conversion has undefined behavior if either the source type
1420 // or the destination type is a floating-point type. However, we consider the
1421 // range of representable values for all floating-point types to be
1422 // [-inf,+inf], so no overflow can ever happen when the destination type is a
1423 // floating-point type.
1424 if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
1425 OrigSrcType->isFloatingType())
1426 EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
1427 Loc);
1428
1429 // Cast to half through float if half isn't a native type.
1430 if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1431 // Make sure we cast in a single step if from another FP type.
1432 if (SrcTy->isFloatingPointTy()) {
1433 // Use the intrinsic if the half type itself isn't supported
1434 // (as opposed to operations on half, available with NativeHalfType).
1435 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
1436 return Builder.CreateCall(
1437 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src);
1438 // If the half type is supported, just use an fptrunc.
1439 return Builder.CreateFPTrunc(Src, DstTy);
1440 }
1441 DstTy = CGF.FloatTy;
1442 }
1443
1444 Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1445
// DstTy was redirected to float above for non-native half destinations;
// ResTy still holds the real destination, so finish the float -> half step.
1446 if (DstTy != ResTy) {
1447 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
1448 assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion")((void)0);
1449 Res = Builder.CreateCall(
1450 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, CGF.CGM.FloatTy),
1451 Res);
1452 } else {
1453 Res = Builder.CreateFPTrunc(Res, ResTy, "conv");
1454 }
1455 }
1456
1457 if (Opts.EmitImplicitIntegerTruncationChecks)
1458 EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res,
1459 NoncanonicalDstType, Loc);
1460
1461 if (Opts.EmitImplicitIntegerSignChangeChecks)
1462 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Res,
1463 NoncanonicalDstType, Loc);
1464
1465 return Res;
1466}
1467
1468Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
1469 QualType DstTy,
1470 SourceLocation Loc) {
1471 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
1472 llvm::Value *Result;
1473 if (SrcTy->isRealFloatingType())
1474 Result = FPBuilder.CreateFloatingToFixed(Src,
1475 CGF.getContext().getFixedPointSemantics(DstTy));
1476 else if (DstTy->isRealFloatingType())
1477 Result = FPBuilder.CreateFixedToFloating(Src,
1478 CGF.getContext().getFixedPointSemantics(SrcTy),
1479 ConvertType(DstTy));
1480 else {
1481 auto SrcFPSema = CGF.getContext().getFixedPointSemantics(SrcTy);
1482 auto DstFPSema = CGF.getContext().getFixedPointSemantics(DstTy);
1483
1484 if (DstTy->isIntegerType())
1485 Result = FPBuilder.CreateFixedToInteger(Src, SrcFPSema,
1486 DstFPSema.getWidth(),
1487 DstFPSema.isSigned());
1488 else if (SrcTy->isIntegerType())
1489 Result = FPBuilder.CreateIntegerToFixed(Src, SrcFPSema.isSigned(),
1490 DstFPSema);
1491 else
1492 Result = FPBuilder.CreateFixedToFixed(Src, SrcFPSema, DstFPSema);
1493 }
1494 return Result;
1495}
1496
1497/// Emit a conversion from the specified complex type to the specified
1498/// destination type, where the destination type is an LLVM scalar type.
1499Value *ScalarExprEmitter::EmitComplexToScalarConversion(
1500 CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy,
1501 SourceLocation Loc) {
1502 // Get the source element type.
1503 SrcTy = SrcTy->castAs<ComplexType>()->getElementType();
1504
1505 // Handle conversions to bool first, they are special: comparisons against 0.
1506 if (DstTy->isBooleanType()) {
1507 // Complex != 0 -> (Real != 0) | (Imag != 0)
1508 Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1509 Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy, Loc);
1510 return Builder.CreateOr(Src.first, Src.second, "tobool");
1511 }
1512
1513 // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
1514 // the imaginary part of the complex value is discarded and the value of the
1515 // real part is converted according to the conversion rules for the
1516 // corresponding real type.
1517 return EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1518}
1519
1520Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
1521 return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty);
1522}
1523
/// Emit a sanitization check for the given "binary" operation (which
/// might actually be a unary increment which has been lowered to a binary
/// operation). The check passes if all values in \p Checks (which are \c i1),
/// are \c true.
void ScalarExprEmitter::EmitBinOpCheck(
    ArrayRef<std::pair<Value *, SanitizerMask>> Checks, const BinOpInfo &Info) {
  assert(CGF.IsSanitizerScope);
  SanitizerHandler Check;
  // StaticData becomes constant data attached to the check; DynamicData holds
  // the runtime operand values passed to the sanitizer handler.
  SmallVector<llvm::Constant *, 4> StaticData;
  SmallVector<llvm::Value *, 2> DynamicData;

  // Compound assignments (e.g. '+=') are checked as their underlying binary
  // operation.
  BinaryOperatorKind Opcode = Info.Opcode;
  if (BinaryOperator::isCompoundAssignmentOp(Opcode))
    Opcode = BinaryOperator::getOpForCompoundAssignment(Opcode);

  // Every handler's static data starts with the source location.
  StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
  const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
  if (UO && UO->getOpcode() == UO_Minus) {
    // Unary negation lowered to "0 - x": only the RHS operand is dynamic.
    Check = SanitizerHandler::NegateOverflow;
    StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
    DynamicData.push_back(Info.RHS);
  } else {
    if (BinaryOperator::isShiftOp(Opcode)) {
      // Shift LHS negative or too large, or RHS out of bounds.
      Check = SanitizerHandler::ShiftOutOfBounds;
      const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
      StaticData.push_back(
          CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
      StaticData.push_back(
          CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
    } else if (Opcode == BO_Div || Opcode == BO_Rem) {
      // Divide or modulo by zero, or signed overflow (eg INT_MAX / -1).
      Check = SanitizerHandler::DivremOverflow;
      StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
    } else {
      // Arithmetic overflow (+, -, *).
      switch (Opcode) {
      case BO_Add: Check = SanitizerHandler::AddOverflow; break;
      case BO_Sub: Check = SanitizerHandler::SubOverflow; break;
      case BO_Mul: Check = SanitizerHandler::MulOverflow; break;
      default: llvm_unreachable("unexpected opcode for bin op check");
      }
      StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
    }
    // All true binary cases pass both operands to the handler.
    DynamicData.push_back(Info.LHS);
    DynamicData.push_back(Info.RHS);
  }

  CGF.EmitCheck(Checks, Check, StaticData, DynamicData);
}
1574
1575//===----------------------------------------------------------------------===//
1576// Visitor Methods
1577//===----------------------------------------------------------------------===//
1578
1579Value *ScalarExprEmitter::VisitExpr(Expr *E) {
1580 CGF.ErrorUnsupported(E, "scalar expression");
1581 if (E->getType()->isVoidType())
1582 return nullptr;
1583 return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
1584}
1585
1586Value *
1587ScalarExprEmitter::VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) {
1588 ASTContext &Context = CGF.getContext();
1589 llvm::Optional<LangAS> GlobalAS =
1590 Context.getTargetInfo().getConstantAddressSpace();
1591 llvm::Constant *GlobalConstStr = Builder.CreateGlobalStringPtr(
1592 E->ComputeName(Context), "__usn_str",
1593 static_cast<unsigned>(GlobalAS.getValueOr(LangAS::Default)));
1594
1595 unsigned ExprAS = Context.getTargetAddressSpace(E->getType());
1596
1597 if (GlobalConstStr->getType()->getPointerAddressSpace() == ExprAS)
1598 return GlobalConstStr;
1599
1600 llvm::Type *EltTy = GlobalConstStr->getType()->getPointerElementType();
1601 llvm::PointerType *NewPtrTy = llvm::PointerType::get(EltTy, ExprAS);
1602 return Builder.CreateAddrSpaceCast(GlobalConstStr, NewPtrTy, "usn_addr_cast");
1603}
1604
/// Emit __builtin_shufflevector.  Two forms exist: a two-operand form where
/// the second operand is a runtime index vector (vector mask), and an
/// N-operand form where the trailing sub-expressions are constant indices.
Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
  // Vector Mask Case
  if (E->getNumSubExprs() == 2) {
    Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
    Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
    Value *Mask;

    auto *LTy = cast<llvm::FixedVectorType>(LHS->getType());
    unsigned LHSElts = LTy->getNumElements();

    // The second operand is the runtime mask of indices into LHS.
    Mask = RHS;

    auto *MTy = cast<llvm::FixedVectorType>(Mask->getType());

    // Mask off the high bits of each shuffle index.
    Value *MaskBits =
        llvm::ConstantInt::get(MTy, llvm::NextPowerOf2(LHSElts - 1) - 1);
    Mask = Builder.CreateAnd(Mask, MaskBits, "mask");

    // Runtime indices cannot feed a shufflevector instruction (its mask must
    // be constant), so expand to an extract/insert loop:
    // newv = undef
    // mask = mask & maskbits
    // for each elt
    //   n = extract mask i
    //   x = extract val n
    //   newv = insert newv, x, i
    auto *RTy = llvm::FixedVectorType::get(LTy->getElementType(),
                                           MTy->getNumElements());
    Value* NewV = llvm::UndefValue::get(RTy);
    for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
      Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i);
      Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");

      Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
      NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins");
    }
    return NewV;
  }

  // Constant-index case: sub-expressions 2..N are the literal mask entries.
  Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
  Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));

  SmallVector<int, 32> Indices;
  for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
    llvm::APSInt Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2);
    // Check for -1 and output it as undef in the IR.
    if (Idx.isSigned() && Idx.isAllOnesValue())
      Indices.push_back(-1);
    else
      Indices.push_back(Idx.getZExtValue());
  }

  return Builder.CreateShuffleVector(V1, V2, Indices, "shuffle");
}
1658
/// Emit __builtin_convertvector: an element-wise conversion between two
/// vector types with the same number of elements.
Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
  QualType SrcType = E->getSrcExpr()->getType(),
           DstType = E->getType();

  Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());

  SrcType = CGF.getContext().getCanonicalType(SrcType);
  DstType = CGF.getContext().getCanonicalType(DstType);
  if (SrcType == DstType) return Src;

  assert(SrcType->isVectorType() &&
         "ConvertVector source type must be a vector");
  assert(DstType->isVectorType() &&
         "ConvertVector destination type must be a vector");

  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = ConvertType(DstType);

  // Ignore conversions like int -> uint.
  if (SrcTy == DstTy)
    return Src;

  QualType SrcEltType = SrcType->castAs<VectorType>()->getElementType(),
           DstEltType = DstType->castAs<VectorType>()->getElementType();

  assert(SrcTy->isVectorTy() &&
         "ConvertVector source IR type must be a vector");
  assert(DstTy->isVectorTy() &&
         "ConvertVector destination IR type must be a vector");

  llvm::Type *SrcEltTy = cast<llvm::VectorType>(SrcTy)->getElementType(),
             *DstEltTy = cast<llvm::VectorType>(DstTy)->getElementType();

  // Conversions to bool compare the whole source vector against zero.
  if (DstEltType->isBooleanType()) {
    assert((SrcEltTy->isFloatingPointTy() ||
            isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion");

    llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy);
    if (SrcEltTy->isFloatingPointTy()) {
      return Builder.CreateFCmpUNE(Src, Zero, "tobool");
    } else {
      return Builder.CreateICmpNE(Src, Zero, "tobool");
    }
  }

  // We have the arithmetic types: real int/float.
  Value *Res = nullptr;

  if (isa<llvm::IntegerType>(SrcEltTy)) {
    // Integer source: the source's signedness selects the kind of extension
    // and the int-to-float instruction.
    bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
    if (isa<llvm::IntegerType>(DstEltTy))
      Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
    else if (InputSigned)
      Res = Builder.CreateSIToFP(Src, DstTy, "conv");
    else
      Res = Builder.CreateUIToFP(Src, DstTy, "conv");
  } else if (isa<llvm::IntegerType>(DstEltTy)) {
    // Float source, integer destination: destination signedness selects the
    // fp-to-int instruction.
    assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
    if (DstEltType->isSignedIntegerOrEnumerationType())
      Res = Builder.CreateFPToSI(Src, DstTy, "conv");
    else
      Res = Builder.CreateFPToUI(Src, DstTy, "conv");
  } else {
    assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
           "Unknown real conversion");
    // Float-to-float: compare LLVM type IDs (ordered by width) to choose
    // truncation vs. extension.
    if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
      Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
    else
      Res = Builder.CreateFPExt(Src, DstTy, "conv");
  }

  return Res;
}
1732
1733Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
1734 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) {
1735 CGF.EmitIgnoredExpr(E->getBase());
1736 return CGF.emitScalarConstant(Constant, E);
1737 } else {
1738 Expr::EvalResult Result;
1739 if (E->EvaluateAsInt(Result, CGF.getContext(), Expr::SE_AllowSideEffects)) {
1740 llvm::APSInt Value = Result.Val.getInt();
1741 CGF.EmitIgnoredExpr(E->getBase());
1742 return Builder.getInt(Value);
1743 }
1744 }
1745
1746 return EmitLoadOfLValue(E);
1747}
1748
1749Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
1750 TestAndClearIgnoreResultAssign();
1751
1752 // Emit subscript expressions in rvalue context's. For most cases, this just
1753 // loads the lvalue formed by the subscript expr. However, we have to be
1754 // careful, because the base of a vector subscript is occasionally an rvalue,
1755 // so we can't get it as an lvalue.
1756 if (!E->getBase()->getType()->isVectorType())
1757 return EmitLoadOfLValue(E);
1758
1759 // Handle the vector case. The base must be a vector, the index must be an
1760 // integer value.
1761 Value *Base = Visit(E->getBase());
1762 Value *Idx = Visit(E->getIdx());
1763 QualType IdxTy = E->getIdx()->getType();
1764
1765 if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
1766 CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);
1767
1768 return Builder.CreateExtractElement(Base, Idx, "vecext");
1769}
1770
1771Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
1772 TestAndClearIgnoreResultAssign();
1773
1774 // Handle the vector case. The base must be a vector, the index must be an
1775 // integer value.
1776 Value *RowIdx = Visit(E->getRowIdx());
1777 Value *ColumnIdx = Visit(E->getColumnIdx());
1778 Value *Matrix = Visit(E->getBase());
1779
1780 // TODO: Should we emit bounds checks with SanitizerKind::ArrayBounds?
1781 llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
1782 return MB.CreateExtractElement(
1783 Matrix, RowIdx, ColumnIdx,
1784 E->getBase()->getType()->castAs<ConstantMatrixType>()->getNumRows());
1785}
1786
1787static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
1788 unsigned Off) {
1789 int MV = SVI->getMaskValue(Idx);
1790 if (MV == -1)
1791 return -1;
1792 return Off + MV;
1793}
1794
1795static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
1796 assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) &&((void)0)
1797 "Index operand too large for shufflevector mask!")((void)0);
1798 return C->getZExtValue();
1799}
1800
1801Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
1802 bool Ignore = TestAndClearIgnoreResultAssign();
1803 (void)Ignore;
1804 assert (Ignore == false && "init list ignored")((void)0);
1805 unsigned NumInitElements = E->getNumInits();
1806
1807 if (E->hadArrayRangeDesignator())
1808 CGF.ErrorUnsupported(E, "GNU array range designator extension");
1809
1810 llvm::VectorType *VType =
1811 dyn_cast<llvm::VectorType>(ConvertType(E->getType()));
1812
1813 if (!VType) {
1814 if (NumInitElements == 0) {
1815 // C++11 value-initialization for the scalar.
1816 return EmitNullValue(E->getType());
1817 }
1818 // We have a scalar in braces. Just use the first element.
1819 return Visit(E->getInit(0));
1820 }
1821
1822 unsigned ResElts = cast<llvm::FixedVectorType>(VType)->getNumElements();
1823
1824 // Loop over initializers collecting the Value for each, and remembering
1825 // whether the source was swizzle (ExtVectorElementExpr). This will allow
1826 // us to fold the shuffle for the swizzle into the shuffle for the vector
1827 // initializer, since LLVM optimizers generally do not want to touch
1828 // shuffles.
1829 unsigned CurIdx = 0;
1830 bool VIsUndefShuffle = false;
1831 llvm::Value *V = llvm::UndefValue::get(VType);
1832 for (unsigned i = 0; i != NumInitElements; ++i) {
1833 Expr *IE = E->getInit(i);
1834 Value *Init = Visit(IE);
1835 SmallVector<int, 16> Args;
1836
1837 llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());
1838
1839 // Handle scalar elements. If the scalar initializer is actually one
1840 // element of a different vector of the same width, use shuffle instead of
1841 // extract+insert.
1842 if (!VVT) {
1843 if (isa<ExtVectorElementExpr>(IE)) {
1844 llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);
1845
1846 if (cast<llvm::FixedVectorType>(EI->getVectorOperandType())
1847 ->getNumElements() == ResElts) {
1848 llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
1849 Value *LHS = nullptr, *RHS = nullptr;
1850 if (CurIdx == 0) {
1851 // insert into undef -> shuffle (src, undef)
1852 // shufflemask must use an i32
1853 Args.push_back(getAsInt32(C, CGF.Int32Ty));
1854 Args.resize(ResElts, -1);
1855
1856 LHS = EI->getVectorOperand();
1857 RHS = V;
1858 VIsUndefShuffle = true;
1859 } else if (VIsUndefShuffle) {
1860 // insert into undefshuffle && size match -> shuffle (v, src)
1861 llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
1862 for (unsigned j = 0; j != CurIdx; ++j)
1863 Args.push_back(getMaskElt(SVV, j, 0));
1864 Args.push_back(ResElts + C->getZExtValue());
1865 Args.resize(ResElts, -1);
1866
1867 LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
1868 RHS = EI->getVectorOperand();
1869 VIsUndefShuffle = false;
1870 }
1871 if (!Args.empty()) {
1872 V = Builder.CreateShuffleVector(LHS, RHS, Args);
1873 ++CurIdx;
1874 continue;
1875 }
1876 }
1877 }
1878 V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx),
1879 "vecinit");
1880 VIsUndefShuffle = false;
1881 ++CurIdx;
1882 continue;
1883 }
1884
1885 unsigned InitElts = cast<llvm::FixedVectorType>(VVT)->getNumElements();
1886
1887 // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
1888 // input is the same width as the vector being constructed, generate an
1889 // optimized shuffle of the swizzle input into the result.
1890 unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
1891 if (isa<ExtVectorElementExpr>(IE)) {
1892 llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
1893 Value *SVOp = SVI->getOperand(0);
1894 auto *OpTy = cast<llvm::FixedVectorType>(SVOp->getType());
1895
1896 if (OpTy->getNumElements() == ResElts) {
1897 for (unsigned j = 0; j != CurIdx; ++j) {
1898 // If the current vector initializer is a shuffle with undef, merge
1899 // this shuffle directly into it.
1900 if (VIsUndefShuffle) {
1901 Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0));
1902 } else {
1903 Args.push_back(j);
1904 }
1905 }
1906 for (unsigned j = 0, je = InitElts; j != je; ++j)
1907 Args.push_back(getMaskElt(SVI, j, Offset));
1908 Args.resize(ResElts, -1);
1909
1910 if (VIsUndefShuffle)
1911 V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
1912
1913 Init = SVOp;
1914 }
1915 }
1916
1917 // Extend init to result vector length, and then shuffle its contribution
1918 // to the vector initializer into V.
1919 if (Args.empty()) {
1920 for (unsigned j = 0; j != InitElts; ++j)
1921 Args.push_back(j);
1922 Args.resize(ResElts, -1);
1923 Init = Builder.CreateShuffleVector(Init, Args, "vext");
1924
1925 Args.clear();
1926 for (unsigned j = 0; j != CurIdx; ++j)
1927 Args.push_back(j);
1928 for (unsigned j = 0; j != InitElts; ++j)
1929 Args.push_back(j + Offset);
1930 Args.resize(ResElts, -1);
1931 }
1932
1933 // If V is undef, make sure it ends up on the RHS of the shuffle to aid
1934 // merging subsequent shuffles into this one.
1935 if (CurIdx == 0)
1936 std::swap(V, Init);
1937 V = Builder.CreateShuffleVector(V, Init, Args, "vecinit");
1938 VIsUndefShuffle = isa<llvm::UndefValue>(Init);
1939 CurIdx += InitElts;
1940 }
1941
1942 // FIXME: evaluate codegen vs. shuffling against constant null vector.
1943 // Emit remaining default initializers.
1944 llvm::Type *EltTy = VType->getElementType();
1945
1946 // Emit remaining default initializers
1947 for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
1948 Value *Idx = Builder.getInt32(CurIdx);
1949 llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
1950 V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
1951 }
1952 return V;
1953}
1954
1955bool CodeGenFunction::ShouldNullCheckClassCastValue(const CastExpr *CE) {
1956 const Expr *E = CE->getSubExpr();
1957
1958 if (CE->getCastKind() == CK_UncheckedDerivedToBase)
1959 return false;
1960
1961 if (isa<CXXThisExpr>(E->IgnoreParens())) {
1962 // We always assume that 'this' is never null.
1963 return false;
1964 }
1965
1966 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
1967 // And that glvalue casts are never null.
1968 if (ICE->isGLValue())
1969 return false;
1970 }
1971
1972 return true;
1973}
1974
1975// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
1976// have to handle a more broad range of conversions than explicit casts, as they
1977// handle things like function to ptr-to-function decay etc.
1978Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
1979 Expr *E = CE->getSubExpr();
1980 QualType DestTy = CE->getType();
1981 CastKind Kind = CE->getCastKind();
1982
1983 // These cases are generally not written to ignore the result of
1984 // evaluating their sub-expressions, so we clear this now.
1985 bool Ignored = TestAndClearIgnoreResultAssign();
1986
1987 // Since almost all cast kinds apply to scalars, this switch doesn't have
1988 // a default case, so the compiler will warn on a missing case. The cases
1989 // are in the same order as in the CastKind enum.
1990 switch (Kind) {
1991 case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!")__builtin_unreachable();
1992 case CK_BuiltinFnToFnPtr:
1993 llvm_unreachable("builtin functions are handled elsewhere")__builtin_unreachable();
1994
1995 case CK_LValueBitCast:
1996 case CK_ObjCObjectLValueCast: {
1997 Address Addr = EmitLValue(E).getAddress(CGF);
1998 Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy));
1999 LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
2000 return EmitLoadOfLValue(LV, CE->getExprLoc());
2001 }
2002
2003 case CK_LValueToRValueBitCast: {
2004 LValue SourceLVal = CGF.EmitLValue(E);
2005 Address Addr = Builder.CreateElementBitCast(SourceLVal.getAddress(CGF),
2006 CGF.ConvertTypeForMem(DestTy));
2007 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
2008 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2009 return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2010 }
2011
2012 case CK_CPointerToObjCPointerCast:
2013 case CK_BlockPointerToObjCPointerCast:
2014 case CK_AnyPointerToBlockPointerCast:
2015 case CK_BitCast: {
2016 Value *Src = Visit(const_cast<Expr*>(E));
2017 llvm::Type *SrcTy = Src->getType();
2018 llvm::Type *DstTy = ConvertType(DestTy);
2019 if (SrcTy->isPtrOrPtrVectorTy() && DstTy->isPtrOrPtrVectorTy() &&
2020 SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace()) {
2021 llvm_unreachable("wrong cast for pointers in different address spaces"__builtin_unreachable()
2022 "(must be an address space cast)!")__builtin_unreachable();
2023 }
2024
2025 if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
2026 if (auto PT = DestTy->getAs<PointerType>())
2027 CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Src,
2028 /*MayBeNull=*/true,
2029 CodeGenFunction::CFITCK_UnrelatedCast,
2030 CE->getBeginLoc());
2031 }
2032
2033 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2034 const QualType SrcType = E->getType();
2035
2036 if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {
2037 // Casting to pointer that could carry dynamic information (provided by
2038 // invariant.group) requires launder.
2039 Src = Builder.CreateLaunderInvariantGroup(Src);
2040 } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {
2041 // Casting to pointer that does not carry dynamic information (provided
2042 // by invariant.group) requires stripping it. Note that we don't do it
2043 // if the source could not be dynamic type and destination could be
2044 // dynamic because dynamic information is already laundered. It is
2045 // because launder(strip(src)) == launder(src), so there is no need to
2046 // add extra strip before launder.
2047 Src = Builder.CreateStripInvariantGroup(Src);
2048 }
2049 }
2050
2051 // Update heapallocsite metadata when there is an explicit pointer cast.
2052 if (auto *CI = dyn_cast<llvm::CallBase>(Src)) {
2053 if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE)) {
2054 QualType PointeeType = DestTy->getPointeeType();
2055 if (!PointeeType.isNull())
2056 CGF.getDebugInfo()->addHeapAllocSiteMetadata(CI, PointeeType,
2057 CE->getExprLoc());
2058 }
2059 }
2060
2061 // If Src is a fixed vector and Dst is a scalable vector, and both have the
2062 // same element type, use the llvm.experimental.vector.insert intrinsic to
2063 // perform the bitcast.
2064 if (const auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
2065 if (const auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(DstTy)) {
2066 if (FixedSrc->getElementType() == ScalableDst->getElementType()) {
2067 llvm::Value *UndefVec = llvm::UndefValue::get(DstTy);
2068 llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
2069 return Builder.CreateInsertVector(DstTy, UndefVec, Src, Zero,
2070 "castScalableSve");
2071 }
2072 }
2073 }
2074
2075 // If Src is a scalable vector and Dst is a fixed vector, and both have the
2076 // same element type, use the llvm.experimental.vector.extract intrinsic to
2077 // perform the bitcast.
2078 if (const auto *ScalableSrc = dyn_cast<llvm::ScalableVectorType>(SrcTy)) {
2079 if (const auto *FixedDst = dyn_cast<llvm::FixedVectorType>(DstTy)) {
2080 if (ScalableSrc->getElementType() == FixedDst->getElementType()) {
2081 llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
2082 return Builder.CreateExtractVector(DstTy, Src, Zero, "castFixedSve");
2083 }
2084 }
2085 }
2086
2087 // Perform VLAT <-> VLST bitcast through memory.
2088 // TODO: since the llvm.experimental.vector.{insert,extract} intrinsics
2089 // require the element types of the vectors to be the same, we
2090 // need to keep this around for casting between predicates, or more
2091 // generally for bitcasts between VLAT <-> VLST where the element
2092 // types of the vectors are not the same, until we figure out a better
2093 // way of doing these casts.
2094 if ((isa<llvm::FixedVectorType>(SrcTy) &&
2095 isa<llvm::ScalableVectorType>(DstTy)) ||
2096 (isa<llvm::ScalableVectorType>(SrcTy) &&
2097 isa<llvm::FixedVectorType>(DstTy))) {
2098 Address Addr = CGF.CreateDefaultAlignTempAlloca(SrcTy, "saved-value");
2099 LValue LV = CGF.MakeAddrLValue(Addr, E->getType());
2100 CGF.EmitStoreOfScalar(Src, LV);
2101 Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy),
2102 "castFixedSve");
2103 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
2104 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2105 return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2106 }
2107
2108 return Builder.CreateBitCast(Src, DstTy);
2109 }
2110 case CK_AddressSpaceConversion: {
2111 Expr::EvalResult Result;
2112 if (E->EvaluateAsRValue(Result, CGF.getContext()) &&
2113 Result.Val.isNullPointer()) {
2114 // If E has side effect, it is emitted even if its final result is a
2115 // null pointer. In that case, a DCE pass should be able to
2116 // eliminate the useless instructions emitted during translating E.
2117 if (Result.HasSideEffects)
2118 Visit(E);
2119 return CGF.CGM.getNullPointer(cast<llvm::PointerType>(
2120 ConvertType(DestTy)), DestTy);
2121 }
2122 // Since target may map different address spaces in AST to the same address
2123 // space, an address space conversion may end up as a bitcast.
2124 return CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast(
2125 CGF, Visit(E), E->getType()->getPointeeType().getAddressSpace(),
2126 DestTy->getPointeeType().getAddressSpace(), ConvertType(DestTy));
2127 }
2128 case CK_AtomicToNonAtomic:
2129 case CK_NonAtomicToAtomic:
2130 case CK_NoOp:
2131 case CK_UserDefinedConversion:
2132 return Visit(const_cast<Expr*>(E));
2133
2134 case CK_BaseToDerived: {
2135 const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
2136 assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!")((void)0);
2137
2138 Address Base = CGF.EmitPointerWithAlignment(E);
2139 Address Derived =
2140 CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl,
2141 CE->path_begin(), CE->path_end(),
2142 CGF.ShouldNullCheckClassCastValue(CE));
2143
2144 // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
2145 // performed and the object is not of the derived type.
2146 if (CGF.sanitizePerformTypeCheck())
2147 CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(),
2148 Derived.getPointer(), DestTy->getPointeeType());
2149
2150 if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
2151 CGF.EmitVTablePtrCheckForCast(
2152 DestTy->getPointeeType(), Derived.getPointer(),
2153 /*MayBeNull=*/true, CodeGenFunction::CFITCK_DerivedCast,
2154 CE->getBeginLoc());
2155
2156 return Derived.getPointer();
2157 }
2158 case CK_UncheckedDerivedToBase:
2159 case CK_DerivedToBase: {
2160 // The EmitPointerWithAlignment path does this fine; just discard
2161 // the alignment.
2162 return CGF.EmitPointerWithAlignment(CE).getPointer();
2163 }
2164
2165 case CK_Dynamic: {
2166 Address V = CGF.EmitPointerWithAlignment(E);
2167 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
2168 return CGF.EmitDynamicCast(V, DCE);
2169 }
2170
2171 case CK_ArrayToPointerDecay:
2172 return CGF.EmitArrayToPointerDecay(E).getPointer();
2173 case CK_FunctionToPointerDecay:
2174 return EmitLValue(E).getPointer(CGF);
2175
2176 case CK_NullToPointer:
2177 if (MustVisitNullValue(E))
2178 CGF.EmitIgnoredExpr(E);
2179
2180 return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)),
2181 DestTy);
2182
2183 case CK_NullToMemberPointer: {
2184 if (MustVisitNullValue(E))
2185 CGF.EmitIgnoredExpr(E);
2186
2187 const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
2188 return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
2189 }
2190
2191 case CK_ReinterpretMemberPointer:
2192 case CK_BaseToDerivedMemberPointer:
2193 case CK_DerivedToBaseMemberPointer: {
2194 Value *Src = Visit(E);
2195
2196 // Note that the AST doesn't distinguish between checked and
2197 // unchecked member pointer conversions, so we always have to
2198 // implement checked conversions here. This is inefficient when
2199 // actual control flow may be required in order to perform the
2200 // check, which it is for data member pointers (but not member
2201 // function pointers on Itanium and ARM).
2202 return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
2203 }
2204
2205 case CK_ARCProduceObject:
2206 return CGF.EmitARCRetainScalarExpr(E);
2207 case CK_ARCConsumeObject:
2208 return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
2209 case CK_ARCReclaimReturnedObject:
2210 return CGF.EmitARCReclaimReturnedObject(E, /*allowUnsafe*/ Ignored);
2211 case CK_ARCExtendBlockObject:
2212 return CGF.EmitARCExtendBlockObject(E);
2213
2214 case CK_CopyAndAutoreleaseBlockObject:
2215 return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());
2216
2217 case CK_FloatingRealToComplex:
2218 case CK_FloatingComplexCast:
2219 case CK_IntegralRealToComplex:
2220 case CK_IntegralComplexCast:
2221 case CK_IntegralComplexToFloatingComplex:
2222 case CK_FloatingComplexToIntegralComplex:
2223 case CK_ConstructorConversion:
2224 case CK_ToUnion:
2225 llvm_unreachable("scalar cast to non-scalar value")__builtin_unreachable();
2226
2227 case CK_LValueToRValue:
2228 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy))((void)0);
2229 assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!")((void)0);
2230 return Visit(const_cast<Expr*>(E));
2231
2232 case CK_IntegralToPointer: {
2233 Value *Src = Visit(const_cast<Expr*>(E));
2234
2235 // First, convert to the correct width so that we control the kind of
2236 // extension.
2237 auto DestLLVMTy = ConvertType(DestTy);
2238 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
2239 bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
2240 llvm::Value* IntResult =
2241 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
2242
2243 auto *IntToPtr = Builder.CreateIntToPtr(IntResult, DestLLVMTy);
2244
2245 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2246 // Going from integer to pointer that could be dynamic requires reloading
2247 // dynamic information from invariant.group.
2248 if (DestTy.mayBeDynamicClass())
2249 IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
2250 }
2251 return IntToPtr;
2252 }
2253 case CK_PointerToIntegral: {
2254 assert(!DestTy->isBooleanType() && "bool should use PointerToBool")((void)0);
2255 auto *PtrExpr = Visit(E);
2256
2257 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2258 const QualType SrcType = E->getType();
2259
2260 // Casting to integer requires stripping dynamic information as it does
2261 // not carries it.
2262 if (SrcType.mayBeDynamicClass())
2263 PtrExpr = Builder.CreateStripInvariantGroup(PtrExpr);
2264 }
2265
2266 return Builder.CreatePtrToInt(PtrExpr, ConvertType(DestTy));
2267 }
2268 case CK_ToVoid: {
2269 CGF.EmitIgnoredExpr(E);
2270 return nullptr;
2271 }
2272 case CK_MatrixCast: {
2273 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2274 CE->getExprLoc());
2275 }
2276 case CK_VectorSplat: {
2277 llvm::Type *DstTy = ConvertType(DestTy);
2278 Value *Elt = Visit(const_cast<Expr*>(E));
2279 // Splat the element across to all elements
2280 unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
2281 return Builder.CreateVectorSplat(NumElements, Elt, "splat");
2282 }
2283
2284 case CK_FixedPointCast:
2285 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2286 CE->getExprLoc());
2287
2288 case CK_FixedPointToBoolean:
2289 assert(E->getType()->isFixedPointType() &&((void)0)
2290 "Expected src type to be fixed point type")((void)0);
2291 assert(DestTy->isBooleanType() && "Expected dest type to be boolean type")((void)0);
2292 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2293 CE->getExprLoc());
2294
2295 case CK_FixedPointToIntegral:
2296 assert(E->getType()->isFixedPointType() &&((void)0)
2297 "Expected src type to be fixed point type")((void)0);
2298 assert(DestTy->isIntegerType() && "Expected dest type to be an integer")((void)0);
2299 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2300 CE->getExprLoc());
2301
2302 case CK_IntegralToFixedPoint:
2303 assert(E->getType()->isIntegerType() &&((void)0)
2304 "Expected src type to be an integer")((void)0);
2305 assert(DestTy->isFixedPointType() &&((void)0)
2306 "Expected dest type to be fixed point type")((void)0);
2307 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2308 CE->getExprLoc());
2309
2310 case CK_IntegralCast: {
2311 ScalarConversionOpts Opts;
2312 if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
2313 if (!ICE->isPartOfExplicitCast())
2314 Opts = ScalarConversionOpts(CGF.SanOpts);
2315 }
2316 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2317 CE->getExprLoc(), Opts);
2318 }
2319 case CK_IntegralToFloating:
2320 case CK_FloatingToIntegral:
2321 case CK_FloatingCast:
2322 case CK_FixedPointToFloating:
2323 case CK_FloatingToFixedPoint: {
2324 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2325 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2326 CE->getExprLoc());
2327 }
2328 case CK_BooleanToSignedIntegral: {
2329 ScalarConversionOpts Opts;
2330 Opts.TreatBooleanAsSigned = true;
2331 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2332 CE->getExprLoc(), Opts);
2333 }
2334 case CK_IntegralToBoolean:
2335 return EmitIntToBoolConversion(Visit(E));
2336 case CK_PointerToBoolean:
2337 return EmitPointerToBoolConversion(Visit(E), E->getType());
2338 case CK_FloatingToBoolean: {
2339 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2340 return EmitFloatToBoolConversion(Visit(E));
2341 }
2342 case CK_MemberPointerToBoolean: {
2343 llvm::Value *MemPtr = Visit(E);
2344 const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
2345 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
2346 }
2347
2348 case CK_FloatingComplexToReal:
2349 case CK_IntegralComplexToReal:
2350 return CGF.EmitComplexExpr(E, false, true).first;
2351
2352 case CK_FloatingComplexToBoolean:
2353 case CK_IntegralComplexToBoolean: {
2354 CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E);
2355
2356 // TODO: kill this function off, inline appropriate case here
2357 return EmitComplexToScalarConversion(V, E->getType(), DestTy,
2358 CE->getExprLoc());
2359 }
2360
2361 case CK_ZeroToOCLOpaqueType: {
2362 assert((DestTy->isEventT() || DestTy->isQueueT() ||((void)0)
2363 DestTy->isOCLIntelSubgroupAVCType()) &&((void)0)
2364 "CK_ZeroToOCLEvent cast on non-event type")((void)0);
2365 return llvm::Constant::getNullValue(ConvertType(DestTy));
2366 }
2367
2368 case CK_IntToOCLSampler:
2369 return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);
2370
2371 } // end of switch
2372
2373 llvm_unreachable("unknown scalar cast")__builtin_unreachable();
2374}
2375
2376Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
2377 CodeGenFunction::StmtExprEvaluation eval(CGF);
2378 Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
2379 !E->getType()->isVoidType());
2380 if (!RetAlloca.isValid())
2381 return nullptr;
2382 return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
2383 E->getExprLoc());
2384}
2385
2386Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
2387 CodeGenFunction::RunCleanupsScope Scope(CGF);
2388 Value *V = Visit(E->getSubExpr());
2389 // Defend against dominance problems caused by jumps out of expression
2390 // evaluation through the shared cleanup block.
2391 Scope.ForceCleanup({&V});
2392 return V;
2393}
2394
2395//===----------------------------------------------------------------------===//
2396// Unary Operators
2397//===----------------------------------------------------------------------===//
2398
2399static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E,
2400 llvm::Value *InVal, bool IsInc,
2401 FPOptions FPFeatures) {
2402 BinOpInfo BinOp;
2403 BinOp.LHS = InVal;
2404 BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false);
2405 BinOp.Ty = E->getType();
2406 BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
2407 BinOp.FPFeatures = FPFeatures;
2408 BinOp.E = E;
2409 return BinOp;
2410}
2411
2412llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
2413 const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
2414 llvm::Value *Amount =
2415 llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, true);
2416 StringRef Name = IsInc ? "inc" : "dec";
2417 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
2418 case LangOptions::SOB_Defined:
2419 return Builder.CreateAdd(InVal, Amount, Name);
2420 case LangOptions::SOB_Undefined:
2421 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
2422 return Builder.CreateNSWAdd(InVal, Amount, Name);
2423 LLVM_FALLTHROUGH[[gnu::fallthrough]];
2424 case LangOptions::SOB_Trapping:
2425 if (!E->canOverflow())
2426 return Builder.CreateNSWAdd(InVal, Amount, Name);
2427 return EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
2428 E, InVal, IsInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
2429 }
2430 llvm_unreachable("Unknown SignedOverflowBehaviorTy")__builtin_unreachable();
2431}
2432
2433namespace {
2434/// Handles check and update for lastprivate conditional variables.
2435class OMPLastprivateConditionalUpdateRAII {
2436private:
2437 CodeGenFunction &CGF;
2438 const UnaryOperator *E;
2439
2440public:
2441 OMPLastprivateConditionalUpdateRAII(CodeGenFunction &CGF,
2442 const UnaryOperator *E)
2443 : CGF(CGF), E(E) {}
2444 ~OMPLastprivateConditionalUpdateRAII() {
2445 if (CGF.getLangOpts().OpenMP)
2446 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(
2447 CGF, E->getSubExpr());
2448 }
2449};
2450} // namespace
2451
/// Emit a scalar pre/post increment or decrement (++E, --E, E++, E--).
///
/// \param E     the unary operator being emitted.
/// \param LV    l-value of the operand, updated in place.
/// \param isInc true for ++, false for --.
/// \param isPre true for the prefix form (result is the new value);
///              false for postfix (result is the original value).
///
/// Handles, in this order: _Atomic operands (lock-free atomicrmw fast
/// paths, otherwise a load/op/cmpxchg retry loop threaded through
/// atomicPHI), bool++, integers (with sanitizer/overflow variants),
/// pointers (including VLA element types and function pointers),
/// vectors, floating point (half handled via float), fixed-point, and
/// Objective-C object pointers.
2452 llvm::Value *
2453ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
2454 bool isInc, bool isPre) {
2455 OMPLastprivateConditionalUpdateRAII OMPRegion(CGF, E);
2456 QualType type = E->getSubExpr()->getType();
2457 llvm::PHINode *atomicPHI = nullptr;
2458 llvm::Value *value;
2459 llvm::Value *input;
2460
  // Delta applied to the operand; "subtraction" also affects the
  // inbounds-GEP overflow checks below.
2461 int amount = (isInc ? 1 : -1);
2462 bool isSubtraction = !isInc;
2463
  // _Atomic operand: try lock-free fast paths first.
2464 if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
2465 type = atomicTy->getValueType();
2466 if (isInc && type->isBooleanType()) {
2467 llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
2468 if (isPre) {
2469 Builder.CreateStore(True, LV.getAddress(CGF), LV.isVolatileQualified())
2470 ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent);
2471 return Builder.getTrue();
2472 }
2473 // For atomic bool increment, we just store true and return it for
2474 // preincrement, do an atomic swap with true for postincrement
2475 return Builder.CreateAtomicRMW(
2476 llvm::AtomicRMWInst::Xchg, LV.getPointer(CGF), True,
2477 llvm::AtomicOrdering::SequentiallyConsistent);
2478 }
2479 // Special case for atomic increment / decrement on integers, emit
2480 // atomicrmw instructions. We skip this if we want to be doing overflow
2481 // checking, and fall into the slow path with the atomic cmpxchg loop.
2482 if (!type->isBooleanType() && type->isIntegerType() &&
2483 !(type->isUnsignedIntegerType() &&
2484 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
2485 CGF.getLangOpts().getSignedOverflowBehavior() !=
2486 LangOptions::SOB_Trapping) {
2487 llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
2488 llvm::AtomicRMWInst::Sub;
2489 llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
2490 llvm::Instruction::Sub;
2491 llvm::Value *amt = CGF.EmitToMemory(
2492 llvm::ConstantInt::get(ConvertType(type), 1, true), type);
2493 llvm::Value *old =
2494 Builder.CreateAtomicRMW(aop, LV.getPointer(CGF), amt,
2495 llvm::AtomicOrdering::SequentiallyConsistent);
  // atomicrmw returns the OLD value; recompute the new one for prefix.
2496 return isPre ? Builder.CreateBinOp(op, old, amt) : old;
2497 }
2498 value = EmitLoadOfLValue(LV, E->getExprLoc());
2499 input = value;
2500 // For every other atomic operation, we need to emit a load-op-cmpxchg loop
2501 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
2502 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
2503 value = CGF.EmitToMemory(value, type);
2504 Builder.CreateBr(opBB);
2505 Builder.SetInsertPoint(opBB);
  // atomicPHI carries the "current" value through each cmpxchg retry;
  // the second incoming edge is added at the bottom of this function.
2506 atomicPHI = Builder.CreatePHI(value->getType(), 2);
2507 atomicPHI->addIncoming(value, startBB);
2508 value = atomicPHI;
2509 } else {
2510 value = EmitLoadOfLValue(LV, E->getExprLoc());
2511 input = value;
2512 }
2513
2514 // Special case of integer increment that we have to check first: bool++.
2515 // Due to promotion rules, we get:
2516 // bool++ -> bool = bool + 1
2517 // -> bool = (int)bool + 1
2518 // -> bool = ((int)bool + 1 != 0)
2519 // An interesting aspect of this is that increment is always true.
2520 // Decrement does not have this property.
2521 if (isInc && type->isBooleanType()) {
2522 value = Builder.getTrue();
2523
2524 // Most common case by far: integer increment.
2525 } else if (type->isIntegerType()) {
2526 QualType promotedType;
2527 bool canPerformLossyDemotionCheck = false;
2528 if (type->isPromotableIntegerType()) {
2529 promotedType = CGF.getContext().getPromotedIntegerType(type);
2530 assert(promotedType != type && "Shouldn't promote to the same type.")((void)0);
2531 canPerformLossyDemotionCheck = true;
2532 canPerformLossyDemotionCheck &=
2533 CGF.getContext().getCanonicalType(type) !=
2534 CGF.getContext().getCanonicalType(promotedType);
2535 canPerformLossyDemotionCheck &=
2536 PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
2537 type, promotedType);
2538 assert((!canPerformLossyDemotionCheck ||((void)0)
2539 type->isSignedIntegerOrEnumerationType() ||((void)0)
2540 promotedType->isSignedIntegerOrEnumerationType() ||((void)0)
2541 ConvertType(type)->getScalarSizeInBits() ==((void)0)
2542 ConvertType(promotedType)->getScalarSizeInBits()) &&((void)0)
2543 "The following check expects that if we do promotion to different "((void)0)
2544 "underlying canonical type, at least one of the types (either "((void)0)
2545 "base or promoted) will be signed, or the bitwidths will match.")((void)0);
2546 }
2547 if (CGF.SanOpts.hasOneOf(
2548 SanitizerKind::ImplicitIntegerArithmeticValueChange) &&
2549 canPerformLossyDemotionCheck) {
2550 // While `x += 1` (for `x` with width less than int) is modeled as
2551 // promotion+arithmetics+demotion, and we can catch lossy demotion with
2552 // ease; inc/dec with width less than int can't overflow because of
2553 // promotion rules, so we omit promotion+demotion, which means that we can
2554 // not catch lossy "demotion". Because we still want to catch these cases
2555 // when the sanitizer is enabled, we perform the promotion, then perform
2556 // the increment/decrement in the wider type, and finally
2557 // perform the demotion. This will catch lossy demotions.
2558
2559 value = EmitScalarConversion(value, type, promotedType, E->getExprLoc());
2560 Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
2561 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2562 // Do pass non-default ScalarConversionOpts so that sanitizer check is
2563 // emitted.
2564 value = EmitScalarConversion(value, promotedType, type, E->getExprLoc(),
2565 ScalarConversionOpts(CGF.SanOpts));
2566
2567 // Note that signed integer inc/dec with width less than int can't
2568 // overflow because of promotion rules; we're just eliding a few steps
2569 // here.
2570 } else if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
2571 value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
2572 } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
2573 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
2574 value = EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
2575 E, value, isInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
2576 } else {
2577 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
2578 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2579 }
2580
2581 // Next most common: pointer increment.
2582 } else if (const PointerType *ptr = type->getAs<PointerType>()) {
  // NOTE: this inner `type` intentionally shadows the outer one with
  // the pointee type for the rest of this branch.
2583 QualType type = ptr->getPointeeType();
2584
2585 // VLA types don't have constant size.
2586 if (const VariableArrayType *vla
2587 = CGF.getContext().getAsVariableArrayType(type)) {
2588 llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
2589 if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
2590 if (CGF.getLangOpts().isSignedOverflowDefined())
2591 value = Builder.CreateGEP(value->getType()->getPointerElementType(),
2592 value, numElts, "vla.inc");
2593 else
2594 value = CGF.EmitCheckedInBoundsGEP(
2595 value, numElts, /*SignedIndices=*/false, isSubtraction,
2596 E->getExprLoc(), "vla.inc");
2597
2598 // Arithmetic on function pointers (!) is just +-1.
2599 } else if (type->isFunctionType()) {
2600 llvm::Value *amt = Builder.getInt32(amount);
2601
2602 value = CGF.EmitCastToVoidPtr(value);
2603 if (CGF.getLangOpts().isSignedOverflowDefined())
2604 value = Builder.CreateGEP(CGF.Int8Ty, value, amt, "incdec.funcptr");
2605 else
2606 value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false,
2607 isSubtraction, E->getExprLoc(),
2608 "incdec.funcptr");
2609 value = Builder.CreateBitCast(value, input->getType());
2610
2611 // For everything else, we can just do a simple increment.
2612 } else {
2613 llvm::Value *amt = Builder.getInt32(amount);
2614 if (CGF.getLangOpts().isSignedOverflowDefined())
2615 value = Builder.CreateGEP(value->getType()->getPointerElementType(),
2616 value, amt, "incdec.ptr");
2617 else
2618 value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false,
2619 isSubtraction, E->getExprLoc(),
2620 "incdec.ptr");
2621 }
2622
2623 // Vector increment/decrement.
2624 } else if (type->isVectorType()) {
2625 if (type->hasIntegerRepresentation()) {
2626 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);
2627
2628 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2629 } else {
2630 value = Builder.CreateFAdd(
2631 value,
2632 llvm::ConstantFP::get(value->getType(), amount),
2633 isInc ? "inc" : "dec");
2634 }
2635
2636 // Floating point.
2637 } else if (type->isRealFloatingType()) {
2638 // Add the inc/dec to the real part.
2639 llvm::Value *amt;
2640 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
2641
2642 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
2643 // Another special case: half FP increment should be done via float
2644 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
2645 value = Builder.CreateCall(
2646 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
2647 CGF.CGM.FloatTy),
2648 input, "incdec.conv");
2649 } else {
2650 value = Builder.CreateFPExt(input, CGF.CGM.FloatTy, "incdec.conv");
2651 }
2652 }
2653
2654 if (value->getType()->isFloatTy())
2655 amt = llvm::ConstantFP::get(VMContext,
2656 llvm::APFloat(static_cast<float>(amount)));
2657 else if (value->getType()->isDoubleTy())
2658 amt = llvm::ConstantFP::get(VMContext,
2659 llvm::APFloat(static_cast<double>(amount)));
2660 else {
2661 // Remaining types are Half, LongDouble or __float128. Convert from float.
2662 llvm::APFloat F(static_cast<float>(amount));
2663 bool ignored;
2664 const llvm::fltSemantics *FS;
2665 // Don't use getFloatTypeSemantics because Half isn't
2666 // necessarily represented using the "half" LLVM type.
2667 if (value->getType()->isFP128Ty())
2668 FS = &CGF.getTarget().getFloat128Format();
2669 else if (value->getType()->isHalfTy())
2670 FS = &CGF.getTarget().getHalfFormat();
2671 else
2672 FS = &CGF.getTarget().getLongDoubleFormat();
2673 F.convert(*FS, llvm::APFloat::rmTowardZero, &ignored);
2674 amt = llvm::ConstantFP::get(VMContext, F);
2675 }
2676 value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");
2677
  // Truncate back to half if the arithmetic was widened to float above.
2678 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
2679 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
2680 value = Builder.CreateCall(
2681 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16,
2682 CGF.CGM.FloatTy),
2683 value, "incdec.conv");
2684 } else {
2685 value = Builder.CreateFPTrunc(value, input->getType(), "incdec.conv");
2686 }
2687 }
2688
2689 // Fixed-point types.
2690 } else if (type->isFixedPointType()) {
2691 // Fixed-point types are tricky. In some cases, it isn't possible to
2692 // represent a 1 or a -1 in the type at all. Piggyback off of
2693 // EmitFixedPointBinOp to avoid having to reimplement saturation.
2694 BinOpInfo Info;
2695 Info.E = E;
2696 Info.Ty = E->getType();
2697 Info.Opcode = isInc ? BO_Add : BO_Sub;
2698 Info.LHS = value;
2699 Info.RHS = llvm::ConstantInt::get(value->getType(), 1, false);
2700 // If the type is signed, it's better to represent this as +(-1) or -(-1),
2701 // since -1 is guaranteed to be representable.
2702 if (type->isSignedFixedPointType()) {
2703 Info.Opcode = isInc ? BO_Sub : BO_Add;
2704 Info.RHS = Builder.CreateNeg(Info.RHS);
2705 }
2706 // Now, convert from our invented integer literal to the type of the unary
2707 // op. This will upscale and saturate if necessary. This value can become
2708 // undef in some cases.
2709 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
2710 auto DstSema = CGF.getContext().getFixedPointSemantics(Info.Ty);
2711 Info.RHS = FPBuilder.CreateIntegerToFixed(Info.RHS, true, DstSema);
2712 value = EmitFixedPointBinOp(Info);
2713
2714 // Objective-C pointer types.
2715 } else {
2716 const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
2717 value = CGF.EmitCastToVoidPtr(value);
2718
2719 CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType());
2720 if (!isInc) size = -size;
2721 llvm::Value *sizeValue =
2722 llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity());
2723
2724 if (CGF.getLangOpts().isSignedOverflowDefined())
2725 value = Builder.CreateGEP(CGF.Int8Ty, value, sizeValue, "incdec.objptr");
2726 else
2727 value = CGF.EmitCheckedInBoundsGEP(value, sizeValue,
2728 /*SignedIndices=*/false, isSubtraction,
2729 E->getExprLoc(), "incdec.objptr");
2730 value = Builder.CreateBitCast(value, input->getType());
2731 }
2732
  // Close the cmpxchg retry loop opened in the _Atomic slow path above:
  // try to publish the new value; on failure loop back with the value
  // observed by the failed exchange.
2733 if (atomicPHI) {
2734 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
2735 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
2736 auto Pair = CGF.EmitAtomicCompareExchange(
2737 LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc());
2738 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type);
2739 llvm::Value *success = Pair.second;
2740 atomicPHI->addIncoming(old, curBlock);
2741 Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
2742 Builder.SetInsertPoint(contBB);
2743 return isPre ? value : input;
2744 }
2745
2746 // Store the updated result through the lvalue.
2747 if (LV.isBitField())
2748 CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value);
2749 else
2750 CGF.EmitStoreThroughLValue(RValue::get(value), LV);
2751
2752 // If this is a postinc, return the value read from memory, otherwise use the
2753 // updated value.
2754 return isPre ? value : input;
2755}
2756
2757
2758
2759Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
2760 TestAndClearIgnoreResultAssign();
2761 Value *Op = Visit(E->getSubExpr());
2762
2763 // Generate a unary FNeg for FP ops.
2764 if (Op->getType()->isFPOrFPVectorTy())
1
Taking false branch
2765 return Builder.CreateFNeg(Op, "fneg");
2766
2767 // Emit unary minus with EmitSub so we handle overflow cases etc.
2768 BinOpInfo BinOp;
2769 BinOp.RHS = Op;
2770 BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
2771 BinOp.Ty = E->getType();
2772 BinOp.Opcode = BO_Sub;
2773 BinOp.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
2774 BinOp.E = E;
2775 return EmitSub(BinOp);
2
Calling 'ScalarExprEmitter::EmitSub'
2776}
2777
2778Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
2779 TestAndClearIgnoreResultAssign();
2780 Value *Op = Visit(E->getSubExpr());
2781 return Builder.CreateNot(Op, "neg");
2782}
2783
2784Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
2785 // Perform vector logical not on comparison with zero vector.
2786 if (E->getType()->isVectorType() &&
2787 E->getType()->castAs<VectorType>()->getVectorKind() ==
2788 VectorType::GenericVector) {
2789 Value *Oper = Visit(E->getSubExpr());
2790 Value *Zero = llvm::Constant::getNullValue(Oper->getType());
2791 Value *Result;
2792 if (Oper->getType()->isFPOrFPVectorTy()) {
2793 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
2794 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
2795 Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp");
2796 } else
2797 Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
2798 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
2799 }
2800
2801 // Compare operand to zero.
2802 Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
2803
2804 // Invert value.
2805 // TODO: Could dynamically modify easy computations here. For example, if
2806 // the operand is an icmp ne, turn into icmp eq.
2807 BoolVal = Builder.CreateNot(BoolVal, "lnot");
2808
2809 // ZExt result to the expr type.
2810 return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
2811}
2812
/// Emit __builtin_offsetof as a scalar integer.
///
/// First tries to constant-fold the whole expression; otherwise walks
/// the component list (array subscripts, fields, non-virtual bases),
/// accumulating byte offsets computed from the AST record layout.
/// Virtual bases are rejected with ErrorUnsupported.
2813Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
2814 // Try folding the offsetof to a constant.
2815 Expr::EvalResult EVResult;
2816 if (E->EvaluateAsInt(EVResult, CGF.getContext())) {
2817 llvm::APSInt Value = EVResult.Val.getInt();
2818 return Builder.getInt(Value);
2819 }
2820
2821 // Loop over the components of the offsetof to compute the value.
2822 unsigned n = E->getNumComponents();
2823 llvm::Type* ResultType = ConvertType(E->getType());
2824 llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
  // CurrentType tracks the type being indexed into at each step.
2825 QualType CurrentType = E->getTypeSourceInfo()->getType();
2826 for (unsigned i = 0; i != n; ++i) {
2827 OffsetOfNode ON = E->getComponent(i);
2828 llvm::Value *Offset = nullptr;
2829 switch (ON.getKind()) {
2830 case OffsetOfNode::Array: {
2831 // Compute the index
2832 Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
2833 llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr);
2834 bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
2835 Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");
2836
2837 // Save the element type
2838 CurrentType =
2839 CGF.getContext().getAsArrayType(CurrentType)->getElementType();
2840
2841 // Compute the element size
2842 llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType,
2843 CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());
2844
2845 // Multiply out to compute the result
2846 Offset = Builder.CreateMul(Idx, ElemSize);
2847 break;
2848 }
2849
2850 case OffsetOfNode::Field: {
2851 FieldDecl *MemberDecl = ON.getField();
2852 RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
2853 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
2854
2855 // Compute the index of the field in its parent.
  // NOTE: this inner `i` shadows the loop counter on purpose; it counts
  // fields within the record, not offsetof components.
2856 unsigned i = 0;
2857 // FIXME: It would be nice if we didn't have to loop here!
2858 for (RecordDecl::field_iterator Field = RD->field_begin(),
2859 FieldEnd = RD->field_end();
2860 Field != FieldEnd; ++Field, ++i) {
2861 if (*Field == MemberDecl)
2862 break;
2863 }
2864 assert(i < RL.getFieldCount() && "offsetof field in wrong type")((void)0);
2865
2866 // Compute the offset to the field
  // getFieldOffset is in bits; divide by char width for a byte offset.
2867 int64_t OffsetInt = RL.getFieldOffset(i) /
2868 CGF.getContext().getCharWidth();
2869 Offset = llvm::ConstantInt::get(ResultType, OffsetInt);
2870
2871 // Save the element type.
2872 CurrentType = MemberDecl->getType();
2873 break;
2874 }
2875
2876 case OffsetOfNode::Identifier:
2877 llvm_unreachable("dependent __builtin_offsetof")__builtin_unreachable();
2878
2879 case OffsetOfNode::Base: {
2880 if (ON.getBase()->isVirtual()) {
2881 CGF.ErrorUnsupported(E, "virtual base in offsetof");
2882 continue;
2883 }
2884
2885 RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
2886 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
2887
2888 // Save the element type.
2889 CurrentType = ON.getBase()->getType();
2890
2891 // Compute the offset to the base.
2892 const RecordType *BaseRT = CurrentType->getAs<RecordType>();
2893 CXXRecordDecl *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
2894 CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
2895 Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
2896 break;
2897 }
2898 }
2899 Result = Builder.CreateAdd(Result, Offset);
2900 }
2901 return Result;
2902}
2903
2904/// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of
2905/// argument of the sizeof expression as an integer.
2906Value *
2907ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
2908 const UnaryExprOrTypeTraitExpr *E) {
2909 QualType TypeToSize = E->getTypeOfArgument();
2910 if (E->getKind() == UETT_SizeOf) {
2911 if (const VariableArrayType *VAT =
2912 CGF.getContext().getAsVariableArrayType(TypeToSize)) {
2913 if (E->isArgumentType()) {
2914 // sizeof(type) - make sure to emit the VLA size.
2915 CGF.EmitVariablyModifiedType(TypeToSize);
2916 } else {
2917 // C99 6.5.3.4p2: If the argument is an expression of type
2918 // VLA, it is evaluated.
2919 CGF.EmitIgnoredExpr(E->getArgumentExpr());
2920 }
2921
2922 auto VlaSize = CGF.getVLASize(VAT);
2923 llvm::Value *size = VlaSize.NumElts;
2924
2925 // Scale the number of non-VLA elements by the non-VLA element size.
2926 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type);
2927 if (!eltSize.isOne())
2928 size = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), size);
2929
2930 return size;
2931 }
2932 } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
2933 auto Alignment =
2934 CGF.getContext()
2935 .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
2936 E->getTypeOfArgument()->getPointeeType()))
2937 .getQuantity();
2938 return llvm::ConstantInt::get(CGF.SizeTy, Alignment);
2939 }
2940
2941 // If this isn't sizeof(vla), the result must be constant; use the constant
2942 // folding logic so we don't have to duplicate it here.
2943 return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
2944}
2945
2946Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
2947 Expr *Op = E->getSubExpr();
2948 if (Op->getType()->isAnyComplexType()) {
2949 // If it's an l-value, load through the appropriate subobject l-value.
2950 // Note that we have to ask E because Op might be an l-value that
2951 // this won't work for, e.g. an Obj-C property.
2952 if (E->isGLValue())
2953 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
2954 E->getExprLoc()).getScalarVal();
2955
2956 // Otherwise, calculate and project.
2957 return CGF.EmitComplexExpr(Op, false, true).first;
2958 }
2959
2960 return Visit(Op);
2961}
2962
2963Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
2964 Expr *Op = E->getSubExpr();
2965 if (Op->getType()->isAnyComplexType()) {
2966 // If it's an l-value, load through the appropriate subobject l-value.
2967 // Note that we have to ask E because Op might be an l-value that
2968 // this won't work for, e.g. an Obj-C property.
2969 if (Op->isGLValue())
2970 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
2971 E->getExprLoc()).getScalarVal();
2972
2973 // Otherwise, calculate and project.
2974 return CGF.EmitComplexExpr(Op, true, false).second;
2975 }
2976
2977 // __imag on a scalar returns zero. Emit the subexpr to ensure side
2978 // effects are evaluated, but not the actual value.
2979 if (Op->isGLValue())
2980 CGF.EmitLValue(Op);
2981 else
2982 CGF.EmitScalarExpr(Op, true);
2983 return llvm::Constant::getNullValue(ConvertType(E->getType()));
2984}
2985
2986//===----------------------------------------------------------------------===//
2987// Binary Operators
2988//===----------------------------------------------------------------------===//
2989
2990BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
2991 TestAndClearIgnoreResultAssign();
2992 BinOpInfo Result;
2993 Result.LHS = Visit(E->getLHS());
2994 Result.RHS = Visit(E->getRHS());
2995 Result.Ty = E->getType();
2996 Result.Opcode = E->getOpcode();
2997 Result.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
2998 Result.E = E;
2999 return Result;
3000}
3001
/// Emit a scalar compound assignment (+=, -=, &=, ...) and return the
/// LHS l-value; the result value (converted back to the LHS type) is
/// passed back through `Result`.
///
/// \param E      the compound-assignment operator.
/// \param Func   member emitter for the underlying binary operation.
/// \param Result out-parameter receiving the computed value.
///
/// For an _Atomic LHS, opcodes that map onto an atomicrmw instruction
/// use it directly; the remaining opcodes fall back to a
/// load/op/cmpxchg retry loop threaded through `atomicPHI`.
3002LValue ScalarExprEmitter::EmitCompoundAssignLValue(
3003 const CompoundAssignOperator *E,
3004 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
3005 Value *&Result) {
3006 QualType LHSTy = E->getLHS()->getType();
3007 BinOpInfo OpInfo;
3008
  // Complex computation types are handled entirely elsewhere.
3009 if (E->getComputationResultType()->isAnyComplexType())
3010 return CGF.EmitScalarCompoundAssignWithComplex(E, Result);
3011
3012 // Emit the RHS first. __block variables need to have the rhs evaluated
3013 // first, plus this should improve codegen a little.
3014 OpInfo.RHS = Visit(E->getRHS());
3015 OpInfo.Ty = E->getComputationResultType();
3016 OpInfo.Opcode = E->getOpcode();
3017 OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3018 OpInfo.E = E;
3019 // Load/convert the LHS.
3020 LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
3021
3022 llvm::PHINode *atomicPHI = nullptr;
3023 if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
3024 QualType type = atomicTy->getValueType();
  // Lock-free fast path: only for non-bool integers, when no
  // overflow checking (sanitizer or trapping semantics) is required.
3025 if (!type->isBooleanType() && type->isIntegerType() &&
3026 !(type->isUnsignedIntegerType() &&
3027 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
3028 CGF.getLangOpts().getSignedOverflowBehavior() !=
3029 LangOptions::SOB_Trapping) {
3030 llvm::AtomicRMWInst::BinOp AtomicOp = llvm::AtomicRMWInst::BAD_BINOP;
3031 llvm::Instruction::BinaryOps Op;
3032 switch (OpInfo.Opcode) {
3033 // We don't have atomicrmw operands for *, %, /, <<, >>
3034 case BO_MulAssign: case BO_DivAssign:
3035 case BO_RemAssign:
3036 case BO_ShlAssign:
3037 case BO_ShrAssign:
3038 break;
3039 case BO_AddAssign:
3040 AtomicOp = llvm::AtomicRMWInst::Add;
3041 Op = llvm::Instruction::Add;
3042 break;
3043 case BO_SubAssign:
3044 AtomicOp = llvm::AtomicRMWInst::Sub;
3045 Op = llvm::Instruction::Sub;
3046 break;
3047 case BO_AndAssign:
3048 AtomicOp = llvm::AtomicRMWInst::And;
3049 Op = llvm::Instruction::And;
3050 break;
3051 case BO_XorAssign:
3052 AtomicOp = llvm::AtomicRMWInst::Xor;
3053 Op = llvm::Instruction::Xor;
3054 break;
3055 case BO_OrAssign:
3056 AtomicOp = llvm::AtomicRMWInst::Or;
3057 Op = llvm::Instruction::Or;
3058 break;
3059 default:
3060 llvm_unreachable("Invalid compound assignment type")__builtin_unreachable();
3061 }
3062 if (AtomicOp != llvm::AtomicRMWInst::BAD_BINOP) {
3063 llvm::Value *Amt = CGF.EmitToMemory(
3064 EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy,
3065 E->getExprLoc()),
3066 LHSTy);
3067 Value *OldVal = Builder.CreateAtomicRMW(
3068 AtomicOp, LHSLV.getPointer(CGF), Amt,
3069 llvm::AtomicOrdering::SequentiallyConsistent);
3070
3071 // Since operation is atomic, the result type is guaranteed to be the
3072 // same as the input in LLVM terms.
3073 Result = Builder.CreateBinOp(Op, OldVal, Amt);
3074 return LHSLV;
3075 }
3076 }
  // Slow path: load/op/cmpxchg retry loop; the second incoming edge of
  // atomicPHI is added after the operation is emitted below.
3077 // FIXME: For floating point types, we should be saving and restoring the
3078 // floating point environment in the loop.
3079 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
3080 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
3081 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
3082 OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type);
3083 Builder.CreateBr(opBB);
3084 Builder.SetInsertPoint(opBB);
3085 atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
3086 atomicPHI->addIncoming(OpInfo.LHS, startBB);
3087 OpInfo.LHS = atomicPHI;
3088 }
3089 else
3090 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
3091
3092 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, OpInfo.FPFeatures);
3093 SourceLocation Loc = E->getExprLoc();
  // Convert the LHS to the computation type before operating.
3094 OpInfo.LHS =
3095 EmitScalarConversion(OpInfo.LHS, LHSTy, E->getComputationLHSType(), Loc);
3096
3097 // Expand the binary operator.
3098 Result = (this->*Func)(OpInfo);
3099
3100 // Convert the result back to the LHS type,
3101 // potentially with Implicit Conversion sanitizer check.
3102 Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy,
3103 Loc, ScalarConversionOpts(CGF.SanOpts));
3104
  // Close the cmpxchg retry loop: publish Result, or loop back with the
  // value observed by the failed exchange.
3105 if (atomicPHI) {
3106 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
3107 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
3108 auto Pair = CGF.EmitAtomicCompareExchange(
3109 LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc());
3110 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy);
3111 llvm::Value *success = Pair.second;
3112 atomicPHI->addIncoming(old, curBlock);
3113 Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
3114 Builder.SetInsertPoint(contBB);
3115 return LHSLV;
3116 }
3117
3118 // Store the result value into the LHS lvalue. Bit-fields are handled
3119 // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
3120 // 'An assignment expression has the value of the left operand after the
3121 // assignment...'.
3122 if (LHSLV.isBitField())
3123 CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, &Result);
3124 else
3125 CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV);
3126
3127 if (CGF.getLangOpts().OpenMP)
3128 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF,
3129 E->getLHS());
3130 return LHSLV;
3131}
3132
// Emit a compound assignment (+=, -=, ...) as a scalar rvalue.  The heavy
// lifting (load, widen, apply Func, narrow, store — including the atomic
// compare-exchange loop for _Atomic LHS) is done by EmitCompoundAssignLValue;
// this wrapper only decides which value to hand back to the caller.
// \param E    the compound-assignment expression being emitted.
// \param Func member-pointer to the ScalarExprEmitter binop emitter
//             (EmitAdd, EmitDiv, ...) that performs the arithmetic.
// \returns the value of the expression, or nullptr when the result is ignored.
3133Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
3134 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
3135 bool Ignore = TestAndClearIgnoreResultAssign();
// RHS is an out-parameter: EmitCompoundAssignLValue fills it with the value
// that was stored (already converted back to the LHS type).
3136 Value *RHS = nullptr;
3137 LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);
3138
3139 // If the result is clearly ignored, return now.
3140 if (Ignore)
3141 return nullptr;
3142
3143 // The result of an assignment in C is the assigned r-value.
3144 if (!CGF.getLangOpts().CPlusPlus)
3145 return RHS;
3146
3147 // If the lvalue is non-volatile, return the computed value of the assignment.
3148 if (!LHS.isVolatileQualified())
3149 return RHS;
3150
// C++ volatile lvalue: the result is the lvalue itself, so re-read it to
// materialize the (volatile) load the language requires.
3151 // Otherwise, reload the value.
3152 return EmitLoadOfLValue(LHS, E->getExprLoc());
3153}
3154
// Emit the UBSan runtime checks for an integer division or remainder:
// (1) divisor != 0 when -fsanitize=integer-divide-by-zero is on, and
// (2) not (LHS == INT_MIN && RHS == -1) when -fsanitize=signed-integer-overflow
//     is on (that one combination overflows for both sdiv and srem).
// Both checks are batched into a single EmitBinOpCheck call so they share one
// diagnostic branch.
// \param Ops  the operands/type info of the div or rem being emitted.
// \param Zero a constant zero of the operands' LLVM type, used for comparisons.
// \param isDiv true for '/', false for '%'.
//     NOTE(review): isDiv is not read anywhere in this body — presumably kept
//     for interface symmetry with callers; confirm before removing.
3155void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
3156 const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
3157 SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
3158
// Check 1: divisor must be non-zero.
3159 if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
3160 Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero),
3161 SanitizerKind::IntegerDivideByZero));
3162 }
3163
// Check 2: INT_MIN / -1 overflow.  Skipped when the operands were widened
// from a narrower type (the result then always fits) or when constant
// folding already proved no overflow is possible.
3164 const auto *BO = cast<BinaryOperator>(Ops.E);
3165 if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
3166 Ops.Ty->hasSignedIntegerRepresentation() &&
3167 !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) &&
3168 Ops.mayHaveIntegerOverflow()) {
3169 llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
3170
3171 llvm::Value *IntMin =
3172 Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
3173 llvm::Value *NegOne = llvm::Constant::getAllOnesValue(Ty);
3174
// OK unless LHS == INT_MIN AND RHS == -1, i.e. ok = (LHS != INT_MIN) ||
// (RHS != -1).
3175 llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
3176 llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
3177 llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
3178 Checks.push_back(
3179 std::make_pair(NotOverflow, SanitizerKind::SignedIntegerOverflow));
3180 }
3181
// Emit nothing if neither sanitizer contributed a condition.
3182 if (Checks.size() > 0)
3183 EmitBinOpCheck(Checks, Ops);
3184}
3185
// Emit a division.  Handles, in order: UBSan checks (integer div-by-zero /
// INT_MIN÷-1 overflow, or float div-by-zero), constant-matrix scalar division,
// floating-point division (with the OpenCL/HIP 2.5ulp accuracy annotation),
// fixed-point division, and plain unsigned/signed integer division.
3186Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
// Inner scope so SanScope (which marks emitted IR as sanitizer
// instrumentation) ends before the actual division is emitted.
3187 {
3188 CodeGenFunction::SanitizerScope SanScope(&CGF);
3189 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
3190 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
3191 Ops.Ty->isIntegerType() &&
3192 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
3193 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
3194 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
3195 } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
3196 Ops.Ty->isRealFloatingType() &&
3197 Ops.mayHaveFloatDivisionByZero()) {
// Unordered != so a NaN divisor also counts as "non-zero" (no diagnostic).
3198 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
3199 llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
3200 EmitBinOpCheck(std::make_pair(NonZero, SanitizerKind::FloatDivideByZero),
3201 Ops);
3202 }
3203 }
3204
// Matrix / scalar division: delegate to the matrix IR builder.
3205 if (Ops.Ty->isConstantMatrixType()) {
3206 llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
3207 // We need to check the types of the operands of the operator to get the
3208 // correct matrix dimensions.
3209 auto *BO = cast<BinaryOperator>(Ops.E);
3210 (void)BO;
// asserts below are compiled out (NDEBUG build — the report shows the
// ((void)0) expansion); they document that LHS is the matrix and RHS the
// arithmetic scalar.
3211 assert(((void)0)
3212 isa<ConstantMatrixType>(BO->getLHS()->getType().getCanonicalType()) &&((void)0)
3213 "first operand must be a matrix")((void)0);
3214 assert(BO->getRHS()->getType().getCanonicalType()->isArithmeticType() &&((void)0)
3215 "second operand must be an arithmetic type")((void)0);
3216 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
3217 return MB.CreateScalarDiv(Ops.LHS, Ops.RHS,
3218 Ops.Ty->hasUnsignedIntegerRepresentation());
3219 }
3220
3221 if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
3222 llvm::Value *Val;
// FPOptsRAII applies the expression's FP-contract/exception settings while
// the fdiv is created.
3223 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
3224 Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
3225 if ((CGF.getLangOpts().OpenCL &&
3226 !CGF.CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
3227 (CGF.getLangOpts().HIP && CGF.getLangOpts().CUDAIsDevice &&
3228 !CGF.CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
3229 // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
3230 // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
3231 // build option allows an application to specify that single precision
3232 // floating-point divide (x/y and 1/x) and sqrt used in the program
3233 // source are correctly rounded.
// The relaxed-accuracy annotation applies only to float and float vectors.
3234 llvm::Type *ValTy = Val->getType();
3235 if (ValTy->isFloatTy() ||
3236 (isa<llvm::VectorType>(ValTy) &&
3237 cast<llvm::VectorType>(ValTy)->getElementType()->isFloatTy()))
3238 CGF.SetFPAccuracy(Val, 2.5);
3239 }
3240 return Val;
3241 }
3242 else if (Ops.isFixedPointOp())
3243 return EmitFixedPointBinOp(Ops);
3244 else if (Ops.Ty->hasUnsignedIntegerRepresentation())
3245 return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
3246 else
3247 return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
3248}
3249
// Emit a remainder ('%').  Only integer (and integer-vector) types reach
// here; the same UBSan div-by-zero / INT_MIN%-1 checks as division apply,
// then a urem or srem is emitted based on signedness.
3250Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
3251 // Rem in C can't be a floating point type: C99 6.5.5p2.
3252 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
3253 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
3254 Ops.Ty->isIntegerType() &&
3255 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
// Unlike EmitDiv, the SanitizerScope here covers only the check emission.
3256 CodeGenFunction::SanitizerScope SanScope(&CGF);
3257 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
// false = this is a remainder, not a division.
3258 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
3259 }
3260
3261 if (Ops.Ty->hasUnsignedIntegerRepresentation())
3262 return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
3263 else
3264 return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
3265}
3266
// Emit an add/sub/mul with overflow detection via the
// llvm.{s,u}{add,sub,mul}.with.overflow intrinsics.  Reached for -ftrapv and
// for the {signed,unsigned}-integer-overflow sanitizers.  On overflow the
// code either calls the UBSan runtime, traps, or — if the user set
// -foverflow-handler (LangOpts.OverflowHandler) — calls that custom handler
// and uses the value it returns.
3267Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
3268 unsigned IID;
3269 unsigned OpID = 0;
3270 SanitizerHandler OverflowKind;
3271
// Select the intrinsic and the sanitizer-diagnostic kind from the opcode and
// signedness.  OpID encodes the operation for the custom handler: it becomes
// (op << 1) | isSigned, with op = 1 add, 2 sub, 3 mul (see below).
3272 bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
3273 switch (Ops.Opcode) {
3274 case BO_Add:
3275 case BO_AddAssign:
3276 OpID = 1;
3277 IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
3278 llvm::Intrinsic::uadd_with_overflow;
3279 OverflowKind = SanitizerHandler::AddOverflow;
3280 break;
3281 case BO_Sub:
3282 case BO_SubAssign:
3283 OpID = 2;
3284 IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
3285 llvm::Intrinsic::usub_with_overflow;
3286 OverflowKind = SanitizerHandler::SubOverflow;
3287 break;
3288 case BO_Mul:
3289 case BO_MulAssign:
3290 OpID = 3;
3291 IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
3292 llvm::Intrinsic::umul_with_overflow;
3293 OverflowKind = SanitizerHandler::MulOverflow;
3294 break;
3295 default:
3296 llvm_unreachable("Unsupported operation for overflow detection")__builtin_unreachable();
3297 }
// Finish the handler's operation encoding: low bit = signedness.
3298 OpID <<= 1;
3299 if (isSigned)
3300 OpID |= 1;
3301
3302 CodeGenFunction::SanitizerScope SanScope(&CGF);
3303 llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);
3304
3305 llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);
3306
// The intrinsic returns {result, i1 overflow-flag}.
3307 Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS});
3308 Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
3309 Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);
3310
3311 // Handle overflow with llvm.trap if no custom handler has been specified.
3312 const std::string *handlerName =
3313 &CGF.getLangOpts().OverflowHandler;
3314 if (handlerName->empty()) {
3315 // If the signed-integer-overflow sanitizer is enabled, emit a call to its
3316 // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
3317 if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
3318 llvm::Value *NotOverflow = Builder.CreateNot(overflow);
3319 SanitizerMask Kind = isSigned ? SanitizerKind::SignedIntegerOverflow
3320 : SanitizerKind::UnsignedIntegerOverflow;
3321 EmitBinOpCheck(std::make_pair(NotOverflow, Kind), Ops);
3322 } else
3323 CGF.EmitTrapCheck(Builder.CreateNot(overflow), OverflowKind);
3324 return result;
3325 }
3326
// Custom-handler path: split control flow so the handler only runs on the
// overflowing edge.
3327 // Branch in case of overflow.
3328 llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
3329 llvm::BasicBlock *continueBB =
3330 CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode());
3331 llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
3332
3333 Builder.CreateCondBr(overflow, overflowBB, continueBB);
3334
3335 // If an overflow handler is set, then we want to call it and then use its
3336 // result, if it returns.
3337 Builder.SetInsertPoint(overflowBB);
3338
// Handler signature: i64 handler(i64 lhs, i64 rhs, i8 opID, i8 bitwidth, ...)
// — declared varargs ('true') and resolved by name at link time.
3339 // Get the overflow handler.
3340 llvm::Type *Int8Ty = CGF.Int8Ty;
3341 llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
3342 llvm::FunctionType *handlerTy =
3343 llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
3344 llvm::FunctionCallee handler =
3345 CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);
3346
3347 // Sign extend the args to 64-bit, so that we can use the same handler for
3348 // all types of overflow.
3349 llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
3350 llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);
3351
3352 // Call the handler with the two arguments, the operation, and the size of
3353 // the result.
3354 llvm::Value *handlerArgs[] = {
3355 lhs,
3356 rhs,
3357 Builder.getInt8(OpID),
3358 Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth())
3359 };
3360 llvm::Value *handlerResult =
3361 CGF.EmitNounwindRuntimeCall(handler, handlerArgs);
3362
3363 // Truncate the result back to the desired size.
3364 handlerResult = Builder.CreateTrunc(handlerResult, opTy);
3365 Builder.CreateBr(continueBB);
3366
// Merge: intrinsic result on the no-overflow edge, handler's value on the
// overflow edge.
3367 Builder.SetInsertPoint(continueBB);
3368 llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
3369 phi->addIncoming(result, initialBB);
3370 phi->addIncoming(handlerResult, overflowBB);
3371
3372 return phi;
3373}
3374
3375/// Emit pointer + index arithmetic.
3376static Value *emitPointerArithmetic(CodeGenFunction &CGF,
3377 const BinOpInfo &op,
3378 bool isSubtraction) {
3379 // Must have binary (not unary) expr here. Unary pointer
3380 // increment/decrement doesn't use this path.
3381 const BinaryOperator *expr = cast<BinaryOperator>(op.E);
6
Field 'E' is a 'BinaryOperator'
3382
3383 Value *pointer = op.LHS;
3384 Expr *pointerOperand = expr->getLHS();
3385 Value *index = op.RHS;
3386 Expr *indexOperand = expr->getRHS();
3387
3388 // In a subtraction, the LHS is always the pointer.
3389 if (!isSubtraction
6.1
'isSubtraction' is true
6.1
'isSubtraction' is true
6.1
'isSubtraction' is true
6.1
'isSubtraction' is true
6.1
'isSubtraction' is true
6.1
'isSubtraction' is true
&& !pointer->getType()->isPointerTy()) {
3390 std::swap(pointer, index);
3391 std::swap(pointerOperand, indexOperand);
3392 }
3393
3394 bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
3395
3396 unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
7
The object is a 'IntegerType'
3397 auto &DL = CGF.CGM.getDataLayout();
3398 auto PtrTy = cast<llvm::PointerType>(pointer->getType());
8
The object is a 'PointerType'
3399
3400 // Some versions of glibc and gcc use idioms (particularly in their malloc
3401 // routines) that add a pointer-sized integer (known to be a pointer value)
3402 // to a null pointer in order to cast the value back to an integer or as
3403 // part of a pointer alignment algorithm. This is undefined behavior, but
3404 // we'd like to be able to compile programs that use it.
3405 //
3406 // Normally, we'd generate a GEP with a null-pointer base here in response
3407 // to that code, but it's also UB to dereference a pointer created that
3408 // way. Instead (as an acknowledged hack to tolerate the idiom) we will
3409 // generate a direct cast of the integer value to a pointer.
3410 //
3411 // The idiom (p = nullptr + N) is not met if any of the following are true:
3412 //
3413 // The operation is subtraction.
3414 // The index is not pointer-sized.
3415 // The pointer type is not byte-sized.
3416 //
3417 if (BinaryOperator::isNullPointerArithmeticExtension(CGF.getContext(),
9
Assuming the condition is false
10
Taking false branch
3418 op.Opcode,
3419 expr->getLHS(),
3420 expr->getRHS()))
3421 return CGF.Builder.CreateIntToPtr(index, pointer->getType());
3422
3423 if (width != DL.getIndexTypeSizeInBits(PtrTy)) {
11
Assuming the condition is false
12
Taking false branch
3424 // Zero-extend or sign-extend the pointer value according to
3425 // whether the index is signed or not.
3426 index = CGF.Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned,
3427 "idx.ext");
3428 }
3429
3430 // If this is subtraction, negate the index.
3431 if (isSubtraction
12.1
'isSubtraction' is true
12.1
'isSubtraction' is true
12.1
'isSubtraction' is true
12.1
'isSubtraction' is true
12.1
'isSubtraction' is true
12.1
'isSubtraction' is true
)
13
Taking true branch
3432 index = CGF.Builder.CreateNeg(index, "idx.neg");
3433
3434 if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
14
Assuming the condition is false
15
Taking false branch
3435 CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(),
3436 /*Accessed*/ false);
3437
3438 const PointerType *pointerType
3439 = pointerOperand->getType()->getAs<PointerType>();
16
Assuming the object is a 'PointerType'
3440 if (!pointerType) {
17
Assuming 'pointerType' is non-null
18
Taking false branch
3441 QualType objectType = pointerOperand->getType()
3442 ->castAs<ObjCObjectPointerType>()
3443 ->getPointeeType();
3444 llvm::Value *objectSize
3445 = CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType));
3446
3447 index = CGF.Builder.CreateMul(index, objectSize);
3448
3449 Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
3450 result = CGF.Builder.CreateGEP(CGF.Int8Ty, result, index, "add.ptr");
3451 return CGF.Builder.CreateBitCast(result, pointer->getType());
3452 }
3453
3454 QualType elementType = pointerType->getPointeeType();
3455 if (const VariableArrayType *vla
22.1
'vla' is null
22.1
'vla' is null
22.1
'vla' is null
22.1
'vla' is null
22.1
'vla' is null
22.1
'vla' is null
23
Taking false branch
3456 = CGF.getContext().getAsVariableArrayType(elementType)) {
19
Calling 'ASTContext::getAsVariableArrayType'
22
Returning from 'ASTContext::getAsVariableArrayType'
3457 // The element count here is the total number of non-VLA elements. 3458 llvm::Value *numElements = CGF.getVLASize(vla).NumElts; 3459 3460 // Effectively, the multiply by the VLA size is part of the GEP. 3461 // GEP indexes are signed, and scaling an index isn't permitted to 3462 // signed-overflow, so we use the same semantics for our explicit 3463 // multiply. We suppress this if overflow is not undefined behavior. 3464 if (CGF.getLangOpts().isSignedOverflowDefined()) { 3465 index = CGF.Builder.CreateMul(index, numElements, "vla.index"); 3466 pointer = CGF.Builder.CreateGEP( 3467 pointer->getType()->getPointerElementType(), pointer, index, 3468 "add.ptr"); 3469 } else { 3470 index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index"); 3471 pointer = 3472 CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction, 3473 op.E->getExprLoc(), "add.ptr"); 3474 } 3475 return pointer; 3476 } 3477 3478 // Explicitly handle GNU void* and function pointer arithmetic extensions. The 3479 // GNU void* casts amount to no-ops since our void* type is i8*, but this is 3480 // future proof. 3481 if (elementType->isVoidType() || elementType->isFunctionType()) {
24
Calling 'Type::isVoidType'
31
Returning from 'Type::isVoidType'
32
Calling 'Type::isFunctionType'
35
Returning from 'Type::isFunctionType'
36
Taking false branch
3482 Value *result = CGF.EmitCastToVoidPtr(pointer); 3483 result = CGF.Builder.CreateGEP(CGF.Int8Ty, result, index, "add.ptr"); 3484 return CGF.Builder.CreateBitCast(result, pointer->getType()); 3485 } 3486 3487 if (CGF.getLangOpts().isSignedOverflowDefined())
37
Calling 'LangOptions::isSignedOverflowDefined'
40
Returning from 'LangOptions::isSignedOverflowDefined'
41
Taking false branch
3488 return CGF.Builder.CreateGEP( 3489 pointer->getType()->getPointerElementType(), pointer, index, "add.ptr"); 3490 3491 return CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction,
42
Calling 'CodeGenFunction::EmitCheckedInBoundsGEP'
3492 op.E->getExprLoc(), "add.ptr"); 3493} 3494 3495// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and 3496// Addend. Use negMul and negAdd to negate the first operand of the Mul or 3497// the add operand respectively. This allows fmuladd to represent a*b-c, or 3498// c-a*b. Patterns in LLVM should catch the negated forms and translate them to 3499// efficient operations. 3500static Value* buildFMulAdd(llvm::Instruction *MulOp, Value *Addend, 3501 const CodeGenFunction &CGF, CGBuilderTy &Builder, 3502 bool negMul, bool negAdd) { 3503 assert(!(negMul && negAdd) && "Only one of negMul and negAdd should be set.")((void)0); 3504 3505 Value *MulOp0 = MulOp->getOperand(0); 3506 Value *MulOp1 = MulOp->getOperand(1); 3507 if (negMul) 3508 MulOp0 = Builder.CreateFNeg(MulOp0, "neg"); 3509 if (negAdd) 3510 Addend = Builder.CreateFNeg(Addend, "neg"); 3511 3512 Value *FMulAdd = nullptr; 3513 if (Builder.getIsFPConstrained()) { 3514 assert(isa<llvm::ConstrainedFPIntrinsic>(MulOp) &&((void)0) 3515 "Only constrained operation should be created when Builder is in FP "((void)0) 3516 "constrained mode")((void)0); 3517 FMulAdd = Builder.CreateConstrainedFPCall( 3518 CGF.CGM.getIntrinsic(llvm::Intrinsic::experimental_constrained_fmuladd, 3519 Addend->getType()), 3520 {MulOp0, MulOp1, Addend}); 3521 } else { 3522 FMulAdd = Builder.CreateCall( 3523 CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()), 3524 {MulOp0, MulOp1, Addend}); 3525 } 3526 MulOp->eraseFromParent(); 3527 3528 return FMulAdd; 3529} 3530 3531// Check whether it would be legal to emit an fmuladd intrinsic call to 3532// represent op and if so, build the fmuladd. 3533// 3534// Checks that (a) the operation is fusable, and (b) -ffp-contract=on. 3535// Does NOT check the type of the operation - it's assumed that this function 3536// will be called from contexts where it's known that the type is contractable. 
3537static Value* tryEmitFMulAdd(const BinOpInfo &op, 3538 const CodeGenFunction &CGF, CGBuilderTy &Builder, 3539 bool isSub=false) { 3540 3541 assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||((void)0) 3542 op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&((void)0) 3543 "Only fadd/fsub can be the root of an fmuladd.")((void)0); 3544 3545 // Check whether this op is marked as fusable. 3546 if (!op.FPFeatures.allowFPContractWithinStatement()) 3547 return nullptr; 3548 3549 // We have a potentially fusable op. Look for a mul on one of the operands. 3550 // Also, make sure that the mul result isn't used directly. In that case, 3551 // there's no point creating a muladd operation. 3552 if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(op.LHS)) { 3553 if (LHSBinOp->getOpcode() == llvm::Instruction::FMul && 3554 LHSBinOp->use_empty()) 3555 return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub); 3556 } 3557 if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(op.RHS)) { 3558 if (RHSBinOp->getOpcode() == llvm::Instruction::FMul && 3559 RHSBinOp->use_empty()) 3560 return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false); 3561 } 3562 3563 if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(op.LHS)) { 3564 if (LHSBinOp->getIntrinsicID() == 3565 llvm::Intrinsic::experimental_constrained_fmul && 3566 LHSBinOp->use_empty()) 3567 return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub); 3568 } 3569 if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(op.RHS)) { 3570 if (RHSBinOp->getIntrinsicID() == 3571 llvm::Intrinsic::experimental_constrained_fmul && 3572 RHSBinOp->use_empty()) 3573 return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false); 3574 } 3575 3576 return nullptr; 3577} 3578 3579Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) { 3580 if (op.LHS->getType()->isPointerTy() || 3581 op.RHS->getType()->isPointerTy()) 3582 return emitPointerArithmetic(CGF, op, CodeGenFunction::NotSubtraction); 3583 3584 if 
(op.Ty->isSignedIntegerOrEnumerationType()) { 3585 switch (CGF.getLangOpts().getSignedOverflowBehavior()) { 3586 case LangOptions::SOB_Defined: 3587 return Builder.CreateAdd(op.LHS, op.RHS, "add"); 3588 case LangOptions::SOB_Undefined: 3589 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) 3590 return Builder.CreateNSWAdd(op.LHS, op.RHS, "add"); 3591 LLVM_FALLTHROUGH[[gnu::fallthrough]]; 3592 case LangOptions::SOB_Trapping: 3593 if (CanElideOverflowCheck(CGF.getContext(), op)) 3594 return Builder.CreateNSWAdd(op.LHS, op.RHS, "add"); 3595 return EmitOverflowCheckedBinOp(op); 3596 } 3597 } 3598 3599 if (op.Ty->isConstantMatrixType()) { 3600 llvm::MatrixBuilder<CGBuilderTy> MB(Builder); 3601 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures); 3602 return MB.CreateAdd(op.LHS, op.RHS); 3603 } 3604 3605 if (op.Ty->isUnsignedIntegerType() && 3606 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) && 3607 !CanElideOverflowCheck(CGF.getContext(), op)) 3608 return EmitOverflowCheckedBinOp(op); 3609 3610 if (op.LHS->getType()->isFPOrFPVectorTy()) { 3611 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures); 3612 // Try to form an fmuladd. 3613 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder)) 3614 return FMulAdd; 3615 3616 return Builder.CreateFAdd(op.LHS, op.RHS, "add"); 3617 } 3618 3619 if (op.isFixedPointOp()) 3620 return EmitFixedPointBinOp(op); 3621 3622 return Builder.CreateAdd(op.LHS, op.RHS, "add"); 3623} 3624 3625/// The resulting value must be calculated with exact precision, so the operands 3626/// may not be the same type. 3627Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) { 3628 using llvm::APSInt; 3629 using llvm::ConstantInt; 3630 3631 // This is either a binary operation where at least one of the operands is 3632 // a fixed-point type, or a unary operation where the operand is a fixed-point 3633 // type. 
The result type of a binary operation is determined by 3634 // Sema::handleFixedPointConversions(). 3635 QualType ResultTy = op.Ty; 3636 QualType LHSTy, RHSTy; 3637 if (const auto *BinOp = dyn_cast<BinaryOperator>(op.E)) { 3638 RHSTy = BinOp->getRHS()->getType(); 3639 if (const auto *CAO = dyn_cast<CompoundAssignOperator>(BinOp)) { 3640 // For compound assignment, the effective type of the LHS at this point 3641 // is the computation LHS type, not the actual LHS type, and the final 3642 // result type is not the type of the expression but rather the 3643 // computation result type. 3644 LHSTy = CAO->getComputationLHSType(); 3645 ResultTy = CAO->getComputationResultType(); 3646 } else 3647 LHSTy = BinOp->getLHS()->getType(); 3648 } else if (const auto *UnOp = dyn_cast<UnaryOperator>(op.E)) { 3649 LHSTy = UnOp->getSubExpr()->getType(); 3650 RHSTy = UnOp->getSubExpr()->getType(); 3651 } 3652 ASTContext &Ctx = CGF.getContext(); 3653 Value *LHS = op.LHS; 3654 Value *RHS = op.RHS; 3655 3656 auto LHSFixedSema = Ctx.getFixedPointSemantics(LHSTy); 3657 auto RHSFixedSema = Ctx.getFixedPointSemantics(RHSTy); 3658 auto ResultFixedSema = Ctx.getFixedPointSemantics(ResultTy); 3659 auto CommonFixedSema = LHSFixedSema.getCommonSemantics(RHSFixedSema); 3660 3661 // Perform the actual operation. 
3662 Value *Result; 3663 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder); 3664 switch (op.Opcode) { 3665 case BO_AddAssign: 3666 case BO_Add: 3667 Result = FPBuilder.CreateAdd(LHS, LHSFixedSema, RHS, RHSFixedSema); 3668 break; 3669 case BO_SubAssign: 3670 case BO_Sub: 3671 Result = FPBuilder.CreateSub(LHS, LHSFixedSema, RHS, RHSFixedSema); 3672 break; 3673 case BO_MulAssign: 3674 case BO_Mul: 3675 Result = FPBuilder.CreateMul(LHS, LHSFixedSema, RHS, RHSFixedSema); 3676 break; 3677 case BO_DivAssign: 3678 case BO_Div: 3679 Result = FPBuilder.CreateDiv(LHS, LHSFixedSema, RHS, RHSFixedSema); 3680 break; 3681 case BO_ShlAssign: 3682 case BO_Shl: 3683 Result = FPBuilder.CreateShl(LHS, LHSFixedSema, RHS); 3684 break; 3685 case BO_ShrAssign: 3686 case BO_Shr: 3687 Result = FPBuilder.CreateShr(LHS, LHSFixedSema, RHS); 3688 break; 3689 case BO_LT: 3690 return FPBuilder.CreateLT(LHS, LHSFixedSema, RHS, RHSFixedSema); 3691 case BO_GT: 3692 return FPBuilder.CreateGT(LHS, LHSFixedSema, RHS, RHSFixedSema); 3693 case BO_LE: 3694 return FPBuilder.CreateLE(LHS, LHSFixedSema, RHS, RHSFixedSema); 3695 case BO_GE: 3696 return FPBuilder.CreateGE(LHS, LHSFixedSema, RHS, RHSFixedSema); 3697 case BO_EQ: 3698 // For equality operations, we assume any padding bits on unsigned types are 3699 // zero'd out. They could be overwritten through non-saturating operations 3700 // that cause overflow, but this leads to undefined behavior. 
3701 return FPBuilder.CreateEQ(LHS, LHSFixedSema, RHS, RHSFixedSema); 3702 case BO_NE: 3703 return FPBuilder.CreateNE(LHS, LHSFixedSema, RHS, RHSFixedSema); 3704 case BO_Cmp: 3705 case BO_LAnd: 3706 case BO_LOr: 3707 llvm_unreachable("Found unimplemented fixed point binary operation")__builtin_unreachable(); 3708 case BO_PtrMemD: 3709 case BO_PtrMemI: 3710 case BO_Rem: 3711 case BO_Xor: 3712 case BO_And: 3713 case BO_Or: 3714 case BO_Assign: 3715 case BO_RemAssign: 3716 case BO_AndAssign: 3717 case BO_XorAssign: 3718 case BO_OrAssign: 3719 case BO_Comma: 3720 llvm_unreachable("Found unsupported binary operation for fixed point types.")__builtin_unreachable(); 3721 } 3722 3723 bool IsShift = BinaryOperator::isShiftOp(op.Opcode) || 3724 BinaryOperator::isShiftAssignOp(op.Opcode); 3725 // Convert to the result type. 3726 return FPBuilder.CreateFixedToFixed(Result, IsShift ? LHSFixedSema 3727 : CommonFixedSema, 3728 ResultFixedSema); 3729} 3730 3731Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) { 3732 // The LHS is always a pointer if either side is. 3733 if (!op.LHS->getType()->isPointerTy()) {
3
Taking false branch
3734 if (op.Ty->isSignedIntegerOrEnumerationType()) { 3735 switch (CGF.getLangOpts().getSignedOverflowBehavior()) { 3736 case LangOptions::SOB_Defined: 3737 return Builder.CreateSub(op.LHS, op.RHS, "sub"); 3738 case LangOptions::SOB_Undefined: 3739 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) 3740 return Builder.CreateNSWSub(op.LHS, op.RHS, "sub"); 3741 LLVM_FALLTHROUGH[[gnu::fallthrough]]; 3742 case LangOptions::SOB_Trapping: 3743 if (CanElideOverflowCheck(CGF.getContext(), op)) 3744 return Builder.CreateNSWSub(op.LHS, op.RHS, "sub"); 3745 return EmitOverflowCheckedBinOp(op); 3746 } 3747 } 3748 3749 if (op.Ty->isConstantMatrixType()) { 3750 llvm::MatrixBuilder<CGBuilderTy> MB(Builder); 3751 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures); 3752 return MB.CreateSub(op.LHS, op.RHS); 3753 } 3754 3755 if (op.Ty->isUnsignedIntegerType() && 3756 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) && 3757 !CanElideOverflowCheck(CGF.getContext(), op)) 3758 return EmitOverflowCheckedBinOp(op); 3759 3760 if (op.LHS->getType()->isFPOrFPVectorTy()) { 3761 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures); 3762 // Try to form an fmuladd. 3763 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true)) 3764 return FMulAdd; 3765 return Builder.CreateFSub(op.LHS, op.RHS, "sub"); 3766 } 3767 3768 if (op.isFixedPointOp()) 3769 return EmitFixedPointBinOp(op); 3770 3771 return Builder.CreateSub(op.LHS, op.RHS, "sub"); 3772 } 3773 3774 // If the RHS is not a pointer, then we have normal pointer 3775 // arithmetic. 3776 if (!op.RHS->getType()->isPointerTy())
4
Taking true branch
3777 return emitPointerArithmetic(CGF, op, CodeGenFunction::IsSubtraction);
5
Calling 'emitPointerArithmetic'
3778 3779 // Otherwise, this is a pointer subtraction. 3780 3781 // Do the raw subtraction part. 3782 llvm::Value *LHS 3783 = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast"); 3784 llvm::Value *RHS 3785 = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast"); 3786 Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub"); 3787 3788 // Okay, figure out the element size. 3789 const BinaryOperator *expr = cast<BinaryOperator>(op.E); 3790 QualType elementType = expr->getLHS()->getType()->getPointeeType(); 3791 3792 llvm::Value *divisor = nullptr; 3793 3794 // For a variable-length array, this is going to be non-constant. 3795 if (const VariableArrayType *vla 3796 = CGF.getContext().getAsVariableArrayType(elementType)) { 3797 auto VlaSize = CGF.getVLASize(vla); 3798 elementType = VlaSize.Type; 3799 divisor = VlaSize.NumElts; 3800 3801 // Scale the number of non-VLA elements by the non-VLA element size. 3802 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType); 3803 if (!eltSize.isOne()) 3804 divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor); 3805 3806 // For everything elese, we can just compute it, safe in the 3807 // assumption that Sema won't let anything through that we can't 3808 // safely compute the size of. 3809 } else { 3810 CharUnits elementSize; 3811 // Handle GCC extension for pointer arithmetic on void* and 3812 // function pointer types. 3813 if (elementType->isVoidType() || elementType->isFunctionType()) 3814 elementSize = CharUnits::One(); 3815 else 3816 elementSize = CGF.getContext().getTypeSizeInChars(elementType); 3817 3818 // Don't even emit the divide for element size of 1. 3819 if (elementSize.isOne()) 3820 return diffInChars; 3821 3822 divisor = CGF.CGM.getSize(elementSize); 3823 } 3824 3825 // Otherwise, do a full sdiv. 
This uses the "exact" form of sdiv, since 3826 // pointer difference in C is only defined in the case where both operands 3827 // are pointing to elements of an array. 3828 return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div"); 3829} 3830 3831Value *ScalarExprEmitter::GetWidthMinusOneValue(Value* LHS,Value* RHS) { 3832 llvm::IntegerType *Ty; 3833 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType())) 3834 Ty = cast<llvm::IntegerType>(VT->getElementType()); 3835 else 3836 Ty = cast<llvm::IntegerType>(LHS->getType()); 3837 return llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth() - 1); 3838} 3839 3840Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS, 3841 const Twine &Name) { 3842 llvm::IntegerType *Ty; 3843 if (auto *VT = dyn_cast<llvm::VectorType>(LHS->getType())) 3844 Ty = cast<llvm::IntegerType>(VT->getElementType()); 3845 else 3846 Ty = cast<llvm::IntegerType>(LHS->getType()); 3847 3848 if (llvm::isPowerOf2_64(Ty->getBitWidth())) 3849 return Builder.CreateAnd(RHS, GetWidthMinusOneValue(LHS, RHS), Name); 3850 3851 return Builder.CreateURem( 3852 RHS, llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth()), Name); 3853} 3854 3855Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) { 3856 // TODO: This misses out on the sanitizer check below. 3857 if (Ops.isFixedPointOp()) 3858 return EmitFixedPointBinOp(Ops); 3859 3860 // LLVM requires the LHS and RHS to be the same type: promote or truncate the 3861 // RHS to the same size as the LHS. 
3862 Value *RHS = Ops.RHS; 3863 if (Ops.LHS->getType() != RHS->getType()) 3864 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom"); 3865 3866 bool SanitizeSignedBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) && 3867 Ops.Ty->hasSignedIntegerRepresentation() && 3868 !CGF.getLangOpts().isSignedOverflowDefined() && 3869 !CGF.getLangOpts().CPlusPlus20; 3870 bool SanitizeUnsignedBase = 3871 CGF.SanOpts.has(SanitizerKind::UnsignedShiftBase) && 3872 Ops.Ty->hasUnsignedIntegerRepresentation(); 3873 bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase; 3874 bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent); 3875 // OpenCL 6.3j: shift values are effectively % word size of LHS. 3876 if (CGF.getLangOpts().OpenCL) 3877 RHS = ConstrainShiftValue(Ops.LHS, RHS, "shl.mask"); 3878 else if ((SanitizeBase || SanitizeExponent) && 3879 isa<llvm::IntegerType>(Ops.LHS->getType())) { 3880 CodeGenFunction::SanitizerScope SanScope(&CGF); 3881 SmallVector<std::pair<Value *, SanitizerMask>, 2> Checks; 3882 llvm::Value *WidthMinusOne = GetWidthMinusOneValue(Ops.LHS, Ops.RHS); 3883 llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne); 3884 3885 if (SanitizeExponent) { 3886 Checks.push_back( 3887 std::make_pair(ValidExponent, SanitizerKind::ShiftExponent)); 3888 } 3889 3890 if (SanitizeBase) { 3891 // Check whether we are shifting any non-zero bits off the top of the 3892 // integer. We only emit this check if exponent is valid - otherwise 3893 // instructions below will have undefined behavior themselves. 3894 llvm::BasicBlock *Orig = Builder.GetInsertBlock(); 3895 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont"); 3896 llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check"); 3897 Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont); 3898 llvm::Value *PromotedWidthMinusOne = 3899 (RHS == Ops.RHS) ? 
WidthMinusOne 3900 : GetWidthMinusOneValue(Ops.LHS, RHS); 3901 CGF.EmitBlock(CheckShiftBase); 3902 llvm::Value *BitsShiftedOff = Builder.CreateLShr( 3903 Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros", 3904 /*NUW*/ true, /*NSW*/ true), 3905 "shl.check"); 3906 if (SanitizeUnsignedBase || CGF.getLangOpts().CPlusPlus) { 3907 // In C99, we are not permitted to shift a 1 bit into the sign bit. 3908 // Under C++11's rules, shifting a 1 bit into the sign bit is 3909 // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't 3910 // define signed left shifts, so we use the C99 and C++11 rules there). 3911 // Unsigned shifts can always shift into the top bit. 3912 llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1); 3913 BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One); 3914 } 3915 llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0); 3916 llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero); 3917 CGF.EmitBlock(Cont); 3918 llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2); 3919 BaseCheck->addIncoming(Builder.getTrue(), Orig); 3920 BaseCheck->addIncoming(ValidBase, CheckShiftBase); 3921 Checks.push_back(std::make_pair( 3922 BaseCheck, SanitizeSignedBase ? SanitizerKind::ShiftBase 3923 : SanitizerKind::UnsignedShiftBase)); 3924 } 3925 3926 assert(!Checks.empty())((void)0); 3927 EmitBinOpCheck(Checks, Ops); 3928 } 3929 3930 return Builder.CreateShl(Ops.LHS, RHS, "shl"); 3931} 3932 3933Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) { 3934 // TODO: This misses out on the sanitizer check below. 3935 if (Ops.isFixedPointOp()) 3936 return EmitFixedPointBinOp(Ops); 3937 3938 // LLVM requires the LHS and RHS to be the same type: promote or truncate the 3939 // RHS to the same size as the LHS. 
3940 Value *RHS = Ops.RHS; 3941 if (Ops.LHS->getType() != RHS->getType()) 3942 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom"); 3943 3944 // OpenCL 6.3j: shift values are effectively % word size of LHS. 3945 if (CGF.getLangOpts().OpenCL) 3946 RHS = ConstrainShiftValue(Ops.LHS, RHS, "shr.mask"); 3947 else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) && 3948 isa<llvm::IntegerType>(Ops.LHS->getType())) { 3949 CodeGenFunction::SanitizerScope SanScope(&CGF); 3950 llvm::Value *Valid = 3951 Builder.CreateICmpULE(RHS, GetWidthMinusOneValue(Ops.LHS, RHS)); 3952 EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::ShiftExponent), Ops); 3953 } 3954 3955 if (Ops.Ty->hasUnsignedIntegerRepresentation()) 3956 return Builder.CreateLShr(Ops.LHS, RHS, "shr"); 3957 return Builder.CreateAShr(Ops.LHS, RHS, "shr"); 3958} 3959 3960enum IntrinsicType { VCMPEQ, VCMPGT }; 3961// return corresponding comparison intrinsic for given vector type 3962static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT, 3963 BuiltinType::Kind ElemKind) { 3964 switch (ElemKind) { 3965 default: llvm_unreachable("unexpected element type")__builtin_unreachable(); 3966 case BuiltinType::Char_U: 3967 case BuiltinType::UChar: 3968 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p : 3969 llvm::Intrinsic::ppc_altivec_vcmpgtub_p; 3970 case BuiltinType::Char_S: 3971 case BuiltinType::SChar: 3972 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p : 3973 llvm::Intrinsic::ppc_altivec_vcmpgtsb_p; 3974 case BuiltinType::UShort: 3975 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p : 3976 llvm::Intrinsic::ppc_altivec_vcmpgtuh_p; 3977 case BuiltinType::Short: 3978 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p : 3979 llvm::Intrinsic::ppc_altivec_vcmpgtsh_p; 3980 case BuiltinType::UInt: 3981 return (IT == VCMPEQ) ? 
llvm::Intrinsic::ppc_altivec_vcmpequw_p : 3982 llvm::Intrinsic::ppc_altivec_vcmpgtuw_p; 3983 case BuiltinType::Int: 3984 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p : 3985 llvm::Intrinsic::ppc_altivec_vcmpgtsw_p; 3986 case BuiltinType::ULong: 3987 case BuiltinType::ULongLong: 3988 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p : 3989 llvm::Intrinsic::ppc_altivec_vcmpgtud_p; 3990 case BuiltinType::Long: 3991 case BuiltinType::LongLong: 3992 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p : 3993 llvm::Intrinsic::ppc_altivec_vcmpgtsd_p; 3994 case BuiltinType::Float: 3995 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p : 3996 llvm::Intrinsic::ppc_altivec_vcmpgtfp_p; 3997 case BuiltinType::Double: 3998 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p : 3999 llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p; 4000 case BuiltinType::UInt128: 4001 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p 4002 : llvm::Intrinsic::ppc_altivec_vcmpgtuq_p; 4003 case BuiltinType::Int128: 4004 return (IT == VCMPEQ) ? 
llvm::Intrinsic::ppc_altivec_vcmpequq_p 4005 : llvm::Intrinsic::ppc_altivec_vcmpgtsq_p; 4006 } 4007} 4008 4009Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E, 4010 llvm::CmpInst::Predicate UICmpOpc, 4011 llvm::CmpInst::Predicate SICmpOpc, 4012 llvm::CmpInst::Predicate FCmpOpc, 4013 bool IsSignaling) { 4014 TestAndClearIgnoreResultAssign(); 4015 Value *Result; 4016 QualType LHSTy = E->getLHS()->getType(); 4017 QualType RHSTy = E->getRHS()->getType(); 4018 if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) { 4019 assert(E->getOpcode() == BO_EQ ||((void)0) 4020 E->getOpcode() == BO_NE)((void)0); 4021 Value *LHS = CGF.EmitScalarExpr(E->getLHS()); 4022 Value *RHS = CGF.EmitScalarExpr(E->getRHS()); 4023 Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison( 4024 CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE); 4025 } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) { 4026 BinOpInfo BOInfo = EmitBinOps(E); 4027 Value *LHS = BOInfo.LHS; 4028 Value *RHS = BOInfo.RHS; 4029 4030 // If AltiVec, the comparison results in a numeric type, so we use 4031 // intrinsics comparing vectors and giving 0 or 1 as a result 4032 if (LHSTy->isVectorType() && !E->getType()->isVectorType()) { 4033 // constants for mapping CR6 register bits to predicate result 4034 enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6; 4035 4036 llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic; 4037 4038 // in several cases vector arguments order will be reversed 4039 Value *FirstVecArg = LHS, 4040 *SecondVecArg = RHS; 4041 4042 QualType ElTy = LHSTy->castAs<VectorType>()->getElementType(); 4043 BuiltinType::Kind ElementKind = ElTy->castAs<BuiltinType>()->getKind(); 4044 4045 switch(E->getOpcode()) { 4046 default: llvm_unreachable("is not a comparison operation")__builtin_unreachable(); 4047 case BO_EQ: 4048 CR6 = CR6_LT; 4049 ID = GetIntrinsic(VCMPEQ, ElementKind); 4050 break; 4051 case BO_NE: 4052 CR6 = CR6_EQ; 4053 ID = GetIntrinsic(VCMPEQ, 
ElementKind); 4054 break; 4055 case BO_LT: 4056 CR6 = CR6_LT; 4057 ID = GetIntrinsic(VCMPGT, ElementKind); 4058 std::swap(FirstVecArg, SecondVecArg); 4059 break; 4060 case BO_GT: 4061 CR6 = CR6_LT; 4062 ID = GetIntrinsic(VCMPGT, ElementKind); 4063 break; 4064 case BO_LE: 4065 if (ElementKind == BuiltinType::Float) { 4066 CR6 = CR6_LT; 4067 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p; 4068 std::swap(FirstVecArg, SecondVecArg); 4069 } 4070 else { 4071 CR6 = CR6_EQ; 4072 ID = GetIntrinsic(VCMPGT, ElementKind); 4073 } 4074 break; 4075 case BO_GE: 4076 if (ElementKind == BuiltinType::Float) { 4077 CR6 = CR6_LT; 4078 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p; 4079 } 4080 else { 4081 CR6 = CR6_EQ; 4082 ID = GetIntrinsic(VCMPGT, ElementKind); 4083 std::swap(FirstVecArg, SecondVecArg); 4084 } 4085 break; 4086 } 4087 4088 Value *CR6Param = Builder.getInt32(CR6); 4089 llvm::Function *F = CGF.CGM.getIntrinsic(ID); 4090 Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg}); 4091 4092 // The result type of intrinsic may not be same as E->getType(). 4093 // If E->getType() is not BoolTy, EmitScalarConversion will do the 4094 // conversion work. If E->getType() is BoolTy, EmitScalarConversion will 4095 // do nothing, if ResultTy is not i1 at the same time, it will cause 4096 // crash later. 
4097 llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Result->getType()); 4098 if (ResultTy->getBitWidth() > 1 && 4099 E->getType() == CGF.getContext().BoolTy) 4100 Result = Builder.CreateTrunc(Result, Builder.getInt1Ty()); 4101 return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(), 4102 E->getExprLoc()); 4103 } 4104 4105 if (BOInfo.isFixedPointOp()) { 4106 Result = EmitFixedPointBinOp(BOInfo); 4107 } else if (LHS->getType()->isFPOrFPVectorTy()) { 4108 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, BOInfo.FPFeatures); 4109 if (!IsSignaling) 4110 Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp"); 4111 else 4112 Result = Builder.CreateFCmpS(FCmpOpc, LHS, RHS, "cmp"); 4113 } else if (LHSTy->hasSignedIntegerRepresentation()) { 4114 Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp"); 4115 } else { 4116 // Unsigned integers and pointers. 4117 4118 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers && 4119 !isa<llvm::ConstantPointerNull>(LHS) && 4120 !isa<llvm::ConstantPointerNull>(RHS)) { 4121 4122 // Dynamic information is required to be stripped for comparisons, 4123 // because it could leak the dynamic information. Based on comparisons 4124 // of pointers to dynamic objects, the optimizer can replace one pointer 4125 // with another, which might be incorrect in presence of invariant 4126 // groups. Comparison with null is safe because null does not carry any 4127 // dynamic information. 4128 if (LHSTy.mayBeDynamicClass()) 4129 LHS = Builder.CreateStripInvariantGroup(LHS); 4130 if (RHSTy.mayBeDynamicClass()) 4131 RHS = Builder.CreateStripInvariantGroup(RHS); 4132 } 4133 4134 Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp"); 4135 } 4136 4137 // If this is a vector comparison, sign extend the result to the appropriate 4138 // vector integer type and return it (don't convert to bool). 
4139 if (LHSTy->isVectorType()) 4140 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext"); 4141 4142 } else { 4143 // Complex Comparison: can only be an equality comparison. 4144 CodeGenFunction::ComplexPairTy LHS, RHS; 4145 QualType CETy; 4146 if (auto *CTy = LHSTy->getAs<ComplexType>()) { 4147 LHS = CGF.EmitComplexExpr(E->getLHS()); 4148 CETy = CTy->getElementType(); 4149 } else { 4150 LHS.first = Visit(E->getLHS()); 4151 LHS.second = llvm::Constant::getNullValue(LHS.first->getType()); 4152 CETy = LHSTy; 4153 } 4154 if (auto *CTy = RHSTy->getAs<ComplexType>()) { 4155 RHS = CGF.EmitComplexExpr(E->getRHS()); 4156 assert(CGF.getContext().hasSameUnqualifiedType(CETy,((void)0) 4157 CTy->getElementType()) &&((void)0) 4158 "The element types must always match.")((void)0); 4159 (void)CTy; 4160 } else { 4161 RHS.first = Visit(E->getRHS()); 4162 RHS.second = llvm::Constant::getNullValue(RHS.first->getType()); 4163 assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&((void)0) 4164 "The element types must always match.")((void)0); 4165 } 4166 4167 Value *ResultR, *ResultI; 4168 if (CETy->isRealFloatingType()) { 4169 // As complex comparisons can only be equality comparisons, they 4170 // are never signaling comparisons. 4171 ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r"); 4172 ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i"); 4173 } else { 4174 // Complex comparisons can only be equality comparisons. As such, signed 4175 // and unsigned opcodes are the same. 
4176 ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r"); 4177 ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i"); 4178 } 4179 4180 if (E->getOpcode() == BO_EQ) { 4181 Result = Builder.CreateAnd(ResultR, ResultI, "and.ri"); 4182 } else { 4183 assert(E->getOpcode() == BO_NE &&((void)0) 4184 "Complex comparison other than == or != ?")((void)0); 4185 Result = Builder.CreateOr(ResultR, ResultI, "or.ri"); 4186 } 4187 } 4188 4189 return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(), 4190 E->getExprLoc()); 4191} 4192 4193Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) { 4194 bool Ignore = TestAndClearIgnoreResultAssign(); 4195 4196 Value *RHS; 4197 LValue LHS; 4198 4199 switch (E->getLHS()->getType().getObjCLifetime()) { 4200 case Qualifiers::OCL_Strong: 4201 std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore); 4202 break; 4203 4204 case Qualifiers::OCL_Autoreleasing: 4205 std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E); 4206 break; 4207 4208 case Qualifiers::OCL_ExplicitNone: 4209 std::tie(LHS, RHS) = CGF.EmitARCStoreUnsafeUnretained(E, Ignore); 4210 break; 4211 4212 case Qualifiers::OCL_Weak: 4213 RHS = Visit(E->getRHS()); 4214 LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store); 4215 RHS = CGF.EmitARCStoreWeak(LHS.getAddress(CGF), RHS, Ignore); 4216 break; 4217 4218 case Qualifiers::OCL_None: 4219 // __block variables need to have the rhs evaluated first, plus 4220 // this should improve codegen just a little. 4221 RHS = Visit(E->getRHS()); 4222 LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store); 4223 4224 // Store the value into the LHS. Bit-fields are handled specially 4225 // because the result is altered by the store, i.e., [C99 6.5.16p1] 4226 // 'An assignment expression has the value of the left operand after 4227 // the assignment...'. 
4228 if (LHS.isBitField()) { 4229 CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS); 4230 } else { 4231 CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc()); 4232 CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS); 4233 } 4234 } 4235 4236 // If the result is clearly ignored, return now. 4237 if (Ignore) 4238 return nullptr; 4239 4240 // The result of an assignment in C is the assigned r-value. 4241 if (!CGF.getLangOpts().CPlusPlus) 4242 return RHS; 4243 4244 // If the lvalue is non-volatile, return the computed value of the assignment. 4245 if (!LHS.isVolatileQualified()) 4246 return RHS; 4247 4248 // Otherwise, reload the value. 4249 return EmitLoadOfLValue(LHS, E->getExprLoc()); 4250} 4251 4252Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) { 4253 // Perform vector logical and on comparisons with zero vectors. 4254 if (E->getType()->isVectorType()) { 4255 CGF.incrementProfileCounter(E); 4256 4257 Value *LHS = Visit(E->getLHS()); 4258 Value *RHS = Visit(E->getRHS()); 4259 Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType()); 4260 if (LHS->getType()->isFPOrFPVectorTy()) { 4261 CodeGenFunction::CGFPOptionsRAII FPOptsRAII( 4262 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts())); 4263 LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp"); 4264 RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp"); 4265 } else { 4266 LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp"); 4267 RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp"); 4268 } 4269 Value *And = Builder.CreateAnd(LHS, RHS); 4270 return Builder.CreateSExt(And, ConvertType(E->getType()), "sext"); 4271 } 4272 4273 bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr(); 4274 llvm::Type *ResTy = ConvertType(E->getType()); 4275 4276 // If we have 0 && RHS, see if we can elide RHS, if so, just return 0. 4277 // If we have 1 && X, just emit X without inserting the control flow. 
4278 bool LHSCondVal; 4279 if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) { 4280 if (LHSCondVal) { // If we have 1 && X, just emit X. 4281 CGF.incrementProfileCounter(E); 4282 4283 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS()); 4284 4285 // If we're generating for profiling or coverage, generate a branch to a 4286 // block that increments the RHS counter needed to track branch condition 4287 // coverage. In this case, use "FBlock" as both the final "TrueBlock" and 4288 // "FalseBlock" after the increment is done. 4289 if (InstrumentRegions && 4290 CodeGenFunction::isInstrumentedCondition(E->getRHS())) { 4291 llvm::BasicBlock *FBlock = CGF.createBasicBlock("land.end"); 4292 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt"); 4293 Builder.CreateCondBr(RHSCond, RHSBlockCnt, FBlock); 4294 CGF.EmitBlock(RHSBlockCnt); 4295 CGF.incrementProfileCounter(E->getRHS()); 4296 CGF.EmitBranch(FBlock); 4297 CGF.EmitBlock(FBlock); 4298 } 4299 4300 // ZExt result to int or bool. 4301 return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext"); 4302 } 4303 4304 // 0 && RHS: If it is safe, just elide the RHS, and return 0/false. 4305 if (!CGF.ContainsLabel(E->getRHS())) 4306 return llvm::Constant::getNullValue(ResTy); 4307 } 4308 4309 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end"); 4310 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs"); 4311 4312 CodeGenFunction::ConditionalEvaluation eval(CGF); 4313 4314 // Branch on the LHS first. If it is false, go to the failure (cont) block. 4315 CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock, 4316 CGF.getProfileCount(E->getRHS())); 4317 4318 // Any edges into the ContBlock are now from an (indeterminate number of) 4319 // edges from this first condition. All of these values will be false. Start 4320 // setting up the PHI node in the Cont Block for this. 
4321 llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2, 4322 "", ContBlock); 4323 for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock); 4324 PI != PE; ++PI) 4325 PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI); 4326 4327 eval.begin(CGF); 4328 CGF.EmitBlock(RHSBlock); 4329 CGF.incrementProfileCounter(E); 4330 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS()); 4331 eval.end(CGF); 4332 4333 // Reaquire the RHS block, as there may be subblocks inserted. 4334 RHSBlock = Builder.GetInsertBlock(); 4335 4336 // If we're generating for profiling or coverage, generate a branch on the 4337 // RHS to a block that increments the RHS true counter needed to track branch 4338 // condition coverage. 4339 if (InstrumentRegions && 4340 CodeGenFunction::isInstrumentedCondition(E->getRHS())) { 4341 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt"); 4342 Builder.CreateCondBr(RHSCond, RHSBlockCnt, ContBlock); 4343 CGF.EmitBlock(RHSBlockCnt); 4344 CGF.incrementProfileCounter(E->getRHS()); 4345 CGF.EmitBranch(ContBlock); 4346 PN->addIncoming(RHSCond, RHSBlockCnt); 4347 } 4348 4349 // Emit an unconditional branch from this block to ContBlock. 4350 { 4351 // There is no need to emit line number for unconditional branch. 4352 auto NL = ApplyDebugLocation::CreateEmpty(CGF); 4353 CGF.EmitBlock(ContBlock); 4354 } 4355 // Insert an entry into the phi node for the edge with the value of RHSCond. 4356 PN->addIncoming(RHSCond, RHSBlock); 4357 4358 // Artificial location to preserve the scope information 4359 { 4360 auto NL = ApplyDebugLocation::CreateArtificial(CGF); 4361 PN->setDebugLoc(Builder.getCurrentDebugLocation()); 4362 } 4363 4364 // ZExt result to int. 4365 return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext"); 4366} 4367 4368Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) { 4369 // Perform vector logical or on comparisons with zero vectors. 
4370 if (E->getType()->isVectorType()) { 4371 CGF.incrementProfileCounter(E); 4372 4373 Value *LHS = Visit(E->getLHS()); 4374 Value *RHS = Visit(E->getRHS()); 4375 Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType()); 4376 if (LHS->getType()->isFPOrFPVectorTy()) { 4377 CodeGenFunction::CGFPOptionsRAII FPOptsRAII( 4378 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts())); 4379 LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp"); 4380 RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp"); 4381 } else { 4382 LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp"); 4383 RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp"); 4384 } 4385 Value *Or = Builder.CreateOr(LHS, RHS); 4386 return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext"); 4387 } 4388 4389 bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr(); 4390 llvm::Type *ResTy = ConvertType(E->getType()); 4391 4392 // If we have 1 || RHS, see if we can elide RHS, if so, just return 1. 4393 // If we have 0 || X, just emit X without inserting the control flow. 4394 bool LHSCondVal; 4395 if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) { 4396 if (!LHSCondVal) { // If we have 0 || X, just emit X. 4397 CGF.incrementProfileCounter(E); 4398 4399 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS()); 4400 4401 // If we're generating for profiling or coverage, generate a branch to a 4402 // block that increments the RHS counter need to track branch condition 4403 // coverage. In this case, use "FBlock" as both the final "TrueBlock" and 4404 // "FalseBlock" after the increment is done. 
4405 if (InstrumentRegions && 4406 CodeGenFunction::isInstrumentedCondition(E->getRHS())) { 4407 llvm::BasicBlock *FBlock = CGF.createBasicBlock("lor.end"); 4408 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt"); 4409 Builder.CreateCondBr(RHSCond, FBlock, RHSBlockCnt); 4410 CGF.EmitBlock(RHSBlockCnt); 4411 CGF.incrementProfileCounter(E->getRHS()); 4412 CGF.EmitBranch(FBlock); 4413 CGF.EmitBlock(FBlock); 4414 } 4415 4416 // ZExt result to int or bool. 4417 return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext"); 4418 } 4419 4420 // 1 || RHS: If it is safe, just elide the RHS, and return 1/true. 4421 if (!CGF.ContainsLabel(E->getRHS())) 4422 return llvm::ConstantInt::get(ResTy, 1); 4423 } 4424 4425 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end"); 4426 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs"); 4427 4428 CodeGenFunction::ConditionalEvaluation eval(CGF); 4429 4430 // Branch on the LHS first. If it is true, go to the success (cont) block. 4431 CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock, 4432 CGF.getCurrentProfileCount() - 4433 CGF.getProfileCount(E->getRHS())); 4434 4435 // Any edges into the ContBlock are now from an (indeterminate number of) 4436 // edges from this first condition. All of these values will be true. Start 4437 // setting up the PHI node in the Cont Block for this. 4438 llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2, 4439 "", ContBlock); 4440 for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock); 4441 PI != PE; ++PI) 4442 PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI); 4443 4444 eval.begin(CGF); 4445 4446 // Emit the RHS condition as a bool value. 4447 CGF.EmitBlock(RHSBlock); 4448 CGF.incrementProfileCounter(E); 4449 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS()); 4450 4451 eval.end(CGF); 4452 4453 // Reaquire the RHS block, as there may be subblocks inserted. 
4454 RHSBlock = Builder.GetInsertBlock(); 4455 4456 // If we're generating for profiling or coverage, generate a branch on the 4457 // RHS to a block that increments the RHS true counter needed to track branch 4458 // condition coverage. 4459 if (InstrumentRegions && 4460 CodeGenFunction::isInstrumentedCondition(E->getRHS())) { 4461 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt"); 4462 Builder.CreateCondBr(RHSCond, ContBlock, RHSBlockCnt); 4463 CGF.EmitBlock(RHSBlockCnt); 4464 CGF.incrementProfileCounter(E->getRHS()); 4465 CGF.EmitBranch(ContBlock); 4466 PN->addIncoming(RHSCond, RHSBlockCnt); 4467 } 4468 4469 // Emit an unconditional branch from this block to ContBlock. Insert an entry 4470 // into the phi node for the edge with the value of RHSCond. 4471 CGF.EmitBlock(ContBlock); 4472 PN->addIncoming(RHSCond, RHSBlock); 4473 4474 // ZExt result to int. 4475 return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext"); 4476} 4477 4478Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) { 4479 CGF.EmitIgnoredExpr(E->getLHS()); 4480 CGF.EnsureInsertPoint(); 4481 return Visit(E->getRHS()); 4482} 4483 4484//===----------------------------------------------------------------------===// 4485// Other Operators 4486//===----------------------------------------------------------------------===// 4487 4488/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified 4489/// expression is cheap enough and side-effect-free enough to evaluate 4490/// unconditionally instead of conditionally. This is used to convert control 4491/// flow into selects in some cases. 4492static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E, 4493 CodeGenFunction &CGF) { 4494 // Anything that is an integer or floating point constant is fine. 4495 return E->IgnoreParens()->isEvaluatable(CGF.getContext()); 4496 4497 // Even non-volatile automatic variables can't be evaluated unconditionally. 
4498 // Referencing a thread_local may cause non-trivial initialization work to 4499 // occur. If we're inside a lambda and one of the variables is from the scope 4500 // outside the lambda, that function may have returned already. Reading its 4501 // locals is a bad idea. Also, these reads may introduce races there didn't 4502 // exist in the source-level program. 4503} 4504 4505 4506Value *ScalarExprEmitter:: 4507VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) { 4508 TestAndClearIgnoreResultAssign(); 4509 4510 // Bind the common expression if necessary. 4511 CodeGenFunction::OpaqueValueMapping binding(CGF, E); 4512 4513 Expr *condExpr = E->getCond(); 4514 Expr *lhsExpr = E->getTrueExpr(); 4515 Expr *rhsExpr = E->getFalseExpr(); 4516 4517 // If the condition constant folds and can be elided, try to avoid emitting 4518 // the condition and the dead arm. 4519 bool CondExprBool; 4520 if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) { 4521 Expr *live = lhsExpr, *dead = rhsExpr; 4522 if (!CondExprBool) std::swap(live, dead); 4523 4524 // If the dead side doesn't have labels we need, just emit the Live part. 4525 if (!CGF.ContainsLabel(dead)) { 4526 if (CondExprBool) 4527 CGF.incrementProfileCounter(E); 4528 Value *Result = Visit(live); 4529 4530 // If the live part is a throw expression, it acts like it has a void 4531 // type, so evaluating it returns a null Value*. However, a conditional 4532 // with non-void type must return a non-null Value*. 4533 if (!Result && !E->getType()->isVoidType()) 4534 Result = llvm::UndefValue::get(CGF.ConvertType(E->getType())); 4535 4536 return Result; 4537 } 4538 } 4539 4540 // OpenCL: If the condition is a vector, we can treat this condition like 4541 // the select function. 
4542 if ((CGF.getLangOpts().OpenCL && condExpr->getType()->isVectorType()) || 4543 condExpr->getType()->isExtVectorType()) { 4544 CGF.incrementProfileCounter(E); 4545 4546 llvm::Value *CondV = CGF.EmitScalarExpr(condExpr); 4547 llvm::Value *LHS = Visit(lhsExpr); 4548 llvm::Value *RHS = Visit(rhsExpr); 4549 4550 llvm::Type *condType = ConvertType(condExpr->getType()); 4551 auto *vecTy = cast<llvm::FixedVectorType>(condType); 4552 4553 unsigned numElem = vecTy->getNumElements(); 4554 llvm::Type *elemType = vecTy->getElementType(); 4555 4556 llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy); 4557 llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec); 4558 llvm::Value *tmp = Builder.CreateSExt( 4559 TestMSB, llvm::FixedVectorType::get(elemType, numElem), "sext"); 4560 llvm::Value *tmp2 = Builder.CreateNot(tmp); 4561 4562 // Cast float to int to perform ANDs if necessary. 4563 llvm::Value *RHSTmp = RHS; 4564 llvm::Value *LHSTmp = LHS; 4565 bool wasCast = false; 4566 llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType()); 4567 if (rhsVTy->getElementType()->isFloatingPointTy()) { 4568 RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType()); 4569 LHSTmp = Builder.CreateBitCast(LHS, tmp->getType()); 4570 wasCast = true; 4571 } 4572 4573 llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2); 4574 llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp); 4575 llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond"); 4576 if (wasCast) 4577 tmp5 = Builder.CreateBitCast(tmp5, RHS->getType()); 4578 4579 return tmp5; 4580 } 4581 4582 if (condExpr->getType()->isVectorType()) { 4583 CGF.incrementProfileCounter(E); 4584 4585 llvm::Value *CondV = CGF.EmitScalarExpr(condExpr); 4586 llvm::Value *LHS = Visit(lhsExpr); 4587 llvm::Value *RHS = Visit(rhsExpr); 4588 4589 llvm::Type *CondType = ConvertType(condExpr->getType()); 4590 auto *VecTy = cast<llvm::VectorType>(CondType); 4591 llvm::Value *ZeroVec = llvm::Constant::getNullValue(VecTy); 4592 4593 CondV = 
Builder.CreateICmpNE(CondV, ZeroVec, "vector_cond"); 4594 return Builder.CreateSelect(CondV, LHS, RHS, "vector_select"); 4595 } 4596 4597 // If this is a really simple expression (like x ? 4 : 5), emit this as a 4598 // select instead of as control flow. We can only do this if it is cheap and 4599 // safe to evaluate the LHS and RHS unconditionally. 4600 if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) && 4601 isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) { 4602 llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr); 4603 llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty); 4604 4605 CGF.incrementProfileCounter(E, StepV); 4606 4607 llvm::Value *LHS = Visit(lhsExpr); 4608 llvm::Value *RHS = Visit(rhsExpr); 4609 if (!LHS) { 4610 // If the conditional has void type, make sure we return a null Value*. 4611 assert(!RHS && "LHS and RHS types must match")((void)0); 4612 return nullptr; 4613 } 4614 return Builder.CreateSelect(CondV, LHS, RHS, "cond"); 4615 } 4616 4617 llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true"); 4618 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false"); 4619 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end"); 4620 4621 CodeGenFunction::ConditionalEvaluation eval(CGF); 4622 CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock, 4623 CGF.getProfileCount(lhsExpr)); 4624 4625 CGF.EmitBlock(LHSBlock); 4626 CGF.incrementProfileCounter(E); 4627 eval.begin(CGF); 4628 Value *LHS = Visit(lhsExpr); 4629 eval.end(CGF); 4630 4631 LHSBlock = Builder.GetInsertBlock(); 4632 Builder.CreateBr(ContBlock); 4633 4634 CGF.EmitBlock(RHSBlock); 4635 eval.begin(CGF); 4636 Value *RHS = Visit(rhsExpr); 4637 eval.end(CGF); 4638 4639 RHSBlock = Builder.GetInsertBlock(); 4640 CGF.EmitBlock(ContBlock); 4641 4642 // If the LHS or RHS is a throw expression, it will be legitimately null. 4643 if (!LHS) 4644 return RHS; 4645 if (!RHS) 4646 return LHS; 4647 4648 // Create a PHI node for the real part. 
4649 llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond"); 4650 PN->addIncoming(LHS, LHSBlock); 4651 PN->addIncoming(RHS, RHSBlock); 4652 return PN; 4653} 4654 4655Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) { 4656 return Visit(E->getChosenSubExpr()); 4657} 4658 4659Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) { 4660 QualType Ty = VE->getType(); 4661 4662 if (Ty->isVariablyModifiedType()) 4663 CGF.EmitVariablyModifiedType(Ty); 4664 4665 Address ArgValue = Address::invalid(); 4666 Address ArgPtr = CGF.EmitVAArg(VE, ArgValue); 4667 4668 llvm::Type *ArgTy = ConvertType(VE->getType()); 4669 4670 // If EmitVAArg fails, emit an error. 4671 if (!ArgPtr.isValid()) { 4672 CGF.ErrorUnsupported(VE, "va_arg expression"); 4673 return llvm::UndefValue::get(ArgTy); 4674 } 4675 4676 // FIXME Volatility. 4677 llvm::Value *Val = Builder.CreateLoad(ArgPtr); 4678 4679 // If EmitVAArg promoted the type, we must truncate it. 4680 if (ArgTy != Val->getType()) { 4681 if (ArgTy->isPointerTy() && !Val->getType()->isPointerTy()) 4682 Val = Builder.CreateIntToPtr(Val, ArgTy); 4683 else 4684 Val = Builder.CreateTrunc(Val, ArgTy); 4685 } 4686 4687 return Val; 4688} 4689 4690Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) { 4691 return CGF.EmitBlockLiteral(block); 4692} 4693 4694// Convert a vec3 to vec4, or vice versa. 4695static Value *ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF, 4696 Value *Src, unsigned NumElementsDst) { 4697 static constexpr int Mask[] = {0, 1, 2, -1}; 4698 return Builder.CreateShuffleVector(Src, 4699 llvm::makeArrayRef(Mask, NumElementsDst)); 4700} 4701 4702// Create cast instructions for converting LLVM value \p Src to LLVM type \p 4703// DstTy. \p Src has the same size as \p DstTy. Both are single value types 4704// but could be scalar or vectors of different lengths, and either can be 4705// pointer. 4706// There are 4 cases: 4707// 1. non-pointer -> non-pointer : needs 1 bitcast 4708// 2. 
pointer -> pointer : needs 1 bitcast or addrspacecast 4709// 3. pointer -> non-pointer 4710// a) pointer -> intptr_t : needs 1 ptrtoint 4711// b) pointer -> non-intptr_t : needs 1 ptrtoint then 1 bitcast 4712// 4. non-pointer -> pointer 4713// a) intptr_t -> pointer : needs 1 inttoptr 4714// b) non-intptr_t -> pointer : needs 1 bitcast then 1 inttoptr 4715// Note: for cases 3b and 4b two casts are required since LLVM casts do not 4716// allow casting directly between pointer types and non-integer non-pointer 4717// types. 4718static Value *createCastsForTypeOfSameSize(CGBuilderTy &Builder, 4719 const llvm::DataLayout &DL, 4720 Value *Src, llvm::Type *DstTy, 4721 StringRef Name = "") { 4722 auto SrcTy = Src->getType(); 4723 4724 // Case 1. 4725 if (!SrcTy->isPointerTy() && !DstTy->isPointerTy()) 4726 return Builder.CreateBitCast(Src, DstTy, Name); 4727 4728 // Case 2. 4729 if (SrcTy->isPointerTy() && DstTy->isPointerTy()) 4730 return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name); 4731 4732 // Case 3. 4733 if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) { 4734 // Case 3b. 4735 if (!DstTy->isIntegerTy()) 4736 Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy)); 4737 // Cases 3a and 3b. 4738 return Builder.CreateBitOrPointerCast(Src, DstTy, Name); 4739 } 4740 4741 // Case 4b. 4742 if (!SrcTy->isIntegerTy()) 4743 Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy)); 4744 // Cases 4a and 4b. 4745 return Builder.CreateIntToPtr(Src, DstTy, Name); 4746} 4747 4748Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) { 4749 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr()); 4750 llvm::Type *DstTy = ConvertType(E->getType()); 4751 4752 llvm::Type *SrcTy = Src->getType(); 4753 unsigned NumElementsSrc = 4754 isa<llvm::VectorType>(SrcTy) 4755 ? cast<llvm::FixedVectorType>(SrcTy)->getNumElements() 4756 : 0; 4757 unsigned NumElementsDst = 4758 isa<llvm::VectorType>(DstTy) 4759 ? 
cast<llvm::FixedVectorType>(DstTy)->getNumElements() 4760 : 0; 4761 4762 // Going from vec3 to non-vec3 is a special case and requires a shuffle 4763 // vector to get a vec4, then a bitcast if the target type is different. 4764 if (NumElementsSrc == 3 && NumElementsDst != 3) { 4765 Src = ConvertVec3AndVec4(Builder, CGF, Src, 4); 4766 4767 if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) { 4768 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src, 4769 DstTy); 4770 } 4771 4772 Src->setName("astype"); 4773 return Src; 4774 } 4775 4776 // Going from non-vec3 to vec3 is a special case and requires a bitcast 4777 // to vec4 if the original type is not vec4, then a shuffle vector to 4778 // get a vec3. 4779 if (NumElementsSrc != 3 && NumElementsDst == 3) { 4780 if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) { 4781 auto *Vec4Ty = llvm::FixedVectorType::get( 4782 cast<llvm::VectorType>(DstTy)->getElementType(), 4); 4783 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src, 4784 Vec4Ty); 4785 } 4786 4787 Src = ConvertVec3AndVec4(Builder, CGF, Src, 3); 4788 Src->setName("astype"); 4789 return Src; 4790 } 4791 4792 return createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), 4793 Src, DstTy, "astype"); 4794} 4795 4796Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) { 4797 return CGF.EmitAtomicExpr(E).getScalarVal(); 4798} 4799 4800//===----------------------------------------------------------------------===// 4801// Entry Point into this File 4802//===----------------------------------------------------------------------===// 4803 4804/// Emit the computation of the specified expression of scalar type, ignoring 4805/// the result. 
4806Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) { 4807 assert(E && hasScalarEvaluationKind(E->getType()) &&((void)0) 4808 "Invalid scalar expression to emit")((void)0); 4809 4810 return ScalarExprEmitter(*this, IgnoreResultAssign) 4811 .Visit(const_cast<Expr *>(E)); 4812} 4813 4814/// Emit a conversion from the specified type to the specified destination type, 4815/// both of which are LLVM scalar types. 4816Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy, 4817 QualType DstTy, 4818 SourceLocation Loc) { 4819 assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&((void)0) 4820 "Invalid scalar expression to emit")((void)0); 4821 return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc); 4822} 4823 4824/// Emit a conversion from the specified complex type to the specified 4825/// destination type, where the destination type is an LLVM scalar type. 4826Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src, 4827 QualType SrcTy, 4828 QualType DstTy, 4829 SourceLocation Loc) { 4830 assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&((void)0) 4831 "Invalid complex -> scalar conversion")((void)0); 4832 return ScalarExprEmitter(*this) 4833 .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc); 4834} 4835 4836 4837llvm::Value *CodeGenFunction:: 4838EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, 4839 bool isInc, bool isPre) { 4840 return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre); 4841} 4842 4843LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) { 4844 // object->isa or (*object).isa 4845 // Generate code as for: *(Class*)object 4846 4847 Expr *BaseExpr = E->getBase(); 4848 Address Addr = Address::invalid(); 4849 if (BaseExpr->isPRValue()) { 4850 Addr = Address(EmitScalarExpr(BaseExpr), getPointerAlign()); 4851 } else { 4852 Addr = EmitLValue(BaseExpr).getAddress(*this); 4853 } 4854 4855 // Cast 
the address to Class*. 4856 Addr = Builder.CreateElementBitCast(Addr, ConvertType(E->getType())); 4857 return MakeAddrLValue(Addr, E->getType()); 4858} 4859 4860 4861LValue CodeGenFunction::EmitCompoundAssignmentLValue( 4862 const CompoundAssignOperator *E) { 4863 ScalarExprEmitter Scalar(*this); 4864 Value *Result = nullptr; 4865 switch (E->getOpcode()) { 4866#define COMPOUND_OP(Op) \ 4867 case BO_##Op##Assign: \ 4868 return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \ 4869 Result) 4870 COMPOUND_OP(Mul); 4871 COMPOUND_OP(Div); 4872 COMPOUND_OP(Rem); 4873 COMPOUND_OP(Add); 4874 COMPOUND_OP(Sub); 4875 COMPOUND_OP(Shl); 4876 COMPOUND_OP(Shr); 4877 COMPOUND_OP(And); 4878 COMPOUND_OP(Xor); 4879 COMPOUND_OP(Or); 4880#undef COMPOUND_OP 4881 4882 case BO_PtrMemD: 4883 case BO_PtrMemI: 4884 case BO_Mul: 4885 case BO_Div: 4886 case BO_Rem: 4887 case BO_Add: 4888 case BO_Sub: 4889 case BO_Shl: 4890 case BO_Shr: 4891 case BO_LT: 4892 case BO_GT: 4893 case BO_LE: 4894 case BO_GE: 4895 case BO_EQ: 4896 case BO_NE: 4897 case BO_Cmp: 4898 case BO_And: 4899 case BO_Xor: 4900 case BO_Or: 4901 case BO_LAnd: 4902 case BO_LOr: 4903 case BO_Assign: 4904 case BO_Comma: 4905 llvm_unreachable("Not valid compound assignment operators")__builtin_unreachable(); 4906 } 4907 4908 llvm_unreachable("Unhandled compound assignment operator")__builtin_unreachable(); 4909} 4910 4911struct GEPOffsetAndOverflow { 4912 // The total (signed) byte offset for the GEP. 4913 llvm::Value *TotalOffset; 4914 // The offset overflow flag - true if the total offset overflows. 4915 llvm::Value *OffsetOverflows; 4916}; 4917 4918/// Evaluate given GEPVal, which is either an inbounds GEP, or a constant, 4919/// and compute the total offset it applies from it's base pointer BasePtr. 4920/// Returns offset in bytes and a boolean flag whether an overflow happened 4921/// during evaluation. 
4922static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal, 4923 llvm::LLVMContext &VMContext, 4924 CodeGenModule &CGM, 4925 CGBuilderTy &Builder) { 4926 const auto &DL = CGM.getDataLayout(); 4927 4928 // The total (signed) byte offset for the GEP. 4929 llvm::Value *TotalOffset = nullptr; 4930 4931 // Was the GEP already reduced to a constant? 4932 if (isa<llvm::Constant>(GEPVal)) { 4933 // Compute the offset by casting both pointers to integers and subtracting: 4934 // GEPVal = BasePtr + ptr(Offset) <--> Offset = int(GEPVal) - int(BasePtr) 4935 Value *BasePtr_int = 4936 Builder.CreatePtrToInt(BasePtr, DL.getIntPtrType(BasePtr->getType())); 4937 Value *GEPVal_int = 4938 Builder.CreatePtrToInt(GEPVal, DL.getIntPtrType(GEPVal->getType())); 4939 TotalOffset = Builder.CreateSub(GEPVal_int, BasePtr_int); 4940 return {TotalOffset, /*OffsetOverflows=*/Builder.getFalse()}; 4941 } 4942 4943 auto *GEP = cast<llvm::GEPOperator>(GEPVal); 4944 assert(GEP->getPointerOperand() == BasePtr &&((void)0) 4945 "BasePtr must be the the base of the GEP.")((void)0); 4946 assert(GEP->isInBounds() && "Expected inbounds GEP")((void)0); 4947 4948 auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType()); 4949 4950 // Grab references to the signed add/mul overflow intrinsics for intptr_t. 4951 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy); 4952 auto *SAddIntrinsic = 4953 CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy); 4954 auto *SMulIntrinsic = 4955 CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy); 4956 4957 // The offset overflow flag - true if the total offset overflows. 4958 llvm::Value *OffsetOverflows = Builder.getFalse(); 4959 4960 /// Return the result of the given binary operation. 
4961 auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS, 4962 llvm::Value *RHS) -> llvm::Value * { 4963 assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop")((void)0); 4964 4965 // If the operands are constants, return a constant result. 4966 if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) { 4967 if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) { 4968 llvm::APInt N; 4969 bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode, 4970 /*Signed=*/true, N); 4971 if (HasOverflow) 4972 OffsetOverflows = Builder.getTrue(); 4973 return llvm::ConstantInt::get(VMContext, N); 4974 } 4975 } 4976 4977 // Otherwise, compute the result with checked arithmetic. 4978 auto *ResultAndOverflow = Builder.CreateCall( 4979 (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS}); 4980 OffsetOverflows = Builder.CreateOr( 4981 Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows); 4982 return Builder.CreateExtractValue(ResultAndOverflow, 0); 4983 }; 4984 4985 // Determine the total byte offset by looking at each GEP operand. 4986 for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP); 4987 GTI != GTE; ++GTI) { 4988 llvm::Value *LocalOffset; 4989 auto *Index = GTI.getOperand(); 4990 // Compute the local offset contributed by this indexing step: 4991 if (auto *STy = GTI.getStructTypeOrNull()) { 4992 // For struct indexing, the local offset is the byte position of the 4993 // specified field. 4994 unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue(); 4995 LocalOffset = llvm::ConstantInt::get( 4996 IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo)); 4997 } else { 4998 // Otherwise this is array-like indexing. The local offset is the index 4999 // multiplied by the element size. 
5000 auto *ElementSize = llvm::ConstantInt::get( 5001 IntPtrTy, DL.getTypeAllocSize(GTI.getIndexedType())); 5002 auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true); 5003 LocalOffset = eval(BO_Mul, ElementSize, IndexS); 5004 } 5005 5006 // If this is the first offset, set it as the total offset. Otherwise, add 5007 // the local offset into the running total. 5008 if (!TotalOffset || TotalOffset == Zero) 5009 TotalOffset = LocalOffset; 5010 else 5011 TotalOffset = eval(BO_Add, TotalOffset, LocalOffset); 5012 } 5013 5014 return {TotalOffset, OffsetOverflows}; 5015} 5016 5017Value * 5018CodeGenFunction::EmitCheckedInBoundsGEP(Value *Ptr, ArrayRef<Value *> IdxList, 5019 bool SignedIndices, bool IsSubtraction, 5020 SourceLocation Loc, const Twine &Name) { 5021 llvm::Type *PtrTy = Ptr->getType(); 5022 Value *GEPVal = Builder.CreateInBoundsGEP( 5023 PtrTy->getPointerElementType(), Ptr, IdxList, Name); 5024 5025 // If the pointer overflow sanitizer isn't enabled, do nothing. 5026 if (!SanOpts.has(SanitizerKind::PointerOverflow))
43
Assuming the condition is false
44
Taking false branch
5027 return GEPVal; 5028 5029 // Perform nullptr-and-offset check unless the nullptr is defined. 5030 bool PerformNullCheck = !NullPointerIsDefined(
45
Assuming the condition is false
5031 Builder.GetInsertBlock()->getParent(), PtrTy->getPointerAddressSpace()); 5032 // Check for overflows unless the GEP got constant-folded, 5033 // and only in the default address space 5034 bool PerformOverflowCheck = 5035 !isa<llvm::Constant>(GEPVal) && PtrTy->getPointerAddressSpace() == 0;
46
Assuming 'GEPVal' is not a 'Constant'
47
Assuming the condition is true
5036 5037 if (!(PerformNullCheck
47.1
'PerformNullCheck' is false
47.1
'PerformNullCheck' is false
47.1
'PerformNullCheck' is false
47.1
'PerformNullCheck' is false
47.1
'PerformNullCheck' is false
47.1
'PerformNullCheck' is false
|| PerformOverflowCheck))
48
Taking false branch
5038 return GEPVal; 5039 5040 const auto &DL = CGM.getDataLayout(); 5041 5042 SanitizerScope SanScope(this); 5043 llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy); 5044 5045 GEPOffsetAndOverflow EvaluatedGEP = 5046 EmitGEPOffsetInBytes(Ptr, GEPVal, getLLVMContext(), CGM, Builder);
49
Null pointer value stored to 'EvaluatedGEP.TotalOffset'
5047 5048 assert((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) ||((void)0) 5049 EvaluatedGEP.OffsetOverflows == Builder.getFalse()) &&((void)0) 5050 "If the offset got constant-folded, we don't expect that there was an "((void)0) 5051 "overflow.")((void)0); 5052 5053 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy); 5054 5055 // Common case: if the total offset is zero, and we are using C++ semantics, 5056 // where nullptr+0 is defined, don't emit a check. 5057 if (EvaluatedGEP.TotalOffset == Zero && CGM.getLangOpts().CPlusPlus)
50
Assuming 'Zero' is not equal to field 'TotalOffset'
5058 return GEPVal; 5059 5060 // Now that we've computed the total offset, add it to the base pointer (with 5061 // wrapping semantics). 5062 auto *IntPtr = Builder.CreatePtrToInt(Ptr, IntPtrTy); 5063 auto *ComputedGEP = Builder.CreateAdd(IntPtr, EvaluatedGEP.TotalOffset); 5064 5065 llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks; 5066 5067 if (PerformNullCheck
50.1
'PerformNullCheck' is false
50.1
'PerformNullCheck' is false
50.1
'PerformNullCheck' is false
50.1
'PerformNullCheck' is false
50.1
'PerformNullCheck' is false
50.1
'PerformNullCheck' is false
) {
51
Taking false branch
5068 // In C++, if the base pointer evaluates to a null pointer value, 5069 // the only valid pointer this inbounds GEP can produce is also 5070 // a null pointer, so the offset must also evaluate to zero. 5071 // Likewise, if we have non-zero base pointer, we can not get null pointer 5072 // as a result, so the offset can not be -intptr_t(BasePtr). 5073 // In other words, both pointers are either null, or both are non-null, 5074 // or the behaviour is undefined. 5075 // 5076 // C, however, is more strict in this regard, and gives more 5077 // optimization opportunities: in C, additionally, nullptr+0 is undefined. 5078 // So both the input to the 'gep inbounds' AND the output must not be null. 5079 auto *BaseIsNotNullptr = Builder.CreateIsNotNull(Ptr); 5080 auto *ResultIsNotNullptr = Builder.CreateIsNotNull(ComputedGEP); 5081 auto *Valid = 5082 CGM.getLangOpts().CPlusPlus 5083 ? Builder.CreateICmpEQ(BaseIsNotNullptr, ResultIsNotNullptr) 5084 : Builder.CreateAnd(BaseIsNotNullptr, ResultIsNotNullptr); 5085 Checks.emplace_back(Valid, SanitizerKind::PointerOverflow); 5086 } 5087 5088 if (PerformOverflowCheck
51.1
'PerformOverflowCheck' is true
51.1
'PerformOverflowCheck' is true
51.1
'PerformOverflowCheck' is true
51.1
'PerformOverflowCheck' is true
51.1
'PerformOverflowCheck' is true
51.1
'PerformOverflowCheck' is true
) {
52
Taking true branch
5089 // The GEP is valid if: 5090 // 1) The total offset doesn't overflow, and 5091 // 2) The sign of the difference between the computed address and the base 5092 // pointer matches the sign of the total offset. 5093 llvm::Value *ValidGEP; 5094 auto *NoOffsetOverflow = Builder.CreateNot(EvaluatedGEP.OffsetOverflows); 5095 if (SignedIndices) {
53
Assuming 'SignedIndices' is true
54
Taking true branch
5096 // GEP is computed as `unsigned base + signed offset`, therefore: 5097 // * If offset was positive, then the computed pointer can not be 5098 // [unsigned] less than the base pointer, unless it overflowed. 5099 // * If offset was negative, then the computed pointer can not be 5100 // [unsigned] greater than the bas pointere, unless it overflowed. 5101 auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr); 5102 auto *PosOrZeroOffset = 5103 Builder.CreateICmpSGE(EvaluatedGEP.TotalOffset, Zero);
55
Passing null pointer value via 1st parameter 'LHS'
56
Calling 'IRBuilderBase::CreateICmpSGE'
5104 llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr); 5105 ValidGEP = 5106 Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid); 5107 } else if (!IsSubtraction) { 5108 // GEP is computed as `unsigned base + unsigned offset`, therefore the 5109 // computed pointer can not be [unsigned] less than base pointer, 5110 // unless there was an overflow. 5111 // Equivalent to `@llvm.uadd.with.overflow(%base, %offset)`. 5112 ValidGEP = Builder.CreateICmpUGE(ComputedGEP, IntPtr); 5113 } else { 5114 // GEP is computed as `unsigned base - unsigned offset`, therefore the 5115 // computed pointer can not be [unsigned] greater than base pointer, 5116 // unless there was an overflow. 5117 // Equivalent to `@llvm.usub.with.overflow(%base, sub(0, %offset))`. 5118 ValidGEP = Builder.CreateICmpULE(ComputedGEP, IntPtr); 5119 } 5120 ValidGEP = Builder.CreateAnd(ValidGEP, NoOffsetOverflow); 5121 Checks.emplace_back(ValidGEP, SanitizerKind::PointerOverflow); 5122 } 5123 5124 assert(!Checks.empty() && "Should have produced some checks.")((void)0); 5125 5126 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)}; 5127 // Pass the computed GEP to the runtime to avoid emitting poisoned arguments. 5128 llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP}; 5129 EmitCheck(Checks, SanitizerHandler::PointerOverflow, StaticArgs, DynamicArgs); 5130 5131 return GEPVal; 5132}

/usr/src/gnu/usr.bin/clang/libclangCodeGen/../../../llvm/clang/include/clang/AST/ASTContext.h

1//===- ASTContext.h - Context to hold long-lived AST nodes ------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// Defines the clang::ASTContext interface.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_CLANG_AST_ASTCONTEXT_H
15#define LLVM_CLANG_AST_ASTCONTEXT_H
16
17#include "clang/AST/ASTContextAllocate.h"
18#include "clang/AST/ASTFwd.h"
19#include "clang/AST/CanonicalType.h"
20#include "clang/AST/CommentCommandTraits.h"
21#include "clang/AST/ComparisonCategories.h"
22#include "clang/AST/Decl.h"
23#include "clang/AST/DeclBase.h"
24#include "clang/AST/DeclarationName.h"
25#include "clang/AST/ExternalASTSource.h"
26#include "clang/AST/NestedNameSpecifier.h"
27#include "clang/AST/PrettyPrinter.h"
28#include "clang/AST/RawCommentList.h"
29#include "clang/AST/TemplateName.h"
30#include "clang/AST/Type.h"
31#include "clang/Basic/AddressSpaces.h"
32#include "clang/Basic/AttrKinds.h"
33#include "clang/Basic/IdentifierTable.h"
34#include "clang/Basic/LLVM.h"
35#include "clang/Basic/LangOptions.h"
36#include "clang/Basic/Linkage.h"
37#include "clang/Basic/NoSanitizeList.h"
38#include "clang/Basic/OperatorKinds.h"
39#include "clang/Basic/PartialDiagnostic.h"
40#include "clang/Basic/ProfileList.h"
41#include "clang/Basic/SourceLocation.h"
42#include "clang/Basic/Specifiers.h"
43#include "clang/Basic/TargetCXXABI.h"
44#include "clang/Basic/XRayLists.h"
45#include "llvm/ADT/APSInt.h"
46#include "llvm/ADT/ArrayRef.h"
47#include "llvm/ADT/DenseMap.h"
48#include "llvm/ADT/DenseSet.h"
49#include "llvm/ADT/FoldingSet.h"
50#include "llvm/ADT/IntrusiveRefCntPtr.h"
51#include "llvm/ADT/MapVector.h"
52#include "llvm/ADT/None.h"
53#include "llvm/ADT/Optional.h"
54#include "llvm/ADT/PointerIntPair.h"
55#include "llvm/ADT/PointerUnion.h"
56#include "llvm/ADT/SmallVector.h"
57#include "llvm/ADT/StringMap.h"
58#include "llvm/ADT/StringRef.h"
59#include "llvm/ADT/TinyPtrVector.h"
60#include "llvm/ADT/Triple.h"
61#include "llvm/ADT/iterator_range.h"
62#include "llvm/Support/AlignOf.h"
63#include "llvm/Support/Allocator.h"
64#include "llvm/Support/Casting.h"
65#include "llvm/Support/Compiler.h"
66#include "llvm/Support/TypeSize.h"
67#include <cassert>
68#include <cstddef>
69#include <cstdint>
70#include <iterator>
71#include <memory>
72#include <string>
73#include <type_traits>
74#include <utility>
75#include <vector>
76
77namespace llvm {
78
79class APFixedPoint;
80class FixedPointSemantics;
81struct fltSemantics;
82template <typename T, unsigned N> class SmallPtrSet;
83
84} // namespace llvm
85
86namespace clang {
87
88class APValue;
89class ASTMutationListener;
90class ASTRecordLayout;
91class AtomicExpr;
92class BlockExpr;
93class BuiltinTemplateDecl;
94class CharUnits;
95class ConceptDecl;
96class CXXABI;
97class CXXConstructorDecl;
98class CXXMethodDecl;
99class CXXRecordDecl;
100class DiagnosticsEngine;
101class ParentMapContext;
102class DynTypedNode;
103class DynTypedNodeList;
104class Expr;
105class GlobalDecl;
106class ItaniumMangleContext;
107class MangleContext;
108class MangleNumberingContext;
109class MaterializeTemporaryExpr;
110class MemberSpecializationInfo;
111class Module;
112struct MSGuidDeclParts;
113class ObjCCategoryDecl;
114class ObjCCategoryImplDecl;
115class ObjCContainerDecl;
116class ObjCImplDecl;
117class ObjCImplementationDecl;
118class ObjCInterfaceDecl;
119class ObjCIvarDecl;
120class ObjCMethodDecl;
121class ObjCPropertyDecl;
122class ObjCPropertyImplDecl;
123class ObjCProtocolDecl;
124class ObjCTypeParamDecl;
125class OMPTraitInfo;
126struct ParsedTargetAttr;
127class Preprocessor;
128class Stmt;
129class StoredDeclsMap;
130class TargetAttr;
131class TargetInfo;
132class TemplateDecl;
133class TemplateParameterList;
134class TemplateTemplateParmDecl;
135class TemplateTypeParmDecl;
136class UnresolvedSetIterator;
137class UsingShadowDecl;
138class VarTemplateDecl;
139class VTableContextBase;
140struct BlockVarCopyInit;
141
142namespace Builtin {
143
144class Context;
145
146} // namespace Builtin
147
148enum BuiltinTemplateKind : int;
149enum OpenCLTypeKind : uint8_t;
150
151namespace comments {
152
153class FullComment;
154
155} // namespace comments
156
157namespace interp {
158
159class Context;
160
161} // namespace interp
162
163namespace serialization {
164template <class> class AbstractTypeReader;
165} // namespace serialization
166
167struct TypeInfo {
168 uint64_t Width = 0;
169 unsigned Align = 0;
170 bool AlignIsRequired : 1;
171
172 TypeInfo() : AlignIsRequired(false) {}
173 TypeInfo(uint64_t Width, unsigned Align, bool AlignIsRequired)
174 : Width(Width), Align(Align), AlignIsRequired(AlignIsRequired) {}
175};
176
177struct TypeInfoChars {
178 CharUnits Width;
179 CharUnits Align;
180 bool AlignIsRequired : 1;
181
182 TypeInfoChars() : AlignIsRequired(false) {}
183 TypeInfoChars(CharUnits Width, CharUnits Align, bool AlignIsRequired)
184 : Width(Width), Align(Align), AlignIsRequired(AlignIsRequired) {}
185};
186
187/// Holds long-lived AST nodes (such as types and decls) that can be
188/// referred to throughout the semantic analysis of a file.
189class ASTContext : public RefCountedBase<ASTContext> {
190 friend class NestedNameSpecifier;
191
192 mutable SmallVector<Type *, 0> Types;
193 mutable llvm::FoldingSet<ExtQuals> ExtQualNodes;
194 mutable llvm::FoldingSet<ComplexType> ComplexTypes;
195 mutable llvm::FoldingSet<PointerType> PointerTypes;
196 mutable llvm::FoldingSet<AdjustedType> AdjustedTypes;
197 mutable llvm::FoldingSet<BlockPointerType> BlockPointerTypes;
198 mutable llvm::FoldingSet<LValueReferenceType> LValueReferenceTypes;
199 mutable llvm::FoldingSet<RValueReferenceType> RValueReferenceTypes;
200 mutable llvm::FoldingSet<MemberPointerType> MemberPointerTypes;
201 mutable llvm::ContextualFoldingSet<ConstantArrayType, ASTContext &>
202 ConstantArrayTypes;
203 mutable llvm::FoldingSet<IncompleteArrayType> IncompleteArrayTypes;
204 mutable std::vector<VariableArrayType*> VariableArrayTypes;
205 mutable llvm::FoldingSet<DependentSizedArrayType> DependentSizedArrayTypes;
206 mutable llvm::FoldingSet<DependentSizedExtVectorType>
207 DependentSizedExtVectorTypes;
208 mutable llvm::FoldingSet<DependentAddressSpaceType>
209 DependentAddressSpaceTypes;
210 mutable llvm::FoldingSet<VectorType> VectorTypes;
211 mutable llvm::FoldingSet<DependentVectorType> DependentVectorTypes;
212 mutable llvm::FoldingSet<ConstantMatrixType> MatrixTypes;
213 mutable llvm::FoldingSet<DependentSizedMatrixType> DependentSizedMatrixTypes;
214 mutable llvm::FoldingSet<FunctionNoProtoType> FunctionNoProtoTypes;
215 mutable llvm::ContextualFoldingSet<FunctionProtoType, ASTContext&>
216 FunctionProtoTypes;
217 mutable llvm::FoldingSet<DependentTypeOfExprType> DependentTypeOfExprTypes;
218 mutable llvm::FoldingSet<DependentDecltypeType> DependentDecltypeTypes;
219 mutable llvm::FoldingSet<TemplateTypeParmType> TemplateTypeParmTypes;
220 mutable llvm::FoldingSet<ObjCTypeParamType> ObjCTypeParamTypes;
221 mutable llvm::FoldingSet<SubstTemplateTypeParmType>
222 SubstTemplateTypeParmTypes;
223 mutable llvm::FoldingSet<SubstTemplateTypeParmPackType>
224 SubstTemplateTypeParmPackTypes;
225 mutable llvm::ContextualFoldingSet<TemplateSpecializationType, ASTContext&>
226 TemplateSpecializationTypes;
227 mutable llvm::FoldingSet<ParenType> ParenTypes;
228 mutable llvm::FoldingSet<ElaboratedType> ElaboratedTypes;
229 mutable llvm::FoldingSet<DependentNameType> DependentNameTypes;
230 mutable llvm::ContextualFoldingSet<DependentTemplateSpecializationType,
231 ASTContext&>
232 DependentTemplateSpecializationTypes;
233 llvm::FoldingSet<PackExpansionType> PackExpansionTypes;
234 mutable llvm::FoldingSet<ObjCObjectTypeImpl> ObjCObjectTypes;
235 mutable llvm::FoldingSet<ObjCObjectPointerType> ObjCObjectPointerTypes;
236 mutable llvm::FoldingSet<DependentUnaryTransformType>
237 DependentUnaryTransformTypes;
238 mutable llvm::ContextualFoldingSet<AutoType, ASTContext&> AutoTypes;
239 mutable llvm::FoldingSet<DeducedTemplateSpecializationType>
240 DeducedTemplateSpecializationTypes;
241 mutable llvm::FoldingSet<AtomicType> AtomicTypes;
242 llvm::FoldingSet<AttributedType> AttributedTypes;
243 mutable llvm::FoldingSet<PipeType> PipeTypes;
244 mutable llvm::FoldingSet<ExtIntType> ExtIntTypes;
245 mutable llvm::FoldingSet<DependentExtIntType> DependentExtIntTypes;
246
247 mutable llvm::FoldingSet<QualifiedTemplateName> QualifiedTemplateNames;
248 mutable llvm::FoldingSet<DependentTemplateName> DependentTemplateNames;
249 mutable llvm::FoldingSet<SubstTemplateTemplateParmStorage>
250 SubstTemplateTemplateParms;
251 mutable llvm::ContextualFoldingSet<SubstTemplateTemplateParmPackStorage,
252 ASTContext&>
253 SubstTemplateTemplateParmPacks;
254
255 /// The set of nested name specifiers.
256 ///
257 /// This set is managed by the NestedNameSpecifier class.
258 mutable llvm::FoldingSet<NestedNameSpecifier> NestedNameSpecifiers;
259 mutable NestedNameSpecifier *GlobalNestedNameSpecifier = nullptr;
260
261 /// A cache mapping from RecordDecls to ASTRecordLayouts.
262 ///
263 /// This is lazily created. This is intentionally not serialized.
264 mutable llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>
265 ASTRecordLayouts;
266 mutable llvm::DenseMap<const ObjCContainerDecl*, const ASTRecordLayout*>
267 ObjCLayouts;
268
269 /// A cache from types to size and alignment information.
270 using TypeInfoMap = llvm::DenseMap<const Type *, struct TypeInfo>;
271 mutable TypeInfoMap MemoizedTypeInfo;
272
273 /// A cache from types to unadjusted alignment information. Only ARM and
274 /// AArch64 targets need this information, keeping it separate prevents
275 /// imposing overhead on TypeInfo size.
276 using UnadjustedAlignMap = llvm::DenseMap<const Type *, unsigned>;
277 mutable UnadjustedAlignMap MemoizedUnadjustedAlign;
278
279 /// A cache mapping from CXXRecordDecls to key functions.
280 llvm::DenseMap<const CXXRecordDecl*, LazyDeclPtr> KeyFunctions;
281
282 /// Mapping from ObjCContainers to their ObjCImplementations.
283 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*> ObjCImpls;
284
285 /// Mapping from ObjCMethod to its duplicate declaration in the same
286 /// interface.
287 llvm::DenseMap<const ObjCMethodDecl*,const ObjCMethodDecl*> ObjCMethodRedecls;
288
289 /// Mapping from __block VarDecls to BlockVarCopyInit.
290 llvm::DenseMap<const VarDecl *, BlockVarCopyInit> BlockVarCopyInits;
291
292 /// Mapping from GUIDs to the corresponding MSGuidDecl.
293 mutable llvm::FoldingSet<MSGuidDecl> MSGuidDecls;
294
295 /// Mapping from APValues to the corresponding TemplateParamObjects.
296 mutable llvm::FoldingSet<TemplateParamObjectDecl> TemplateParamObjectDecls;
297
298 /// A cache mapping a string value to a StringLiteral object with the same
299 /// value.
300 ///
301 /// This is lazily created. This is intentionally not serialized.
302 mutable llvm::StringMap<StringLiteral *> StringLiteralCache;
303
304 /// MD5 hash of CUID. It is calculated when first used and cached by this
305 /// data member.
306 mutable std::string CUIDHash;
307
308 /// Representation of a "canonical" template template parameter that
309 /// is used in canonical template names.
310 class CanonicalTemplateTemplateParm : public llvm::FoldingSetNode {
311 TemplateTemplateParmDecl *Parm;
312
313 public:
314 CanonicalTemplateTemplateParm(TemplateTemplateParmDecl *Parm)
315 : Parm(Parm) {}
316
317 TemplateTemplateParmDecl *getParam() const { return Parm; }
318
319 void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &C) {
320 Profile(ID, C, Parm);
321 }
322
323 static void Profile(llvm::FoldingSetNodeID &ID,
324 const ASTContext &C,
325 TemplateTemplateParmDecl *Parm);
326 };
327 mutable llvm::ContextualFoldingSet<CanonicalTemplateTemplateParm,
328 const ASTContext&>
329 CanonTemplateTemplateParms;
330
331 TemplateTemplateParmDecl *
332 getCanonicalTemplateTemplateParmDecl(TemplateTemplateParmDecl *TTP) const;
333
334 /// The typedef for the __int128_t type.
335 mutable TypedefDecl *Int128Decl = nullptr;
336
337 /// The typedef for the __uint128_t type.
338 mutable TypedefDecl *UInt128Decl = nullptr;
339
340 /// The typedef for the target specific predefined
341 /// __builtin_va_list type.
342 mutable TypedefDecl *BuiltinVaListDecl = nullptr;
343
344 /// The typedef for the predefined \c __builtin_ms_va_list type.
345 mutable TypedefDecl *BuiltinMSVaListDecl = nullptr;
346
347 /// The typedef for the predefined \c id type.
348 mutable TypedefDecl *ObjCIdDecl = nullptr;
349
350 /// The typedef for the predefined \c SEL type.
351 mutable TypedefDecl *ObjCSelDecl = nullptr;
352
353 /// The typedef for the predefined \c Class type.
354 mutable TypedefDecl *ObjCClassDecl = nullptr;
355
356 /// The typedef for the predefined \c Protocol class in Objective-C.
357 mutable ObjCInterfaceDecl *ObjCProtocolClassDecl = nullptr;
358
359 /// The typedef for the predefined 'BOOL' type.
360 mutable TypedefDecl *BOOLDecl = nullptr;
361
362 // Typedefs which may be provided defining the structure of Objective-C
363 // pseudo-builtins
364 QualType ObjCIdRedefinitionType;
365 QualType ObjCClassRedefinitionType;
366 QualType ObjCSelRedefinitionType;
367
368 /// The identifier 'bool'.
369 mutable IdentifierInfo *BoolName = nullptr;
370
371 /// The identifier 'NSObject'.
372 mutable IdentifierInfo *NSObjectName = nullptr;
373
374 /// The identifier 'NSCopying'.
375 IdentifierInfo *NSCopyingName = nullptr;
376
377 /// The identifier '__make_integer_seq'.
378 mutable IdentifierInfo *MakeIntegerSeqName = nullptr;
379
380 /// The identifier '__type_pack_element'.
381 mutable IdentifierInfo *TypePackElementName = nullptr;
382
383 QualType ObjCConstantStringType;
384 mutable RecordDecl *CFConstantStringTagDecl = nullptr;
385 mutable TypedefDecl *CFConstantStringTypeDecl = nullptr;
386
387 mutable QualType ObjCSuperType;
388
389 QualType ObjCNSStringType;
390
391 /// The typedef declaration for the Objective-C "instancetype" type.
392 TypedefDecl *ObjCInstanceTypeDecl = nullptr;
393
394 /// The type for the C FILE type.
395 TypeDecl *FILEDecl = nullptr;
396
397 /// The type for the C jmp_buf type.
398 TypeDecl *jmp_bufDecl = nullptr;
399
400 /// The type for the C sigjmp_buf type.
401 TypeDecl *sigjmp_bufDecl = nullptr;
402
403 /// The type for the C ucontext_t type.
404 TypeDecl *ucontext_tDecl = nullptr;
405
406 /// Type for the Block descriptor for Blocks CodeGen.
407 ///
408 /// Since this is only used for generation of debug info, it is not
409 /// serialized.
410 mutable RecordDecl *BlockDescriptorType = nullptr;
411
412 /// Type for the Block descriptor for Blocks CodeGen.
413 ///
414 /// Since this is only used for generation of debug info, it is not
415 /// serialized.
416 mutable RecordDecl *BlockDescriptorExtendedType = nullptr;
417
418 /// Declaration for the CUDA cudaConfigureCall function.
419 FunctionDecl *cudaConfigureCallDecl = nullptr;
420
421 /// Keeps track of all declaration attributes.
422 ///
423 /// Since so few decls have attrs, we keep them in a hash map instead of
424 /// wasting space in the Decl class.
425 llvm::DenseMap<const Decl*, AttrVec*> DeclAttrs;
426
427 /// A mapping from non-redeclarable declarations in modules that were
428 /// merged with other declarations to the canonical declaration that they were
429 /// merged into.
430 llvm::DenseMap<Decl*, Decl*> MergedDecls;
431
432 /// A mapping from a defining declaration to a list of modules (other
433 /// than the owning module of the declaration) that contain merged
434 /// definitions of that entity.
435 llvm::DenseMap<NamedDecl*, llvm::TinyPtrVector<Module*>> MergedDefModules;
436
437 /// Initializers for a module, in order. Each Decl will be either
438 /// something that has a semantic effect on startup (such as a variable with
439 /// a non-constant initializer), or an ImportDecl (which recursively triggers
440 /// initialization of another module).
  struct PerModuleInitializers {
    // Initializer declarations already materialized, in execution order.
    llvm::SmallVector<Decl*, 4> Initializers;
    // IDs of initializers not yet deserialized; resolve() is expected to
    // materialize these into Initializers (defined out of line) —
    // TODO(review): confirm against the .cpp definition.
    llvm::SmallVector<uint32_t, 4> LazyInitializers;

    void resolve(ASTContext &Ctx);
  };
447 llvm::DenseMap<Module*, PerModuleInitializers*> ModuleInitializers;
448
449 ASTContext &this_() { return *this; }
450
451public:
452 /// A type synonym for the TemplateOrInstantiation mapping.
453 using TemplateOrSpecializationInfo =
454 llvm::PointerUnion<VarTemplateDecl *, MemberSpecializationInfo *>;
455
456private:
457 friend class ASTDeclReader;
458 friend class ASTReader;
459 friend class ASTWriter;
460 template <class> friend class serialization::AbstractTypeReader;
461 friend class CXXRecordDecl;
462 friend class IncrementalParser;
463
464 /// A mapping to contain the template or declaration that
465 /// a variable declaration describes or was instantiated from,
466 /// respectively.
467 ///
468 /// For non-templates, this value will be NULL. For variable
469 /// declarations that describe a variable template, this will be a
470 /// pointer to a VarTemplateDecl. For static data members
471 /// of class template specializations, this will be the
472 /// MemberSpecializationInfo referring to the member variable that was
473 /// instantiated or specialized. Thus, the mapping will keep track of
474 /// the static data member templates from which static data members of
475 /// class template specializations were instantiated.
476 ///
477 /// Given the following example:
478 ///
479 /// \code
480 /// template<typename T>
481 /// struct X {
482 /// static T value;
483 /// };
484 ///
485 /// template<typename T>
486 /// T X<T>::value = T(17);
487 ///
488 /// int *x = &X<int>::value;
489 /// \endcode
490 ///
491 /// This mapping will contain an entry that maps from the VarDecl for
492 /// X<int>::value to the corresponding VarDecl for X<T>::value (within the
493 /// class template X) and will be marked TSK_ImplicitInstantiation.
494 llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>
495 TemplateOrInstantiation;
496
497 /// Keeps track of the declaration from which a using declaration was
498 /// created during instantiation.
499 ///
500 /// The source and target declarations are always a UsingDecl, an
501 /// UnresolvedUsingValueDecl, or an UnresolvedUsingTypenameDecl.
502 ///
503 /// For example:
504 /// \code
505 /// template<typename T>
506 /// struct A {
507 /// void f();
508 /// };
509 ///
510 /// template<typename T>
511 /// struct B : A<T> {
512 /// using A<T>::f;
513 /// };
514 ///
515 /// template struct B<int>;
516 /// \endcode
517 ///
518 /// This mapping will contain an entry that maps from the UsingDecl in
519 /// B<int> to the UnresolvedUsingDecl in B<T>.
520 llvm::DenseMap<NamedDecl *, NamedDecl *> InstantiatedFromUsingDecl;
521
522 /// Like InstantiatedFromUsingDecl, but for using-enum-declarations. Maps
523 /// from the instantiated using-enum to the templated decl from whence it
524 /// came.
525 /// Note that using-enum-declarations cannot be dependent and
526 /// thus will never be instantiated from an "unresolved"
527 /// version thereof (as with using-declarations), so each mapping is from
528 /// a (resolved) UsingEnumDecl to a (resolved) UsingEnumDecl.
529 llvm::DenseMap<UsingEnumDecl *, UsingEnumDecl *>
530 InstantiatedFromUsingEnumDecl;
531
532 /// Similarly maps instantiated UsingShadowDecls to their origin.
533 llvm::DenseMap<UsingShadowDecl*, UsingShadowDecl*>
534 InstantiatedFromUsingShadowDecl;
535
536 llvm::DenseMap<FieldDecl *, FieldDecl *> InstantiatedFromUnnamedFieldDecl;
537
538 /// Mapping that stores the methods overridden by a given C++
539 /// member function.
540 ///
541 /// Since most C++ member functions aren't virtual and therefore
542 /// don't override anything, we store the overridden functions in
543 /// this map on the side rather than within the CXXMethodDecl structure.
544 using CXXMethodVector = llvm::TinyPtrVector<const CXXMethodDecl *>;
545 llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector> OverriddenMethods;
546
547 /// Mapping from each declaration context to its corresponding
548 /// mangling numbering context (used for constructs like lambdas which
549 /// need to be consistently numbered for the mangler).
550 llvm::DenseMap<const DeclContext *, std::unique_ptr<MangleNumberingContext>>
551 MangleNumberingContexts;
552 llvm::DenseMap<const Decl *, std::unique_ptr<MangleNumberingContext>>
553 ExtraMangleNumberingContexts;
554
555 /// Side-table of mangling numbers for declarations which rarely
556 /// need them (like static local vars).
557 llvm::MapVector<const NamedDecl *, unsigned> MangleNumbers;
558 llvm::MapVector<const VarDecl *, unsigned> StaticLocalNumbers;
559 /// Mapping the associated device lambda mangling number if present.
560 mutable llvm::DenseMap<const CXXRecordDecl *, unsigned>
561 DeviceLambdaManglingNumbers;
562
563 /// Mapping that stores parameterIndex values for ParmVarDecls when
564 /// that value exceeds the bitfield size of ParmVarDeclBits.ParameterIndex.
565 using ParameterIndexTable = llvm::DenseMap<const VarDecl *, unsigned>;
566 ParameterIndexTable ParamIndices;
567
568 ImportDecl *FirstLocalImport = nullptr;
569 ImportDecl *LastLocalImport = nullptr;
570
571 TranslationUnitDecl *TUDecl = nullptr;
572 mutable ExternCContextDecl *ExternCContext = nullptr;
573 mutable BuiltinTemplateDecl *MakeIntegerSeqDecl = nullptr;
574 mutable BuiltinTemplateDecl *TypePackElementDecl = nullptr;
575
576 /// The associated SourceManager object.
577 SourceManager &SourceMgr;
578
579 /// The language options used to create the AST associated with
580 /// this ASTContext object.
581 LangOptions &LangOpts;
582
583 /// NoSanitizeList object that is used by sanitizers to decide which
584 /// entities should not be instrumented.
585 std::unique_ptr<NoSanitizeList> NoSanitizeL;
586
587 /// Function filtering mechanism to determine whether a given function
588 /// should be imbued with the XRay "always" or "never" attributes.
589 std::unique_ptr<XRayFunctionFilter> XRayFilter;
590
591 /// ProfileList object that is used by the profile instrumentation
592 /// to decide which entities should be instrumented.
593 std::unique_ptr<ProfileList> ProfList;
594
595 /// The allocator used to create AST objects.
596 ///
597 /// AST objects are never destructed; rather, all memory associated with the
598 /// AST objects will be released when the ASTContext itself is destroyed.
599 mutable llvm::BumpPtrAllocator BumpAlloc;
600
601 /// Allocator for partial diagnostics.
602 PartialDiagnostic::DiagStorageAllocator DiagAllocator;
603
604 /// The current C++ ABI.
605 std::unique_ptr<CXXABI> ABI;
606 CXXABI *createCXXABI(const TargetInfo &T);
607
608 /// The logical -> physical address space map.
609 const LangASMap *AddrSpaceMap = nullptr;
610
611 /// Address space map mangling must be used with language specific
612 /// address spaces (e.g. OpenCL/CUDA)
613 bool AddrSpaceMapMangling;
614
615 const TargetInfo *Target = nullptr;
616 const TargetInfo *AuxTarget = nullptr;
617 clang::PrintingPolicy PrintingPolicy;
618 std::unique_ptr<interp::Context> InterpContext;
619 std::unique_ptr<ParentMapContext> ParentMapCtx;
620
621 /// Keeps track of the deallocated DeclListNodes for future reuse.
622 DeclListNode *ListNodeFreeList = nullptr;
623
624public:
625 IdentifierTable &Idents;
626 SelectorTable &Selectors;
627 Builtin::Context &BuiltinInfo;
628 const TranslationUnitKind TUKind;
629 mutable DeclarationNameTable DeclarationNames;
630 IntrusiveRefCntPtr<ExternalASTSource> ExternalSource;
631 ASTMutationListener *Listener = nullptr;
632
633 /// Returns the clang bytecode interpreter context.
634 interp::Context &getInterpContext();
635
636 /// Returns the dynamic AST node parent map context.
637 ParentMapContext &getParentMapContext();
638
639 // A traversal scope limits the parts of the AST visible to certain analyses.
640 // RecursiveASTVisitor only visits specified children of TranslationUnitDecl.
641 // getParents() will only observe reachable parent edges.
642 //
643 // The scope is defined by a set of "top-level" declarations which will be
644 // visible under the TranslationUnitDecl.
645 // Initially, it is the entire TU, represented by {getTranslationUnitDecl()}.
646 //
647 // After setTraversalScope({foo, bar}), the exposed AST looks like:
648 // TranslationUnitDecl
649 // - foo
650 // - ...
651 // - bar
652 // - ...
653 // All other siblings of foo and bar are pruned from the tree.
654 // (However they are still accessible via TranslationUnitDecl->decls())
655 //
656 // Changing the scope clears the parent cache, which is expensive to rebuild.
657 std::vector<Decl *> getTraversalScope() const { return TraversalScope; }
658 void setTraversalScope(const std::vector<Decl *> &);
659
660 /// Forwards to get node parents from the ParentMapContext. New callers should
661 /// use ParentMapContext::getParents() directly.
662 template <typename NodeT> DynTypedNodeList getParents(const NodeT &Node);
663
  /// Retrieve the policy used when pretty-printing AST nodes.
  const clang::PrintingPolicy &getPrintingPolicy() const {
    return PrintingPolicy;
  }

  /// Replace the pretty-printing policy (stored by value).
  void setPrintingPolicy(const clang::PrintingPolicy &Policy) {
    PrintingPolicy = Policy;
  }
671
  /// Access the SourceManager this context was constructed with.
  SourceManager& getSourceManager() { return SourceMgr; }
  const SourceManager& getSourceManager() const { return SourceMgr; }
674
  /// The bump allocator backing all AST node storage (see field comment:
  /// memory is released only when the ASTContext itself is destroyed).
  llvm::BumpPtrAllocator &getAllocator() const {
    return BumpAlloc;
  }

  /// Allocate raw storage from the AST arena (default 8-byte alignment).
  void *Allocate(size_t Size, unsigned Align = 8) const {
    return BumpAlloc.Allocate(Size, Align);
  }
  /// Typed convenience overload: raw storage for \p Num objects of type T.
  /// Allocates only; does not run constructors.
  template <typename T> T *Allocate(size_t Num = 1) const {
    return static_cast<T *>(Allocate(Num * sizeof(T), alignof(T)));
  }
  /// Intentionally a no-op: bump-allocated memory cannot be freed
  /// individually.
  void Deallocate(void *Ptr) const {}
686
687 /// Allocates a \c DeclListNode or returns one from the \c ListNodeFreeList
688 /// pool.
689 DeclListNode *AllocateDeclListNode(clang::NamedDecl *ND) {
690 if (DeclListNode *Alloc = ListNodeFreeList) {
691 ListNodeFreeList = Alloc->Rest.dyn_cast<DeclListNode*>();
692 Alloc->D = ND;
693 Alloc->Rest = nullptr;
694 return Alloc;
695 }
696 return new (*this) DeclListNode(ND);
697 }
698 /// Deallocates a \c DeclListNode by returning it to the \c ListNodeFreeList
699 /// pool.
  void DeallocateDeclListNode(DeclListNode *N) {
    // Push N onto the free list; its Rest field doubles as the free-list
    // link (order matters: capture the old head before replacing it).
    N->Rest = ListNodeFreeList;
    ListNodeFreeList = N;
  }
704
705 /// Return the total amount of physical memory allocated for representing
706 /// AST nodes and type information.
  size_t getASTAllocatedMemory() const {
    // Physical slab memory held by the arena, which may exceed the number
    // of bytes actually handed out.
    return BumpAlloc.getTotalMemory();
  }
710
711 /// Return the total memory used for various side tables.
712 size_t getSideTableAllocatedMemory() const;
713
  /// Allocator shared by PartialDiagnostics created in this context.
  PartialDiagnostic::DiagStorageAllocator &getDiagAllocator() {
    return DiagAllocator;
  }

  // NOTE(review): Target is initialized to nullptr above and this
  // dereferences it unchecked — callers must only invoke this after the
  // target has been installed; confirm initialization order.
  const TargetInfo &getTargetInfo() const { return *Target; }
  /// The auxiliary target, or null if none is set.
  const TargetInfo *getAuxTargetInfo() const { return AuxTarget; }
720
721 /// getIntTypeForBitwidth -
722 /// sets integer QualTy according to specified details:
723 /// bitwidth, signed/unsigned.
724 /// Returns empty type if there is no appropriate target types.
725 QualType getIntTypeForBitwidth(unsigned DestWidth,
726 unsigned Signed) const;
727
728 /// getRealTypeForBitwidth -
729 /// sets floating point QualTy according to specified bitwidth.
730 /// Returns empty type if there is no appropriate target types.
731 QualType getRealTypeForBitwidth(unsigned DestWidth, bool ExplicitIEEE) const;
732
733 bool AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const;
734
735 const LangOptions& getLangOpts() const { return LangOpts; }
736
737 // If this condition is false, typo correction must be performed eagerly
738 // rather than delayed in many places, as it makes use of dependent types.
739 // the condition is false for clang's C-only codepath, as it doesn't support
740 // dependent types yet.
  bool isDependenceAllowed() const {
    // True in C++ always; in C only when RecoveryAST is enabled (see the
    // comment above: the C-only codepath lacks dependent types otherwise).
    return LangOpts.CPlusPlus || LangOpts.RecoveryAST;
  }
744
  // The three accessors below dereference unique_ptr members unchecked —
  // presumably they are populated in the ASTContext constructor; confirm
  // before calling on a partially constructed context.

  /// Entities that sanitizers must not instrument.
  const NoSanitizeList &getNoSanitizeList() const { return *NoSanitizeL; }

  /// XRay "always"/"never" attribute filter (see field comment above).
  const XRayFunctionFilter &getXRayFilter() const {
    return *XRayFilter;
  }

  /// Profile-instrumentation inclusion/exclusion list.
  const ProfileList &getProfileList() const { return *ProfList; }
752
753 DiagnosticsEngine &getDiagnostics() const;
754
  /// Pair \p Loc with this context's SourceManager so it can be queried
  /// without passing a SourceManager separately.
  FullSourceLoc getFullLoc(SourceLocation Loc) const {
    return FullSourceLoc(Loc,SourceMgr);
  }
758
759 /// Return the C++ ABI kind that should be used. The C++ ABI can be overridden
760 /// at compile time with `-fc++-abi=`. If this is not provided, we instead use
761 /// the default ABI set by the target.
762 TargetCXXABI::Kind getCXXABIKind() const;
763
764 /// All comments in this translation unit.
765 RawCommentList Comments;
766
767 /// True if comments are already loaded from ExternalASTSource.
768 mutable bool CommentsLoaded = false;
769
770 /// Mapping from declaration to directly attached comment.
771 ///
772 /// Raw comments are owned by Comments list. This mapping is populated
773 /// lazily.
774 mutable llvm::DenseMap<const Decl *, const RawComment *> DeclRawComments;
775
776 /// Mapping from canonical declaration to the first redeclaration in chain
777 /// that has a comment attached.
778 ///
779 /// Raw comments are owned by Comments list. This mapping is populated
780 /// lazily.
781 mutable llvm::DenseMap<const Decl *, const Decl *> RedeclChainComments;
782
783 /// Keeps track of redeclaration chains that don't have any comment attached.
784 /// Mapping from canonical declaration to redeclaration chain that has no
785 /// comments attached to any redeclaration. Specifically it's mapping to
786 /// the last redeclaration we've checked.
787 ///
788 /// Shall not contain declarations that have comments attached to any
789 /// redeclaration in their chain.
790 mutable llvm::DenseMap<const Decl *, const Decl *> CommentlessRedeclChains;
791
792 /// Mapping from declarations to parsed comments attached to any
793 /// redeclaration.
794 mutable llvm::DenseMap<const Decl *, comments::FullComment *> ParsedComments;
795
796 /// Attaches \p Comment to \p OriginalD and to its redeclaration chain
797 /// and removes the redeclaration chain from the set of commentless chains.
798 ///
799 /// Don't do anything if a comment has already been attached to \p OriginalD
800 /// or its redeclaration chain.
801 void cacheRawCommentForDecl(const Decl &OriginalD,
802 const RawComment &Comment) const;
803
804 /// \returns searches \p CommentsInFile for doc comment for \p D.
805 ///
806 /// \p RepresentativeLocForDecl is used as a location for searching doc
807 /// comments. \p CommentsInFile is a mapping offset -> comment of files in the
808 /// same file where \p RepresentativeLocForDecl is.
809 RawComment *getRawCommentForDeclNoCacheImpl(
810 const Decl *D, const SourceLocation RepresentativeLocForDecl,
811 const std::map<unsigned, RawComment *> &CommentsInFile) const;
812
813 /// Return the documentation comment attached to a given declaration,
814 /// without looking into cache.
815 RawComment *getRawCommentForDeclNoCache(const Decl *D) const;
816
817public:
818 void addComment(const RawComment &RC);
819
820 /// Return the documentation comment attached to a given declaration.
821 /// Returns nullptr if no comment is attached.
822 ///
823 /// \param OriginalDecl if not nullptr, is set to declaration AST node that
824 /// had the comment, if the comment we found comes from a redeclaration.
825 const RawComment *
826 getRawCommentForAnyRedecl(const Decl *D,
827 const Decl **OriginalDecl = nullptr) const;
828
829 /// Searches existing comments for doc comments that should be attached to \p
830 /// Decls. If any doc comment is found, it is parsed.
831 ///
832 /// Requirement: All \p Decls are in the same file.
833 ///
834 /// If the last comment in the file is already attached we assume
835 /// there are no comments left to be attached to \p Decls.
836 void attachCommentsToJustParsedDecls(ArrayRef<Decl *> Decls,
837 const Preprocessor *PP);
838
839 /// Return parsed documentation comment attached to a given declaration.
840 /// Returns nullptr if no comment is attached.
841 ///
842 /// \param PP the Preprocessor used with this TU. Could be nullptr if
843 /// preprocessor is not available.
844 comments::FullComment *getCommentForDecl(const Decl *D,
845 const Preprocessor *PP) const;
846
847 /// Return parsed documentation comment attached to a given declaration.
848 /// Returns nullptr if no comment is attached. Does not look at any
849 /// redeclarations of the declaration.
850 comments::FullComment *getLocalCommentForDeclUncached(const Decl *D) const;
851
852 comments::FullComment *cloneFullComment(comments::FullComment *FC,
853 const Decl *D) const;
854
855private:
856 mutable comments::CommandTraits CommentCommandTraits;
857
858 /// Iterator that visits import declarations.
859 class import_iterator {
860 ImportDecl *Import = nullptr;
861
862 public:
863 using value_type = ImportDecl *;
864 using reference = ImportDecl *;
865 using pointer = ImportDecl *;
866 using difference_type = int;
867 using iterator_category = std::forward_iterator_tag;
868
869 import_iterator() = default;
870 explicit import_iterator(ImportDecl *Import) : Import(Import) {}
871
872 reference operator*() const { return Import; }
873 pointer operator->() const { return Import; }
874
875 import_iterator &operator++() {
876 Import = ASTContext::getNextLocalImport(Import);
877 return *this;
878 }
879
880 import_iterator operator++(int) {
881 import_iterator Other(*this);
882 ++(*this);
883 return Other;
884 }
885
886 friend bool operator==(import_iterator X, import_iterator Y) {
887 return X.Import == Y.Import;
888 }
889
890 friend bool operator!=(import_iterator X, import_iterator Y) {
891 return X.Import != Y.Import;
892 }
893 };
894
895public:
  /// Traits for documentation-comment commands. The member is declared
  /// mutable, which is why this const method can hand out a non-const
  /// reference.
  comments::CommandTraits &getCommentCommandTraits() const {
    return CommentCommandTraits;
  }
899
900 /// Retrieve the attributes for the given declaration.
901 AttrVec& getDeclAttrs(const Decl *D);
902
903 /// Erase the attributes corresponding to the given declaration.
904 void eraseDeclAttrs(const Decl *D);
905
906 /// If this variable is an instantiated static data member of a
907 /// class template specialization, returns the templated static data member
908 /// from which it was instantiated.
909 // FIXME: Remove ?
910 MemberSpecializationInfo *getInstantiatedFromStaticDataMember(
911 const VarDecl *Var);
912
913 /// Note that the static data member \p Inst is an instantiation of
914 /// the static data member template \p Tmpl of a class template.
915 void setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl,
916 TemplateSpecializationKind TSK,
917 SourceLocation PointOfInstantiation = SourceLocation());
918
919 TemplateOrSpecializationInfo
920 getTemplateOrSpecializationInfo(const VarDecl *Var);
921
922 void setTemplateOrSpecializationInfo(VarDecl *Inst,
923 TemplateOrSpecializationInfo TSI);
924
925 /// If the given using decl \p Inst is an instantiation of
926 /// another (possibly unresolved) using decl, return it.
927 NamedDecl *getInstantiatedFromUsingDecl(NamedDecl *Inst);
928
929 /// Remember that the using decl \p Inst is an instantiation
930 /// of the using decl \p Pattern of a class template.
931 void setInstantiatedFromUsingDecl(NamedDecl *Inst, NamedDecl *Pattern);
932
933 /// If the given using-enum decl \p Inst is an instantiation of
934 /// another using-enum decl, return it.
935 UsingEnumDecl *getInstantiatedFromUsingEnumDecl(UsingEnumDecl *Inst);
936
937 /// Remember that the using enum decl \p Inst is an instantiation
938 /// of the using enum decl \p Pattern of a class template.
939 void setInstantiatedFromUsingEnumDecl(UsingEnumDecl *Inst,
940 UsingEnumDecl *Pattern);
941
942 UsingShadowDecl *getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst);
943 void setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst,
944 UsingShadowDecl *Pattern);
945
946 FieldDecl *getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field);
947
948 void setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst, FieldDecl *Tmpl);
949
950 // Access to the set of methods overridden by the given C++ method.
951 using overridden_cxx_method_iterator = CXXMethodVector::const_iterator;
952 overridden_cxx_method_iterator
953 overridden_methods_begin(const CXXMethodDecl *Method) const;
954
955 overridden_cxx_method_iterator
956 overridden_methods_end(const CXXMethodDecl *Method) const;
957
958 unsigned overridden_methods_size(const CXXMethodDecl *Method) const;
959
960 using overridden_method_range =
961 llvm::iterator_range<overridden_cxx_method_iterator>;
962
963 overridden_method_range overridden_methods(const CXXMethodDecl *Method) const;
964
965 /// Note that the given C++ \p Method overrides the given \p
966 /// Overridden method.
967 void addOverriddenMethod(const CXXMethodDecl *Method,
968 const CXXMethodDecl *Overridden);
969
970 /// Return C++ or ObjC overridden methods for the given \p Method.
971 ///
972 /// An ObjC method is considered to override any method in the class's
973 /// base classes, its protocols, or its categories' protocols, that has
974 /// the same selector and is of the same kind (class or instance).
975 /// A method in an implementation is not considered as overriding the same
976 /// method in the interface or its categories.
977 void getOverriddenMethods(
978 const NamedDecl *Method,
979 SmallVectorImpl<const NamedDecl *> &Overridden) const;
980
981 /// Notify the AST context that a new import declaration has been
982 /// parsed or implicitly created within this translation unit.
983 void addedLocalImportDecl(ImportDecl *Import);
984
  // Next import in the TU's singly linked chain of local ImportDecls.
  // Precondition: Import is non-null (dereferenced unchecked).
  static ImportDecl *getNextLocalImport(ImportDecl *Import) {
    return Import->getNextLocalImport();
  }
988
989 using import_range = llvm::iterator_range<import_iterator>;
990
  /// Range over all local import declarations, from FirstLocalImport to
  /// the null end sentinel.
  import_range local_imports() const {
    return import_range(import_iterator(FirstLocalImport), import_iterator());
  }
994
995 Decl *getPrimaryMergedDecl(Decl *D) {
996 Decl *Result = MergedDecls.lookup(D);
997 return Result ? Result : D;
998 }
  /// Record that \p D has been merged into the canonical decl \p Primary.
  void setPrimaryMergedDecl(Decl *D, Decl *Primary) {
    MergedDecls[D] = Primary;
  }
1002
1003 /// Note that the definition \p ND has been merged into module \p M,
1004 /// and should be visible whenever \p M is visible.
1005 void mergeDefinitionIntoModule(NamedDecl *ND, Module *M,
1006 bool NotifyListeners = true);
1007
1008 /// Clean up the merged definition list. Call this if you might have
1009 /// added duplicates into the list.
1010 void deduplicateMergedDefinitonsFor(NamedDecl *ND);
1011
1012 /// Get the additional modules in which the definition \p Def has
1013 /// been merged.
1014 ArrayRef<Module*> getModulesWithMergedDefinition(const NamedDecl *Def);
1015
1016 /// Add a declaration to the list of declarations that are initialized
1017 /// for a module. This will typically be a global variable (with internal
1018 /// linkage) that runs module initializers, such as the iostream initializer,
1019 /// or an ImportDecl nominating another module that has initializers.
1020 void addModuleInitializer(Module *M, Decl *Init);
1021
1022 void addLazyModuleInitializers(Module *M, ArrayRef<uint32_t> IDs);
1023
1024 /// Get the initializations to perform when importing a module, if any.
1025 ArrayRef<Decl*> getModuleInitializers(Module *M);
1026
  TranslationUnitDecl *getTranslationUnitDecl() const {
    // NOTE(review): TUDecl is null until addTranslationUnitDecl() runs;
    // this assumes callers invoke it only afterwards — confirm.
    return TUDecl->getMostRecentDecl();
  }
  /// Create a fresh TranslationUnitDecl and make it current. Per the
  /// assert, this runs more than once only for TU_Incremental; later TUs
  /// are chained to their predecessor via setPreviousDecl.
  void addTranslationUnitDecl() {
    assert(!TUDecl || TUKind == TU_Incremental);
    TranslationUnitDecl *NewTUDecl = TranslationUnitDecl::Create(*this);
    // Reset the traversal scope only if it still covers the whole TU;
    // a scope narrowed via setTraversalScope is left untouched.
    if (TraversalScope.empty() || TraversalScope.back() == TUDecl)
      TraversalScope = {NewTUDecl};
    if (TUDecl)
      NewTUDecl->setPreviousDecl(TUDecl);
    TUDecl = NewTUDecl;
  }
1039
1040 ExternCContextDecl *getExternCContextDecl() const;
1041 BuiltinTemplateDecl *getMakeIntegerSeqDecl() const;
1042 BuiltinTemplateDecl *getTypePackElementDecl() const;
1043
1044 // Builtin Types.
1045 CanQualType VoidTy;
1046 CanQualType BoolTy;
1047 CanQualType CharTy;
1048 CanQualType WCharTy; // [C++ 3.9.1p5].
1049 CanQualType WideCharTy; // Same as WCharTy in C++, integer type in C99.
1050 CanQualType WIntTy; // [C99 7.24.1], integer type unchanged by default promotions.
1051 CanQualType Char8Ty; // [C++20 proposal]
1052 CanQualType Char16Ty; // [C++0x 3.9.1p5], integer type in C99.
1053 CanQualType Char32Ty; // [C++0x 3.9.1p5], integer type in C99.
1054 CanQualType SignedCharTy, ShortTy, IntTy, LongTy, LongLongTy, Int128Ty;
1055 CanQualType UnsignedCharTy, UnsignedShortTy, UnsignedIntTy, UnsignedLongTy;
1056 CanQualType UnsignedLongLongTy, UnsignedInt128Ty;
1057 CanQualType FloatTy, DoubleTy, LongDoubleTy, Float128Ty;
1058 CanQualType ShortAccumTy, AccumTy,
1059 LongAccumTy; // ISO/IEC JTC1 SC22 WG14 N1169 Extension
1060 CanQualType UnsignedShortAccumTy, UnsignedAccumTy, UnsignedLongAccumTy;
1061 CanQualType ShortFractTy, FractTy, LongFractTy;
1062 CanQualType UnsignedShortFractTy, UnsignedFractTy, UnsignedLongFractTy;
1063 CanQualType SatShortAccumTy, SatAccumTy, SatLongAccumTy;
1064 CanQualType SatUnsignedShortAccumTy, SatUnsignedAccumTy,
1065 SatUnsignedLongAccumTy;
1066 CanQualType SatShortFractTy, SatFractTy, SatLongFractTy;
1067 CanQualType SatUnsignedShortFractTy, SatUnsignedFractTy,
1068 SatUnsignedLongFractTy;
1069 CanQualType HalfTy; // [OpenCL 6.1.1.1], ARM NEON
1070 CanQualType BFloat16Ty;
1071 CanQualType Float16Ty; // C11 extension ISO/IEC TS 18661-3
1072 CanQualType FloatComplexTy, DoubleComplexTy, LongDoubleComplexTy;
1073 CanQualType Float128ComplexTy;
1074 CanQualType VoidPtrTy, NullPtrTy;
1075 CanQualType DependentTy, OverloadTy, BoundMemberTy, UnknownAnyTy;
1076 CanQualType BuiltinFnTy;
1077 CanQualType PseudoObjectTy, ARCUnbridgedCastTy;
1078 CanQualType ObjCBuiltinIdTy, ObjCBuiltinClassTy, ObjCBuiltinSelTy;
1079 CanQualType ObjCBuiltinBoolTy;
1080#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
1081 CanQualType SingletonId;
1082#include "clang/Basic/OpenCLImageTypes.def"
1083 CanQualType OCLSamplerTy, OCLEventTy, OCLClkEventTy;
1084 CanQualType OCLQueueTy, OCLReserveIDTy;
1085 CanQualType IncompleteMatrixIdxTy;
1086 CanQualType OMPArraySectionTy, OMPArrayShapingTy, OMPIteratorTy;
1087#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
1088 CanQualType Id##Ty;
1089#include "clang/Basic/OpenCLExtensionTypes.def"
1090#define SVE_TYPE(Name, Id, SingletonId) \
1091 CanQualType SingletonId;
1092#include "clang/Basic/AArch64SVEACLETypes.def"
1093#define PPC_VECTOR_TYPE(Name, Id, Size) \
1094 CanQualType Id##Ty;
1095#include "clang/Basic/PPCTypes.def"
1096#define RVV_TYPE(Name, Id, SingletonId) \
1097 CanQualType SingletonId;
1098#include "clang/Basic/RISCVVTypes.def"
1099
1100 // Types for deductions in C++0x [stmt.ranged]'s desugaring. Built on demand.
1101 mutable QualType AutoDeductTy; // Deduction against 'auto'.
1102 mutable QualType AutoRRefDeductTy; // Deduction against 'auto &&'.
1103
1104 // Decl used to help define __builtin_va_list for some targets.
1105 // The decl is built when constructing 'BuiltinVaListDecl'.
1106 mutable Decl *VaListTagDecl = nullptr;
1107
1108 // Implicitly-declared type 'struct _GUID'.
1109 mutable TagDecl *MSGuidTagDecl = nullptr;
1110
1111 /// Keep track of CUDA/HIP device-side variables ODR-used by host code.
1112 llvm::DenseSet<const VarDecl *> CUDADeviceVarODRUsedByHost;
1113
1114 ASTContext(LangOptions &LOpts, SourceManager &SM, IdentifierTable &idents,
1115 SelectorTable &sels, Builtin::Context &builtins,
1116 TranslationUnitKind TUKind);
1117 ASTContext(const ASTContext &) = delete;
1118 ASTContext &operator=(const ASTContext &) = delete;
1119 ~ASTContext();
1120
1121 /// Attach an external AST source to the AST context.
1122 ///
1123 /// The external AST source provides the ability to load parts of
1124 /// the abstract syntax tree as needed from some external storage,
1125 /// e.g., a precompiled header.
1126 void setExternalSource(IntrusiveRefCntPtr<ExternalASTSource> Source);
1127
1128 /// Retrieve a pointer to the external AST source associated
1129 /// with this AST context, if any.
  ExternalASTSource *getExternalSource() const {
    // Null when no external AST source has been attached.
    return ExternalSource.get();
  }
1133
1134 /// Attach an AST mutation listener to the AST context.
1135 ///
1136 /// The AST mutation listener provides the ability to track modifications to
1137 /// the abstract syntax tree entities committed after they were initially
1138 /// created.
  void setASTMutationListener(ASTMutationListener *Listener) {
    // 'this->' disambiguates the member from the same-named parameter.
    this->Listener = Listener;
  }

  /// Retrieve a pointer to the AST mutation listener associated
  /// with this AST context, if any (null when none is attached).
  ASTMutationListener *getASTMutationListener() const { return Listener; }
1146
1147 void PrintStats() const;
1148 const SmallVectorImpl<Type *>& getTypes() const { return Types; }
1149
1150 BuiltinTemplateDecl *buildBuiltinTemplateDecl(BuiltinTemplateKind BTK,
1151 const IdentifierInfo *II) const;
1152
1153 /// Create a new implicit TU-level CXXRecordDecl or RecordDecl
1154 /// declaration.
1155 RecordDecl *buildImplicitRecord(StringRef Name,
1156 RecordDecl::TagKind TK = TTK_Struct) const;
1157
1158 /// Create a new implicit TU-level typedef declaration.
1159 TypedefDecl *buildImplicitTypedef(QualType T, StringRef Name) const;
1160
1161 /// Retrieve the declaration for the 128-bit signed integer type.
1162 TypedefDecl *getInt128Decl() const;
1163
1164 /// Retrieve the declaration for the 128-bit unsigned integer type.
1165 TypedefDecl *getUInt128Decl() const;
1166
1167 //===--------------------------------------------------------------------===//
1168 // Type Constructors
1169 //===--------------------------------------------------------------------===//
1170
1171private:
1172 /// Return a type with extended qualifiers.
1173 QualType getExtQualType(const Type *Base, Qualifiers Quals) const;
1174
1175 QualType getTypeDeclTypeSlow(const TypeDecl *Decl) const;
1176
1177 QualType getPipeType(QualType T, bool ReadOnly) const;
1178
1179public:
1180 /// Return the uniqued reference to the type for an address space
1181 /// qualified type with the specified type and address space.
1182 ///
1183 /// The resulting type has a union of the qualifiers from T and the address
1184 /// space. If T already has an address space specifier, it is silently
1185 /// replaced.
1186 QualType getAddrSpaceQualType(QualType T, LangAS AddressSpace) const;
1187
1188 /// Remove any existing address space on the type and returns the type
1189 /// with qualifiers intact (or that's the idea anyway)
1190 ///
1191 /// The return type should be T with all prior qualifiers minus the address
1192 /// space.
1193 QualType removeAddrSpaceQualType(QualType T) const;
1194
1195 /// Apply Objective-C protocol qualifiers to the given type.
1196 /// \param allowOnPointerType specifies if we can apply protocol
1197 /// qualifiers on ObjCObjectPointerType. It can be set to true when
1198 /// constructing the canonical type of a Objective-C type parameter.
1199 QualType applyObjCProtocolQualifiers(QualType type,
1200 ArrayRef<ObjCProtocolDecl *> protocols, bool &hasError,
1201 bool allowOnPointerType = false) const;
1202
1203 /// Return the uniqued reference to the type for an Objective-C
1204 /// gc-qualified type.
1205 ///
1206 /// The resulting type has a union of the qualifiers from T and the gc
1207 /// attribute.
1208 QualType getObjCGCQualType(QualType T, Qualifiers::GC gcAttr) const;
1209
1210 /// Remove the existing address space on the type if it is a pointer size
1211 /// address space and return the type with qualifiers intact.
1212 QualType removePtrSizeAddrSpace(QualType T) const;
1213
1214 /// Return the uniqued reference to the type for a \c restrict
1215 /// qualified type.
1216 ///
1217 /// The resulting type has a union of the qualifiers from \p T and
1218 /// \c restrict.
1219 QualType getRestrictType(QualType T) const {
1220 return T.withFastQualifiers(Qualifiers::Restrict);
1221 }
1222
1223 /// Return the uniqued reference to the type for a \c volatile
1224 /// qualified type.
1225 ///
1226 /// The resulting type has a union of the qualifiers from \p T and
1227 /// \c volatile.
1228 QualType getVolatileType(QualType T) const {
1229 return T.withFastQualifiers(Qualifiers::Volatile);
1230 }
1231
1232 /// Return the uniqued reference to the type for a \c const
1233 /// qualified type.
1234 ///
1235 /// The resulting type has a union of the qualifiers from \p T and \c const.
1236 ///
1237 /// It can be reasonably expected that this will always be equivalent to
1238 /// calling T.withConst().
1239 QualType getConstType(QualType T) const { return T.withConst(); }
1240
1241 /// Change the ExtInfo on a function type.
1242 const FunctionType *adjustFunctionType(const FunctionType *Fn,
1243 FunctionType::ExtInfo EInfo);
1244
1245 /// Adjust the given function result type.
1246 CanQualType getCanonicalFunctionResultType(QualType ResultType) const;
1247
1248 /// Change the result type of a function type once it is deduced.
1249 void adjustDeducedFunctionResultType(FunctionDecl *FD, QualType ResultType);
1250
1251 /// Get a function type and produce the equivalent function type with the
1252 /// specified exception specification. Type sugar that can be present on a
1253 /// declaration of a function with an exception specification is permitted
1254 /// and preserved. Other type sugar (for instance, typedefs) is not.
1255 QualType getFunctionTypeWithExceptionSpec(
1256 QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI);
1257
1258 /// Determine whether two function types are the same, ignoring
1259 /// exception specifications in cases where they're part of the type.
1260 bool hasSameFunctionTypeIgnoringExceptionSpec(QualType T, QualType U);
1261
1262 /// Change the exception specification on a function once it is
1263 /// delay-parsed, instantiated, or computed.
1264 void adjustExceptionSpec(FunctionDecl *FD,
1265 const FunctionProtoType::ExceptionSpecInfo &ESI,
1266 bool AsWritten = false);
1267
1268 /// Get a function type and produce the equivalent function type where
1269 /// pointer size address spaces in the return type and parameter tyeps are
1270 /// replaced with the default address space.
1271 QualType getFunctionTypeWithoutPtrSizes(QualType T);
1272
1273 /// Determine whether two function types are the same, ignoring pointer sizes
1274 /// in the return type and parameter types.
1275 bool hasSameFunctionTypeIgnoringPtrSizes(QualType T, QualType U);
1276
1277 /// Return the uniqued reference to the type for a complex
1278 /// number with the specified element type.
1279 QualType getComplexType(QualType T) const;
1280 CanQualType getComplexType(CanQualType T) const {
1281 return CanQualType::CreateUnsafe(getComplexType((QualType) T));
1282 }
1283
1284 /// Return the uniqued reference to the type for a pointer to
1285 /// the specified type.
1286 QualType getPointerType(QualType T) const;
1287 CanQualType getPointerType(CanQualType T) const {
1288 return CanQualType::CreateUnsafe(getPointerType((QualType) T));
1289 }
1290
1291 /// Return the uniqued reference to a type adjusted from the original
1292 /// type to a new type.
1293 QualType getAdjustedType(QualType Orig, QualType New) const;
1294 CanQualType getAdjustedType(CanQualType Orig, CanQualType New) const {
1295 return CanQualType::CreateUnsafe(
1296 getAdjustedType((QualType)Orig, (QualType)New));
1297 }
1298
1299 /// Return the uniqued reference to the decayed version of the given
1300 /// type. Can only be called on array and function types which decay to
1301 /// pointer types.
1302 QualType getDecayedType(QualType T) const;
1303 CanQualType getDecayedType(CanQualType T) const {
1304 return CanQualType::CreateUnsafe(getDecayedType((QualType) T));
1305 }
1306
1307 /// Return the uniqued reference to the atomic type for the specified
1308 /// type.
1309 QualType getAtomicType(QualType T) const;
1310
1311 /// Return the uniqued reference to the type for a block of the
1312 /// specified type.
1313 QualType getBlockPointerType(QualType T) const;
1314
1315 /// Gets the struct used to keep track of the descriptor for pointer to
1316 /// blocks.
1317 QualType getBlockDescriptorType() const;
1318
1319 /// Return a read_only pipe type for the specified type.
1320 QualType getReadPipeType(QualType T) const;
1321
1322 /// Return a write_only pipe type for the specified type.
1323 QualType getWritePipeType(QualType T) const;
1324
1325 /// Return an extended integer type with the specified signedness and bit
1326 /// count.
1327 QualType getExtIntType(bool Unsigned, unsigned NumBits) const;
1328
1329 /// Return a dependent extended integer type with the specified signedness and
1330 /// bit count.
1331 QualType getDependentExtIntType(bool Unsigned, Expr *BitsExpr) const;
1332
1333 /// Gets the struct used to keep track of the extended descriptor for
1334 /// pointer to blocks.
1335 QualType getBlockDescriptorExtendedType() const;
1336
1337 /// Map an AST Type to an OpenCLTypeKind enum value.
1338 OpenCLTypeKind getOpenCLTypeKind(const Type *T) const;
1339
1340 /// Get address space for OpenCL type.
1341 LangAS getOpenCLTypeAddrSpace(const Type *T) const;
1342
1343 void setcudaConfigureCallDecl(FunctionDecl *FD) {
1344 cudaConfigureCallDecl = FD;
1345 }
1346
1347 FunctionDecl *getcudaConfigureCallDecl() {
1348 return cudaConfigureCallDecl;
1349 }
1350
1351 /// Returns true iff we need copy/dispose helpers for the given type.
1352 bool BlockRequiresCopying(QualType Ty, const VarDecl *D);
1353
1354 /// Returns true, if given type has a known lifetime. HasByrefExtendedLayout
1355 /// is set to false in this case. If HasByrefExtendedLayout returns true,
1356 /// byref variable has extended lifetime.
1357 bool getByrefLifetime(QualType Ty,
1358 Qualifiers::ObjCLifetime &Lifetime,
1359 bool &HasByrefExtendedLayout) const;
1360
1361 /// Return the uniqued reference to the type for an lvalue reference
1362 /// to the specified type.
1363 QualType getLValueReferenceType(QualType T, bool SpelledAsLValue = true)
1364 const;
1365
1366 /// Return the uniqued reference to the type for an rvalue reference
1367 /// to the specified type.
1368 QualType getRValueReferenceType(QualType T) const;
1369
1370 /// Return the uniqued reference to the type for a member pointer to
1371 /// the specified type in the specified class.
1372 ///
1373 /// The class \p Cls is a \c Type because it could be a dependent name.
1374 QualType getMemberPointerType(QualType T, const Type *Cls) const;
1375
1376 /// Return a non-unique reference to the type for a variable array of
1377 /// the specified element type.
1378 QualType getVariableArrayType(QualType EltTy, Expr *NumElts,
1379 ArrayType::ArraySizeModifier ASM,
1380 unsigned IndexTypeQuals,
1381 SourceRange Brackets) const;
1382
1383 /// Return a non-unique reference to the type for a dependently-sized
1384 /// array of the specified element type.
1385 ///
1386 /// FIXME: We will need these to be uniqued, or at least comparable, at some
1387 /// point.
1388 QualType getDependentSizedArrayType(QualType EltTy, Expr *NumElts,
1389 ArrayType::ArraySizeModifier ASM,
1390 unsigned IndexTypeQuals,
1391 SourceRange Brackets) const;
1392
1393 /// Return a unique reference to the type for an incomplete array of
1394 /// the specified element type.
1395 QualType getIncompleteArrayType(QualType EltTy,
1396 ArrayType::ArraySizeModifier ASM,
1397 unsigned IndexTypeQuals) const;
1398
1399 /// Return the unique reference to the type for a constant array of
1400 /// the specified element type.
1401 QualType getConstantArrayType(QualType EltTy, const llvm::APInt &ArySize,
1402 const Expr *SizeExpr,
1403 ArrayType::ArraySizeModifier ASM,
1404 unsigned IndexTypeQuals) const;
1405
1406 /// Return a type for a constant array for a string literal of the
1407 /// specified element type and length.
1408 QualType getStringLiteralArrayType(QualType EltTy, unsigned Length) const;
1409
1410 /// Returns a vla type where known sizes are replaced with [*].
1411 QualType getVariableArrayDecayedType(QualType Ty) const;
1412
// Convenience struct to return information about a builtin vector type.
struct BuiltinVectorTypeInfo {
  // Type of a single scalar element of the vector.
  QualType ElementType;
  // Element count; llvm::ElementCount also encodes whether it is scalable.
  llvm::ElementCount EC;
  // Number of vectors described (>1 in the case of a vector tuple).
  unsigned NumVectors;
  BuiltinVectorTypeInfo(QualType ElementType, llvm::ElementCount EC,
                        unsigned NumVectors)
      : ElementType(ElementType), EC(EC), NumVectors(NumVectors) {}
};
1422
1423 /// Returns the element type, element count and number of vectors
1424 /// (in case of tuple) for a builtin vector type.
1425 BuiltinVectorTypeInfo
1426 getBuiltinVectorTypeInfo(const BuiltinType *VecTy) const;
1427
1428 /// Return the unique reference to a scalable vector type of the specified
1429 /// element type and scalable number of elements.
1430 ///
1431 /// \pre \p EltTy must be a built-in type.
1432 QualType getScalableVectorType(QualType EltTy, unsigned NumElts) const;
1433
1434 /// Return the unique reference to a vector type of the specified
1435 /// element type and size.
1436 ///
1437 /// \pre \p VectorType must be a built-in type.
1438 QualType getVectorType(QualType VectorType, unsigned NumElts,
1439 VectorType::VectorKind VecKind) const;
1440 /// Return the unique reference to the type for a dependently sized vector of
1441 /// the specified element type.
1442 QualType getDependentVectorType(QualType VectorType, Expr *SizeExpr,
1443 SourceLocation AttrLoc,
1444 VectorType::VectorKind VecKind) const;
1445
1446 /// Return the unique reference to an extended vector type
1447 /// of the specified element type and size.
1448 ///
1449 /// \pre \p VectorType must be a built-in type.
1450 QualType getExtVectorType(QualType VectorType, unsigned NumElts) const;
1451
1452 /// \pre Return a non-unique reference to the type for a dependently-sized
1453 /// vector of the specified element type.
1454 ///
1455 /// FIXME: We will need these to be uniqued, or at least comparable, at some
1456 /// point.
1457 QualType getDependentSizedExtVectorType(QualType VectorType,
1458 Expr *SizeExpr,
1459 SourceLocation AttrLoc) const;
1460
1461 /// Return the unique reference to the matrix type of the specified element
1462 /// type and size
1463 ///
1464 /// \pre \p ElementType must be a valid matrix element type (see
1465 /// MatrixType::isValidElementType).
1466 QualType getConstantMatrixType(QualType ElementType, unsigned NumRows,
1467 unsigned NumColumns) const;
1468
1469 /// Return the unique reference to the matrix type of the specified element
1470 /// type and size
1471 QualType getDependentSizedMatrixType(QualType ElementType, Expr *RowExpr,
1472 Expr *ColumnExpr,
1473 SourceLocation AttrLoc) const;
1474
1475 QualType getDependentAddressSpaceType(QualType PointeeType,
1476 Expr *AddrSpaceExpr,
1477 SourceLocation AttrLoc) const;
1478
1479 /// Return a K&R style C function type like 'int()'.
1480 QualType getFunctionNoProtoType(QualType ResultTy,
1481 const FunctionType::ExtInfo &Info) const;
1482
1483 QualType getFunctionNoProtoType(QualType ResultTy) const {
1484 return getFunctionNoProtoType(ResultTy, FunctionType::ExtInfo());
1485 }
1486
1487 /// Return a normal function type with a typed argument list.
1488 QualType getFunctionType(QualType ResultTy, ArrayRef<QualType> Args,
1489 const FunctionProtoType::ExtProtoInfo &EPI) const {
1490 return getFunctionTypeInternal(ResultTy, Args, EPI, false);
1491 }
1492
1493 QualType adjustStringLiteralBaseType(QualType StrLTy) const;
1494
1495private:
1496 /// Return a normal function type with a typed argument list.
1497 QualType getFunctionTypeInternal(QualType ResultTy, ArrayRef<QualType> Args,
1498 const FunctionProtoType::ExtProtoInfo &EPI,
1499 bool OnlyWantCanonical) const;
1500
1501public:
/// Return the unique reference to the type for the specified type
/// declaration.
///
/// The Type node for a TypeDecl is built at most once and then cached in
/// Decl->TypeForDecl, so repeated calls are cheap.
QualType getTypeDeclType(const TypeDecl *Decl,
                         const TypeDecl *PrevDecl = nullptr) const {
  assert(Decl && "Passed null for Decl param")((void)0);
  // Fast path: the type node was already created for this declaration.
  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);

  if (PrevDecl) {
    // Reuse the type node from a previous declaration so that all
    // redeclarations share a single Type.
    assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl")((void)0);
    Decl->TypeForDecl = PrevDecl->TypeForDecl;
    return QualType(PrevDecl->TypeForDecl, 0);
  }

  // Slow path: actually build (and cache) the type node for this decl.
  return getTypeDeclTypeSlow(Decl);
}
1517
1518 /// Return the unique reference to the type for the specified
1519 /// typedef-name decl.
1520 QualType getTypedefType(const TypedefNameDecl *Decl,
1521 QualType Underlying = QualType()) const;
1522
1523 QualType getRecordType(const RecordDecl *Decl) const;
1524
1525 QualType getEnumType(const EnumDecl *Decl) const;
1526
1527 QualType getInjectedClassNameType(CXXRecordDecl *Decl, QualType TST) const;
1528
1529 QualType getAttributedType(attr::Kind attrKind,
1530 QualType modifiedType,
1531 QualType equivalentType);
1532
1533 QualType getSubstTemplateTypeParmType(const TemplateTypeParmType *Replaced,
1534 QualType Replacement) const;
1535 QualType getSubstTemplateTypeParmPackType(
1536 const TemplateTypeParmType *Replaced,
1537 const TemplateArgument &ArgPack);
1538
1539 QualType
1540 getTemplateTypeParmType(unsigned Depth, unsigned Index,
1541 bool ParameterPack,
1542 TemplateTypeParmDecl *ParmDecl = nullptr) const;
1543
1544 QualType getTemplateSpecializationType(TemplateName T,
1545 ArrayRef<TemplateArgument> Args,
1546 QualType Canon = QualType()) const;
1547
1548 QualType
1549 getCanonicalTemplateSpecializationType(TemplateName T,
1550 ArrayRef<TemplateArgument> Args) const;
1551
1552 QualType getTemplateSpecializationType(TemplateName T,
1553 const TemplateArgumentListInfo &Args,
1554 QualType Canon = QualType()) const;
1555
1556 TypeSourceInfo *
1557 getTemplateSpecializationTypeInfo(TemplateName T, SourceLocation TLoc,
1558 const TemplateArgumentListInfo &Args,
1559 QualType Canon = QualType()) const;
1560
1561 QualType getParenType(QualType NamedType) const;
1562
1563 QualType getMacroQualifiedType(QualType UnderlyingTy,
1564 const IdentifierInfo *MacroII) const;
1565
1566 QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
1567 NestedNameSpecifier *NNS, QualType NamedType,
1568 TagDecl *OwnedTagDecl = nullptr) const;
1569 QualType getDependentNameType(ElaboratedTypeKeyword Keyword,
1570 NestedNameSpecifier *NNS,
1571 const IdentifierInfo *Name,
1572 QualType Canon = QualType()) const;
1573
1574 QualType getDependentTemplateSpecializationType(ElaboratedTypeKeyword Keyword,
1575 NestedNameSpecifier *NNS,
1576 const IdentifierInfo *Name,
1577 const TemplateArgumentListInfo &Args) const;
1578 QualType getDependentTemplateSpecializationType(
1579 ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS,
1580 const IdentifierInfo *Name, ArrayRef<TemplateArgument> Args) const;
1581
1582 TemplateArgument getInjectedTemplateArg(NamedDecl *ParamDecl);
1583
1584 /// Get a template argument list with one argument per template parameter
1585 /// in a template parameter list, such as for the injected class name of
1586 /// a class template.
1587 void getInjectedTemplateArgs(const TemplateParameterList *Params,
1588 SmallVectorImpl<TemplateArgument> &Args);
1589
1590 /// Form a pack expansion type with the given pattern.
1591 /// \param NumExpansions The number of expansions for the pack, if known.
1592 /// \param ExpectPackInType If \c false, we should not expect \p Pattern to
1593 /// contain an unexpanded pack. This only makes sense if the pack
1594 /// expansion is used in a context where the arity is inferred from
1595 /// elsewhere, such as if the pattern contains a placeholder type or
1596 /// if this is the canonical type of another pack expansion type.
1597 QualType getPackExpansionType(QualType Pattern,
1598 Optional<unsigned> NumExpansions,
1599 bool ExpectPackInType = true);
1600
1601 QualType getObjCInterfaceType(const ObjCInterfaceDecl *Decl,
1602 ObjCInterfaceDecl *PrevDecl = nullptr) const;
1603
1604 /// Legacy interface: cannot provide type arguments or __kindof.
1605 QualType getObjCObjectType(QualType Base,
1606 ObjCProtocolDecl * const *Protocols,
1607 unsigned NumProtocols) const;
1608
1609 QualType getObjCObjectType(QualType Base,
1610 ArrayRef<QualType> typeArgs,
1611 ArrayRef<ObjCProtocolDecl *> protocols,
1612 bool isKindOf) const;
1613
1614 QualType getObjCTypeParamType(const ObjCTypeParamDecl *Decl,
1615 ArrayRef<ObjCProtocolDecl *> protocols) const;
1616 void adjustObjCTypeParamBoundType(const ObjCTypeParamDecl *Orig,
1617 ObjCTypeParamDecl *New) const;
1618
1619 bool ObjCObjectAdoptsQTypeProtocols(QualType QT, ObjCInterfaceDecl *Decl);
1620
1621 /// QIdProtocolsAdoptObjCObjectProtocols - Checks that protocols in
1622 /// QT's qualified-id protocol list adopt all protocols in IDecl's list
1623 /// of protocols.
1624 bool QIdProtocolsAdoptObjCObjectProtocols(QualType QT,
1625 ObjCInterfaceDecl *IDecl);
1626
1627 /// Return a ObjCObjectPointerType type for the given ObjCObjectType.
1628 QualType getObjCObjectPointerType(QualType OIT) const;
1629
1630 /// GCC extension.
1631 QualType getTypeOfExprType(Expr *e) const;
1632 QualType getTypeOfType(QualType t) const;
1633
1634 /// C++11 decltype.
1635 QualType getDecltypeType(Expr *e, QualType UnderlyingType) const;
1636
1637 /// Unary type transforms
1638 QualType getUnaryTransformType(QualType BaseType, QualType UnderlyingType,
1639 UnaryTransformType::UTTKind UKind) const;
1640
1641 /// C++11 deduced auto type.
1642 QualType getAutoType(QualType DeducedType, AutoTypeKeyword Keyword,
1643 bool IsDependent, bool IsPack = false,
1644 ConceptDecl *TypeConstraintConcept = nullptr,
1645 ArrayRef<TemplateArgument> TypeConstraintArgs ={}) const;
1646
1647 /// C++11 deduction pattern for 'auto' type.
1648 QualType getAutoDeductType() const;
1649
1650 /// C++11 deduction pattern for 'auto &&' type.
1651 QualType getAutoRRefDeductType() const;
1652
1653 /// C++17 deduced class template specialization type.
1654 QualType getDeducedTemplateSpecializationType(TemplateName Template,
1655 QualType DeducedType,
1656 bool IsDependent) const;
1657
1658 /// Return the unique reference to the type for the specified TagDecl
1659 /// (struct/union/class/enum) decl.
1660 QualType getTagDeclType(const TagDecl *Decl) const;
1661
1662 /// Return the unique type for "size_t" (C99 7.17), defined in
1663 /// <stddef.h>.
1664 ///
1665 /// The sizeof operator requires this (C99 6.5.3.4p4).
1666 CanQualType getSizeType() const;
1667
1668 /// Return the unique signed counterpart of
1669 /// the integer type corresponding to size_t.
1670 CanQualType getSignedSizeType() const;
1671
1672 /// Return the unique type for "intmax_t" (C99 7.18.1.5), defined in
1673 /// <stdint.h>.
1674 CanQualType getIntMaxType() const;
1675
1676 /// Return the unique type for "uintmax_t" (C99 7.18.1.5), defined in
1677 /// <stdint.h>.
1678 CanQualType getUIntMaxType() const;
1679
1680 /// Return the unique wchar_t type available in C++ (and available as
1681 /// __wchar_t as a Microsoft extension).
1682 QualType getWCharType() const { return WCharTy; }
1683
1684 /// Return the type of wide characters. In C++, this returns the
1685 /// unique wchar_t type. In C99, this returns a type compatible with the type
1686 /// defined in <stddef.h> as defined by the target.
1687 QualType getWideCharType() const { return WideCharTy; }
1688
1689 /// Return the type of "signed wchar_t".
1690 ///
1691 /// Used when in C++, as a GCC extension.
1692 QualType getSignedWCharType() const;
1693
1694 /// Return the type of "unsigned wchar_t".
1695 ///
1696 /// Used when in C++, as a GCC extension.
1697 QualType getUnsignedWCharType() const;
1698
1699 /// In C99, this returns a type compatible with the type
1700 /// defined in <stddef.h> as defined by the target.
1701 QualType getWIntType() const { return WIntTy; }
1702
1703 /// Return a type compatible with "intptr_t" (C99 7.18.1.4),
1704 /// as defined by the target.
1705 QualType getIntPtrType() const;
1706
1707 /// Return a type compatible with "uintptr_t" (C99 7.18.1.4),
1708 /// as defined by the target.
1709 QualType getUIntPtrType() const;
1710
1711 /// Return the unique type for "ptrdiff_t" (C99 7.17) defined in
1712 /// <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9).
1713 QualType getPointerDiffType() const;
1714
1715 /// Return the unique unsigned counterpart of "ptrdiff_t"
1716 /// integer type. The standard (C11 7.21.6.1p7) refers to this type
1717 /// in the definition of %tu format specifier.
1718 QualType getUnsignedPointerDiffType() const;
1719
1720 /// Return the unique type for "pid_t" defined in
1721 /// <sys/types.h>. We need this to compute the correct type for vfork().
1722 QualType getProcessIDType() const;
1723
1724 /// Return the C structure type used to represent constant CFStrings.
1725 QualType getCFConstantStringType() const;
1726
1727 /// Returns the C struct type for objc_super
1728 QualType getObjCSuperType() const;
1729 void setObjCSuperType(QualType ST) { ObjCSuperType = ST; }
1730
1731 /// Get the structure type used to representation CFStrings, or NULL
1732 /// if it hasn't yet been built.
1733 QualType getRawCFConstantStringType() const {
1734 if (CFConstantStringTypeDecl)
1735 return getTypedefType(CFConstantStringTypeDecl);
1736 return QualType();
1737 }
1738 void setCFConstantStringType(QualType T);
1739 TypedefDecl *getCFConstantStringDecl() const;
1740 RecordDecl *getCFConstantStringTagDecl() const;
1741
1742 // This setter/getter represents the ObjC type for an NSConstantString.
1743 void setObjCConstantStringInterface(ObjCInterfaceDecl *Decl);
1744 QualType getObjCConstantStringInterface() const {
1745 return ObjCConstantStringType;
1746 }
1747
1748 QualType getObjCNSStringType() const {
1749 return ObjCNSStringType;
1750 }
1751
1752 void setObjCNSStringType(QualType T) {
1753 ObjCNSStringType = T;
1754 }
1755
1756 /// Retrieve the type that \c id has been defined to, which may be
1757 /// different from the built-in \c id if \c id has been typedef'd.
1758 QualType getObjCIdRedefinitionType() const {
1759 if (ObjCIdRedefinitionType.isNull())
1760 return getObjCIdType();
1761 return ObjCIdRedefinitionType;
1762 }
1763
1764 /// Set the user-written type that redefines \c id.
1765 void setObjCIdRedefinitionType(QualType RedefType) {
1766 ObjCIdRedefinitionType = RedefType;
1767 }
1768
1769 /// Retrieve the type that \c Class has been defined to, which may be
1770 /// different from the built-in \c Class if \c Class has been typedef'd.
1771 QualType getObjCClassRedefinitionType() const {
1772 if (ObjCClassRedefinitionType.isNull())
1773 return getObjCClassType();
1774 return ObjCClassRedefinitionType;
1775 }
1776
1777 /// Set the user-written type that redefines 'SEL'.
1778 void setObjCClassRedefinitionType(QualType RedefType) {
1779 ObjCClassRedefinitionType = RedefType;
1780 }
1781
1782 /// Retrieve the type that 'SEL' has been defined to, which may be
1783 /// different from the built-in 'SEL' if 'SEL' has been typedef'd.
1784 QualType getObjCSelRedefinitionType() const {
1785 if (ObjCSelRedefinitionType.isNull())
1786 return getObjCSelType();
1787 return ObjCSelRedefinitionType;
1788 }
1789
1790 /// Set the user-written type that redefines 'SEL'.
1791 void setObjCSelRedefinitionType(QualType RedefType) {
1792 ObjCSelRedefinitionType = RedefType;
1793 }
1794
1795 /// Retrieve the identifier 'NSObject'.
1796 IdentifierInfo *getNSObjectName() const {
1797 if (!NSObjectName) {
1798 NSObjectName = &Idents.get("NSObject");
1799 }
1800
1801 return NSObjectName;
1802 }
1803
1804 /// Retrieve the identifier 'NSCopying'.
1805 IdentifierInfo *getNSCopyingName() {
1806 if (!NSCopyingName) {
1807 NSCopyingName = &Idents.get("NSCopying");
1808 }
1809
1810 return NSCopyingName;
1811 }
1812
1813 CanQualType getNSUIntegerType() const;
1814
1815 CanQualType getNSIntegerType() const;
1816
1817 /// Retrieve the identifier 'bool'.
1818 IdentifierInfo *getBoolName() const {
1819 if (!BoolName)
1820 BoolName = &Idents.get("bool");
1821 return BoolName;
1822 }
1823
1824 IdentifierInfo *getMakeIntegerSeqName() const {
1825 if (!MakeIntegerSeqName)
1826 MakeIntegerSeqName = &Idents.get("__make_integer_seq");
1827 return MakeIntegerSeqName;
1828 }
1829
1830 IdentifierInfo *getTypePackElementName() const {
1831 if (!TypePackElementName)
1832 TypePackElementName = &Idents.get("__type_pack_element");
1833 return TypePackElementName;
1834 }
1835
1836 /// Retrieve the Objective-C "instancetype" type, if already known;
1837 /// otherwise, returns a NULL type;
1838 QualType getObjCInstanceType() {
1839 return getTypeDeclType(getObjCInstanceTypeDecl());
1840 }
1841
1842 /// Retrieve the typedef declaration corresponding to the Objective-C
1843 /// "instancetype" type.
1844 TypedefDecl *getObjCInstanceTypeDecl();
1845
1846 /// Set the type for the C FILE type.
1847 void setFILEDecl(TypeDecl *FILEDecl) { this->FILEDecl = FILEDecl; }
1848
1849 /// Retrieve the C FILE type.
1850 QualType getFILEType() const {
1851 if (FILEDecl)
1852 return getTypeDeclType(FILEDecl);
1853 return QualType();
1854 }
1855
1856 /// Set the type for the C jmp_buf type.
1857 void setjmp_bufDecl(TypeDecl *jmp_bufDecl) {
1858 this->jmp_bufDecl = jmp_bufDecl;
1859 }
1860
1861 /// Retrieve the C jmp_buf type.
1862 QualType getjmp_bufType() const {
1863 if (jmp_bufDecl)
1864 return getTypeDeclType(jmp_bufDecl);
1865 return QualType();
1866 }
1867
1868 /// Set the type for the C sigjmp_buf type.
1869 void setsigjmp_bufDecl(TypeDecl *sigjmp_bufDecl) {
1870 this->sigjmp_bufDecl = sigjmp_bufDecl;
1871 }
1872
1873 /// Retrieve the C sigjmp_buf type.
1874 QualType getsigjmp_bufType() const {
1875 if (sigjmp_bufDecl)
1876 return getTypeDeclType(sigjmp_bufDecl);
1877 return QualType();
1878 }
1879
1880 /// Set the type for the C ucontext_t type.
1881 void setucontext_tDecl(TypeDecl *ucontext_tDecl) {
1882 this->ucontext_tDecl = ucontext_tDecl;
1883 }
1884
1885 /// Retrieve the C ucontext_t type.
1886 QualType getucontext_tType() const {
1887 if (ucontext_tDecl)
1888 return getTypeDeclType(ucontext_tDecl);
1889 return QualType();
1890 }
1891
1892 /// The result type of logical operations, '<', '>', '!=', etc.
1893 QualType getLogicalOperationType() const {
1894 return getLangOpts().CPlusPlus ? BoolTy : IntTy;
1895 }
1896
1897 /// Emit the Objective-CC type encoding for the given type \p T into
1898 /// \p S.
1899 ///
1900 /// If \p Field is specified then record field names are also encoded.
1901 void getObjCEncodingForType(QualType T, std::string &S,
1902 const FieldDecl *Field=nullptr,
1903 QualType *NotEncodedT=nullptr) const;
1904
1905 /// Emit the Objective-C property type encoding for the given
1906 /// type \p T into \p S.
1907 void getObjCEncodingForPropertyType(QualType T, std::string &S) const;
1908
1909 void getLegacyIntegralTypeEncoding(QualType &t) const;
1910
1911 /// Put the string version of the type qualifiers \p QT into \p S.
1912 void getObjCEncodingForTypeQualifier(Decl::ObjCDeclQualifier QT,
1913 std::string &S) const;
1914
1915 /// Emit the encoded type for the function \p Decl into \p S.
1916 ///
1917 /// This is in the same format as Objective-C method encodings.
1918 ///
1919 /// \returns true if an error occurred (e.g., because one of the parameter
1920 /// types is incomplete), false otherwise.
1921 std::string getObjCEncodingForFunctionDecl(const FunctionDecl *Decl) const;
1922
1923 /// Emit the encoded type for the method declaration \p Decl into
1924 /// \p S.
1925 std::string getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl,
1926 bool Extended = false) const;
1927
1928 /// Return the encoded type for this block declaration.
1929 std::string getObjCEncodingForBlock(const BlockExpr *blockExpr) const;
1930
1931 /// getObjCEncodingForPropertyDecl - Return the encoded type for
1932 /// this method declaration. If non-NULL, Container must be either
1933 /// an ObjCCategoryImplDecl or ObjCImplementationDecl; it should
1934 /// only be NULL when getting encodings for protocol properties.
1935 std::string getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD,
1936 const Decl *Container) const;
1937
1938 bool ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto,
1939 ObjCProtocolDecl *rProto) const;
1940
1941 ObjCPropertyImplDecl *getObjCPropertyImplDeclForPropertyDecl(
1942 const ObjCPropertyDecl *PD,
1943 const Decl *Container) const;
1944
1945 /// Return the size of type \p T for Objective-C encoding purpose,
1946 /// in characters.
1947 CharUnits getObjCEncodingTypeSize(QualType T) const;
1948
1949 /// Retrieve the typedef corresponding to the predefined \c id type
1950 /// in Objective-C.
1951 TypedefDecl *getObjCIdDecl() const;
1952
1953 /// Represents the Objective-CC \c id type.
1954 ///
1955 /// This is set up lazily, by Sema. \c id is always a (typedef for a)
1956 /// pointer type, a pointer to a struct.
1957 QualType getObjCIdType() const {
1958 return getTypeDeclType(getObjCIdDecl());
1959 }
1960
1961 /// Retrieve the typedef corresponding to the predefined 'SEL' type
1962 /// in Objective-C.
1963 TypedefDecl *getObjCSelDecl() const;
1964
1965 /// Retrieve the type that corresponds to the predefined Objective-C
1966 /// 'SEL' type.
1967 QualType getObjCSelType() const {
1968 return getTypeDeclType(getObjCSelDecl());
1969 }
1970
1971 /// Retrieve the typedef declaration corresponding to the predefined
1972 /// Objective-C 'Class' type.
1973 TypedefDecl *getObjCClassDecl() const;
1974
1975 /// Represents the Objective-C \c Class type.
1976 ///
1977 /// This is set up lazily, by Sema. \c Class is always a (typedef for a)
1978 /// pointer type, a pointer to a struct.
1979 QualType getObjCClassType() const {
1980 return getTypeDeclType(getObjCClassDecl());
1981 }
1982
1983 /// Retrieve the Objective-C class declaration corresponding to
1984 /// the predefined \c Protocol class.
1985 ObjCInterfaceDecl *getObjCProtocolDecl() const;
1986
1987 /// Retrieve declaration of 'BOOL' typedef
1988 TypedefDecl *getBOOLDecl() const {
1989 return BOOLDecl;
1990 }
1991
1992 /// Save declaration of 'BOOL' typedef
1993 void setBOOLDecl(TypedefDecl *TD) {
1994 BOOLDecl = TD;
1995 }
1996
1997 /// type of 'BOOL' type.
1998 QualType getBOOLType() const {
1999 return getTypeDeclType(getBOOLDecl());
2000 }
2001
2002 /// Retrieve the type of the Objective-C \c Protocol class.
2003 QualType getObjCProtoType() const {
2004 return getObjCInterfaceType(getObjCProtocolDecl());
2005 }
2006
2007 /// Retrieve the C type declaration corresponding to the predefined
2008 /// \c __builtin_va_list type.
2009 TypedefDecl *getBuiltinVaListDecl() const;
2010
2011 /// Retrieve the type of the \c __builtin_va_list type.
2012 QualType getBuiltinVaListType() const {
2013 return getTypeDeclType(getBuiltinVaListDecl());
2014 }
2015
2016 /// Retrieve the C type declaration corresponding to the predefined
2017 /// \c __va_list_tag type used to help define the \c __builtin_va_list type
2018 /// for some targets.
2019 Decl *getVaListTagDecl() const;
2020
2021 /// Retrieve the C type declaration corresponding to the predefined
2022 /// \c __builtin_ms_va_list type.
2023 TypedefDecl *getBuiltinMSVaListDecl() const;
2024
2025 /// Retrieve the type of the \c __builtin_ms_va_list type.
2026 QualType getBuiltinMSVaListType() const {
2027 return getTypeDeclType(getBuiltinMSVaListDecl());
2028 }
2029
2030 /// Retrieve the implicitly-predeclared 'struct _GUID' declaration.
2031 TagDecl *getMSGuidTagDecl() const { return MSGuidTagDecl; }
2032
2033 /// Retrieve the implicitly-predeclared 'struct _GUID' type.
2034 QualType getMSGuidType() const {
2035 assert(MSGuidTagDecl && "asked for GUID type but MS extensions disabled")((void)0);
2036 return getTagDeclType(MSGuidTagDecl);
2037 }
2038
2039 /// Return whether a declaration to a builtin is allowed to be
2040 /// overloaded/redeclared.
2041 bool canBuiltinBeRedeclared(const FunctionDecl *) const;
2042
2043 /// Return a type with additional \c const, \c volatile, or
2044 /// \c restrict qualifiers.
2045 QualType getCVRQualifiedType(QualType T, unsigned CVR) const {
2046 return getQualifiedType(T, Qualifiers::fromCVRMask(CVR));
2047 }
2048
2049 /// Un-split a SplitQualType.
2050 QualType getQualifiedType(SplitQualType split) const {
2051 return getQualifiedType(split.Ty, split.Quals);
2052 }
2053
2054 /// Return a type with additional qualifiers.
2055 QualType getQualifiedType(QualType T, Qualifiers Qs) const {
2056 if (!Qs.hasNonFastQualifiers())
2057 return T.withFastQualifiers(Qs.getFastQualifiers());
2058 QualifierCollector Qc(Qs);
2059 const Type *Ptr = Qc.strip(T);
2060 return getExtQualType(Ptr, Qc);
2061 }
2062
2063 /// Return a type with additional qualifiers.
2064 QualType getQualifiedType(const Type *T, Qualifiers Qs) const {
2065 if (!Qs.hasNonFastQualifiers())
2066 return QualType(T, Qs.getFastQualifiers());
2067 return getExtQualType(T, Qs);
2068 }
2069
2070 /// Return a type with the given lifetime qualifier.
2071 ///
2072 /// \pre Neither type.ObjCLifetime() nor \p lifetime may be \c OCL_None.
2073 QualType getLifetimeQualifiedType(QualType type,
2074 Qualifiers::ObjCLifetime lifetime) {
2075 assert(type.getObjCLifetime() == Qualifiers::OCL_None)((void)0);
2076 assert(lifetime != Qualifiers::OCL_None)((void)0);
2077
2078 Qualifiers qs;
2079 qs.addObjCLifetime(lifetime);
2080 return getQualifiedType(type, qs);
2081 }
2082
2083 /// getUnqualifiedObjCPointerType - Returns version of
2084 /// Objective-C pointer type with lifetime qualifier removed.
2085 QualType getUnqualifiedObjCPointerType(QualType type) const {
2086 if (!type.getTypePtr()->isObjCObjectPointerType() ||
2087 !type.getQualifiers().hasObjCLifetime())
2088 return type;
2089 Qualifiers Qs = type.getQualifiers();
2090 Qs.removeObjCLifetime();
2091 return getQualifiedType(type.getUnqualifiedType(), Qs);
2092 }
2093
2094 unsigned char getFixedPointScale(QualType Ty) const;
2095 unsigned char getFixedPointIBits(QualType Ty) const;
2096 llvm::FixedPointSemantics getFixedPointSemantics(QualType Ty) const;
2097 llvm::APFixedPoint getFixedPointMax(QualType Ty) const;
2098 llvm::APFixedPoint getFixedPointMin(QualType Ty) const;
2099
2100 DeclarationNameInfo getNameForTemplate(TemplateName Name,
2101 SourceLocation NameLoc) const;
2102
2103 TemplateName getOverloadedTemplateName(UnresolvedSetIterator Begin,
2104 UnresolvedSetIterator End) const;
2105 TemplateName getAssumedTemplateName(DeclarationName Name) const;
2106
2107 TemplateName getQualifiedTemplateName(NestedNameSpecifier *NNS,
2108 bool TemplateKeyword,
2109 TemplateDecl *Template) const;
2110
2111 TemplateName getDependentTemplateName(NestedNameSpecifier *NNS,
2112 const IdentifierInfo *Name) const;
2113 TemplateName getDependentTemplateName(NestedNameSpecifier *NNS,
2114 OverloadedOperatorKind Operator) const;
2115 TemplateName getSubstTemplateTemplateParm(TemplateTemplateParmDecl *param,
2116 TemplateName replacement) const;
2117 TemplateName getSubstTemplateTemplateParmPack(TemplateTemplateParmDecl *Param,
2118 const TemplateArgument &ArgPack) const;
2119
2120 enum GetBuiltinTypeError {
2121 /// No error
2122 GE_None,
2123
2124 /// Missing a type
2125 GE_Missing_type,
2126
2127 /// Missing a type from <stdio.h>
2128 GE_Missing_stdio,
2129
2130 /// Missing a type from <setjmp.h>
2131 GE_Missing_setjmp,
2132
2133 /// Missing a type from <ucontext.h>
2134 GE_Missing_ucontext
2135 };
2136
2137 QualType DecodeTypeStr(const char *&Str, const ASTContext &Context,
2138 ASTContext::GetBuiltinTypeError &Error,
2139 bool &RequireICE, bool AllowTypeModifiers) const;
2140
2141 /// Return the type for the specified builtin.
2142 ///
2143 /// If \p IntegerConstantArgs is non-null, it is filled in with a bitmask of
2144 /// arguments to the builtin that are required to be integer constant
2145 /// expressions.
2146 QualType GetBuiltinType(unsigned ID, GetBuiltinTypeError &Error,
2147 unsigned *IntegerConstantArgs = nullptr) const;
2148
2149 /// Types and expressions required to build C++2a three-way comparisons
2150 /// using operator<=>, including the values return by builtin <=> operators.
2151 ComparisonCategories CompCategories;
2152
2153private:
2154 CanQualType getFromTargetType(unsigned Type) const;
2155 TypeInfo getTypeInfoImpl(const Type *T) const;
2156
2157 //===--------------------------------------------------------------------===//
2158 // Type Predicates.
2159 //===--------------------------------------------------------------------===//
2160
2161public:
2162 /// Return one of the GCNone, Weak or Strong Objective-C garbage
2163 /// collection attributes.
2164 Qualifiers::GC getObjCGCAttrKind(QualType Ty) const;
2165
2166 /// Return true if the given vector types are of the same unqualified
2167 /// type or if they are equivalent to the same GCC vector type.
2168 ///
2169 /// \note This ignores whether they are target-specific (AltiVec or Neon)
2170 /// types.
2171 bool areCompatibleVectorTypes(QualType FirstVec, QualType SecondVec);
2172
2173 /// Return true if the given types are an SVE builtin and a VectorType that
2174 /// is a fixed-length representation of the SVE builtin for a specific
2175 /// vector-length.
2176 bool areCompatibleSveTypes(QualType FirstType, QualType SecondType);
2177
2178 /// Return true if the given vector types are lax-compatible SVE vector types,
2179 /// false otherwise.
2180 bool areLaxCompatibleSveTypes(QualType FirstType, QualType SecondType);
2181
2182 /// Return true if the type has been explicitly qualified with ObjC ownership.
2183 /// A type may be implicitly qualified with ownership under ObjC ARC, and in
2184 /// some cases the compiler treats these differently.
2185 bool hasDirectOwnershipQualifier(QualType Ty) const;
2186
2187 /// Return true if this is an \c NSObject object with its \c NSObject
2188 /// attribute set.
2189 static bool isObjCNSObjectType(QualType Ty) {
2190 return Ty->isObjCNSObjectType();
2191 }
2192
2193 //===--------------------------------------------------------------------===//
2194 // Type Sizing and Analysis
2195 //===--------------------------------------------------------------------===//
2196
2197 /// Return the APFloat 'semantics' for the specified scalar floating
2198 /// point type.
2199 const llvm::fltSemantics &getFloatTypeSemantics(QualType T) const;
2200
2201 /// Get the size and alignment of the specified complete type in bits.
2202 TypeInfo getTypeInfo(const Type *T) const;
2203 TypeInfo getTypeInfo(QualType T) const { return getTypeInfo(T.getTypePtr()); }
2204
2205 /// Get default simd alignment of the specified complete type in bits.
2206 unsigned getOpenMPDefaultSimdAlign(QualType T) const;
2207
2208 /// Return the size of the specified (complete) type \p T, in bits.
2209 uint64_t getTypeSize(QualType T) const { return getTypeInfo(T).Width; }
2210 uint64_t getTypeSize(const Type *T) const { return getTypeInfo(T).Width; }
2211
2212 /// Return the size of the character type, in bits.
2213 uint64_t getCharWidth() const {
2214 return getTypeSize(CharTy);
2215 }
2216
2217 /// Convert a size in bits to a size in characters.
2218 CharUnits toCharUnitsFromBits(int64_t BitSize) const;
2219
2220 /// Convert a size in characters to a size in bits.
2221 int64_t toBits(CharUnits CharSize) const;
2222
2223 /// Return the size of the specified (complete) type \p T, in
2224 /// characters.
2225 CharUnits getTypeSizeInChars(QualType T) const;
2226 CharUnits getTypeSizeInChars(const Type *T) const;
2227
2228 Optional<CharUnits> getTypeSizeInCharsIfKnown(QualType Ty) const {
2229 if (Ty->isIncompleteType() || Ty->isDependentType())
2230 return None;
2231 return getTypeSizeInChars(Ty);
2232 }
2233
2234 Optional<CharUnits> getTypeSizeInCharsIfKnown(const Type *Ty) const {
2235 return getTypeSizeInCharsIfKnown(QualType(Ty, 0));
2236 }
2237
2238 /// Return the ABI-specified alignment of a (complete) type \p T, in
2239 /// bits.
2240 unsigned getTypeAlign(QualType T) const { return getTypeInfo(T).Align; }
2241 unsigned getTypeAlign(const Type *T) const { return getTypeInfo(T).Align; }
2242
2243 /// Return the ABI-specified natural alignment of a (complete) type \p T,
2244 /// before alignment adjustments, in bits.
2245 ///
2246 /// This alignment is curently used only by ARM and AArch64 when passing
2247 /// arguments of a composite type.
2248 unsigned getTypeUnadjustedAlign(QualType T) const {
2249 return getTypeUnadjustedAlign(T.getTypePtr());
2250 }
2251 unsigned getTypeUnadjustedAlign(const Type *T) const;
2252
2253 /// Return the alignment of a type, in bits, or 0 if
2254 /// the type is incomplete and we cannot determine the alignment (for
2255 /// example, from alignment attributes). The returned alignment is the
2256 /// Preferred alignment if NeedsPreferredAlignment is true, otherwise is the
2257 /// ABI alignment.
2258 unsigned getTypeAlignIfKnown(QualType T,
2259 bool NeedsPreferredAlignment = false) const;
2260
2261 /// Return the ABI-specified alignment of a (complete) type \p T, in
2262 /// characters.
2263 CharUnits getTypeAlignInChars(QualType T) const;
2264 CharUnits getTypeAlignInChars(const Type *T) const;
2265
2266 /// Return the PreferredAlignment of a (complete) type \p T, in
2267 /// characters.
2268 CharUnits getPreferredTypeAlignInChars(QualType T) const {
2269 return toCharUnitsFromBits(getPreferredTypeAlign(T));
2270 }
2271
2272 /// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a type,
2273 /// in characters, before alignment adjustments. This method does not work on
2274 /// incomplete types.
2275 CharUnits getTypeUnadjustedAlignInChars(QualType T) const;
2276 CharUnits getTypeUnadjustedAlignInChars(const Type *T) const;
2277
2278 // getTypeInfoDataSizeInChars - Return the size of a type, in chars. If the
2279 // type is a record, its data size is returned.
2280 TypeInfoChars getTypeInfoDataSizeInChars(QualType T) const;
2281
2282 TypeInfoChars getTypeInfoInChars(const Type *T) const;
2283 TypeInfoChars getTypeInfoInChars(QualType T) const;
2284
2285 /// Determine if the alignment the type has was required using an
2286 /// alignment attribute.
2287 bool isAlignmentRequired(const Type *T) const;
2288 bool isAlignmentRequired(QualType T) const;
2289
2290 /// Return the "preferred" alignment of the specified type \p T for
2291 /// the current target, in bits.
2292 ///
2293 /// This can be different than the ABI alignment in cases where it is
2294 /// beneficial for performance or backwards compatibility preserving to
2295 /// overalign a data type. (Note: despite the name, the preferred alignment
2296 /// is ABI-impacting, and not an optimization.)
2297 unsigned getPreferredTypeAlign(QualType T) const {
2298 return getPreferredTypeAlign(T.getTypePtr());
2299 }
2300 unsigned getPreferredTypeAlign(const Type *T) const;
2301
2302 /// Return the default alignment for __attribute__((aligned)) on
2303 /// this target, to be used if no alignment value is specified.
2304 unsigned getTargetDefaultAlignForAttributeAligned() const;
2305
2306 /// Return the alignment in bits that should be given to a
2307 /// global variable with type \p T.
2308 unsigned getAlignOfGlobalVar(QualType T) const;
2309
2310 /// Return the alignment in characters that should be given to a
2311 /// global variable with type \p T.
2312 CharUnits getAlignOfGlobalVarInChars(QualType T) const;
2313
2314 /// Return a conservative estimate of the alignment of the specified
2315 /// decl \p D.
2316 ///
2317 /// \pre \p D must not be a bitfield type, as bitfields do not have a valid
2318 /// alignment.
2319 ///
2320 /// If \p ForAlignof, references are treated like their underlying type
2321 /// and large arrays don't get any special treatment. If not \p ForAlignof
2322 /// it computes the value expected by CodeGen: references are treated like
2323 /// pointers and large arrays get extra alignment.
2324 CharUnits getDeclAlign(const Decl *D, bool ForAlignof = false) const;
2325
2326 /// Return the alignment (in bytes) of the thrown exception object. This is
2327 /// only meaningful for targets that allocate C++ exceptions in a system
2328 /// runtime, such as those using the Itanium C++ ABI.
2329 CharUnits getExnObjectAlignment() const;
2330
2331 /// Get or compute information about the layout of the specified
2332 /// record (struct/union/class) \p D, which indicates its size and field
2333 /// position information.
2334 const ASTRecordLayout &getASTRecordLayout(const RecordDecl *D) const;
2335
2336 /// Get or compute information about the layout of the specified
2337 /// Objective-C interface.
2338 const ASTRecordLayout &getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D)
2339 const;
2340
2341 void DumpRecordLayout(const RecordDecl *RD, raw_ostream &OS,
2342 bool Simple = false) const;
2343
2344 /// Get or compute information about the layout of the specified
2345 /// Objective-C implementation.
2346 ///
2347 /// This may differ from the interface if synthesized ivars are present.
2348 const ASTRecordLayout &
2349 getASTObjCImplementationLayout(const ObjCImplementationDecl *D) const;
2350
2351 /// Get our current best idea for the key function of the
2352 /// given record decl, or nullptr if there isn't one.
2353 ///
2354 /// The key function is, according to the Itanium C++ ABI section 5.2.3:
2355 /// ...the first non-pure virtual function that is not inline at the
2356 /// point of class definition.
2357 ///
2358 /// Other ABIs use the same idea. However, the ARM C++ ABI ignores
2359 /// virtual functions that are defined 'inline', which means that
2360 /// the result of this computation can change.
2361 const CXXMethodDecl *getCurrentKeyFunction(const CXXRecordDecl *RD);
2362
2363 /// Observe that the given method cannot be a key function.
2364 /// Checks the key-function cache for the method's class and clears it
2365 /// if matches the given declaration.
2366 ///
2367 /// This is used in ABIs where out-of-line definitions marked
2368 /// inline are not considered to be key functions.
2369 ///
2370 /// \param method should be the declaration from the class definition
2371 void setNonKeyFunction(const CXXMethodDecl *method);
2372
2373 /// Loading virtual member pointers using the virtual inheritance model
2374 /// always results in an adjustment using the vbtable even if the index is
2375 /// zero.
2376 ///
2377 /// This is usually OK because the first slot in the vbtable points
2378 /// backwards to the top of the MDC. However, the MDC might be reusing a
2379 /// vbptr from an nv-base. In this case, the first slot in the vbtable
2380 /// points to the start of the nv-base which introduced the vbptr and *not*
2381 /// the MDC. Modify the NonVirtualBaseAdjustment to account for this.
2382 CharUnits getOffsetOfBaseWithVBPtr(const CXXRecordDecl *RD) const;
2383
2384 /// Get the offset of a FieldDecl or IndirectFieldDecl, in bits.
2385 uint64_t getFieldOffset(const ValueDecl *FD) const;
2386
2387 /// Get the offset of an ObjCIvarDecl in bits.
2388 uint64_t lookupFieldBitOffset(const ObjCInterfaceDecl *OID,
2389 const ObjCImplementationDecl *ID,
2390 const ObjCIvarDecl *Ivar) const;
2391
2392 /// Find the 'this' offset for the member path in a pointer-to-member
2393 /// APValue.
2394 CharUnits getMemberPointerPathAdjustment(const APValue &MP) const;
2395
2396 bool isNearlyEmpty(const CXXRecordDecl *RD) const;
2397
2398 VTableContextBase *getVTableContext();
2399
2400 /// If \p T is null pointer, assume the target in ASTContext.
2401 MangleContext *createMangleContext(const TargetInfo *T = nullptr);
2402
2403 /// Creates a device mangle context to correctly mangle lambdas in a mixed
2404 /// architecture compile by setting the lambda mangling number source to the
2405 /// DeviceLambdaManglingNumber. Currently this asserts that the TargetInfo
2406 /// (from the AuxTargetInfo) is a an itanium target.
2407 MangleContext *createDeviceMangleContext(const TargetInfo &T);
2408
2409 void DeepCollectObjCIvars(const ObjCInterfaceDecl *OI, bool leafClass,
2410 SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const;
2411
2412 unsigned CountNonClassIvars(const ObjCInterfaceDecl *OI) const;
2413 void CollectInheritedProtocols(const Decl *CDecl,
2414 llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols);
2415
2416 /// Return true if the specified type has unique object representations
2417 /// according to (C++17 [meta.unary.prop]p9)
2418 bool hasUniqueObjectRepresentations(QualType Ty) const;
2419
2420 //===--------------------------------------------------------------------===//
2421 // Type Operators
2422 //===--------------------------------------------------------------------===//
2423
2424 /// Return the canonical (structural) type corresponding to the
2425 /// specified potentially non-canonical type \p T.
2426 ///
2427 /// The non-canonical version of a type may have many "decorated" versions of
2428 /// types. Decorators can include typedefs, 'typeof' operators, etc. The
2429 /// returned type is guaranteed to be free of any of these, allowing two
2430 /// canonical types to be compared for exact equality with a simple pointer
2431 /// comparison.
2432 CanQualType getCanonicalType(QualType T) const {
2433 return CanQualType::CreateUnsafe(T.getCanonicalType());
2434 }
2435
2436 const Type *getCanonicalType(const Type *T) const {
2437 return T->getCanonicalTypeInternal().getTypePtr();
2438 }
2439
2440 /// Return the canonical parameter type corresponding to the specific
2441 /// potentially non-canonical one.
2442 ///
2443 /// Qualifiers are stripped off, functions are turned into function
2444 /// pointers, and arrays decay one level into pointers.
2445 CanQualType getCanonicalParamType(QualType T) const;
2446
2447 /// Determine whether the given types \p T1 and \p T2 are equivalent.
2448 bool hasSameType(QualType T1, QualType T2) const {
2449 return getCanonicalType(T1) == getCanonicalType(T2);
2450 }
2451 bool hasSameType(const Type *T1, const Type *T2) const {
2452 return getCanonicalType(T1) == getCanonicalType(T2);
2453 }
2454
2455 /// Return this type as a completely-unqualified array type,
2456 /// capturing the qualifiers in \p Quals.
2457 ///
2458 /// This will remove the minimal amount of sugaring from the types, similar
2459 /// to the behavior of QualType::getUnqualifiedType().
2460 ///
2461 /// \param T is the qualified type, which may be an ArrayType
2462 ///
2463 /// \param Quals will receive the full set of qualifiers that were
2464 /// applied to the array.
2465 ///
2466 /// \returns if this is an array type, the completely unqualified array type
2467 /// that corresponds to it. Otherwise, returns T.getUnqualifiedType().
2468 QualType getUnqualifiedArrayType(QualType T, Qualifiers &Quals);
2469
2470 /// Determine whether the given types are equivalent after
2471 /// cvr-qualifiers have been removed.
2472 bool hasSameUnqualifiedType(QualType T1, QualType T2) const {
2473 return getCanonicalType(T1).getTypePtr() ==
2474 getCanonicalType(T2).getTypePtr();
2475 }
2476
2477 bool hasSameNullabilityTypeQualifier(QualType SubT, QualType SuperT,
2478 bool IsParam) const {
2479 auto SubTnullability = SubT->getNullability(*this);
2480 auto SuperTnullability = SuperT->getNullability(*this);
2481 if (SubTnullability.hasValue() == SuperTnullability.hasValue()) {
2482 // Neither has nullability; return true
2483 if (!SubTnullability)
2484 return true;
2485 // Both have nullability qualifier.
2486 if (*SubTnullability == *SuperTnullability ||
2487 *SubTnullability == NullabilityKind::Unspecified ||
2488 *SuperTnullability == NullabilityKind::Unspecified)
2489 return true;
2490
2491 if (IsParam) {
2492 // Ok for the superclass method parameter to be "nonnull" and the subclass
2493 // method parameter to be "nullable"
2494 return (*SuperTnullability == NullabilityKind::NonNull &&
2495 *SubTnullability == NullabilityKind::Nullable);
2496 }
2497 // For the return type, it's okay for the superclass method to specify
2498 // "nullable" and the subclass method specify "nonnull"
2499 return (*SuperTnullability == NullabilityKind::Nullable &&
2500 *SubTnullability == NullabilityKind::NonNull);
2501 }
2502 return true;
2503 }
2504
2505 bool ObjCMethodsAreEqual(const ObjCMethodDecl *MethodDecl,
2506 const ObjCMethodDecl *MethodImp);
2507
2508 bool UnwrapSimilarTypes(QualType &T1, QualType &T2);
2509 void UnwrapSimilarArrayTypes(QualType &T1, QualType &T2);
2510
2511 /// Determine if two types are similar, according to the C++ rules. That is,
2512 /// determine if they are the same other than qualifiers on the initial
2513 /// sequence of pointer / pointer-to-member / array (and in Clang, object
2514 /// pointer) types and their element types.
2515 ///
2516 /// Clang offers a number of qualifiers in addition to the C++ qualifiers;
2517 /// those qualifiers are also ignored in the 'similarity' check.
2518 bool hasSimilarType(QualType T1, QualType T2);
2519
2520 /// Determine if two types are similar, ignoring only CVR qualifiers.
2521 bool hasCvrSimilarType(QualType T1, QualType T2);
2522
2523 /// Retrieves the "canonical" nested name specifier for a
2524 /// given nested name specifier.
2525 ///
2526 /// The canonical nested name specifier is a nested name specifier
2527 /// that uniquely identifies a type or namespace within the type
2528 /// system. For example, given:
2529 ///
2530 /// \code
2531 /// namespace N {
2532 /// struct S {
2533 /// template<typename T> struct X { typename T* type; };
2534 /// };
2535 /// }
2536 ///
2537 /// template<typename T> struct Y {
2538 /// typename N::S::X<T>::type member;
2539 /// };
2540 /// \endcode
2541 ///
2542 /// Here, the nested-name-specifier for N::S::X<T>:: will be
2543 /// S::X<template-param-0-0>, since 'S' and 'X' are uniquely defined
2544 /// by declarations in the type system and the canonical type for
2545 /// the template type parameter 'T' is template-param-0-0.
2546 NestedNameSpecifier *
2547 getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const;
2548
2549 /// Retrieves the default calling convention for the current target.
2550 CallingConv getDefaultCallingConvention(bool IsVariadic,
2551 bool IsCXXMethod,
2552 bool IsBuiltin = false) const;
2553
2554 /// Retrieves the "canonical" template name that refers to a
2555 /// given template.
2556 ///
2557 /// The canonical template name is the simplest expression that can
2558 /// be used to refer to a given template. For most templates, this
2559 /// expression is just the template declaration itself. For example,
2560 /// the template std::vector can be referred to via a variety of
2561 /// names---std::vector, \::std::vector, vector (if vector is in
2562 /// scope), etc.---but all of these names map down to the same
2563 /// TemplateDecl, which is used to form the canonical template name.
2564 ///
2565 /// Dependent template names are more interesting. Here, the
2566 /// template name could be something like T::template apply or
2567 /// std::allocator<T>::template rebind, where the nested name
2568 /// specifier itself is dependent. In this case, the canonical
2569 /// template name uses the shortest form of the dependent
2570 /// nested-name-specifier, which itself contains all canonical
2571 /// types, values, and templates.
2572 TemplateName getCanonicalTemplateName(TemplateName Name) const;
2573
2574 /// Determine whether the given template names refer to the same
2575 /// template.
2576 bool hasSameTemplateName(TemplateName X, TemplateName Y);
2577
2578 /// Retrieve the "canonical" template argument.
2579 ///
2580 /// The canonical template argument is the simplest template argument
2581 /// (which may be a type, value, expression, or declaration) that
2582 /// expresses the value of the argument.
2583 TemplateArgument getCanonicalTemplateArgument(const TemplateArgument &Arg)
2584 const;
2585
2586 /// Type Query functions. If the type is an instance of the specified class,
2587 /// return the Type pointer for the underlying maximally pretty type. This
2588 /// is a member of ASTContext because this may need to do some amount of
2589 /// canonicalization, e.g. to move type qualifiers into the element type.
2590 const ArrayType *getAsArrayType(QualType T) const;
2591 const ConstantArrayType *getAsConstantArrayType(QualType T) const {
2592 return dyn_cast_or_null<ConstantArrayType>(getAsArrayType(T));
2593 }
2594 const VariableArrayType *getAsVariableArrayType(QualType T) const {
// NOTE(review): getAsArrayType(T) yields null for non-array types, and
// dyn_cast_or_null propagates that null — callers must check the result.
// This is the null source the analyzer trace below refers to.
2595 return dyn_cast_or_null<VariableArrayType>(getAsArrayType(T));
20
Assuming null pointer is passed into cast
21
Returning null pointer, which participates in a condition later
2596 }
2597 const IncompleteArrayType *getAsIncompleteArrayType(QualType T) const {
2598 return dyn_cast_or_null<IncompleteArrayType>(getAsArrayType(T));
2599 }
2600 const DependentSizedArrayType *getAsDependentSizedArrayType(QualType T)
2601 const {
2602 return dyn_cast_or_null<DependentSizedArrayType>(getAsArrayType(T));
2603 }
2604
2605 /// Return the innermost element type of an array type.
2606 ///
2607 /// For example, will return "int" for int[m][n]
2608 QualType getBaseElementType(const ArrayType *VAT) const;
2609
2610 /// Return the innermost element type of a type (which needn't
2611 /// actually be an array type).
2612 QualType getBaseElementType(QualType QT) const;
2613
2614 /// Return number of constant array elements.
2615 uint64_t getConstantArrayElementCount(const ConstantArrayType *CA) const;
2616
2617 /// Perform adjustment on the parameter type of a function.
2618 ///
2619 /// This routine adjusts the given parameter type @p T to the actual
2620 /// parameter type used by semantic analysis (C99 6.7.5.3p[7,8],
2621 /// C++ [dcl.fct]p3). The adjusted parameter type is returned.
2622 QualType getAdjustedParameterType(QualType T) const;
2623
2624 /// Retrieve the parameter type as adjusted for use in the signature
2625 /// of a function, decaying array and function types and removing top-level
2626 /// cv-qualifiers.
2627 QualType getSignatureParameterType(QualType T) const;
2628
2629 QualType getExceptionObjectType(QualType T) const;
2630
2631 /// Return the properly qualified result of decaying the specified
2632 /// array type to a pointer.
2633 ///
2634 /// This operation is non-trivial when handling typedefs etc. The canonical
2635 /// type of \p T must be an array type, this returns a pointer to a properly
2636 /// qualified element of the array.
2637 ///
2638 /// See C99 6.7.5.3p7 and C99 6.3.2.1p3.
2639 QualType getArrayDecayedType(QualType T) const;
2640
2641 /// Return the type that \p PromotableType will promote to: C99
2642 /// 6.3.1.1p2, assuming that \p PromotableType is a promotable integer type.
2643 QualType getPromotedIntegerType(QualType PromotableType) const;
2644
2645 /// Recurses in pointer/array types until it finds an Objective-C
2646 /// retainable type and returns its ownership.
2647 Qualifiers::ObjCLifetime getInnerObjCOwnership(QualType T) const;
2648
2649 /// Whether this is a promotable bitfield reference according
2650 /// to C99 6.3.1.1p2, bullet 2 (and GCC extensions).
2651 ///
2652 /// \returns the type this bit-field will promote to, or NULL if no
2653 /// promotion occurs.
2654 QualType isPromotableBitField(Expr *E) const;
2655
2656 /// Return the highest ranked integer type, see C99 6.3.1.8p1.
2657 ///
2658 /// If \p LHS > \p RHS, returns 1. If \p LHS == \p RHS, returns 0. If
2659 /// \p LHS < \p RHS, return -1.
2660 int getIntegerTypeOrder(QualType LHS, QualType RHS) const;
2661
2662 /// Compare the rank of the two specified floating point types,
2663 /// ignoring the domain of the type (i.e. 'double' == '_Complex double').
2664 ///
2665 /// If \p LHS > \p RHS, returns 1. If \p LHS == \p RHS, returns 0. If
2666 /// \p LHS < \p RHS, return -1.
2667 int getFloatingTypeOrder(QualType LHS, QualType RHS) const;
2668
2669 /// Compare the rank of two floating point types as above, but compare equal
2670 /// if both types have the same floating-point semantics on the target (i.e.
2671 /// long double and double on AArch64 will return 0).
2672 int getFloatingTypeSemanticOrder(QualType LHS, QualType RHS) const;
2673
2674 /// Return a real floating point or a complex type (based on
2675 /// \p typeDomain/\p typeSize).
2676 ///
2677 /// \param typeDomain a real floating point or complex type.
2678 /// \param typeSize a real floating point or complex type.
2679 QualType getFloatingTypeOfSizeWithinDomain(QualType typeSize,
2680 QualType typeDomain) const;
2681
2682 unsigned getTargetAddressSpace(QualType T) const {
2683 return getTargetAddressSpace(T.getQualifiers());
2684 }
2685
2686 unsigned getTargetAddressSpace(Qualifiers Q) const {
2687 return getTargetAddressSpace(Q.getAddressSpace());
2688 }
2689
2690 unsigned getTargetAddressSpace(LangAS AS) const;
2691
2692 LangAS getLangASForBuiltinAddressSpace(unsigned AS) const;
2693
2694 /// Get target-dependent integer value for null pointer which is used for
2695 /// constant folding.
2696 uint64_t getTargetNullPointerValue(QualType QT) const;
2697
2698 bool addressSpaceMapManglingFor(LangAS AS) const {
2699 return AddrSpaceMapMangling || isTargetAddressSpace(AS);
2700 }
2701
2702private:
2703 // Helper for integer ordering
2704 unsigned getIntegerRank(const Type *T) const;
2705
2706public:
2707 //===--------------------------------------------------------------------===//
2708 // Type Compatibility Predicates
2709 //===--------------------------------------------------------------------===//
2710
2711 /// Compatibility predicates used to check assignment expressions.
2712 bool typesAreCompatible(QualType T1, QualType T2,
2713 bool CompareUnqualified = false); // C99 6.2.7p1
2714
2715 bool propertyTypesAreCompatible(QualType, QualType);
2716 bool typesAreBlockPointerCompatible(QualType, QualType);
2717
2718 bool isObjCIdType(QualType T) const {
2719 return T == getObjCIdType();
2720 }
2721
2722 bool isObjCClassType(QualType T) const {
2723 return T == getObjCClassType();
2724 }
2725
2726 bool isObjCSelType(QualType T) const {
2727 return T == getObjCSelType();
2728 }
2729
2730 bool ObjCQualifiedIdTypesAreCompatible(const ObjCObjectPointerType *LHS,
2731 const ObjCObjectPointerType *RHS,
2732 bool ForCompare);
2733
2734 bool ObjCQualifiedClassTypesAreCompatible(const ObjCObjectPointerType *LHS,
2735 const ObjCObjectPointerType *RHS);
2736
2737 // Check the safety of assignment from LHS to RHS
2738 bool canAssignObjCInterfaces(const ObjCObjectPointerType *LHSOPT,
2739 const ObjCObjectPointerType *RHSOPT);
2740 bool canAssignObjCInterfaces(const ObjCObjectType *LHS,
2741 const ObjCObjectType *RHS);
2742 bool canAssignObjCInterfacesInBlockPointer(
2743 const ObjCObjectPointerType *LHSOPT,
2744 const ObjCObjectPointerType *RHSOPT,
2745 bool BlockReturnType);
2746 bool areComparableObjCPointerTypes(QualType LHS, QualType RHS);
2747 QualType areCommonBaseCompatible(const ObjCObjectPointerType *LHSOPT,
2748 const ObjCObjectPointerType *RHSOPT);
2749 bool canBindObjCObjectType(QualType To, QualType From);
2750
2751 // Functions for calculating composite types
2752 QualType mergeTypes(QualType, QualType, bool OfBlockPointer=false,
2753 bool Unqualified = false, bool BlockReturnType = false);
2754 QualType mergeFunctionTypes(QualType, QualType, bool OfBlockPointer=false,
2755 bool Unqualified = false, bool AllowCXX = false);
2756 QualType mergeFunctionParameterTypes(QualType, QualType,
2757 bool OfBlockPointer = false,
2758 bool Unqualified = false);
2759 QualType mergeTransparentUnionType(QualType, QualType,
2760 bool OfBlockPointer=false,
2761 bool Unqualified = false);
2762
2763 QualType mergeObjCGCQualifiers(QualType, QualType);
2764
2765 /// This function merges the ExtParameterInfo lists of two functions. It
2766 /// returns true if the lists are compatible. The merged list is returned in
2767 /// NewParamInfos.
2768 ///
2769 /// \param FirstFnType The type of the first function.
2770 ///
2771 /// \param SecondFnType The type of the second function.
2772 ///
2773 /// \param CanUseFirst This flag is set to true if the first function's
2774 /// ExtParameterInfo list can be used as the composite list of
2775 /// ExtParameterInfo.
2776 ///
2777 /// \param CanUseSecond This flag is set to true if the second function's
2778 /// ExtParameterInfo list can be used as the composite list of
2779 /// ExtParameterInfo.
2780 ///
2781 /// \param NewParamInfos The composite list of ExtParameterInfo. The list is
2782 /// empty if none of the flags are set.
2783 ///
2784 bool mergeExtParameterInfo(
2785 const FunctionProtoType *FirstFnType,
2786 const FunctionProtoType *SecondFnType,
2787 bool &CanUseFirst, bool &CanUseSecond,
2788 SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &NewParamInfos);
2789
2790 void ResetObjCLayout(const ObjCContainerDecl *CD);
2791
2792 //===--------------------------------------------------------------------===//
2793 // Integer Predicates
2794 //===--------------------------------------------------------------------===//
2795
2796 // The width of an integer, as defined in C99 6.2.6.2. This is the number
2797 // of bits in an integer type excluding any padding bits.
2798 unsigned getIntWidth(QualType T) const;
2799
2800 // Per C99 6.2.5p6, for every signed integer type, there is a corresponding
2801 // unsigned integer type. This method takes a signed type, and returns the
2802 // corresponding unsigned integer type.
2803 // With the introduction of fixed point types in ISO N1169, this method also
2804 // accepts fixed point types and returns the corresponding unsigned type for
2805 // a given fixed point type.
2806 QualType getCorrespondingUnsignedType(QualType T) const;
2807
2808 // Per C99 6.2.5p6, for every signed integer type, there is a corresponding
2809 // unsigned integer type. This method takes an unsigned type, and returns the
2810 // corresponding signed integer type.
2811 // With the introduction of fixed point types in ISO N1169, this method also
2812 // accepts fixed point types and returns the corresponding signed type for
2813 // a given fixed point type.
2814 QualType getCorrespondingSignedType(QualType T) const;
2815
2816 // Per ISO N1169, this method accepts fixed point types and returns the
2817 // corresponding saturated type for a given fixed point type.
2818 QualType getCorrespondingSaturatedType(QualType Ty) const;
2819
2820 // This method accepts fixed point types and returns the corresponding signed
2821 // type. Unlike getCorrespondingUnsignedType(), this only accepts unsigned
2822 // fixed point types because there are unsigned integer types like bool and
2823 // char8_t that don't have signed equivalents.
2824 QualType getCorrespondingSignedFixedPointType(QualType Ty) const;
2825
2826 //===--------------------------------------------------------------------===//
2827 // Integer Values
2828 //===--------------------------------------------------------------------===//
2829
2830 /// Make an APSInt of the appropriate width and signedness for the
2831 /// given \p Value and integer \p Type.
2832 llvm::APSInt MakeIntValue(uint64_t Value, QualType Type) const {
2833 // If Type is a signed integer type larger than 64 bits, we need to be sure
2834 // to sign extend Res appropriately.
2835 llvm::APSInt Res(64, !Type->isSignedIntegerOrEnumerationType());
2836 Res = Value;
2837 unsigned Width = getIntWidth(Type);
2838 if (Width != Res.getBitWidth())
2839 return Res.extOrTrunc(Width);
2840 return Res;
2841 }
2842
2843 bool isSentinelNullExpr(const Expr *E);
2844
2845 /// Get the implementation of the ObjCInterfaceDecl \p D, or nullptr if
2846 /// none exists.
2847 ObjCImplementationDecl *getObjCImplementation(ObjCInterfaceDecl *D);
2848
2849 /// Get the implementation of the ObjCCategoryDecl \p D, or nullptr if
2850 /// none exists.
2851 ObjCCategoryImplDecl *getObjCImplementation(ObjCCategoryDecl *D);
2852
2853 /// Return true if there is at least one \@implementation in the TU.
bool AnyObjCImplementation() {
  // ObjCImpls maps ObjC interface/category decls to their @implementation;
  // a non-empty map means this TU contains at least one @implementation.
  return !ObjCImpls.empty();
}
2857
2858 /// Set the implementation of ObjCInterfaceDecl.
2859 void setObjCImplementation(ObjCInterfaceDecl *IFaceD,
2860 ObjCImplementationDecl *ImplD);
2861
2862 /// Set the implementation of ObjCCategoryDecl.
2863 void setObjCImplementation(ObjCCategoryDecl *CatD,
2864 ObjCCategoryImplDecl *ImplD);
2865
2866 /// Get the duplicate declaration of a ObjCMethod in the same
2867 /// interface, or null if none exists.
2868 const ObjCMethodDecl *
2869 getObjCMethodRedeclaration(const ObjCMethodDecl *MD) const;
2870
2871 void setObjCMethodRedeclaration(const ObjCMethodDecl *MD,
2872 const ObjCMethodDecl *Redecl);
2873
2874 /// Returns the Objective-C interface that \p ND belongs to if it is
2875 /// an Objective-C method/property/ivar etc. that is part of an interface,
2876 /// otherwise returns null.
2877 const ObjCInterfaceDecl *getObjContainingInterface(const NamedDecl *ND) const;
2878
2879 /// Set the copy initialization expression of a block var decl. \p CanThrow
2880 /// indicates whether the copy expression can throw or not.
2881 void setBlockVarCopyInit(const VarDecl* VD, Expr *CopyExpr, bool CanThrow);
2882
2883 /// Get the copy initialization expression of the VarDecl \p VD, or
2884 /// nullptr if none exists.
2885 BlockVarCopyInit getBlockVarCopyInit(const VarDecl* VD) const;
2886
2887 /// Allocate an uninitialized TypeSourceInfo.
2888 ///
2889 /// The caller should initialize the memory held by TypeSourceInfo using
2890 /// the TypeLoc wrappers.
2891 ///
2892 /// \param T the type that will be the basis for type source info. This type
2893 /// should refer to how the declarator was written in source code, not to
2894 /// what type semantic analysis resolved the declarator to.
2895 ///
2896 /// \param Size the size of the type info to create, or 0 if the size
2897 /// should be calculated based on the type.
2898 TypeSourceInfo *CreateTypeSourceInfo(QualType T, unsigned Size = 0) const;
2899
2900 /// Allocate a TypeSourceInfo where all locations have been
2901 /// initialized to a given location, which defaults to the empty
2902 /// location.
2903 TypeSourceInfo *
2904 getTrivialTypeSourceInfo(QualType T,
2905 SourceLocation Loc = SourceLocation()) const;
2906
2907 /// Add a deallocation callback that will be invoked when the
2908 /// ASTContext is destroyed.
2909 ///
2910 /// \param Callback A callback function that will be invoked on destruction.
2911 ///
2912 /// \param Data Pointer data that will be provided to the callback function
2913 /// when it is called.
2914 void AddDeallocation(void (*Callback)(void *), void *Data) const;
2915
2916 /// If T isn't trivially destructible, calls AddDeallocation to register it
2917 /// for destruction.
2918 template <typename T> void addDestruction(T *Ptr) const {
2919 if (!std::is_trivially_destructible<T>::value) {
2920 auto DestroyPtr = [](void *V) { static_cast<T *>(V)->~T(); };
2921 AddDeallocation(DestroyPtr, Ptr);
2922 }
2923 }
2924
2925 GVALinkage GetGVALinkageForFunction(const FunctionDecl *FD) const;
2926 GVALinkage GetGVALinkageForVariable(const VarDecl *VD);
2927
2928 /// Determines if the decl can be CodeGen'ed or deserialized from PCH
2929 /// lazily, only when used; this is only relevant for function or file scoped
2930 /// var definitions.
2931 ///
2932 /// \returns true if the function/var must be CodeGen'ed/deserialized even if
2933 /// it is not used.
2934 bool DeclMustBeEmitted(const Decl *D);
2935
2936 /// Visits all versions of a multiversioned function with the passed
2937 /// predicate.
2938 void forEachMultiversionedFunctionVersion(
2939 const FunctionDecl *FD,
2940 llvm::function_ref<void(FunctionDecl *)> Pred) const;
2941
2942 const CXXConstructorDecl *
2943 getCopyConstructorForExceptionObject(CXXRecordDecl *RD);
2944
2945 void addCopyConstructorForExceptionObject(CXXRecordDecl *RD,
2946 CXXConstructorDecl *CD);
2947
2948 void addTypedefNameForUnnamedTagDecl(TagDecl *TD, TypedefNameDecl *TND);
2949
2950 TypedefNameDecl *getTypedefNameForUnnamedTagDecl(const TagDecl *TD);
2951
2952 void addDeclaratorForUnnamedTagDecl(TagDecl *TD, DeclaratorDecl *DD);
2953
2954 DeclaratorDecl *getDeclaratorForUnnamedTagDecl(const TagDecl *TD);
2955
2956 void setManglingNumber(const NamedDecl *ND, unsigned Number);
2957 unsigned getManglingNumber(const NamedDecl *ND) const;
2958
2959 void setStaticLocalNumber(const VarDecl *VD, unsigned Number);
2960 unsigned getStaticLocalNumber(const VarDecl *VD) const;
2961
2962 /// Retrieve the context for computing mangling numbers in the given
2963 /// DeclContext.
2964 MangleNumberingContext &getManglingNumberContext(const DeclContext *DC);
2965 enum NeedExtraManglingDecl_t { NeedExtraManglingDecl };
2966 MangleNumberingContext &getManglingNumberContext(NeedExtraManglingDecl_t,
2967 const Decl *D);
2968
2969 std::unique_ptr<MangleNumberingContext> createMangleNumberingContext() const;
2970
2971 /// Used by ParmVarDecl to store on the side the
2972 /// index of the parameter when it exceeds the size of the normal bitfield.
2973 void setParameterIndex(const ParmVarDecl *D, unsigned index);
2974
2975 /// Used by ParmVarDecl to retrieve on the side the
2976 /// index of the parameter when it exceeds the size of the normal bitfield.
2977 unsigned getParameterIndex(const ParmVarDecl *D) const;
2978
2979 /// Return a string representing the human readable name for the specified
2980 /// function declaration or file name. Used by SourceLocExpr and
2981 /// PredefinedExpr to cache evaluated results.
2982 StringLiteral *getPredefinedStringLiteralFromCache(StringRef Key) const;
2983
2984 /// Return a declaration for the global GUID object representing the given
2985 /// GUID value.
2986 MSGuidDecl *getMSGuidDecl(MSGuidDeclParts Parts) const;
2987
2988 /// Return the template parameter object of the given type with the given
2989 /// value.
2990 TemplateParamObjectDecl *getTemplateParamObjectDecl(QualType T,
2991 const APValue &V) const;
2992
2993 /// Parses the target attributes passed in, and returns only the ones that are
2994 /// valid feature names.
2995 ParsedTargetAttr filterFunctionTargetAttrs(const TargetAttr *TD) const;
2996
2997 void getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
2998 const FunctionDecl *) const;
2999 void getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
3000 GlobalDecl GD) const;
3001
3002 //===--------------------------------------------------------------------===//
3003 // Statistics
3004 //===--------------------------------------------------------------------===//
3005
3006 /// The number of implicitly-declared default constructors.
3007 unsigned NumImplicitDefaultConstructors = 0;
3008
3009 /// The number of implicitly-declared default constructors for
3010 /// which declarations were built.
3011 unsigned NumImplicitDefaultConstructorsDeclared = 0;
3012
3013 /// The number of implicitly-declared copy constructors.
3014 unsigned NumImplicitCopyConstructors = 0;
3015
3016 /// The number of implicitly-declared copy constructors for
3017 /// which declarations were built.
3018 unsigned NumImplicitCopyConstructorsDeclared = 0;
3019
3020 /// The number of implicitly-declared move constructors.
3021 unsigned NumImplicitMoveConstructors = 0;
3022
3023 /// The number of implicitly-declared move constructors for
3024 /// which declarations were built.
3025 unsigned NumImplicitMoveConstructorsDeclared = 0;
3026
3027 /// The number of implicitly-declared copy assignment operators.
3028 unsigned NumImplicitCopyAssignmentOperators = 0;
3029
3030 /// The number of implicitly-declared copy assignment operators for
3031 /// which declarations were built.
3032 unsigned NumImplicitCopyAssignmentOperatorsDeclared = 0;
3033
3034 /// The number of implicitly-declared move assignment operators.
3035 unsigned NumImplicitMoveAssignmentOperators = 0;
3036
3037 /// The number of implicitly-declared move assignment operators for
3038 /// which declarations were built.
3039 unsigned NumImplicitMoveAssignmentOperatorsDeclared = 0;
3040
3041 /// The number of implicitly-declared destructors.
3042 unsigned NumImplicitDestructors = 0;
3043
3044 /// The number of implicitly-declared destructors for which
3045 /// declarations were built.
3046 unsigned NumImplicitDestructorsDeclared = 0;
3047
3048public:
3049 /// Initialize built-in types.
3050 ///
3051 /// This routine may only be invoked once for a given ASTContext object.
3052 /// It is normally invoked after ASTContext construction.
3053 ///
3054 /// \param Target The target
3055 void InitBuiltinTypes(const TargetInfo &Target,
3056 const TargetInfo *AuxTarget = nullptr);
3057
3058private:
3059 void InitBuiltinType(CanQualType &R, BuiltinType::Kind K);
3060
  // Small bit-set of options controlling Objective-C type encoding
  // (see getObjCEncodingForTypeImpl below).
  class ObjCEncOptions {
    // Bit set built from the OPT_LIST flags below.
    unsigned Bits;

    // Private: used internally to rebuild an option set from raw bits.
    ObjCEncOptions(unsigned Bits) : Bits(Bits) {}

  public:
    ObjCEncOptions() : Bits(0) {}
    ObjCEncOptions(const ObjCEncOptions &RHS) : Bits(RHS.Bits) {}

// One V(Name, BitIndex) entry per encoding option; the two V expansions
// below generate a chainable setter (set##Name) and a predicate (Name())
// for each entry.
#define OPT_LIST(V) \
  V(ExpandPointedToStructures, 0) \
  V(ExpandStructures, 1) \
  V(IsOutermostType, 2) \
  V(EncodingProperty, 3) \
  V(IsStructField, 4) \
  V(EncodeBlockParameters, 5) \
  V(EncodeClassNames, 6) \

// Setters: each sets its bit and returns *this for chaining.
#define V(N,I) ObjCEncOptions& set##N() { Bits |= 1 << I; return *this; }
OPT_LIST(V)
#undef V

// Predicates: each tests its bit.
#define V(N,I) bool N() const { return Bits & 1 << I; }
OPT_LIST(V)
#undef V

#undef OPT_LIST

    // Return a copy retaining only the bits that are also set in Mask.
    // NOTE(review): "LLVM_NODISCARD[[clang::warn_unused_result]]" is how the
    // macro appears expanded in this dump; upstream spells it LLVM_NODISCARD
    // only — confirm against the original header before editing.
    LLVM_NODISCARD[[clang::warn_unused_result]] ObjCEncOptions keepingOnly(ObjCEncOptions Mask) const {
      return Bits & Mask.Bits;
    }

    // Options to propagate when encoding a component (element/field) type:
    // drops the flags that only apply to the outermost type.
    LLVM_NODISCARD[[clang::warn_unused_result]] ObjCEncOptions forComponentType() const {
      ObjCEncOptions Mask = ObjCEncOptions()
                                .setIsOutermostType()
                                .setIsStructField();
      return Bits & ~Mask.Bits;
    }
  };
3100
3101 // Return the Objective-C type encoding for a given type.
3102 void getObjCEncodingForTypeImpl(QualType t, std::string &S,
3103 ObjCEncOptions Options,
3104 const FieldDecl *Field,
3105 QualType *NotEncodedT = nullptr) const;
3106
3107 // Adds the encoding of the structure's members.
3108 void getObjCEncodingForStructureImpl(RecordDecl *RD, std::string &S,
3109 const FieldDecl *Field,
3110 bool includeVBases = true,
3111 QualType *NotEncodedT=nullptr) const;
3112
3113public:
3114 // Adds the encoding of a method parameter or return type.
3115 void getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT,
3116 QualType T, std::string& S,
3117 bool Extended) const;
3118
3119 /// Returns true if this is an inline-initialized static data member
3120 /// which is treated as a definition for MSVC compatibility.
3121 bool isMSStaticDataMemberInlineDefinition(const VarDecl *VD) const;
3122
3123 enum class InlineVariableDefinitionKind {
3124 /// Not an inline variable.
3125 None,
3126
3127 /// Weak definition of inline variable.
3128 Weak,
3129
3130 /// Weak for now, might become strong later in this TU.
3131 WeakUnknown,
3132
3133 /// Strong definition.
3134 Strong
3135 };
3136
3137 /// Determine whether a definition of this inline variable should
3138 /// be treated as a weak or strong definition. For compatibility with
3139 /// C++14 and before, for a constexpr static data member, if there is an
3140 /// out-of-line declaration of the member, we may promote it from weak to
3141 /// strong.
3142 InlineVariableDefinitionKind
3143 getInlineVariableDefinitionKind(const VarDecl *VD) const;
3144
3145private:
3146 friend class DeclarationNameTable;
3147 friend class DeclContext;
3148
3149 const ASTRecordLayout &
3150 getObjCLayout(const ObjCInterfaceDecl *D,
3151 const ObjCImplementationDecl *Impl) const;
3152
3153 /// A set of deallocations that should be performed when the
3154 /// ASTContext is destroyed.
3155 // FIXME: We really should have a better mechanism in the ASTContext to
3156 // manage running destructors for types which do variable sized allocation
3157 // within the AST. In some places we thread the AST bump pointer allocator
3158 // into the datastructures which avoids this mess during deallocation but is
3159 // wasteful of memory, and here we require a lot of error prone book keeping
3160 // in order to track and run destructors while we're tearing things down.
3161 using DeallocationFunctionsAndArguments =
3162 llvm::SmallVector<std::pair<void (*)(void *), void *>, 16>;
3163 mutable DeallocationFunctionsAndArguments Deallocations;
3164
3165 // FIXME: This currently contains the set of StoredDeclMaps used
3166 // by DeclContext objects. This probably should not be in ASTContext,
3167 // but we include it here so that ASTContext can quickly deallocate them.
3168 llvm::PointerIntPair<StoredDeclsMap *, 1> LastSDM;
3169
3170 std::vector<Decl *> TraversalScope;
3171
3172 std::unique_ptr<VTableContextBase> VTContext;
3173
3174 void ReleaseDeclContextMaps();
3175
3176public:
3177 enum PragmaSectionFlag : unsigned {
3178 PSF_None = 0,
3179 PSF_Read = 0x1,
3180 PSF_Write = 0x2,
3181 PSF_Execute = 0x4,
3182 PSF_Implicit = 0x8,
3183 PSF_ZeroInit = 0x10,
3184 PSF_Invalid = 0x80000000U,
3185 };
3186
  // Record of a named section introduced via '#pragma section' (stored in
  // the SectionInfos string map below, keyed by section name).
  struct SectionInfo {
    // NOTE(review): presumably the declaration associated with this section,
    // kept for diagnostics — confirm against users of SectionInfos.
    NamedDecl *Decl;
    // Location of the pragma that declared the section.
    SourceLocation PragmaSectionLocation;
    // Bitwise-or of PragmaSectionFlag values.
    int SectionFlags;

    SectionInfo() = default;
    SectionInfo(NamedDecl *Decl, SourceLocation PragmaSectionLocation,
                int SectionFlags)
        : Decl(Decl), PragmaSectionLocation(PragmaSectionLocation),
          SectionFlags(SectionFlags) {}
  };
3198
3199 llvm::StringMap<SectionInfo> SectionInfos;
3200
3201 /// Return a new OMPTraitInfo object owned by this context.
3202 OMPTraitInfo &getNewOMPTraitInfo();
3203
3204 /// Whether a C++ static variable may be externalized.
3205 bool mayExternalizeStaticVar(const Decl *D) const;
3206
3207 /// Whether a C++ static variable should be externalized.
3208 bool shouldExternalizeStaticVar(const Decl *D) const;
3209
3210 StringRef getCUIDHash() const;
3211
3212 void AddSYCLKernelNamingDecl(const CXXRecordDecl *RD);
3213 bool IsSYCLKernelNamingDecl(const NamedDecl *RD) const;
3214 unsigned GetSYCLKernelNamingIndex(const NamedDecl *RD);
3215 /// A SourceLocation to store whether we have evaluated a kernel name already,
3216 /// and where it happened. If so, we need to diagnose an illegal use of the
3217 /// builtin.
3218 llvm::MapVector<const SYCLUniqueStableNameExpr *, std::string>
3219 SYCLUniqueStableNameEvaluatedValues;
3220
3221private:
3222 /// All OMPTraitInfo objects live in this collection, one per
3223 /// `pragma omp [begin] declare variant` directive.
3224 SmallVector<std::unique_ptr<OMPTraitInfo>, 4> OMPTraitInfoVector;
3225
3226 /// A list of the (right now just lambda decls) declarations required to
3227 /// name all the SYCL kernels in the translation unit, so that we can get the
3228 /// correct kernel name, as well as implement
3229 /// __builtin_sycl_unique_stable_name.
3230 llvm::DenseMap<const DeclContext *,
3231 llvm::SmallPtrSet<const CXXRecordDecl *, 4>>
3232 SYCLKernelNamingTypes;
3233 std::unique_ptr<ItaniumMangleContext> SYCLKernelFilterContext;
3234 void FilterSYCLKernelNamingDecls(
3235 const CXXRecordDecl *RD,
3236 llvm::SmallVectorImpl<const CXXRecordDecl *> &Decls);
3237};
3238
3239/// Insertion operator for diagnostics.
3240const StreamingDiagnostic &operator<<(const StreamingDiagnostic &DB,
3241 const ASTContext::SectionInfo &Section);
3242
3243/// Utility function for constructing a nullary selector.
3244inline Selector GetNullarySelector(StringRef name, ASTContext &Ctx) {
3245 IdentifierInfo* II = &Ctx.Idents.get(name);
3246 return Ctx.Selectors.getSelector(0, &II);
3247}
3248
3249/// Utility function for constructing an unary selector.
3250inline Selector GetUnarySelector(StringRef name, ASTContext &Ctx) {
3251 IdentifierInfo* II = &Ctx.Idents.get(name);
3252 return Ctx.Selectors.getSelector(1, &II);
3253}
3254
3255} // namespace clang
3256
3257// operator new and delete aren't allowed inside namespaces.
3258
3259/// Placement new for using the ASTContext's allocator.
3260///
3261/// This placement form of operator new uses the ASTContext's allocator for
3262/// obtaining memory.
3263///
3264/// IMPORTANT: These are also declared in clang/AST/ASTContextAllocate.h!
3265/// Any changes here need to also be made there.
3266///
3267/// We intentionally avoid using a nothrow specification here so that the calls
3268/// to this operator will not perform a null check on the result -- the
3269/// underlying allocator never returns null pointers.
3270///
3271/// Usage looks like this (assuming there's an ASTContext 'Context' in scope):
3272/// @code
3273/// // Default alignment (8)
3274/// IntegerLiteral *Ex = new (Context) IntegerLiteral(arguments);
3275/// // Specific alignment
3276/// IntegerLiteral *Ex2 = new (Context, 4) IntegerLiteral(arguments);
3277/// @endcode
3278/// Memory allocated through this placement new operator does not need to be
3279/// explicitly freed, as ASTContext will free all of this memory when it gets
3280/// destroyed. Please note that you cannot use delete on the pointer.
3281///
3282/// @param Bytes The number of bytes to allocate. Calculated by the compiler.
3283/// @param C The ASTContext that provides the allocator.
3284/// @param Alignment The alignment of the allocated memory (if the underlying
3285/// allocator supports it).
3286/// @return The allocated memory. Could be nullptr.
inline void *operator new(size_t Bytes, const clang::ASTContext &C,
                          size_t Alignment /* = 8 */) {
  // Forward to the ASTContext allocator; per the contract documented above,
  // it never returns null, so callers skip the null check.
  return C.Allocate(Bytes, Alignment);
}
3291
3292/// Placement delete companion to the new above.
3293///
3294/// This operator is just a companion to the new above. There is no way of
3295/// invoking it directly; see the new operator for more details. This operator
3296/// is called implicitly by the compiler if a placement new expression using
3297/// the ASTContext throws in the object constructor.
inline void operator delete(void *Ptr, const clang::ASTContext &C, size_t) {
  // Invoked by the compiler only when a 'new (Context) T(...)' constructor
  // throws; returns the memory to the ASTContext allocator.
  C.Deallocate(Ptr);
}
3301
3302/// This placement form of operator new[] uses the ASTContext's allocator for
3303/// obtaining memory.
3304///
3305/// We intentionally avoid using a nothrow specification here so that the calls
3306/// to this operator will not perform a null check on the result -- the
3307/// underlying allocator never returns null pointers.
3308///
3309/// Usage looks like this (assuming there's an ASTContext 'Context' in scope):
3310/// @code
3311/// // Default alignment (8)
3312/// char *data = new (Context) char[10];
3313/// // Specific alignment
3314/// char *data = new (Context, 4) char[10];
3315/// @endcode
3316/// Memory allocated through this placement new[] operator does not need to be
3317/// explicitly freed, as ASTContext will free all of this memory when it gets
3318/// destroyed. Please note that you cannot use delete on the pointer.
3319///
3320/// @param Bytes The number of bytes to allocate. Calculated by the compiler.
3321/// @param C The ASTContext that provides the allocator.
3322/// @param Alignment The alignment of the allocated memory (if the underlying
3323/// allocator supports it).
3324/// @return The allocated memory. Could be nullptr.
inline void *operator new[](size_t Bytes, const clang::ASTContext& C,
                            size_t Alignment /* = 8 */) {
  // Array form of the placement new above; same allocator, same
  // never-null contract.
  return C.Allocate(Bytes, Alignment);
}
3329
3330/// Placement delete[] companion to the new[] above.
3331///
3332/// This operator is just a companion to the new[] above. There is no way of
3333/// invoking it directly; see the new[] operator for more details. This operator
3334/// is called implicitly by the compiler if a placement new[] expression using
3335/// the ASTContext throws in the object constructor.
inline void operator delete[](void *Ptr, const clang::ASTContext &C, size_t) {
  // Invoked by the compiler only when a placement new[] constructor throws;
  // returns the memory to the ASTContext allocator.
  C.Deallocate(Ptr);
}
3339
3340/// Create the representation of a LazyGenerationalUpdatePtr.
3341template <typename Owner, typename T,
3342 void (clang::ExternalASTSource::*Update)(Owner)>
3343typename clang::LazyGenerationalUpdatePtr<Owner, T, Update>::ValueType
3344 clang::LazyGenerationalUpdatePtr<Owner, T, Update>::makeValue(
3345 const clang::ASTContext &Ctx, T Value) {
3346 // Note, this is implemented here so that ExternalASTSource.h doesn't need to
3347 // include ASTContext.h. We explicitly instantiate it for all relevant types
3348 // in ASTContext.cpp.
3349 if (auto *Source = Ctx.getExternalSource())
3350 return new (Ctx) LazyData(Source, Value);
3351 return Value;
3352}
3353
3354#endif // LLVM_CLANG_AST_ASTCONTEXT_H

/usr/src/gnu/usr.bin/clang/libclangCodeGen/../../../llvm/clang/include/clang/AST/Type.h

1//===- Type.h - C Language Family Type Representation -----------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// C Language Family Type Representation
11///
12/// This file defines the clang::Type interface and subclasses, used to
13/// represent types for languages in the C family.
14//
15//===----------------------------------------------------------------------===//
16
17#ifndef LLVM_CLANG_AST_TYPE_H
18#define LLVM_CLANG_AST_TYPE_H
19
20#include "clang/AST/DependenceFlags.h"
21#include "clang/AST/NestedNameSpecifier.h"
22#include "clang/AST/TemplateName.h"
23#include "clang/Basic/AddressSpaces.h"
24#include "clang/Basic/AttrKinds.h"
25#include "clang/Basic/Diagnostic.h"
26#include "clang/Basic/ExceptionSpecificationType.h"
27#include "clang/Basic/LLVM.h"
28#include "clang/Basic/Linkage.h"
29#include "clang/Basic/PartialDiagnostic.h"
30#include "clang/Basic/SourceLocation.h"
31#include "clang/Basic/Specifiers.h"
32#include "clang/Basic/Visibility.h"
33#include "llvm/ADT/APInt.h"
34#include "llvm/ADT/APSInt.h"
35#include "llvm/ADT/ArrayRef.h"
36#include "llvm/ADT/FoldingSet.h"
37#include "llvm/ADT/None.h"
38#include "llvm/ADT/Optional.h"
39#include "llvm/ADT/PointerIntPair.h"
40#include "llvm/ADT/PointerUnion.h"
41#include "llvm/ADT/StringRef.h"
42#include "llvm/ADT/Twine.h"
43#include "llvm/ADT/iterator_range.h"
44#include "llvm/Support/Casting.h"
45#include "llvm/Support/Compiler.h"
46#include "llvm/Support/ErrorHandling.h"
47#include "llvm/Support/PointerLikeTypeTraits.h"
48#include "llvm/Support/TrailingObjects.h"
49#include "llvm/Support/type_traits.h"
50#include <cassert>
51#include <cstddef>
52#include <cstdint>
53#include <cstring>
54#include <string>
55#include <type_traits>
56#include <utility>
57
58namespace clang {
59
60class ExtQuals;
61class QualType;
62class ConceptDecl;
63class TagDecl;
64class TemplateParameterList;
65class Type;
66
67enum {
68 TypeAlignmentInBits = 4,
69 TypeAlignment = 1 << TypeAlignmentInBits
70};
71
72namespace serialization {
73 template <class T> class AbstractTypeReader;
74 template <class T> class AbstractTypeWriter;
75}
76
77} // namespace clang
78
79namespace llvm {
80
81 template <typename T>
82 struct PointerLikeTypeTraits;
  // Teach LLVM's pointer-traits machinery about clang::Type* so it can be
  // stored in PointerIntPair/PointerUnion with tag bits.
  template<>
  struct PointerLikeTypeTraits< ::clang::Type*> {
    // Type* round-trips losslessly through void*.
    static inline void *getAsVoidPointer(::clang::Type *P) { return P; }

    static inline ::clang::Type *getFromVoidPointer(void *P) {
      return static_cast< ::clang::Type*>(P);
    }

    // Types are aligned to 1 << TypeAlignmentInBits (16 bytes), so the low
    // 4 bits of a Type* are free for tagging.
    static constexpr int NumLowBitsAvailable = clang::TypeAlignmentInBits;
  };
93
  // Same pointer-traits specialization for clang::ExtQuals*, which shares
  // the TypeAlignment guarantee and hence the same number of free low bits.
  template<>
  struct PointerLikeTypeTraits< ::clang::ExtQuals*> {
    static inline void *getAsVoidPointer(::clang::ExtQuals *P) { return P; }

    static inline ::clang::ExtQuals *getFromVoidPointer(void *P) {
      return static_cast< ::clang::ExtQuals*>(P);
    }

    static constexpr int NumLowBitsAvailable = clang::TypeAlignmentInBits;
  };
104
105} // namespace llvm
106
107namespace clang {
108
109class ASTContext;
110template <typename> class CanQual;
111class CXXRecordDecl;
112class DeclContext;
113class EnumDecl;
114class Expr;
115class ExtQualsTypeCommonBase;
116class FunctionDecl;
117class IdentifierInfo;
118class NamedDecl;
119class ObjCInterfaceDecl;
120class ObjCProtocolDecl;
121class ObjCTypeParamDecl;
122struct PrintingPolicy;
123class RecordDecl;
124class Stmt;
125class TagDecl;
126class TemplateArgument;
127class TemplateArgumentListInfo;
128class TemplateArgumentLoc;
129class TemplateTypeParmDecl;
130class TypedefNameDecl;
131class UnresolvedUsingTypenameDecl;
132
133using CanQualType = CanQual<Type>;
134
135// Provide forward declarations for all of the *Type classes.
136#define TYPE(Class, Base) class Class##Type;
137#include "clang/AST/TypeNodes.inc"
138
139/// The collection of all-type qualifiers we support.
140/// Clang supports five independent qualifiers:
141/// * C99: const, volatile, and restrict
142/// * MS: __unaligned
143/// * Embedded C (TR18037): address spaces
144/// * Objective C: the GC attributes (none, weak, or strong)
145class Qualifiers {
146public:
147 enum TQ { // NOTE: These flags must be kept in sync with DeclSpec::TQ.
148 Const = 0x1,
149 Restrict = 0x2,
150 Volatile = 0x4,
151 CVRMask = Const | Volatile | Restrict
152 };
153
154 enum GC {
155 GCNone = 0,
156 Weak,
157 Strong
158 };
159
160 enum ObjCLifetime {
161 /// There is no lifetime qualification on this type.
162 OCL_None,
163
164 /// This object can be modified without requiring retains or
165 /// releases.
166 OCL_ExplicitNone,
167
168 /// Assigning into this object requires the old value to be
169 /// released and the new value to be retained. The timing of the
170 /// release of the old value is inexact: it may be moved to
171 /// immediately after the last known point where the value is
172 /// live.
173 OCL_Strong,
174
175 /// Reading or writing from this object requires a barrier call.
176 OCL_Weak,
177
178 /// Assigning into this object requires a lifetime extension.
179 OCL_Autoreleasing
180 };
181
182 enum {
183 /// The maximum supported address space number.
184 /// 23 bits should be enough for anyone.
185 MaxAddressSpace = 0x7fffffu,
186
187 /// The width of the "fast" qualifier mask.
188 FastWidth = 3,
189
190 /// The fast qualifier mask.
191 FastMask = (1 << FastWidth) - 1
192 };
193
194 /// Returns the common set of qualifiers while removing them from
195 /// the given sets.
196 static Qualifiers removeCommonQualifiers(Qualifiers &L, Qualifiers &R) {
197 // If both are only CVR-qualified, bit operations are sufficient.
198 if (!(L.Mask & ~CVRMask) && !(R.Mask & ~CVRMask)) {
199 Qualifiers Q;
200 Q.Mask = L.Mask & R.Mask;
201 L.Mask &= ~Q.Mask;
202 R.Mask &= ~Q.Mask;
203 return Q;
204 }
205
206 Qualifiers Q;
207 unsigned CommonCRV = L.getCVRQualifiers() & R.getCVRQualifiers();
208 Q.addCVRQualifiers(CommonCRV);
209 L.removeCVRQualifiers(CommonCRV);
210 R.removeCVRQualifiers(CommonCRV);
211
212 if (L.getObjCGCAttr() == R.getObjCGCAttr()) {
213 Q.setObjCGCAttr(L.getObjCGCAttr());
214 L.removeObjCGCAttr();
215 R.removeObjCGCAttr();
216 }
217
218 if (L.getObjCLifetime() == R.getObjCLifetime()) {
219 Q.setObjCLifetime(L.getObjCLifetime());
220 L.removeObjCLifetime();
221 R.removeObjCLifetime();
222 }
223
224 if (L.getAddressSpace() == R.getAddressSpace()) {
225 Q.setAddressSpace(L.getAddressSpace());
226 L.removeAddressSpace();
227 R.removeAddressSpace();
228 }
229 return Q;
230 }
231
232 static Qualifiers fromFastMask(unsigned Mask) {
233 Qualifiers Qs;
234 Qs.addFastQualifiers(Mask);
235 return Qs;
236 }
237
238 static Qualifiers fromCVRMask(unsigned CVR) {
239 Qualifiers Qs;
240 Qs.addCVRQualifiers(CVR);
241 return Qs;
242 }
243
244 static Qualifiers fromCVRUMask(unsigned CVRU) {
245 Qualifiers Qs;
246 Qs.addCVRUQualifiers(CVRU);
247 return Qs;
248 }
249
250 // Deserialize qualifiers from an opaque representation.
251 static Qualifiers fromOpaqueValue(unsigned opaque) {
252 Qualifiers Qs;
253 Qs.Mask = opaque;
254 return Qs;
255 }
256
257 // Serialize these qualifiers into an opaque representation.
258 unsigned getAsOpaqueValue() const {
259 return Mask;
260 }
261
262 bool hasConst() const { return Mask & Const; }
263 bool hasOnlyConst() const { return Mask == Const; }
264 void removeConst() { Mask &= ~Const; }
265 void addConst() { Mask |= Const; }
266
267 bool hasVolatile() const { return Mask & Volatile; }
268 bool hasOnlyVolatile() const { return Mask == Volatile; }
269 void removeVolatile() { Mask &= ~Volatile; }
270 void addVolatile() { Mask |= Volatile; }
271
272 bool hasRestrict() const { return Mask & Restrict; }
273 bool hasOnlyRestrict() const { return Mask == Restrict; }
274 void removeRestrict() { Mask &= ~Restrict; }
275 void addRestrict() { Mask |= Restrict; }
276
277 bool hasCVRQualifiers() const { return getCVRQualifiers(); }
278 unsigned getCVRQualifiers() const { return Mask & CVRMask; }
279 unsigned getCVRUQualifiers() const { return Mask & (CVRMask | UMask); }
280
281 void setCVRQualifiers(unsigned mask) {
282 assert(!(mask & ~CVRMask) && "bitmask contains non-CVR bits")((void)0); // NDEBUG build: asserts expand to ((void)0) in this listing
283 Mask = (Mask & ~CVRMask) | mask;
284 }
285 void removeCVRQualifiers(unsigned mask) {
286 assert(!(mask & ~CVRMask) && "bitmask contains non-CVR bits")((void)0);
287 Mask &= ~mask;
288 }
289 void removeCVRQualifiers() {
290 removeCVRQualifiers(CVRMask);
291 }
292 void addCVRQualifiers(unsigned mask) {
293 assert(!(mask & ~CVRMask) && "bitmask contains non-CVR bits")((void)0);
294 Mask |= mask;
295 }
296 void addCVRUQualifiers(unsigned mask) {
297 assert(!(mask & ~CVRMask & ~UMask) && "bitmask contains non-CVRU bits")((void)0);
298 Mask |= mask;
299 }
300
301 bool hasUnaligned() const { return Mask & UMask; }
302 void setUnaligned(bool flag) {
303 Mask = (Mask & ~UMask) | (flag ? UMask : 0);
304 }
305 void removeUnaligned() { Mask &= ~UMask; }
306 void addUnaligned() { Mask |= UMask; }
307
308 bool hasObjCGCAttr() const { return Mask & GCAttrMask; }
309 GC getObjCGCAttr() const { return GC((Mask & GCAttrMask) >> GCAttrShift); }
310 void setObjCGCAttr(GC type) {
311 Mask = (Mask & ~GCAttrMask) | (type << GCAttrShift);
312 }
313 void removeObjCGCAttr() { setObjCGCAttr(GCNone); } // GCNone == 0, so this clears the GC bits
314 void addObjCGCAttr(GC type) {
315 assert(type)((void)0);
316 setObjCGCAttr(type);
317 }
318 Qualifiers withoutObjCGCAttr() const {
319 Qualifiers qs = *this;
320 qs.removeObjCGCAttr();
321 return qs;
322 }
323 Qualifiers withoutObjCLifetime() const {
324 Qualifiers qs = *this;
325 qs.removeObjCLifetime();
326 return qs;
327 }
328 Qualifiers withoutAddressSpace() const {
329 Qualifiers qs = *this;
330 qs.removeAddressSpace();
331 return qs;
332 }
333
334 bool hasObjCLifetime() const { return Mask & LifetimeMask; }
335 ObjCLifetime getObjCLifetime() const {
336 return ObjCLifetime((Mask & LifetimeMask) >> LifetimeShift);
337 }
338 void setObjCLifetime(ObjCLifetime type) {
339 Mask = (Mask & ~LifetimeMask) | (type << LifetimeShift);
340 }
341 void removeObjCLifetime() { setObjCLifetime(OCL_None); }
342 void addObjCLifetime(ObjCLifetime type) {
343 assert(type)((void)0);
344 assert(!hasObjCLifetime())((void)0);
345 Mask |= (type << LifetimeShift);
346 }
347
348 /// True if the lifetime is neither None nor ExplicitNone.
349 bool hasNonTrivialObjCLifetime() const {
350 ObjCLifetime lifetime = getObjCLifetime();
351 return (lifetime > OCL_ExplicitNone);
352 }
353
354 /// True if the lifetime is either strong or weak.
355 bool hasStrongOrWeakObjCLifetime() const {
356 ObjCLifetime lifetime = getObjCLifetime();
357 return (lifetime == OCL_Strong || lifetime == OCL_Weak);
358 }
359
360 bool hasAddressSpace() const { return Mask & AddressSpaceMask; }
361 LangAS getAddressSpace() const {
362 return static_cast<LangAS>(Mask >> AddressSpaceShift); // address space occupies all bits above the shift, so no mask is needed
363 }
364 bool hasTargetSpecificAddressSpace() const {
365 return isTargetAddressSpace(getAddressSpace());
366 }
367 /// Get the address space attribute value to be printed by diagnostics.
368 unsigned getAddressSpaceAttributePrintValue() const {
369 auto Addr = getAddressSpace();
370 // This function is not supposed to be used with language specific
371 // address spaces. If that happens, the diagnostic message should consider
372 // printing the QualType instead of the address space value.
373 assert(Addr == LangAS::Default || hasTargetSpecificAddressSpace())((void)0);
374 if (Addr != LangAS::Default)
375 return toTargetAddressSpace(Addr);
376 // TODO: The diagnostic messages where Addr may be 0 should be fixed
377 // since it cannot differentiate the situation where 0 denotes the default
378 // address space or user specified __attribute__((address_space(0))).
379 return 0;
380 }
381 void setAddressSpace(LangAS space) {
382 assert((unsigned)space <= MaxAddressSpace)((void)0);
383 Mask = (Mask & ~AddressSpaceMask)
384 | (((uint32_t) space) << AddressSpaceShift);
385 }
386 void removeAddressSpace() { setAddressSpace(LangAS::Default); }
387 void addAddressSpace(LangAS space) {
388 assert(space != LangAS::Default)((void)0);
389 setAddressSpace(space);
390 }
391
392 // Fast qualifiers are those that can be allocated directly
393 // on a QualType object.
394 bool hasFastQualifiers() const { return getFastQualifiers(); }
395 unsigned getFastQualifiers() const { return Mask & FastMask; }
396 void setFastQualifiers(unsigned mask) {
397 assert(!(mask & ~FastMask) && "bitmask contains non-fast qualifier bits")((void)0);
398 Mask = (Mask & ~FastMask) | mask;
399 }
400 void removeFastQualifiers(unsigned mask) {
401 assert(!(mask & ~FastMask) && "bitmask contains non-fast qualifier bits")((void)0);
402 Mask &= ~mask;
403 }
404 void removeFastQualifiers() {
405 removeFastQualifiers(FastMask);
406 }
407 void addFastQualifiers(unsigned mask) {
408 assert(!(mask & ~FastMask) && "bitmask contains non-fast qualifier bits")((void)0);
409 Mask |= mask;
410 }
411
412 /// Return true if the set contains any qualifiers which require an ExtQuals
413 /// node to be allocated.
414 bool hasNonFastQualifiers() const { return Mask & ~FastMask; }
415 Qualifiers getNonFastQualifiers() const {
416 Qualifiers Quals = *this;
417 Quals.setFastQualifiers(0);
418 return Quals;
419 }
420
421 /// Return true if the set contains any qualifiers.
422 bool hasQualifiers() const { return Mask; }
423 bool empty() const { return !Mask; }
424
425 /// Add the qualifiers from the given set to this set.
426 void addQualifiers(Qualifiers Q) {
427 // If the other set doesn't have any non-boolean qualifiers, just
428 // bit-or it in.
429 if (!(Q.Mask & ~CVRMask))
430 Mask |= Q.Mask;
431 else {
432 Mask |= (Q.Mask & CVRMask);
433 if (Q.hasAddressSpace())
434 addAddressSpace(Q.getAddressSpace());
435 if (Q.hasObjCGCAttr())
436 addObjCGCAttr(Q.getObjCGCAttr());
437 if (Q.hasObjCLifetime())
438 addObjCLifetime(Q.getObjCLifetime());
439 }
440 }
441
442 /// Remove the qualifiers from the given set from this set.
443 void removeQualifiers(Qualifiers Q) {
444 // If the other set doesn't have any non-boolean qualifiers, just
445 // bit-and the inverse in.
446 if (!(Q.Mask & ~CVRMask))
447 Mask &= ~Q.Mask;
448 else {
449 Mask &= ~(Q.Mask & CVRMask);
450 if (getObjCGCAttr() == Q.getObjCGCAttr())
451 removeObjCGCAttr();
452 if (getObjCLifetime() == Q.getObjCLifetime())
453 removeObjCLifetime();
454 if (getAddressSpace() == Q.getAddressSpace())
455 removeAddressSpace();
456 }
457 }
458
459 /// Add the qualifiers from the given set to this set, given that
460 /// they don't conflict.
461 void addConsistentQualifiers(Qualifiers qs) {
462 assert(getAddressSpace() == qs.getAddressSpace() ||((void)0)
463 !hasAddressSpace() || !qs.hasAddressSpace())((void)0);
464 assert(getObjCGCAttr() == qs.getObjCGCAttr() ||((void)0)
465 !hasObjCGCAttr() || !qs.hasObjCGCAttr())((void)0);
466 assert(getObjCLifetime() == qs.getObjCLifetime() ||((void)0)
467 !hasObjCLifetime() || !qs.hasObjCLifetime())((void)0);
468 Mask |= qs.Mask;
469 }
470
471 /// Returns true if address space A is equal to or a superset of B.
472 /// OpenCL v2.0 defines conversion rules (OpenCLC v2.0 s6.5.5) and notion of
473 /// overlapping address spaces.
474 /// CL1.1 or CL1.2:
475 /// every address space is a superset of itself.
476 /// CL2.0 adds:
477 /// __generic is a superset of any address space except for __constant.
478 static bool isAddressSpaceSupersetOf(LangAS A, LangAS B) {
479 // Address spaces must match exactly.
480 return A == B ||
481 // Otherwise in OpenCLC v2.0 s6.5.5: every address space except
482 // for __constant can be used as __generic.
483 (A == LangAS::opencl_generic && B != LangAS::opencl_constant) ||
484 // We also define global_device and global_host address spaces,
485 // to distinguish global pointers allocated on host from pointers
486 // allocated on device, which are a subset of __global.
487 (A == LangAS::opencl_global && (B == LangAS::opencl_global_device ||
488 B == LangAS::opencl_global_host)) ||
489 (A == LangAS::sycl_global && (B == LangAS::sycl_global_device ||
490 B == LangAS::sycl_global_host)) ||
491 // Consider pointer size address spaces to be equivalent to default.
492 ((isPtrSizeAddressSpace(A) || A == LangAS::Default) &&
493 (isPtrSizeAddressSpace(B) || B == LangAS::Default)) ||
494 // Default is a superset of SYCL address spaces.
495 (A == LangAS::Default &&
496 (B == LangAS::sycl_private || B == LangAS::sycl_local ||
497 B == LangAS::sycl_global || B == LangAS::sycl_global_device ||
498 B == LangAS::sycl_global_host));
499 }
500
501 /// Returns true if the address space in these qualifiers is equal to or
502 /// a superset of the address space in the argument qualifiers.
503 bool isAddressSpaceSupersetOf(Qualifiers other) const {
504 return isAddressSpaceSupersetOf(getAddressSpace(), other.getAddressSpace());
505 }
506
507 /// Determines if these qualifiers compatibly include another set.
508 /// Generally this answers the question of whether an object with the other
509 /// qualifiers can be safely used as an object with these qualifiers.
510 bool compatiblyIncludes(Qualifiers other) const {
511 return isAddressSpaceSupersetOf(other) &&
512 // ObjC GC qualifiers can match, be added, or be removed, but can't
513 // be changed.
514 (getObjCGCAttr() == other.getObjCGCAttr() || !hasObjCGCAttr() ||
515 !other.hasObjCGCAttr()) &&
516 // ObjC lifetime qualifiers must match exactly.
517 getObjCLifetime() == other.getObjCLifetime() &&
518 // CVR qualifiers may subset.
519 (((Mask & CVRMask) | (other.Mask & CVRMask)) == (Mask & CVRMask)) &&
520 // U qualifier may superset.
521 (!other.hasUnaligned() || hasUnaligned());
522 }
523
524 /// Determines if these qualifiers compatibly include another set of
525 /// qualifiers from the narrow perspective of Objective-C ARC lifetime.
526 ///
527 /// One set of Objective-C lifetime qualifiers compatibly includes the other
528 /// if the lifetime qualifiers match, or if both are non-__weak and the
529 /// including set also contains the 'const' qualifier, or both are non-__weak
530 /// and one is None (which can only happen in non-ARC modes).
531 bool compatiblyIncludesObjCLifetime(Qualifiers other) const {
532 if (getObjCLifetime() == other.getObjCLifetime())
533 return true;
534
535 if (getObjCLifetime() == OCL_Weak || other.getObjCLifetime() == OCL_Weak)
536 return false;
537
538 if (getObjCLifetime() == OCL_None || other.getObjCLifetime() == OCL_None)
539 return true;
540
541 return hasConst();
542 }
543
544 /// Determine whether this set of qualifiers is a strict superset of
545 /// another set of qualifiers, not considering qualifier compatibility.
546 bool isStrictSupersetOf(Qualifiers Other) const;
547
548 bool operator==(Qualifiers Other) const { return Mask == Other.Mask; }
549 bool operator!=(Qualifiers Other) const { return Mask != Other.Mask; }
550
551 explicit operator bool() const { return hasQualifiers(); }
552
553 Qualifiers &operator+=(Qualifiers R) {
554 addQualifiers(R);
555 return *this;
556 }
557
558 // Union two qualifier sets. If an enumerated qualifier appears
559 // in both sets, use the one from the right.
560 friend Qualifiers operator+(Qualifiers L, Qualifiers R) {
561 L += R;
562 return L;
563 }
564
565 Qualifiers &operator-=(Qualifiers R) {
566 removeQualifiers(R);
567 return *this;
568 }
569
570 /// Compute the difference between two qualifier sets.
571 friend Qualifiers operator-(Qualifiers L, Qualifiers R) {
572 L -= R;
573 return L;
574 }
575
576 std::string getAsString() const;
577 std::string getAsString(const PrintingPolicy &Policy) const;
578
579 static std::string getAddrSpaceAsString(LangAS AS);
580
581 bool isEmptyWhenPrinted(const PrintingPolicy &Policy) const;
582 void print(raw_ostream &OS, const PrintingPolicy &Policy,
583 bool appendSpaceIfNonEmpty = false) const;
584
585 void Profile(llvm::FoldingSetNodeID &ID) const {
586 ID.AddInteger(Mask);
587 }
588
589private:
590 // bits: |0 1 2|3|4 .. 5|6 .. 8|9 ... 31|
591 // |C R V|U|GCAttr|Lifetime|AddressSpace|
592 uint32_t Mask = 0;
593
594 static const uint32_t UMask = 0x8;
595 static const uint32_t UShift = 3;
596 static const uint32_t GCAttrMask = 0x30;
597 static const uint32_t GCAttrShift = 4;
598 static const uint32_t LifetimeMask = 0x1C0;
599 static const uint32_t LifetimeShift = 6;
600 static const uint32_t AddressSpaceMask =
601 ~(CVRMask | UMask | GCAttrMask | LifetimeMask);
602 static const uint32_t AddressSpaceShift = 9;
603};
604
605/// A std::pair-like structure for storing a qualified type split
606/// into its local qualifiers and its locally-unqualified type.
607struct SplitQualType {
608 /// The locally-unqualified type.
609 const Type *Ty = nullptr;
610
611 /// The local qualifiers.
612 Qualifiers Quals;
613
614 SplitQualType() = default; // null type, empty qualifier set
615 SplitQualType(const Type *ty, Qualifiers qs) : Ty(ty), Quals(qs) {}
616
617 SplitQualType getSingleStepDesugaredType() const; // end of this file
618
619 // Make std::tie work.
620 std::pair<const Type *,Qualifiers> asPair() const {
621 return std::pair<const Type *, Qualifiers>(Ty, Quals);
622 }
623
624 friend bool operator==(SplitQualType a, SplitQualType b) {
625 return a.Ty == b.Ty && a.Quals == b.Quals;
626 }
627 friend bool operator!=(SplitQualType a, SplitQualType b) {
628 return a.Ty != b.Ty || a.Quals != b.Quals;
629 }
630};
631
632/// The kind of type we are substituting Objective-C type arguments into.
633///
634/// The kind of substitution affects the replacement of type parameters when
635/// no concrete type information is provided, e.g., when dealing with an
636/// unspecialized type.
637enum class ObjCSubstitutionContext {
638 /// An ordinary type (the default substitution behavior).
639 Ordinary,
640
641 /// The result type of a method or function.
642 Result,
643
644 /// The parameter type of a method or function.
645 Parameter,
646
647 /// The type of a property.
648 Property,
649
650 /// The superclass of a type.
651 Superclass,
652};
653
654/// A (possibly-)qualified type.
655///
656/// For efficiency, we don't store CV-qualified types as nodes on their
657/// own: instead each reference to a type stores the qualifiers. This
658/// greatly reduces the number of nodes we need to allocate for types (for
659/// example we only need one for 'int', 'const int', 'volatile int',
660/// 'const volatile int', etc).
661///
662/// As an added efficiency bonus, instead of making this a pair, we
663/// just store the two bits we care about in the low bits of the
664/// pointer. To handle the packing/unpacking, we make QualType be a
665/// simple wrapper class that acts like a smart pointer. A third bit
666/// indicates whether there are extended qualifiers present, in which
667/// case the pointer points to a special structure.
668class QualType {
669 friend class QualifierCollector;
670
671 // Thankfully, these are efficiently composable.
672 llvm::PointerIntPair<llvm::PointerUnion<const Type *, const ExtQuals *>,
673 Qualifiers::FastWidth> Value;
674
675 const ExtQuals *getExtQualsUnsafe() const {
676 return Value.getPointer().get<const ExtQuals*>();
677 }
678
679 const Type *getTypePtrUnsafe() const {
680 return Value.getPointer().get<const Type*>();
681 }
682
683 const ExtQualsTypeCommonBase *getCommonPtr() const {
684 assert(!isNull() && "Cannot retrieve a NULL type pointer")((void)0);
685 auto CommonPtrVal = reinterpret_cast<uintptr_t>(Value.getOpaqueValue());
686 CommonPtrVal &= ~(uintptr_t)((1 << TypeAlignmentInBits) - 1);
687 return reinterpret_cast<ExtQualsTypeCommonBase*>(CommonPtrVal);
688 }
689
690public:
691 QualType() = default;
692 QualType(const Type *Ptr, unsigned Quals) : Value(Ptr, Quals) {}
693 QualType(const ExtQuals *Ptr, unsigned Quals) : Value(Ptr, Quals) {}
694
695 unsigned getLocalFastQualifiers() const { return Value.getInt(); }
696 void setLocalFastQualifiers(unsigned Quals) { Value.setInt(Quals); }
697
698 /// Retrieves a pointer to the underlying (unqualified) type.
699 ///
700 /// This function requires that the type not be NULL. If the type might be
701 /// NULL, use the (slightly less efficient) \c getTypePtrOrNull().
702 const Type *getTypePtr() const;
703
704 const Type *getTypePtrOrNull() const;
705
706 /// Retrieves a pointer to the name of the base type.
707 const IdentifierInfo *getBaseTypeIdentifier() const;
708
709 /// Divides a QualType into its unqualified type and a set of local
710 /// qualifiers.
711 SplitQualType split() const;
712
713 void *getAsOpaquePtr() const { return Value.getOpaqueValue(); }
714
715 static QualType getFromOpaquePtr(const void *Ptr) {
716 QualType T;
717 T.Value.setFromOpaqueValue(const_cast<void*>(Ptr));
718 return T;
719 }
720
721 const Type &operator*() const {
722 return *getTypePtr();
723 }
724
725 const Type *operator->() const {
726 return getTypePtr();
727 }
728
729 bool isCanonical() const;
730 bool isCanonicalAsParam() const;
731
732 /// Return true if this QualType doesn't point to a type yet.
733 bool isNull() const {
734 return Value.getPointer().isNull();
735 }
736
737 /// Determine whether this particular QualType instance has the
738 /// "const" qualifier set, without looking through typedefs that may have
739 /// added "const" at a different level.
740 bool isLocalConstQualified() const {
741 return (getLocalFastQualifiers() & Qualifiers::Const);
742 }
743
744 /// Determine whether this type is const-qualified.
745 bool isConstQualified() const;
746
747 /// Determine whether this particular QualType instance has the
748 /// "restrict" qualifier set, without looking through typedefs that may have
749 /// added "restrict" at a different level.
750 bool isLocalRestrictQualified() const {
751 return (getLocalFastQualifiers() & Qualifiers::Restrict);
752 }
753
754 /// Determine whether this type is restrict-qualified.
755 bool isRestrictQualified() const;
756
757 /// Determine whether this particular QualType instance has the
758 /// "volatile" qualifier set, without looking through typedefs that may have
759 /// added "volatile" at a different level.
760 bool isLocalVolatileQualified() const {
761 return (getLocalFastQualifiers() & Qualifiers::Volatile);
762 }
763
764 /// Determine whether this type is volatile-qualified.
765 bool isVolatileQualified() const;
766
767 /// Determine whether this particular QualType instance has any
768 /// qualifiers, without looking through any typedefs that might add
769 /// qualifiers at a different level.
770 bool hasLocalQualifiers() const {
771 return getLocalFastQualifiers() || hasLocalNonFastQualifiers();
772 }
773
774 /// Determine whether this type has any qualifiers.
775 bool hasQualifiers() const;
776
777 /// Determine whether this particular QualType instance has any
778 /// "non-fast" qualifiers, e.g., those that are stored in an ExtQualType
779 /// instance.
780 bool hasLocalNonFastQualifiers() const {
781 return Value.getPointer().is<const ExtQuals*>();
782 }
783
784 /// Retrieve the set of qualifiers local to this particular QualType
785 /// instance, not including any qualifiers acquired through typedefs or
786 /// other sugar.
787 Qualifiers getLocalQualifiers() const;
788
789 /// Retrieve the set of qualifiers applied to this type.
790 Qualifiers getQualifiers() const;
791
792 /// Retrieve the set of CVR (const-volatile-restrict) qualifiers
793 /// local to this particular QualType instance, not including any qualifiers
794 /// acquired through typedefs or other sugar.
795 unsigned getLocalCVRQualifiers() const {
796 return getLocalFastQualifiers();
797 }
798
799 /// Retrieve the set of CVR (const-volatile-restrict) qualifiers
800 /// applied to this type.
801 unsigned getCVRQualifiers() const;
802
803 bool isConstant(const ASTContext& Ctx) const {
804 return QualType::isConstant(*this, Ctx);
805 }
806
807 /// Determine whether this is a Plain Old Data (POD) type (C++ 3.9p10).
808 bool isPODType(const ASTContext &Context) const;
809
810 /// Return true if this is a POD type according to the rules of the C++98
811 /// standard, regardless of the current compilation's language.
812 bool isCXX98PODType(const ASTContext &Context) const;
813
814 /// Return true if this is a POD type according to the more relaxed rules
815 /// of the C++11 standard, regardless of the current compilation's language.
816 /// (C++0x [basic.types]p9). Note that, unlike
817 /// CXXRecordDecl::isCXX11StandardLayout, this takes DRs into account.
818 bool isCXX11PODType(const ASTContext &Context) const;
819
820 /// Return true if this is a trivial type per (C++0x [basic.types]p9)
821 bool isTrivialType(const ASTContext &Context) const;
822
823 /// Return true if this is a trivially copyable type (C++0x [basic.types]p9)
824 bool isTriviallyCopyableType(const ASTContext &Context) const;
825
826
827 /// Returns true if it is a class and it might be dynamic.
828 bool mayBeDynamicClass() const;
829
830 /// Returns true if it is not a class or if the class might not be dynamic.
831 bool mayBeNotDynamicClass() const;
832
833 // Don't promise in the API that anything besides 'const' can be
834 // easily added.
835
836 /// Add the `const` type qualifier to this QualType.
837 void addConst() {
838 addFastQualifiers(Qualifiers::Const);
839 }
840 QualType withConst() const {
841 return withFastQualifiers(Qualifiers::Const);
842 }
843
844 /// Add the `volatile` type qualifier to this QualType.
845 void addVolatile() {
846 addFastQualifiers(Qualifiers::Volatile);
847 }
848 QualType withVolatile() const {
849 return withFastQualifiers(Qualifiers::Volatile);
850 }
851
852 /// Add the `restrict` qualifier to this QualType.
853 void addRestrict() {
854 addFastQualifiers(Qualifiers::Restrict);
855 }
856 QualType withRestrict() const {
857 return withFastQualifiers(Qualifiers::Restrict);
858 }
859
860 QualType withCVRQualifiers(unsigned CVR) const {
861 return withFastQualifiers(CVR);
862 }
863
864 void addFastQualifiers(unsigned TQs) {
865 assert(!(TQs & ~Qualifiers::FastMask)((void)0)
866 && "non-fast qualifier bits set in mask!")((void)0);
867 Value.setInt(Value.getInt() | TQs);
868 }
869
870 void removeLocalConst();
871 void removeLocalVolatile();
872 void removeLocalRestrict();
873 void removeLocalCVRQualifiers(unsigned Mask);
874
875 void removeLocalFastQualifiers() { Value.setInt(0); }
876 void removeLocalFastQualifiers(unsigned Mask) {
877 assert(!(Mask & ~Qualifiers::FastMask) && "mask has non-fast qualifiers")((void)0);
878 Value.setInt(Value.getInt() & ~Mask);
879 }
880
881 // Creates a type with the given qualifiers in addition to any
882 // qualifiers already on this type.
883 QualType withFastQualifiers(unsigned TQs) const {
884 QualType T = *this;
885 T.addFastQualifiers(TQs);
886 return T;
887 }
888
889 // Creates a type with exactly the given fast qualifiers, removing
890 // any existing fast qualifiers.
891 QualType withExactLocalFastQualifiers(unsigned TQs) const {
892 return withoutLocalFastQualifiers().withFastQualifiers(TQs);
893 }
894
895 // Removes fast qualifiers, but leaves any extended qualifiers in place.
896 QualType withoutLocalFastQualifiers() const {
897 QualType T = *this;
898 T.removeLocalFastQualifiers();
899 return T;
900 }
901
902 QualType getCanonicalType() const;
903
904 /// Return this type with all of the instance-specific qualifiers
905 /// removed, but without removing any qualifiers that may have been applied
906 /// through typedefs.
907 QualType getLocalUnqualifiedType() const { return QualType(getTypePtr(), 0); }
908
909 /// Retrieve the unqualified variant of the given type,
910 /// removing as little sugar as possible.
911 ///
912 /// This routine looks through various kinds of sugar to find the
913 /// least-desugared type that is unqualified. For example, given:
914 ///
915 /// \code
916 /// typedef int Integer;
917 /// typedef const Integer CInteger;
918 /// typedef CInteger DifferenceType;
919 /// \endcode
920 ///
921 /// Executing \c getUnqualifiedType() on the type \c DifferenceType will
922 /// desugar until we hit the type \c Integer, which has no qualifiers on it.
923 ///
924 /// The resulting type might still be qualified if it's sugar for an array
925 /// type. To strip qualifiers even from within a sugared array type, use
926 /// ASTContext::getUnqualifiedArrayType.
927 inline QualType getUnqualifiedType() const;
928
929 /// Retrieve the unqualified variant of the given type, removing as little
930 /// sugar as possible.
931 ///
932 /// Like getUnqualifiedType(), but also returns the set of
933 /// qualifiers that were built up.
934 ///
935 /// The resulting type might still be qualified if it's sugar for an array
936 /// type. To strip qualifiers even from within a sugared array type, use
937 /// ASTContext::getUnqualifiedArrayType.
938 inline SplitQualType getSplitUnqualifiedType() const;
939
940 /// Determine whether this type is more qualified than the other
941 /// given type, requiring exact equality for non-CVR qualifiers.
942 bool isMoreQualifiedThan(QualType Other) const;
943
944 /// Determine whether this type is at least as qualified as the other
945 /// given type, requiring exact equality for non-CVR qualifiers.
946 bool isAtLeastAsQualifiedAs(QualType Other) const;
947
948 QualType getNonReferenceType() const;
949
950 /// Determine the type of a (typically non-lvalue) expression with the
951 /// specified result type.
952 ///
953 /// This routine should be used for expressions for which the return type is
954 /// explicitly specified (e.g., in a cast or call) and isn't necessarily
955 /// an lvalue. It removes a top-level reference (since there are no
956 /// expressions of reference type) and deletes top-level cvr-qualifiers
957 /// from non-class types (in C++) or all types (in C).
958 QualType getNonLValueExprType(const ASTContext &Context) const;
959
960 /// Remove an outer pack expansion type (if any) from this type. Used as part
961 /// of converting the type of a declaration to the type of an expression that
962 /// references that expression. It's meaningless for an expression to have a
963 /// pack expansion type.
964 QualType getNonPackExpansionType() const;
965
966 /// Return the specified type with any "sugar" removed from
967 /// the type. This takes off typedefs, typeof's etc. If the outer level of
968 /// the type is already concrete, it returns it unmodified. This is similar
969 /// to getting the canonical type, but it doesn't remove *all* typedefs. For
970 /// example, it returns "T*" as "T*", (not as "int*"), because the pointer is
971 /// concrete.
972 ///
973 /// Qualifiers are left in place.
974 QualType getDesugaredType(const ASTContext &Context) const {
975 return getDesugaredType(*this, Context);
976 }
977
978 SplitQualType getSplitDesugaredType() const {
979 return getSplitDesugaredType(*this);
980 }
981
982 /// Return the specified type with one level of "sugar" removed from
983 /// the type.
984 ///
985 /// This routine takes off the first typedef, typeof, etc. If the outer level
986 /// of the type is already concrete, it returns it unmodified.
987 QualType getSingleStepDesugaredType(const ASTContext &Context) const {
988 return getSingleStepDesugaredTypeImpl(*this, Context);
989 }
990
991 /// Returns the specified type after dropping any
992 /// outer-level parentheses.
993 QualType IgnoreParens() const {
994 if (isa<ParenType>(*this))
995 return QualType::IgnoreParens(*this);
996 return *this;
997 }
998
999 /// Indicate whether the specified types and qualifiers are identical.
1000 friend bool operator==(const QualType &LHS, const QualType &RHS) {
1001 return LHS.Value == RHS.Value;
1002 }
1003 friend bool operator!=(const QualType &LHS, const QualType &RHS) {
1004 return LHS.Value != RHS.Value;
1005 }
1006 friend bool operator<(const QualType &LHS, const QualType &RHS) {
1007 return LHS.Value < RHS.Value;
1008 }
1009
1010 static std::string getAsString(SplitQualType split,
1011 const PrintingPolicy &Policy) {
1012 return getAsString(split.Ty, split.Quals, Policy);
1013 }
1014 static std::string getAsString(const Type *ty, Qualifiers qs,
1015 const PrintingPolicy &Policy);
1016
1017 std::string getAsString() const;
1018 std::string getAsString(const PrintingPolicy &Policy) const;
1019
1020 void print(raw_ostream &OS, const PrintingPolicy &Policy,
1021 const Twine &PlaceHolder = Twine(),
1022 unsigned Indentation = 0) const;
1023
1024 static void print(SplitQualType split, raw_ostream &OS,
1025 const PrintingPolicy &policy, const Twine &PlaceHolder,
1026 unsigned Indentation = 0) {
1027 return print(split.Ty, split.Quals, OS, policy, PlaceHolder, Indentation);
1028 }
1029
1030 static void print(const Type *ty, Qualifiers qs,
1031 raw_ostream &OS, const PrintingPolicy &policy,
1032 const Twine &PlaceHolder,
1033 unsigned Indentation = 0);
1034
1035 void getAsStringInternal(std::string &Str,
1036 const PrintingPolicy &Policy) const;
1037
1038 static void getAsStringInternal(SplitQualType split, std::string &out,
1039 const PrintingPolicy &policy) {
1040 return getAsStringInternal(split.Ty, split.Quals, out, policy);
1041 }
1042
1043 static void getAsStringInternal(const Type *ty, Qualifiers qs,
1044 std::string &out,
1045 const PrintingPolicy &policy);
1046
1047 class StreamedQualTypeHelper {
1048 const QualType &T;
1049 const PrintingPolicy &Policy;
1050 const Twine &PlaceHolder;
1051 unsigned Indentation;
1052
1053 public:
1054 StreamedQualTypeHelper(const QualType &T, const PrintingPolicy &Policy,
1055 const Twine &PlaceHolder, unsigned Indentation)
1056 : T(T), Policy(Policy), PlaceHolder(PlaceHolder),
1057 Indentation(Indentation) {}
1058
1059 friend raw_ostream &operator<<(raw_ostream &OS,
1060 const StreamedQualTypeHelper &SQT) {
1061 SQT.T.print(OS, SQT.Policy, SQT.PlaceHolder, SQT.Indentation);
1062 return OS;
1063 }
1064 };
1065
1066 StreamedQualTypeHelper stream(const PrintingPolicy &Policy,
1067 const Twine &PlaceHolder = Twine(),
1068 unsigned Indentation = 0) const {
1069 return StreamedQualTypeHelper(*this, Policy, PlaceHolder, Indentation);
1070 }
1071
1072 void dump(const char *s) const;
1073 void dump() const;
1074 void dump(llvm::raw_ostream &OS, const ASTContext &Context) const;
1075
1076 void Profile(llvm::FoldingSetNodeID &ID) const {
1077 ID.AddPointer(getAsOpaquePtr());
1078 }
1079
1080 /// Check if this type has any address space qualifier.
1081 inline bool hasAddressSpace() const;
1082
1083 /// Return the address space of this type.
1084 inline LangAS getAddressSpace() const;
1085
1086 /// Returns true if address space qualifiers overlap with T address space
1087 /// qualifiers.
1088 /// OpenCL C defines conversion rules for pointers to different address spaces
1089 /// and notion of overlapping address spaces.
1090 /// CL1.1 or CL1.2:
1091 /// address spaces overlap iff they are they same.
1092 /// OpenCL C v2.0 s6.5.5 adds:
1093 /// __generic overlaps with any address space except for __constant.
1094 bool isAddressSpaceOverlapping(QualType T) const {
1095 Qualifiers Q = getQualifiers();
1096 Qualifiers TQ = T.getQualifiers();
1097 // Address spaces overlap if at least one of them is a superset of another
1098 return Q.isAddressSpaceSupersetOf(TQ) || TQ.isAddressSpaceSupersetOf(Q);
1099 }
1100
1101 /// Returns gc attribute of this type.
1102 inline Qualifiers::GC getObjCGCAttr() const;
1103
1104 /// true when Type is objc's weak.
1105 bool isObjCGCWeak() const {
1106 return getObjCGCAttr() == Qualifiers::Weak;
1107 }
1108
1109 /// true when Type is objc's strong.
1110 bool isObjCGCStrong() const {
1111 return getObjCGCAttr() == Qualifiers::Strong;
1112 }
1113
1114 /// Returns lifetime attribute of this type.
1115 Qualifiers::ObjCLifetime getObjCLifetime() const {
1116 return getQualifiers().getObjCLifetime();
1117 }
1118
1119 bool hasNonTrivialObjCLifetime() const {
1120 return getQualifiers().hasNonTrivialObjCLifetime();
1121 }
1122
1123 bool hasStrongOrWeakObjCLifetime() const {
1124 return getQualifiers().hasStrongOrWeakObjCLifetime();
1125 }
1126
1127 // true when Type is objc's weak and weak is enabled but ARC isn't.
1128 bool isNonWeakInMRRWithObjCWeak(const ASTContext &Context) const;
1129
/// Categories of non-trivial default-initialization for C struct
/// fields. PDIK_Trivial is zero so a value of this enum can be used
/// directly as an "is non-trivial" boolean.
enum PrimitiveDefaultInitializeKind {
  /// The type does not fall into any of the following categories. Note
  /// that this case is zero-valued so that values of this enum can be
  /// used as a boolean condition for non-triviality.
  PDIK_Trivial,

  /// An Objective-C retainable pointer type qualified with the ARC
  /// __strong qualifier.
  PDIK_ARCStrong,

  /// An Objective-C retainable pointer type qualified with the ARC
  /// __weak qualifier.
  PDIK_ARCWeak,

  /// A struct containing a field whose type is not PCK_Trivial.
  PDIK_Struct
};
1147
1148 /// Functions to query basic properties of non-trivial C struct types.
1149
1150 /// Check if this is a non-trivial type that would cause a C struct
1151 /// transitively containing this type to be non-trivial to default initialize
1152 /// and return the kind.
1153 PrimitiveDefaultInitializeKind
1154 isNonTrivialToPrimitiveDefaultInitialize() const;
1155
/// Categories of non-trivial copy semantics for C struct fields.
/// PCK_Trivial is zero so a value of this enum can be used directly as
/// an "is non-trivial" boolean.
enum PrimitiveCopyKind {
  /// The type does not fall into any of the following categories. Note
  /// that this case is zero-valued so that values of this enum can be
  /// used as a boolean condition for non-triviality.
  PCK_Trivial,

  /// The type would be trivial except that it is volatile-qualified.
  /// Types that fall into one of the other non-trivial cases may
  /// additionally be volatile-qualified.
  PCK_VolatileTrivial,

  /// An Objective-C retainable pointer type qualified with the ARC
  /// __strong qualifier.
  PCK_ARCStrong,

  /// An Objective-C retainable pointer type qualified with the ARC
  /// __weak qualifier.
  PCK_ARCWeak,

  /// A struct containing a field whose type is neither PCK_Trivial nor
  /// PCK_VolatileTrivial.
  /// Note that a C++ struct type does not necessarily match this; C++
  /// copying semantics are too complex to express here, in part because
  /// they depend on the exact constructor or assignment operator that
  /// is chosen by overload resolution to do the copy.
  PCK_Struct
};
1183
1184 /// Check if this is a non-trivial type that would cause a C struct
1185 /// transitively containing this type to be non-trivial to copy and return the
1186 /// kind.
1187 PrimitiveCopyKind isNonTrivialToPrimitiveCopy() const;
1188
1189 /// Check if this is a non-trivial type that would cause a C struct
1190 /// transitively containing this type to be non-trivial to destructively
1191 /// move and return the kind. Destructive move in this context is a C++-style
1192 /// move in which the source object is placed in a valid but unspecified state
1193 /// after it is moved, as opposed to a truly destructive move in which the
1194 /// source object is placed in an uninitialized state.
1195 PrimitiveCopyKind isNonTrivialToPrimitiveDestructiveMove() const;
1196
/// Kinds of non-trivial destruction. DK_none is zero so a value of this
/// enum can be tested directly as "requires cleanup".
enum DestructionKind {
  DK_none,
  DK_cxx_destructor,
  DK_objc_strong_lifetime,
  DK_objc_weak_lifetime,
  DK_nontrivial_c_struct
};
1204
1205 /// Returns a nonzero value if objects of this type require
1206 /// non-trivial work to clean up after. Non-zero because it's
1207 /// conceivable that qualifiers (objc_gc(weak)?) could make
1208 /// something require destruction.
1209 DestructionKind isDestructedType() const {
1210 return isDestructedTypeImpl(*this);
1211 }
1212
1213 /// Check if this is or contains a C union that is non-trivial to
1214 /// default-initialize, which is a union that has a member that is non-trivial
1215 /// to default-initialize. If this returns true,
1216 /// isNonTrivialToPrimitiveDefaultInitialize returns PDIK_Struct.
1217 bool hasNonTrivialToPrimitiveDefaultInitializeCUnion() const;
1218
1219 /// Check if this is or contains a C union that is non-trivial to destruct,
1220 /// which is a union that has a member that is non-trivial to destruct. If
1221 /// this returns true, isDestructedType returns DK_nontrivial_c_struct.
1222 bool hasNonTrivialToPrimitiveDestructCUnion() const;
1223
1224 /// Check if this is or contains a C union that is non-trivial to copy, which
1225 /// is a union that has a member that is non-trivial to copy. If this returns
1226 /// true, isNonTrivialToPrimitiveCopy returns PCK_Struct.
1227 bool hasNonTrivialToPrimitiveCopyCUnion() const;
1228
1229 /// Determine whether expressions of the given type are forbidden
1230 /// from being lvalues in C.
1231 ///
1232 /// The expression types that are forbidden to be lvalues are:
1233 /// - 'void', but not qualified void
1234 /// - function types
1235 ///
1236 /// The exact rule here is C99 6.3.2.1:
1237 /// An lvalue is an expression with an object type or an incomplete
1238 /// type other than void.
1239 bool isCForbiddenLValueType() const;
1240
1241 /// Substitute type arguments for the Objective-C type parameters used in the
1242 /// subject type.
1243 ///
1244 /// \param ctx ASTContext in which the type exists.
1245 ///
1246 /// \param typeArgs The type arguments that will be substituted for the
1247 /// Objective-C type parameters in the subject type, which are generally
1248 /// computed via \c Type::getObjCSubstitutions. If empty, the type
1249 /// parameters will be replaced with their bounds or id/Class, as appropriate
1250 /// for the context.
1251 ///
1252 /// \param context The context in which the subject type was written.
1253 ///
1254 /// \returns the resulting type.
1255 QualType substObjCTypeArgs(ASTContext &ctx,
1256 ArrayRef<QualType> typeArgs,
1257 ObjCSubstitutionContext context) const;
1258
1259 /// Substitute type arguments from an object type for the Objective-C type
1260 /// parameters used in the subject type.
1261 ///
1262 /// This operation combines the computation of type arguments for
1263 /// substitution (\c Type::getObjCSubstitutions) with the actual process of
1264 /// substitution (\c QualType::substObjCTypeArgs) for the convenience of
1265 /// callers that need to perform a single substitution in isolation.
1266 ///
1267 /// \param objectType The type of the object whose member type we're
1268 /// substituting into. For example, this might be the receiver of a message
1269 /// or the base of a property access.
1270 ///
1271 /// \param dc The declaration context from which the subject type was
1272 /// retrieved, which indicates (for example) which type parameters should
1273 /// be substituted.
1274 ///
1275 /// \param context The context in which the subject type was written.
1276 ///
1277 /// \returns the subject type after replacing all of the Objective-C type
1278 /// parameters with their corresponding arguments.
1279 QualType substObjCMemberType(QualType objectType,
1280 const DeclContext *dc,
1281 ObjCSubstitutionContext context) const;
1282
1283 /// Strip Objective-C "__kindof" types from the given type.
1284 QualType stripObjCKindOfType(const ASTContext &ctx) const;
1285
1286 /// Remove all qualifiers including _Atomic.
1287 QualType getAtomicUnqualifiedType() const;
1288
1289private:
1290 // These methods are implemented in a separate translation unit;
1291 // "static"-ize them to avoid creating temporary QualTypes in the
1292 // caller.
1293 static bool isConstant(QualType T, const ASTContext& Ctx);
1294 static QualType getDesugaredType(QualType T, const ASTContext &Context);
1295 static SplitQualType getSplitDesugaredType(QualType T);
1296 static SplitQualType getSplitUnqualifiedTypeImpl(QualType type);
1297 static QualType getSingleStepDesugaredTypeImpl(QualType type,
1298 const ASTContext &C);
1299 static QualType IgnoreParens(QualType T);
1300 static DestructionKind isDestructedTypeImpl(QualType type);
1301
1302 /// Check if \param RD is or contains a non-trivial C union.
1303 static bool hasNonTrivialToPrimitiveDefaultInitializeCUnion(const RecordDecl *RD);
1304 static bool hasNonTrivialToPrimitiveDestructCUnion(const RecordDecl *RD);
1305 static bool hasNonTrivialToPrimitiveCopyCUnion(const RecordDecl *RD);
1306};
1307
1308} // namespace clang
1309
1310namespace llvm {
1311
1312/// Implement simplify_type for QualType, so that we can dyn_cast from QualType
1313/// to a specific Type class.
1314template<> struct simplify_type< ::clang::QualType> {
1315 using SimpleType = const ::clang::Type *;
1316
1317 static SimpleType getSimplifiedValue(::clang::QualType Val) {
1318 return Val.getTypePtr();
1319 }
1320};
1321
1322// Teach SmallPtrSet that QualType is "basically a pointer".
1323template<>
1324struct PointerLikeTypeTraits<clang::QualType> {
1325 static inline void *getAsVoidPointer(clang::QualType P) {
1326 return P.getAsOpaquePtr();
1327 }
1328
1329 static inline clang::QualType getFromVoidPointer(void *P) {
1330 return clang::QualType::getFromOpaquePtr(P);
1331 }
1332
1333 // Various qualifiers go in low bits.
1334 static constexpr int NumLowBitsAvailable = 0;
1335};
1336
1337} // namespace llvm
1338
1339namespace clang {
1340
1341/// Base class that is common to both the \c ExtQuals and \c Type
1342/// classes, which allows \c QualType to access the common fields between the
1343/// two.
1344class ExtQualsTypeCommonBase {
1345 friend class ExtQuals;
1346 friend class QualType;
1347 friend class Type;
1348
1349 /// The "base" type of an extended qualifiers type (\c ExtQuals) or
1350 /// a self-referential pointer (for \c Type).
1351 ///
1352 /// This pointer allows an efficient mapping from a QualType to its
1353 /// underlying type pointer.
1354 const Type *const BaseType;
1355
1356 /// The canonical type of this type. A QualType.
1357 QualType CanonicalType;
1358
1359 ExtQualsTypeCommonBase(const Type *baseType, QualType canon)
1360 : BaseType(baseType), CanonicalType(canon) {}
1361};
1362
1363/// We can encode up to four bits in the low bits of a
1364/// type pointer, but there are many more type qualifiers that we want
1365/// to be able to apply to an arbitrary type. Therefore we have this
1366/// struct, intended to be heap-allocated and used by QualType to
1367/// store qualifiers.
1368///
1369/// The current design tags the 'const', 'restrict', and 'volatile' qualifiers
1370/// in three low bits on the QualType pointer; a fourth bit records whether
1371/// the pointer is an ExtQuals node. The extended qualifiers (address spaces,
1372/// Objective-C GC attributes) are much more rare.
1373class ExtQuals : public ExtQualsTypeCommonBase, public llvm::FoldingSetNode {
1374 // NOTE: changing the fast qualifiers should be straightforward as
1375 // long as you don't make 'const' non-fast.
1376 // 1. Qualifiers:
1377 // a) Modify the bitmasks (Qualifiers::TQ and DeclSpec::TQ).
1378 // Fast qualifiers must occupy the low-order bits.
1379 // b) Update Qualifiers::FastWidth and FastMask.
1380 // 2. QualType:
1381 // a) Update is{Volatile,Restrict}Qualified(), defined inline.
1382 // b) Update remove{Volatile,Restrict}, defined near the end of
1383 // this header.
1384 // 3. ASTContext:
1385 // a) Update get{Volatile,Restrict}Type.
1386
1387 /// The immutable set of qualifiers applied by this node. Always contains
1388 /// extended qualifiers.
1389 Qualifiers Quals;
1390
1391 ExtQuals *this_() { return this; }
1392
1393public:
1394 ExtQuals(const Type *baseType, QualType canon, Qualifiers quals)
1395 : ExtQualsTypeCommonBase(baseType,
1396 canon.isNull() ? QualType(this_(), 0) : canon),
1397 Quals(quals) {
1398 assert(Quals.hasNonFastQualifiers()((void)0)
1399 && "ExtQuals created with no fast qualifiers")((void)0);
1400 assert(!Quals.hasFastQualifiers()((void)0)
1401 && "ExtQuals created with fast qualifiers")((void)0);
1402 }
1403
1404 Qualifiers getQualifiers() const { return Quals; }
1405
1406 bool hasObjCGCAttr() const { return Quals.hasObjCGCAttr(); }
1407 Qualifiers::GC getObjCGCAttr() const { return Quals.getObjCGCAttr(); }
1408
1409 bool hasObjCLifetime() const { return Quals.hasObjCLifetime(); }
1410 Qualifiers::ObjCLifetime getObjCLifetime() const {
1411 return Quals.getObjCLifetime();
1412 }
1413
1414 bool hasAddressSpace() const { return Quals.hasAddressSpace(); }
1415 LangAS getAddressSpace() const { return Quals.getAddressSpace(); }
1416
1417 const Type *getBaseType() const { return BaseType; }
1418
1419public:
1420 void Profile(llvm::FoldingSetNodeID &ID) const {
1421 Profile(ID, getBaseType(), Quals);
1422 }
1423
1424 static void Profile(llvm::FoldingSetNodeID &ID,
1425 const Type *BaseType,
1426 Qualifiers Quals) {
1427 assert(!Quals.hasFastQualifiers() && "fast qualifiers in ExtQuals hash!")((void)0);
1428 ID.AddPointer(BaseType);
1429 Quals.Profile(ID);
1430 }
1431};
1432
/// The kind of C++11 ref-qualifier associated with a function type.
/// This determines whether a member function's "this" object can be an
/// lvalue, rvalue, or neither.
enum RefQualifierKind {
  /// No ref-qualifier was provided.
  RQ_None = 0,

  /// An lvalue ref-qualifier was provided (\c &).
  RQ_LValue,

  /// An rvalue ref-qualifier was provided (\c &&).
  RQ_RValue
};
1446
/// Which keyword(s) were used to create an AutoType.
enum class AutoTypeKeyword {
  /// auto
  Auto,

  /// decltype(auto)
  DecltypeAuto,

  /// __auto_type (GNU extension)
  GNUAutoType
};
1458
1459/// The base class of the type hierarchy.
1460///
1461/// A central concept with types is that each type always has a canonical
1462/// type. A canonical type is the type with any typedef names stripped out
1463/// of it or the types it references. For example, consider:
1464///
1465/// typedef int foo;
1466/// typedef foo* bar;
1467/// 'int *' 'foo *' 'bar'
1468///
1469/// There will be a Type object created for 'int'. Since int is canonical, its
1470/// CanonicalType pointer points to itself. There is also a Type for 'foo' (a
1471/// TypedefType). Its CanonicalType pointer points to the 'int' Type. Next
1472/// there is a PointerType that represents 'int*', which, like 'int', is
1473/// canonical. Finally, there is a PointerType type for 'foo*' whose canonical
1474/// type is 'int*', and there is a TypedefType for 'bar', whose canonical type
1475/// is also 'int*'.
1476///
1477/// Non-canonical types are useful for emitting diagnostics, without losing
1478/// information about typedefs being used. Canonical types are useful for type
1479/// comparisons (they allow by-pointer equality tests) and useful for reasoning
1480/// about whether something has a particular form (e.g. is a function type),
1481/// because they implicitly, recursively, strip all typedefs out of a type.
1482///
1483/// Types, once created, are immutable.
1484///
1485class alignas(8) Type : public ExtQualsTypeCommonBase {
1486public:
1487 enum TypeClass {
1488#define TYPE(Class, Base) Class,
1489#define LAST_TYPE(Class) TypeLast = Class
1490#define ABSTRACT_TYPE(Class, Base)
1491#include "clang/AST/TypeNodes.inc"
1492 };
1493
1494private:
1495 /// Bitfields required by the Type class.
1496 class TypeBitfields {
1497 friend class Type;
1498 template <class T> friend class TypePropertyCache;
1499
1500 /// TypeClass bitfield - Enum that specifies what subclass this belongs to.
1501 unsigned TC : 8;
1502
1503 /// Store information on the type dependency.
1504 unsigned Dependence : llvm::BitWidth<TypeDependence>;
1505
1506 /// True if the cache (i.e. the bitfields here starting with
1507 /// 'Cache') is valid.
1508 mutable unsigned CacheValid : 1;
1509
1510 /// Linkage of this type.
1511 mutable unsigned CachedLinkage : 3;
1512
1513 /// Whether this type involves and local or unnamed types.
1514 mutable unsigned CachedLocalOrUnnamed : 1;
1515
1516 /// Whether this type comes from an AST file.
1517 mutable unsigned FromAST : 1;
1518
1519 bool isCacheValid() const {
1520 return CacheValid;
1521 }
1522
1523 Linkage getLinkage() const {
1524 assert(isCacheValid() && "getting linkage from invalid cache")((void)0);
1525 return static_cast<Linkage>(CachedLinkage);
1526 }
1527
1528 bool hasLocalOrUnnamedType() const {
1529 assert(isCacheValid() && "getting linkage from invalid cache")((void)0);
1530 return CachedLocalOrUnnamed;
1531 }
1532 };
1533 enum { NumTypeBits = 8 + llvm::BitWidth<TypeDependence> + 6 };
1534
1535protected:
1536 // These classes allow subclasses to somewhat cleanly pack bitfields
1537 // into Type.
1538
1539 class ArrayTypeBitfields {
1540 friend class ArrayType;
1541
1542 unsigned : NumTypeBits;
1543
1544 /// CVR qualifiers from declarations like
1545 /// 'int X[static restrict 4]'. For function parameters only.
1546 unsigned IndexTypeQuals : 3;
1547
1548 /// Storage class qualifiers from declarations like
1549 /// 'int X[static restrict 4]'. For function parameters only.
1550 /// Actually an ArrayType::ArraySizeModifier.
1551 unsigned SizeModifier : 3;
1552 };
1553
1554 class ConstantArrayTypeBitfields {
1555 friend class ConstantArrayType;
1556
1557 unsigned : NumTypeBits + 3 + 3;
1558
1559 /// Whether we have a stored size expression.
1560 unsigned HasStoredSizeExpr : 1;
1561 };
1562
1563 class BuiltinTypeBitfields {
1564 friend class BuiltinType;
1565
1566 unsigned : NumTypeBits;
1567
1568 /// The kind (BuiltinType::Kind) of builtin type this is.
1569 unsigned Kind : 8;
1570 };
1571
1572 /// FunctionTypeBitfields store various bits belonging to FunctionProtoType.
1573 /// Only common bits are stored here. Additional uncommon bits are stored
1574 /// in a trailing object after FunctionProtoType.
1575 class FunctionTypeBitfields {
1576 friend class FunctionProtoType;
1577 friend class FunctionType;
1578
1579 unsigned : NumTypeBits;
1580
1581 /// Extra information which affects how the function is called, like
1582 /// regparm and the calling convention.
1583 unsigned ExtInfo : 13;
1584
1585 /// The ref-qualifier associated with a \c FunctionProtoType.
1586 ///
1587 /// This is a value of type \c RefQualifierKind.
1588 unsigned RefQualifier : 2;
1589
1590 /// Used only by FunctionProtoType, put here to pack with the
1591 /// other bitfields.
1592 /// The qualifiers are part of FunctionProtoType because...
1593 ///
1594 /// C++ 8.3.5p4: The return type, the parameter type list and the
1595 /// cv-qualifier-seq, [...], are part of the function type.
1596 unsigned FastTypeQuals : Qualifiers::FastWidth;
1597 /// Whether this function has extended Qualifiers.
1598 unsigned HasExtQuals : 1;
1599
1600 /// The number of parameters this function has, not counting '...'.
1601 /// According to [implimits] 8 bits should be enough here but this is
1602 /// somewhat easy to exceed with metaprogramming and so we would like to
1603 /// keep NumParams as wide as reasonably possible.
1604 unsigned NumParams : 16;
1605
1606 /// The type of exception specification this function has.
1607 unsigned ExceptionSpecType : 4;
1608
1609 /// Whether this function has extended parameter information.
1610 unsigned HasExtParameterInfos : 1;
1611
1612 /// Whether the function is variadic.
1613 unsigned Variadic : 1;
1614
1615 /// Whether this function has a trailing return type.
1616 unsigned HasTrailingReturn : 1;
1617 };
1618
1619 class ObjCObjectTypeBitfields {
1620 friend class ObjCObjectType;
1621
1622 unsigned : NumTypeBits;
1623
1624 /// The number of type arguments stored directly on this object type.
1625 unsigned NumTypeArgs : 7;
1626
1627 /// The number of protocols stored directly on this object type.
1628 unsigned NumProtocols : 6;
1629
1630 /// Whether this is a "kindof" type.
1631 unsigned IsKindOf : 1;
1632 };
1633
1634 class ReferenceTypeBitfields {
1635 friend class ReferenceType;
1636
1637 unsigned : NumTypeBits;
1638
1639 /// True if the type was originally spelled with an lvalue sigil.
1640 /// This is never true of rvalue references but can also be false
1641 /// on lvalue references because of C++0x [dcl.typedef]p9,
1642 /// as follows:
1643 ///
1644 /// typedef int &ref; // lvalue, spelled lvalue
1645 /// typedef int &&rvref; // rvalue
1646 /// ref &a; // lvalue, inner ref, spelled lvalue
1647 /// ref &&a; // lvalue, inner ref
1648 /// rvref &a; // lvalue, inner ref, spelled lvalue
1649 /// rvref &&a; // rvalue, inner ref
1650 unsigned SpelledAsLValue : 1;
1651
1652 /// True if the inner type is a reference type. This only happens
1653 /// in non-canonical forms.
1654 unsigned InnerRef : 1;
1655 };
1656
1657 class TypeWithKeywordBitfields {
1658 friend class TypeWithKeyword;
1659
1660 unsigned : NumTypeBits;
1661
1662 /// An ElaboratedTypeKeyword. 8 bits for efficient access.
1663 unsigned Keyword : 8;
1664 };
1665
1666 enum { NumTypeWithKeywordBits = 8 };
1667
1668 class ElaboratedTypeBitfields {
1669 friend class ElaboratedType;
1670
1671 unsigned : NumTypeBits;
1672 unsigned : NumTypeWithKeywordBits;
1673
1674 /// Whether the ElaboratedType has a trailing OwnedTagDecl.
1675 unsigned HasOwnedTagDecl : 1;
1676 };
1677
1678 class VectorTypeBitfields {
1679 friend class VectorType;
1680 friend class DependentVectorType;
1681
1682 unsigned : NumTypeBits;
1683
1684 /// The kind of vector, either a generic vector type or some
1685 /// target-specific vector type such as for AltiVec or Neon.
1686 unsigned VecKind : 3;
1687 /// The number of elements in the vector.
1688 uint32_t NumElements;
1689 };
1690
1691 class AttributedTypeBitfields {
1692 friend class AttributedType;
1693
1694 unsigned : NumTypeBits;
1695
1696 /// An AttributedType::Kind
1697 unsigned AttrKind : 32 - NumTypeBits;
1698 };
1699
1700 class AutoTypeBitfields {
1701 friend class AutoType;
1702
1703 unsigned : NumTypeBits;
1704
1705 /// Was this placeholder type spelled as 'auto', 'decltype(auto)',
1706 /// or '__auto_type'? AutoTypeKeyword value.
1707 unsigned Keyword : 2;
1708
1709 /// The number of template arguments in the type-constraints, which is
1710 /// expected to be able to hold at least 1024 according to [implimits].
1711 /// However as this limit is somewhat easy to hit with template
1712 /// metaprogramming we'd prefer to keep it as large as possible.
1713 /// At the moment it has been left as a non-bitfield since this type
1714 /// safely fits in 64 bits as an unsigned, so there is no reason to
1715 /// introduce the performance impact of a bitfield.
1716 unsigned NumArgs;
1717 };
1718
1719 class SubstTemplateTypeParmPackTypeBitfields {
1720 friend class SubstTemplateTypeParmPackType;
1721
1722 unsigned : NumTypeBits;
1723
1724 /// The number of template arguments in \c Arguments, which is
1725 /// expected to be able to hold at least 1024 according to [implimits].
1726 /// However as this limit is somewhat easy to hit with template
1727 /// metaprogramming we'd prefer to keep it as large as possible.
1728 /// At the moment it has been left as a non-bitfield since this type
1729 /// safely fits in 64 bits as an unsigned, so there is no reason to
1730 /// introduce the performance impact of a bitfield.
1731 unsigned NumArgs;
1732 };
1733
1734 class TemplateSpecializationTypeBitfields {
1735 friend class TemplateSpecializationType;
1736
1737 unsigned : NumTypeBits;
1738
1739 /// Whether this template specialization type is a substituted type alias.
1740 unsigned TypeAlias : 1;
1741
1742 /// The number of template arguments named in this class template
1743 /// specialization, which is expected to be able to hold at least 1024
1744 /// according to [implimits]. However, as this limit is somewhat easy to
1745 /// hit with template metaprogramming we'd prefer to keep it as large
1746 /// as possible. At the moment it has been left as a non-bitfield since
1747 /// this type safely fits in 64 bits as an unsigned, so there is no reason
1748 /// to introduce the performance impact of a bitfield.
1749 unsigned NumArgs;
1750 };
1751
1752 class DependentTemplateSpecializationTypeBitfields {
1753 friend class DependentTemplateSpecializationType;
1754
1755 unsigned : NumTypeBits;
1756 unsigned : NumTypeWithKeywordBits;
1757
1758 /// The number of template arguments named in this class template
1759 /// specialization, which is expected to be able to hold at least 1024
1760 /// according to [implimits]. However, as this limit is somewhat easy to
1761 /// hit with template metaprogramming we'd prefer to keep it as large
1762 /// as possible. At the moment it has been left as a non-bitfield since
1763 /// this type safely fits in 64 bits as an unsigned, so there is no reason
1764 /// to introduce the performance impact of a bitfield.
1765 unsigned NumArgs;
1766 };
1767
1768 class PackExpansionTypeBitfields {
1769 friend class PackExpansionType;
1770
1771 unsigned : NumTypeBits;
1772
1773 /// The number of expansions that this pack expansion will
1774 /// generate when substituted (+1), which is expected to be able to
1775 /// hold at least 1024 according to [implimits]. However, as this limit
1776 /// is somewhat easy to hit with template metaprogramming we'd prefer to
1777 /// keep it as large as possible. At the moment it has been left as a
1778 /// non-bitfield since this type safely fits in 64 bits as an unsigned, so
1779 /// there is no reason to introduce the performance impact of a bitfield.
1780 ///
1781 /// This field will only have a non-zero value when some of the parameter
1782 /// packs that occur within the pattern have been substituted but others
1783 /// have not.
1784 unsigned NumExpansions;
1785 };
1786
1787 union {
1788 TypeBitfields TypeBits;
1789 ArrayTypeBitfields ArrayTypeBits;
1790 ConstantArrayTypeBitfields ConstantArrayTypeBits;
1791 AttributedTypeBitfields AttributedTypeBits;
1792 AutoTypeBitfields AutoTypeBits;
1793 BuiltinTypeBitfields BuiltinTypeBits;
1794 FunctionTypeBitfields FunctionTypeBits;
1795 ObjCObjectTypeBitfields ObjCObjectTypeBits;
1796 ReferenceTypeBitfields ReferenceTypeBits;
1797 TypeWithKeywordBitfields TypeWithKeywordBits;
1798 ElaboratedTypeBitfields ElaboratedTypeBits;
1799 VectorTypeBitfields VectorTypeBits;
1800 SubstTemplateTypeParmPackTypeBitfields SubstTemplateTypeParmPackTypeBits;
1801 TemplateSpecializationTypeBitfields TemplateSpecializationTypeBits;
1802 DependentTemplateSpecializationTypeBitfields
1803 DependentTemplateSpecializationTypeBits;
1804 PackExpansionTypeBitfields PackExpansionTypeBits;
1805 };
1806
1807private:
1808 template <class T> friend class TypePropertyCache;
1809
1810 /// Set whether this type comes from an AST file.
1811 void setFromAST(bool V = true) const {
1812 TypeBits.FromAST = V;
1813 }
1814
1815protected:
1816 friend class ASTContext;
1817
1818 Type(TypeClass tc, QualType canon, TypeDependence Dependence)
1819 : ExtQualsTypeCommonBase(this,
1820 canon.isNull() ? QualType(this_(), 0) : canon) {
1821 static_assert(sizeof(*this) <= 8 + sizeof(ExtQualsTypeCommonBase),
1822 "changing bitfields changed sizeof(Type)!");
1823 static_assert(alignof(decltype(*this)) % sizeof(void *) == 0,
1824 "Insufficient alignment!");
1825 TypeBits.TC = tc;
1826 TypeBits.Dependence = static_cast<unsigned>(Dependence);
1827 TypeBits.CacheValid = false;
1828 TypeBits.CachedLocalOrUnnamed = false;
1829 TypeBits.CachedLinkage = NoLinkage;
1830 TypeBits.FromAST = false;
1831 }
1832
1833 // silence VC++ warning C4355: 'this' : used in base member initializer list
1834 Type *this_() { return this; }
1835
1836 void setDependence(TypeDependence D) {
1837 TypeBits.Dependence = static_cast<unsigned>(D);
1838 }
1839
1840 void addDependence(TypeDependence D) { setDependence(getDependence() | D); }
1841
1842public:
1843 friend class ASTReader;
1844 friend class ASTWriter;
1845 template <class T> friend class serialization::AbstractTypeReader;
1846 template <class T> friend class serialization::AbstractTypeWriter;
1847
1848 Type(const Type &) = delete;
1849 Type(Type &&) = delete;
1850 Type &operator=(const Type &) = delete;
1851 Type &operator=(Type &&) = delete;
1852
1853 TypeClass getTypeClass() const { return static_cast<TypeClass>(TypeBits.TC); }
1854
1855 /// Whether this type comes from an AST file.
1856 bool isFromAST() const { return TypeBits.FromAST; }
1857
1858 /// Whether this type is or contains an unexpanded parameter
1859 /// pack, used to support C++0x variadic templates.
1860 ///
1861 /// A type that contains a parameter pack shall be expanded by the
1862 /// ellipsis operator at some point. For example, the typedef in the
1863 /// following example contains an unexpanded parameter pack 'T':
1864 ///
1865 /// \code
1866 /// template<typename ...T>
1867 /// struct X {
1868 /// typedef T* pointer_types; // ill-formed; T is a parameter pack.
1869 /// };
1870 /// \endcode
1871 ///
1872 /// Note that this routine does not specify which
1873 bool containsUnexpandedParameterPack() const {
1874 return getDependence() & TypeDependence::UnexpandedPack;
1875 }
1876
1877 /// Determines if this type would be canonical if it had no further
1878 /// qualification.
1879 bool isCanonicalUnqualified() const {
1880 return CanonicalType == QualType(this, 0);
1881 }
1882
1883 /// Pull a single level of sugar off of this locally-unqualified type.
1884 /// Users should generally prefer SplitQualType::getSingleStepDesugaredType()
1885 /// or QualType::getSingleStepDesugaredType(const ASTContext&).
1886 QualType getLocallyUnqualifiedSingleStepDesugaredType() const;
1887
1888 /// As an extension, we classify types as one of "sized" or "sizeless";
1889 /// every type is one or the other. Standard types are all sized;
1890 /// sizeless types are purely an extension.
1891 ///
1892 /// Sizeless types contain data with no specified size, alignment,
1893 /// or layout.
1894 bool isSizelessType() const;
1895 bool isSizelessBuiltinType() const;
1896
1897 /// Determines if this is a sizeless type supported by the
1898 /// 'arm_sve_vector_bits' type attribute, which can be applied to a single
1899 /// SVE vector or predicate, excluding tuple types such as svint32x4_t.
1900 bool isVLSTBuiltinType() const;
1901
1902 /// Returns the representative type for the element of an SVE builtin type.
1903 /// This is used to represent fixed-length SVE vectors created with the
1904 /// 'arm_sve_vector_bits' type attribute as VectorType.
1905 QualType getSveEltType(const ASTContext &Ctx) const;
1906
1907 /// Types are partitioned into 3 broad categories (C99 6.2.5p1):
1908 /// object types, function types, and incomplete types.
1909
1910 /// Return true if this is an incomplete type.
1911 /// A type that can describe objects, but which lacks information needed to
1912 /// determine its size (e.g. void, or a fwd declared struct). Clients of this
1913 /// routine will need to determine if the size is actually required.
1914 ///
1915 /// Def If non-null, and the type refers to some kind of declaration
1916 /// that can be completed (such as a C struct, C++ class, or Objective-C
1917 /// class), will be set to the declaration.
1918 bool isIncompleteType(NamedDecl **Def = nullptr) const;
1919
1920 /// Return true if this is an incomplete or object
1921 /// type, in other words, not a function type.
1922 bool isIncompleteOrObjectType() const {
1923 return !isFunctionType();
1924 }
1925
1926 /// Determine whether this type is an object type.
1927 bool isObjectType() const {
1928 // C++ [basic.types]p8:
1929 // An object type is a (possibly cv-qualified) type that is not a
1930 // function type, not a reference type, and not a void type.
1931 return !isReferenceType() && !isFunctionType() && !isVoidType();
1932 }
1933
1934 /// Return true if this is a literal type
1935 /// (C++11 [basic.types]p10)
1936 bool isLiteralType(const ASTContext &Ctx) const;
1937
1938 /// Determine if this type is a structural type, per C++20 [temp.param]p7.
1939 bool isStructuralType() const;
1940
1941 /// Test if this type is a standard-layout type.
1942 /// (C++0x [basic.type]p9)
1943 bool isStandardLayoutType() const;
1944
1945 /// Helper methods to distinguish type categories. All type predicates
1946 /// operate on the canonical type, ignoring typedefs and qualifiers.
1947
1948 /// Returns true if the type is a builtin type.
1949 bool isBuiltinType() const;
1950
1951 /// Test for a particular builtin type.
1952 bool isSpecificBuiltinType(unsigned K) const;
1953
1954 /// Test for a type which does not represent an actual type-system type but
1955 /// is instead used as a placeholder for various convenient purposes within
1956 /// Clang. All such types are BuiltinTypes.
1957 bool isPlaceholderType() const;
1958 const BuiltinType *getAsPlaceholderType() const;
1959
1960 /// Test for a specific placeholder type.
1961 bool isSpecificPlaceholderType(unsigned K) const;
1962
1963 /// Test for a placeholder type other than Overload; see
1964 /// BuiltinType::isNonOverloadPlaceholderType.
1965 bool isNonOverloadPlaceholderType() const;
1966
1967 /// isIntegerType() does *not* include complex integers (a GCC extension).
1968 /// isComplexIntegerType() can be used to test for complex integers.
1969 bool isIntegerType() const; // C99 6.2.5p17 (int, char, bool, enum)
1970 bool isEnumeralType() const;
1971
1972 /// Determine whether this type is a scoped enumeration type.
1973 bool isScopedEnumeralType() const;
1974 bool isBooleanType() const;
1975 bool isCharType() const;
1976 bool isWideCharType() const;
1977 bool isChar8Type() const;
1978 bool isChar16Type() const;
1979 bool isChar32Type() const;
1980 bool isAnyCharacterType() const;
1981 bool isIntegralType(const ASTContext &Ctx) const;
1982
1983 /// Determine whether this type is an integral or enumeration type.
1984 bool isIntegralOrEnumerationType() const;
1985
1986 /// Determine whether this type is an integral or unscoped enumeration type.
1987 bool isIntegralOrUnscopedEnumerationType() const;
1988 bool isUnscopedEnumerationType() const;
1989
1990 /// Floating point categories.
1991 bool isRealFloatingType() const; // C99 6.2.5p10 (float, double, long double)
1992 /// isComplexType() does *not* include complex integers (a GCC extension).
1993 /// isComplexIntegerType() can be used to test for complex integers.
1994 bool isComplexType() const; // C99 6.2.5p11 (complex)
1995 bool isAnyComplexType() const; // C99 6.2.5p11 (complex) + Complex Int.
1996 bool isFloatingType() const; // C99 6.2.5p11 (real floating + complex)
1997 bool isHalfType() const; // OpenCL 6.1.1.1, NEON (IEEE 754-2008 half)
1998 bool isFloat16Type() const; // C11 extension ISO/IEC TS 18661
1999 bool isBFloat16Type() const;
2000 bool isFloat128Type() const;
2001 bool isRealType() const; // C99 6.2.5p17 (real floating + integer)
2002 bool isArithmeticType() const; // C99 6.2.5p18 (integer + floating)
2003 bool isVoidType() const; // C99 6.2.5p19
2004 bool isScalarType() const; // C99 6.2.5p21 (arithmetic + pointers)
2005 bool isAggregateType() const;
2006 bool isFundamentalType() const;
2007 bool isCompoundType() const;
2008
2009 // Type Predicates: Check to see if this type is structurally the specified
2010 // type, ignoring typedefs and qualifiers.
2011 bool isFunctionType() const;
2012 bool isFunctionNoProtoType() const { return getAs<FunctionNoProtoType>(); }
2013 bool isFunctionProtoType() const { return getAs<FunctionProtoType>(); }
2014 bool isPointerType() const;
2015 bool isAnyPointerType() const; // Any C pointer or ObjC object pointer
2016 bool isBlockPointerType() const;
2017 bool isVoidPointerType() const;
2018 bool isReferenceType() const;
2019 bool isLValueReferenceType() const;
2020 bool isRValueReferenceType() const;
2021 bool isObjectPointerType() const;
2022 bool isFunctionPointerType() const;
2023 bool isFunctionReferenceType() const;
2024 bool isMemberPointerType() const;
2025 bool isMemberFunctionPointerType() const;
2026 bool isMemberDataPointerType() const;
2027 bool isArrayType() const;
2028 bool isConstantArrayType() const;
2029 bool isIncompleteArrayType() const;
2030 bool isVariableArrayType() const;
2031 bool isDependentSizedArrayType() const;
2032 bool isRecordType() const;
2033 bool isClassType() const;
2034 bool isStructureType() const;
2035 bool isObjCBoxableRecordType() const;
2036 bool isInterfaceType() const;
2037 bool isStructureOrClassType() const;
2038 bool isUnionType() const;
2039 bool isComplexIntegerType() const; // GCC _Complex integer type.
2040 bool isVectorType() const; // GCC vector type.
2041 bool isExtVectorType() const; // Extended vector type.
2042 bool isMatrixType() const; // Matrix type.
2043 bool isConstantMatrixType() const; // Constant matrix type.
2044 bool isDependentAddressSpaceType() const; // value-dependent address space qualifier
2045 bool isObjCObjectPointerType() const; // pointer to ObjC object
2046 bool isObjCRetainableType() const; // ObjC object or block pointer
2047 bool isObjCLifetimeType() const; // (array of)* retainable type
2048 bool isObjCIndirectLifetimeType() const; // (pointer to)* lifetime type
2049 bool isObjCNSObjectType() const; // __attribute__((NSObject))
2050 bool isObjCIndependentClassType() const; // __attribute__((objc_independent_class))
2051 // FIXME: change this to 'raw' interface type, so we can used 'interface' type
2052 // for the common case.
2053 bool isObjCObjectType() const; // NSString or typeof(*(id)0)
2054 bool isObjCQualifiedInterfaceType() const; // NSString<foo>
2055 bool isObjCQualifiedIdType() const; // id<foo>
2056 bool isObjCQualifiedClassType() const; // Class<foo>
2057 bool isObjCObjectOrInterfaceType() const;
2058 bool isObjCIdType() const; // id
2059 bool isDecltypeType() const;
  /// Was this type written with the special inert-in-ARC __unsafe_unretained
  /// qualifier?
  ///
  /// This approximates the answer to the following question: if this
  /// translation unit were compiled in ARC, would this type be qualified
  /// with __unsafe_unretained?
  ///
  /// Implemented via hasAttr(), which looks through top-level type sugar.
  bool isObjCInertUnsafeUnretainedType() const {
    return hasAttr(attr::ObjCInertUnsafeUnretained);
  }
2069
2070 /// Whether the type is Objective-C 'id' or a __kindof type of an
2071 /// object type, e.g., __kindof NSView * or __kindof id
2072 /// <NSCopying>.
2073 ///
2074 /// \param bound Will be set to the bound on non-id subtype types,
2075 /// which will be (possibly specialized) Objective-C class type, or
/// null for 'id'.
2077 bool isObjCIdOrObjectKindOfType(const ASTContext &ctx,
2078 const ObjCObjectType *&bound) const;
2079
2080 bool isObjCClassType() const; // Class
2081
2082 /// Whether the type is Objective-C 'Class' or a __kindof type of an
2083 /// Class type, e.g., __kindof Class <NSCopying>.
2084 ///
2085 /// Unlike \c isObjCIdOrObjectKindOfType, there is no relevant bound
2086 /// here because Objective-C's type system cannot express "a class
2087 /// object for a subclass of NSFoo".
2088 bool isObjCClassOrClassKindOfType() const;
2089
2090 bool isBlockCompatibleObjCPointerType(ASTContext &ctx) const;
 bool isObjCSelType() const; // SEL
2092 bool isObjCBuiltinType() const; // 'id' or 'Class'
2093 bool isObjCARCBridgableType() const;
2094 bool isCARCBridgableType() const;
2095 bool isTemplateTypeParmType() const; // C++ template type parameter
2096 bool isNullPtrType() const; // C++11 std::nullptr_t
2097 bool isNothrowT() const; // C++ std::nothrow_t
2098 bool isAlignValT() const; // C++17 std::align_val_t
2099 bool isStdByteType() const; // C++17 std::byte
2100 bool isAtomicType() const; // C11 _Atomic()
2101 bool isUndeducedAutoType() const; // C++11 auto or
2102 // C++14 decltype(auto)
2103 bool isTypedefNameType() const; // typedef or alias template
2104
2105#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
2106 bool is##Id##Type() const;
2107#include "clang/Basic/OpenCLImageTypes.def"
2108
2109 bool isImageType() const; // Any OpenCL image type
2110
2111 bool isSamplerT() const; // OpenCL sampler_t
2112 bool isEventT() const; // OpenCL event_t
2113 bool isClkEventT() const; // OpenCL clk_event_t
2114 bool isQueueT() const; // OpenCL queue_t
2115 bool isReserveIDT() const; // OpenCL reserve_id_t
2116
2117#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
2118 bool is##Id##Type() const;
2119#include "clang/Basic/OpenCLExtensionTypes.def"
2120 // Type defined in cl_intel_device_side_avc_motion_estimation OpenCL extension
2121 bool isOCLIntelSubgroupAVCType() const;
2122 bool isOCLExtOpaqueType() const; // Any OpenCL extension type
2123
2124 bool isPipeType() const; // OpenCL pipe type
2125 bool isExtIntType() const; // Extended Int Type
2126 bool isOpenCLSpecificType() const; // Any OpenCL specific type
2127
2128 /// Determines if this type, which must satisfy
2129 /// isObjCLifetimeType(), is implicitly __unsafe_unretained rather
2130 /// than implicitly __strong.
2131 bool isObjCARCImplicitlyUnretainedType() const;
2132
2133 /// Check if the type is the CUDA device builtin surface type.
2134 bool isCUDADeviceBuiltinSurfaceType() const;
2135 /// Check if the type is the CUDA device builtin texture type.
2136 bool isCUDADeviceBuiltinTextureType() const;
2137
2138 /// Return the implicit lifetime for this type, which must not be dependent.
2139 Qualifiers::ObjCLifetime getObjCARCImplicitLifetime() const;
2140
  /// Broad classification of scalar types; see getScalarTypeKind().
  enum ScalarTypeKind {
    STK_CPointer,
    STK_BlockPointer,
    STK_ObjCObjectPointer,
    STK_MemberPointer,
    STK_Bool,
    STK_Integral,
    STK_Floating,
    STK_IntegralComplex,
    STK_FloatingComplex,
    STK_FixedPoint
  };

  /// Given that this is a scalar type, classify it.
  ScalarTypeKind getScalarTypeKind() const;
2156
  /// Return the dependence bits recorded for this type (Dependent,
  /// Instantiation, UnexpandedPack, VariablyModified, Error — see the
  /// predicates below).
  TypeDependence getDependence() const {
    return static_cast<TypeDependence>(TypeBits.Dependence);
  }
2160
  /// Whether this type is an error type, i.e. the TypeDependence::Error
  /// bit is set on it.
  bool containsErrors() const {
    return getDependence() & TypeDependence::Error;
  }
2165
  /// Whether this type is a dependent type, meaning that its definition
  /// somehow depends on a template parameter (C++ [temp.dep.type]).
  /// Answered from the cached dependence bits; no recursive walk happens here.
  bool isDependentType() const {
    return getDependence() & TypeDependence::Dependent;
  }
2171
  /// Determine whether this type is an instantiation-dependent type,
  /// meaning that the type involves a template parameter (even if the
  /// definition does not actually depend on the type substituted for that
  /// template parameter). Answered from the cached dependence bits.
  bool isInstantiationDependentType() const {
    return getDependence() & TypeDependence::Instantiation;
  }
2179
2180 /// Determine whether this type is an undeduced type, meaning that
2181 /// it somehow involves a C++11 'auto' type or similar which has not yet been
2182 /// deduced.
2183 bool isUndeducedType() const;
2184
  /// Whether this type is a variably-modified type (C99 6.7.5), e.g. a
  /// VLA or a pointer/derived type involving one. Answered from the cached
  /// dependence bits.
  bool isVariablyModifiedType() const {
    return getDependence() & TypeDependence::VariablyModified;
  }
2189
2190 /// Whether this type involves a variable-length array type
2191 /// with a definite size.
2192 bool hasSizedVLAType() const;
2193
2194 /// Whether this type is or contains a local or unnamed type.
2195 bool hasUnnamedOrLocalType() const;
2196
2197 bool isOverloadableType() const;
2198
 /// Determine whether this type is a C++ elaborated-type-specifier.
2200 bool isElaboratedTypeSpecifier() const;
2201
2202 bool canDecayToPointerType() const;
2203
2204 /// Whether this type is represented natively as a pointer. This includes
2205 /// pointers, references, block pointers, and Objective-C interface,
2206 /// qualified id, and qualified interface types, as well as nullptr_t.
2207 bool hasPointerRepresentation() const;
2208
2209 /// Whether this type can represent an objective pointer type for the
2210 /// purpose of GC'ability
2211 bool hasObjCPointerRepresentation() const;
2212
2213 /// Determine whether this type has an integer representation
2214 /// of some sort, e.g., it is an integer type or a vector.
2215 bool hasIntegerRepresentation() const;
2216
2217 /// Determine whether this type has an signed integer representation
2218 /// of some sort, e.g., it is an signed integer type or a vector.
2219 bool hasSignedIntegerRepresentation() const;
2220
2221 /// Determine whether this type has an unsigned integer representation
2222 /// of some sort, e.g., it is an unsigned integer type or a vector.
2223 bool hasUnsignedIntegerRepresentation() const;
2224
2225 /// Determine whether this type has a floating-point representation
2226 /// of some sort, e.g., it is a floating-point type or a vector thereof.
2227 bool hasFloatingRepresentation() const;
2228
2229 // Type Checking Functions: Check to see if this type is structurally the
2230 // specified type, ignoring typedefs and qualifiers, and return a pointer to
2231 // the best type we can.
2232 const RecordType *getAsStructureType() const;
2233 /// NOTE: getAs*ArrayType are methods on ASTContext.
2234 const RecordType *getAsUnionType() const;
2235 const ComplexType *getAsComplexIntegerType() const; // GCC complex int type.
2236 const ObjCObjectType *getAsObjCInterfaceType() const;
2237
2238 // The following is a convenience method that returns an ObjCObjectPointerType
2239 // for object declared using an interface.
2240 const ObjCObjectPointerType *getAsObjCInterfacePointerType() const;
2241 const ObjCObjectPointerType *getAsObjCQualifiedIdType() const;
2242 const ObjCObjectPointerType *getAsObjCQualifiedClassType() const;
2243 const ObjCObjectType *getAsObjCQualifiedInterfaceType() const;
2244
2245 /// Retrieves the CXXRecordDecl that this type refers to, either
2246 /// because the type is a RecordType or because it is the injected-class-name
2247 /// type of a class template or class template partial specialization.
2248 CXXRecordDecl *getAsCXXRecordDecl() const;
2249
2250 /// Retrieves the RecordDecl this type refers to.
2251 RecordDecl *getAsRecordDecl() const;
2252
2253 /// Retrieves the TagDecl that this type refers to, either
2254 /// because the type is a TagType or because it is the injected-class-name
2255 /// type of a class template or class template partial specialization.
2256 TagDecl *getAsTagDecl() const;
2257
2258 /// If this is a pointer or reference to a RecordType, return the
2259 /// CXXRecordDecl that the type refers to.
2260 ///
2261 /// If this is not a pointer or reference, or the type being pointed to does
2262 /// not refer to a CXXRecordDecl, returns NULL.
2263 const CXXRecordDecl *getPointeeCXXRecordDecl() const;
2264
2265 /// Get the DeducedType whose type will be deduced for a variable with
2266 /// an initializer of this type. This looks through declarators like pointer
2267 /// types, but not through decltype or typedefs.
2268 DeducedType *getContainedDeducedType() const;
2269
  /// Get the AutoType whose type will be deduced for a variable with
  /// an initializer of this type. This looks through declarators like pointer
  /// types, but not through decltype or typedefs.
  ///
  /// Returns null when there is no contained deduced type, or when the
  /// contained deduced type is not an AutoType (dyn_cast_or_null covers both).
  AutoType *getContainedAutoType() const {
    return dyn_cast_or_null<AutoType>(getContainedDeducedType());
  }
2276
2277 /// Determine whether this type was written with a leading 'auto'
2278 /// corresponding to a trailing return type (possibly for a nested
2279 /// function type within a pointer to function type or similar).
2280 bool hasAutoForTrailingReturnType() const;
2281
2282 /// Member-template getAs<specific type>'. Look through sugar for
2283 /// an instance of \<specific type>. This scheme will eventually
2284 /// replace the specific getAsXXXX methods above.
2285 ///
2286 /// There are some specializations of this member template listed
2287 /// immediately following this class.
2288 template <typename T> const T *getAs() const;
2289
2290 /// Member-template getAsAdjusted<specific type>. Look through specific kinds
2291 /// of sugar (parens, attributes, etc) for an instance of \<specific type>.
2292 /// This is used when you need to walk over sugar nodes that represent some
2293 /// kind of type adjustment from a type that was written as a \<specific type>
2294 /// to another type that is still canonically a \<specific type>.
2295 template <typename T> const T *getAsAdjusted() const;
2296
2297 /// A variant of getAs<> for array types which silently discards
2298 /// qualifiers from the outermost type.
2299 const ArrayType *getAsArrayTypeUnsafe() const;
2300
2301 /// Member-template castAs<specific type>. Look through sugar for
2302 /// the underlying instance of \<specific type>.
2303 ///
2304 /// This method has the same relationship to getAs<T> as cast<T> has
2305 /// to dyn_cast<T>; which is to say, the underlying type *must*
2306 /// have the intended type, and this method will never return null.
2307 template <typename T> const T *castAs() const;
2308
2309 /// A variant of castAs<> for array type which silently discards
2310 /// qualifiers from the outermost type.
2311 const ArrayType *castAsArrayTypeUnsafe() const;
2312
2313 /// Determine whether this type had the specified attribute applied to it
2314 /// (looking through top-level type sugar).
2315 bool hasAttr(attr::Kind AK) const;
2316
2317 /// Get the base element type of this type, potentially discarding type
2318 /// qualifiers. This should never be used when type qualifiers
2319 /// are meaningful.
2320 const Type *getBaseElementTypeUnsafe() const;
2321
2322 /// If this is an array type, return the element type of the array,
2323 /// potentially with type qualifiers missing.
2324 /// This should never be used when type qualifiers are meaningful.
2325 const Type *getArrayElementTypeNoTypeQual() const;
2326
2327 /// If this is a pointer type, return the pointee type.
2328 /// If this is an array type, return the array element type.
2329 /// This should never be used when type qualifiers are meaningful.
2330 const Type *getPointeeOrArrayElementType() const;
2331
2332 /// If this is a pointer, ObjC object pointer, or block
2333 /// pointer, this returns the respective pointee.
2334 QualType getPointeeType() const;
2335
2336 /// Return the specified type with any "sugar" removed from the type,
2337 /// removing any typedefs, typeofs, etc., as well as any qualifiers.
2338 const Type *getUnqualifiedDesugaredType() const;
2339
2340 /// More type predicates useful for type checking/promotion
2341 bool isPromotableIntegerType() const; // C99 6.3.1.1p2
2342
2343 /// Return true if this is an integer type that is
2344 /// signed, according to C99 6.2.5p4 [char, signed char, short, int, long..],
2345 /// or an enum decl which has a signed representation.
2346 bool isSignedIntegerType() const;
2347
2348 /// Return true if this is an integer type that is
2349 /// unsigned, according to C99 6.2.5p6 [which returns true for _Bool],
2350 /// or an enum decl which has an unsigned representation.
2351 bool isUnsignedIntegerType() const;
2352
2353 /// Determines whether this is an integer type that is signed or an
2354 /// enumeration types whose underlying type is a signed integer type.
2355 bool isSignedIntegerOrEnumerationType() const;
2356
2357 /// Determines whether this is an integer type that is unsigned or an
2358 /// enumeration types whose underlying type is a unsigned integer type.
2359 bool isUnsignedIntegerOrEnumerationType() const;
2360
2361 /// Return true if this is a fixed point type according to
2362 /// ISO/IEC JTC1 SC22 WG14 N1169.
2363 bool isFixedPointType() const;
2364
2365 /// Return true if this is a fixed point or integer type.
2366 bool isFixedPointOrIntegerType() const;
2367
2368 /// Return true if this is a saturated fixed point type according to
2369 /// ISO/IEC JTC1 SC22 WG14 N1169. This type can be signed or unsigned.
2370 bool isSaturatedFixedPointType() const;
2371
2372 /// Return true if this is a saturated fixed point type according to
2373 /// ISO/IEC JTC1 SC22 WG14 N1169. This type can be signed or unsigned.
2374 bool isUnsaturatedFixedPointType() const;
2375
2376 /// Return true if this is a fixed point type that is signed according
2377 /// to ISO/IEC JTC1 SC22 WG14 N1169. This type can also be saturated.
2378 bool isSignedFixedPointType() const;
2379
2380 /// Return true if this is a fixed point type that is unsigned according
2381 /// to ISO/IEC JTC1 SC22 WG14 N1169. This type can also be saturated.
2382 bool isUnsignedFixedPointType() const;
2383
2384 /// Return true if this is not a variable sized type,
2385 /// according to the rules of C99 6.7.5p3. It is not legal to call this on
2386 /// incomplete types.
2387 bool isConstantSizeType() const;
2388
2389 /// Returns true if this type can be represented by some
2390 /// set of type specifiers.
2391 bool isSpecifierType() const;
2392
2393 /// Determine the linkage of this type.
2394 Linkage getLinkage() const;
2395
2396 /// Determine the visibility of this type.
2397 Visibility getVisibility() const {
2398 return getLinkageAndVisibility().getVisibility();
2399 }
2400
  /// Return true if the visibility was explicitly set in the code.
  bool isVisibilityExplicit() const {
    return getLinkageAndVisibility().isVisibilityExplicit();
  }
2405
2406 /// Determine the linkage and visibility of this type.
2407 LinkageInfo getLinkageAndVisibility() const;
2408
2409 /// True if the computed linkage is valid. Used for consistency
2410 /// checking. Should always return true.
2411 bool isLinkageValid() const;
2412
2413 /// Determine the nullability of the given type.
2414 ///
2415 /// Note that nullability is only captured as sugar within the type
2416 /// system, not as part of the canonical type, so nullability will
2417 /// be lost by canonicalization and desugaring.
2418 Optional<NullabilityKind> getNullability(const ASTContext &context) const;
2419
2420 /// Determine whether the given type can have a nullability
2421 /// specifier applied to it, i.e., if it is any kind of pointer type.
2422 ///
2423 /// \param ResultIfUnknown The value to return if we don't yet know whether
2424 /// this type can have nullability because it is dependent.
2425 bool canHaveNullability(bool ResultIfUnknown = true) const;
2426
2427 /// Retrieve the set of substitutions required when accessing a member
2428 /// of the Objective-C receiver type that is declared in the given context.
2429 ///
2430 /// \c *this is the type of the object we're operating on, e.g., the
2431 /// receiver for a message send or the base of a property access, and is
2432 /// expected to be of some object or object pointer type.
2433 ///
2434 /// \param dc The declaration context for which we are building up a
2435 /// substitution mapping, which should be an Objective-C class, extension,
2436 /// category, or method within.
2437 ///
2438 /// \returns an array of type arguments that can be substituted for
2439 /// the type parameters of the given declaration context in any type described
2440 /// within that context, or an empty optional to indicate that no
2441 /// substitution is required.
2442 Optional<ArrayRef<QualType>>
2443 getObjCSubstitutions(const DeclContext *dc) const;
2444
2445 /// Determines if this is an ObjC interface type that may accept type
2446 /// parameters.
2447 bool acceptsObjCTypeParams() const;
2448
2449 const char *getTypeClassName() const;
2450
  /// Return the cached canonical form of this type.
  QualType getCanonicalTypeInternal() const {
    return CanonicalType;
  }
2454
2455 CanQualType getCanonicalTypeUnqualified() const; // in CanonicalType.h
2456 void dump() const;
2457 void dump(llvm::raw_ostream &OS, const ASTContext &Context) const;
2458};
2459
2460/// This will check for a TypedefType by removing any existing sugar
2461/// until it reaches a TypedefType or a non-sugared type.
2462template <> const TypedefType *Type::getAs() const;
2463
2464/// This will check for a TemplateSpecializationType by removing any
2465/// existing sugar until it reaches a TemplateSpecializationType or a
2466/// non-sugared type.
2467template <> const TemplateSpecializationType *Type::getAs() const;
2468
2469/// This will check for an AttributedType by removing any existing sugar
2470/// until it reaches an AttributedType or a non-sugared type.
2471template <> const AttributedType *Type::getAs() const;
2472
// We can do canonical leaf types faster, because we don't have to
// worry about preserving child type decoration.
// For every leaf (never-sugared) type class listed in TypeNodes.inc, the
// generic getAs<>/castAs<> sugar walk is replaced by a direct
// dyn_cast/cast of the cached CanonicalType.
#define TYPE(Class, Base)
#define LEAF_TYPE(Class) \
template <> inline const Class##Type *Type::getAs() const { \
 return dyn_cast<Class##Type>(CanonicalType); \
} \
template <> inline const Class##Type *Type::castAs() const { \
 return cast<Class##Type>(CanonicalType); \
}
#include "clang/AST/TypeNodes.inc"
2484
/// This class is used for builtin types like 'int'. Builtin
/// types are always canonical and have a literal name field.
class BuiltinType : public Type {
public:
  /// The kinds of builtin types, generated from the various .def files.
  /// NOTE: the range-based predicates below (isInteger() etc.) rely on the
  /// relative enumerator order those .def files establish.
  enum Kind {
// OpenCL image types
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) Id,
#include "clang/Basic/OpenCLImageTypes.def"
// OpenCL extension types
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) Id,
#include "clang/Basic/OpenCLExtensionTypes.def"
// SVE Types
#define SVE_TYPE(Name, Id, SingletonId) Id,
#include "clang/Basic/AArch64SVEACLETypes.def"
// PPC MMA Types
#define PPC_VECTOR_TYPE(Name, Id, Size) Id,
#include "clang/Basic/PPCTypes.def"
// RVV Types
#define RVV_TYPE(Name, Id, SingletonId) Id,
#include "clang/Basic/RISCVVTypes.def"
// All other builtin types
#define BUILTIN_TYPE(Id, SingletonId) Id,
#define LAST_BUILTIN_TYPE(Id) LastKind = Id
#include "clang/AST/BuiltinTypes.def"
  };

private:
  friend class ASTContext; // ASTContext creates these.

  // Builtin types are always canonical, so the canonical-type argument is
  // the empty QualType(). Only the 'Dependent' kind carries dependence.
  BuiltinType(Kind K)
      : Type(Builtin, QualType(),
             K == Dependent ? TypeDependence::DependentInstantiation
                            : TypeDependence::None) {
    BuiltinTypeBits.Kind = K;
  }

public:
  /// The kind of builtin type this is (cached in the Type bitfields).
  Kind getKind() const { return static_cast<Kind>(BuiltinTypeBits.Kind); }
  StringRef getName(const PrintingPolicy &Policy) const;

  const char *getNameAsCString(const PrintingPolicy &Policy) const {
    // The StringRef is null-terminated.
    StringRef str = getName(Policy);
    assert(!str.empty() && str.data()[str.size()] == '\0');
    return str.data();
  }

  /// Builtin types are never sugar for anything else.
  bool isSugared() const { return false; }
  QualType desugar() const { return QualType(this, 0); }

  // The following predicates test enumerator ranges laid down by
  // BuiltinTypes.def.
  bool isInteger() const {
    return getKind() >= Bool && getKind() <= Int128;
  }

  bool isSignedInteger() const {
    return getKind() >= Char_S && getKind() <= Int128;
  }

  bool isUnsignedInteger() const {
    return getKind() >= Bool && getKind() <= UInt128;
  }

  bool isFloatingPoint() const {
    return getKind() >= Half && getKind() <= Float128;
  }

  /// Determines whether the given kind corresponds to a placeholder type.
  static bool isPlaceholderTypeKind(Kind K) {
    return K >= Overload;
  }

  /// Determines whether this type is a placeholder type, i.e. a type
  /// which cannot appear in arbitrary positions in a fully-formed
  /// expression.
  bool isPlaceholderType() const {
    return isPlaceholderTypeKind(getKind());
  }

  /// Determines whether this type is a placeholder type other than
  /// Overload. Most placeholder types require only syntactic
  /// information about their context in order to be resolved (e.g.
  /// whether it is a call expression), which means they can (and
  /// should) be resolved in an earlier "phase" of analysis.
  /// Overload expressions sometimes pick up further information
  /// from their context, like whether the context expects a
  /// specific function-pointer type, and so frequently need
  /// special treatment.
  bool isNonOverloadPlaceholderType() const {
    return getKind() > Overload;
  }

  static bool classof(const Type *T) { return T->getTypeClass() == Builtin; }
};
2578
2579/// Complex values, per C99 6.2.5p11. This supports the C99 complex
2580/// types (_Complex float etc) as well as the GCC integer complex extensions.
2581class ComplexType : public Type, public llvm::FoldingSetNode {
2582 friend class ASTContext; // ASTContext creates these.
2583
2584 QualType ElementType;
2585
2586 ComplexType(QualType Element, QualType CanonicalPtr)
2587 : Type(Complex, CanonicalPtr, Element->getDependence()),
2588 ElementType(Element) {}
2589
2590public:
2591 QualType getElementType() const { return ElementType; }
2592
2593 bool isSugared() const { return false; }
2594 QualType desugar() const { return QualType(this, 0); }
2595
2596 void Profile(llvm::FoldingSetNodeID &ID) {
2597 Profile(ID, getElementType());
2598 }
2599
2600 static void Profile(llvm::FoldingSetNodeID &ID, QualType Element) {
2601 ID.AddPointer(Element.getAsOpaquePtr());
2602 }
2603
2604 static bool classof(const Type *T) { return T->getTypeClass() == Complex; }
2605};
2606
/// Sugar for parentheses used when specifying types.
class ParenType : public Type, public llvm::FoldingSetNode {
  friend class ASTContext; // ASTContext creates these.

  /// The type the parentheses wrap.
  QualType Inner;

  ParenType(QualType InnerType, QualType CanonType)
      : Type(Paren, CanonType, InnerType->getDependence()), Inner(InnerType) {}

public:
  QualType getInnerType() const { return Inner; }

  // Pure sugar: desugaring strips the parentheses.
  bool isSugared() const { return true; }
  QualType desugar() const { return getInnerType(); }

  void Profile(llvm::FoldingSetNodeID &ID) {
    Profile(ID, getInnerType());
  }

  static void Profile(llvm::FoldingSetNodeID &ID, QualType Inner) {
    // Note: profiles the QualType itself (not just an opaque pointer),
    // unlike most sibling type classes.
    Inner.Profile(ID);
  }

  static bool classof(const Type *T) { return T->getTypeClass() == Paren; }
};
2632
/// PointerType - C99 6.7.5.1 - Pointer Declarators.
class PointerType : public Type, public llvm::FoldingSetNode {
  friend class ASTContext; // ASTContext creates these.

  /// The type being pointed to.
  QualType PointeeType;

  PointerType(QualType Pointee, QualType CanonicalPtr)
      : Type(Pointer, CanonicalPtr, Pointee->getDependence()),
        PointeeType(Pointee) {}

public:
  QualType getPointeeType() const { return PointeeType; }

  // Not sugar for anything.
  bool isSugared() const { return false; }
  QualType desugar() const { return QualType(this, 0); }

  /// Fold the uniquing key (the pointee type) into \p ID.
  void Profile(llvm::FoldingSetNodeID &ID) {
    Profile(ID, getPointeeType());
  }

  static void Profile(llvm::FoldingSetNodeID &ID, QualType Pointee) {
    ID.AddPointer(Pointee.getAsOpaquePtr());
  }

  static bool classof(const Type *T) { return T->getTypeClass() == Pointer; }
};
2659
/// Represents a type which was implicitly adjusted by the semantic
/// engine for arbitrary reasons. For example, array and function types can
/// decay, and function types can have their calling conventions adjusted.
class AdjustedType : public Type, public llvm::FoldingSetNode {
  QualType OriginalTy; ///< The type as originally written.
  QualType AdjustedTy; ///< The type after the implicit adjustment.

protected:
  friend class ASTContext; // ASTContext creates these.

  AdjustedType(TypeClass TC, QualType OriginalTy, QualType AdjustedTy,
               QualType CanonicalPtr)
      : Type(TC, CanonicalPtr, OriginalTy->getDependence()),
        OriginalTy(OriginalTy), AdjustedTy(AdjustedTy) {}

public:
  QualType getOriginalType() const { return OriginalTy; }
  QualType getAdjustedType() const { return AdjustedTy; }

  // Sugar for the adjusted type.
  bool isSugared() const { return true; }
  QualType desugar() const { return AdjustedTy; }

  void Profile(llvm::FoldingSetNodeID &ID) {
    Profile(ID, OriginalTy, AdjustedTy);
  }

  static void Profile(llvm::FoldingSetNodeID &ID, QualType Orig, QualType New) {
    ID.AddPointer(Orig.getAsOpaquePtr());
    ID.AddPointer(New.getAsOpaquePtr());
  }

  // Also matches DecayedType, which derives from AdjustedType.
  static bool classof(const Type *T) {
    return T->getTypeClass() == Adjusted || T->getTypeClass() == Decayed;
  }
};
2695
/// Represents a pointer type decayed from an array or function type.
class DecayedType : public AdjustedType {
  friend class ASTContext; // ASTContext creates these.

  // Defined inline elsewhere in this header (not visible in this chunk).
  inline
  DecayedType(QualType OriginalType, QualType Decayed, QualType Canonical);

public:
  /// The adjusted (pointer) type the original array/function type decayed to.
  QualType getDecayedType() const { return getAdjustedType(); }

  // Defined inline elsewhere in this header.
  inline QualType getPointeeType() const;

  static bool classof(const Type *T) { return T->getTypeClass() == Decayed; }
};
2710
/// Pointer to a block type.
/// This type is to represent types syntactically represented as
/// "void (^)(int)", etc. Pointee is required to always be a function type.
class BlockPointerType : public Type, public llvm::FoldingSetNode {
  friend class ASTContext; // ASTContext creates these.

  // Block is some kind of pointer type
  QualType PointeeType;

  BlockPointerType(QualType Pointee, QualType CanonicalCls)
      : Type(BlockPointer, CanonicalCls, Pointee->getDependence()),
        PointeeType(Pointee) {}

public:
  // Get the pointee type. Pointee is required to always be a function type.
  QualType getPointeeType() const { return PointeeType; }

  // Not sugar for anything.
  bool isSugared() const { return false; }
  QualType desugar() const { return QualType(this, 0); }

  /// Fold the uniquing key (the pointee type) into \p ID.
  void Profile(llvm::FoldingSetNodeID &ID) {
    Profile(ID, getPointeeType());
  }

  static void Profile(llvm::FoldingSetNodeID &ID, QualType Pointee) {
    ID.AddPointer(Pointee.getAsOpaquePtr());
  }

  static bool classof(const Type *T) {
    return T->getTypeClass() == BlockPointer;
  }
};
2743
/// Base for LValueReferenceType and RValueReferenceType
class ReferenceType : public Type, public llvm::FoldingSetNode {
  /// The type being referred to, exactly as written.
  QualType PointeeType;

protected:
  ReferenceType(TypeClass tc, QualType Referencee, QualType CanonicalRef,
                bool SpelledAsLValue)
      : Type(tc, CanonicalRef, Referencee->getDependence()),
        PointeeType(Referencee) {
    // Cache two facts in the Type bitfields: how the reference was
    // spelled and whether the referencee is itself a reference type.
    ReferenceTypeBits.SpelledAsLValue = SpelledAsLValue;
    ReferenceTypeBits.InnerRef = Referencee->isReferenceType();
  }

public:
  bool isSpelledAsLValue() const { return ReferenceTypeBits.SpelledAsLValue; }
  /// Whether the referencee is itself a reference type (cached at
  /// construction time).
  bool isInnerRef() const { return ReferenceTypeBits.InnerRef; }

  QualType getPointeeTypeAsWritten() const { return PointeeType; }

  /// Return the pointee, walking through any chain of inner references
  /// until a non-reference type is reached.
  QualType getPointeeType() const {
    // FIXME: this might strip inner qualifiers; okay?
    const ReferenceType *T = this;
    while (T->isInnerRef())
      T = T->PointeeType->castAs<ReferenceType>();
    return T->PointeeType;
  }

  void Profile(llvm::FoldingSetNodeID &ID) {
    // Profile on the written pointee, not the collapsed one.
    Profile(ID, PointeeType, isSpelledAsLValue());
  }

  static void Profile(llvm::FoldingSetNodeID &ID,
                      QualType Referencee,
                      bool SpelledAsLValue) {
    ID.AddPointer(Referencee.getAsOpaquePtr());
    ID.AddBoolean(SpelledAsLValue);
  }

  static bool classof(const Type *T) {
    return T->getTypeClass() == LValueReference ||
           T->getTypeClass() == RValueReference;
  }
};
2787
/// An lvalue reference type, per C++11 [dcl.ref].
class LValueReferenceType : public ReferenceType {
  friend class ASTContext; // ASTContext creates these

  LValueReferenceType(QualType Referencee, QualType CanonicalRef,
                      bool SpelledAsLValue)
      : ReferenceType(LValueReference, Referencee, CanonicalRef,
                      SpelledAsLValue) {}

public:
  // Not sugar for anything.
  bool isSugared() const { return false; }
  QualType desugar() const { return QualType(this, 0); }

  static bool classof(const Type *T) {
    return T->getTypeClass() == LValueReference;
  }
};
2805
/// An rvalue reference type, per C++11 [dcl.ref].
class RValueReferenceType : public ReferenceType {
  friend class ASTContext; // ASTContext creates these

  // An rvalue reference is never spelled as an lvalue reference.
  RValueReferenceType(QualType Referencee, QualType CanonicalRef)
      : ReferenceType(RValueReference, Referencee, CanonicalRef, false) {}

public:
  // Not sugar for anything.
  bool isSugared() const { return false; }
  QualType desugar() const { return QualType(this, 0); }

  static bool classof(const Type *T) {
    return T->getTypeClass() == RValueReference;
  }
};
2821
/// A pointer to member type per C++ 8.3.3 - Pointers to members.
///
/// This includes both pointers to data members and pointer to member functions.
class MemberPointerType : public Type, public llvm::FoldingSetNode {
  friend class ASTContext; // ASTContext creates these.

  /// The type of the member being pointed to.
  QualType PointeeType;

  /// The class of which the pointee is a member. Must ultimately be a
  /// RecordType, but could be a typedef or a template parameter too.
  const Type *Class;

  MemberPointerType(QualType Pointee, const Type *Cls, QualType CanonicalPtr)
      : Type(MemberPointer, CanonicalPtr,
             // A member pointer is never variably modified through its
             // class; mask that bit out of the class's dependence.
             (Cls->getDependence() & ~TypeDependence::VariablyModified) |
                 Pointee->getDependence()),
        PointeeType(Pointee), Class(Cls) {}

public:
  QualType getPointeeType() const { return PointeeType; }

  /// Returns true if the member type (i.e. the pointee type) is a
  /// function type rather than a data-member type.
  bool isMemberFunctionPointer() const {
    return PointeeType->isFunctionProtoType();
  }

  /// Returns true if the member type (i.e. the pointee type) is a
  /// data type rather than a function type.
  bool isMemberDataPointer() const {
    return !PointeeType->isFunctionProtoType();
  }

  const Type *getClass() const { return Class; }
  CXXRecordDecl *getMostRecentCXXRecordDecl() const;

  // Not sugar for anything.
  bool isSugared() const { return false; }
  QualType desugar() const { return QualType(this, 0); }

  /// Fold the uniquing key (pointee type + class) into \p ID.
  void Profile(llvm::FoldingSetNodeID &ID) {
    Profile(ID, getPointeeType(), getClass());
  }

  static void Profile(llvm::FoldingSetNodeID &ID, QualType Pointee,
                      const Type *Class) {
    ID.AddPointer(Pointee.getAsOpaquePtr());
    ID.AddPointer(Class);
  }

  static bool classof(const Type *T) {
    return T->getTypeClass() == MemberPointer;
  }
};
2875
/// Represents an array type, per C99 6.7.5.2 - Array Declarators.
class ArrayType : public Type, public llvm::FoldingSetNode {
public:
  /// Capture whether this is a normal array (e.g. int X[4])
  /// an array with a static size (e.g. int X[static 4]), or an array
  /// with a star size (e.g. int X[*]).
  /// 'static' is only allowed on function parameters.
  enum ArraySizeModifier {
    Normal, Static, Star
  };

private:
  /// The element type of the array.
  QualType ElementType;

protected:
  friend class ASTContext; // ASTContext creates these.

  // Defined out of line; stores the size modifier and index-type
  // qualifiers into the ArrayTypeBits bitfields.
  ArrayType(TypeClass tc, QualType et, QualType can, ArraySizeModifier sm,
            unsigned tq, const Expr *sz = nullptr);

public:
  QualType getElementType() const { return ElementType; }

  ArraySizeModifier getSizeModifier() const {
    return ArraySizeModifier(ArrayTypeBits.SizeModifier);
  }

  /// The cv-qualifiers written inside the array brackets (C99 6.7.5.2),
  /// as a Qualifiers object.
  Qualifiers getIndexTypeQualifiers() const {
    return Qualifiers::fromCVRMask(getIndexTypeCVRQualifiers());
  }

  /// The raw CVR mask of the index-type qualifiers.
  unsigned getIndexTypeCVRQualifiers() const {
    return ArrayTypeBits.IndexTypeQuals;
  }

  // Matches every concrete array subclass.
  static bool classof(const Type *T) {
    return T->getTypeClass() == ConstantArray ||
           T->getTypeClass() == VariableArray ||
           T->getTypeClass() == IncompleteArray ||
           T->getTypeClass() == DependentSizedArray;
  }
};
2919
/// Represents the canonical version of C arrays with a specified constant size.
/// For example, the canonical type for 'int A[4 + 4*100]' is a
/// ConstantArrayType where the element type is 'int' and the size is 404.
class ConstantArrayType final
    : public ArrayType,
      private llvm::TrailingObjects<ConstantArrayType, const Expr *> {
  friend class ASTContext; // ASTContext creates these.
  friend TrailingObjects;

  llvm::APInt Size; // Allows us to unique the type.

  ConstantArrayType(QualType et, QualType can, const llvm::APInt &size,
                    const Expr *sz, ArraySizeModifier sm, unsigned tq)
      : ArrayType(ConstantArray, et, can, sm, tq, sz), Size(size) {
    // The original size expression, when present, is kept as a single
    // trailing object rather than a member, so types without one pay
    // no storage cost.
    ConstantArrayTypeBits.HasStoredSizeExpr = sz != nullptr;
    if (ConstantArrayTypeBits.HasStoredSizeExpr) {
      assert(!can.isNull() && "canonical constant array should not have size");
      *getTrailingObjects<const Expr*>() = sz;
    }
  }

  unsigned numTrailingObjects(OverloadToken<const Expr*>) const {
    return ConstantArrayTypeBits.HasStoredSizeExpr;
  }

public:
  const llvm::APInt &getSize() const { return Size; }
  /// The size expression as written, or null if none was stored.
  const Expr *getSizeExpr() const {
    return ConstantArrayTypeBits.HasStoredSizeExpr
               ? *getTrailingObjects<const Expr *>()
               : nullptr;
  }
  // Not sugar for anything.
  bool isSugared() const { return false; }
  QualType desugar() const { return QualType(this, 0); }

  /// Determine the number of bits required to address a member of
  // an array with the given element type and number of elements.
  static unsigned getNumAddressingBits(const ASTContext &Context,
                                       QualType ElementType,
                                       const llvm::APInt &NumElements);

  /// Determine the maximum number of active bits that an array's size
  /// can require, which limits the maximum size of the array.
  static unsigned getMaxSizeBits(const ASTContext &Context);

  void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Ctx) {
    Profile(ID, Ctx, getElementType(), getSize(), getSizeExpr(),
            getSizeModifier(), getIndexTypeCVRQualifiers());
  }

  static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Ctx,
                      QualType ET, const llvm::APInt &ArraySize,
                      const Expr *SizeExpr, ArraySizeModifier SizeMod,
                      unsigned TypeQuals);

  static bool classof(const Type *T) {
    return T->getTypeClass() == ConstantArray;
  }
};
2979
/// Represents a C array with an unspecified size. For example 'int A[]' has
/// an IncompleteArrayType where the element type is 'int' and the size is
/// unspecified.
class IncompleteArrayType : public ArrayType {
  friend class ASTContext; // ASTContext creates these.

  IncompleteArrayType(QualType et, QualType can,
                      ArraySizeModifier sm, unsigned tq)
      : ArrayType(IncompleteArray, et, can, sm, tq) {}

public:
  friend class StmtIteratorBase;

  // Not sugar for anything.
  bool isSugared() const { return false; }
  QualType desugar() const { return QualType(this, 0); }

  static bool classof(const Type *T) {
    return T->getTypeClass() == IncompleteArray;
  }

  /// Fold the uniquing key (element type, size modifier, index
  /// qualifiers) into \p ID.
  void Profile(llvm::FoldingSetNodeID &ID) {
    Profile(ID, getElementType(), getSizeModifier(),
            getIndexTypeCVRQualifiers());
  }

  static void Profile(llvm::FoldingSetNodeID &ID, QualType ET,
                      ArraySizeModifier SizeMod, unsigned TypeQuals) {
    ID.AddPointer(ET.getAsOpaquePtr());
    ID.AddInteger(SizeMod);
    ID.AddInteger(TypeQuals);
  }
};
3012
/// Represents a C array with a specified size that is not an
/// integer-constant-expression. For example, 'int s[x+foo()]'.
/// Since the size expression is an arbitrary expression, we store it as such.
///
/// Note: VariableArrayType's aren't uniqued (since the expressions aren't) and
/// should not be: two lexically equivalent variable array types could mean
/// different things, for example, these variables do not have the same type
/// dynamically:
///
/// void foo(int x) {
///   int Y[x];
///   ++x;
///   int Z[x];
/// }
class VariableArrayType : public ArrayType {
  friend class ASTContext; // ASTContext creates these.

  /// An assignment-expression. VLA's are only permitted within
  /// a function block.
  /// Stored as Stmt* to avoid a header dependency on Expr.h (see
  /// getSizeExpr below).
  Stmt *SizeExpr;

  /// The range spanned by the left and right array brackets.
  SourceRange Brackets;

  VariableArrayType(QualType et, QualType can, Expr *e,
                    ArraySizeModifier sm, unsigned tq,
                    SourceRange brackets)
      : ArrayType(VariableArray, et, can, sm, tq, e),
        SizeExpr((Stmt*) e), Brackets(brackets) {}

public:
  friend class StmtIteratorBase;

  Expr *getSizeExpr() const {
    // We use C-style casts instead of cast<> here because we do not wish
    // to have a dependency of Type.h on Stmt.h/Expr.h.
    return (Expr*) SizeExpr;
  }

  SourceRange getBracketsRange() const { return Brackets; }
  SourceLocation getLBracketLoc() const { return Brackets.getBegin(); }
  SourceLocation getRBracketLoc() const { return Brackets.getEnd(); }

  // Not sugar for anything.
  bool isSugared() const { return false; }
  QualType desugar() const { return QualType(this, 0); }

  static bool classof(const Type *T) {
    return T->getTypeClass() == VariableArray;
  }

  // VLA types are deliberately not uniqued (see class comment above).
  void Profile(llvm::FoldingSetNodeID &ID) {
    llvm_unreachable("Cannot unique VariableArrayTypes.");
  }
};
3067
/// Represents an array type in C++ whose size is a value-dependent expression.
///
/// For example:
/// \code
/// template<typename T, int Size>
/// class array {
///   T data[Size];
/// };
/// \endcode
///
/// For these types, we won't actually know what the array bound is
/// until template instantiation occurs, at which point this will
/// become either a ConstantArrayType or a VariableArrayType.
class DependentSizedArrayType : public ArrayType {
  friend class ASTContext; // ASTContext creates these.

  // Needed by Profile to re-derive the uniquing key.
  const ASTContext &Context;

  /// An assignment expression that will instantiate to the
  /// size of the array.
  ///
  /// The expression itself might be null, in which case the array
  /// type will have its size deduced from an initializer.
  Stmt *SizeExpr;

  /// The range spanned by the left and right array brackets.
  SourceRange Brackets;

  DependentSizedArrayType(const ASTContext &Context, QualType et, QualType can,
                          Expr *e, ArraySizeModifier sm, unsigned tq,
                          SourceRange brackets);

public:
  friend class StmtIteratorBase;

  /// The size expression, or null if the size will be deduced from an
  /// initializer.
  Expr *getSizeExpr() const {
    // We use C-style casts instead of cast<> here because we do not wish
    // to have a dependency of Type.h on Stmt.h/Expr.h.
    return (Expr*) SizeExpr;
  }

  SourceRange getBracketsRange() const { return Brackets; }
  SourceLocation getLBracketLoc() const { return Brackets.getBegin(); }
  SourceLocation getRBracketLoc() const { return Brackets.getEnd(); }

  // Not sugar for anything.
  bool isSugared() const { return false; }
  QualType desugar() const { return QualType(this, 0); }

  static bool classof(const Type *T) {
    return T->getTypeClass() == DependentSizedArray;
  }

  void Profile(llvm::FoldingSetNodeID &ID) {
    Profile(ID, Context, getElementType(),
            getSizeModifier(), getIndexTypeCVRQualifiers(), getSizeExpr());
  }

  static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
                      QualType ET, ArraySizeModifier SizeMod,
                      unsigned TypeQuals, Expr *E);
};
3129
/// Represents an extended address space qualifier where the input address space
/// value is dependent. Non-dependent address spaces are not represented with a
/// special Type subclass; they are stored on an ExtQuals node as part of a QualType.
///
/// For example:
/// \code
/// template<typename T, int AddrSpace>
/// class AddressSpace {
///   typedef T __attribute__((address_space(AddrSpace))) type;
/// }
/// \endcode
class DependentAddressSpaceType : public Type, public llvm::FoldingSetNode {
  friend class ASTContext;

  // Needed by Profile to re-derive the uniquing key.
  const ASTContext &Context;
  /// The (value-dependent) address-space expression.
  Expr *AddrSpaceExpr;
  /// The type being qualified.
  QualType PointeeType;
  /// Location of the address_space attribute.
  SourceLocation loc;

  DependentAddressSpaceType(const ASTContext &Context, QualType PointeeType,
                            QualType can, Expr *AddrSpaceExpr,
                            SourceLocation loc);

public:
  Expr *getAddrSpaceExpr() const { return AddrSpaceExpr; }
  QualType getPointeeType() const { return PointeeType; }
  SourceLocation getAttributeLoc() const { return loc; }

  // Not sugar for anything.
  bool isSugared() const { return false; }
  QualType desugar() const { return QualType(this, 0); }

  static bool classof(const Type *T) {
    return T->getTypeClass() == DependentAddressSpace;
  }

  void Profile(llvm::FoldingSetNodeID &ID) {
    Profile(ID, Context, getPointeeType(), getAddrSpaceExpr());
  }

  static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
                      QualType PointeeType, Expr *AddrSpaceExpr);
};
3172
/// Represents an extended vector type where either the type or size is
/// dependent.
///
/// For example:
/// \code
/// template<typename T, int Size>
/// class vector {
///   typedef T __attribute__((ext_vector_type(Size))) type;
/// }
/// \endcode
class DependentSizedExtVectorType : public Type, public llvm::FoldingSetNode {
  friend class ASTContext;

  // Needed by Profile to re-derive the uniquing key.
  const ASTContext &Context;
  /// The (dependent) number-of-elements expression.
  Expr *SizeExpr;

  /// The element type of the array.
  QualType ElementType;

  /// Location of the ext_vector_type attribute.
  SourceLocation loc;

  DependentSizedExtVectorType(const ASTContext &Context, QualType ElementType,
                              QualType can, Expr *SizeExpr, SourceLocation loc);

public:
  Expr *getSizeExpr() const { return SizeExpr; }
  QualType getElementType() const { return ElementType; }
  SourceLocation getAttributeLoc() const { return loc; }

  // Not sugar for anything.
  bool isSugared() const { return false; }
  QualType desugar() const { return QualType(this, 0); }

  static bool classof(const Type *T) {
    return T->getTypeClass() == DependentSizedExtVector;
  }

  void Profile(llvm::FoldingSetNodeID &ID) {
    Profile(ID, Context, getElementType(), getSizeExpr());
  }

  static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
                      QualType ElementType, Expr *SizeExpr);
};
3216
3217
/// Represents a GCC generic vector type. This type is created using
/// __attribute__((vector_size(n)), where "n" specifies the vector size in
/// bytes; or from an Altivec __vector or vector declaration.
/// Since the constructor takes the number of vector elements, the
/// client is responsible for converting the size into the number of elements.
class VectorType : public Type, public llvm::FoldingSetNode {
public:
  enum VectorKind {
    /// not a target-specific vector type
    GenericVector,

    /// is AltiVec vector
    AltiVecVector,

    /// is AltiVec 'vector Pixel'
    AltiVecPixel,

    /// is AltiVec 'vector bool ...'
    AltiVecBool,

    /// is ARM Neon vector
    NeonVector,

    /// is ARM Neon polynomial vector
    NeonPolyVector,

    /// is AArch64 SVE fixed-length data vector
    SveFixedLengthDataVector,

    /// is AArch64 SVE fixed-length predicate vector
    SveFixedLengthPredicateVector
  };

protected:
  friend class ASTContext; // ASTContext creates these.

  /// The element type of the vector.
  QualType ElementType;

  // Both constructors are defined out of line; they store the element
  // count and kind into the VectorTypeBits bitfields.
  VectorType(QualType vecType, unsigned nElements, QualType canonType,
             VectorKind vecKind);

  VectorType(TypeClass tc, QualType vecType, unsigned nElements,
             QualType canonType, VectorKind vecKind);

public:
  QualType getElementType() const { return ElementType; }
  unsigned getNumElements() const { return VectorTypeBits.NumElements; }

  // Not sugar for anything.
  bool isSugared() const { return false; }
  QualType desugar() const { return QualType(this, 0); }

  VectorKind getVectorKind() const {
    return VectorKind(VectorTypeBits.VecKind);
  }

  /// Fold the uniquing key (element type, element count, concrete type
  /// class, and vector kind) into \p ID. The type class is included
  /// because ExtVectorType shares this Profile.
  void Profile(llvm::FoldingSetNodeID &ID) {
    Profile(ID, getElementType(), getNumElements(),
            getTypeClass(), getVectorKind());
  }

  static void Profile(llvm::FoldingSetNodeID &ID, QualType ElementType,
                      unsigned NumElements, TypeClass TypeClass,
                      VectorKind VecKind) {
    ID.AddPointer(ElementType.getAsOpaquePtr());
    ID.AddInteger(NumElements);
    ID.AddInteger(TypeClass);
    ID.AddInteger(VecKind);
  }

  static bool classof(const Type *T) {
    return T->getTypeClass() == Vector || T->getTypeClass() == ExtVector;
  }
};
3292
/// Represents a vector type where either the type or size is dependent.
////
/// For example:
/// \code
/// template<typename T, int Size>
/// class vector {
///   typedef T __attribute__((vector_size(Size))) type;
/// }
/// \endcode
class DependentVectorType : public Type, public llvm::FoldingSetNode {
  friend class ASTContext;

  // Needed by Profile to re-derive the uniquing key.
  const ASTContext &Context;
  QualType ElementType;
  /// The (dependent) vector-size expression.
  Expr *SizeExpr;
  /// Location of the vector_size attribute.
  SourceLocation Loc;

  DependentVectorType(const ASTContext &Context, QualType ElementType,
                      QualType CanonType, Expr *SizeExpr,
                      SourceLocation Loc, VectorType::VectorKind vecKind);

public:
  Expr *getSizeExpr() const { return SizeExpr; }
  QualType getElementType() const { return ElementType; }
  SourceLocation getAttributeLoc() const { return Loc; }
  // The kind is stored in the shared VectorTypeBits bitfield.
  VectorType::VectorKind getVectorKind() const {
    return VectorType::VectorKind(VectorTypeBits.VecKind);
  }

  // Not sugar for anything.
  bool isSugared() const { return false; }
  QualType desugar() const { return QualType(this, 0); }

  static bool classof(const Type *T) {
    return T->getTypeClass() == DependentVector;
  }

  void Profile(llvm::FoldingSetNodeID &ID) {
    Profile(ID, Context, getElementType(), getSizeExpr(), getVectorKind());
  }

  static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
                      QualType ElementType, const Expr *SizeExpr,
                      VectorType::VectorKind VecKind);
};
3337
3338/// ExtVectorType - Extended vector type. This type is created using
3339/// __attribute__((ext_vector_type(n)), where "n" is the number of elements.
3340/// Unlike vector_size, ext_vector_type is only allowed on typedef's. This
3341/// class enables syntactic extensions, like Vector Components for accessing
3342/// points (as .xyzw), colors (as .rgba), and textures (modeled after OpenGL
3343/// Shading Language).
3344class ExtVectorType : public VectorType {
3345 friend class ASTContext; // ASTContext creates these.
3346
3347 ExtVectorType(QualType vecType, unsigned nElements, QualType canonType)
3348 : VectorType(ExtVector, vecType, nElements, canonType, GenericVector) {}
3349
3350public:
3351 static int getPointAccessorIdx(char c) {
3352 switch (c) {
3353 default: return -1;
3354 case 'x': case 'r': return 0;
3355 case 'y': case 'g': return 1;
3356 case 'z': case 'b': return 2;
3357 case 'w': case 'a': return 3;
3358 }
3359 }
3360
3361 static int getNumericAccessorIdx(char c) {
3362 switch (c) {
3363 default: return -1;
3364 case '0': return 0;
3365 case '1': return 1;
3366 case '2': return 2;
3367 case '3': return 3;
3368 case '4': return 4;
3369 case '5': return 5;
3370 case '6': return 6;
3371 case '7': return 7;
3372 case '8': return 8;
3373 case '9': return 9;
3374 case 'A':
3375 case 'a': return 10;
3376 case 'B':
3377 case 'b': return 11;
3378 case 'C':
3379 case 'c': return 12;
3380 case 'D':
3381 case 'd': return 13;
3382 case 'E':
3383 case 'e': return 14;
3384 case 'F':
3385 case 'f': return 15;
3386 }
3387 }
3388
3389 static int getAccessorIdx(char c, bool isNumericAccessor) {
3390 if (isNumericAccessor)
3391 return getNumericAccessorIdx(c);
3392 else
3393 return getPointAccessorIdx(c);
3394 }
3395
3396 bool isAccessorWithinNumElements(char c, bool isNumericAccessor) const {
3397 if (int idx = getAccessorIdx(c, isNumericAccessor)+1)
3398 return unsigned(idx-1) < getNumElements();
3399 return false;
3400 }
3401
3402 bool isSugared() const { return false; }
3403 QualType desugar() const { return QualType(this, 0); }
3404
3405 static bool classof(const Type *T) {
3406 return T->getTypeClass() == ExtVector;
3407 }
3408};
3409
/// Represents a matrix type, as defined in the Matrix Types clang extensions.
/// __attribute__((matrix_type(rows, columns))), where "rows" specifies
/// number of rows and "columns" specifies the number of columns.
class MatrixType : public Type, public llvm::FoldingSetNode {
protected:
  friend class ASTContext;

  /// The element type of the matrix.
  QualType ElementType;

  // Both constructors are defined out of line.
  MatrixType(QualType ElementTy, QualType CanonElementTy);

  MatrixType(TypeClass TypeClass, QualType ElementTy, QualType CanonElementTy,
             const Expr *RowExpr = nullptr, const Expr *ColumnExpr = nullptr);

public:
  /// Returns type of the elements being stored in the matrix
  QualType getElementType() const { return ElementType; }

  /// Valid elements types are the following:
  /// * an integer type (as in C2x 6.2.5p19), but excluding enumerated types
  ///   and _Bool
  /// * the standard floating types float or double
  /// * a half-precision floating point type, if one is supported on the target
  static bool isValidElementType(QualType T) {
    // Dependent types are accepted here; they are checked again after
    // instantiation.
    return T->isDependentType() ||
           (T->isRealType() && !T->isBooleanType() && !T->isEnumeralType());
  }

  // Not sugar for anything.
  bool isSugared() const { return false; }
  QualType desugar() const { return QualType(this, 0); }

  // Matches both concrete and dependently-sized matrix subclasses.
  static bool classof(const Type *T) {
    return T->getTypeClass() == ConstantMatrix ||
           T->getTypeClass() == DependentSizedMatrix;
  }
};
3447
/// Represents a concrete matrix type with constant number of rows and columns
class ConstantMatrixType final : public MatrixType {
protected:
  friend class ASTContext;

  /// The element type of the matrix.
  // FIXME: Appears to be unused? There is also MatrixType::ElementType...
  // (Shadows the base-class member; getElementType() is inherited and
  // returns MatrixType::ElementType.)
  QualType ElementType;

  /// Number of rows and columns.
  unsigned NumRows;
  unsigned NumColumns;

  // Dimensions are limited to 20 bits each.
  static constexpr unsigned MaxElementsPerDimension = (1 << 20) - 1;

  // Both constructors are defined out of line.
  ConstantMatrixType(QualType MatrixElementType, unsigned NRows,
                     unsigned NColumns, QualType CanonElementType);

  ConstantMatrixType(TypeClass typeClass, QualType MatrixType, unsigned NRows,
                     unsigned NColumns, QualType CanonElementType);

public:
  /// Returns the number of rows in the matrix.
  unsigned getNumRows() const { return NumRows; }

  /// Returns the number of columns in the matrix.
  unsigned getNumColumns() const { return NumColumns; }

  /// Returns the number of elements required to embed the matrix into a vector.
  unsigned getNumElementsFlattened() const {
    return getNumRows() * getNumColumns();
  }

  /// Returns true if \p NumElements is a valid matrix dimension.
  static constexpr bool isDimensionValid(size_t NumElements) {
    return NumElements > 0 && NumElements <= MaxElementsPerDimension;
  }

  /// Returns the maximum number of elements per dimension.
  static constexpr unsigned getMaxElementsPerDimension() {
    return MaxElementsPerDimension;
  }

  void Profile(llvm::FoldingSetNodeID &ID) {
    Profile(ID, getElementType(), getNumRows(), getNumColumns(),
            getTypeClass());
  }

  static void Profile(llvm::FoldingSetNodeID &ID, QualType ElementType,
                      unsigned NumRows, unsigned NumColumns,
                      TypeClass TypeClass) {
    ID.AddPointer(ElementType.getAsOpaquePtr());
    ID.AddInteger(NumRows);
    ID.AddInteger(NumColumns);
    ID.AddInteger(TypeClass);
  }

  static bool classof(const Type *T) {
    return T->getTypeClass() == ConstantMatrix;
  }
};
3509
/// Represents a matrix type where the type and the number of rows and columns
/// is dependent on a template.
class DependentSizedMatrixType final : public MatrixType {
  friend class ASTContext;

  // Needed by Profile to re-derive the uniquing key.
  const ASTContext &Context;
  /// The (dependent) row and column count expressions.
  Expr *RowExpr;
  Expr *ColumnExpr;

  /// Location of the matrix_type attribute.
  SourceLocation loc;

  DependentSizedMatrixType(const ASTContext &Context, QualType ElementType,
                           QualType CanonicalType, Expr *RowExpr,
                           Expr *ColumnExpr, SourceLocation loc);

public:
  QualType getElementType() const { return ElementType; }
  Expr *getRowExpr() const { return RowExpr; }
  Expr *getColumnExpr() const { return ColumnExpr; }
  SourceLocation getAttributeLoc() const { return loc; }

  // Not sugar for anything.
  bool isSugared() const { return false; }
  QualType desugar() const { return QualType(this, 0); }

  static bool classof(const Type *T) {
    return T->getTypeClass() == DependentSizedMatrix;
  }

  void Profile(llvm::FoldingSetNodeID &ID) {
    Profile(ID, Context, getElementType(), getRowExpr(), getColumnExpr());
  }

  static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
                      QualType ElementType, Expr *RowExpr, Expr *ColumnExpr);
};
3545
/// FunctionType - C99 6.7.5.3 - Function Declarators. This is the common base
/// class of FunctionNoProtoType and FunctionProtoType.
class FunctionType : public Type {
  // The type returned by the function.
  QualType ResultType;

public:
  /// Interesting information about a specific parameter that can't simply
  /// be reflected in parameter's type. This is only used by FunctionProtoType
  /// but is in FunctionType to make this class available during the
  /// specification of the bases of FunctionProtoType.
  ///
  /// It makes sense to model language features this way when there's some
  /// sort of parameter-specific override (such as an attribute) that
  /// affects how the function is called. For example, the ARC ns_consumed
  /// attribute changes whether a parameter is passed at +0 (the default)
  /// or +1 (ns_consumed). This must be reflected in the function type,
  /// but isn't really a change to the parameter type.
  ///
  /// One serious disadvantage of modelling language features this way is
  /// that they generally do not work with language features that attempt
  /// to destructure types. For example, template argument deduction will
  /// not be able to match a parameter declared as
  ///   T (*)(U)
  /// against an argument of type
  ///   void (*)(__attribute__((ns_consumed)) id)
  /// because the substitution of T=void, U=id into the former will
  /// not produce the latter.
  class ExtParameterInfo {
    // All flags are packed into a single byte; see the masks below.
    enum {
      ABIMask = 0x0F,
      IsConsumed = 0x10,
      HasPassObjSize = 0x20,
      IsNoEscape = 0x40,
    };
    unsigned char Data = 0;

  public:
    ExtParameterInfo() = default;

    /// Return the ABI treatment of this parameter.
    ParameterABI getABI() const { return ParameterABI(Data & ABIMask); }
    /// Return a copy of this info with the ABI treatment replaced.
    ExtParameterInfo withABI(ParameterABI kind) const {
      ExtParameterInfo copy = *this;
      copy.Data = (copy.Data & ~ABIMask) | unsigned(kind);
      return copy;
    }

    /// Is this parameter considered "consumed" by Objective-C ARC?
    /// Consumed parameters must have retainable object type.
    bool isConsumed() const { return (Data & IsConsumed); }
    /// Return a copy of this info with the "consumed" flag set or cleared.
    ExtParameterInfo withIsConsumed(bool consumed) const {
      ExtParameterInfo copy = *this;
      if (consumed)
        copy.Data |= IsConsumed;
      else
        copy.Data &= ~IsConsumed;
      return copy;
    }

    bool hasPassObjectSize() const { return Data & HasPassObjSize; }
    ExtParameterInfo withHasPassObjectSize() const {
      ExtParameterInfo Copy = *this;
      Copy.Data |= HasPassObjSize;
      return Copy;
    }

    bool isNoEscape() const { return Data & IsNoEscape; }
    ExtParameterInfo withIsNoEscape(bool NoEscape) const {
      ExtParameterInfo Copy = *this;
      if (NoEscape)
        Copy.Data |= IsNoEscape;
      else
        Copy.Data &= ~IsNoEscape;
      return Copy;
    }

    // Raw bit pattern; used for serialization and hashing.
    unsigned char getOpaqueValue() const { return Data; }
    static ExtParameterInfo getFromOpaqueValue(unsigned char data) {
      ExtParameterInfo result;
      result.Data = data;
      return result;
    }

    friend bool operator==(ExtParameterInfo lhs, ExtParameterInfo rhs) {
      return lhs.Data == rhs.Data;
    }

    friend bool operator!=(ExtParameterInfo lhs, ExtParameterInfo rhs) {
      return lhs.Data != rhs.Data;
    }
  };

  /// A class which abstracts out some details necessary for
  /// making a call.
  ///
  /// It is not actually used directly for storing this information in
  /// a FunctionType, although FunctionType does currently use the
  /// same bit-pattern.
  ///
  // If you add a field (say Foo), other than the obvious places (both,
  // constructors, compile failures), what you need to update is
  // * Operator==
  // * getFoo
  // * withFoo
  // * functionType. Add Foo, getFoo.
  // * ASTContext::getFooType
  // * ASTContext::mergeFunctionTypes
  // * FunctionNoProtoType::Profile
  // * FunctionProtoType::Profile
  // * TypePrinter::PrintFunctionProto
  // * AST read and write
  // * Codegen
  class ExtInfo {
    friend class FunctionType;

    // Feel free to rearrange or add bits, but if you go over 16, you'll need to
    // adjust the Bits field below, and if you add bits, you'll need to adjust
    // Type::FunctionTypeBitfields::ExtInfo as well.

    // |  CC  |noreturn|produces|nocallersavedregs|regparm|nocfcheck|cmsenscall|
    // |0 .. 4|   5    |    6   |        7        |8 .. 10|    11   |    12    |
    //
    // regparm is either 0 (no regparm attribute) or the regparm value+1.
    enum { CallConvMask = 0x1F };
    enum { NoReturnMask = 0x20 };
    enum { ProducesResultMask = 0x40 };
    enum { NoCallerSavedRegsMask = 0x80 };
    enum {
      RegParmMask = 0x700,
      RegParmOffset = 8
    };
    enum { NoCfCheckMask = 0x800 };
    enum { CmseNSCallMask = 0x1000 };
    uint16_t Bits = CC_C;

    ExtInfo(unsigned Bits) : Bits(static_cast<uint16_t>(Bits)) {}

  public:
    // Constructor with no defaults. Use this when you know that you
    // have all the elements (when reading an AST file for example).
    ExtInfo(bool noReturn, bool hasRegParm, unsigned regParm, CallingConv cc,
            bool producesResult, bool noCallerSavedRegs, bool NoCfCheck,
            bool cmseNSCall) {
      assert((!hasRegParm || regParm < 7) && "Invalid regparm value");
      Bits = ((unsigned)cc) | (noReturn ? NoReturnMask : 0) |
             (producesResult ? ProducesResultMask : 0) |
             (noCallerSavedRegs ? NoCallerSavedRegsMask : 0) |
             (hasRegParm ? ((regParm + 1) << RegParmOffset) : 0) |
             (NoCfCheck ? NoCfCheckMask : 0) |
             (cmseNSCall ? CmseNSCallMask : 0);
    }

    // Constructor with all defaults. Use when for example creating a
    // function known to use defaults.
    ExtInfo() = default;

    // Constructor with just the calling convention, which is an important part
    // of the canonical type.
    ExtInfo(CallingConv CC) : Bits(CC) {}

    bool getNoReturn() const { return Bits & NoReturnMask; }
    bool getProducesResult() const { return Bits & ProducesResultMask; }
    bool getCmseNSCall() const { return Bits & CmseNSCallMask; }
    bool getNoCallerSavedRegs() const { return Bits & NoCallerSavedRegsMask; }
    bool getNoCfCheck() const { return Bits & NoCfCheckMask; }
    bool getHasRegParm() const { return ((Bits & RegParmMask) >> RegParmOffset) != 0; }

    unsigned getRegParm() const {
      // The field stores regparm biased by one; 0 means "no regparm
      // attribute", so undo the bias before returning.
      unsigned RegParm = (Bits & RegParmMask) >> RegParmOffset;
      if (RegParm > 0)
        --RegParm;
      return RegParm;
    }

    CallingConv getCC() const { return CallingConv(Bits & CallConvMask); }

    bool operator==(ExtInfo Other) const {
      return Bits == Other.Bits;
    }
    bool operator!=(ExtInfo Other) const {
      return Bits != Other.Bits;
    }

    // Note that we don't have setters. That is by design, use
    // the following with methods instead of mutating these objects.

    ExtInfo withNoReturn(bool noReturn) const {
      if (noReturn)
        return ExtInfo(Bits | NoReturnMask);
      else
        return ExtInfo(Bits & ~NoReturnMask);
    }

    ExtInfo withProducesResult(bool producesResult) const {
      if (producesResult)
        return ExtInfo(Bits | ProducesResultMask);
      else
        return ExtInfo(Bits & ~ProducesResultMask);
    }

    ExtInfo withCmseNSCall(bool cmseNSCall) const {
      if (cmseNSCall)
        return ExtInfo(Bits | CmseNSCallMask);
      else
        return ExtInfo(Bits & ~CmseNSCallMask);
    }

    ExtInfo withNoCallerSavedRegs(bool noCallerSavedRegs) const {
      if (noCallerSavedRegs)
        return ExtInfo(Bits | NoCallerSavedRegsMask);
      else
        return ExtInfo(Bits & ~NoCallerSavedRegsMask);
    }

    ExtInfo withNoCfCheck(bool noCfCheck) const {
      if (noCfCheck)
        return ExtInfo(Bits | NoCfCheckMask);
      else
        return ExtInfo(Bits & ~NoCfCheckMask);
    }

    ExtInfo withRegParm(unsigned RegParm) const {
      assert(RegParm < 7 && "Invalid regparm value");
      // Stored biased by one; see getRegParm().
      return ExtInfo((Bits & ~RegParmMask) |
                     ((RegParm + 1) << RegParmOffset));
    }

    ExtInfo withCallingConv(CallingConv cc) const {
      return ExtInfo((Bits & ~CallConvMask) | (unsigned) cc);
    }

    // Fold the whole bit-pattern into the folding-set ID at once.
    void Profile(llvm::FoldingSetNodeID &ID) const {
      ID.AddInteger(Bits);
    }
  };

  /// A simple holder for a QualType representing a type in an
  /// exception specification. Unfortunately needed by FunctionProtoType
  /// because TrailingObjects cannot handle repeated types.
  struct ExceptionType { QualType Type; };

  /// A simple holder for various uncommon bits which do not fit in
  /// FunctionTypeBitfields. Aligned to alignof(void *) to maintain the
  /// alignment of subsequent objects in TrailingObjects. You must update
  /// hasExtraBitfields in FunctionProtoType after adding extra data here.
  struct alignas(void *) FunctionTypeExtraBitfields {
    /// The number of types in the exception specification.
    /// A whole unsigned is not needed here and according to
    /// [implimits] 8 bits would be enough here.
    unsigned NumExceptionType;
  };

protected:
  FunctionType(TypeClass tc, QualType res, QualType Canonical,
               TypeDependence Dependence, ExtInfo Info)
      : Type(tc, Canonical, Dependence), ResultType(res) {
    // ExtInfo is stored in the shared Type bitfields rather than as a member.
    FunctionTypeBits.ExtInfo = Info.Bits;
  }

  Qualifiers getFastTypeQuals() const {
    return Qualifiers::fromFastMask(FunctionTypeBits.FastTypeQuals);
  }

public:
  QualType getReturnType() const { return ResultType; }

  bool getHasRegParm() const { return getExtInfo().getHasRegParm(); }
  unsigned getRegParmType() const { return getExtInfo().getRegParm(); }

  /// Determine whether this function type includes the GNU noreturn
  /// attribute. The C++11 [[noreturn]] attribute does not affect the function
  /// type.
  bool getNoReturnAttr() const { return getExtInfo().getNoReturn(); }

  bool getCmseNSCallAttr() const { return getExtInfo().getCmseNSCall(); }
  CallingConv getCallConv() const { return getExtInfo().getCC(); }
  ExtInfo getExtInfo() const { return ExtInfo(FunctionTypeBits.ExtInfo); }

  static_assert((~Qualifiers::FastMask & Qualifiers::CVRMask) == 0,
                "Const, volatile and restrict are assumed to be a subset of "
                "the fast qualifiers.");

  bool isConst() const { return getFastTypeQuals().hasConst(); }
  bool isVolatile() const { return getFastTypeQuals().hasVolatile(); }
  bool isRestrict() const { return getFastTypeQuals().hasRestrict(); }

  /// Determine the type of an expression that calls a function of
  /// this type.
  QualType getCallResultType(const ASTContext &Context) const {
    return getReturnType().getNonLValueExprType(Context);
  }

  static StringRef getNameForCallConv(CallingConv CC);

  static bool classof(const Type *T) {
    return T->getTypeClass() == FunctionNoProto ||
           T->getTypeClass() == FunctionProto;
  }
};
3846
3847/// Represents a K&R-style 'int foo()' function, which has
3848/// no information available about its arguments.
3849class FunctionNoProtoType : public FunctionType, public llvm::FoldingSetNode {
3850 friend class ASTContext; // ASTContext creates these.
3851
3852 FunctionNoProtoType(QualType Result, QualType Canonical, ExtInfo Info)
3853 : FunctionType(FunctionNoProto, Result, Canonical,
3854 Result->getDependence() &
3855 ~(TypeDependence::DependentInstantiation |
3856 TypeDependence::UnexpandedPack),
3857 Info) {}
3858
3859public:
3860 // No additional state past what FunctionType provides.
3861
3862 bool isSugared() const { return false; }
3863 QualType desugar() const { return QualType(this, 0); }
3864
3865 void Profile(llvm::FoldingSetNodeID &ID) {
3866 Profile(ID, getReturnType(), getExtInfo());
3867 }
3868
3869 static void Profile(llvm::FoldingSetNodeID &ID, QualType ResultType,
3870 ExtInfo Info) {
3871 Info.Profile(ID);
3872 ID.AddPointer(ResultType.getAsOpaquePtr());
3873 }
3874
3875 static bool classof(const Type *T) {
3876 return T->getTypeClass() == FunctionNoProto;
3877 }
3878};
3879
/// Represents a prototype with parameter type info, e.g.
/// 'int foo(int)' or 'int foo(void)'. 'void' is represented as having no
/// parameters, not as having a single void parameter. Such a type can have
/// an exception specification, but this specification is not part of the
/// canonical type. FunctionProtoType has several trailing objects, some of
/// which optional. For more information about the trailing objects see
/// the first comment inside FunctionProtoType.
class FunctionProtoType final
    : public FunctionType,
      public llvm::FoldingSetNode,
      private llvm::TrailingObjects<
          FunctionProtoType, QualType, SourceLocation,
          FunctionType::FunctionTypeExtraBitfields, FunctionType::ExceptionType,
          Expr *, FunctionDecl *, FunctionType::ExtParameterInfo, Qualifiers> {
  friend class ASTContext; // ASTContext creates these.
  friend TrailingObjects;

  // FunctionProtoType is followed by several trailing objects, some of
  // which optional. They are in order:
  //
  // * An array of getNumParams() QualType holding the parameter types.
  //   Always present. Note that for the vast majority of FunctionProtoType,
  //   these will be the only trailing objects.
  //
  // * Optionally if the function is variadic, the SourceLocation of the
  //   ellipsis.
  //
  // * Optionally if some extra data is stored in FunctionTypeExtraBitfields
  //   (see FunctionTypeExtraBitfields and FunctionTypeBitfields):
  //   a single FunctionTypeExtraBitfields. Present if and only if
  //   hasExtraBitfields() is true.
  //
  // * Optionally exactly one of:
  //   * an array of getNumExceptions() ExceptionType,
  //   * a single Expr *,
  //   * a pair of FunctionDecl *,
  //   * a single FunctionDecl *
  //   used to store information about the various types of exception
  //   specification. See getExceptionSpecSize for the details.
  //
  // * Optionally an array of getNumParams() ExtParameterInfo holding
  //   an ExtParameterInfo for each of the parameters. Present if and
  //   only if hasExtParameterInfos() is true.
  //
  // * Optionally a Qualifiers object to represent extra qualifiers that can't
  //   be represented by FunctionTypeBitfields.FastTypeQuals. Present if and
  //   only if hasExtQualifiers() is true.
  //
  // The optional FunctionTypeExtraBitfields has to be before the data
  // related to the exception specification since it contains the number
  // of exception types.
  //
  // We put the ExtParameterInfos last. If all were equal, it would make
  // more sense to put these before the exception specification, because
  // it's much easier to skip past them compared to the elaborate switch
  // required to skip the exception specification. However, all is not
  // equal; ExtParameterInfos are used to model very uncommon features,
  // and it's better not to burden the more common paths.

public:
  /// Holds information about the various types of exception specification.
  /// ExceptionSpecInfo is not stored as such in FunctionProtoType but is
  /// used to group together the various bits of information about the
  /// exception specification.
  struct ExceptionSpecInfo {
    /// The kind of exception specification this is.
    ExceptionSpecificationType Type = EST_None;

    /// Explicitly-specified list of exception types.
    ArrayRef<QualType> Exceptions;

    /// Noexcept expression, if this is a computed noexcept specification.
    Expr *NoexceptExpr = nullptr;

    /// The function whose exception specification this is, for
    /// EST_Unevaluated and EST_Uninstantiated.
    FunctionDecl *SourceDecl = nullptr;

    /// The function template whose exception specification this is
    /// instantiated from, for EST_Uninstantiated.
    FunctionDecl *SourceTemplate = nullptr;

    ExceptionSpecInfo() = default;

    ExceptionSpecInfo(ExceptionSpecificationType EST) : Type(EST) {}
  };

  /// Extra information about a function prototype. ExtProtoInfo is not
  /// stored as such in FunctionProtoType but is used to group together
  /// the various bits of extra information about a function prototype.
  struct ExtProtoInfo {
    FunctionType::ExtInfo ExtInfo;
    bool Variadic : 1;
    bool HasTrailingReturn : 1;
    Qualifiers TypeQuals;
    RefQualifierKind RefQualifier = RQ_None;
    ExceptionSpecInfo ExceptionSpec;
    const ExtParameterInfo *ExtParameterInfos = nullptr;
    SourceLocation EllipsisLoc;

    ExtProtoInfo() : Variadic(false), HasTrailingReturn(false) {}

    ExtProtoInfo(CallingConv CC)
        : ExtInfo(CC), Variadic(false), HasTrailingReturn(false) {}

    /// Return a copy of this ExtProtoInfo with the exception
    /// specification replaced by \p ESI.
    ExtProtoInfo withExceptionSpec(const ExceptionSpecInfo &ESI) {
      ExtProtoInfo Result(*this);
      Result.ExceptionSpec = ESI;
      return Result;
    }
  };

private:
  // The following overloads tell TrailingObjects how many objects of each
  // trailing type are present, in the order the types were declared above.
  unsigned numTrailingObjects(OverloadToken<QualType>) const {
    return getNumParams();
  }

  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return isVariadic();
  }

  unsigned numTrailingObjects(OverloadToken<FunctionTypeExtraBitfields>) const {
    return hasExtraBitfields();
  }

  unsigned numTrailingObjects(OverloadToken<ExceptionType>) const {
    return getExceptionSpecSize().NumExceptionType;
  }

  unsigned numTrailingObjects(OverloadToken<Expr *>) const {
    return getExceptionSpecSize().NumExprPtr;
  }

  unsigned numTrailingObjects(OverloadToken<FunctionDecl *>) const {
    return getExceptionSpecSize().NumFunctionDeclPtr;
  }

  unsigned numTrailingObjects(OverloadToken<ExtParameterInfo>) const {
    return hasExtParameterInfos() ? getNumParams() : 0;
  }

  /// Determine whether there are any argument types that
  /// contain an unexpanded parameter pack.
  static bool containsAnyUnexpandedParameterPack(const QualType *ArgArray,
                                                 unsigned numArgs) {
    for (unsigned Idx = 0; Idx < numArgs; ++Idx)
      if (ArgArray[Idx]->containsUnexpandedParameterPack())
        return true;

    return false;
  }

  FunctionProtoType(QualType result, ArrayRef<QualType> params,
                    QualType canonical, const ExtProtoInfo &epi);

  /// This struct is returned by getExceptionSpecSize and is used to
  /// translate an ExceptionSpecificationType to the number and kind
  /// of trailing objects related to the exception specification.
  struct ExceptionSpecSizeHolder {
    unsigned NumExceptionType;
    unsigned NumExprPtr;
    unsigned NumFunctionDeclPtr;
  };

  /// Return the number and kind of trailing objects
  /// related to the exception specification.
  static ExceptionSpecSizeHolder
  getExceptionSpecSize(ExceptionSpecificationType EST, unsigned NumExceptions) {
    switch (EST) {
    case EST_None:
    case EST_DynamicNone:
    case EST_MSAny:
    case EST_BasicNoexcept:
    case EST_Unparsed:
    case EST_NoThrow:
      return {0, 0, 0};

    case EST_Dynamic:
      return {NumExceptions, 0, 0};

    case EST_DependentNoexcept:
    case EST_NoexceptFalse:
    case EST_NoexceptTrue:
      return {0, 1, 0};

    case EST_Uninstantiated:
      return {0, 0, 2};

    case EST_Unevaluated:
      return {0, 0, 1};
    }
    llvm_unreachable("bad exception specification kind");
  }

  /// Return the number and kind of trailing objects
  /// related to the exception specification.
  ExceptionSpecSizeHolder getExceptionSpecSize() const {
    return getExceptionSpecSize(getExceptionSpecType(), getNumExceptions());
  }

  /// Whether the trailing FunctionTypeExtraBitfields is present.
  static bool hasExtraBitfields(ExceptionSpecificationType EST) {
    // If the exception spec type is EST_Dynamic then we have > 0 exception
    // types and the exact number is stored in FunctionTypeExtraBitfields.
    return EST == EST_Dynamic;
  }

  /// Whether the trailing FunctionTypeExtraBitfields is present.
  bool hasExtraBitfields() const {
    return hasExtraBitfields(getExceptionSpecType());
  }

  /// Whether a trailing Qualifiers object holds qualifiers that don't fit
  /// in FunctionTypeBitfields.FastTypeQuals.
  bool hasExtQualifiers() const {
    return FunctionTypeBits.HasExtQuals;
  }

public:
  unsigned getNumParams() const { return FunctionTypeBits.NumParams; }

  QualType getParamType(unsigned i) const {
    assert(i < getNumParams() && "invalid parameter index");
    return param_type_begin()[i];
  }

  ArrayRef<QualType> getParamTypes() const {
    return llvm::makeArrayRef(param_type_begin(), param_type_end());
  }

  /// Reassemble an ExtProtoInfo from the pieces stored in this type, so
  /// the prototype can be rebuilt or compared.
  ExtProtoInfo getExtProtoInfo() const {
    ExtProtoInfo EPI;
    EPI.ExtInfo = getExtInfo();
    EPI.Variadic = isVariadic();
    EPI.EllipsisLoc = getEllipsisLoc();
    EPI.HasTrailingReturn = hasTrailingReturn();
    EPI.ExceptionSpec = getExceptionSpecInfo();
    EPI.TypeQuals = getMethodQuals();
    EPI.RefQualifier = getRefQualifier();
    EPI.ExtParameterInfos = getExtParameterInfosOrNull();
    return EPI;
  }

  /// Get the kind of exception specification on this function.
  ExceptionSpecificationType getExceptionSpecType() const {
    return static_cast<ExceptionSpecificationType>(
        FunctionTypeBits.ExceptionSpecType);
  }

  /// Return whether this function has any kind of exception spec.
  bool hasExceptionSpec() const { return getExceptionSpecType() != EST_None; }

  /// Return whether this function has a dynamic (throw) exception spec.
  bool hasDynamicExceptionSpec() const {
    return isDynamicExceptionSpec(getExceptionSpecType());
  }

  /// Return whether this function has a noexcept exception spec.
  bool hasNoexceptExceptionSpec() const {
    return isNoexceptExceptionSpec(getExceptionSpecType());
  }

  /// Return whether this function has a dependent exception spec.
  bool hasDependentExceptionSpec() const;

  /// Return whether this function has an instantiation-dependent exception
  /// spec.
  bool hasInstantiationDependentExceptionSpec() const;

  /// Return all the available information about this type's exception spec.
  ExceptionSpecInfo getExceptionSpecInfo() const {
    ExceptionSpecInfo Result;
    Result.Type = getExceptionSpecType();
    // Only one of the alternative payloads below is stored; pick it based
    // on the specification kind (see getExceptionSpecSize).
    if (Result.Type == EST_Dynamic) {
      Result.Exceptions = exceptions();
    } else if (isComputedNoexcept(Result.Type)) {
      Result.NoexceptExpr = getNoexceptExpr();
    } else if (Result.Type == EST_Uninstantiated) {
      Result.SourceDecl = getExceptionSpecDecl();
      Result.SourceTemplate = getExceptionSpecTemplate();
    } else if (Result.Type == EST_Unevaluated) {
      Result.SourceDecl = getExceptionSpecDecl();
    }
    return Result;
  }

  /// Return the number of types in the exception specification.
  // The count lives in the trailing FunctionTypeExtraBitfields, which is
  // present exactly when the specification kind is EST_Dynamic.
  unsigned getNumExceptions() const {
    return getExceptionSpecType() == EST_Dynamic
               ? getTrailingObjects<FunctionTypeExtraBitfields>()
                     ->NumExceptionType
               : 0;
  }

  /// Return the ith exception type, where 0 <= i < getNumExceptions().
  QualType getExceptionType(unsigned i) const {
    assert(i < getNumExceptions() && "Invalid exception number!");
    return exception_begin()[i];
  }

  /// Return the expression inside noexcept(expression), or a null pointer
  /// if there is none (because the exception spec is not of this form).
  Expr *getNoexceptExpr() const {
    if (!isComputedNoexcept(getExceptionSpecType()))
      return nullptr;
    return *getTrailingObjects<Expr *>();
  }

  /// If this function type has an exception specification which hasn't
  /// been determined yet (either because it has not been evaluated or because
  /// it has not been instantiated), this is the function whose exception
  /// specification is represented by this type.
  FunctionDecl *getExceptionSpecDecl() const {
    if (getExceptionSpecType() != EST_Uninstantiated &&
        getExceptionSpecType() != EST_Unevaluated)
      return nullptr;
    return getTrailingObjects<FunctionDecl *>()[0];
  }

  /// If this function type has an uninstantiated exception
  /// specification, this is the function whose exception specification
  /// should be instantiated to find the exception specification for
  /// this type.
  FunctionDecl *getExceptionSpecTemplate() const {
    if (getExceptionSpecType() != EST_Uninstantiated)
      return nullptr;
    return getTrailingObjects<FunctionDecl *>()[1];
  }

  /// Determine whether this function type has a non-throwing exception
  /// specification.
  CanThrowResult canThrow() const;

  /// Determine whether this function type has a non-throwing exception
  /// specification. If this depends on template arguments, returns
  /// \c ResultIfDependent.
  bool isNothrow(bool ResultIfDependent = false) const {
    return ResultIfDependent ? canThrow() != CT_Can : canThrow() == CT_Cannot;
  }

  /// Whether this function prototype is variadic.
  bool isVariadic() const { return FunctionTypeBits.Variadic; }

  // Returns an invalid location for non-variadic prototypes, which have no
  // trailing SourceLocation object.
  SourceLocation getEllipsisLoc() const {
    return isVariadic() ? *getTrailingObjects<SourceLocation>()
                        : SourceLocation();
  }

  /// Determines whether this function prototype contains a
  /// parameter pack at the end.
  ///
  /// A function template whose last parameter is a parameter pack can be
  /// called with an arbitrary number of arguments, much like a variadic
  /// function.
  bool isTemplateVariadic() const;

  /// Whether this function prototype has a trailing return type.
  bool hasTrailingReturn() const { return FunctionTypeBits.HasTrailingReturn; }

  Qualifiers getMethodQuals() const {
    if (hasExtQualifiers())
      return *getTrailingObjects<Qualifiers>();
    else
      return getFastTypeQuals();
  }

  /// Retrieve the ref-qualifier associated with this function type.
  RefQualifierKind getRefQualifier() const {
    return static_cast<RefQualifierKind>(FunctionTypeBits.RefQualifier);
  }

  using param_type_iterator = const QualType *;
  using param_type_range = llvm::iterator_range<param_type_iterator>;

  param_type_range param_types() const {
    return param_type_range(param_type_begin(), param_type_end());
  }

  param_type_iterator param_type_begin() const {
    return getTrailingObjects<QualType>();
  }

  param_type_iterator param_type_end() const {
    return param_type_begin() + getNumParams();
  }

  using exception_iterator = const QualType *;

  ArrayRef<QualType> exceptions() const {
    return llvm::makeArrayRef(exception_begin(), exception_end());
  }

  exception_iterator exception_begin() const {
    // ExceptionType is layout-compatible with QualType (it is a plain
    // wrapper struct), so reinterpret the trailing array directly.
    return reinterpret_cast<exception_iterator>(
        getTrailingObjects<ExceptionType>());
  }

  exception_iterator exception_end() const {
    return exception_begin() + getNumExceptions();
  }

  /// Is there any interesting extra information for any of the parameters
  /// of this function type?
  bool hasExtParameterInfos() const {
    return FunctionTypeBits.HasExtParameterInfos;
  }

  ArrayRef<ExtParameterInfo> getExtParameterInfos() const {
    assert(hasExtParameterInfos());
    return ArrayRef<ExtParameterInfo>(getTrailingObjects<ExtParameterInfo>(),
                                      getNumParams());
  }

  /// Return a pointer to the beginning of the array of extra parameter
  /// information, if present, or else null if none of the parameters
  /// carry it. This is equivalent to getExtProtoInfo().ExtParameterInfos.
  const ExtParameterInfo *getExtParameterInfosOrNull() const {
    if (!hasExtParameterInfos())
      return nullptr;
    return getTrailingObjects<ExtParameterInfo>();
  }

  ExtParameterInfo getExtParameterInfo(unsigned I) const {
    assert(I < getNumParams() && "parameter index out of range");
    if (hasExtParameterInfos())
      return getTrailingObjects<ExtParameterInfo>()[I];
    return ExtParameterInfo();
  }

  ParameterABI getParameterABI(unsigned I) const {
    assert(I < getNumParams() && "parameter index out of range");
    if (hasExtParameterInfos())
      return getTrailingObjects<ExtParameterInfo>()[I].getABI();
    return ParameterABI::Ordinary;
  }

  bool isParamConsumed(unsigned I) const {
    assert(I < getNumParams() && "parameter index out of range");
    if (hasExtParameterInfos())
      return getTrailingObjects<ExtParameterInfo>()[I].isConsumed();
    return false;
  }

  bool isSugared() const { return false; }
  QualType desugar() const { return QualType(this, 0); }

  void printExceptionSpecification(raw_ostream &OS,
                                   const PrintingPolicy &Policy) const;

  static bool classof(const Type *T) {
    return T->getTypeClass() == FunctionProto;
  }

  void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Ctx);
  static void Profile(llvm::FoldingSetNodeID &ID, QualType Result,
                      param_type_iterator ArgTys, unsigned NumArgs,
                      const ExtProtoInfo &EPI, const ASTContext &Context,
                      bool Canonical);
};
4337
4338/// Represents the dependent type named by a dependently-scoped
4339/// typename using declaration, e.g.
4340/// using typename Base<T>::foo;
4341///
4342/// Template instantiation turns these into the underlying type.
4343class UnresolvedUsingType : public Type {
4344 friend class ASTContext; // ASTContext creates these.
4345
4346 UnresolvedUsingTypenameDecl *Decl;
4347
4348 UnresolvedUsingType(const UnresolvedUsingTypenameDecl *D)
4349 : Type(UnresolvedUsing, QualType(),
4350 TypeDependence::DependentInstantiation),
4351 Decl(const_cast<UnresolvedUsingTypenameDecl *>(D)) {}
4352
4353public:
4354 UnresolvedUsingTypenameDecl *getDecl() const { return Decl; }
4355
4356 bool isSugared() const { return false; }
4357 QualType desugar() const { return QualType(this, 0); }
4358
4359 static bool classof(const Type *T) {
4360 return T->getTypeClass() == UnresolvedUsing;
4361 }
4362
4363 void Profile(llvm::FoldingSetNodeID &ID) {
4364 return Profile(ID, Decl);
4365 }
4366
4367 static void Profile(llvm::FoldingSetNodeID &ID,
4368 UnresolvedUsingTypenameDecl *D) {
4369 ID.AddPointer(D);
4370 }
4371};
4372
/// Represents a type written through a typedef or C++11 alias-declaration
/// name; sugar for the declaration's underlying type.
class TypedefType : public Type {
  // The typedef-name (typedef or alias) declaration this type refers to.
  TypedefNameDecl *Decl;

private:
  friend class ASTContext; // ASTContext creates these.

  TypedefType(TypeClass tc, const TypedefNameDecl *D, QualType underlying,
              QualType can);

public:
  /// Return the typedef-name declaration this type was written as.
  TypedefNameDecl *getDecl() const { return Decl; }

  // A typedef name is always sugar for its underlying type.
  bool isSugared() const { return true; }
  QualType desugar() const;

  static bool classof(const Type *T) { return T->getTypeClass() == Typedef; }
};
4390
/// Sugar type that represents a type that was qualified by a qualifier written
/// as a macro invocation.
class MacroQualifiedType : public Type {
  friend class ASTContext; // ASTContext creates these.

  QualType UnderlyingTy;
  // Name of the macro whose expansion supplied the qualifier.
  const IdentifierInfo *MacroII;

  MacroQualifiedType(QualType UnderlyingTy, QualType CanonTy,
                     const IdentifierInfo *MacroII)
      : Type(MacroQualified, CanonTy, UnderlyingTy->getDependence()),
        UnderlyingTy(UnderlyingTy), MacroII(MacroII) {
    assert(isa<AttributedType>(UnderlyingTy) &&
           "Expected a macro qualified type to only wrap attributed types.");
  }

public:
  const IdentifierInfo *getMacroIdentifier() const { return MacroII; }
  QualType getUnderlyingType() const { return UnderlyingTy; }

  /// Return this attributed type's modified type with no qualifiers attached to
  /// it.
  QualType getModifiedType() const;

  // This node is pure sugar over the underlying (attributed) type.
  bool isSugared() const { return true; }
  QualType desugar() const;

  static bool classof(const Type *T) {
    return T->getTypeClass() == MacroQualified;
  }
};
4422
/// Represents a `typeof` (or __typeof__) expression (a GCC extension).
class TypeOfExprType : public Type {
  // The operand expression appearing inside typeof(...).
  Expr *TOExpr;

protected:
  friend class ASTContext; // ASTContext creates these.

  TypeOfExprType(Expr *E, QualType can = QualType());

public:
  Expr *getUnderlyingExpr() const { return TOExpr; }

  /// Remove a single level of sugar.
  QualType desugar() const;

  /// Returns whether this type directly provides sugar.
  bool isSugared() const;

  static bool classof(const Type *T) { return T->getTypeClass() == TypeOfExpr; }
};
4443
/// Internal representation of canonical, dependent
/// `typeof(expr)` types.
///
/// This class is used internally by the ASTContext to manage
/// canonical, dependent types, only. Clients will only see instances
/// of this class via TypeOfExprType nodes.
class DependentTypeOfExprType
    : public TypeOfExprType, public llvm::FoldingSetNode {
  // Context forwarded to the static Profile overload below.
  const ASTContext &Context;

public:
  DependentTypeOfExprType(const ASTContext &Context, Expr *E)
      : TypeOfExprType(E), Context(Context) {}

  /// FoldingSet profiling: folds on the underlying expression.
  void Profile(llvm::FoldingSetNodeID &ID) {
    Profile(ID, Context, getUnderlyingExpr());
  }

  static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
                      Expr *E);
};
4465
/// Represents `typeof(type)`, a GCC extension.
class TypeOfType : public Type {
  friend class ASTContext; // ASTContext creates these.

  // The operand type named inside typeof(...).
  QualType TOType;

  TypeOfType(QualType T, QualType can)
      : Type(TypeOf, can, T->getDependence()), TOType(T) {
    assert(!isa<TypedefType>(can) && "Invalid canonical type");
  }

public:
  QualType getUnderlyingType() const { return TOType; }

  /// Remove a single level of sugar.
  QualType desugar() const { return getUnderlyingType(); }

  /// Returns whether this type directly provides sugar.
  bool isSugared() const { return true; }

  static bool classof(const Type *T) { return T->getTypeClass() == TypeOf; }
};
4488
/// Represents the type `decltype(expr)` (C++11).
class DecltypeType : public Type {
  // The operand expression of the decltype.
  Expr *E;
  // The underlying type named by this decltype.
  QualType UnderlyingType;

protected:
  friend class ASTContext; // ASTContext creates these.

  DecltypeType(Expr *E, QualType underlyingType, QualType can = QualType());

public:
  Expr *getUnderlyingExpr() const { return E; }
  QualType getUnderlyingType() const { return UnderlyingType; }

  /// Remove a single level of sugar.
  QualType desugar() const;

  /// Returns whether this type directly provides sugar.
  bool isSugared() const;

  static bool classof(const Type *T) { return T->getTypeClass() == Decltype; }
};
4511
/// Internal representation of canonical, dependent
/// decltype(expr) types.
///
/// This class is used internally by the ASTContext to manage
/// canonical, dependent types, only. Clients will only see instances
/// of this class via DecltypeType nodes.
class DependentDecltypeType : public DecltypeType, public llvm::FoldingSetNode {
  // Context forwarded to the static Profile overload below.
  const ASTContext &Context;

public:
  DependentDecltypeType(const ASTContext &Context, Expr *E);

  /// FoldingSet profiling: folds on the underlying expression.
  void Profile(llvm::FoldingSetNodeID &ID) {
    Profile(ID, Context, getUnderlyingExpr());
  }

  static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
                      Expr *E);
};
4531
/// A unary type transform, which is a type constructed from another.
class UnaryTransformType : public Type {
public:
  // Kinds of transform; only __underlying_type is modeled here.
  enum UTTKind {
    EnumUnderlyingType
  };

private:
  /// The untransformed type.
  QualType BaseType;

  /// The transformed type if not dependent, otherwise the same as BaseType.
  QualType UnderlyingType;

  // Which transform was applied.
  UTTKind UKind;

protected:
  friend class ASTContext;

  UnaryTransformType(QualType BaseTy, QualType UnderlyingTy, UTTKind UKind,
                     QualType CanonicalTy);

public:
  // Dependent transforms are not sugar: their result is not yet known.
  bool isSugared() const { return !isDependentType(); }
  QualType desugar() const { return UnderlyingType; }

  QualType getUnderlyingType() const { return UnderlyingType; }
  QualType getBaseType() const { return BaseType; }

  UTTKind getUTTKind() const { return UKind; }

  static bool classof(const Type *T) {
    return T->getTypeClass() == UnaryTransform;
  }
};
4567
/// Internal representation of canonical, dependent
/// __underlying_type(type) types.
///
/// This class is used internally by the ASTContext to manage
/// canonical, dependent types, only. Clients will only see instances
/// of this class via UnaryTransformType nodes.
class DependentUnaryTransformType : public UnaryTransformType,
                                    public llvm::FoldingSetNode {
public:
  DependentUnaryTransformType(const ASTContext &C, QualType BaseType,
                              UTTKind UKind);

  /// FoldingSet profiling: folds on the base type and transform kind.
  void Profile(llvm::FoldingSetNodeID &ID) {
    Profile(ID, getBaseType(), getUTTKind());
  }

  static void Profile(llvm::FoldingSetNodeID &ID, QualType BaseType,
                      UTTKind UKind) {
    ID.AddPointer(BaseType.getAsOpaquePtr());
    ID.AddInteger((unsigned)UKind);
  }
};
4590
/// Common base for types named by a tag declaration; per classof below the
/// concrete classes are RecordType and EnumType.
class TagType : public Type {
  friend class ASTReader;
  template <class T> friend class serialization::AbstractTypeReader;

  /// Stores the TagDecl associated with this type. The decl may point to any
  /// TagDecl that declares the entity.
  TagDecl *decl;

protected:
  TagType(TypeClass TC, const TagDecl *D, QualType can);

public:
  TagDecl *getDecl() const;

  /// Determines whether this type is in the process of being defined.
  bool isBeingDefined() const;

  static bool classof(const Type *T) {
    return T->getTypeClass() == Enum || T->getTypeClass() == Record;
  }
};
4612
/// A helper class that allows the use of isa/cast/dyncast
/// to detect TagType objects of structs/unions/classes.
class RecordType : public TagType {
protected:
  friend class ASTContext; // ASTContext creates these.

  explicit RecordType(const RecordDecl *D)
      : TagType(Record, reinterpret_cast<const TagDecl*>(D), QualType()) {}
  explicit RecordType(TypeClass TC, RecordDecl *D)
      : TagType(TC, reinterpret_cast<const TagDecl*>(D), QualType()) {}

public:
  RecordDecl *getDecl() const {
    // NOTE(review): the reinterpret_cast mirrors the one in the constructors;
    // it presumes RecordDecl is layout-compatible via TagDecl derivation.
    return reinterpret_cast<RecordDecl*>(TagType::getDecl());
  }

  /// Recursively check all fields in the record for const-ness. If any field
  /// is declared const, return true. Otherwise, return false.
  bool hasConstFields() const;

  // Never sugar: a record type names the entity directly.
  bool isSugared() const { return false; }
  QualType desugar() const { return QualType(this, 0); }

  static bool classof(const Type *T) { return T->getTypeClass() == Record; }
};
4638
/// A helper class that allows the use of isa/cast/dyncast
/// to detect TagType objects of enums.
class EnumType : public TagType {
  friend class ASTContext; // ASTContext creates these.

  explicit EnumType(const EnumDecl *D)
      : TagType(Enum, reinterpret_cast<const TagDecl*>(D), QualType()) {}

public:
  EnumDecl *getDecl() const {
    // NOTE(review): mirrors the constructor's reinterpret_cast between
    // EnumDecl and TagDecl.
    return reinterpret_cast<EnumDecl*>(TagType::getDecl());
  }

  // Never sugar: an enum type names the entity directly.
  bool isSugared() const { return false; }
  QualType desugar() const { return QualType(this, 0); }

  static bool classof(const Type *T) { return T->getTypeClass() == Enum; }
};
4657
/// An attributed type is a type to which a type attribute has been applied.
///
/// The "modified type" is the fully-sugared type to which the attributed
/// type was applied; generally it is not canonically equivalent to the
/// attributed type. The "equivalent type" is the minimally-desugared type
/// which the type is canonically equivalent to.
///
/// For example, in the following attributed type:
/// int32_t __attribute__((vector_size(16)))
/// - the modified type is the TypedefType for int32_t
/// - the equivalent type is VectorType(16, int32_t)
/// - the canonical type is VectorType(16, int)
class AttributedType : public Type, public llvm::FoldingSetNode {
public:
  using Kind = attr::Kind;

private:
  friend class ASTContext; // ASTContext creates these

  // See the class comment for the modified/equivalent distinction.
  QualType ModifiedType;
  QualType EquivalentType;

  AttributedType(QualType canon, attr::Kind attrKind, QualType modified,
                 QualType equivalent)
      : Type(Attributed, canon, equivalent->getDependence()),
        ModifiedType(modified), EquivalentType(equivalent) {
    // The attribute kind is packed into the shared Type bitfields.
    AttributedTypeBits.AttrKind = attrKind;
  }

public:
  Kind getAttrKind() const {
    return static_cast<Kind>(AttributedTypeBits.AttrKind);
  }

  QualType getModifiedType() const { return ModifiedType; }
  QualType getEquivalentType() const { return EquivalentType; }

  bool isSugared() const { return true; }
  QualType desugar() const { return getEquivalentType(); }

  /// Does this attribute behave like a type qualifier?
  ///
  /// A type qualifier adjusts a type to provide specialized rules for
  /// a specific object, like the standard const and volatile qualifiers.
  /// This includes attributes controlling things like nullability,
  /// address spaces, and ARC ownership. The value of the object is still
  /// largely described by the modified type.
  ///
  /// In contrast, many type attributes "rewrite" their modified type to
  /// produce a fundamentally different type, not necessarily related in any
  /// formalizable way to the original type. For example, calling convention
  /// and vector attributes are not simple type qualifiers.
  ///
  /// Type qualifiers are often, but not always, reflected in the canonical
  /// type.
  bool isQualifier() const;

  bool isMSTypeSpec() const;

  bool isCallingConv() const;

  llvm::Optional<NullabilityKind> getImmediateNullability() const;

  /// Retrieve the attribute kind corresponding to the given
  /// nullability kind.
  static Kind getNullabilityAttrKind(NullabilityKind kind) {
    switch (kind) {
    case NullabilityKind::NonNull:
      return attr::TypeNonNull;

    case NullabilityKind::Nullable:
      return attr::TypeNullable;

    case NullabilityKind::NullableResult:
      return attr::TypeNullableResult;

    case NullabilityKind::Unspecified:
      return attr::TypeNullUnspecified;
    }
    llvm_unreachable("Unknown nullability kind.");
  }

  /// Strip off the top-level nullability annotation on the given
  /// type, if it's there.
  ///
  /// \param T The type to strip. If the type is exactly an
  /// AttributedType specifying nullability (without looking through
  /// type sugar), the nullability is returned and this type changed
  /// to the underlying modified type.
  ///
  /// \returns the top-level nullability, if present.
  static Optional<NullabilityKind> stripOuterNullability(QualType &T);

  /// FoldingSet profiling: folds on the attribute kind and both types.
  void Profile(llvm::FoldingSetNodeID &ID) {
    Profile(ID, getAttrKind(), ModifiedType, EquivalentType);
  }

  static void Profile(llvm::FoldingSetNodeID &ID, Kind attrKind,
                      QualType modified, QualType equivalent) {
    ID.AddInteger(attrKind);
    ID.AddPointer(modified.getAsOpaquePtr());
    ID.AddPointer(equivalent.getAsOpaquePtr());
  }

  static bool classof(const Type *T) {
    return T->getTypeClass() == Attributed;
  }
};
4766
/// Represents a template type parameter; canonical nodes store only
/// depth/index/pack data, while non-canonical nodes store the declaration.
class TemplateTypeParmType : public Type, public llvm::FoldingSetNode {
  friend class ASTContext; // ASTContext creates these

  // Helper data collector for canonical types.
  struct CanonicalTTPTInfo {
    unsigned Depth : 15;
    unsigned ParameterPack : 1;
    unsigned Index : 16;
  };

  // Which union member is active depends on whether this node is canonical;
  // see the two constructors below.
  union {
    // Info for the canonical type.
    CanonicalTTPTInfo CanTTPTInfo;

    // Info for the non-canonical type.
    TemplateTypeParmDecl *TTPDecl;
  };

  /// Build a non-canonical type.
  TemplateTypeParmType(TemplateTypeParmDecl *TTPDecl, QualType Canon)
      : Type(TemplateTypeParm, Canon,
             TypeDependence::DependentInstantiation |
                 (Canon->getDependence() & TypeDependence::UnexpandedPack)),
        TTPDecl(TTPDecl) {}

  /// Build the canonical type.
  TemplateTypeParmType(unsigned D, unsigned I, bool PP)
      : Type(TemplateTypeParm, QualType(this, 0),
             TypeDependence::DependentInstantiation |
                 (PP ? TypeDependence::UnexpandedPack : TypeDependence::None)) {
    CanTTPTInfo.Depth = D;
    CanTTPTInfo.Index = I;
    CanTTPTInfo.ParameterPack = PP;
  }

  // Always reads the info through the canonical type, which is the variant
  // of the union that stores CanTTPTInfo.
  const CanonicalTTPTInfo& getCanTTPTInfo() const {
    QualType Can = getCanonicalTypeInternal();
    return Can->castAs<TemplateTypeParmType>()->CanTTPTInfo;
  }

public:
  unsigned getDepth() const { return getCanTTPTInfo().Depth; }
  unsigned getIndex() const { return getCanTTPTInfo().Index; }
  bool isParameterPack() const { return getCanTTPTInfo().ParameterPack; }

  /// Returns the declaration, or null for the canonical form (which stores
  /// no declaration in the union).
  TemplateTypeParmDecl *getDecl() const {
    return isCanonicalUnqualified() ? nullptr : TTPDecl;
  }

  IdentifierInfo *getIdentifier() const;

  bool isSugared() const { return false; }
  QualType desugar() const { return QualType(this, 0); }

  void Profile(llvm::FoldingSetNodeID &ID) {
    Profile(ID, getDepth(), getIndex(), isParameterPack(), getDecl());
  }

  static void Profile(llvm::FoldingSetNodeID &ID, unsigned Depth,
                      unsigned Index, bool ParameterPack,
                      TemplateTypeParmDecl *TTPDecl) {
    ID.AddInteger(Depth);
    ID.AddInteger(Index);
    ID.AddBoolean(ParameterPack);
    ID.AddPointer(TTPDecl);
  }

  static bool classof(const Type *T) {
    return T->getTypeClass() == TemplateTypeParm;
  }
};
4838
/// Represents the result of substituting a type for a template
/// type parameter.
///
/// Within an instantiated template, all template type parameters have
/// been replaced with these. They are used solely to record that a
/// type was originally written as a template type parameter;
/// therefore they are never canonical.
class SubstTemplateTypeParmType : public Type, public llvm::FoldingSetNode {
  friend class ASTContext;

  // The original type parameter.
  const TemplateTypeParmType *Replaced;

  // The replacement type is not stored separately: it is the canonical type
  // passed to the base class (see getReplacementType()).
  SubstTemplateTypeParmType(const TemplateTypeParmType *Param, QualType Canon)
      : Type(SubstTemplateTypeParm, Canon, Canon->getDependence()),
        Replaced(Param) {}

public:
  /// Gets the template parameter that was substituted for.
  const TemplateTypeParmType *getReplacedParameter() const {
    return Replaced;
  }

  /// Gets the type that was substituted for the template
  /// parameter.
  QualType getReplacementType() const {
    return getCanonicalTypeInternal();
  }

  bool isSugared() const { return true; }
  QualType desugar() const { return getReplacementType(); }

  void Profile(llvm::FoldingSetNodeID &ID) {
    Profile(ID, getReplacedParameter(), getReplacementType());
  }

  static void Profile(llvm::FoldingSetNodeID &ID,
                      const TemplateTypeParmType *Replaced,
                      QualType Replacement) {
    ID.AddPointer(Replaced);
    ID.AddPointer(Replacement.getAsOpaquePtr());
  }

  static bool classof(const Type *T) {
    return T->getTypeClass() == SubstTemplateTypeParm;
  }
};
4886
/// Represents the result of substituting a set of types for a template
/// type parameter pack.
///
/// When a pack expansion in the source code contains multiple parameter packs
/// and those parameter packs correspond to different levels of template
/// parameter lists, this type node is used to represent a template type
/// parameter pack from an outer level, which has already had its argument pack
/// substituted but that still lives within a pack expansion that itself
/// could not be instantiated. When actually performing a substitution into
/// that pack expansion (e.g., when all template parameters have corresponding
/// arguments), this type will be replaced with the \c SubstTemplateTypeParmType
/// at the current pack substitution index.
class SubstTemplateTypeParmPackType : public Type, public llvm::FoldingSetNode {
  friend class ASTContext;

  /// The original type parameter.
  const TemplateTypeParmType *Replaced;

  /// A pointer to the set of template arguments that this
  /// parameter pack is instantiated with.
  const TemplateArgument *Arguments;

  SubstTemplateTypeParmPackType(const TemplateTypeParmType *Param,
                                QualType Canon,
                                const TemplateArgument &ArgPack);

public:
  IdentifierInfo *getIdentifier() const { return Replaced->getIdentifier(); }

  /// Gets the template parameter that was substituted for.
  const TemplateTypeParmType *getReplacedParameter() const {
    return Replaced;
  }

  /// Number of arguments in the pack; stored in the shared Type bitfields.
  unsigned getNumArgs() const {
    return SubstTemplateTypeParmPackTypeBits.NumArgs;
  }

  bool isSugared() const { return false; }
  QualType desugar() const { return QualType(this, 0); }

  TemplateArgument getArgumentPack() const;

  void Profile(llvm::FoldingSetNodeID &ID);
  static void Profile(llvm::FoldingSetNodeID &ID,
                      const TemplateTypeParmType *Replaced,
                      const TemplateArgument &ArgPack);

  static bool classof(const Type *T) {
    return T->getTypeClass() == SubstTemplateTypeParmPack;
  }
};
4939
/// Common base class for placeholders for types that get replaced by
/// placeholder type deduction: C++11 auto, C++14 decltype(auto), C++17 deduced
/// class template types, and constrained type names.
///
/// These types are usually a placeholder for a deduced type. However, before
/// the initializer is attached, or (usually) if the initializer is
/// type-dependent, there is no deduced type and the type is canonical. In
/// the latter case, it is also a dependent type.
class DeducedType : public Type {
protected:
  // The deduced type, when present, is stored as the canonical type; an
  // undeduced placeholder is its own canonical type.
  DeducedType(TypeClass TC, QualType DeducedAsType,
              TypeDependence ExtraDependence)
      : Type(TC,
             // FIXME: Retain the sugared deduced type?
             DeducedAsType.isNull() ? QualType(this, 0)
                                    : DeducedAsType.getCanonicalType(),
             ExtraDependence | (DeducedAsType.isNull()
                                    ? TypeDependence::None
                                    : DeducedAsType->getDependence() &
                                          ~TypeDependence::VariablyModified)) {}

public:
  bool isSugared() const { return !isCanonicalUnqualified(); }
  QualType desugar() const { return getCanonicalTypeInternal(); }

  /// Get the type deduced for this placeholder type, or null if it's
  /// either not been deduced or was deduced to a dependent type.
  QualType getDeducedType() const {
    return !isCanonicalUnqualified() ? getCanonicalTypeInternal() : QualType();
  }
  bool isDeduced() const {
    return !isCanonicalUnqualified() || isDependentType();
  }

  static bool classof(const Type *T) {
    return T->getTypeClass() == Auto ||
           T->getTypeClass() == DeducedTemplateSpecialization;
  }
};
4979
/// Represents a C++11 auto or C++14 decltype(auto) type, possibly constrained
/// by a type-constraint.
class alignas(8) AutoType : public DeducedType, public llvm::FoldingSetNode {
  friend class ASTContext; // ASTContext creates these

  // Concept named by the type-constraint; null when unconstrained.
  ConceptDecl *TypeConstraintConcept;

  AutoType(QualType DeducedAsType, AutoTypeKeyword Keyword,
           TypeDependence ExtraDependence, ConceptDecl *CD,
           ArrayRef<TemplateArgument> TypeConstraintArgs);

  // The type-constraint arguments live in a trailing array allocated
  // immediately after this object (hence `this + 1`).
  const TemplateArgument *getArgBuffer() const {
    return reinterpret_cast<const TemplateArgument*>(this+1);
  }

  TemplateArgument *getArgBuffer() {
    return reinterpret_cast<TemplateArgument*>(this+1);
  }

public:
  /// Retrieve the template arguments.
  const TemplateArgument *getArgs() const {
    return getArgBuffer();
  }

  /// Retrieve the number of template arguments.
  unsigned getNumArgs() const {
    return AutoTypeBits.NumArgs;
  }

  const TemplateArgument &getArg(unsigned Idx) const; // in TemplateBase.h

  ArrayRef<TemplateArgument> getTypeConstraintArguments() const {
    return {getArgs(), getNumArgs()};
  }

  ConceptDecl *getTypeConstraintConcept() const {
    return TypeConstraintConcept;
  }

  bool isConstrained() const {
    return TypeConstraintConcept != nullptr;
  }

  bool isDecltypeAuto() const {
    return getKeyword() == AutoTypeKeyword::DecltypeAuto;
  }

  AutoTypeKeyword getKeyword() const {
    return (AutoTypeKeyword)AutoTypeBits.Keyword;
  }

  /// FoldingSet profiling: folds on the deduced type, keyword, dependence,
  /// and the type-constraint (concept + arguments).
  void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) {
    Profile(ID, Context, getDeducedType(), getKeyword(), isDependentType(),
            getTypeConstraintConcept(), getTypeConstraintArguments());
  }

  static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
                      QualType Deduced, AutoTypeKeyword Keyword,
                      bool IsDependent, ConceptDecl *CD,
                      ArrayRef<TemplateArgument> Arguments);

  static bool classof(const Type *T) {
    return T->getTypeClass() == Auto;
  }
};
5046
/// Represents a C++17 deduced template specialization type.
class DeducedTemplateSpecializationType : public DeducedType,
                                          public llvm::FoldingSetNode {
  friend class ASTContext; // ASTContext creates these

  /// The name of the template whose arguments will be deduced.
  TemplateName Template;

  DeducedTemplateSpecializationType(TemplateName Template,
                                    QualType DeducedAsType,
                                    bool IsDeducedAsDependent)
      : DeducedType(DeducedTemplateSpecialization, DeducedAsType,
                    toTypeDependence(Template.getDependence()) |
                        (IsDeducedAsDependent
                             ? TypeDependence::DependentInstantiation
                             : TypeDependence::None)),
        Template(Template) {}

public:
  /// Retrieve the name of the template that we are deducing.
  TemplateName getTemplateName() const { return Template;}

  /// FoldingSet profiling: folds on template name, deduced type, dependence.
  void Profile(llvm::FoldingSetNodeID &ID) {
    Profile(ID, getTemplateName(), getDeducedType(), isDependentType());
  }

  static void Profile(llvm::FoldingSetNodeID &ID, TemplateName Template,
                      QualType Deduced, bool IsDependent) {
    Template.Profile(ID);
    ID.AddPointer(Deduced.getAsOpaquePtr());
    ID.AddBoolean(IsDependent);
  }

  static bool classof(const Type *T) {
    return T->getTypeClass() == DeducedTemplateSpecialization;
  }
};
5084
/// Represents a type template specialization; the template
/// must be a class template, a type alias template, or a template
/// template parameter. A template which cannot be resolved to one of
/// these, e.g. because it is written with a dependent scope
/// specifier, is instead represented as a
/// @c DependentTemplateSpecializationType.
///
/// A non-dependent template specialization type is always "sugar",
/// typically for a \c RecordType. For example, a class template
/// specialization type of \c vector<int> will refer to a tag type for
/// the instantiation \c std::vector<int, std::allocator<int>>
///
/// Template specializations are dependent if either the template or
/// any of the template arguments are dependent, in which case the
/// type may also be canonical.
///
/// Instances of this type are allocated with a trailing array of
/// TemplateArguments, followed by a QualType representing the
/// non-canonical aliased type when the template is a type alias
/// template.
class alignas(8) TemplateSpecializationType
    : public Type,
      public llvm::FoldingSetNode {
  friend class ASTContext; // ASTContext creates these

  /// The name of the template being specialized. This is
  /// either a TemplateName::Template (in which case it is a
  /// ClassTemplateDecl*, a TemplateTemplateParmDecl*, or a
  /// TypeAliasTemplateDecl*), a
  /// TemplateName::SubstTemplateTemplateParmPack, or a
  /// TemplateName::SubstTemplateTemplateParm (in which case the
  /// replacement must, recursively, be one of these).
  TemplateName Template;

  TemplateSpecializationType(TemplateName T,
                             ArrayRef<TemplateArgument> Args,
                             QualType Canon,
                             QualType Aliased);

public:
  /// Determine whether any of the given template arguments are dependent.
  ///
  /// The converted arguments should be supplied when known; whether an
  /// argument is dependent can depend on the conversions performed on it
  /// (for example, a 'const int' passed as a template argument might be
  /// dependent if the parameter is a reference but non-dependent if the
  /// parameter is an int).
  ///
  /// Note that the \p Args parameter is unused: this is intentional, to remind
  /// the caller that they need to pass in the converted arguments, not the
  /// specified arguments.
  static bool
  anyDependentTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
                                ArrayRef<TemplateArgument> Converted);
  static bool
  anyDependentTemplateArguments(const TemplateArgumentListInfo &,
                                ArrayRef<TemplateArgument> Converted);
  static bool anyInstantiationDependentTemplateArguments(
      ArrayRef<TemplateArgumentLoc> Args);

  /// True if this template specialization type matches a current
  /// instantiation in the context in which it is found.
  bool isCurrentInstantiation() const {
    return isa<InjectedClassNameType>(getCanonicalTypeInternal());
  }

  /// Determine if this template specialization type is for a type alias
  /// template that has been substituted.
  ///
  /// Nearly every template specialization type whose template is an alias
  /// template will be substituted. However, this is not the case when
  /// the specialization contains a pack expansion but the template alias
  /// does not have a corresponding parameter pack, e.g.,
  ///
  /// \code
  /// template<typename T, typename U, typename V> struct S;
  /// template<typename T, typename U> using A = S<T, int, U>;
  /// template<typename... Ts> struct X {
  /// typedef A<Ts...> type; // not a type alias
  /// };
  /// \endcode
  bool isTypeAlias() const { return TemplateSpecializationTypeBits.TypeAlias; }

  /// Get the aliased type, if this is a specialization of a type alias
  /// template.
  QualType getAliasedType() const {
    assert(isTypeAlias() && "not a type alias template specialization");
    // The aliased type is stored directly after the trailing argument array.
    return *reinterpret_cast<const QualType*>(end());
  }

  using iterator = const TemplateArgument *;

  iterator begin() const { return getArgs(); }
  iterator end() const; // defined inline in TemplateBase.h

  /// Retrieve the name of the template that we are specializing.
  TemplateName getTemplateName() const { return Template; }

  /// Retrieve the template arguments.
  const TemplateArgument *getArgs() const {
    // Arguments live in a trailing array allocated after this object.
    return reinterpret_cast<const TemplateArgument *>(this + 1);
  }

  /// Retrieve the number of template arguments.
  unsigned getNumArgs() const {
    return TemplateSpecializationTypeBits.NumArgs;
  }

  /// Retrieve a specific template argument as a type.
  /// \pre \c isArgType(Arg)
  const TemplateArgument &getArg(unsigned Idx) const; // in TemplateBase.h

  ArrayRef<TemplateArgument> template_arguments() const {
    return {getArgs(), getNumArgs()};
  }

  bool isSugared() const {
    return !isDependentType() || isCurrentInstantiation() || isTypeAlias();
  }

  QualType desugar() const {
    return isTypeAlias() ? getAliasedType() : getCanonicalTypeInternal();
  }

  /// FoldingSet profiling: folds on the template name, the arguments, and
  /// (for alias specializations) the aliased type.
  void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Ctx) {
    Profile(ID, Template, template_arguments(), Ctx);
    if (isTypeAlias())
      getAliasedType().Profile(ID);
  }

  static void Profile(llvm::FoldingSetNodeID &ID, TemplateName T,
                      ArrayRef<TemplateArgument> Args,
                      const ASTContext &Context);

  static bool classof(const Type *T) {
    return T->getTypeClass() == TemplateSpecialization;
  }
};
5223
/// Print a template argument list, including the '<' and '>'
/// enclosing the template arguments.
void printTemplateArgumentList(raw_ostream &OS,
                               ArrayRef<TemplateArgument> Args,
                               const PrintingPolicy &Policy,
                               const TemplateParameterList *TPL = nullptr);

/// Overload taking arguments with source-location information.
void printTemplateArgumentList(raw_ostream &OS,
                               ArrayRef<TemplateArgumentLoc> Args,
                               const PrintingPolicy &Policy,
                               const TemplateParameterList *TPL = nullptr);

/// Overload taking a TemplateArgumentListInfo.
void printTemplateArgumentList(raw_ostream &OS,
                               const TemplateArgumentListInfo &Args,
                               const PrintingPolicy &Policy,
                               const TemplateParameterList *TPL = nullptr);
5240
/// The injected class name of a C++ class template or class
/// template partial specialization. Used to record that a type was
/// spelled with a bare identifier rather than as a template-id; the
/// equivalent for non-templated classes is just RecordType.
///
/// Injected class name types are always dependent. Template
/// instantiation turns these into RecordTypes.
///
/// Injected class name types are always canonical. This works
/// because it is impossible to compare an injected class name type
/// with the corresponding non-injected template type, for the same
/// reason that it is impossible to directly compare template
/// parameters from different dependent contexts: injected class name
/// types can only occur within the scope of a particular templated
/// declaration, and within that scope every template specialization
/// will canonicalize to the injected class name (when appropriate
/// according to the rules of the language).
class InjectedClassNameType : public Type {
  friend class ASTContext; // ASTContext creates these.
  friend class ASTNodeImporter;
  friend class ASTReader; // FIXME: ASTContext::getInjectedClassNameType is not
                          // currently suitable for AST reading, too much
                          // interdependencies.
  template <class T> friend class serialization::AbstractTypeReader;

  // The class template (or partial specialization) this name refers to.
  CXXRecordDecl *Decl;

  /// The template specialization which this type represents.
  /// For example, in
  /// template <class T> class A { ... };
  /// this is A<T>, whereas in
  /// template <class X, class Y> class A<B<X,Y> > { ... };
  /// this is A<B<X,Y> >.
  ///
  /// It is always unqualified, always a template specialization type,
  /// and always dependent.
  QualType InjectedType;

  InjectedClassNameType(CXXRecordDecl *D, QualType TST)
      : Type(InjectedClassName, QualType(),
             TypeDependence::DependentInstantiation),
        Decl(D), InjectedType(TST) {
    // Enforce the invariants documented on InjectedType above.
    assert(isa<TemplateSpecializationType>(TST));
    assert(!TST.hasQualifiers());
    assert(TST->isDependentType());
  }

public:
  QualType getInjectedSpecializationType() const { return InjectedType; }

  const TemplateSpecializationType *getInjectedTST() const {
    // Safe: the constructor asserts InjectedType is a
    // TemplateSpecializationType.
    return cast<TemplateSpecializationType>(InjectedType.getTypePtr());
  }

  TemplateName getTemplateName() const {
    return getInjectedTST()->getTemplateName();
  }

  CXXRecordDecl *getDecl() const;

  bool isSugared() const { return false; }
  QualType desugar() const { return QualType(this, 0); }

  static bool classof(const Type *T) {
    return T->getTypeClass() == InjectedClassName;
  }
};
5308
/// The kind of a tag type, i.e. which keyword introduced the tag declaration.
enum TagTypeKind {
  /// The "struct" keyword.
  TTK_Struct,

  /// The "__interface" keyword (Microsoft extension).
  TTK_Interface,

  /// The "union" keyword.
  TTK_Union,

  /// The "class" keyword.
  TTK_Class,

  /// The "enum" keyword.
  TTK_Enum
};
5326
/// The elaboration keyword that precedes a qualified type name or
/// introduces an elaborated-type-specifier.
///
/// NOTE: the first five enumerators parallel TagTypeKind; see
/// TypeWithKeyword::getKeywordForTagTypeKind for the mapping.
enum ElaboratedTypeKeyword {
  /// The "struct" keyword introduces the elaborated-type-specifier.
  ETK_Struct,

  /// The "__interface" keyword introduces the elaborated-type-specifier.
  ETK_Interface,

  /// The "union" keyword introduces the elaborated-type-specifier.
  ETK_Union,

  /// The "class" keyword introduces the elaborated-type-specifier.
  ETK_Class,

  /// The "enum" keyword introduces the elaborated-type-specifier.
  ETK_Enum,

  /// The "typename" keyword precedes the qualified type name, e.g.,
  /// \c typename T::type.
  ETK_Typename,

  /// No keyword precedes the qualified type name.
  ETK_None
};
5352
/// A helper class for Type nodes having an ElaboratedTypeKeyword.
/// The keyword is stored in the free bits of the base class.
/// Also provides a few static helpers for converting and printing
/// elaborated type keyword and tag type kind enumerations.
class TypeWithKeyword : public Type {
protected:
  TypeWithKeyword(ElaboratedTypeKeyword Keyword, TypeClass tc,
                  QualType Canonical, TypeDependence Dependence)
      : Type(tc, Canonical, Dependence) {
    TypeWithKeywordBits.Keyword = Keyword;
  }

public:
  /// Retrieve the keyword this type was written with (e.g. ETK_Struct).
  ElaboratedTypeKeyword getKeyword() const {
    return static_cast<ElaboratedTypeKeyword>(TypeWithKeywordBits.Keyword);
  }

  /// Converts a type specifier (DeclSpec::TST) into an elaborated type keyword.
  static ElaboratedTypeKeyword getKeywordForTypeSpec(unsigned TypeSpec);

  /// Converts a type specifier (DeclSpec::TST) into a tag type kind.
  /// It is an error to provide a type specifier which *isn't* a tag kind here.
  static TagTypeKind getTagTypeKindForTypeSpec(unsigned TypeSpec);

  /// Converts a TagTypeKind into an elaborated type keyword.
  static ElaboratedTypeKeyword getKeywordForTagTypeKind(TagTypeKind Tag);

  /// Converts an elaborated type keyword into a TagTypeKind.
  /// It is an error to provide an elaborated type keyword
  /// which *isn't* a tag kind here.
  static TagTypeKind getTagTypeKindForKeyword(ElaboratedTypeKeyword Keyword);

  /// Whether the given keyword names a tag kind (struct/class/union/etc.).
  static bool KeywordIsTagTypeKind(ElaboratedTypeKeyword Keyword);

  /// Retrieve the spelling of the given keyword ("" for ETK_None).
  static StringRef getKeywordName(ElaboratedTypeKeyword Keyword);

  static StringRef getTagTypeKindName(TagTypeKind Kind) {
    return getKeywordName(getKeywordForTagTypeKind(Kind));
  }

  // This is an abstract helper, never a concrete Type node, so it is
  // deliberately excluded from isa/cast/dyn_cast by this classof.
  class CannotCastToThisType {};
  static CannotCastToThisType classof(const Type *);
};
5396
/// Represents a type that was referred to using an elaborated type
/// keyword, e.g., struct S, or via a qualified name, e.g., N::M::type,
/// or both.
///
/// This type is used to keep track of a type name as written in the
/// source code, including tag keywords and any nested-name-specifiers.
/// The type itself is always "sugar", used to express what was written
/// in the source code but containing no additional semantic information.
class ElaboratedType final
    : public TypeWithKeyword,
      public llvm::FoldingSetNode,
      private llvm::TrailingObjects<ElaboratedType, TagDecl *> {
  friend class ASTContext; // ASTContext creates these
  friend TrailingObjects;

  /// The nested name specifier containing the qualifier.
  NestedNameSpecifier *NNS;

  /// The type that this qualified name refers to.
  QualType NamedType;

  /// The (re)declaration of this tag type owned by this occurrence is stored
  /// as a trailing object if there is one. Use getOwnedTagDecl to obtain
  /// it, or obtain a null pointer if there is none.

  ElaboratedType(ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS,
                 QualType NamedType, QualType CanonType, TagDecl *OwnedTagDecl)
      : TypeWithKeyword(Keyword, Elaborated, CanonType,
                        // Any semantic dependence on the qualifier will have
                        // been incorporated into NamedType. We still need to
                        // track syntactic (instantiation / error / pack)
                        // dependence on the qualifier.
                        NamedType->getDependence() |
                            (NNS ? toSyntacticDependence(
                                       toTypeDependence(NNS->getDependence()))
                                 : TypeDependence::None)),
        NNS(NNS), NamedType(NamedType) {
    ElaboratedTypeBits.HasOwnedTagDecl = false;
    if (OwnedTagDecl) {
      ElaboratedTypeBits.HasOwnedTagDecl = true;
      // The owned TagDecl lives in the trailing storage (see TrailingObjects
      // base); the bit above records whether that storage is present.
      *getTrailingObjects<TagDecl *>() = OwnedTagDecl;
    }
    assert(!(Keyword == ETK_None && NNS == nullptr) &&
           "ElaboratedType cannot have elaborated type keyword "
           "and name qualifier both null.");
  }

public:
  /// Retrieve the qualification on this type.
  NestedNameSpecifier *getQualifier() const { return NNS; }

  /// Retrieve the type named by the qualified-id.
  QualType getNamedType() const { return NamedType; }

  /// Remove a single level of sugar.
  QualType desugar() const { return getNamedType(); }

  /// Returns whether this type directly provides sugar.
  bool isSugared() const { return true; }

  /// Return the (re)declaration of this type owned by this occurrence of this
  /// type, or nullptr if there is none.
  TagDecl *getOwnedTagDecl() const {
    return ElaboratedTypeBits.HasOwnedTagDecl ? *getTrailingObjects<TagDecl *>()
                                              : nullptr;
  }

  // FoldingSet uniquing: both Profile overloads must hash exactly the
  // fields the constructor consumes.
  void Profile(llvm::FoldingSetNodeID &ID) {
    Profile(ID, getKeyword(), NNS, NamedType, getOwnedTagDecl());
  }

  static void Profile(llvm::FoldingSetNodeID &ID, ElaboratedTypeKeyword Keyword,
                      NestedNameSpecifier *NNS, QualType NamedType,
                      TagDecl *OwnedTagDecl) {
    ID.AddInteger(Keyword);
    ID.AddPointer(NNS);
    NamedType.Profile(ID);
    ID.AddPointer(OwnedTagDecl);
  }

  static bool classof(const Type *T) { return T->getTypeClass() == Elaborated; }
};
5479
/// Represents a qualified type name for which the type name is
/// dependent.
///
/// DependentNameType represents a class of dependent types that involve a
/// possibly dependent nested-name-specifier (e.g., "T::") followed by a
/// name of a type. The DependentNameType may start with a "typename" (for a
/// typename-specifier), "class", "struct", "union", or "enum" (for a
/// dependent elaborated-type-specifier), or nothing (in contexts where we
/// know that we must be referring to a type, e.g., in a base class specifier).
/// Typically the nested-name-specifier is dependent, but in MSVC compatibility
/// mode, this type is used with non-dependent names to delay name lookup until
/// instantiation.
class DependentNameType : public TypeWithKeyword, public llvm::FoldingSetNode {
  friend class ASTContext; // ASTContext creates these

  /// The nested name specifier containing the qualifier.
  NestedNameSpecifier *NNS;

  /// The type that this typename specifier refers to.
  const IdentifierInfo *Name;

  DependentNameType(ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS,
                    const IdentifierInfo *Name, QualType CanonType)
      : TypeWithKeyword(Keyword, DependentName, CanonType,
                        TypeDependence::DependentInstantiation |
                            toTypeDependence(NNS->getDependence())),
        NNS(NNS), Name(Name) {}

public:
  /// Retrieve the qualification on this type.
  NestedNameSpecifier *getQualifier() const { return NNS; }

  /// Retrieve the type named by the typename specifier as an identifier.
  ///
  /// This routine will return a non-NULL identifier pointer when the
  /// form of the original typename was terminated by an identifier,
  /// e.g., "typename T::type".
  const IdentifierInfo *getIdentifier() const {
    return Name;
  }

  bool isSugared() const { return false; }
  QualType desugar() const { return QualType(this, 0); }

  // FoldingSet uniquing over (keyword, qualifier, identifier).
  void Profile(llvm::FoldingSetNodeID &ID) {
    Profile(ID, getKeyword(), NNS, Name);
  }

  static void Profile(llvm::FoldingSetNodeID &ID, ElaboratedTypeKeyword Keyword,
                      NestedNameSpecifier *NNS, const IdentifierInfo *Name) {
    ID.AddInteger(Keyword);
    ID.AddPointer(NNS);
    ID.AddPointer(Name);
  }

  static bool classof(const Type *T) {
    return T->getTypeClass() == DependentName;
  }
};
5539
/// Represents a template specialization type whose template cannot be
/// resolved, e.g.
///   A<T>::template B<T>
class alignas(8) DependentTemplateSpecializationType
    : public TypeWithKeyword,
      public llvm::FoldingSetNode {
  friend class ASTContext; // ASTContext creates these

  /// The nested name specifier containing the qualifier.
  NestedNameSpecifier *NNS;

  /// The identifier of the template.
  const IdentifierInfo *Name;

  DependentTemplateSpecializationType(ElaboratedTypeKeyword Keyword,
                                      NestedNameSpecifier *NNS,
                                      const IdentifierInfo *Name,
                                      ArrayRef<TemplateArgument> Args,
                                      QualType Canon);

  /// The template arguments are stored immediately after this object;
  /// the alignas(8) on the class guarantees suitable alignment for them.
  const TemplateArgument *getArgBuffer() const {
    return reinterpret_cast<const TemplateArgument*>(this+1);
  }

  TemplateArgument *getArgBuffer() {
    return reinterpret_cast<TemplateArgument*>(this+1);
  }

public:
  NestedNameSpecifier *getQualifier() const { return NNS; }
  const IdentifierInfo *getIdentifier() const { return Name; }

  /// Retrieve the template arguments.
  const TemplateArgument *getArgs() const {
    return getArgBuffer();
  }

  /// Retrieve the number of template arguments.
  unsigned getNumArgs() const {
    return DependentTemplateSpecializationTypeBits.NumArgs;
  }

  const TemplateArgument &getArg(unsigned Idx) const; // in TemplateBase.h

  ArrayRef<TemplateArgument> template_arguments() const {
    return {getArgs(), getNumArgs()};
  }

  using iterator = const TemplateArgument *;

  iterator begin() const { return getArgs(); }
  iterator end() const; // inline in TemplateBase.h

  bool isSugared() const { return false; }
  QualType desugar() const { return QualType(this, 0); }

  // Profiling needs the ASTContext because canonicalizing the template
  // arguments is context-dependent (see the out-of-line static overload).
  void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) {
    Profile(ID, Context, getKeyword(), NNS, Name, {getArgs(), getNumArgs()});
  }

  static void Profile(llvm::FoldingSetNodeID &ID,
                      const ASTContext &Context,
                      ElaboratedTypeKeyword Keyword,
                      NestedNameSpecifier *Qualifier,
                      const IdentifierInfo *Name,
                      ArrayRef<TemplateArgument> Args);

  static bool classof(const Type *T) {
    return T->getTypeClass() == DependentTemplateSpecialization;
  }
};
5611
/// Represents a pack expansion of types.
///
/// Pack expansions are part of C++11 variadic templates. A pack
/// expansion contains a pattern, which itself contains one or more
/// "unexpanded" parameter packs. When instantiated, a pack expansion
/// produces a series of types, each instantiated from the pattern of
/// the expansion, where the Ith instantiation of the pattern uses the
/// Ith arguments bound to each of the unexpanded parameter packs. The
/// pack expansion is considered to "expand" these unexpanded
/// parameter packs.
///
/// \code
/// template<typename ...Types> struct tuple;
///
/// template<typename ...Types>
/// struct tuple_of_references {
///   typedef tuple<Types&...> type;
/// };
/// \endcode
///
/// Here, the pack expansion \c Types&... is represented via a
/// PackExpansionType whose pattern is Types&.
class PackExpansionType : public Type, public llvm::FoldingSetNode {
  friend class ASTContext; // ASTContext creates these

  /// The pattern of the pack expansion.
  QualType Pattern;

  PackExpansionType(QualType Pattern, QualType Canon,
                    Optional<unsigned> NumExpansions)
      : Type(PackExpansion, Canon,
             // The expansion is dependent even if its pattern is not, and it
             // no longer contains the unexpanded pack the pattern had.
             (Pattern->getDependence() | TypeDependence::Dependent |
              TypeDependence::Instantiation) &
                 ~TypeDependence::UnexpandedPack),
        Pattern(Pattern) {
    // The bitfield stores NumExpansions biased by one so that 0 can
    // encode "unknown"; getNumExpansions undoes the bias.
    PackExpansionTypeBits.NumExpansions =
        NumExpansions ? *NumExpansions + 1 : 0;
  }

public:
  /// Retrieve the pattern of this pack expansion, which is the
  /// type that will be repeatedly instantiated when instantiating the
  /// pack expansion itself.
  QualType getPattern() const { return Pattern; }

  /// Retrieve the number of expansions that this pack expansion will
  /// generate, if known.
  Optional<unsigned> getNumExpansions() const {
    if (PackExpansionTypeBits.NumExpansions)
      return PackExpansionTypeBits.NumExpansions - 1;
    return None;
  }

  bool isSugared() const { return false; }
  QualType desugar() const { return QualType(this, 0); }

  void Profile(llvm::FoldingSetNodeID &ID) {
    Profile(ID, getPattern(), getNumExpansions());
  }

  static void Profile(llvm::FoldingSetNodeID &ID, QualType Pattern,
                      Optional<unsigned> NumExpansions) {
    ID.AddPointer(Pattern.getAsOpaquePtr());
    ID.AddBoolean(NumExpansions.hasValue());
    if (NumExpansions)
      ID.AddInteger(*NumExpansions);
  }

  static bool classof(const Type *T) {
    return T->getTypeClass() == PackExpansion;
  }
};
5684
/// This class wraps the list of protocol qualifiers. For types that can
/// take ObjC protocol qualifiers, they can subclass this class.
///
/// CRTP base: the derived type T supplies the actual storage via
/// getProtocolStorageImpl / getNumProtocolsImpl / setNumProtocolsImpl.
template <class T>
class ObjCProtocolQualifiers {
protected:
  ObjCProtocolQualifiers() = default;

  ObjCProtocolDecl * const *getProtocolStorage() const {
    return const_cast<ObjCProtocolQualifiers*>(this)->getProtocolStorage();
  }

  ObjCProtocolDecl **getProtocolStorage() {
    return static_cast<T*>(this)->getProtocolStorageImpl();
  }

  void setNumProtocols(unsigned N) {
    static_cast<T*>(this)->setNumProtocolsImpl(N);
  }

  /// Copy the given protocol list into the derived type's storage.
  void initialize(ArrayRef<ObjCProtocolDecl *> protocols) {
    setNumProtocols(protocols.size());
    // The derived type stores the count in a bitfield; make sure it fit.
    assert(getNumProtocols() == protocols.size() &&
           "bitfield overflow in protocol count");
    if (!protocols.empty())
      memcpy(getProtocolStorage(), protocols.data(),
             protocols.size() * sizeof(ObjCProtocolDecl*));
  }

public:
  using qual_iterator = ObjCProtocolDecl * const *;
  using qual_range = llvm::iterator_range<qual_iterator>;

  qual_range quals() const { return qual_range(qual_begin(), qual_end()); }
  qual_iterator qual_begin() const { return getProtocolStorage(); }
  qual_iterator qual_end() const { return qual_begin() + getNumProtocols(); }

  bool qual_empty() const { return getNumProtocols() == 0; }

  /// Return the number of qualifying protocols in this type, or 0 if
  /// there are none.
  unsigned getNumProtocols() const {
    return static_cast<const T*>(this)->getNumProtocolsImpl();
  }

  /// Fetch a protocol by index.
  ObjCProtocolDecl *getProtocol(unsigned I) const {
    assert(I < getNumProtocols() && "Out-of-range protocol access");
    return qual_begin()[I];
  }

  /// Retrieve all of the protocol qualifiers.
  ArrayRef<ObjCProtocolDecl *> getProtocols() const {
    return ArrayRef<ObjCProtocolDecl *>(qual_begin(), getNumProtocols());
  }
};
5740
/// Represents a type parameter type in Objective C. It can take
/// a list of protocols.
class ObjCTypeParamType : public Type,
                          public ObjCProtocolQualifiers<ObjCTypeParamType>,
                          public llvm::FoldingSetNode {
  friend class ASTContext;
  friend class ObjCProtocolQualifiers<ObjCTypeParamType>;

  /// The number of protocols stored on this type.
  unsigned NumProtocols : 6;

  /// The type parameter declaration this type refers to.
  ObjCTypeParamDecl *OTPDecl;

  /// The protocols are stored after the ObjCTypeParamType node. In the
  /// canonical type, the list of protocols are sorted alphabetically
  /// and uniqued.
  ObjCProtocolDecl **getProtocolStorageImpl();

  /// Return the number of qualifying protocols in this interface type,
  /// or 0 if there are none.
  unsigned getNumProtocolsImpl() const {
    return NumProtocols;
  }

  void setNumProtocolsImpl(unsigned N) {
    NumProtocols = N;
  }

  ObjCTypeParamType(const ObjCTypeParamDecl *D,
                    QualType can,
                    ArrayRef<ObjCProtocolDecl *> protocols);

public:
  // This node is sugar for its canonical type.
  bool isSugared() const { return true; }
  QualType desugar() const { return getCanonicalTypeInternal(); }

  static bool classof(const Type *T) {
    return T->getTypeClass() == ObjCTypeParam;
  }

  void Profile(llvm::FoldingSetNodeID &ID);
  static void Profile(llvm::FoldingSetNodeID &ID,
                      const ObjCTypeParamDecl *OTPDecl,
                      QualType CanonicalType,
                      ArrayRef<ObjCProtocolDecl *> protocols);

  ObjCTypeParamDecl *getDecl() const { return OTPDecl; }
};
5789
/// Represents a class type in Objective C.
///
/// Every Objective C type is a combination of a base type, a set of
/// type arguments (optional, for parameterized classes) and a list of
/// protocols.
///
/// Given the following declarations:
/// \code
/// \@class C<T>;
/// \@protocol P;
/// \endcode
///
/// 'C' is an ObjCInterfaceType C. It is sugar for an ObjCObjectType
/// with base C and no protocols.
///
/// 'C<P>' is an unspecialized ObjCObjectType with base C and protocol list [P].
/// 'C<C*>' is a specialized ObjCObjectType with type arguments 'C*' and no
/// protocol list.
/// 'C<C*><P>' is a specialized ObjCObjectType with base C, type arguments 'C*',
/// and protocol list [P].
///
/// 'id' is a TypedefType which is sugar for an ObjCObjectPointerType whose
/// pointee is an ObjCObjectType with base BuiltinType::ObjCIdType
/// and no protocols.
///
/// 'id<P>' is an ObjCObjectPointerType whose pointee is an ObjCObjectType
/// with base BuiltinType::ObjCIdType and protocol list [P]. Eventually
/// this should get its own sugar class to better represent the source.
class ObjCObjectType : public Type,
                       public ObjCProtocolQualifiers<ObjCObjectType> {
  friend class ObjCProtocolQualifiers<ObjCObjectType>;

  // ObjCObjectType.NumTypeArgs - the number of type arguments stored
  // after the ObjCObjectPointerType node.
  // ObjCObjectType.NumProtocols - the number of protocols stored
  // after the type arguments of ObjCObjectPointerType node.
  //
  // These protocols are those written directly on the type. If
  // protocol qualifiers ever become additive, the iterators will need
  // to get kindof complicated.
  //
  // In the canonical object type, these are sorted alphabetically
  // and uniqued.

  /// Either a BuiltinType or an InterfaceType or sugar for either.
  QualType BaseType;

  /// Cached superclass type; the bool records whether the cache has
  /// been computed yet (see getSuperClassType / computeSuperClassTypeSlow).
  mutable llvm::PointerIntPair<const ObjCObjectType *, 1, bool>
    CachedSuperClassType;

  QualType *getTypeArgStorage();
  const QualType *getTypeArgStorage() const {
    return const_cast<ObjCObjectType *>(this)->getTypeArgStorage();
  }

  ObjCProtocolDecl **getProtocolStorageImpl();
  /// Return the number of qualifying protocols in this interface type,
  /// or 0 if there are none.
  unsigned getNumProtocolsImpl() const {
    return ObjCObjectTypeBits.NumProtocols;
  }
  void setNumProtocolsImpl(unsigned N) {
    ObjCObjectTypeBits.NumProtocols = N;
  }

protected:
  enum Nonce_ObjCInterface { Nonce_ObjCInterface };

  ObjCObjectType(QualType Canonical, QualType Base,
                 ArrayRef<QualType> typeArgs,
                 ArrayRef<ObjCProtocolDecl *> protocols,
                 bool isKindOf);

  /// Constructor used only by ObjCInterfaceType: the base type is the
  /// interface type itself (see ObjCInterfaceType's class comment).
  ObjCObjectType(enum Nonce_ObjCInterface)
      : Type(ObjCInterface, QualType(), TypeDependence::None),
        BaseType(QualType(this_(), 0)) {
    ObjCObjectTypeBits.NumProtocols = 0;
    ObjCObjectTypeBits.NumTypeArgs = 0;
    ObjCObjectTypeBits.IsKindOf = 0;
  }

  void computeSuperClassTypeSlow() const;

public:
  /// Gets the base type of this object type. This is always (possibly
  /// sugar for) one of:
  ///  - the 'id' builtin type (as opposed to the 'id' type visible to the
  ///    user, which is a typedef for an ObjCObjectPointerType)
  ///  - the 'Class' builtin type (same caveat)
  ///  - an ObjCObjectType (currently always an ObjCInterfaceType)
  QualType getBaseType() const { return BaseType; }

  bool isObjCId() const {
    return getBaseType()->isSpecificBuiltinType(BuiltinType::ObjCId);
  }

  bool isObjCClass() const {
    return getBaseType()->isSpecificBuiltinType(BuiltinType::ObjCClass);
  }

  bool isObjCUnqualifiedId() const { return qual_empty() && isObjCId(); }
  bool isObjCUnqualifiedClass() const { return qual_empty() && isObjCClass(); }
  bool isObjCUnqualifiedIdOrClass() const {
    if (!qual_empty()) return false;
    if (const BuiltinType *T = getBaseType()->getAs<BuiltinType>())
      return T->getKind() == BuiltinType::ObjCId ||
             T->getKind() == BuiltinType::ObjCClass;
    return false;
  }
  bool isObjCQualifiedId() const { return !qual_empty() && isObjCId(); }
  bool isObjCQualifiedClass() const { return !qual_empty() && isObjCClass(); }

  /// Gets the interface declaration for this object type, if the base type
  /// really is an interface.
  ObjCInterfaceDecl *getInterface() const;

  /// Determine whether this object type is "specialized", meaning
  /// that it has type arguments.
  bool isSpecialized() const;

  /// Determine whether this object type was written with type arguments.
  bool isSpecializedAsWritten() const {
    return ObjCObjectTypeBits.NumTypeArgs > 0;
  }

  /// Determine whether this object type is "unspecialized", meaning
  /// that it has no type arguments.
  bool isUnspecialized() const { return !isSpecialized(); }

  /// Determine whether this object type is "unspecialized" as
  /// written, meaning that it has no type arguments.
  bool isUnspecializedAsWritten() const { return !isSpecializedAsWritten(); }

  /// Retrieve the type arguments of this object type (semantically).
  ArrayRef<QualType> getTypeArgs() const;

  /// Retrieve the type arguments of this object type as they were
  /// written.
  ArrayRef<QualType> getTypeArgsAsWritten() const {
    return llvm::makeArrayRef(getTypeArgStorage(),
                              ObjCObjectTypeBits.NumTypeArgs);
  }

  /// Whether this is a "__kindof" type as written.
  bool isKindOfTypeAsWritten() const { return ObjCObjectTypeBits.IsKindOf; }

  /// Whether this is a "__kindof" type (semantically).
  bool isKindOfType() const;

  /// Retrieve the type of the superclass of this object type.
  ///
  /// This operation substitutes any type arguments into the
  /// superclass of the current class type, potentially producing a
  /// specialization of the superclass type. Produces a null type if
  /// there is no superclass.
  QualType getSuperClassType() const {
    if (!CachedSuperClassType.getInt())
      computeSuperClassTypeSlow();

    assert(CachedSuperClassType.getInt() && "Superclass not set?");
    return QualType(CachedSuperClassType.getPointer(), 0);
  }

  /// Strip off the Objective-C "kindof" type and (with it) any
  /// protocol qualifiers.
  QualType stripObjCKindOfTypeAndQuals(const ASTContext &ctx) const;

  bool isSugared() const { return false; }
  QualType desugar() const { return QualType(this, 0); }

  static bool classof(const Type *T) {
    return T->getTypeClass() == ObjCObject ||
           T->getTypeClass() == ObjCInterface;
  }
};
5966
/// A class providing a concrete implementation
/// of ObjCObjectType, so as to not increase the footprint of
/// ObjCInterfaceType. Code outside of ASTContext and the core type
/// system should not reference this type.
class ObjCObjectTypeImpl : public ObjCObjectType, public llvm::FoldingSetNode {
  friend class ASTContext;

  // If anyone adds fields here, ObjCObjectType::getProtocolStorage()
  // will need to be modified.

  ObjCObjectTypeImpl(QualType Canonical, QualType Base,
                     ArrayRef<QualType> typeArgs,
                     ArrayRef<ObjCProtocolDecl *> protocols,
                     bool isKindOf)
      : ObjCObjectType(Canonical, Base, typeArgs, protocols, isKindOf) {}

public:
  void Profile(llvm::FoldingSetNodeID &ID);
  static void Profile(llvm::FoldingSetNodeID &ID,
                      QualType Base,
                      ArrayRef<QualType> typeArgs,
                      ArrayRef<ObjCProtocolDecl *> protocols,
                      bool isKindOf);
};
5991
5992inline QualType *ObjCObjectType::getTypeArgStorage() {
5993 return reinterpret_cast<QualType *>(static_cast<ObjCObjectTypeImpl*>(this)+1);
5994}
5995
5996inline ObjCProtocolDecl **ObjCObjectType::getProtocolStorageImpl() {
5997 return reinterpret_cast<ObjCProtocolDecl**>(
5998 getTypeArgStorage() + ObjCObjectTypeBits.NumTypeArgs);
5999}
6000
6001inline ObjCProtocolDecl **ObjCTypeParamType::getProtocolStorageImpl() {
6002 return reinterpret_cast<ObjCProtocolDecl**>(
6003 static_cast<ObjCTypeParamType*>(this)+1);
6004}
6005
/// Interfaces are the core concept in Objective-C for object oriented design.
/// They basically correspond to C++ classes. There are two kinds of interface
/// types: normal interfaces like `NSString`, and qualified interfaces, which
/// are qualified with a protocol list like `NSString<NSCopyable, NSAmazing>`.
///
/// ObjCInterfaceType guarantees the following properties when considered
/// as a subtype of its superclass, ObjCObjectType:
///   - There are no protocol qualifiers. To reinforce this, code which
///     tries to invoke the protocol methods via an ObjCInterfaceType will
///     fail to compile.
///   - It is its own base type. That is, if T is an ObjCInterfaceType*,
///     T->getBaseType() == QualType(T, 0).
class ObjCInterfaceType : public ObjCObjectType {
  friend class ASTContext; // ASTContext creates these.
  friend class ASTReader;
  friend class ObjCInterfaceDecl;
  template <class T> friend class serialization::AbstractTypeReader;

  // mutable so the AST reader can fill it in lazily.
  mutable ObjCInterfaceDecl *Decl;

  ObjCInterfaceType(const ObjCInterfaceDecl *D)
      : ObjCObjectType(Nonce_ObjCInterface),
        Decl(const_cast<ObjCInterfaceDecl*>(D)) {}

public:
  /// Get the declaration of this interface.
  ObjCInterfaceDecl *getDecl() const { return Decl; }

  bool isSugared() const { return false; }
  QualType desugar() const { return QualType(this, 0); }

  static bool classof(const Type *T) {
    return T->getTypeClass() == ObjCInterface;
  }

  // Nonsense to "hide" certain members of ObjCObjectType within this
  // class.  People asking for protocols on an ObjCInterfaceType are
  // not going to get what they want: ObjCInterfaceTypes are
  // guaranteed to have no protocols.
  enum {
    qual_iterator,
    qual_begin,
    qual_end,
    getNumProtocols,
    getProtocol
  };
};
6053
6054inline ObjCInterfaceDecl *ObjCObjectType::getInterface() const {
6055 QualType baseType = getBaseType();
6056 while (const auto *ObjT = baseType->getAs<ObjCObjectType>()) {
6057 if (const auto *T = dyn_cast<ObjCInterfaceType>(ObjT))
6058 return T->getDecl();
6059
6060 baseType = ObjT->getBaseType();
6061 }
6062
6063 return nullptr;
6064}
6065
6066/// Represents a pointer to an Objective C object.
6067///
6068/// These are constructed from pointer declarators when the pointee type is
6069/// an ObjCObjectType (or sugar for one). In addition, the 'id' and 'Class'
6070/// types are typedefs for these, and the protocol-qualified types 'id<P>'
6071/// and 'Class<P>' are translated into these.
6072///
6073/// Pointers to pointers to Objective C objects are still PointerTypes;
6074/// only the first level of pointer gets it own type implementation.
6075class ObjCObjectPointerType : public Type, public llvm::FoldingSetNode {
6076 friend class ASTContext; // ASTContext creates these.
6077
6078 QualType PointeeType;
6079
6080 ObjCObjectPointerType(QualType Canonical, QualType Pointee)
6081 : Type(ObjCObjectPointer, Canonical, Pointee->getDependence()),
6082 PointeeType(Pointee) {}
6083
6084public:
6085 /// Gets the type pointed to by this ObjC pointer.
6086 /// The result will always be an ObjCObjectType or sugar thereof.
6087 QualType getPointeeType() const { return PointeeType; }
6088
6089 /// Gets the type pointed to by this ObjC pointer. Always returns non-null.
6090 ///
6091 /// This method is equivalent to getPointeeType() except that
6092 /// it discards any typedefs (or other sugar) between this
6093 /// type and the "outermost" object type. So for:
6094 /// \code
6095 /// \@class A; \@protocol P; \@protocol Q;
6096 /// typedef A<P> AP;
6097 /// typedef A A1;
6098 /// typedef A1<P> A1P;
6099 /// typedef A1P<Q> A1PQ;
6100 /// \endcode
6101 /// For 'A*', getObjectType() will return 'A'.
6102 /// For 'A<P>*', getObjectType() will return 'A<P>'.
6103 /// For 'AP*', getObjectType() will return 'A<P>'.
6104 /// For 'A1*', getObjectType() will return 'A'.
6105 /// For 'A1<P>*', getObjectType() will return 'A1<P>'.
6106 /// For 'A1P*', getObjectType() will return 'A1<P>'.
6107 /// For 'A1PQ*', getObjectType() will return 'A1<Q>', because
6108 /// adding protocols to a protocol-qualified base discards the
6109 /// old qualifiers (for now). But if it didn't, getObjectType()
6110 /// would return 'A1P<Q>' (and we'd have to make iterating over
6111 /// qualifiers more complicated).
6112 const ObjCObjectType *getObjectType() const {
6113 return PointeeType->castAs<ObjCObjectType>();
6114 }
6115
6116 /// If this pointer points to an Objective C
6117 /// \@interface type, gets the type for that interface. Any protocol
6118 /// qualifiers on the interface are ignored.
6119 ///
6120 /// \return null if the base type for this pointer is 'id' or 'Class'
6121 const ObjCInterfaceType *getInterfaceType() const;
6122
6123 /// If this pointer points to an Objective \@interface
6124 /// type, gets the declaration for that interface.
6125 ///
6126 /// \return null if the base type for this pointer is 'id' or 'Class'
6127 ObjCInterfaceDecl *getInterfaceDecl() const {
6128 return getObjectType()->getInterface();
6129 }
6130
6131 /// True if this is equivalent to the 'id' type, i.e. if
6132 /// its object type is the primitive 'id' type with no protocols.
6133 bool isObjCIdType() const {
6134 return getObjectType()->isObjCUnqualifiedId();
6135 }
6136
6137 /// True if this is equivalent to the 'Class' type,
6138 /// i.e. if its object tive is the primitive 'Class' type with no protocols.
6139 bool isObjCClassType() const {
6140 return getObjectType()->isObjCUnqualifiedClass();
6141 }
6142
6143 /// True if this is equivalent to the 'id' or 'Class' type,
6144 bool isObjCIdOrClassType() const {
6145 return getObjectType()->isObjCUnqualifiedIdOrClass();
6146 }
6147
6148 /// True if this is equivalent to 'id<P>' for some non-empty set of
6149 /// protocols.
6150 bool isObjCQualifiedIdType() const {
6151 return getObjectType()->isObjCQualifiedId();
6152 }
6153
6154 /// True if this is equivalent to 'Class<P>' for some non-empty set of
6155 /// protocols.
6156 bool isObjCQualifiedClassType() const {
6157 return getObjectType()->isObjCQualifiedClass();
6158 }
6159
6160 /// Whether this is a "__kindof" type.
6161 bool isKindOfType() const { return getObjectType()->isKindOfType(); }
6162
6163 /// Whether this type is specialized, meaning that it has type arguments.
6164 bool isSpecialized() const { return getObjectType()->isSpecialized(); }
6165
6166 /// Whether this type is specialized, meaning that it has type arguments.
6167 bool isSpecializedAsWritten() const {
6168 return getObjectType()->isSpecializedAsWritten();
6169 }
6170
6171 /// Whether this type is unspecialized, meaning that is has no type arguments.
6172 bool isUnspecialized() const { return getObjectType()->isUnspecialized(); }
6173
6174 /// Determine whether this object type is "unspecialized" as
6175 /// written, meaning that it has no type arguments.
6176 bool isUnspecializedAsWritten() const { return !isSpecializedAsWritten(); }
6177
6178 /// Retrieve the type arguments for this type.
6179 ArrayRef<QualType> getTypeArgs() const {
6180 return getObjectType()->getTypeArgs();
6181 }
6182
6183 /// Retrieve the type arguments for this type.
6184 ArrayRef<QualType> getTypeArgsAsWritten() const {
6185 return getObjectType()->getTypeArgsAsWritten();
6186 }
6187
6188 /// An iterator over the qualifiers on the object type. Provided
6189 /// for convenience. This will always iterate over the full set of
6190 /// protocols on a type, not just those provided directly.
6191 using qual_iterator = ObjCObjectType::qual_iterator;
6192 using qual_range = llvm::iterator_range<qual_iterator>;
6193
6194 qual_range quals() const { return qual_range(qual_begin(), qual_end()); }
6195
6196 qual_iterator qual_begin() const {
6197 return getObjectType()->qual_begin();
6198 }
6199
6200 qual_iterator qual_end() const {
6201 return getObjectType()->qual_end();
6202 }
6203
6204 bool qual_empty() const { return getObjectType()->qual_empty(); }
6205
6206 /// Return the number of qualifying protocols on the object type.
6207 unsigned getNumProtocols() const {
6208 return getObjectType()->getNumProtocols();
6209 }
6210
6211 /// Retrieve a qualifying protocol by index on the object type.
6212 ObjCProtocolDecl *getProtocol(unsigned I) const {
6213 return getObjectType()->getProtocol(I);
6214 }
6215
6216 bool isSugared() const { return false; }
6217 QualType desugar() const { return QualType(this, 0); }
6218
6219 /// Retrieve the type of the superclass of this object pointer type.
6220 ///
6221 /// This operation substitutes any type arguments into the
6222 /// superclass of the current class type, potentially producing a
6223 /// pointer to a specialization of the superclass type. Produces a
6224 /// null type if there is no superclass.
6225 QualType getSuperClassType() const;
6226
6227 /// Strip off the Objective-C "kindof" type and (with it) any
6228 /// protocol qualifiers.
6229 const ObjCObjectPointerType *stripObjCKindOfTypeAndQuals(
6230 const ASTContext &ctx) const;
6231
6232 void Profile(llvm::FoldingSetNodeID &ID) {
6233 Profile(ID, getPointeeType());
6234 }
6235
6236 static void Profile(llvm::FoldingSetNodeID &ID, QualType T) {
6237 ID.AddPointer(T.getAsOpaquePtr());
6238 }
6239
6240 static bool classof(const Type *T) {
6241 return T->getTypeClass() == ObjCObjectPointer;
6242 }
6243};
6244
6245class AtomicType : public Type, public llvm::FoldingSetNode {
6246 friend class ASTContext; // ASTContext creates these.
6247
6248 QualType ValueType;
6249
6250 AtomicType(QualType ValTy, QualType Canonical)
6251 : Type(Atomic, Canonical, ValTy->getDependence()), ValueType(ValTy) {}
6252
6253public:
6254 /// Gets the type contained by this atomic type, i.e.
6255 /// the type returned by performing an atomic load of this atomic type.
6256 QualType getValueType() const { return ValueType; }
6257
6258 bool isSugared() const { return false; }
6259 QualType desugar() const { return QualType(this, 0); }
6260
6261 void Profile(llvm::FoldingSetNodeID &ID) {
6262 Profile(ID, getValueType());
6263 }
6264
6265 static void Profile(llvm::FoldingSetNodeID &ID, QualType T) {
6266 ID.AddPointer(T.getAsOpaquePtr());
6267 }
6268
6269 static bool classof(const Type *T) {
6270 return T->getTypeClass() == Atomic;
6271 }
6272};
6273
6274/// PipeType - OpenCL20.
6275class PipeType : public Type, public llvm::FoldingSetNode {
6276 friend class ASTContext; // ASTContext creates these.
6277
6278 QualType ElementType;
6279 bool isRead;
6280
6281 PipeType(QualType elemType, QualType CanonicalPtr, bool isRead)
6282 : Type(Pipe, CanonicalPtr, elemType->getDependence()),
6283 ElementType(elemType), isRead(isRead) {}
6284
6285public:
6286 QualType getElementType() const { return ElementType; }
6287
6288 bool isSugared() const { return false; }
6289
6290 QualType desugar() const { return QualType(this, 0); }
6291
6292 void Profile(llvm::FoldingSetNodeID &ID) {
6293 Profile(ID, getElementType(), isReadOnly());
6294 }
6295
6296 static void Profile(llvm::FoldingSetNodeID &ID, QualType T, bool isRead) {
6297 ID.AddPointer(T.getAsOpaquePtr());
6298 ID.AddBoolean(isRead);
6299 }
6300
6301 static bool classof(const Type *T) {
6302 return T->getTypeClass() == Pipe;
6303 }
6304
6305 bool isReadOnly() const { return isRead; }
6306};
6307
6308/// A fixed int type of a specified bitwidth.
6309class ExtIntType final : public Type, public llvm::FoldingSetNode {
6310 friend class ASTContext;
6311 unsigned IsUnsigned : 1;
6312 unsigned NumBits : 24;
6313
6314protected:
6315 ExtIntType(bool isUnsigned, unsigned NumBits);
6316
6317public:
6318 bool isUnsigned() const { return IsUnsigned; }
6319 bool isSigned() const { return !IsUnsigned; }
6320 unsigned getNumBits() const { return NumBits; }
6321
6322 bool isSugared() const { return false; }
6323 QualType desugar() const { return QualType(this, 0); }
6324
6325 void Profile(llvm::FoldingSetNodeID &ID) {
6326 Profile(ID, isUnsigned(), getNumBits());
6327 }
6328
6329 static void Profile(llvm::FoldingSetNodeID &ID, bool IsUnsigned,
6330 unsigned NumBits) {
6331 ID.AddBoolean(IsUnsigned);
6332 ID.AddInteger(NumBits);
6333 }
6334
6335 static bool classof(const Type *T) { return T->getTypeClass() == ExtInt; }
6336};
6337
6338class DependentExtIntType final : public Type, public llvm::FoldingSetNode {
6339 friend class ASTContext;
6340 const ASTContext &Context;
6341 llvm::PointerIntPair<Expr*, 1, bool> ExprAndUnsigned;
6342
6343protected:
6344 DependentExtIntType(const ASTContext &Context, bool IsUnsigned,
6345 Expr *NumBits);
6346
6347public:
6348 bool isUnsigned() const;
6349 bool isSigned() const { return !isUnsigned(); }
6350 Expr *getNumBitsExpr() const;
6351
6352 bool isSugared() const { return false; }
6353 QualType desugar() const { return QualType(this, 0); }
6354
6355 void Profile(llvm::FoldingSetNodeID &ID) {
6356 Profile(ID, Context, isUnsigned(), getNumBitsExpr());
6357 }
6358 static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
6359 bool IsUnsigned, Expr *NumBitsExpr);
6360
6361 static bool classof(const Type *T) {
6362 return T->getTypeClass() == DependentExtInt;
6363 }
6364};
6365
6366/// A qualifier set is used to build a set of qualifiers.
6367class QualifierCollector : public Qualifiers {
6368public:
6369 QualifierCollector(Qualifiers Qs = Qualifiers()) : Qualifiers(Qs) {}
6370
6371 /// Collect any qualifiers on the given type and return an
6372 /// unqualified type. The qualifiers are assumed to be consistent
6373 /// with those already in the type.
6374 const Type *strip(QualType type) {
6375 addFastQualifiers(type.getLocalFastQualifiers());
6376 if (!type.hasLocalNonFastQualifiers())
6377 return type.getTypePtrUnsafe();
6378
6379 const ExtQuals *extQuals = type.getExtQualsUnsafe();
6380 addConsistentQualifiers(extQuals->getQualifiers());
6381 return extQuals->getBaseType();
6382 }
6383
6384 /// Apply the collected qualifiers to the given type.
6385 QualType apply(const ASTContext &Context, QualType QT) const;
6386
6387 /// Apply the collected qualifiers to the given type.
6388 QualType apply(const ASTContext &Context, const Type* T) const;
6389};
6390
6391/// A container of type source information.
6392///
6393/// A client can read the relevant info using TypeLoc wrappers, e.g:
6394/// @code
6395/// TypeLoc TL = TypeSourceInfo->getTypeLoc();
6396/// TL.getBeginLoc().print(OS, SrcMgr);
6397/// @endcode
6398class alignas(8) TypeSourceInfo {
6399 // Contains a memory block after the class, used for type source information,
6400 // allocated by ASTContext.
6401 friend class ASTContext;
6402
6403 QualType Ty;
6404
6405 TypeSourceInfo(QualType ty) : Ty(ty) {}
6406
6407public:
6408 /// Return the type wrapped by this type source info.
6409 QualType getType() const { return Ty; }
6410
6411 /// Return the TypeLoc wrapper for the type source info.
6412 TypeLoc getTypeLoc() const; // implemented in TypeLoc.h
6413
6414 /// Override the type stored in this TypeSourceInfo. Use with caution!
6415 void overrideType(QualType T) { Ty = T; }
6416};
6417
6418// Inline function definitions.
6419
6420inline SplitQualType SplitQualType::getSingleStepDesugaredType() const {
6421 SplitQualType desugar =
6422 Ty->getLocallyUnqualifiedSingleStepDesugaredType().split();
6423 desugar.Quals.addConsistentQualifiers(Quals);
6424 return desugar;
6425}
6426
6427inline const Type *QualType::getTypePtr() const {
6428 return getCommonPtr()->BaseType;
6429}
6430
6431inline const Type *QualType::getTypePtrOrNull() const {
6432 return (isNull() ? nullptr : getCommonPtr()->BaseType);
6433}
6434
6435inline SplitQualType QualType::split() const {
6436 if (!hasLocalNonFastQualifiers())
6437 return SplitQualType(getTypePtrUnsafe(),
6438 Qualifiers::fromFastMask(getLocalFastQualifiers()));
6439
6440 const ExtQuals *eq = getExtQualsUnsafe();
6441 Qualifiers qs = eq->getQualifiers();
6442 qs.addFastQualifiers(getLocalFastQualifiers());
6443 return SplitQualType(eq->getBaseType(), qs);
6444}
6445
6446inline Qualifiers QualType::getLocalQualifiers() const {
6447 Qualifiers Quals;
6448 if (hasLocalNonFastQualifiers())
6449 Quals = getExtQualsUnsafe()->getQualifiers();
6450 Quals.addFastQualifiers(getLocalFastQualifiers());
6451 return Quals;
6452}
6453
6454inline Qualifiers QualType::getQualifiers() const {
6455 Qualifiers quals = getCommonPtr()->CanonicalType.getLocalQualifiers();
6456 quals.addFastQualifiers(getLocalFastQualifiers());
6457 return quals;
6458}
6459
6460inline unsigned QualType::getCVRQualifiers() const {
6461 unsigned cvr = getCommonPtr()->CanonicalType.getLocalCVRQualifiers();
6462 cvr |= getLocalCVRQualifiers();
6463 return cvr;
6464}
6465
6466inline QualType QualType::getCanonicalType() const {
6467 QualType canon = getCommonPtr()->CanonicalType;
6468 return canon.withFastQualifiers(getLocalFastQualifiers());
6469}
6470
6471inline bool QualType::isCanonical() const {
6472 return getTypePtr()->isCanonicalUnqualified();
6473}
6474
6475inline bool QualType::isCanonicalAsParam() const {
6476 if (!isCanonical()) return false;
6477 if (hasLocalQualifiers()) return false;
6478
6479 const Type *T = getTypePtr();
6480 if (T->isVariablyModifiedType() && T->hasSizedVLAType())
6481 return false;
6482
6483 return !isa<FunctionType>(T) && !isa<ArrayType>(T);
6484}
6485
6486inline bool QualType::isConstQualified() const {
6487 return isLocalConstQualified() ||
6488 getCommonPtr()->CanonicalType.isLocalConstQualified();
6489}
6490
6491inline bool QualType::isRestrictQualified() const {
6492 return isLocalRestrictQualified() ||
6493 getCommonPtr()->CanonicalType.isLocalRestrictQualified();
6494}
6495
6496
6497inline bool QualType::isVolatileQualified() const {
6498 return isLocalVolatileQualified() ||
6499 getCommonPtr()->CanonicalType.isLocalVolatileQualified();
6500}
6501
6502inline bool QualType::hasQualifiers() const {
6503 return hasLocalQualifiers() ||
6504 getCommonPtr()->CanonicalType.hasLocalQualifiers();
6505}
6506
6507inline QualType QualType::getUnqualifiedType() const {
6508 if (!getTypePtr()->getCanonicalTypeInternal().hasLocalQualifiers())
6509 return QualType(getTypePtr(), 0);
6510
6511 return QualType(getSplitUnqualifiedTypeImpl(*this).Ty, 0);
6512}
6513
6514inline SplitQualType QualType::getSplitUnqualifiedType() const {
6515 if (!getTypePtr()->getCanonicalTypeInternal().hasLocalQualifiers())
6516 return split();
6517
6518 return getSplitUnqualifiedTypeImpl(*this);
6519}
6520
6521inline void QualType::removeLocalConst() {
6522 removeLocalFastQualifiers(Qualifiers::Const);
6523}
6524
6525inline void QualType::removeLocalRestrict() {
6526 removeLocalFastQualifiers(Qualifiers::Restrict);
6527}
6528
6529inline void QualType::removeLocalVolatile() {
6530 removeLocalFastQualifiers(Qualifiers::Volatile);
6531}
6532
6533inline void QualType::removeLocalCVRQualifiers(unsigned Mask) {
6534 assert(!(Mask & ~Qualifiers::CVRMask) && "mask has non-CVR bits")((void)0);
6535 static_assert((int)Qualifiers::CVRMask == (int)Qualifiers::FastMask,
6536 "Fast bits differ from CVR bits!");
6537
6538 // Fast path: we don't need to touch the slow qualifiers.
6539 removeLocalFastQualifiers(Mask);
6540}
6541
6542/// Check if this type has any address space qualifier.
6543inline bool QualType::hasAddressSpace() const {
6544 return getQualifiers().hasAddressSpace();
6545}
6546
6547/// Return the address space of this type.
6548inline LangAS QualType::getAddressSpace() const {
6549 return getQualifiers().getAddressSpace();
6550}
6551
6552/// Return the gc attribute of this type.
6553inline Qualifiers::GC QualType::getObjCGCAttr() const {
6554 return getQualifiers().getObjCGCAttr();
6555}
6556
6557inline bool QualType::hasNonTrivialToPrimitiveDefaultInitializeCUnion() const {
6558 if (auto *RD = getTypePtr()->getBaseElementTypeUnsafe()->getAsRecordDecl())
6559 return hasNonTrivialToPrimitiveDefaultInitializeCUnion(RD);
6560 return false;
6561}
6562
6563inline bool QualType::hasNonTrivialToPrimitiveDestructCUnion() const {
6564 if (auto *RD = getTypePtr()->getBaseElementTypeUnsafe()->getAsRecordDecl())
6565 return hasNonTrivialToPrimitiveDestructCUnion(RD);
6566 return false;
6567}
6568
6569inline bool QualType::hasNonTrivialToPrimitiveCopyCUnion() const {
6570 if (auto *RD = getTypePtr()->getBaseElementTypeUnsafe()->getAsRecordDecl())
6571 return hasNonTrivialToPrimitiveCopyCUnion(RD);
6572 return false;
6573}
6574
6575inline FunctionType::ExtInfo getFunctionExtInfo(const Type &t) {
6576 if (const auto *PT = t.getAs<PointerType>()) {
6577 if (const auto *FT = PT->getPointeeType()->getAs<FunctionType>())
6578 return FT->getExtInfo();
6579 } else if (const auto *FT = t.getAs<FunctionType>())
6580 return FT->getExtInfo();
6581
6582 return FunctionType::ExtInfo();
6583}
6584
6585inline FunctionType::ExtInfo getFunctionExtInfo(QualType t) {
6586 return getFunctionExtInfo(*t);
6587}
6588
6589/// Determine whether this type is more
6590/// qualified than the Other type. For example, "const volatile int"
6591/// is more qualified than "const int", "volatile int", and
6592/// "int". However, it is not more qualified than "const volatile
6593/// int".
6594inline bool QualType::isMoreQualifiedThan(QualType other) const {
6595 Qualifiers MyQuals = getQualifiers();
6596 Qualifiers OtherQuals = other.getQualifiers();
6597 return (MyQuals != OtherQuals && MyQuals.compatiblyIncludes(OtherQuals));
6598}
6599
6600/// Determine whether this type is at last
6601/// as qualified as the Other type. For example, "const volatile
6602/// int" is at least as qualified as "const int", "volatile int",
6603/// "int", and "const volatile int".
6604inline bool QualType::isAtLeastAsQualifiedAs(QualType other) const {
6605 Qualifiers OtherQuals = other.getQualifiers();
6606
6607 // Ignore __unaligned qualifier if this type is a void.
6608 if (getUnqualifiedType()->isVoidType())
6609 OtherQuals.removeUnaligned();
6610
6611 return getQualifiers().compatiblyIncludes(OtherQuals);
6612}
6613
6614/// If Type is a reference type (e.g., const
6615/// int&), returns the type that the reference refers to ("const
6616/// int"). Otherwise, returns the type itself. This routine is used
6617/// throughout Sema to implement C++ 5p6:
6618///
6619/// If an expression initially has the type "reference to T" (8.3.2,
6620/// 8.5.3), the type is adjusted to "T" prior to any further
6621/// analysis, the expression designates the object or function
6622/// denoted by the reference, and the expression is an lvalue.
6623inline QualType QualType::getNonReferenceType() const {
6624 if (const auto *RefType = (*this)->getAs<ReferenceType>())
6625 return RefType->getPointeeType();
6626 else
6627 return *this;
6628}
6629
6630inline bool QualType::isCForbiddenLValueType() const {
6631 return ((getTypePtr()->isVoidType() && !hasQualifiers()) ||
6632 getTypePtr()->isFunctionType());
6633}
6634
6635/// Tests whether the type is categorized as a fundamental type.
6636///
6637/// \returns True for types specified in C++0x [basic.fundamental].
6638inline bool Type::isFundamentalType() const {
6639 return isVoidType() ||
6640 isNullPtrType() ||
6641 // FIXME: It's really annoying that we don't have an
6642 // 'isArithmeticType()' which agrees with the standard definition.
6643 (isArithmeticType() && !isEnumeralType());
6644}
6645
6646/// Tests whether the type is categorized as a compound type.
6647///
6648/// \returns True for types specified in C++0x [basic.compound].
6649inline bool Type::isCompoundType() const {
6650 // C++0x [basic.compound]p1:
6651 // Compound types can be constructed in the following ways:
6652 // -- arrays of objects of a given type [...];
6653 return isArrayType() ||
6654 // -- functions, which have parameters of given types [...];
6655 isFunctionType() ||
6656 // -- pointers to void or objects or functions [...];
6657 isPointerType() ||
6658 // -- references to objects or functions of a given type. [...]
6659 isReferenceType() ||
6660 // -- classes containing a sequence of objects of various types, [...];
6661 isRecordType() ||
6662 // -- unions, which are classes capable of containing objects of different
6663 // types at different times;
6664 isUnionType() ||
6665 // -- enumerations, which comprise a set of named constant values. [...];
6666 isEnumeralType() ||
6667 // -- pointers to non-static class members, [...].
6668 isMemberPointerType();
6669}
6670
6671inline bool Type::isFunctionType() const {
6672 return isa<FunctionType>(CanonicalType);
33
Assuming field 'CanonicalType' is not a 'FunctionType'
34
Returning zero, which participates in a condition later
6673}
6674
6675inline bool Type::isPointerType() const {
6676 return isa<PointerType>(CanonicalType);
6677}
6678
6679inline bool Type::isAnyPointerType() const {
6680 return isPointerType() || isObjCObjectPointerType();
6681}
6682
6683inline bool Type::isBlockPointerType() const {
6684 return isa<BlockPointerType>(CanonicalType);
6685}
6686
6687inline bool Type::isReferenceType() const {
6688 return isa<ReferenceType>(CanonicalType);
6689}
6690
6691inline bool Type::isLValueReferenceType() const {
6692 return isa<LValueReferenceType>(CanonicalType);
6693}
6694
6695inline bool Type::isRValueReferenceType() const {
6696 return isa<RValueReferenceType>(CanonicalType);
6697}
6698
6699inline bool Type::isObjectPointerType() const {
6700 // Note: an "object pointer type" is not the same thing as a pointer to an
6701 // object type; rather, it is a pointer to an object type or a pointer to cv
6702 // void.
6703 if (const auto *T = getAs<PointerType>())
6704 return !T->getPointeeType()->isFunctionType();
6705 else
6706 return false;
6707}
6708
6709inline bool Type::isFunctionPointerType() const {
6710 if (const auto *T = getAs<PointerType>())
6711 return T->getPointeeType()->isFunctionType();
6712 else
6713 return false;
6714}
6715
6716inline bool Type::isFunctionReferenceType() const {
6717 if (const auto *T = getAs<ReferenceType>())
6718 return T->getPointeeType()->isFunctionType();
6719 else
6720 return false;
6721}
6722
6723inline bool Type::isMemberPointerType() const {
6724 return isa<MemberPointerType>(CanonicalType);
6725}
6726
6727inline bool Type::isMemberFunctionPointerType() const {
6728 if (const auto *T = getAs<MemberPointerType>())
6729 return T->isMemberFunctionPointer();
6730 else
6731 return false;
6732}
6733
6734inline bool Type::isMemberDataPointerType() const {
6735 if (const auto *T = getAs<MemberPointerType>())
6736 return T->isMemberDataPointer();
6737 else
6738 return false;
6739}
6740
6741inline bool Type::isArrayType() const {
6742 return isa<ArrayType>(CanonicalType);
6743}
6744
6745inline bool Type::isConstantArrayType() const {
6746 return isa<ConstantArrayType>(CanonicalType);
6747}
6748
6749inline bool Type::isIncompleteArrayType() const {
6750 return isa<IncompleteArrayType>(CanonicalType);
6751}
6752
6753inline bool Type::isVariableArrayType() const {
6754 return isa<VariableArrayType>(CanonicalType);
6755}
6756
6757inline bool Type::isDependentSizedArrayType() const {
6758 return isa<DependentSizedArrayType>(CanonicalType);
6759}
6760
6761inline bool Type::isBuiltinType() const {
6762 return isa<BuiltinType>(CanonicalType);
6763}
6764
6765inline bool Type::isRecordType() const {
6766 return isa<RecordType>(CanonicalType);
6767}
6768
6769inline bool Type::isEnumeralType() const {
6770 return isa<EnumType>(CanonicalType);
6771}
6772
6773inline bool Type::isAnyComplexType() const {
6774 return isa<ComplexType>(CanonicalType);
6775}
6776
6777inline bool Type::isVectorType() const {
6778 return isa<VectorType>(CanonicalType);
6779}
6780
6781inline bool Type::isExtVectorType() const {
6782 return isa<ExtVectorType>(CanonicalType);
6783}
6784
6785inline bool Type::isMatrixType() const {
6786 return isa<MatrixType>(CanonicalType);
6787}
6788
6789inline bool Type::isConstantMatrixType() const {
6790 return isa<ConstantMatrixType>(CanonicalType);
6791}
6792
6793inline bool Type::isDependentAddressSpaceType() const {
6794 return isa<DependentAddressSpaceType>(CanonicalType);
6795}
6796
6797inline bool Type::isObjCObjectPointerType() const {
6798 return isa<ObjCObjectPointerType>(CanonicalType);
6799}
6800
6801inline bool Type::isObjCObjectType() const {
6802 return isa<ObjCObjectType>(CanonicalType);
6803}
6804
6805inline bool Type::isObjCObjectOrInterfaceType() const {
6806 return isa<ObjCInterfaceType>(CanonicalType) ||
6807 isa<ObjCObjectType>(CanonicalType);
6808}
6809
6810inline bool Type::isAtomicType() const {
6811 return isa<AtomicType>(CanonicalType);
6812}
6813
6814inline bool Type::isUndeducedAutoType() const {
6815 return isa<AutoType>(CanonicalType);
6816}
6817
6818inline bool Type::isObjCQualifiedIdType() const {
6819 if (const auto *OPT = getAs<ObjCObjectPointerType>())
6820 return OPT->isObjCQualifiedIdType();
6821 return false;
6822}
6823
6824inline bool Type::isObjCQualifiedClassType() const {
6825 if (const auto *OPT = getAs<ObjCObjectPointerType>())
6826 return OPT->isObjCQualifiedClassType();
6827 return false;
6828}
6829
6830inline bool Type::isObjCIdType() const {
6831 if (const auto *OPT = getAs<ObjCObjectPointerType>())
6832 return OPT->isObjCIdType();
6833 return false;
6834}
6835
6836inline bool Type::isObjCClassType() const {
6837 if (const auto *OPT = getAs<ObjCObjectPointerType>())
6838 return OPT->isObjCClassType();
6839 return false;
6840}
6841
6842inline bool Type::isObjCSelType() const {
6843 if (const auto *OPT = getAs<PointerType>())
6844 return OPT->getPointeeType()->isSpecificBuiltinType(BuiltinType::ObjCSel);
6845 return false;
6846}
6847
6848inline bool Type::isObjCBuiltinType() const {
6849 return isObjCIdType() || isObjCClassType() || isObjCSelType();
6850}
6851
6852inline bool Type::isDecltypeType() const {
6853 return isa<DecltypeType>(this);
6854}
6855
6856#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
6857 inline bool Type::is##Id##Type() const { \
6858 return isSpecificBuiltinType(BuiltinType::Id); \
6859 }
6860#include "clang/Basic/OpenCLImageTypes.def"
6861
6862inline bool Type::isSamplerT() const {
6863 return isSpecificBuiltinType(BuiltinType::OCLSampler);
6864}
6865
6866inline bool Type::isEventT() const {
6867 return isSpecificBuiltinType(BuiltinType::OCLEvent);
6868}
6869
6870inline bool Type::isClkEventT() const {
6871 return isSpecificBuiltinType(BuiltinType::OCLClkEvent);
6872}
6873
6874inline bool Type::isQueueT() const {
6875 return isSpecificBuiltinType(BuiltinType::OCLQueue);
6876}
6877
6878inline bool Type::isReserveIDT() const {
6879 return isSpecificBuiltinType(BuiltinType::OCLReserveID);
6880}
6881
6882inline bool Type::isImageType() const {
6883#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) is##Id##Type() ||
6884 return
6885#include "clang/Basic/OpenCLImageTypes.def"
6886 false; // end boolean or operation
6887}
6888
6889inline bool Type::isPipeType() const {
6890 return isa<PipeType>(CanonicalType);
6891}
6892
6893inline bool Type::isExtIntType() const {
6894 return isa<ExtIntType>(CanonicalType);
6895}
6896
6897#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
6898 inline bool Type::is##Id##Type() const { \
6899 return isSpecificBuiltinType(BuiltinType::Id); \
6900 }
6901#include "clang/Basic/OpenCLExtensionTypes.def"
6902
6903inline bool Type::isOCLIntelSubgroupAVCType() const {
6904#define INTEL_SUBGROUP_AVC_TYPE(ExtType, Id) \
6905 isOCLIntelSubgroupAVC##Id##Type() ||
6906 return
6907#include "clang/Basic/OpenCLExtensionTypes.def"
6908 false; // end of boolean or operation
6909}
6910
6911inline bool Type::isOCLExtOpaqueType() const {
6912#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) is##Id##Type() ||
6913 return
6914#include "clang/Basic/OpenCLExtensionTypes.def"
6915 false; // end of boolean or operation
6916}
6917
6918inline bool Type::isOpenCLSpecificType() const {
6919 return isSamplerT() || isEventT() || isImageType() || isClkEventT() ||
6920 isQueueT() || isReserveIDT() || isPipeType() || isOCLExtOpaqueType();
6921}
6922
6923inline bool Type::isTemplateTypeParmType() const {
6924 return isa<TemplateTypeParmType>(CanonicalType);
6925}
6926
6927inline bool Type::isSpecificBuiltinType(unsigned K) const {
6928 if (const BuiltinType *BT
26.1
'BT' is null
26.1
'BT' is null
26.1
'BT' is null
26.1
'BT' is null
26.1
'BT' is null
26.1
'BT' is null
= getAs<BuiltinType>()) {
26
Assuming the object is not a 'BuiltinType'
27
Taking false branch
6929 return BT->getKind() == static_cast<BuiltinType::Kind>(K);
6930 }
6931 return false;
28
Returning zero, which participates in a condition later
6932}
6933
6934inline bool Type::isPlaceholderType() const {
6935 if (const auto *BT = dyn_cast<BuiltinType>(this))
6936 return BT->isPlaceholderType();
6937 return false;
6938}
6939
6940inline const BuiltinType *Type::getAsPlaceholderType() const {
6941 if (const auto *BT = dyn_cast<BuiltinType>(this))
6942 if (BT->isPlaceholderType())
6943 return BT;
6944 return nullptr;
6945}
6946
6947inline bool Type::isSpecificPlaceholderType(unsigned K) const {
6948 assert(BuiltinType::isPlaceholderTypeKind((BuiltinType::Kind) K))((void)0);
6949 return isSpecificBuiltinType(K);
6950}
6951
6952inline bool Type::isNonOverloadPlaceholderType() const {
6953 if (const auto *BT = dyn_cast<BuiltinType>(this))
6954 return BT->isNonOverloadPlaceholderType();
6955 return false;
6956}
6957
6958inline bool Type::isVoidType() const {
6959 return isSpecificBuiltinType(BuiltinType::Void);
25
Calling 'Type::isSpecificBuiltinType'
29
Returning from 'Type::isSpecificBuiltinType'
30
Returning zero, which participates in a condition later
6960}
6961
6962inline bool Type::isHalfType() const {
6963 // FIXME: Should we allow complex __fp16? Probably not.
6964 return isSpecificBuiltinType(BuiltinType::Half);
6965}
6966
6967inline bool Type::isFloat16Type() const {
6968 return isSpecificBuiltinType(BuiltinType::Float16);
6969}
6970
6971inline bool Type::isBFloat16Type() const {
6972 return isSpecificBuiltinType(BuiltinType::BFloat16);
6973}
6974
6975inline bool Type::isFloat128Type() const {
6976 return isSpecificBuiltinType(BuiltinType::Float128);
6977}
6978
6979inline bool Type::isNullPtrType() const {
6980 return isSpecificBuiltinType(BuiltinType::NullPtr);
6981}
6982
6983bool IsEnumDeclComplete(EnumDecl *);
6984bool IsEnumDeclScoped(EnumDecl *);
6985
6986inline bool Type::isIntegerType() const {
6987 if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
6988 return BT->getKind() >= BuiltinType::Bool &&
6989 BT->getKind() <= BuiltinType::Int128;
6990 if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType)) {
6991 // Incomplete enum types are not treated as integer types.
6992 // FIXME: In C++, enum types are never integer types.
6993 return IsEnumDeclComplete(ET->getDecl()) &&
6994 !IsEnumDeclScoped(ET->getDecl());
6995 }
6996 return isExtIntType();
6997}
6998
6999inline bool Type::isFixedPointType() const {
7000 if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType)) {
7001 return BT->getKind() >= BuiltinType::ShortAccum &&
7002 BT->getKind() <= BuiltinType::SatULongFract;
7003 }
7004 return false;
7005}
7006
7007inline bool Type::isFixedPointOrIntegerType() const {
7008 return isFixedPointType() || isIntegerType();
7009}
7010
7011inline bool Type::isSaturatedFixedPointType() const {
7012 if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType)) {
7013 return BT->getKind() >= BuiltinType::SatShortAccum &&
7014 BT->getKind() <= BuiltinType::SatULongFract;
7015 }
7016 return false;
7017}
7018
7019inline bool Type::isUnsaturatedFixedPointType() const {
7020 return isFixedPointType() && !isSaturatedFixedPointType();
7021}
7022
7023inline bool Type::isSignedFixedPointType() const {
7024 if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType)) {
7025 return ((BT->getKind() >= BuiltinType::ShortAccum &&
7026 BT->getKind() <= BuiltinType::LongAccum) ||
7027 (BT->getKind() >= BuiltinType::ShortFract &&
7028 BT->getKind() <= BuiltinType::LongFract) ||
7029 (BT->getKind() >= BuiltinType::SatShortAccum &&
7030 BT->getKind() <= BuiltinType::SatLongAccum) ||
7031 (BT->getKind() >= BuiltinType::SatShortFract &&
7032 BT->getKind() <= BuiltinType::SatLongFract));
7033 }
7034 return false;
7035}
7036
7037inline bool Type::isUnsignedFixedPointType() const {
7038 return isFixedPointType() && !isSignedFixedPointType();
7039}
7040
7041inline bool Type::isScalarType() const {
7042 if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
7043 return BT->getKind() > BuiltinType::Void &&
7044 BT->getKind() <= BuiltinType::NullPtr;
7045 if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
7046 // Enums are scalar types, but only if they are defined. Incomplete enums
7047 // are not treated as scalar types.
7048 return IsEnumDeclComplete(ET->getDecl());
7049 return isa<PointerType>(CanonicalType) ||
7050 isa<BlockPointerType>(CanonicalType) ||
7051 isa<MemberPointerType>(CanonicalType) ||
7052 isa<ComplexType>(CanonicalType) ||
7053 isa<ObjCObjectPointerType>(CanonicalType) ||
7054 isExtIntType();
7055}
7056
7057inline bool Type::isIntegralOrEnumerationType() const {
7058 if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
7059 return BT->getKind() >= BuiltinType::Bool &&
7060 BT->getKind() <= BuiltinType::Int128;
7061
7062 // Check for a complete enum type; incomplete enum types are not properly an
7063 // enumeration type in the sense required here.
7064 if (const auto *ET = dyn_cast<EnumType>(CanonicalType))
7065 return IsEnumDeclComplete(ET->getDecl());
7066
7067 return isExtIntType();
7068}
7069
7070inline bool Type::isBooleanType() const {
7071 if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
7072 return BT->getKind() == BuiltinType::Bool;
7073 return false;
7074}
7075
7076inline bool Type::isUndeducedType() const {
7077 auto *DT = getContainedDeducedType();
7078 return DT && !DT->isDeduced();
7079}
7080
7081/// Determines whether this is a type for which one can define
7082/// an overloaded operator.
7083inline bool Type::isOverloadableType() const {
7084 return isDependentType() || isRecordType() || isEnumeralType();
7085}
7086
7087/// Determines whether this type is written as a typedef-name.
7088inline bool Type::isTypedefNameType() const {
7089 if (getAs<TypedefType>())
7090 return true;
7091 if (auto *TST = getAs<TemplateSpecializationType>())
7092 return TST->isTypeAlias();
7093 return false;
7094}
7095
7096/// Determines whether this type can decay to a pointer type.
7097inline bool Type::canDecayToPointerType() const {
7098 return isFunctionType() || isArrayType();
7099}
7100
7101inline bool Type::hasPointerRepresentation() const {
7102 return (isPointerType() || isReferenceType() || isBlockPointerType() ||
7103 isObjCObjectPointerType() || isNullPtrType());
7104}
7105
7106inline bool Type::hasObjCPointerRepresentation() const {
7107 return isObjCObjectPointerType();
7108}
7109
7110inline const Type *Type::getBaseElementTypeUnsafe() const {
7111 const Type *type = this;
7112 while (const ArrayType *arrayType = type->getAsArrayTypeUnsafe())
7113 type = arrayType->getElementType().getTypePtr();
7114 return type;
7115}
7116
7117inline const Type *Type::getPointeeOrArrayElementType() const {
7118 const Type *type = this;
7119 if (type->isAnyPointerType())
7120 return type->getPointeeType().getTypePtr();
7121 else if (type->isArrayType())
7122 return type->getBaseElementTypeUnsafe();
7123 return type;
7124}
7125/// Insertion operator for partial diagnostics. This allows sending adress
7126/// spaces into a diagnostic with <<.
7127inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &PD,
7128 LangAS AS) {
7129 PD.AddTaggedVal(static_cast<std::underlying_type_t<LangAS>>(AS),
7130 DiagnosticsEngine::ArgumentKind::ak_addrspace);
7131 return PD;
7132}
7133
7134/// Insertion operator for partial diagnostics. This allows sending Qualifiers
7135/// into a diagnostic with <<.
7136inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &PD,
7137 Qualifiers Q) {
7138 PD.AddTaggedVal(Q.getAsOpaqueValue(),
7139 DiagnosticsEngine::ArgumentKind::ak_qual);
7140 return PD;
7141}
7142
7143/// Insertion operator for partial diagnostics. This allows sending QualType's
7144/// into a diagnostic with <<.
7145inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &PD,
7146 QualType T) {
7147 PD.AddTaggedVal(reinterpret_cast<intptr_t>(T.getAsOpaquePtr()),
7148 DiagnosticsEngine::ak_qualtype);
7149 return PD;
7150}
7151
7152// Helper class template that is used by Type::getAs to ensure that one does
7153// not try to look through a qualified type to get to an array type.
7154template <typename T>
7155using TypeIsArrayType =
7156 std::integral_constant<bool, std::is_same<T, ArrayType>::value ||
7157 std::is_base_of<ArrayType, T>::value>;
7158
7159// Member-template getAs<specific type>'.
7160template <typename T> const T *Type::getAs() const {
7161 static_assert(!TypeIsArrayType<T>::value,
7162 "ArrayType cannot be used with getAs!");
7163
7164 // If this is directly a T type, return it.
7165 if (const auto *Ty = dyn_cast<T>(this))
7166 return Ty;
7167
7168 // If the canonical form of this type isn't the right kind, reject it.
7169 if (!isa<T>(CanonicalType))
7170 return nullptr;
7171
7172 // If this is a typedef for the type, strip the typedef off without
7173 // losing all typedef information.
7174 return cast<T>(getUnqualifiedDesugaredType());
7175}
7176
7177template <typename T> const T *Type::getAsAdjusted() const {
7178 static_assert(!TypeIsArrayType<T>::value, "ArrayType cannot be used with getAsAdjusted!");
7179
7180 // If this is directly a T type, return it.
7181 if (const auto *Ty = dyn_cast<T>(this))
7182 return Ty;
7183
7184 // If the canonical form of this type isn't the right kind, reject it.
7185 if (!isa<T>(CanonicalType))
7186 return nullptr;
7187
7188 // Strip off type adjustments that do not modify the underlying nature of the
7189 // type.
7190 const Type *Ty = this;
7191 while (Ty) {
7192 if (const auto *A = dyn_cast<AttributedType>(Ty))
7193 Ty = A->getModifiedType().getTypePtr();
7194 else if (const auto *E = dyn_cast<ElaboratedType>(Ty))
7195 Ty = E->desugar().getTypePtr();
7196 else if (const auto *P = dyn_cast<ParenType>(Ty))
7197 Ty = P->desugar().getTypePtr();
7198 else if (const auto *A = dyn_cast<AdjustedType>(Ty))
7199 Ty = A->desugar().getTypePtr();
7200 else if (const auto *M = dyn_cast<MacroQualifiedType>(Ty))
7201 Ty = M->desugar().getTypePtr();
7202 else
7203 break;
7204 }
7205
7206 // Just because the canonical type is correct does not mean we can use cast<>,
7207 // since we may not have stripped off all the sugar down to the base type.
7208 return dyn_cast<T>(Ty);
7209}
7210
7211inline const ArrayType *Type::getAsArrayTypeUnsafe() const {
7212 // If this is directly an array type, return it.
7213 if (const auto *arr = dyn_cast<ArrayType>(this))
7214 return arr;
7215
7216 // If the canonical form of this type isn't the right kind, reject it.
7217 if (!isa<ArrayType>(CanonicalType))
7218 return nullptr;
7219
7220 // If this is a typedef for the type, strip the typedef off without
7221 // losing all typedef information.
7222 return cast<ArrayType>(getUnqualifiedDesugaredType());
7223}
7224
7225template <typename T> const T *Type::castAs() const {
7226 static_assert(!TypeIsArrayType<T>::value,
7227 "ArrayType cannot be used with castAs!");
7228
7229 if (const auto *ty = dyn_cast<T>(this)) return ty;
7230 assert(isa<T>(CanonicalType))((void)0);
7231 return cast<T>(getUnqualifiedDesugaredType());
7232}
7233
7234inline const ArrayType *Type::castAsArrayTypeUnsafe() const {
7235 assert(isa<ArrayType>(CanonicalType))((void)0);
7236 if (const auto *arr = dyn_cast<ArrayType>(this)) return arr;
7237 return cast<ArrayType>(getUnqualifiedDesugaredType());
7238}
7239
7240DecayedType::DecayedType(QualType OriginalType, QualType DecayedPtr,
7241 QualType CanonicalPtr)
7242 : AdjustedType(Decayed, OriginalType, DecayedPtr, CanonicalPtr) {
7243#ifndef NDEBUG1
7244 QualType Adjusted = getAdjustedType();
7245 (void)AttributedType::stripOuterNullability(Adjusted);
7246 assert(isa<PointerType>(Adjusted))((void)0);
7247#endif
7248}
7249
7250QualType DecayedType::getPointeeType() const {
7251 QualType Decayed = getDecayedType();
7252 (void)AttributedType::stripOuterNullability(Decayed);
7253 return cast<PointerType>(Decayed)->getPointeeType();
7254}
7255
7256// Get the decimal string representation of a fixed point type, represented
7257// as a scaled integer.
7258// TODO: At some point, we should change the arguments to instead just accept an
7259// APFixedPoint instead of APSInt and scale.
7260void FixedPointValueToString(SmallVectorImpl<char> &Str, llvm::APSInt Val,
7261 unsigned Scale);
7262
7263} // namespace clang
7264
7265#endif // LLVM_CLANG_AST_TYPE_H

/usr/src/gnu/usr.bin/clang/libclangCodeGen/../../../llvm/clang/include/clang/Basic/LangOptions.h

1//===- LangOptions.h - C Language Family Language Options -------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// Defines the clang::LangOptions interface.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_CLANG_BASIC_LANGOPTIONS_H
15#define LLVM_CLANG_BASIC_LANGOPTIONS_H
16
17#include "clang/Basic/CommentOptions.h"
18#include "clang/Basic/LLVM.h"
19#include "clang/Basic/LangStandard.h"
20#include "clang/Basic/ObjCRuntime.h"
21#include "clang/Basic/Sanitizers.h"
22#include "clang/Basic/TargetCXXABI.h"
23#include "clang/Basic/Visibility.h"
24#include "llvm/ADT/FloatingPointMode.h"
25#include "llvm/ADT/StringRef.h"
26#include "llvm/ADT/Triple.h"
27#include <string>
28#include <vector>
29
30namespace clang {
31
32/// Bitfields of LangOptions, split out from LangOptions in order to ensure that
33/// this large collection of bitfields is a trivial class type.
34class LangOptionsBase {
35 friend class CompilerInvocation;
36
37public:
38 // Define simple language options (with no accessors).
39#define LANGOPT(Name, Bits, Default, Description) unsigned Name : Bits;
40#define ENUM_LANGOPT(Name, Type, Bits, Default, Description)
41#include "clang/Basic/LangOptions.def"
42
43protected:
44 // Define language options of enumeration type. These are private, and will
45 // have accessors (below).
46#define LANGOPT(Name, Bits, Default, Description)
47#define ENUM_LANGOPT(Name, Type, Bits, Default, Description) \
48 unsigned Name : Bits;
49#include "clang/Basic/LangOptions.def"
50};
51
/// In the Microsoft ABI, this controls the placement of virtual displacement
/// members used to implement virtual inheritance.
enum class MSVtorDispMode { Never, ForVBaseOverride, ForVFTable };
55
56/// Keeps track of the various options that can be
57/// enabled, which controls the dialect of C or C++ that is accepted.
58class LangOptions : public LangOptionsBase {
59public:
60 using Visibility = clang::Visibility;
61 using RoundingMode = llvm::RoundingMode;
62
63 enum GCMode { NonGC, GCOnly, HybridGC };
64 enum StackProtectorMode { SSPOff, SSPOn, SSPStrong, SSPReq };
65
66 // Automatic variables live on the stack, and when trivial they're usually
67 // uninitialized because it's undefined behavior to use them without
68 // initializing them.
69 enum class TrivialAutoVarInitKind { Uninitialized, Zero, Pattern };
70
71 enum SignedOverflowBehaviorTy {
72 // Default C standard behavior.
73 SOB_Undefined,
74
75 // -fwrapv
76 SOB_Defined,
77
78 // -ftrapv
79 SOB_Trapping
80 };
81
82 // FIXME: Unify with TUKind.
83 enum CompilingModuleKind {
84 /// Not compiling a module interface at all.
85 CMK_None,
86
87 /// Compiling a module from a module map.
88 CMK_ModuleMap,
89
90 /// Compiling a module from a list of header files.
91 CMK_HeaderModule,
92
93 /// Compiling a C++ modules TS module interface unit.
94 CMK_ModuleInterface,
95 };
96
97 enum PragmaMSPointersToMembersKind {
98 PPTMK_BestCase,
99 PPTMK_FullGeneralitySingleInheritance,
100 PPTMK_FullGeneralityMultipleInheritance,
101 PPTMK_FullGeneralityVirtualInheritance
102 };
103
104 using MSVtorDispMode = clang::MSVtorDispMode;
105
106 enum DefaultCallingConvention {
107 DCC_None,
108 DCC_CDecl,
109 DCC_FastCall,
110 DCC_StdCall,
111 DCC_VectorCall,
112 DCC_RegCall
113 };
114
115 enum AddrSpaceMapMangling { ASMM_Target, ASMM_On, ASMM_Off };
116
117 // Corresponds to _MSC_VER
118 enum MSVCMajorVersion {
119 MSVC2010 = 1600,
120 MSVC2012 = 1700,
121 MSVC2013 = 1800,
122 MSVC2015 = 1900,
123 MSVC2017 = 1910,
124 MSVC2017_5 = 1912,
125 MSVC2017_7 = 1914,
126 MSVC2019 = 1920,
127 MSVC2019_8 = 1928,
128 };
129
130 enum SYCLMajorVersion {
131 SYCL_None,
132 SYCL_2017,
133 SYCL_2020,
134 // The "default" SYCL version to be used when none is specified on the
135 // frontend command line.
136 SYCL_Default = SYCL_2020
137 };
138
139 /// Clang versions with different platform ABI conformance.
140 enum class ClangABI {
141 /// Attempt to be ABI-compatible with code generated by Clang 3.8.x
142 /// (SVN r257626). This causes <1 x long long> to be passed in an
143 /// integer register instead of an SSE register on x64_64.
144 Ver3_8,
145
146 /// Attempt to be ABI-compatible with code generated by Clang 4.0.x
147 /// (SVN r291814). This causes move operations to be ignored when
148 /// determining whether a class type can be passed or returned directly.
149 Ver4,
150
151 /// Attempt to be ABI-compatible with code generated by Clang 6.0.x
152 /// (SVN r321711). This causes determination of whether a type is
153 /// standard-layout to ignore collisions between empty base classes
154 /// and between base classes and member subobjects, which affects
155 /// whether we reuse base class tail padding in some ABIs.
156 Ver6,
157
158 /// Attempt to be ABI-compatible with code generated by Clang 7.0.x
159 /// (SVN r338536). This causes alignof (C++) and _Alignof (C11) to be
160 /// compatible with __alignof (i.e., return the preferred alignment)
161 /// rather than returning the required alignment.
162 Ver7,
163
164 /// Attempt to be ABI-compatible with code generated by Clang 9.0.x
165 /// (SVN r351319). This causes vectors of __int128 to be passed in memory
166 /// instead of passing in multiple scalar registers on x86_64 on Linux and
167 /// NetBSD.
168 Ver9,
169
170 /// Attempt to be ABI-compatible with code generated by Clang 11.0.x
171 /// (git 2e10b7a39b93). This causes clang to pass unions with a 256-bit
172 /// vector member on the stack instead of using registers, to not properly
173 /// mangle substitutions for template names in some cases, and to mangle
174 /// declaration template arguments without a cast to the parameter type
175 /// even when that can lead to mangling collisions.
176 Ver11,
177
178 /// Attempt to be ABI-compatible with code generated by Clang 12.0.x
179 /// (git 8e464dd76bef). This causes clang to mangle lambdas within
180 /// global-scope inline variables incorrectly.
181 Ver12,
182
183 /// Conform to the underlying platform's C and C++ ABIs as closely
184 /// as we can.
185 Latest
186 };
187
188 enum class CoreFoundationABI {
189 /// No interoperability ABI has been specified
190 Unspecified,
191 /// CoreFoundation does not have any language interoperability
192 Standalone,
193 /// Interoperability with the ObjectiveC runtime
194 ObjectiveC,
195 /// Interoperability with the latest known version of the Swift runtime
196 Swift,
197 /// Interoperability with the Swift 5.0 runtime
198 Swift5_0,
199 /// Interoperability with the Swift 4.2 runtime
200 Swift4_2,
201 /// Interoperability with the Swift 4.1 runtime
202 Swift4_1,
203 };
204
205 enum FPModeKind {
206 // Disable the floating point pragma
207 FPM_Off,
208
209 // Enable the floating point pragma
210 FPM_On,
211
212 // Aggressively fuse FP ops (E.g. FMA) disregarding pragmas.
213 FPM_Fast,
214
215 // Aggressively fuse FP ops and honor pragmas.
216 FPM_FastHonorPragmas
217 };
218
219 /// Alias for RoundingMode::NearestTiesToEven.
220 static constexpr unsigned FPR_ToNearest =
221 static_cast<unsigned>(llvm::RoundingMode::NearestTiesToEven);
222
223 /// Possible floating point exception behavior.
224 enum FPExceptionModeKind {
225 /// Assume that floating-point exceptions are masked.
226 FPE_Ignore,
227 /// Transformations do not cause new exceptions but may hide some.
228 FPE_MayTrap,
229 /// Strictly preserve the floating-point exception semantics.
230 FPE_Strict
231 };
232
233 /// Possible exception handling behavior.
234 enum class ExceptionHandlingKind { None, SjLj, WinEH, DwarfCFI, Wasm };
235
236 enum class LaxVectorConversionKind {
237 /// Permit no implicit vector bitcasts.
238 None,
239 /// Permit vector bitcasts between integer vectors with different numbers
240 /// of elements but the same total bit-width.
241 Integer,
242 /// Permit vector bitcasts between all vectors with the same total
243 /// bit-width.
244 All,
245 };
246
247 enum class AltivecSrcCompatKind {
248 // All vector compares produce scalars except vector pixel and vector bool.
249 // The types vector pixel and vector bool return vector results.
250 Mixed,
251 // All vector compares produce vector results as in GCC.
252 GCC,
253 // All vector compares produce scalars as in XL.
254 XL,
255 // Default clang behaviour.
256 Default = Mixed,
257 };
258
259 enum class SignReturnAddressScopeKind {
260 /// No signing for any function.
261 None,
262 /// Sign the return address of functions that spill LR.
263 NonLeaf,
264 /// Sign the return address of all functions,
265 All
266 };
267
268 enum class SignReturnAddressKeyKind {
269 /// Return address signing uses APIA key.
270 AKey,
271 /// Return address signing uses APIB key.
272 BKey
273 };
274
275 enum class ThreadModelKind {
276 /// POSIX Threads.
277 POSIX,
278 /// Single Threaded Environment.
279 Single
280 };
281
282 enum class ExtendArgsKind {
283 /// Integer arguments are sign or zero extended to 32/64 bits
284 /// during default argument promotions.
285 ExtendTo32,
286 ExtendTo64
287 };
288
289public:
290 /// The used language standard.
291 LangStandard::Kind LangStd;
292
293 /// Set of enabled sanitizers.
294 SanitizerSet Sanitize;
295 /// Is at least one coverage instrumentation type enabled.
296 bool SanitizeCoverage = false;
297
298 /// Paths to files specifying which objects
299 /// (files, functions, variables) should not be instrumented.
300 std::vector<std::string> NoSanitizeFiles;
301
302 /// Paths to the XRay "always instrument" files specifying which
303 /// objects (files, functions, variables) should be imbued with the XRay
304 /// "always instrument" attribute.
305 /// WARNING: This is a deprecated field and will go away in the future.
306 std::vector<std::string> XRayAlwaysInstrumentFiles;
307
308 /// Paths to the XRay "never instrument" files specifying which
309 /// objects (files, functions, variables) should be imbued with the XRay
310 /// "never instrument" attribute.
311 /// WARNING: This is a deprecated field and will go away in the future.
312 std::vector<std::string> XRayNeverInstrumentFiles;
313
314 /// Paths to the XRay attribute list files, specifying which objects
315 /// (files, functions, variables) should be imbued with the appropriate XRay
316 /// attribute(s).
317 std::vector<std::string> XRayAttrListFiles;
318
319 /// Paths to special case list files specifying which entities
320 /// (files, functions) should or should not be instrumented.
321 std::vector<std::string> ProfileListFiles;
322
323 clang::ObjCRuntime ObjCRuntime;
324
325 CoreFoundationABI CFRuntime = CoreFoundationABI::Unspecified;
326
327 std::string ObjCConstantStringClass;
328
329 /// The name of the handler function to be called when -ftrapv is
330 /// specified.
331 ///
332 /// If none is specified, abort (GCC-compatible behaviour).
333 std::string OverflowHandler;
334
335 /// The module currently being compiled as specified by -fmodule-name.
336 std::string ModuleName;
337
338 /// The name of the current module, of which the main source file
339 /// is a part. If CompilingModule is set, we are compiling the interface
340 /// of this module, otherwise we are compiling an implementation file of
341 /// it. This starts as ModuleName in case -fmodule-name is provided and
342 /// changes during compilation to reflect the current module.
343 std::string CurrentModule;
344
345 /// The names of any features to enable in module 'requires' decls
346 /// in addition to the hard-coded list in Module.cpp and the target features.
347 ///
348 /// This list is sorted.
349 std::vector<std::string> ModuleFeatures;
350
351 /// Options for parsing comments.
352 CommentOptions CommentOpts;
353
354 /// A list of all -fno-builtin-* function names (e.g., memset).
355 std::vector<std::string> NoBuiltinFuncs;
356
357 /// A prefix map for __FILE__, __BASE_FILE__ and __builtin_FILE().
358 std::map<std::string, std::string, std::greater<std::string>> MacroPrefixMap;
359
360 /// Triples of the OpenMP targets that the host code codegen should
361 /// take into account in order to generate accurate offloading descriptors.
362 std::vector<llvm::Triple> OMPTargetTriples;
363
364 /// Name of the IR file that contains the result of the OpenMP target
365 /// host code generation.
366 std::string OMPHostIRFile;
367
368 /// The user provided compilation unit ID, if non-empty. This is used to
369 /// externalize static variables which is needed to support accessing static
370 /// device variables in host code for single source offloading languages
371 /// like CUDA/HIP.
372 std::string CUID;
373
374 /// C++ ABI to compile with, if specified by the frontend through -fc++-abi=.
375 /// This overrides the default ABI used by the target.
376 llvm::Optional<TargetCXXABI::Kind> CXXABI;
377
378 /// Indicates whether the front-end is explicitly told that the
379 /// input is a header file (i.e. -x c-header).
380 bool IsHeaderFile = false;
381
382 LangOptions();
383
384 // Define accessors/mutators for language options of enumeration type.
385#define LANGOPT(Name, Bits, Default, Description)
386#define ENUM_LANGOPT(Name, Type, Bits, Default, Description) \
387 Type get##Name() const { return static_cast<Type>(Name); } \
388 void set##Name(Type Value) { Name = static_cast<unsigned>(Value); }
389#include "clang/Basic/LangOptions.def"
390
391 /// Are we compiling a module interface (.cppm or module map)?
392 bool isCompilingModule() const {
393 return getCompilingModule() != CMK_None;
394 }
395
396 /// Do we need to track the owning module for a local declaration?
397 bool trackLocalOwningModule() const {
398 return isCompilingModule() || ModulesLocalVisibility;
399 }
400
401 bool isSignedOverflowDefined() const {
402 return getSignedOverflowBehavior() == SOB_Defined;
38
Assuming the condition is false
39
Returning zero, which participates in a condition later
403 }
404
405 bool isSubscriptPointerArithmetic() const {
406 return ObjCRuntime.isSubscriptPointerArithmetic() &&
407 !ObjCSubscriptingLegacyRuntime;
408 }
409
410 bool isCompatibleWithMSVC(MSVCMajorVersion MajorVersion) const {
411 return MSCompatibilityVersion >= MajorVersion * 100000U;
412 }
413
414 /// Reset all of the options that are not considered when building a
415 /// module.
416 void resetNonModularOptions();
417
418 /// Is this a libc/libm function that is no longer recognized as a
419 /// builtin because a -fno-builtin-* option has been specified?
420 bool isNoBuiltinFunc(StringRef Name) const;
421
422 /// True if any ObjC types may have non-trivial lifetime qualifiers.
423 bool allowsNonTrivialObjCLifetimeQualifiers() const {
424 return ObjCAutoRefCount || ObjCWeak;
425 }
426
427 bool assumeFunctionsAreConvergent() const {
428 return ConvergentFunctions;
429 }
430
431 /// Return the OpenCL C or C++ version as a VersionTuple.
432 VersionTuple getOpenCLVersionTuple() const;
433
434 /// Check if return address signing is enabled.
435 bool hasSignReturnAddress() const {
436 return getSignReturnAddressScope() != SignReturnAddressScopeKind::None;
437 }
438
439 /// Check if return address signing uses AKey.
440 bool isSignReturnAddressWithAKey() const {
441 return getSignReturnAddressKey() == SignReturnAddressKeyKind::AKey;
442 }
443
444 /// Check if leaf functions are also signed.
445 bool isSignReturnAddressScopeAll() const {
446 return getSignReturnAddressScope() == SignReturnAddressScopeKind::All;
447 }
448
449 bool hasSjLjExceptions() const {
450 return getExceptionHandling() == ExceptionHandlingKind::SjLj;
451 }
452
453 bool hasSEHExceptions() const {
454 return getExceptionHandling() == ExceptionHandlingKind::WinEH;
455 }
456
457 bool hasDWARFExceptions() const {
458 return getExceptionHandling() == ExceptionHandlingKind::DwarfCFI;
459 }
460
461 bool hasWasmExceptions() const {
462 return getExceptionHandling() == ExceptionHandlingKind::Wasm;
463 }
464
465 bool isSYCL() const { return SYCLIsDevice || SYCLIsHost; }
466
467 /// Remap path prefix according to -fmacro-prefix-path option.
468 void remapPathPrefix(SmallString<256> &Path) const;
469};
470
471/// Floating point control options
472class FPOptionsOverride;
473class FPOptions {
474public:
475 // We start by defining the layout.
476 using storage_type = uint16_t;
477
478 using RoundingMode = llvm::RoundingMode;
479
480 static constexpr unsigned StorageBitSize = 8 * sizeof(storage_type);
481
482 // Define a fake option named "First" so that we have a PREVIOUS even for the
483 // real first option.
484 static constexpr storage_type FirstShift = 0, FirstWidth = 0;
485#define OPTION(NAME, TYPE, WIDTH, PREVIOUS) \
486 static constexpr storage_type NAME##Shift = \
487 PREVIOUS##Shift + PREVIOUS##Width; \
488 static constexpr storage_type NAME##Width = WIDTH; \
489 static constexpr storage_type NAME##Mask = ((1 << NAME##Width) - 1) \
490 << NAME##Shift;
491#include "clang/Basic/FPOptions.def"
492
493 static constexpr storage_type TotalWidth = 0
494#define OPTION(NAME, TYPE, WIDTH, PREVIOUS) +WIDTH
495#include "clang/Basic/FPOptions.def"
496 ;
497 static_assert(TotalWidth <= StorageBitSize, "Too short type for FPOptions");
498
499private:
500 storage_type Value;
501
502public:
503 FPOptions() : Value(0) {
504 setFPContractMode(LangOptions::FPM_Off);
505 setRoundingMode(static_cast<RoundingMode>(LangOptions::FPR_ToNearest));
506 setFPExceptionMode(LangOptions::FPE_Ignore);
507 }
508 explicit FPOptions(const LangOptions &LO) {
509 Value = 0;
510 // The language fp contract option FPM_FastHonorPragmas has the same effect
511 // as FPM_Fast in frontend. For simplicity, use FPM_Fast uniformly in
512 // frontend.
513 auto LangOptContractMode = LO.getDefaultFPContractMode();
514 if (LangOptContractMode == LangOptions::FPM_FastHonorPragmas)
515 LangOptContractMode = LangOptions::FPM_Fast;
516 setFPContractMode(LangOptContractMode);
517 setRoundingMode(LO.getFPRoundingMode());
518 setFPExceptionMode(LO.getFPExceptionMode());
519 setAllowFPReassociate(LO.AllowFPReassoc);
520 setNoHonorNaNs(LO.NoHonorNaNs);
521 setNoHonorInfs(LO.NoHonorInfs);
522 setNoSignedZero(LO.NoSignedZero);
523 setAllowReciprocal(LO.AllowRecip);
524 setAllowApproxFunc(LO.ApproxFunc);
525 if (getFPContractMode() == LangOptions::FPM_On &&
526 getRoundingMode() == llvm::RoundingMode::Dynamic &&
527 getFPExceptionMode() == LangOptions::FPE_Strict)
528 // If the FP settings are set to the "strict" model, then
529 // FENV access is set to true. (ffp-model=strict)
530 setAllowFEnvAccess(true);
531 else
532 setAllowFEnvAccess(LangOptions::FPM_Off);
533 }
534
535 bool allowFPContractWithinStatement() const {
536 return getFPContractMode() == LangOptions::FPM_On;
537 }
538 void setAllowFPContractWithinStatement() {
539 setFPContractMode(LangOptions::FPM_On);
540 }
541
542 bool allowFPContractAcrossStatement() const {
543 return getFPContractMode() == LangOptions::FPM_Fast;
544 }
545 void setAllowFPContractAcrossStatement() {
546 setFPContractMode(LangOptions::FPM_Fast);
547 }
548
549 bool isFPConstrained() const {
550 return getRoundingMode() != llvm::RoundingMode::NearestTiesToEven ||
551 getFPExceptionMode() != LangOptions::FPE_Ignore ||
552 getAllowFEnvAccess();
553 }
554
555 bool operator==(FPOptions other) const { return Value == other.Value; }
556
557 /// Return the default value of FPOptions that's used when trailing
558 /// storage isn't required.
559 static FPOptions defaultWithoutTrailingStorage(const LangOptions &LO);
560
561 storage_type getAsOpaqueInt() const { return Value; }
562 static FPOptions getFromOpaqueInt(storage_type Value) {
563 FPOptions Opts;
564 Opts.Value = Value;
565 return Opts;
566 }
567
568 // We can define most of the accessors automatically:
569#define OPTION(NAME, TYPE, WIDTH, PREVIOUS) \
570 TYPE get##NAME() const { \
571 return static_cast<TYPE>((Value & NAME##Mask) >> NAME##Shift); \
572 } \
573 void set##NAME(TYPE value) { \
574 Value = (Value & ~NAME##Mask) | (storage_type(value) << NAME##Shift); \
575 }
576#include "clang/Basic/FPOptions.def"
577 LLVM_DUMP_METHOD__attribute__((noinline)) void dump();
578};
579
580/// Represents difference between two FPOptions values.
581///
582/// The effect of language constructs changing the set of floating point options
583/// is usually a change of some FP properties while leaving others intact. This
584/// class describes such changes by keeping information about what FP options
585/// are overridden.
586///
587/// The integral set of FP options, described by the class FPOptions, may be
588/// represented as a default FP option set, defined by language standard and
589/// command line options, with the overrides introduced by pragmas.
590///
591/// The is implemented as a value of the new FPOptions plus a mask showing which
592/// fields are actually set in it.
593class FPOptionsOverride {
594 FPOptions Options = FPOptions::getFromOpaqueInt(0);
595 FPOptions::storage_type OverrideMask = 0;
596
597public:
598 using RoundingMode = llvm::RoundingMode;
599
600 /// The type suitable for storing values of FPOptionsOverride. Must be twice
601 /// as wide as bit size of FPOption.
602 using storage_type = uint32_t;
603 static_assert(sizeof(storage_type) >= 2 * sizeof(FPOptions::storage_type),
604 "Too short type for FPOptionsOverride");
605
606 /// Bit mask selecting bits of OverrideMask in serialized representation of
607 /// FPOptionsOverride.
608 static constexpr storage_type OverrideMaskBits =
609 (static_cast<storage_type>(1) << FPOptions::StorageBitSize) - 1;
610
611 FPOptionsOverride() {}
612 FPOptionsOverride(const LangOptions &LO)
613 : Options(LO), OverrideMask(OverrideMaskBits) {}
614 FPOptionsOverride(FPOptions FPO)
615 : Options(FPO), OverrideMask(OverrideMaskBits) {}
616
617 bool requiresTrailingStorage() const { return OverrideMask != 0; }
618
619 void setAllowFPContractWithinStatement() {
620 setFPContractModeOverride(LangOptions::FPM_On);
621 }
622
623 void setAllowFPContractAcrossStatement() {
624 setFPContractModeOverride(LangOptions::FPM_Fast);
625 }
626
627 void setDisallowFPContract() {
628 setFPContractModeOverride(LangOptions::FPM_Off);
629 }
630
631 void setFPPreciseEnabled(bool Value) {
632 setAllowFPReassociateOverride(!Value);
633 setNoHonorNaNsOverride(!Value);
634 setNoHonorInfsOverride(!Value);
635 setNoSignedZeroOverride(!Value);
636 setAllowReciprocalOverride(!Value);
637 setAllowApproxFuncOverride(!Value);
638 if (Value)
639 /* Precise mode implies fp_contract=on and disables ffast-math */
640 setAllowFPContractWithinStatement();
641 else
642 /* Precise mode disabled sets fp_contract=fast and enables ffast-math */
643 setAllowFPContractAcrossStatement();
644 }
645
646 storage_type getAsOpaqueInt() const {
647 return (static_cast<storage_type>(Options.getAsOpaqueInt())
648 << FPOptions::StorageBitSize) |
649 OverrideMask;
650 }
651 static FPOptionsOverride getFromOpaqueInt(storage_type I) {
652 FPOptionsOverride Opts;
653 Opts.OverrideMask = I & OverrideMaskBits;
654 Opts.Options = FPOptions::getFromOpaqueInt(I >> FPOptions::StorageBitSize);
655 return Opts;
656 }
657
658 FPOptions applyOverrides(FPOptions Base) {
659 FPOptions Result =
660 FPOptions::getFromOpaqueInt((Base.getAsOpaqueInt() & ~OverrideMask) |
661 (Options.getAsOpaqueInt() & OverrideMask));
662 return Result;
663 }
664
665 FPOptions applyOverrides(const LangOptions &LO) {
666 return applyOverrides(FPOptions(LO));
667 }
668
669 bool operator==(FPOptionsOverride other) const {
670 return Options == other.Options && OverrideMask == other.OverrideMask;
671 }
672 bool operator!=(FPOptionsOverride other) const { return !(*this == other); }
673
// Generate the per-option override accessors. For every FP option NAME this
// expands to:
//   hasNAMEOverride()   - is NAME explicitly overridden here?
//   getNAMEOverride()   - the overriding value (only valid when overridden)
//   clearNAMEOverride() - drop the override and zero the stored value
//   setNAMEOverride(v)  - record an override with value v
#define OPTION(NAME, TYPE, WIDTH, PREVIOUS) \
  bool has##NAME##Override() const { \
    return OverrideMask & FPOptions::NAME##Mask; \
  } \
  TYPE get##NAME##Override() const { \
    assert(has##NAME##Override()); \
    return Options.get##NAME(); \
  } \
  void clear##NAME##Override() { \
    /* Clear the actual value so that we don't have spurious differences when \
     * testing equality. */ \
    Options.set##NAME(TYPE(0)); \
    OverrideMask &= ~FPOptions::NAME##Mask; \
  } \
  void set##NAME##Override(TYPE value) { \
    Options.set##NAME(value); \
    OverrideMask |= FPOptions::NAME##Mask; \
  }
#include "clang/Basic/FPOptions.def"
  LLVM_DUMP_METHOD void dump();
};
695
/// Describes the kind of translation unit being processed.
enum TranslationUnitKind {
  /// The translation unit is a complete translation unit.
  TU_Complete,

  /// The translation unit is a prefix to a translation unit, and is
  /// not complete.
  TU_Prefix,

  /// The translation unit is a module.
  TU_Module,

  /// The translation unit is a complete translation unit that we might
  /// incrementally extend later.
  TU_Incremental
};
712
713} // namespace clang
714
715#endif // LLVM_CLANG_BASIC_LANGOPTIONS_H

/usr/src/gnu/usr.bin/clang/libclangCodeGen/../../../llvm/llvm/include/llvm/IR/IRBuilder.h

1//===- llvm/IRBuilder.h - Builder for LLVM Instructions ---------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the IRBuilder class, which is used as a convenient way
10// to create LLVM instructions with a consistent and simplified interface.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_IR_IRBUILDER_H
15#define LLVM_IR_IRBUILDER_H
16
17#include "llvm-c/Types.h"
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/None.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/StringRef.h"
22#include "llvm/ADT/Twine.h"
23#include "llvm/IR/BasicBlock.h"
24#include "llvm/IR/Constant.h"
25#include "llvm/IR/ConstantFolder.h"
26#include "llvm/IR/Constants.h"
27#include "llvm/IR/DataLayout.h"
28#include "llvm/IR/DebugInfoMetadata.h"
29#include "llvm/IR/DebugLoc.h"
30#include "llvm/IR/DerivedTypes.h"
31#include "llvm/IR/Function.h"
32#include "llvm/IR/GlobalVariable.h"
33#include "llvm/IR/InstrTypes.h"
34#include "llvm/IR/Instruction.h"
35#include "llvm/IR/Instructions.h"
36#include "llvm/IR/IntrinsicInst.h"
37#include "llvm/IR/LLVMContext.h"
38#include "llvm/IR/Module.h"
39#include "llvm/IR/Operator.h"
40#include "llvm/IR/Type.h"
41#include "llvm/IR/Value.h"
42#include "llvm/IR/ValueHandle.h"
43#include "llvm/Support/AtomicOrdering.h"
44#include "llvm/Support/CBindingWrapping.h"
45#include "llvm/Support/Casting.h"
46#include <cassert>
47#include <cstddef>
48#include <cstdint>
49#include <functional>
50#include <utility>
51
52namespace llvm {
53
54class APInt;
55class MDNode;
56class Use;
57
58/// This provides the default implementation of the IRBuilder
59/// 'InsertHelper' method that is called whenever an instruction is created by
60/// IRBuilder and needs to be inserted.
61///
62/// By default, this inserts the instruction at the insertion point.
63class IRBuilderDefaultInserter {
64public:
65 virtual ~IRBuilderDefaultInserter();
66
67 virtual void InsertHelper(Instruction *I, const Twine &Name,
68 BasicBlock *BB,
69 BasicBlock::iterator InsertPt) const {
70 if (BB) BB->getInstList().insert(InsertPt, I);
71 I->setName(Name);
72 }
73};
74
75/// Provides an 'InsertHelper' that calls a user-provided callback after
76/// performing the default insertion.
77class IRBuilderCallbackInserter : public IRBuilderDefaultInserter {
78 std::function<void(Instruction *)> Callback;
79
80public:
81 virtual ~IRBuilderCallbackInserter();
82
83 IRBuilderCallbackInserter(std::function<void(Instruction *)> Callback)
84 : Callback(std::move(Callback)) {}
85
86 void InsertHelper(Instruction *I, const Twine &Name,
87 BasicBlock *BB,
88 BasicBlock::iterator InsertPt) const override {
89 IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
90 Callback(I);
91 }
92};
93
94/// Common base class shared among various IRBuilders.
95class IRBuilderBase {
96 /// Pairs of (metadata kind, MDNode *) that should be added to all newly
97 /// created instructions, like !dbg metadata.
98 SmallVector<std::pair<unsigned, MDNode *>, 2> MetadataToCopy;
99
100 /// Add or update the an entry (Kind, MD) to MetadataToCopy, if \p MD is not
101 /// null. If \p MD is null, remove the entry with \p Kind.
102 void AddOrRemoveMetadataToCopy(unsigned Kind, MDNode *MD) {
103 if (!MD) {
104 erase_if(MetadataToCopy, [Kind](const std::pair<unsigned, MDNode *> &KV) {
105 return KV.first == Kind;
106 });
107 return;
108 }
109
110 for (auto &KV : MetadataToCopy)
111 if (KV.first == Kind) {
112 KV.second = MD;
113 return;
114 }
115
116 MetadataToCopy.emplace_back(Kind, MD);
117 }
118
119protected:
120 BasicBlock *BB;
121 BasicBlock::iterator InsertPt;
122 LLVMContext &Context;
123 const IRBuilderFolder &Folder;
124 const IRBuilderDefaultInserter &Inserter;
125
126 MDNode *DefaultFPMathTag;
127 FastMathFlags FMF;
128
129 bool IsFPConstrained;
130 fp::ExceptionBehavior DefaultConstrainedExcept;
131 RoundingMode DefaultConstrainedRounding;
132
133 ArrayRef<OperandBundleDef> DefaultOperandBundles;
134
135public:
  /// Construct a builder with no insertion point set; callers must use one of
  /// the SetInsertPoint overloads before creating instructions that should be
  /// inserted into a block.
  IRBuilderBase(LLVMContext &context, const IRBuilderFolder &Folder,
                const IRBuilderDefaultInserter &Inserter,
                MDNode *FPMathTag, ArrayRef<OperandBundleDef> OpBundles)
      : Context(context), Folder(Folder), Inserter(Inserter),
        DefaultFPMathTag(FPMathTag), IsFPConstrained(false),
        DefaultConstrainedExcept(fp::ebStrict),
        DefaultConstrainedRounding(RoundingMode::Dynamic),
        DefaultOperandBundles(OpBundles) {
    ClearInsertionPoint();
  }

  /// Insert and return the specified instruction.
  /// Also applies all entries in MetadataToCopy (e.g. !dbg) to \p I.
  template<typename InstTy>
  InstTy *Insert(InstTy *I, const Twine &Name = "") const {
    Inserter.InsertHelper(I, Name, BB, InsertPt);
    AddMetadataToInst(I);
    return I;
  }

  /// No-op overload to handle constants.
  Constant *Insert(Constant *C, const Twine& = "") const {
    return C;
  }

  /// Insert \p V if it is an instruction; constants pass through unchanged.
  Value *Insert(Value *V, const Twine &Name = "") const {
    if (Instruction *I = dyn_cast<Instruction>(V))
      return Insert(I, Name);
    assert(isa<Constant>(V));
    return V;
  }
166
167 //===--------------------------------------------------------------------===//
168 // Builder configuration methods
169 //===--------------------------------------------------------------------===//
170
  /// Clear the insertion point: created instructions will not be
  /// inserted into a block.
  void ClearInsertionPoint() {
    BB = nullptr;
    InsertPt = BasicBlock::iterator();
  }

  // Insertion-point accessors. GetInsertBlock() is null when the insertion
  // point has been cleared.
  BasicBlock *GetInsertBlock() const { return BB; }
  BasicBlock::iterator GetInsertPoint() const { return InsertPt; }
  LLVMContext &getContext() const { return Context; }

  /// This specifies that created instructions should be appended to the
  /// end of the specified block.
  void SetInsertPoint(BasicBlock *TheBB) {
    BB = TheBB;
    InsertPt = BB->end();
  }

  /// This specifies that created instructions should be inserted before
  /// the specified instruction. Also adopts \p I's debug location as the
  /// builder's current one.
  void SetInsertPoint(Instruction *I) {
    BB = I->getParent();
    InsertPt = I->getIterator();
    assert(InsertPt != BB->end() && "Can't read debug loc from end()");
    SetCurrentDebugLocation(I->getDebugLoc());
  }

  /// This specifies that created instructions should be inserted at the
  /// specified point. When \p IP is not the block's end, the debug location
  /// of the instruction at \p IP becomes the builder's current one.
  void SetInsertPoint(BasicBlock *TheBB, BasicBlock::iterator IP) {
    BB = TheBB;
    InsertPt = IP;
    if (IP != TheBB->end())
      SetCurrentDebugLocation(IP->getDebugLoc());
  }
206
  /// Set location information used by debugging information.
  /// (Stored as the MD_dbg entry of MetadataToCopy, so it gets attached to
  /// every instruction this builder subsequently creates.)
  void SetCurrentDebugLocation(DebugLoc L) {
    AddOrRemoveMetadataToCopy(LLVMContext::MD_dbg, L.getAsMDNode());
  }

  /// Collect metadata with IDs \p MetadataKinds from \p Src which should be
  /// added to all created instructions. Entries present in MetadataToCopy but
  /// not on \p Src will be dropped from MetadataToCopy.
  void CollectMetadataToCopy(Instruction *Src,
                             ArrayRef<unsigned> MetadataKinds) {
    for (unsigned K : MetadataKinds)
      AddOrRemoveMetadataToCopy(K, Src->getMetadata(K));
  }

  /// Get location information used by debugging information.
  /// Returns an empty DebugLoc when no MD_dbg entry is being copied.
  DebugLoc getCurrentDebugLocation() const {
    for (auto &KV : MetadataToCopy)
      if (KV.first == LLVMContext::MD_dbg)
        return {cast<DILocation>(KV.second)};

    return {};
  }

  /// If this builder has a current debug location, set it on the
  /// specified instruction.
  void SetInstDebugLocation(Instruction *I) const {
    for (const auto &KV : MetadataToCopy)
      if (KV.first == LLVMContext::MD_dbg) {
        I->setDebugLoc(DebugLoc(KV.second));
        return;
      }
  }

  /// Add all entries in MetadataToCopy to \p I.
  void AddMetadataToInst(Instruction *I) const {
    for (auto &KV : MetadataToCopy)
      I->setMetadata(KV.first, KV.second);
  }
245
246 /// Get the return type of the current function that we're emitting
247 /// into.
248 Type *getCurrentFunctionReturnType() const;
249
250 /// InsertPoint - A saved insertion point.
251 class InsertPoint {
252 BasicBlock *Block = nullptr;
253 BasicBlock::iterator Point;
254
255 public:
256 /// Creates a new insertion point which doesn't point to anything.
257 InsertPoint() = default;
258
259 /// Creates a new insertion point at the given location.
260 InsertPoint(BasicBlock *InsertBlock, BasicBlock::iterator InsertPoint)
261 : Block(InsertBlock), Point(InsertPoint) {}
262
263 /// Returns true if this insert point is set.
264 bool isSet() const { return (Block != nullptr); }
265
266 BasicBlock *getBlock() const { return Block; }
267 BasicBlock::iterator getPoint() const { return Point; }
268 };
269
  /// Returns the current insert point.
  InsertPoint saveIP() const {
    return InsertPoint(GetInsertBlock(), GetInsertPoint());
  }

  /// Returns the current insert point, clearing it in the process.
  InsertPoint saveAndClearIP() {
    InsertPoint IP(GetInsertBlock(), GetInsertPoint());
    ClearInsertionPoint();
    return IP;
  }

  /// Sets the current insert point to a previously-saved location.
  /// An unset \p IP clears the insertion point instead.
  void restoreIP(InsertPoint IP) {
    if (IP.isSet())
      SetInsertPoint(IP.getBlock(), IP.getPoint());
    else
      ClearInsertionPoint();
  }
289
  /// Get the floating point math metadata being used.
  MDNode *getDefaultFPMathTag() const { return DefaultFPMathTag; }

  /// Get the flags to be applied to created floating point ops
  FastMathFlags getFastMathFlags() const { return FMF; }

  /// Mutable access to the flags, e.g. to toggle a single flag in place.
  FastMathFlags &getFastMathFlags() { return FMF; }

  /// Clear the fast-math flags.
  void clearFastMathFlags() { FMF.clear(); }

  /// Set the floating point math metadata to be used.
  void setDefaultFPMathTag(MDNode *FPMathTag) { DefaultFPMathTag = FPMathTag; }

  /// Set the fast-math flags to be used with generated fp-math operators
  void setFastMathFlags(FastMathFlags NewFMF) { FMF = NewFMF; }

  /// Enable/Disable use of constrained floating point math. When
  /// enabled the CreateF<op>() calls instead create constrained
  /// floating point intrinsic calls. Fast math flags are unaffected
  /// by this setting.
  void setIsFPConstrained(bool IsCon) { IsFPConstrained = IsCon; }

  /// Query for the use of constrained floating point math
  bool getIsFPConstrained() { return IsFPConstrained; }

  /// Set the exception handling to be used with constrained floating point
  void setDefaultConstrainedExcept(fp::ExceptionBehavior NewExcept) {
#ifndef NDEBUG
    // Debug-only sanity check that NewExcept names a known behavior.
    Optional<StringRef> ExceptStr = ExceptionBehaviorToStr(NewExcept);
    assert(ExceptStr.hasValue() && "Garbage strict exception behavior!");
#endif
    DefaultConstrainedExcept = NewExcept;
  }

  /// Set the rounding mode handling to be used with constrained floating point
  void setDefaultConstrainedRounding(RoundingMode NewRounding) {
#ifndef NDEBUG
    // Debug-only sanity check that NewRounding names a known rounding mode.
    Optional<StringRef> RoundingStr = RoundingModeToStr(NewRounding);
    assert(RoundingStr.hasValue() && "Garbage strict rounding mode!");
#endif
    DefaultConstrainedRounding = NewRounding;
  }

  /// Get the exception handling used with constrained floating point
  fp::ExceptionBehavior getDefaultConstrainedExcept() {
    return DefaultConstrainedExcept;
  }

  /// Get the rounding mode handling used with constrained floating point
  RoundingMode getDefaultConstrainedRounding() {
    return DefaultConstrainedRounding;
  }

  /// Mark the function containing the current insertion block as using
  /// strict floating point semantics. Requires a set insertion point.
  void setConstrainedFPFunctionAttr() {
    assert(BB && "Must have a basic block to set any function attributes!");

    Function *F = BB->getParent();
    if (!F->hasFnAttribute(Attribute::StrictFP)) {
      F->addFnAttr(Attribute::StrictFP);
    }
  }

  /// Mark the call/invoke \p I as using strict floating point semantics.
  void setConstrainedFPCallAttr(CallBase *I) {
    I->addAttribute(AttributeList::FunctionIndex, Attribute::StrictFP);
  }

  /// Set the operand bundles attached to calls created by this builder.
  void setDefaultOperandBundles(ArrayRef<OperandBundleDef> OpBundles) {
    DefaultOperandBundles = OpBundles;
  }
360
361 //===--------------------------------------------------------------------===//
362 // RAII helpers.
363 //===--------------------------------------------------------------------===//
364
  // RAII object that stores the current insertion point and restores it
  // when the object is destroyed. This includes the debug location.
  class InsertPointGuard {
    IRBuilderBase &Builder;
    // AssertingVH traps (in asserts builds) if the saved block is deleted
    // while the guard is live.
    AssertingVH<BasicBlock> Block;
    BasicBlock::iterator Point;
    DebugLoc DbgLoc;

  public:
    InsertPointGuard(IRBuilderBase &B)
        : Builder(B), Block(B.GetInsertBlock()), Point(B.GetInsertPoint()),
          DbgLoc(B.getCurrentDebugLocation()) {}

    InsertPointGuard(const InsertPointGuard &) = delete;
    InsertPointGuard &operator=(const InsertPointGuard &) = delete;

    ~InsertPointGuard() {
      // Restore both the insertion point and the debug location saved at
      // construction time.
      Builder.restoreIP(InsertPoint(Block, Point));
      Builder.SetCurrentDebugLocation(DbgLoc);
    }
  };

  // RAII object that stores the current fast math settings and restores
  // them when the object is destroyed.
  class FastMathFlagGuard {
    IRBuilderBase &Builder;
    FastMathFlags FMF;
    MDNode *FPMathTag;
    bool IsFPConstrained;
    fp::ExceptionBehavior DefaultConstrainedExcept;
    RoundingMode DefaultConstrainedRounding;

  public:
    FastMathFlagGuard(IRBuilderBase &B)
        : Builder(B), FMF(B.FMF), FPMathTag(B.DefaultFPMathTag),
          IsFPConstrained(B.IsFPConstrained),
          DefaultConstrainedExcept(B.DefaultConstrainedExcept),
          DefaultConstrainedRounding(B.DefaultConstrainedRounding) {}

    FastMathFlagGuard(const FastMathFlagGuard &) = delete;
    FastMathFlagGuard &operator=(const FastMathFlagGuard &) = delete;

    ~FastMathFlagGuard() {
      Builder.FMF = FMF;
      Builder.DefaultFPMathTag = FPMathTag;
      Builder.IsFPConstrained = IsFPConstrained;
      Builder.DefaultConstrainedExcept = DefaultConstrainedExcept;
      Builder.DefaultConstrainedRounding = DefaultConstrainedRounding;
    }
  };

  // RAII object that stores the current default operand bundles and restores
  // them when the object is destroyed.
  class OperandBundlesGuard {
    IRBuilderBase &Builder;
    ArrayRef<OperandBundleDef> DefaultOperandBundles;

  public:
    OperandBundlesGuard(IRBuilderBase &B)
        : Builder(B), DefaultOperandBundles(B.DefaultOperandBundles) {}

    OperandBundlesGuard(const OperandBundlesGuard &) = delete;
    OperandBundlesGuard &operator=(const OperandBundlesGuard &) = delete;

    ~OperandBundlesGuard() {
      Builder.DefaultOperandBundles = DefaultOperandBundles;
    }
  };
433
434
435 //===--------------------------------------------------------------------===//
436 // Miscellaneous creation methods.
437 //===--------------------------------------------------------------------===//
438
439 /// Make a new global variable with initializer type i8*
440 ///
441 /// Make a new global variable with an initializer that has array of i8 type
442 /// filled in with the null terminated string value specified. The new global
443 /// variable will be marked mergable with any others of the same contents. If
444 /// Name is specified, it is the name of the global variable created.
445 ///
446 /// If no module is given via \p M, it is take from the insertion point basic
447 /// block.
448 GlobalVariable *CreateGlobalString(StringRef Str, const Twine &Name = "",
449 unsigned AddressSpace = 0,
450 Module *M = nullptr);
451
  /// Get a constant value representing either true or false.
  ConstantInt *getInt1(bool V) {
    return ConstantInt::get(getInt1Ty(), V);
  }

  /// Get the constant value for i1 true.
  ConstantInt *getTrue() {
    return ConstantInt::getTrue(Context);
  }

  /// Get the constant value for i1 false.
  ConstantInt *getFalse() {
    return ConstantInt::getFalse(Context);
  }

  /// Get a constant 8-bit value.
  ConstantInt *getInt8(uint8_t C) {
    return ConstantInt::get(getInt8Ty(), C);
  }

  /// Get a constant 16-bit value.
  ConstantInt *getInt16(uint16_t C) {
    return ConstantInt::get(getInt16Ty(), C);
  }

  /// Get a constant 32-bit value.
  ConstantInt *getInt32(uint32_t C) {
    return ConstantInt::get(getInt32Ty(), C);
  }

  /// Get a constant 64-bit value.
  ConstantInt *getInt64(uint64_t C) {
    return ConstantInt::get(getInt64Ty(), C);
  }

  /// Get a constant N-bit value, zero extended or truncated from
  /// a 64-bit value.
  ConstantInt *getIntN(unsigned N, uint64_t C) {
    return ConstantInt::get(getIntNTy(N), C);
  }

  /// Get a constant integer value. The bit width is taken from \p AI.
  ConstantInt *getInt(const APInt &AI) {
    return ConstantInt::get(Context, AI);
  }
497
498 //===--------------------------------------------------------------------===//
499 // Type creation methods
500 //===--------------------------------------------------------------------===//
501
  /// Fetch the type representing a single bit
  IntegerType *getInt1Ty() {
    return Type::getInt1Ty(Context);
  }

  /// Fetch the type representing an 8-bit integer.
  IntegerType *getInt8Ty() {
    return Type::getInt8Ty(Context);
  }

  /// Fetch the type representing a 16-bit integer.
  IntegerType *getInt16Ty() {
    return Type::getInt16Ty(Context);
  }

  /// Fetch the type representing a 32-bit integer.
  IntegerType *getInt32Ty() {
    return Type::getInt32Ty(Context);
  }

  /// Fetch the type representing a 64-bit integer.
  IntegerType *getInt64Ty() {
    return Type::getInt64Ty(Context);
  }

  /// Fetch the type representing a 128-bit integer.
  IntegerType *getInt128Ty() { return Type::getInt128Ty(Context); }

  /// Fetch the type representing an N-bit integer.
  IntegerType *getIntNTy(unsigned N) {
    return Type::getIntNTy(Context, N);
  }

  /// Fetch the type representing a 16-bit floating point value.
  Type *getHalfTy() {
    return Type::getHalfTy(Context);
  }

  /// Fetch the type representing a 16-bit brain floating point value.
  Type *getBFloatTy() {
    return Type::getBFloatTy(Context);
  }

  /// Fetch the type representing a 32-bit floating point value.
  Type *getFloatTy() {
    return Type::getFloatTy(Context);
  }

  /// Fetch the type representing a 64-bit floating point value.
  Type *getDoubleTy() {
    return Type::getDoubleTy(Context);
  }

  /// Fetch the type representing void.
  Type *getVoidTy() {
    return Type::getVoidTy(Context);
  }

  /// Fetch the type representing a pointer to an 8-bit integer value.
  PointerType *getInt8PtrTy(unsigned AddrSpace = 0) {
    return Type::getInt8PtrTy(Context, AddrSpace);
  }

  /// Fetch the type representing a pointer to an integer value.
  /// The width is target-dependent, taken from the data layout \p DL.
  IntegerType *getIntPtrTy(const DataLayout &DL, unsigned AddrSpace = 0) {
    return DL.getIntPtrType(Context, AddrSpace);
  }
569
570 //===--------------------------------------------------------------------===//
571 // Intrinsic creation methods
572 //===--------------------------------------------------------------------===//
573
  /// Create and insert a memset to the specified pointer and the
  /// specified value.
  ///
  /// If the pointer isn't an i8*, it will be converted. If a TBAA tag is
  /// specified, it will be added to the instruction. Likewise with alias.scope
  /// and noalias tags.
  ///
  /// Convenience overload for a compile-time-constant \p Size; forwards to
  /// the Value*-size overload with an i64 constant.
  CallInst *CreateMemSet(Value *Ptr, Value *Val, uint64_t Size,
                         MaybeAlign Align, bool isVolatile = false,
                         MDNode *TBAATag = nullptr, MDNode *ScopeTag = nullptr,
                         MDNode *NoAliasTag = nullptr) {
    return CreateMemSet(Ptr, Val, getInt64(Size), Align, isVolatile,
                        TBAATag, ScopeTag, NoAliasTag);
  }
587
588 CallInst *CreateMemSet(Value *Ptr, Value *Val, Value *Size, MaybeAlign Align,
589 bool isVolatile = false, MDNode *TBAATag = nullptr,
590 MDNode *ScopeTag = nullptr,
591 MDNode *NoAliasTag = nullptr);
592
  /// Create and insert an element unordered-atomic memset of the region of
  /// memory starting at the given pointer to the given value.
  ///
  /// If the pointer isn't an i8*, it will be converted. If a TBAA tag is
  /// specified, it will be added to the instruction. Likewise with alias.scope
  /// and noalias tags.
  ///
  /// Convenience overload for a compile-time-constant \p Size; forwards to
  /// the Value*-size overload with an i64 constant.
  CallInst *CreateElementUnorderedAtomicMemSet(Value *Ptr, Value *Val,
                                               uint64_t Size, Align Alignment,
                                               uint32_t ElementSize,
                                               MDNode *TBAATag = nullptr,
                                               MDNode *ScopeTag = nullptr,
                                               MDNode *NoAliasTag = nullptr) {
    return CreateElementUnorderedAtomicMemSet(Ptr, Val, getInt64(Size),
                                              Align(Alignment), ElementSize,
                                              TBAATag, ScopeTag, NoAliasTag);
  }
609
610 CallInst *CreateElementUnorderedAtomicMemSet(Value *Ptr, Value *Val,
611 Value *Size, Align Alignment,
612 uint32_t ElementSize,
613 MDNode *TBAATag = nullptr,
614 MDNode *ScopeTag = nullptr,
615 MDNode *NoAliasTag = nullptr);
616
  /// Create and insert a memcpy between the specified pointers.
  ///
  /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
  /// specified, it will be added to the instruction. Likewise with alias.scope
  /// and noalias tags.
  ///
  /// Convenience overload for a compile-time-constant \p Size.
  CallInst *CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src,
                         MaybeAlign SrcAlign, uint64_t Size,
                         bool isVolatile = false, MDNode *TBAATag = nullptr,
                         MDNode *TBAAStructTag = nullptr,
                         MDNode *ScopeTag = nullptr,
                         MDNode *NoAliasTag = nullptr) {
    return CreateMemCpy(Dst, DstAlign, Src, SrcAlign, getInt64(Size),
                        isVolatile, TBAATag, TBAAStructTag, ScopeTag,
                        NoAliasTag);
  }

  /// Create a memory-transfer intrinsic call (memcpy, memmove, ...) selected
  /// by \p IntrID. Defined out of line.
  CallInst *CreateMemTransferInst(
      Intrinsic::ID IntrID, Value *Dst, MaybeAlign DstAlign, Value *Src,
      MaybeAlign SrcAlign, Value *Size, bool isVolatile = false,
      MDNode *TBAATag = nullptr, MDNode *TBAAStructTag = nullptr,
      MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr);

  /// As above, with a Value* size: emitted as an llvm.memcpy intrinsic call.
  CallInst *CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src,
                         MaybeAlign SrcAlign, Value *Size,
                         bool isVolatile = false, MDNode *TBAATag = nullptr,
                         MDNode *TBAAStructTag = nullptr,
                         MDNode *ScopeTag = nullptr,
                         MDNode *NoAliasTag = nullptr) {
    return CreateMemTransferInst(Intrinsic::memcpy, Dst, DstAlign, Src,
                                 SrcAlign, Size, isVolatile, TBAATag,
                                 TBAAStructTag, ScopeTag, NoAliasTag);
  }
649
650 CallInst *
651 CreateMemCpyInline(Value *Dst, MaybeAlign DstAlign, Value *Src,
652 MaybeAlign SrcAlign, Value *Size, bool IsVolatile = false,
653 MDNode *TBAATag = nullptr, MDNode *TBAAStructTag = nullptr,
654 MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr);
655
656 /// Create and insert an element unordered-atomic memcpy between the
657 /// specified pointers.
658 ///
659 /// DstAlign/SrcAlign are the alignments of the Dst/Src pointers, respectively.
660 ///
661 /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
662 /// specified, it will be added to the instruction. Likewise with alias.scope
663 /// and noalias tags.
664 CallInst *CreateElementUnorderedAtomicMemCpy(
665 Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
666 uint32_t ElementSize, MDNode *TBAATag = nullptr,
667 MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr,
668 MDNode *NoAliasTag = nullptr);
669
  /// Create and insert a memmove between the specified pointers.
  /// Convenience overload for a compile-time-constant \p Size; forwards to
  /// the Value*-size overload with an i64 constant.
  CallInst *CreateMemMove(Value *Dst, MaybeAlign DstAlign, Value *Src,
                          MaybeAlign SrcAlign, uint64_t Size,
                          bool isVolatile = false, MDNode *TBAATag = nullptr,
                          MDNode *ScopeTag = nullptr,
                          MDNode *NoAliasTag = nullptr) {
    return CreateMemMove(Dst, DstAlign, Src, SrcAlign, getInt64(Size),
                         isVolatile, TBAATag, ScopeTag, NoAliasTag);
  }
678
679 CallInst *CreateMemMove(Value *Dst, MaybeAlign DstAlign, Value *Src,
680 MaybeAlign SrcAlign, Value *Size,
681 bool isVolatile = false, MDNode *TBAATag = nullptr,
682 MDNode *ScopeTag = nullptr,
683 MDNode *NoAliasTag = nullptr);
684
685 /// \brief Create and insert an element unordered-atomic memmove between the
686 /// specified pointers.
687 ///
688 /// DstAlign/SrcAlign are the alignments of the Dst/Src pointers,
689 /// respectively.
690 ///
691 /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
692 /// specified, it will be added to the instruction. Likewise with alias.scope
693 /// and noalias tags.
694 CallInst *CreateElementUnorderedAtomicMemMove(
695 Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
696 uint32_t ElementSize, MDNode *TBAATag = nullptr,
697 MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr,
698 MDNode *NoAliasTag = nullptr);
699
700 /// Create a vector fadd reduction intrinsic of the source vector.
701 /// The first parameter is a scalar accumulator value for ordered reductions.
702 CallInst *CreateFAddReduce(Value *Acc, Value *Src);
703
704 /// Create a vector fmul reduction intrinsic of the source vector.
705 /// The first parameter is a scalar accumulator value for ordered reductions.
706 CallInst *CreateFMulReduce(Value *Acc, Value *Src);
707
708 /// Create a vector int add reduction intrinsic of the source vector.
709 CallInst *CreateAddReduce(Value *Src);
710
711 /// Create a vector int mul reduction intrinsic of the source vector.
712 CallInst *CreateMulReduce(Value *Src);
713
714 /// Create a vector int AND reduction intrinsic of the source vector.
715 CallInst *CreateAndReduce(Value *Src);
716
717 /// Create a vector int OR reduction intrinsic of the source vector.
718 CallInst *CreateOrReduce(Value *Src);
719
720 /// Create a vector int XOR reduction intrinsic of the source vector.
721 CallInst *CreateXorReduce(Value *Src);
722
723 /// Create a vector integer max reduction intrinsic of the source
724 /// vector.
725 CallInst *CreateIntMaxReduce(Value *Src, bool IsSigned = false);
726
727 /// Create a vector integer min reduction intrinsic of the source
728 /// vector.
729 CallInst *CreateIntMinReduce(Value *Src, bool IsSigned = false);
730
731 /// Create a vector float max reduction intrinsic of the source
732 /// vector.
733 CallInst *CreateFPMaxReduce(Value *Src);
734
735 /// Create a vector float min reduction intrinsic of the source
736 /// vector.
737 CallInst *CreateFPMinReduce(Value *Src);
738
739 /// Create a lifetime.start intrinsic.
740 ///
741 /// If the pointer isn't i8* it will be converted.
742 CallInst *CreateLifetimeStart(Value *Ptr, ConstantInt *Size = nullptr);
743
744 /// Create a lifetime.end intrinsic.
745 ///
746 /// If the pointer isn't i8* it will be converted.
747 CallInst *CreateLifetimeEnd(Value *Ptr, ConstantInt *Size = nullptr);
748
749 /// Create a call to invariant.start intrinsic.
750 ///
751 /// If the pointer isn't i8* it will be converted.
752 CallInst *CreateInvariantStart(Value *Ptr, ConstantInt *Size = nullptr);
753
754 /// Create a call to Masked Load intrinsic
755 CallInst *CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment, Value *Mask,
756 Value *PassThru = nullptr, const Twine &Name = "");
757
758 /// Create a call to Masked Store intrinsic
759 CallInst *CreateMaskedStore(Value *Val, Value *Ptr, Align Alignment,
760 Value *Mask);
761
762 /// Create a call to Masked Gather intrinsic
763 CallInst *CreateMaskedGather(Type *Ty, Value *Ptrs, Align Alignment,
764 Value *Mask = nullptr, Value *PassThru = nullptr,
765 const Twine &Name = "");
766
767 /// Create a call to Masked Scatter intrinsic
768 CallInst *CreateMaskedScatter(Value *Val, Value *Ptrs, Align Alignment,
769 Value *Mask = nullptr);
770
771 /// Create an assume intrinsic call that allows the optimizer to
772 /// assume that the provided condition will be true.
773 ///
774 /// The optional argument \p OpBundles specifies operand bundles that are
775 /// added to the call instruction.
776 CallInst *CreateAssumption(Value *Cond,
777 ArrayRef<OperandBundleDef> OpBundles = llvm::None);
778
  /// Create a llvm.experimental.noalias.scope.decl intrinsic call.
  Instruction *CreateNoAliasScopeDeclaration(Value *Scope);
  /// Convenience overload: wraps \p ScopeTag as a MetadataAsValue first.
  Instruction *CreateNoAliasScopeDeclaration(MDNode *ScopeTag) {
    return CreateNoAliasScopeDeclaration(
        MetadataAsValue::get(Context, ScopeTag));
  }
785
786 /// Create a call to the experimental.gc.statepoint intrinsic to
787 /// start a new statepoint sequence.
788 CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
789 Value *ActualCallee,
790 ArrayRef<Value *> CallArgs,
791 Optional<ArrayRef<Value *>> DeoptArgs,
792 ArrayRef<Value *> GCArgs,
793 const Twine &Name = "");
794
795 /// Create a call to the experimental.gc.statepoint intrinsic to
796 /// start a new statepoint sequence.
797 CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
798 Value *ActualCallee, uint32_t Flags,
799 ArrayRef<Value *> CallArgs,
800 Optional<ArrayRef<Use>> TransitionArgs,
801 Optional<ArrayRef<Use>> DeoptArgs,
802 ArrayRef<Value *> GCArgs,
803 const Twine &Name = "");
804
805 /// Conveninence function for the common case when CallArgs are filled
806 /// in using makeArrayRef(CS.arg_begin(), CS.arg_end()); Use needs to be
807 /// .get()'ed to get the Value pointer.
808 CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
809 Value *ActualCallee, ArrayRef<Use> CallArgs,
810 Optional<ArrayRef<Value *>> DeoptArgs,
811 ArrayRef<Value *> GCArgs,
812 const Twine &Name = "");
813
814 /// Create an invoke to the experimental.gc.statepoint intrinsic to
815 /// start a new statepoint sequence.
816 InvokeInst *
817 CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes,
818 Value *ActualInvokee, BasicBlock *NormalDest,
819 BasicBlock *UnwindDest, ArrayRef<Value *> InvokeArgs,
820 Optional<ArrayRef<Value *>> DeoptArgs,
821 ArrayRef<Value *> GCArgs, const Twine &Name = "");
822
823 /// Create an invoke to the experimental.gc.statepoint intrinsic to
824 /// start a new statepoint sequence.
825 InvokeInst *CreateGCStatepointInvoke(
826 uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
827 BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
828 ArrayRef<Value *> InvokeArgs, Optional<ArrayRef<Use>> TransitionArgs,
829 Optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
830 const Twine &Name = "");
831
832 // Convenience function for the common case when CallArgs are filled in using
833 // makeArrayRef(CS.arg_begin(), CS.arg_end()); Use needs to be .get()'ed to
834 // get the Value *.
835 InvokeInst *
836 CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes,
837 Value *ActualInvokee, BasicBlock *NormalDest,
838 BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
839 Optional<ArrayRef<Value *>> DeoptArgs,
840 ArrayRef<Value *> GCArgs, const Twine &Name = "");
841
842 /// Create a call to the experimental.gc.result intrinsic to extract
843 /// the result from a call wrapped in a statepoint.
844 CallInst *CreateGCResult(Instruction *Statepoint,
845 Type *ResultType,
846 const Twine &Name = "");
847
848 /// Create a call to the experimental.gc.relocate intrinsics to
849 /// project the relocated value of one pointer from the statepoint.
850 CallInst *CreateGCRelocate(Instruction *Statepoint,
851 int BaseOffset,
852 int DerivedOffset,
853 Type *ResultType,
854 const Twine &Name = "");
855
856 /// Create a call to the experimental.gc.pointer.base intrinsic to get the
857 /// base pointer for the specified derived pointer.
858 CallInst *CreateGCGetPointerBase(Value *DerivedPtr, const Twine &Name = "");
859
860 /// Create a call to the experimental.gc.get.pointer.offset intrinsic to get
861 /// the offset of the specified derived pointer from its base.
862 CallInst *CreateGCGetPointerOffset(Value *DerivedPtr, const Twine &Name = "");
863
864 /// Create a call to llvm.vscale, multiplied by \p Scaling. The type of VScale
865 /// will be the same type as that of \p Scaling.
866 Value *CreateVScale(Constant *Scaling, const Twine &Name = "");
867
868 /// Creates a vector of type \p DstType with the linear sequence <0, 1, ...>
869 Value *CreateStepVector(Type *DstType, const Twine &Name = "");
870
871 /// Create a call to intrinsic \p ID with 1 operand which is mangled on its
872 /// type.
873 CallInst *CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V,
874 Instruction *FMFSource = nullptr,
875 const Twine &Name = "");
876
877 /// Create a call to intrinsic \p ID with 2 operands which is mangled on the
878 /// first type.
879 CallInst *CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS,
880 Instruction *FMFSource = nullptr,
881 const Twine &Name = "");
882
883 /// Create a call to intrinsic \p ID with \p args, mangled using \p Types. If
884 /// \p FMFSource is provided, copy fast-math-flags from that instruction to
885 /// the intrinsic.
886 CallInst *CreateIntrinsic(Intrinsic::ID ID, ArrayRef<Type *> Types,
887 ArrayRef<Value *> Args,
888 Instruction *FMFSource = nullptr,
889 const Twine &Name = "");
890
  /// Create call to the minnum intrinsic.
  CallInst *CreateMinNum(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS,
                                 /*FMFSource=*/nullptr, Name);
  }
895
  /// Create call to the maxnum intrinsic.
  CallInst *CreateMaxNum(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS,
                                 /*FMFSource=*/nullptr, Name);
  }
900
  /// Create call to the minimum intrinsic.
  CallInst *CreateMinimum(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateBinaryIntrinsic(Intrinsic::minimum, LHS, RHS,
                                 /*FMFSource=*/nullptr, Name);
  }
905
  /// Create call to the maximum intrinsic.
  CallInst *CreateMaximum(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateBinaryIntrinsic(Intrinsic::maximum, LHS, RHS,
                                 /*FMFSource=*/nullptr, Name);
  }
910
  /// Create a call to the arithmetic_fence intrinsic.
  CallInst *CreateArithmeticFence(Value *Val, Type *DstType,
                                  const Twine &Name = "") {
    // Single Type* / Value* arguments convert implicitly to one-element
    // ArrayRefs for CreateIntrinsic's Types/Args parameters.
    return CreateIntrinsic(Intrinsic::arithmetic_fence, DstType, Val,
                           /*FMFSource=*/nullptr, Name);
  }
917
  /// Create a call to the experimental.vector.extract intrinsic.
  /// The intrinsic is mangled on both the result type and the source
  /// vector's type.
  CallInst *CreateExtractVector(Type *DstType, Value *SrcVec, Value *Idx,
                                const Twine &Name = "") {
    return CreateIntrinsic(Intrinsic::experimental_vector_extract,
                           {DstType, SrcVec->getType()}, {SrcVec, Idx},
                           /*FMFSource=*/nullptr, Name);
  }
925
  /// Create a call to the experimental.vector.insert intrinsic.
  /// The intrinsic is mangled on the (destination) vector type and the
  /// subvector's type.
  CallInst *CreateInsertVector(Type *DstType, Value *SrcVec, Value *SubVec,
                               Value *Idx, const Twine &Name = "") {
    return CreateIntrinsic(Intrinsic::experimental_vector_insert,
                           {DstType, SubVec->getType()}, {SrcVec, SubVec, Idx},
                           /*FMFSource=*/nullptr, Name);
  }
933
934private:
935 /// Create a call to a masked intrinsic with given Id.
936 CallInst *CreateMaskedIntrinsic(Intrinsic::ID Id, ArrayRef<Value *> Ops,
937 ArrayRef<Type *> OverloadedTypes,
938 const Twine &Name = "");
939
940 Value *getCastedInt8PtrValue(Value *Ptr);
941
942 //===--------------------------------------------------------------------===//
943 // Instruction creation methods: Terminators
944 //===--------------------------------------------------------------------===//
945
946private:
  /// Helper to add branch weight and unpredictable metadata onto an
  /// instruction.
  /// \param I             the branch-like instruction to annotate
  /// \param Weights       !prof branch-weight node, or null to attach nothing
  /// \param Unpredictable !unpredictable node, or null to attach nothing
  /// \returns The annotated instruction (always \p I itself).
  template <typename InstTy>
  InstTy *addBranchMetadata(InstTy *I, MDNode *Weights, MDNode *Unpredictable) {
    if (Weights)
      I->setMetadata(LLVMContext::MD_prof, Weights);
    if (Unpredictable)
      I->setMetadata(LLVMContext::MD_unpredictable, Unpredictable);
    return I;
  }
958
959public:
960 /// Create a 'ret void' instruction.
961 ReturnInst *CreateRetVoid() {
962 return Insert(ReturnInst::Create(Context));
963 }
964
965 /// Create a 'ret <val>' instruction.
966 ReturnInst *CreateRet(Value *V) {
967 return Insert(ReturnInst::Create(Context, V));
968 }
969
970 /// Create a sequence of N insertvalue instructions,
971 /// with one Value from the retVals array each, that build a aggregate
972 /// return value one value at a time, and a ret instruction to return
973 /// the resulting aggregate value.
974 ///
975 /// This is a convenience function for code that uses aggregate return values
976 /// as a vehicle for having multiple return values.
977 ReturnInst *CreateAggregateRet(Value *const *retVals, unsigned N) {
978 Value *V = UndefValue::get(getCurrentFunctionReturnType());
979 for (unsigned i = 0; i != N; ++i)
980 V = CreateInsertValue(V, retVals[i], i, "mrv");
981 return Insert(ReturnInst::Create(Context, V));
982 }
983
984 /// Create an unconditional 'br label X' instruction.
985 BranchInst *CreateBr(BasicBlock *Dest) {
986 return Insert(BranchInst::Create(Dest));
987 }
988
989 /// Create a conditional 'br Cond, TrueDest, FalseDest'
990 /// instruction.
991 BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False,
992 MDNode *BranchWeights = nullptr,
993 MDNode *Unpredictable = nullptr) {
994 return Insert(addBranchMetadata(BranchInst::Create(True, False, Cond),
995 BranchWeights, Unpredictable));
996 }
997
998 /// Create a conditional 'br Cond, TrueDest, FalseDest'
999 /// instruction. Copy branch meta data if available.
1000 BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False,
1001 Instruction *MDSrc) {
1002 BranchInst *Br = BranchInst::Create(True, False, Cond);
1003 if (MDSrc) {
1004 unsigned WL[4] = {LLVMContext::MD_prof, LLVMContext::MD_unpredictable,
1005 LLVMContext::MD_make_implicit, LLVMContext::MD_dbg};
1006 Br->copyMetadata(*MDSrc, makeArrayRef(&WL[0], 4));
1007 }
1008 return Insert(Br);
1009 }
1010
1011 /// Create a switch instruction with the specified value, default dest,
1012 /// and with a hint for the number of cases that will be added (for efficient
1013 /// allocation).
1014 SwitchInst *CreateSwitch(Value *V, BasicBlock *Dest, unsigned NumCases = 10,
1015 MDNode *BranchWeights = nullptr,
1016 MDNode *Unpredictable = nullptr) {
1017 return Insert(addBranchMetadata(SwitchInst::Create(V, Dest, NumCases),
1018 BranchWeights, Unpredictable));
1019 }
1020
1021 /// Create an indirect branch instruction with the specified address
1022 /// operand, with an optional hint for the number of destinations that will be
1023 /// added (for efficient allocation).
1024 IndirectBrInst *CreateIndirectBr(Value *Addr, unsigned NumDests = 10) {
1025 return Insert(IndirectBrInst::Create(Addr, NumDests));
1026 }
1027
1028 /// Create an invoke instruction.
1029 InvokeInst *CreateInvoke(FunctionType *Ty, Value *Callee,
1030 BasicBlock *NormalDest, BasicBlock *UnwindDest,
1031 ArrayRef<Value *> Args,
1032 ArrayRef<OperandBundleDef> OpBundles,
1033 const Twine &Name = "") {
1034 InvokeInst *II =
1035 InvokeInst::Create(Ty, Callee, NormalDest, UnwindDest, Args, OpBundles);
1036 if (IsFPConstrained)
1037 setConstrainedFPCallAttr(II);
1038 return Insert(II, Name);
1039 }
1040 InvokeInst *CreateInvoke(FunctionType *Ty, Value *Callee,
1041 BasicBlock *NormalDest, BasicBlock *UnwindDest,
1042 ArrayRef<Value *> Args = None,
1043 const Twine &Name = "") {
1044 InvokeInst *II =
1045 InvokeInst::Create(Ty, Callee, NormalDest, UnwindDest, Args);
1046 if (IsFPConstrained)
1047 setConstrainedFPCallAttr(II);
1048 return Insert(II, Name);
1049 }
1050
1051 InvokeInst *CreateInvoke(FunctionCallee Callee, BasicBlock *NormalDest,
1052 BasicBlock *UnwindDest, ArrayRef<Value *> Args,
1053 ArrayRef<OperandBundleDef> OpBundles,
1054 const Twine &Name = "") {
1055 return CreateInvoke(Callee.getFunctionType(), Callee.getCallee(),
1056 NormalDest, UnwindDest, Args, OpBundles, Name);
1057 }
1058
1059 InvokeInst *CreateInvoke(FunctionCallee Callee, BasicBlock *NormalDest,
1060 BasicBlock *UnwindDest,
1061 ArrayRef<Value *> Args = None,
1062 const Twine &Name = "") {
1063 return CreateInvoke(Callee.getFunctionType(), Callee.getCallee(),
1064 NormalDest, UnwindDest, Args, Name);
1065 }
1066
1067 /// \brief Create a callbr instruction.
1068 CallBrInst *CreateCallBr(FunctionType *Ty, Value *Callee,
1069 BasicBlock *DefaultDest,
1070 ArrayRef<BasicBlock *> IndirectDests,
1071 ArrayRef<Value *> Args = None,
1072 const Twine &Name = "") {
1073 return Insert(CallBrInst::Create(Ty, Callee, DefaultDest, IndirectDests,
1074 Args), Name);
1075 }
1076 CallBrInst *CreateCallBr(FunctionType *Ty, Value *Callee,
1077 BasicBlock *DefaultDest,
1078 ArrayRef<BasicBlock *> IndirectDests,
1079 ArrayRef<Value *> Args,
1080 ArrayRef<OperandBundleDef> OpBundles,
1081 const Twine &Name = "") {
1082 return Insert(
1083 CallBrInst::Create(Ty, Callee, DefaultDest, IndirectDests, Args,
1084 OpBundles), Name);
1085 }
1086
1087 CallBrInst *CreateCallBr(FunctionCallee Callee, BasicBlock *DefaultDest,
1088 ArrayRef<BasicBlock *> IndirectDests,
1089 ArrayRef<Value *> Args = None,
1090 const Twine &Name = "") {
1091 return CreateCallBr(Callee.getFunctionType(), Callee.getCallee(),
1092 DefaultDest, IndirectDests, Args, Name);
1093 }
1094 CallBrInst *CreateCallBr(FunctionCallee Callee, BasicBlock *DefaultDest,
1095 ArrayRef<BasicBlock *> IndirectDests,
1096 ArrayRef<Value *> Args,
1097 ArrayRef<OperandBundleDef> OpBundles,
1098 const Twine &Name = "") {
1099 return CreateCallBr(Callee.getFunctionType(), Callee.getCallee(),
1100 DefaultDest, IndirectDests, Args, Name);
1101 }
1102
1103 ResumeInst *CreateResume(Value *Exn) {
1104 return Insert(ResumeInst::Create(Exn));
1105 }
1106
1107 CleanupReturnInst *CreateCleanupRet(CleanupPadInst *CleanupPad,
1108 BasicBlock *UnwindBB = nullptr) {
1109 return Insert(CleanupReturnInst::Create(CleanupPad, UnwindBB));
1110 }
1111
1112 CatchSwitchInst *CreateCatchSwitch(Value *ParentPad, BasicBlock *UnwindBB,
1113 unsigned NumHandlers,
1114 const Twine &Name = "") {
1115 return Insert(CatchSwitchInst::Create(ParentPad, UnwindBB, NumHandlers),
1116 Name);
1117 }
1118
1119 CatchPadInst *CreateCatchPad(Value *ParentPad, ArrayRef<Value *> Args,
1120 const Twine &Name = "") {
1121 return Insert(CatchPadInst::Create(ParentPad, Args), Name);
1122 }
1123
1124 CleanupPadInst *CreateCleanupPad(Value *ParentPad,
1125 ArrayRef<Value *> Args = None,
1126 const Twine &Name = "") {
1127 return Insert(CleanupPadInst::Create(ParentPad, Args), Name);
1128 }
1129
1130 CatchReturnInst *CreateCatchRet(CatchPadInst *CatchPad, BasicBlock *BB) {
1131 return Insert(CatchReturnInst::Create(CatchPad, BB));
1132 }
1133
1134 UnreachableInst *CreateUnreachable() {
1135 return Insert(new UnreachableInst(Context));
1136 }
1137
1138 //===--------------------------------------------------------------------===//
1139 // Instruction creation methods: Binary Operators
1140 //===--------------------------------------------------------------------===//
1141private:
  /// Insert a binary operator carrying the requested no-wrap flags.
  /// The flags are set after insertion because BinaryOperator::Create does
  /// not accept them directly.
  BinaryOperator *CreateInsertNUWNSWBinOp(BinaryOperator::BinaryOps Opc,
                                          Value *LHS, Value *RHS,
                                          const Twine &Name,
                                          bool HasNUW, bool HasNSW) {
    BinaryOperator *BO = Insert(BinaryOperator::Create(Opc, LHS, RHS), Name);
    if (HasNUW) BO->setHasNoUnsignedWrap();
    if (HasNSW) BO->setHasNoSignedWrap();
    return BO;
  }
1151
  /// Attach !fpmath metadata and fast-math flags to \p I, returning \p I.
  /// A null \p FPMD falls back to the builder's DefaultFPMathTag; if that is
  /// also null, no !fpmath metadata is attached.
  Instruction *setFPAttrs(Instruction *I, MDNode *FPMD,
                          FastMathFlags FMF) const {
    if (!FPMD)
      FPMD = DefaultFPMathTag;
    if (FPMD)
      I->setMetadata(LLVMContext::MD_fpmath, FPMD);
    I->setFastMathFlags(FMF);
    return I;
  }
1161
  /// Constant-fold \p Opc over \p L and \p R when both are Constants.
  /// \returns the folded (and inserted) value, or null when either operand
  /// is non-constant, signalling the caller to emit a real instruction.
  Value *foldConstant(Instruction::BinaryOps Opc, Value *L,
                      Value *R, const Twine &Name) const {
    auto *LC = dyn_cast<Constant>(L);
    auto *RC = dyn_cast<Constant>(R);
    return (LC && RC) ? Insert(Folder.CreateBinOp(Opc, LC, RC), Name) : nullptr;
  }
1168
  /// Convert an (optional) rounding mode into the metadata-as-value operand
  /// expected by constrained FP intrinsics. An empty Optional selects the
  /// builder's default constrained rounding mode.
  Value *getConstrainedFPRounding(Optional<RoundingMode> Rounding) {
    RoundingMode UseRounding = DefaultConstrainedRounding;

    if (Rounding.hasValue())
      UseRounding = Rounding.getValue();

    Optional<StringRef> RoundingStr = RoundingModeToStr(UseRounding);
    assert(RoundingStr.hasValue() && "Garbage strict rounding mode!");
    auto *RoundingMDS = MDString::get(Context, RoundingStr.getValue());

    return MetadataAsValue::get(Context, RoundingMDS);
  }
1181
  /// Convert an (optional) exception behavior into the metadata-as-value
  /// operand expected by constrained FP intrinsics. An empty Optional selects
  /// the builder's default constrained exception behavior.
  Value *getConstrainedFPExcept(Optional<fp::ExceptionBehavior> Except) {
    fp::ExceptionBehavior UseExcept = DefaultConstrainedExcept;

    if (Except.hasValue())
      UseExcept = Except.getValue();

    Optional<StringRef> ExceptStr = ExceptionBehaviorToStr(UseExcept);
    assert(ExceptStr.hasValue() && "Garbage strict exception behavior!");
    auto *ExceptMDS = MDString::get(Context, ExceptStr.getValue());

    return MetadataAsValue::get(Context, ExceptMDS);
  }
1194
  /// Convert an FP comparison predicate into the metadata-as-value operand
  /// expected by the constrained fcmp/fcmps intrinsics. FCMP_FALSE/FCMP_TRUE
  /// are rejected because they denote constant results, not comparisons.
  Value *getConstrainedFPPredicate(CmpInst::Predicate Predicate) {
    assert(CmpInst::isFPPredicate(Predicate) &&
           Predicate != CmpInst::FCMP_FALSE &&
           Predicate != CmpInst::FCMP_TRUE &&
           "Invalid constrained FP comparison predicate!");

    StringRef PredicateStr = CmpInst::getPredicateName(Predicate);
    auto *PredicateMDS = MDString::get(Context, PredicateStr);

    return MetadataAsValue::get(Context, PredicateMDS);
  }
1206
1207public:
  /// Create an integer add, constant-folding when both operands are
  /// constant. \p HasNUW / \p HasNSW request the corresponding no-wrap flags
  /// on the emitted instruction (or folded constant).
  Value *CreateAdd(Value *LHS, Value *RHS, const Twine &Name = "",
                   bool HasNUW = false, bool HasNSW = false) {
    if (auto *LC = dyn_cast<Constant>(LHS))
      if (auto *RC = dyn_cast<Constant>(RHS))
        return Insert(Folder.CreateAdd(LC, RC, HasNUW, HasNSW), Name);
    return CreateInsertNUWNSWBinOp(Instruction::Add, LHS, RHS, Name,
                                   HasNUW, HasNSW);
  }
1216
1217 Value *CreateNSWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
1218 return CreateAdd(LHS, RHS, Name, false, true);
1219 }
1220
1221 Value *CreateNUWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
1222 return CreateAdd(LHS, RHS, Name, true, false);
1223 }
1224
1225 Value *CreateSub(Value *LHS, Value *RHS, const Twine &Name = "",
1226 bool HasNUW = false, bool HasNSW = false) {
1227 if (auto *LC = dyn_cast<Constant>(LHS))
1228 if (auto *RC = dyn_cast<Constant>(RHS))
1229 return Insert(Folder.CreateSub(LC, RC, HasNUW, HasNSW), Name);
1230 return CreateInsertNUWNSWBinOp(Instruction::Sub, LHS, RHS, Name,
1231 HasNUW, HasNSW);
1232 }
1233
1234 Value *CreateNSWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
1235 return CreateSub(LHS, RHS, Name, false, true);
1236 }
1237
1238 Value *CreateNUWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
1239 return CreateSub(LHS, RHS, Name, true, false);
1240 }
1241
1242 Value *CreateMul(Value *LHS, Value *RHS, const Twine &Name = "",
1243 bool HasNUW = false, bool HasNSW = false) {
1244 if (auto *LC = dyn_cast<Constant>(LHS))
1245 if (auto *RC = dyn_cast<Constant>(RHS))
1246 return Insert(Folder.CreateMul(LC, RC, HasNUW, HasNSW), Name);
1247 return CreateInsertNUWNSWBinOp(Instruction::Mul, LHS, RHS, Name,
1248 HasNUW, HasNSW);
1249 }
1250
1251 Value *CreateNSWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
1252 return CreateMul(LHS, RHS, Name, false, true);
1253 }
1254
1255 Value *CreateNUWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
1256 return CreateMul(LHS, RHS, Name, true, false);
1257 }
1258
  /// Create an unsigned divide, constant-folding when both operands are
  /// constant; emits an 'exact' udiv when \p isExact is set.
  Value *CreateUDiv(Value *LHS, Value *RHS, const Twine &Name = "",
                    bool isExact = false) {
    if (auto *LC = dyn_cast<Constant>(LHS))
      if (auto *RC = dyn_cast<Constant>(RHS))
        return Insert(Folder.CreateUDiv(LC, RC, isExact), Name);
    if (!isExact)
      return Insert(BinaryOperator::CreateUDiv(LHS, RHS), Name);
    return Insert(BinaryOperator::CreateExactUDiv(LHS, RHS), Name);
  }
1268
1269 Value *CreateExactUDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
1270 return CreateUDiv(LHS, RHS, Name, true);
1271 }
1272
1273 Value *CreateSDiv(Value *LHS, Value *RHS, const Twine &Name = "",
1274 bool isExact = false) {
1275 if (auto *LC = dyn_cast<Constant>(LHS))
1276 if (auto *RC = dyn_cast<Constant>(RHS))
1277 return Insert(Folder.CreateSDiv(LC, RC, isExact), Name);
1278 if (!isExact)
1279 return Insert(BinaryOperator::CreateSDiv(LHS, RHS), Name);
1280 return Insert(BinaryOperator::CreateExactSDiv(LHS, RHS), Name);
1281 }
1282
1283 Value *CreateExactSDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
1284 return CreateSDiv(LHS, RHS, Name, true);
1285 }
1286
1287 Value *CreateURem(Value *LHS, Value *RHS, const Twine &Name = "") {
1288 if (Value *V = foldConstant(Instruction::URem, LHS, RHS, Name)) return V;
1289 return Insert(BinaryOperator::CreateURem(LHS, RHS), Name);
1290 }
1291
1292 Value *CreateSRem(Value *LHS, Value *RHS, const Twine &Name = "") {
1293 if (Value *V = foldConstant(Instruction::SRem, LHS, RHS, Name)) return V;
1294 return Insert(BinaryOperator::CreateSRem(LHS, RHS), Name);
1295 }
1296
1297 Value *CreateShl(Value *LHS, Value *RHS, const Twine &Name = "",
1298 bool HasNUW = false, bool HasNSW = false) {
1299 if (auto *LC = dyn_cast<Constant>(LHS))
1300 if (auto *RC = dyn_cast<Constant>(RHS))
1301 return Insert(Folder.CreateShl(LC, RC, HasNUW, HasNSW), Name);
1302 return CreateInsertNUWNSWBinOp(Instruction::Shl, LHS, RHS, Name,
1303 HasNUW, HasNSW);
1304 }
1305
1306 Value *CreateShl(Value *LHS, const APInt &RHS, const Twine &Name = "",
1307 bool HasNUW = false, bool HasNSW = false) {
1308 return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
1309 HasNUW, HasNSW);
1310 }
1311
1312 Value *CreateShl(Value *LHS, uint64_t RHS, const Twine &Name = "",
1313 bool HasNUW = false, bool HasNSW = false) {
1314 return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
1315 HasNUW, HasNSW);
1316 }
1317
1318 Value *CreateLShr(Value *LHS, Value *RHS, const Twine &Name = "",
1319 bool isExact = false) {
1320 if (auto *LC = dyn_cast<Constant>(LHS))
1321 if (auto *RC = dyn_cast<Constant>(RHS))
1322 return Insert(Folder.CreateLShr(LC, RC, isExact), Name);
1323 if (!isExact)
1324 return Insert(BinaryOperator::CreateLShr(LHS, RHS), Name);
1325 return Insert(BinaryOperator::CreateExactLShr(LHS, RHS), Name);
1326 }
1327
1328 Value *CreateLShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
1329 bool isExact = false) {
1330 return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
1331 }
1332
1333 Value *CreateLShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
1334 bool isExact = false) {
1335 return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
1336 }
1337
1338 Value *CreateAShr(Value *LHS, Value *RHS, const Twine &Name = "",
1339 bool isExact = false) {
1340 if (auto *LC = dyn_cast<Constant>(LHS))
1341 if (auto *RC = dyn_cast<Constant>(RHS))
1342 return Insert(Folder.CreateAShr(LC, RC, isExact), Name);
1343 if (!isExact)
1344 return Insert(BinaryOperator::CreateAShr(LHS, RHS), Name);
1345 return Insert(BinaryOperator::CreateExactAShr(LHS, RHS), Name);
1346 }
1347
1348 Value *CreateAShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
1349 bool isExact = false) {
1350 return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
1351 }
1352
1353 Value *CreateAShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
1354 bool isExact = false) {
1355 return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
1356 }
1357
  /// Create a bitwise and, with two peepholes: 'x & -1' returns x directly,
  /// and two constant operands are folded outright.
  Value *CreateAnd(Value *LHS, Value *RHS, const Twine &Name = "") {
    if (auto *RC = dyn_cast<Constant>(RHS)) {
      if (isa<ConstantInt>(RC) && cast<ConstantInt>(RC)->isMinusOne())
        return LHS; // LHS & -1 -> LHS
      if (auto *LC = dyn_cast<Constant>(LHS))
        return Insert(Folder.CreateAnd(LC, RC), Name);
    }
    return Insert(BinaryOperator::CreateAnd(LHS, RHS), Name);
  }
1367
1368 Value *CreateAnd(Value *LHS, const APInt &RHS, const Twine &Name = "") {
1369 return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1370 }
1371
1372 Value *CreateAnd(Value *LHS, uint64_t RHS, const Twine &Name = "") {
1373 return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1374 }
1375
  /// Left-fold CreateAnd over \p Ops; requires at least one operand.
  Value *CreateAnd(ArrayRef<Value*> Ops) {
    assert(!Ops.empty());
    Value *Accum = Ops[0];
    for (unsigned i = 1; i < Ops.size(); i++)
      Accum = CreateAnd(Accum, Ops[i]);
    return Accum;
  }
1383
  /// Create a bitwise or, with two peepholes: 'x | 0' returns x directly,
  /// and two constant operands are folded outright.
  Value *CreateOr(Value *LHS, Value *RHS, const Twine &Name = "") {
    if (auto *RC = dyn_cast<Constant>(RHS)) {
      if (RC->isNullValue())
        return LHS; // LHS | 0 -> LHS
      if (auto *LC = dyn_cast<Constant>(LHS))
        return Insert(Folder.CreateOr(LC, RC), Name);
    }
    return Insert(BinaryOperator::CreateOr(LHS, RHS), Name);
  }
1393
1394 Value *CreateOr(Value *LHS, const APInt &RHS, const Twine &Name = "") {
1395 return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1396 }
1397
1398 Value *CreateOr(Value *LHS, uint64_t RHS, const Twine &Name = "") {
1399 return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1400 }
1401
  /// Left-fold CreateOr over \p Ops; requires at least one operand.
  Value *CreateOr(ArrayRef<Value*> Ops) {
    assert(!Ops.empty());
    Value *Accum = Ops[0];
    for (unsigned i = 1; i < Ops.size(); i++)
      Accum = CreateOr(Accum, Ops[i]);
    return Accum;
  }
1409
  /// Create a bitwise xor, constant-folding when both operands are constant.
  Value *CreateXor(Value *LHS, Value *RHS, const Twine &Name = "") {
    if (Value *V = foldConstant(Instruction::Xor, LHS, RHS, Name)) return V;
    return Insert(BinaryOperator::CreateXor(LHS, RHS), Name);
  }
1414
1415 Value *CreateXor(Value *LHS, const APInt &RHS, const Twine &Name = "") {
1416 return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1417 }
1418
1419 Value *CreateXor(Value *LHS, uint64_t RHS, const Twine &Name = "") {
1420 return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1421 }
1422
  /// Create an fadd. In constrained-FP mode this emits the
  /// experimental.constrained.fadd intrinsic instead; otherwise it constant
  /// folds, then emits an instruction carrying the builder's FMF and the
  /// given !fpmath metadata.
  Value *CreateFAdd(Value *L, Value *R, const Twine &Name = "",
                    MDNode *FPMD = nullptr) {
    if (IsFPConstrained)
      return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fadd,
                                      L, R, nullptr, Name, FPMD);

    if (Value *V = foldConstant(Instruction::FAdd, L, R, Name)) return V;
    Instruction *I = setFPAttrs(BinaryOperator::CreateFAdd(L, R), FPMD, FMF);
    return Insert(I, Name);
  }
1433
  /// Copy fast-math-flags from an instruction rather than using the builder's
  /// default FMF.
  /// \p FMFSource must be non-null: its flags are read unconditionally on
  /// the non-constrained, non-folded path.
  Value *CreateFAddFMF(Value *L, Value *R, Instruction *FMFSource,
                       const Twine &Name = "") {
    if (IsFPConstrained)
      return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fadd,
                                      L, R, FMFSource, Name);

    if (Value *V = foldConstant(Instruction::FAdd, L, R, Name)) return V;
    Instruction *I = setFPAttrs(BinaryOperator::CreateFAdd(L, R), nullptr,
                                FMFSource->getFastMathFlags());
    return Insert(I, Name);
  }
1447
1448 Value *CreateFSub(Value *L, Value *R, const Twine &Name = "",
1449 MDNode *FPMD = nullptr) {
1450 if (IsFPConstrained)
1451 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fsub,
1452 L, R, nullptr, Name, FPMD);
1453
1454 if (Value *V = foldConstant(Instruction::FSub, L, R, Name)) return V;
1455 Instruction *I = setFPAttrs(BinaryOperator::CreateFSub(L, R), FPMD, FMF);
1456 return Insert(I, Name);
1457 }
1458
1459 /// Copy fast-math-flags from an instruction rather than using the builder's
1460 /// default FMF.
1461 Value *CreateFSubFMF(Value *L, Value *R, Instruction *FMFSource,
1462 const Twine &Name = "") {
1463 if (IsFPConstrained)
1464 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fsub,
1465 L, R, FMFSource, Name);
1466
1467 if (Value *V = foldConstant(Instruction::FSub, L, R, Name)) return V;
1468 Instruction *I = setFPAttrs(BinaryOperator::CreateFSub(L, R), nullptr,
1469 FMFSource->getFastMathFlags());
1470 return Insert(I, Name);
1471 }
1472
1473 Value *CreateFMul(Value *L, Value *R, const Twine &Name = "",
1474 MDNode *FPMD = nullptr) {
1475 if (IsFPConstrained)
1476 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fmul,
1477 L, R, nullptr, Name, FPMD);
1478
1479 if (Value *V = foldConstant(Instruction::FMul, L, R, Name)) return V;
1480 Instruction *I = setFPAttrs(BinaryOperator::CreateFMul(L, R), FPMD, FMF);
1481 return Insert(I, Name);
1482 }
1483
1484 /// Copy fast-math-flags from an instruction rather than using the builder's
1485 /// default FMF.
1486 Value *CreateFMulFMF(Value *L, Value *R, Instruction *FMFSource,
1487 const Twine &Name = "") {
1488 if (IsFPConstrained)
1489 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fmul,
1490 L, R, FMFSource, Name);
1491
1492 if (Value *V = foldConstant(Instruction::FMul, L, R, Name)) return V;
1493 Instruction *I = setFPAttrs(BinaryOperator::CreateFMul(L, R), nullptr,
1494 FMFSource->getFastMathFlags());
1495 return Insert(I, Name);
1496 }
1497
1498 Value *CreateFDiv(Value *L, Value *R, const Twine &Name = "",
1499 MDNode *FPMD = nullptr) {
1500 if (IsFPConstrained)
1501 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fdiv,
1502 L, R, nullptr, Name, FPMD);
1503
1504 if (Value *V = foldConstant(Instruction::FDiv, L, R, Name)) return V;
1505 Instruction *I = setFPAttrs(BinaryOperator::CreateFDiv(L, R), FPMD, FMF);
1506 return Insert(I, Name);
1507 }
1508
1509 /// Copy fast-math-flags from an instruction rather than using the builder's
1510 /// default FMF.
1511 Value *CreateFDivFMF(Value *L, Value *R, Instruction *FMFSource,
1512 const Twine &Name = "") {
1513 if (IsFPConstrained)
1514 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fdiv,
1515 L, R, FMFSource, Name);
1516
1517 if (Value *V = foldConstant(Instruction::FDiv, L, R, Name)) return V;
1518 Instruction *I = setFPAttrs(BinaryOperator::CreateFDiv(L, R), nullptr,
1519 FMFSource->getFastMathFlags());
1520 return Insert(I, Name);
1521 }
1522
1523 Value *CreateFRem(Value *L, Value *R, const Twine &Name = "",
1524 MDNode *FPMD = nullptr) {
1525 if (IsFPConstrained)
1526 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_frem,
1527 L, R, nullptr, Name, FPMD);
1528
1529 if (Value *V = foldConstant(Instruction::FRem, L, R, Name)) return V;
1530 Instruction *I = setFPAttrs(BinaryOperator::CreateFRem(L, R), FPMD, FMF);
1531 return Insert(I, Name);
1532 }
1533
1534 /// Copy fast-math-flags from an instruction rather than using the builder's
1535 /// default FMF.
1536 Value *CreateFRemFMF(Value *L, Value *R, Instruction *FMFSource,
1537 const Twine &Name = "") {
1538 if (IsFPConstrained)
1539 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_frem,
1540 L, R, FMFSource, Name);
1541
1542 if (Value *V = foldConstant(Instruction::FRem, L, R, Name)) return V;
1543 Instruction *I = setFPAttrs(BinaryOperator::CreateFRem(L, R), nullptr,
1544 FMFSource->getFastMathFlags());
1545 return Insert(I, Name);
1546 }
1547
  /// Create a binary operator of kind \p Opc, constant-folding when both
  /// operands are constant; FP attributes are applied only when the result
  /// is an FP operation.
  Value *CreateBinOp(Instruction::BinaryOps Opc,
                     Value *LHS, Value *RHS, const Twine &Name = "",
                     MDNode *FPMathTag = nullptr) {
    if (Value *V = foldConstant(Opc, LHS, RHS, Name)) return V;
    Instruction *BinOp = BinaryOperator::Create(Opc, LHS, RHS);
    if (isa<FPMathOperator>(BinOp))
      setFPAttrs(BinOp, FPMathTag, FMF);
    return Insert(BinOp, Name);
  }
1557
  /// Create a "logical and" as 'select Cond1, Cond2, false': when Cond1 is
  /// false the result is false regardless of Cond2's (possibly poison) value,
  /// unlike a plain 'and'.
  Value *CreateLogicalAnd(Value *Cond1, Value *Cond2, const Twine &Name = "") {
    assert(Cond2->getType()->isIntOrIntVectorTy(1));
    return CreateSelect(Cond1, Cond2,
                        ConstantInt::getNullValue(Cond2->getType()), Name);
  }
1563
  /// Create a "logical or" as 'select Cond1, true, Cond2': when Cond1 is
  /// true the result is true regardless of Cond2's (possibly poison) value,
  /// unlike a plain 'or'.
  Value *CreateLogicalOr(Value *Cond1, Value *Cond2, const Twine &Name = "") {
    assert(Cond2->getType()->isIntOrIntVectorTy(1));
    return CreateSelect(Cond1, ConstantInt::getAllOnesValue(Cond2->getType()),
                        Cond2, Name);
  }
1569
1570 CallInst *CreateConstrainedFPBinOp(
1571 Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource = nullptr,
1572 const Twine &Name = "", MDNode *FPMathTag = nullptr,
1573 Optional<RoundingMode> Rounding = None,
1574 Optional<fp::ExceptionBehavior> Except = None);
1575
  /// Create an integer negation (0 - V), constant-folding when possible and
  /// setting the requested no-wrap flags on the emitted instruction.
  Value *CreateNeg(Value *V, const Twine &Name = "",
                   bool HasNUW = false, bool HasNSW = false) {
    if (auto *VC = dyn_cast<Constant>(V))
      return Insert(Folder.CreateNeg(VC, HasNUW, HasNSW), Name);
    BinaryOperator *BO = Insert(BinaryOperator::CreateNeg(V), Name);
    if (HasNUW) BO->setHasNoUnsignedWrap();
    if (HasNSW) BO->setHasNoSignedWrap();
    return BO;
  }
1585
1586 Value *CreateNSWNeg(Value *V, const Twine &Name = "") {
1587 return CreateNeg(V, Name, false, true);
1588 }
1589
1590 Value *CreateNUWNeg(Value *V, const Twine &Name = "") {
1591 return CreateNeg(V, Name, true, false);
1592 }
1593
  /// Create an fneg, constant-folding when possible; the emitted instruction
  /// carries the builder's FMF and the given !fpmath metadata.
  Value *CreateFNeg(Value *V, const Twine &Name = "",
                    MDNode *FPMathTag = nullptr) {
    if (auto *VC = dyn_cast<Constant>(V))
      return Insert(Folder.CreateFNeg(VC), Name);
    return Insert(setFPAttrs(UnaryOperator::CreateFNeg(V), FPMathTag, FMF),
                  Name);
  }
1601
1602 /// Copy fast-math-flags from an instruction rather than using the builder's
1603 /// default FMF.
1604 Value *CreateFNegFMF(Value *V, Instruction *FMFSource,
1605 const Twine &Name = "") {
1606 if (auto *VC = dyn_cast<Constant>(V))
1607 return Insert(Folder.CreateFNeg(VC), Name);
1608 return Insert(setFPAttrs(UnaryOperator::CreateFNeg(V), nullptr,
1609 FMFSource->getFastMathFlags()),
1610 Name);
1611 }
1612
  /// Create a bitwise not (xor V, -1), constant-folding when possible.
  Value *CreateNot(Value *V, const Twine &Name = "") {
    if (auto *VC = dyn_cast<Constant>(V))
      return Insert(Folder.CreateNot(VC), Name);
    return Insert(BinaryOperator::CreateNot(V), Name);
  }
1618
  /// Create a unary operator of kind \p Opc, constant-folding when possible;
  /// FP attributes are applied only when the result is an FP operation.
  Value *CreateUnOp(Instruction::UnaryOps Opc,
                    Value *V, const Twine &Name = "",
                    MDNode *FPMathTag = nullptr) {
    if (auto *VC = dyn_cast<Constant>(V))
      return Insert(Folder.CreateUnOp(Opc, VC), Name);
    Instruction *UnOp = UnaryOperator::Create(Opc, V);
    if (isa<FPMathOperator>(UnOp))
      setFPAttrs(UnOp, FPMathTag, FMF);
    return Insert(UnOp, Name);
  }
1629
1630 /// Create either a UnaryOperator or BinaryOperator depending on \p Opc.
1631 /// Correct number of operands must be passed accordingly.
1632 Value *CreateNAryOp(unsigned Opc, ArrayRef<Value *> Ops,
1633 const Twine &Name = "", MDNode *FPMathTag = nullptr);
1634
1635 //===--------------------------------------------------------------------===//
1636 // Instruction creation methods: Memory Instructions
1637 //===--------------------------------------------------------------------===//
1638
  /// Create an alloca of \p Ty in address space \p AddrSpace, aligned to the
  /// data layout's preferred alignment for that type.
  AllocaInst *CreateAlloca(Type *Ty, unsigned AddrSpace,
                           Value *ArraySize = nullptr, const Twine &Name = "") {
    // Requires a valid insertion point: BB must be set to reach the module's
    // DataLayout.
    const DataLayout &DL = BB->getModule()->getDataLayout();
    Align AllocaAlign = DL.getPrefTypeAlign(Ty);
    return Insert(new AllocaInst(Ty, AddrSpace, ArraySize, AllocaAlign), Name);
  }
1645
  /// Create an alloca of \p Ty in the data layout's alloca address space,
  /// aligned to the preferred alignment for that type.
  AllocaInst *CreateAlloca(Type *Ty, Value *ArraySize = nullptr,
                           const Twine &Name = "") {
    // Requires a valid insertion point: BB must be set to reach the module's
    // DataLayout.
    const DataLayout &DL = BB->getModule()->getDataLayout();
    Align AllocaAlign = DL.getPrefTypeAlign(Ty);
    unsigned AddrSpace = DL.getAllocaAddrSpace();
    return Insert(new AllocaInst(Ty, AddrSpace, ArraySize, AllocaAlign), Name);
  }
1653
  /// Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of
  /// converting the string to 'bool' for the isVolatile parameter.
  LoadInst *CreateLoad(Type *Ty, Value *Ptr, const char *Name) {
    // MaybeAlign() lets CreateAlignedLoad choose the default alignment.
    return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), Name);
  }
1659
  /// Create a non-volatile load of \p Ty from \p Ptr with default alignment.
  LoadInst *CreateLoad(Type *Ty, Value *Ptr, const Twine &Name = "") {
    return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), Name);
  }
1663
  /// Create a load of \p Ty from \p Ptr with default alignment and explicit
  /// volatility.
  LoadInst *CreateLoad(Type *Ty, Value *Ptr, bool isVolatile,
                       const Twine &Name = "") {
    return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), isVolatile, Name);
  }
1668
1669 // Deprecated [opaque pointer types]
1670 LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateLoad(Value *Ptr,[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, const
char *Name)
1671 const char *Name),[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, const
char *Name)
1672 "Use the version that explicitly specifies the "[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, const
char *Name)
1673 "loaded type instead")[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, const
char *Name)
{
1674 return CreateLoad(Ptr->getType()->getPointerElementType(), Ptr, Name);
1675 }
1676
1677 // Deprecated [opaque pointer types]
1678 LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateLoad(Value *Ptr,[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, const
Twine &Name = "")
1679 const Twine &Name = ""),[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, const
Twine &Name = "")
1680 "Use the version that explicitly specifies the "[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, const
Twine &Name = "")
1681 "loaded type instead")[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, const
Twine &Name = "")
{
1682 return CreateLoad(Ptr->getType()->getPointerElementType(), Ptr, Name);
1683 }
1684
1685 // Deprecated [opaque pointer types]
1686 LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateLoad(Value *Ptr,[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, bool
isVolatile, const Twine &Name = "")
1687 bool isVolatile,[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, bool
isVolatile, const Twine &Name = "")
1688 const Twine &Name = ""),[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, bool
isVolatile, const Twine &Name = "")
1689 "Use the version that explicitly specifies the "[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, bool
isVolatile, const Twine &Name = "")
1690 "loaded type instead")[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, bool
isVolatile, const Twine &Name = "")
{
1691 return CreateLoad(Ptr->getType()->getPointerElementType(), Ptr, isVolatile,
1692 Name);
1693 }
1694
1695 StoreInst *CreateStore(Value *Val, Value *Ptr, bool isVolatile = false) {
1696 return CreateAlignedStore(Val, Ptr, MaybeAlign(), isVolatile);
1697 }
1698
1699 LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
1700 const char *Name) {
1701 return CreateAlignedLoad(Ty, Ptr, Align, /*isVolatile*/false, Name);
1702 }
1703
1704 LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
1705 const Twine &Name = "") {
1706 return CreateAlignedLoad(Ty, Ptr, Align, /*isVolatile*/false, Name);
1707 }
1708
1709 LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
1710 bool isVolatile, const Twine &Name = "") {
1711 if (!Align) {
1712 const DataLayout &DL = BB->getModule()->getDataLayout();
1713 Align = DL.getABITypeAlign(Ty);
1714 }
1715 return Insert(new LoadInst(Ty, Ptr, Twine(), isVolatile, *Align), Name);
1716 }
1717
1718 // Deprecated [opaque pointer types]
1719 LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Value *Ptr,[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, const char *Name)
1720 MaybeAlign Align,[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, const char *Name)
1721 const char *Name),[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, const char *Name)
1722 "Use the version that explicitly specifies the "[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, const char *Name)
1723 "loaded type instead")[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, const char *Name)
{
1724 return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
1725 Align, Name);
1726 }
1727 // Deprecated [opaque pointer types]
1728 LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Value *Ptr,[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, const Twine &Name = "")
1729 MaybeAlign Align,[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, const Twine &Name = "")
1730 const Twine &Name = ""),[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, const Twine &Name = "")
1731 "Use the version that explicitly specifies the "[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, const Twine &Name = "")
1732 "loaded type instead")[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, const Twine &Name = "")
{
1733 return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
1734 Align, Name);
1735 }
1736 // Deprecated [opaque pointer types]
1737 LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Value *Ptr,[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, bool isVolatile, const Twine &Name
= "")
1738 MaybeAlign Align,[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, bool isVolatile, const Twine &Name
= "")
1739 bool isVolatile,[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, bool isVolatile, const Twine &Name
= "")
1740 const Twine &Name = ""),[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, bool isVolatile, const Twine &Name
= "")
1741 "Use the version that explicitly specifies the "[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, bool isVolatile, const Twine &Name
= "")
1742 "loaded type instead")[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, bool isVolatile, const Twine &Name
= "")
{
1743 return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
1744 Align, isVolatile, Name);
1745 }
1746
1747 StoreInst *CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align,
1748 bool isVolatile = false) {
1749 if (!Align) {
1750 const DataLayout &DL = BB->getModule()->getDataLayout();
1751 Align = DL.getABITypeAlign(Val->getType());
1752 }
1753 return Insert(new StoreInst(Val, Ptr, isVolatile, *Align));
1754 }
1755 FenceInst *CreateFence(AtomicOrdering Ordering,
1756 SyncScope::ID SSID = SyncScope::System,
1757 const Twine &Name = "") {
1758 return Insert(new FenceInst(Context, Ordering, SSID), Name);
1759 }
1760
1761 AtomicCmpXchgInst *
1762 CreateAtomicCmpXchg(Value *Ptr, Value *Cmp, Value *New, MaybeAlign Align,
1763 AtomicOrdering SuccessOrdering,
1764 AtomicOrdering FailureOrdering,
1765 SyncScope::ID SSID = SyncScope::System) {
1766 if (!Align) {
1767 const DataLayout &DL = BB->getModule()->getDataLayout();
1768 Align = llvm::Align(DL.getTypeStoreSize(New->getType()));
1769 }
1770
1771 return Insert(new AtomicCmpXchgInst(Ptr, Cmp, New, *Align, SuccessOrdering,
1772 FailureOrdering, SSID));
1773 }
1774
1775 AtomicRMWInst *CreateAtomicRMW(AtomicRMWInst::BinOp Op, Value *Ptr,
1776 Value *Val, MaybeAlign Align,
1777 AtomicOrdering Ordering,
1778 SyncScope::ID SSID = SyncScope::System) {
1779 if (!Align) {
1780 const DataLayout &DL = BB->getModule()->getDataLayout();
1781 Align = llvm::Align(DL.getTypeStoreSize(Val->getType()));
1782 }
1783
1784 return Insert(new AtomicRMWInst(Op, Ptr, Val, *Align, Ordering, SSID));
1785 }
1786
1787 LLVM_ATTRIBUTE_DEPRECATED([[deprecated("Use the version with explicit element type instead"
)]] Value *CreateGEP(Value *Ptr, ArrayRef<Value *> IdxList
, const Twine &Name = "")
1788 Value *CreateGEP(Value *Ptr, ArrayRef<Value *> IdxList,[[deprecated("Use the version with explicit element type instead"
)]] Value *CreateGEP(Value *Ptr, ArrayRef<Value *> IdxList
, const Twine &Name = "")
1789 const Twine &Name = ""),[[deprecated("Use the version with explicit element type instead"
)]] Value *CreateGEP(Value *Ptr, ArrayRef<Value *> IdxList
, const Twine &Name = "")
1790 "Use the version with explicit element type instead")[[deprecated("Use the version with explicit element type instead"
)]] Value *CreateGEP(Value *Ptr, ArrayRef<Value *> IdxList
, const Twine &Name = "")
{
1791 return CreateGEP(Ptr->getType()->getScalarType()->getPointerElementType(),
1792 Ptr, IdxList, Name);
1793 }
1794
1795 Value *CreateGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
1796 const Twine &Name = "") {
1797 if (auto *PC = dyn_cast<Constant>(Ptr)) {
1798 // Every index must be constant.
1799 size_t i, e;
1800 for (i = 0, e = IdxList.size(); i != e; ++i)
1801 if (!isa<Constant>(IdxList[i]))
1802 break;
1803 if (i == e)
1804 return Insert(Folder.CreateGetElementPtr(Ty, PC, IdxList), Name);
1805 }
1806 return Insert(GetElementPtrInst::Create(Ty, Ptr, IdxList), Name);
1807 }
1808
1809 LLVM_ATTRIBUTE_DEPRECATED([[deprecated("Use the version with explicit element type instead"
)]] Value *CreateInBoundsGEP(Value *Ptr, ArrayRef<Value *>
IdxList, const Twine &Name = "")
1810 Value *CreateInBoundsGEP(Value *Ptr, ArrayRef<Value *> IdxList,[[deprecated("Use the version with explicit element type instead"
)]] Value *CreateInBoundsGEP(Value *Ptr, ArrayRef<Value *>
IdxList, const Twine &Name = "")
1811 const Twine &Name = ""),[[deprecated("Use the version with explicit element type instead"
)]] Value *CreateInBoundsGEP(Value *Ptr, ArrayRef<Value *>
IdxList, const Twine &Name = "")
1812 "Use the version with explicit element type instead")[[deprecated("Use the version with explicit element type instead"
)]] Value *CreateInBoundsGEP(Value *Ptr, ArrayRef<Value *>
IdxList, const Twine &Name = "")
{
1813 return CreateInBoundsGEP(
1814 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, IdxList,
1815 Name);
1816 }
1817
1818 Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
1819 const Twine &Name = "") {
1820 if (auto *PC = dyn_cast<Constant>(Ptr)) {
1821 // Every index must be constant.
1822 size_t i, e;
1823 for (i = 0, e = IdxList.size(); i != e; ++i)
1824 if (!isa<Constant>(IdxList[i]))
1825 break;
1826 if (i == e)
1827 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, IdxList),
1828 Name);
1829 }
1830 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, IdxList), Name);
1831 }
1832
1833 Value *CreateGEP(Type *Ty, Value *Ptr, Value *Idx, const Twine &Name = "") {
1834 if (auto *PC = dyn_cast<Constant>(Ptr))
1835 if (auto *IC = dyn_cast<Constant>(Idx))
1836 return Insert(Folder.CreateGetElementPtr(Ty, PC, IC), Name);
1837 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
1838 }
1839
1840 Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, Value *Idx,
1841 const Twine &Name = "") {
1842 if (auto *PC = dyn_cast<Constant>(Ptr))
1843 if (auto *IC = dyn_cast<Constant>(Idx))
1844 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, IC), Name);
1845 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
1846 }
1847
1848 LLVM_ATTRIBUTE_DEPRECATED([[deprecated("Use the version with explicit element type instead"
)]] Value *CreateConstGEP1_32(Value *Ptr, unsigned Idx0, const
Twine &Name = "")
1849 Value *CreateConstGEP1_32(Value *Ptr, unsigned Idx0,[[deprecated("Use the version with explicit element type instead"
)]] Value *CreateConstGEP1_32(Value *Ptr, unsigned Idx0, const
Twine &Name = "")
1850 const Twine &Name = ""),[[deprecated("Use the version with explicit element type instead"
)]] Value *CreateConstGEP1_32(Value *Ptr, unsigned Idx0, const
Twine &Name = "")
1851 "Use the version with explicit element type instead")[[deprecated("Use the version with explicit element type instead"
)]] Value *CreateConstGEP1_32(Value *Ptr, unsigned Idx0, const
Twine &Name = "")
{
1852 return CreateConstGEP1_32(
1853 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, Idx0,
1854 Name);
1855 }
1856
1857 Value *CreateConstGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0,
1858 const Twine &Name = "") {
1859 Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);
1860
1861 if (auto *PC = dyn_cast<Constant>(Ptr))
1862 return Insert(Folder.CreateGetElementPtr(Ty, PC, Idx), Name);
1863
1864 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
1865 }
1866
1867 Value *CreateConstInBoundsGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0,
1868 const Twine &Name = "") {
1869 Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);
1870
1871 if (auto *PC = dyn_cast<Constant>(Ptr))
1872 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idx), Name);
1873
1874 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
1875 }
1876
1877 Value *CreateConstGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0, unsigned Idx1,
1878 const Twine &Name = "") {
1879 Value *Idxs[] = {
1880 ConstantInt::get(Type::getInt32Ty(Context), Idx0),
1881 ConstantInt::get(Type::getInt32Ty(Context), Idx1)
1882 };
1883
1884 if (auto *PC = dyn_cast<Constant>(Ptr))
1885 return Insert(Folder.CreateGetElementPtr(Ty, PC, Idxs), Name);
1886
1887 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name);
1888 }
1889
1890 Value *CreateConstInBoundsGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0,
1891 unsigned Idx1, const Twine &Name = "") {
1892 Value *Idxs[] = {
1893 ConstantInt::get(Type::getInt32Ty(Context), Idx0),
1894 ConstantInt::get(Type::getInt32Ty(Context), Idx1)
1895 };
1896
1897 if (auto *PC = dyn_cast<Constant>(Ptr))
1898 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idxs), Name);
1899
1900 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name);
1901 }
1902
1903 Value *CreateConstGEP1_64(Type *Ty, Value *Ptr, uint64_t Idx0,
1904 const Twine &Name = "") {
1905 Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);
1906
1907 if (auto *PC = dyn_cast<Constant>(Ptr))
1908 return Insert(Folder.CreateGetElementPtr(Ty, PC, Idx), Name);
1909
1910 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
1911 }
1912
1913 LLVM_ATTRIBUTE_DEPRECATED([[deprecated("Use the version with explicit element type instead"
)]] Value *CreateConstGEP1_64(Value *Ptr, uint64_t Idx0, const
Twine &Name = "")
1914 Value *CreateConstGEP1_64(Value *Ptr, uint64_t Idx0,[[deprecated("Use the version with explicit element type instead"
)]] Value *CreateConstGEP1_64(Value *Ptr, uint64_t Idx0, const
Twine &Name = "")
1915 const Twine &Name = ""),[[deprecated("Use the version with explicit element type instead"
)]] Value *CreateConstGEP1_64(Value *Ptr, uint64_t Idx0, const
Twine &Name = "")
1916 "Use the version with explicit element type instead")[[deprecated("Use the version with explicit element type instead"
)]] Value *CreateConstGEP1_64(Value *Ptr, uint64_t Idx0, const
Twine &Name = "")
{
1917 return CreateConstGEP1_64(
1918 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, Idx0,
1919 Name);
1920 }
1921
1922 Value *CreateConstInBoundsGEP1_64(Type *Ty, Value *Ptr, uint64_t Idx0,
1923 const Twine &Name = "") {
1924 Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);
1925
1926 if (auto *PC = dyn_cast<Constant>(Ptr))
1927 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idx), Name);
1928
1929 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
1930 }
1931
1932 LLVM_ATTRIBUTE_DEPRECATED([[deprecated("Use the version with explicit element type instead"
)]] Value *CreateConstInBoundsGEP1_64(Value *Ptr, uint64_t Idx0
, const Twine &Name = "")
1933 Value *CreateConstInBoundsGEP1_64(Value *Ptr, uint64_t Idx0,[[deprecated("Use the version with explicit element type instead"
)]] Value *CreateConstInBoundsGEP1_64(Value *Ptr, uint64_t Idx0
, const Twine &Name = "")
1934 const Twine &Name = ""),[[deprecated("Use the version with explicit element type instead"
)]] Value *CreateConstInBoundsGEP1_64(Value *Ptr, uint64_t Idx0
, const Twine &Name = "")
1935 "Use the version with explicit element type instead")[[deprecated("Use the version with explicit element type instead"
)]] Value *CreateConstInBoundsGEP1_64(Value *Ptr, uint64_t Idx0
, const Twine &Name = "")
{
1936 return CreateConstInBoundsGEP1_64(
1937 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, Idx0,
1938 Name);
1939 }
1940
1941 Value *CreateConstGEP2_64(Type *Ty, Value *Ptr, uint64_t Idx0, uint64_t Idx1,
1942 const Twine &Name = "") {
1943 Value *Idxs[] = {
1944 ConstantInt::get(Type::getInt64Ty(Context), Idx0),
1945 ConstantInt::get(Type::getInt64Ty(Context), Idx1)
1946 };
1947
1948 if (auto *PC = dyn_cast<Constant>(Ptr))
1949 return Insert(Folder.CreateGetElementPtr(Ty, PC, Idxs), Name);
1950
1951 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name);
1952 }
1953
1954 LLVM_ATTRIBUTE_DEPRECATED([[deprecated("Use the version with explicit element type instead"
)]] Value *CreateConstGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t
Idx1, const Twine &Name = "")
1955 Value *CreateConstGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t Idx1,[[deprecated("Use the version with explicit element type instead"
)]] Value *CreateConstGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t
Idx1, const Twine &Name = "")
1956 const Twine &Name = ""),[[deprecated("Use the version with explicit element type instead"
)]] Value *CreateConstGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t
Idx1, const Twine &Name = "")
1957 "Use the version with explicit element type instead")[[deprecated("Use the version with explicit element type instead"
)]] Value *CreateConstGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t
Idx1, const Twine &Name = "")
{
1958 return CreateConstGEP2_64(
1959 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, Idx0,
1960 Idx1, Name);
1961 }
1962
1963 Value *CreateConstInBoundsGEP2_64(Type *Ty, Value *Ptr, uint64_t Idx0,
1964 uint64_t Idx1, const Twine &Name = "") {
1965 Value *Idxs[] = {
1966 ConstantInt::get(Type::getInt64Ty(Context), Idx0),
1967 ConstantInt::get(Type::getInt64Ty(Context), Idx1)
1968 };
1969
1970 if (auto *PC = dyn_cast<Constant>(Ptr))
1971 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idxs), Name);
1972
1973 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name);
1974 }
1975
1976 LLVM_ATTRIBUTE_DEPRECATED([[deprecated("Use the version with explicit element type instead"
)]] Value *CreateConstInBoundsGEP2_64(Value *Ptr, uint64_t Idx0
, uint64_t Idx1, const Twine &Name = "")
1977 Value *CreateConstInBoundsGEP2_64(Value *Ptr, uint64_t Idx0,[[deprecated("Use the version with explicit element type instead"
)]] Value *CreateConstInBoundsGEP2_64(Value *Ptr, uint64_t Idx0
, uint64_t Idx1, const Twine &Name = "")
1978 uint64_t Idx1, const Twine &Name = ""),[[deprecated("Use the version with explicit element type instead"
)]] Value *CreateConstInBoundsGEP2_64(Value *Ptr, uint64_t Idx0
, uint64_t Idx1, const Twine &Name = "")
1979 "Use the version with explicit element type instead")[[deprecated("Use the version with explicit element type instead"
)]] Value *CreateConstInBoundsGEP2_64(Value *Ptr, uint64_t Idx0
, uint64_t Idx1, const Twine &Name = "")
{
1980 return CreateConstInBoundsGEP2_64(
1981 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, Idx0,
1982 Idx1, Name);
1983 }
1984
1985 Value *CreateStructGEP(Type *Ty, Value *Ptr, unsigned Idx,
1986 const Twine &Name = "") {
1987 return CreateConstInBoundsGEP2_32(Ty, Ptr, 0, Idx, Name);
1988 }
1989
1990 LLVM_ATTRIBUTE_DEPRECATED([[deprecated("Use the version with explicit element type instead"
)]] Value *CreateStructGEP(Value *Ptr, unsigned Idx, const Twine
&Name = "")
1991 Value *CreateStructGEP(Value *Ptr, unsigned Idx, const Twine &Name = ""),[[deprecated("Use the version with explicit element type instead"
)]] Value *CreateStructGEP(Value *Ptr, unsigned Idx, const Twine
&Name = "")
1992 "Use the version with explicit element type instead")[[deprecated("Use the version with explicit element type instead"
)]] Value *CreateStructGEP(Value *Ptr, unsigned Idx, const Twine
&Name = "")
{
1993 return CreateConstInBoundsGEP2_32(
1994 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, 0, Idx,
1995 Name);
1996 }
1997
1998 /// Same as CreateGlobalString, but return a pointer with "i8*" type
1999 /// instead of a pointer to array of i8.
2000 ///
2001 /// If no module is given via \p M, it is take from the insertion point basic
2002 /// block.
2003 Constant *CreateGlobalStringPtr(StringRef Str, const Twine &Name = "",
2004 unsigned AddressSpace = 0,
2005 Module *M = nullptr) {
2006 GlobalVariable *GV = CreateGlobalString(Str, Name, AddressSpace, M);
2007 Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
2008 Constant *Indices[] = {Zero, Zero};
2009 return ConstantExpr::getInBoundsGetElementPtr(GV->getValueType(), GV,
2010 Indices);
2011 }
2012
2013 //===--------------------------------------------------------------------===//
2014 // Instruction creation methods: Cast/Conversion Operators
2015 //===--------------------------------------------------------------------===//
2016
2017 Value *CreateTrunc(Value *V, Type *DestTy, const Twine &Name = "") {
2018 return CreateCast(Instruction::Trunc, V, DestTy, Name);
2019 }
2020
2021 Value *CreateZExt(Value *V, Type *DestTy, const Twine &Name = "") {
2022 return CreateCast(Instruction::ZExt, V, DestTy, Name);
2023 }
2024
2025 Value *CreateSExt(Value *V, Type *DestTy, const Twine &Name = "") {
2026 return CreateCast(Instruction::SExt, V, DestTy, Name);
2027 }
2028
2029 /// Create a ZExt or Trunc from the integer value V to DestTy. Return
2030 /// the value untouched if the type of V is already DestTy.
2031 Value *CreateZExtOrTrunc(Value *V, Type *DestTy,
2032 const Twine &Name = "") {
2033 assert(V->getType()->isIntOrIntVectorTy() &&((void)0)
2034 DestTy->isIntOrIntVectorTy() &&((void)0)
2035 "Can only zero extend/truncate integers!")((void)0);
2036 Type *VTy = V->getType();
2037 if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits())
2038 return CreateZExt(V, DestTy, Name);
2039 if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
2040 return CreateTrunc(V, DestTy, Name);
2041 return V;
2042 }
2043
2044 /// Create a SExt or Trunc from the integer value V to DestTy. Return
2045 /// the value untouched if the type of V is already DestTy.
2046 Value *CreateSExtOrTrunc(Value *V, Type *DestTy,
2047 const Twine &Name = "") {
2048 assert(V->getType()->isIntOrIntVectorTy() &&((void)0)
2049 DestTy->isIntOrIntVectorTy() &&((void)0)
2050 "Can only sign extend/truncate integers!")((void)0);
2051 Type *VTy = V->getType();
2052 if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits())
2053 return CreateSExt(V, DestTy, Name);
2054 if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
2055 return CreateTrunc(V, DestTy, Name);
2056 return V;
2057 }
2058
2059 Value *CreateFPToUI(Value *V, Type *DestTy, const Twine &Name = "") {
2060 if (IsFPConstrained)
2061 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fptoui,
2062 V, DestTy, nullptr, Name);
2063 return CreateCast(Instruction::FPToUI, V, DestTy, Name);
2064 }
2065
2066 Value *CreateFPToSI(Value *V, Type *DestTy, const Twine &Name = "") {
2067 if (IsFPConstrained)
2068 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fptosi,
2069 V, DestTy, nullptr, Name);
2070 return CreateCast(Instruction::FPToSI, V, DestTy, Name);
2071 }
2072
2073 Value *CreateUIToFP(Value *V, Type *DestTy, const Twine &Name = ""){
2074 if (IsFPConstrained)
2075 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_uitofp,
2076 V, DestTy, nullptr, Name);
2077 return CreateCast(Instruction::UIToFP, V, DestTy, Name);
2078 }
2079
2080 Value *CreateSIToFP(Value *V, Type *DestTy, const Twine &Name = ""){
2081 if (IsFPConstrained)
2082 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_sitofp,
2083 V, DestTy, nullptr, Name);
2084 return CreateCast(Instruction::SIToFP, V, DestTy, Name);
2085 }
2086
2087 Value *CreateFPTrunc(Value *V, Type *DestTy,
2088 const Twine &Name = "") {
2089 if (IsFPConstrained)
2090 return CreateConstrainedFPCast(
2091 Intrinsic::experimental_constrained_fptrunc, V, DestTy, nullptr,
2092 Name);
2093 return CreateCast(Instruction::FPTrunc, V, DestTy, Name);
2094 }
2095
2096 Value *CreateFPExt(Value *V, Type *DestTy, const Twine &Name = "") {
2097 if (IsFPConstrained)
2098 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fpext,
2099 V, DestTy, nullptr, Name);
2100 return CreateCast(Instruction::FPExt, V, DestTy, Name);
2101 }
2102
2103 Value *CreatePtrToInt(Value *V, Type *DestTy,
2104 const Twine &Name = "") {
2105 return CreateCast(Instruction::PtrToInt, V, DestTy, Name);
2106 }
2107
2108 Value *CreateIntToPtr(Value *V, Type *DestTy,
2109 const Twine &Name = "") {
2110 return CreateCast(Instruction::IntToPtr, V, DestTy, Name);
2111 }
2112
2113 Value *CreateBitCast(Value *V, Type *DestTy,
2114 const Twine &Name = "") {
2115 return CreateCast(Instruction::BitCast, V, DestTy, Name);
2116 }
2117
2118 Value *CreateAddrSpaceCast(Value *V, Type *DestTy,
2119 const Twine &Name = "") {
2120 return CreateCast(Instruction::AddrSpaceCast, V, DestTy, Name);
2121 }
2122
2123 Value *CreateZExtOrBitCast(Value *V, Type *DestTy,
2124 const Twine &Name = "") {
2125 if (V->getType() == DestTy)
2126 return V;
2127 if (auto *VC = dyn_cast<Constant>(V))
2128 return Insert(Folder.CreateZExtOrBitCast(VC, DestTy), Name);
2129 return Insert(CastInst::CreateZExtOrBitCast(V, DestTy), Name);
2130 }
2131
2132 Value *CreateSExtOrBitCast(Value *V, Type *DestTy,
2133 const Twine &Name = "") {
2134 if (V->getType() == DestTy)
2135 return V;
2136 if (auto *VC = dyn_cast<Constant>(V))
2137 return Insert(Folder.CreateSExtOrBitCast(VC, DestTy), Name);
2138 return Insert(CastInst::CreateSExtOrBitCast(V, DestTy), Name);
2139 }
2140
2141 Value *CreateTruncOrBitCast(Value *V, Type *DestTy,
2142 const Twine &Name = "") {
2143 if (V->getType() == DestTy)
2144 return V;
2145 if (auto *VC = dyn_cast<Constant>(V))
2146 return Insert(Folder.CreateTruncOrBitCast(VC, DestTy), Name);
2147 return Insert(CastInst::CreateTruncOrBitCast(V, DestTy), Name);
2148 }
2149
2150 Value *CreateCast(Instruction::CastOps Op, Value *V, Type *DestTy,
2151 const Twine &Name = "") {
2152 if (V->getType() == DestTy)
2153 return V;
2154 if (auto *VC = dyn_cast<Constant>(V))
2155 return Insert(Folder.CreateCast(Op, VC, DestTy), Name);
2156 return Insert(CastInst::Create(Op, V, DestTy), Name);
2157 }
2158
2159 Value *CreatePointerCast(Value *V, Type *DestTy,
2160 const Twine &Name = "") {
2161 if (V->getType() == DestTy)
2162 return V;
2163 if (auto *VC = dyn_cast<Constant>(V))
2164 return Insert(Folder.CreatePointerCast(VC, DestTy), Name);
2165 return Insert(CastInst::CreatePointerCast(V, DestTy), Name);
2166 }
2167
2168 Value *CreatePointerBitCastOrAddrSpaceCast(Value *V, Type *DestTy,
2169 const Twine &Name = "") {
2170 if (V->getType() == DestTy)
2171 return V;
2172
2173 if (auto *VC = dyn_cast<Constant>(V)) {
2174 return Insert(Folder.CreatePointerBitCastOrAddrSpaceCast(VC, DestTy),
2175 Name);
2176 }
2177
2178 return Insert(CastInst::CreatePointerBitCastOrAddrSpaceCast(V, DestTy),
2179 Name);
2180 }
2181
2182 Value *CreateIntCast(Value *V, Type *DestTy, bool isSigned,
2183 const Twine &Name = "") {
2184 if (V->getType() == DestTy)
2185 return V;
2186 if (auto *VC = dyn_cast<Constant>(V))
2187 return Insert(Folder.CreateIntCast(VC, DestTy, isSigned), Name);
2188 return Insert(CastInst::CreateIntegerCast(V, DestTy, isSigned), Name);
2189 }
2190
2191 Value *CreateBitOrPointerCast(Value *V, Type *DestTy,
2192 const Twine &Name = "") {
2193 if (V->getType() == DestTy)
2194 return V;
2195 if (V->getType()->isPtrOrPtrVectorTy() && DestTy->isIntOrIntVectorTy())
2196 return CreatePtrToInt(V, DestTy, Name);
2197 if (V->getType()->isIntOrIntVectorTy() && DestTy->isPtrOrPtrVectorTy())
2198 return CreateIntToPtr(V, DestTy, Name);
2199
2200 return CreateBitCast(V, DestTy, Name);
2201 }
2202
2203 Value *CreateFPCast(Value *V, Type *DestTy, const Twine &Name = "") {
2204 if (V->getType() == DestTy)
2205 return V;
2206 if (auto *VC = dyn_cast<Constant>(V))
2207 return Insert(Folder.CreateFPCast(VC, DestTy), Name);
2208 return Insert(CastInst::CreateFPCast(V, DestTy), Name);
2209 }
2210
2211 CallInst *CreateConstrainedFPCast(
2212 Intrinsic::ID ID, Value *V, Type *DestTy,
2213 Instruction *FMFSource = nullptr, const Twine &Name = "",
2214 MDNode *FPMathTag = nullptr,
2215 Optional<RoundingMode> Rounding = None,
2216 Optional<fp::ExceptionBehavior> Except = None);
2217
2218 // Provided to resolve 'CreateIntCast(Ptr, Ptr, "...")', giving a
2219 // compile time error, instead of converting the string to bool for the
2220 // isSigned parameter.
2221 Value *CreateIntCast(Value *, Type *, const char *) = delete;
2222
2223 //===--------------------------------------------------------------------===//
2224 // Instruction creation methods: Compare Instructions
2225 //===--------------------------------------------------------------------===//
2226
2227 Value *CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name = "") {
2228 return CreateICmp(ICmpInst::ICMP_EQ, LHS, RHS, Name);
2229 }
2230
2231 Value *CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name = "") {
2232 return CreateICmp(ICmpInst::ICMP_NE, LHS, RHS, Name);
2233 }
2234
2235 Value *CreateICmpUGT(Value *LHS, Value *RHS, const Twine &Name = "") {
2236 return CreateICmp(ICmpInst::ICMP_UGT, LHS, RHS, Name);
2237 }
2238
2239 Value *CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name = "") {
2240 return CreateICmp(ICmpInst::ICMP_UGE, LHS, RHS, Name);
2241 }
2242
2243 Value *CreateICmpULT(Value *LHS, Value *RHS, const Twine &Name = "") {
2244 return CreateICmp(ICmpInst::ICMP_ULT, LHS, RHS, Name);
2245 }
2246
2247 Value *CreateICmpULE(Value *LHS, Value *RHS, const Twine &Name = "") {
2248 return CreateICmp(ICmpInst::ICMP_ULE, LHS, RHS, Name);
2249 }
2250
2251 Value *CreateICmpSGT(Value *LHS, Value *RHS, const Twine &Name = "") {
2252 return CreateICmp(ICmpInst::ICMP_SGT, LHS, RHS, Name);
2253 }
2254
2255 Value *CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name = "") {
2256 return CreateICmp(ICmpInst::ICMP_SGE, LHS, RHS, Name);
57
Passing null pointer value via 2nd parameter 'LHS'
58
Calling 'IRBuilderBase::CreateICmp'
2257 }
2258
2259 Value *CreateICmpSLT(Value *LHS, Value *RHS, const Twine &Name = "") {
2260 return CreateICmp(ICmpInst::ICMP_SLT, LHS, RHS, Name);
2261 }
2262
2263 Value *CreateICmpSLE(Value *LHS, Value *RHS, const Twine &Name = "") {
2264 return CreateICmp(ICmpInst::ICMP_SLE, LHS, RHS, Name);
2265 }
2266
2267 Value *CreateFCmpOEQ(Value *LHS, Value *RHS, const Twine &Name = "",
2268 MDNode *FPMathTag = nullptr) {
2269 return CreateFCmp(FCmpInst::FCMP_OEQ, LHS, RHS, Name, FPMathTag);
2270 }
2271
2272 Value *CreateFCmpOGT(Value *LHS, Value *RHS, const Twine &Name = "",
2273 MDNode *FPMathTag = nullptr) {
2274 return CreateFCmp(FCmpInst::FCMP_OGT, LHS, RHS, Name, FPMathTag);
2275 }
2276
2277 Value *CreateFCmpOGE(Value *LHS, Value *RHS, const Twine &Name = "",
2278 MDNode *FPMathTag = nullptr) {
2279 return CreateFCmp(FCmpInst::FCMP_OGE, LHS, RHS, Name, FPMathTag);
2280 }
2281
2282 Value *CreateFCmpOLT(Value *LHS, Value *RHS, const Twine &Name = "",
2283 MDNode *FPMathTag = nullptr) {
2284 return CreateFCmp(FCmpInst::FCMP_OLT, LHS, RHS, Name, FPMathTag);
2285 }
2286
2287 Value *CreateFCmpOLE(Value *LHS, Value *RHS, const Twine &Name = "",
2288 MDNode *FPMathTag = nullptr) {
2289 return CreateFCmp(FCmpInst::FCMP_OLE, LHS, RHS, Name, FPMathTag);
2290 }
2291
2292 Value *CreateFCmpONE(Value *LHS, Value *RHS, const Twine &Name = "",
2293 MDNode *FPMathTag = nullptr) {
2294 return CreateFCmp(FCmpInst::FCMP_ONE, LHS, RHS, Name, FPMathTag);
2295 }
2296
2297 Value *CreateFCmpORD(Value *LHS, Value *RHS, const Twine &Name = "",
2298 MDNode *FPMathTag = nullptr) {
2299 return CreateFCmp(FCmpInst::FCMP_ORD, LHS, RHS, Name, FPMathTag);
2300 }
2301
2302 Value *CreateFCmpUNO(Value *LHS, Value *RHS, const Twine &Name = "",
2303 MDNode *FPMathTag = nullptr) {
2304 return CreateFCmp(FCmpInst::FCMP_UNO, LHS, RHS, Name, FPMathTag);
2305 }
2306
2307 Value *CreateFCmpUEQ(Value *LHS, Value *RHS, const Twine &Name = "",
2308 MDNode *FPMathTag = nullptr) {
2309 return CreateFCmp(FCmpInst::FCMP_UEQ, LHS, RHS, Name, FPMathTag);
2310 }
2311
2312 Value *CreateFCmpUGT(Value *LHS, Value *RHS, const Twine &Name = "",
2313 MDNode *FPMathTag = nullptr) {
2314 return CreateFCmp(FCmpInst::FCMP_UGT, LHS, RHS, Name, FPMathTag);
2315 }
2316
2317 Value *CreateFCmpUGE(Value *LHS, Value *RHS, const Twine &Name = "",
2318 MDNode *FPMathTag = nullptr) {
2319 return CreateFCmp(FCmpInst::FCMP_UGE, LHS, RHS, Name, FPMathTag);
2320 }
2321
2322 Value *CreateFCmpULT(Value *LHS, Value *RHS, const Twine &Name = "",
2323 MDNode *FPMathTag = nullptr) {
2324 return CreateFCmp(FCmpInst::FCMP_ULT, LHS, RHS, Name, FPMathTag);
2325 }
2326
2327 Value *CreateFCmpULE(Value *LHS, Value *RHS, const Twine &Name = "",
2328 MDNode *FPMathTag = nullptr) {
2329 return CreateFCmp(FCmpInst::FCMP_ULE, LHS, RHS, Name, FPMathTag);
2330 }
2331
2332 Value *CreateFCmpUNE(Value *LHS, Value *RHS, const Twine &Name = "",
2333 MDNode *FPMathTag = nullptr) {
2334 return CreateFCmp(FCmpInst::FCMP_UNE, LHS, RHS, Name, FPMathTag);
2335 }
2336
2337 Value *CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS,
2338 const Twine &Name = "") {
2339 if (auto *LC = dyn_cast<Constant>(LHS))
59
Assuming 'LC' is null
60
Taking false branch
2340 if (auto *RC = dyn_cast<Constant>(RHS))
2341 return Insert(Folder.CreateICmp(P, LC, RC), Name);
2342 return Insert(new ICmpInst(P, LHS, RHS), Name);
61
Passing null pointer value via 2nd parameter 'LHS'
62
Calling constructor for 'ICmpInst'
2343 }
2344
2345 // Create a quiet floating-point comparison (i.e. one that raises an FP
2346 // exception only in the case where an input is a signaling NaN).
2347 // Note that this differs from CreateFCmpS only if IsFPConstrained is true.
2348 Value *CreateFCmp(CmpInst::Predicate P, Value *LHS, Value *RHS,
2349 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2350 return CreateFCmpHelper(P, LHS, RHS, Name, FPMathTag, false);
2351 }
2352
2353 Value *CreateCmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
2354 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2355 return CmpInst::isFPPredicate(Pred)
2356 ? CreateFCmp(Pred, LHS, RHS, Name, FPMathTag)
2357 : CreateICmp(Pred, LHS, RHS, Name);
2358 }
2359
2360 // Create a signaling floating-point comparison (i.e. one that raises an FP
2361 // exception whenever an input is any NaN, signaling or quiet).
2362 // Note that this differs from CreateFCmp only if IsFPConstrained is true.
2363 Value *CreateFCmpS(CmpInst::Predicate P, Value *LHS, Value *RHS,
2364 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2365 return CreateFCmpHelper(P, LHS, RHS, Name, FPMathTag, true);
2366 }
2367
2368private:
2369 // Helper routine to create either a signaling or a quiet FP comparison.
2370 Value *CreateFCmpHelper(CmpInst::Predicate P, Value *LHS, Value *RHS,
2371 const Twine &Name, MDNode *FPMathTag,
2372 bool IsSignaling);
2373
2374public:
2375 CallInst *CreateConstrainedFPCmp(
2376 Intrinsic::ID ID, CmpInst::Predicate P, Value *L, Value *R,
2377 const Twine &Name = "", Optional<fp::ExceptionBehavior> Except = None);
2378
2379 //===--------------------------------------------------------------------===//
2380 // Instruction creation methods: Other Instructions
2381 //===--------------------------------------------------------------------===//
2382
2383 PHINode *CreatePHI(Type *Ty, unsigned NumReservedValues,
2384 const Twine &Name = "") {
2385 PHINode *Phi = PHINode::Create(Ty, NumReservedValues);
2386 if (isa<FPMathOperator>(Phi))
2387 setFPAttrs(Phi, nullptr /* MDNode* */, FMF);
2388 return Insert(Phi, Name);
2389 }
2390
2391 CallInst *CreateCall(FunctionType *FTy, Value *Callee,
2392 ArrayRef<Value *> Args = None, const Twine &Name = "",
2393 MDNode *FPMathTag = nullptr) {
2394 CallInst *CI = CallInst::Create(FTy, Callee, Args, DefaultOperandBundles);
2395 if (IsFPConstrained)
2396 setConstrainedFPCallAttr(CI);
2397 if (isa<FPMathOperator>(CI))
2398 setFPAttrs(CI, FPMathTag, FMF);
2399 return Insert(CI, Name);
2400 }
2401
2402 CallInst *CreateCall(FunctionType *FTy, Value *Callee, ArrayRef<Value *> Args,
2403 ArrayRef<OperandBundleDef> OpBundles,
2404 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2405 CallInst *CI = CallInst::Create(FTy, Callee, Args, OpBundles);
2406 if (IsFPConstrained)
2407 setConstrainedFPCallAttr(CI);
2408 if (isa<FPMathOperator>(CI))
2409 setFPAttrs(CI, FPMathTag, FMF);
2410 return Insert(CI, Name);
2411 }
2412
2413 CallInst *CreateCall(FunctionCallee Callee, ArrayRef<Value *> Args = None,
2414 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2415 return CreateCall(Callee.getFunctionType(), Callee.getCallee(), Args, Name,
2416 FPMathTag);
2417 }
2418
2419 CallInst *CreateCall(FunctionCallee Callee, ArrayRef<Value *> Args,
2420 ArrayRef<OperandBundleDef> OpBundles,
2421 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2422 return CreateCall(Callee.getFunctionType(), Callee.getCallee(), Args,
2423 OpBundles, Name, FPMathTag);
2424 }
2425
2426 CallInst *CreateConstrainedFPCall(
2427 Function *Callee, ArrayRef<Value *> Args, const Twine &Name = "",
2428 Optional<RoundingMode> Rounding = None,
2429 Optional<fp::ExceptionBehavior> Except = None);
2430
2431 Value *CreateSelect(Value *C, Value *True, Value *False,
2432 const Twine &Name = "", Instruction *MDFrom = nullptr);
2433
2434 VAArgInst *CreateVAArg(Value *List, Type *Ty, const Twine &Name = "") {
2435 return Insert(new VAArgInst(List, Ty), Name);
2436 }
2437
2438 Value *CreateExtractElement(Value *Vec, Value *Idx,
2439 const Twine &Name = "") {
2440 if (auto *VC = dyn_cast<Constant>(Vec))
2441 if (auto *IC = dyn_cast<Constant>(Idx))
2442 return Insert(Folder.CreateExtractElement(VC, IC), Name);
2443 return Insert(ExtractElementInst::Create(Vec, Idx), Name);
2444 }
2445
2446 Value *CreateExtractElement(Value *Vec, uint64_t Idx,
2447 const Twine &Name = "") {
2448 return CreateExtractElement(Vec, getInt64(Idx), Name);
2449 }
2450
2451 Value *CreateInsertElement(Value *Vec, Value *NewElt, Value *Idx,
2452 const Twine &Name = "") {
2453 if (auto *VC = dyn_cast<Constant>(Vec))
2454 if (auto *NC = dyn_cast<Constant>(NewElt))
2455 if (auto *IC = dyn_cast<Constant>(Idx))
2456 return Insert(Folder.CreateInsertElement(VC, NC, IC), Name);
2457 return Insert(InsertElementInst::Create(Vec, NewElt, Idx), Name);
2458 }
2459
2460 Value *CreateInsertElement(Value *Vec, Value *NewElt, uint64_t Idx,
2461 const Twine &Name = "") {
2462 return CreateInsertElement(Vec, NewElt, getInt64(Idx), Name);
2463 }
2464
2465 Value *CreateShuffleVector(Value *V1, Value *V2, Value *Mask,
2466 const Twine &Name = "") {
2467 SmallVector<int, 16> IntMask;
2468 ShuffleVectorInst::getShuffleMask(cast<Constant>(Mask), IntMask);
2469 return CreateShuffleVector(V1, V2, IntMask, Name);
2470 }
2471
2472 LLVM_ATTRIBUTE_DEPRECATED(Value *CreateShuffleVector(Value *V1, Value *V2,[[deprecated("Pass indices as 'int' instead")]] Value *CreateShuffleVector
(Value *V1, Value *V2, ArrayRef<uint32_t> Mask, const Twine
&Name = "")
2473 ArrayRef<uint32_t> Mask,[[deprecated("Pass indices as 'int' instead")]] Value *CreateShuffleVector
(Value *V1, Value *V2, ArrayRef<uint32_t> Mask, const Twine
&Name = "")
2474 const Twine &Name = ""),[[deprecated("Pass indices as 'int' instead")]] Value *CreateShuffleVector
(Value *V1, Value *V2, ArrayRef<uint32_t> Mask, const Twine
&Name = "")
2475 "Pass indices as 'int' instead")[[deprecated("Pass indices as 'int' instead")]] Value *CreateShuffleVector
(Value *V1, Value *V2, ArrayRef<uint32_t> Mask, const Twine
&Name = "")
{
2476 SmallVector<int, 16> IntMask;
2477 IntMask.assign(Mask.begin(), Mask.end());
2478 return CreateShuffleVector(V1, V2, IntMask, Name);
2479 }
2480
2481 /// See class ShuffleVectorInst for a description of the mask representation.
2482 Value *CreateShuffleVector(Value *V1, Value *V2, ArrayRef<int> Mask,
2483 const Twine &Name = "") {
2484 if (auto *V1C = dyn_cast<Constant>(V1))
2485 if (auto *V2C = dyn_cast<Constant>(V2))
2486 return Insert(Folder.CreateShuffleVector(V1C, V2C, Mask), Name);
2487 return Insert(new ShuffleVectorInst(V1, V2, Mask), Name);
2488 }
2489
2490 /// Create a unary shuffle. The second vector operand of the IR instruction
2491 /// is poison.
2492 Value *CreateShuffleVector(Value *V, ArrayRef<int> Mask,
2493 const Twine &Name = "") {
2494 return CreateShuffleVector(V, PoisonValue::get(V->getType()), Mask, Name);
2495 }
2496
2497 Value *CreateExtractValue(Value *Agg,
2498 ArrayRef<unsigned> Idxs,
2499 const Twine &Name = "") {
2500 if (auto *AggC = dyn_cast<Constant>(Agg))
2501 return Insert(Folder.CreateExtractValue(AggC, Idxs), Name);
2502 return Insert(ExtractValueInst::Create(Agg, Idxs), Name);
2503 }
2504
2505 Value *CreateInsertValue(Value *Agg, Value *Val,
2506 ArrayRef<unsigned> Idxs,
2507 const Twine &Name = "") {
2508 if (auto *AggC = dyn_cast<Constant>(Agg))
2509 if (auto *ValC = dyn_cast<Constant>(Val))
2510 return Insert(Folder.CreateInsertValue(AggC, ValC, Idxs), Name);
2511 return Insert(InsertValueInst::Create(Agg, Val, Idxs), Name);
2512 }
2513
2514 LandingPadInst *CreateLandingPad(Type *Ty, unsigned NumClauses,
2515 const Twine &Name = "") {
2516 return Insert(LandingPadInst::Create(Ty, NumClauses), Name);
2517 }
2518
2519 Value *CreateFreeze(Value *V, const Twine &Name = "") {
2520 return Insert(new FreezeInst(V), Name);
2521 }
2522
2523 //===--------------------------------------------------------------------===//
2524 // Utility creation methods
2525 //===--------------------------------------------------------------------===//
2526
2527 /// Return an i1 value testing if \p Arg is null.
2528 Value *CreateIsNull(Value *Arg, const Twine &Name = "") {
2529 return CreateICmpEQ(Arg, Constant::getNullValue(Arg->getType()),
2530 Name);
2531 }
2532
2533 /// Return an i1 value testing if \p Arg is not null.
2534 Value *CreateIsNotNull(Value *Arg, const Twine &Name = "") {
2535 return CreateICmpNE(Arg, Constant::getNullValue(Arg->getType()),
2536 Name);
2537 }
2538
2539 /// Return the i64 difference between two pointer values, dividing out
2540 /// the size of the pointed-to objects.
2541 ///
2542 /// This is intended to implement C-style pointer subtraction. As such, the
2543 /// pointers must be appropriately aligned for their element types and
2544 /// pointing into the same object.
2545 Value *CreatePtrDiff(Value *LHS, Value *RHS, const Twine &Name = "");
2546
2547 /// Create a launder.invariant.group intrinsic call. If Ptr type is
2548 /// different from pointer to i8, it's casted to pointer to i8 in the same
2549 /// address space before call and casted back to Ptr type after call.
2550 Value *CreateLaunderInvariantGroup(Value *Ptr);
2551
2552 /// \brief Create a strip.invariant.group intrinsic call. If Ptr type is
2553 /// different from pointer to i8, it's casted to pointer to i8 in the same
2554 /// address space before call and casted back to Ptr type after call.
2555 Value *CreateStripInvariantGroup(Value *Ptr);
2556
2557 /// Return a vector value that contains the vector V reversed
2558 Value *CreateVectorReverse(Value *V, const Twine &Name = "");
2559
2560 /// Return a vector splice intrinsic if using scalable vectors, otherwise
2561 /// return a shufflevector. If the immediate is positive, a vector is
2562 /// extracted from concat(V1, V2), starting at Imm. If the immediate
2563 /// is negative, we extract -Imm elements from V1 and the remaining
2564 /// elements from V2. Imm is a signed integer in the range
2565 /// -VL <= Imm < VL (where VL is the runtime vector length of the
2566 /// source/result vector)
2567 Value *CreateVectorSplice(Value *V1, Value *V2, int64_t Imm,
2568 const Twine &Name = "");
2569
2570 /// Return a vector value that contains \arg V broadcasted to \p
2571 /// NumElts elements.
2572 Value *CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name = "");
2573
2574 /// Return a vector value that contains \arg V broadcasted to \p
2575 /// EC elements.
2576 Value *CreateVectorSplat(ElementCount EC, Value *V, const Twine &Name = "");
2577
2578 /// Return a value that has been extracted from a larger integer type.
2579 Value *CreateExtractInteger(const DataLayout &DL, Value *From,
2580 IntegerType *ExtractedTy, uint64_t Offset,
2581 const Twine &Name);
2582
2583 Value *CreatePreserveArrayAccessIndex(Type *ElTy, Value *Base,
2584 unsigned Dimension, unsigned LastIndex,
2585 MDNode *DbgInfo);
2586
2587 Value *CreatePreserveUnionAccessIndex(Value *Base, unsigned FieldIndex,
2588 MDNode *DbgInfo);
2589
2590 Value *CreatePreserveStructAccessIndex(Type *ElTy, Value *Base,
2591 unsigned Index, unsigned FieldIndex,
2592 MDNode *DbgInfo);
2593
2594private:
2595 /// Helper function that creates an assume intrinsic call that
2596 /// represents an alignment assumption on the provided pointer \p PtrValue
2597 /// with offset \p OffsetValue and alignment value \p AlignValue.
2598 CallInst *CreateAlignmentAssumptionHelper(const DataLayout &DL,
2599 Value *PtrValue, Value *AlignValue,
2600 Value *OffsetValue);
2601
2602public:
2603 /// Create an assume intrinsic call that represents an alignment
2604 /// assumption on the provided pointer.
2605 ///
2606 /// An optional offset can be provided, and if it is provided, the offset
2607 /// must be subtracted from the provided pointer to get the pointer with the
2608 /// specified alignment.
2609 CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
2610 unsigned Alignment,
2611 Value *OffsetValue = nullptr);
2612
2613 /// Create an assume intrinsic call that represents an alignment
2614 /// assumption on the provided pointer.
2615 ///
2616 /// An optional offset can be provided, and if it is provided, the offset
2617 /// must be subtracted from the provided pointer to get the pointer with the
2618 /// specified alignment.
2619 ///
2620 /// This overload handles the condition where the Alignment is dependent
2621 /// on an existing value rather than a static value.
2622 CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
2623 Value *Alignment,
2624 Value *OffsetValue = nullptr);
2625};
2626
2627/// This provides a uniform API for creating instructions and inserting
2628/// them into a basic block: either at the end of a BasicBlock, or at a specific
2629/// iterator location in a block.
2630///
2631/// Note that the builder does not expose the full generality of LLVM
2632/// instructions. For access to extra instruction properties, use the mutators
2633/// (e.g. setVolatile) on the instructions after they have been
2634/// created. Convenience state exists to specify fast-math flags and fp-math
2635/// tags.
2636///
2637/// The first template argument specifies a class to use for creating constants.
2638/// This defaults to creating minimally folded constants. The second template
2639/// argument allows clients to specify custom insertion hooks that are called on
2640/// every newly created insertion.
2641template <typename FolderTy = ConstantFolder,
2642 typename InserterTy = IRBuilderDefaultInserter>
2643class IRBuilder : public IRBuilderBase {
2644private:
2645 FolderTy Folder;
2646 InserterTy Inserter;
2647
2648public:
2649 IRBuilder(LLVMContext &C, FolderTy Folder, InserterTy Inserter = InserterTy(),
2650 MDNode *FPMathTag = nullptr,
2651 ArrayRef<OperandBundleDef> OpBundles = None)
2652 : IRBuilderBase(C, this->Folder, this->Inserter, FPMathTag, OpBundles),
2653 Folder(Folder), Inserter(Inserter) {}
2654
2655 explicit IRBuilder(LLVMContext &C, MDNode *FPMathTag = nullptr,
2656 ArrayRef<OperandBundleDef> OpBundles = None)
2657 : IRBuilderBase(C, this->Folder, this->Inserter, FPMathTag, OpBundles) {}
2658
2659 explicit IRBuilder(BasicBlock *TheBB, FolderTy Folder,
2660 MDNode *FPMathTag = nullptr,
2661 ArrayRef<OperandBundleDef> OpBundles = None)
2662 : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
2663 FPMathTag, OpBundles), Folder(Folder) {
2664 SetInsertPoint(TheBB);
2665 }
2666
2667 explicit IRBuilder(BasicBlock *TheBB, MDNode *FPMathTag = nullptr,
2668 ArrayRef<OperandBundleDef> OpBundles = None)
2669 : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
2670 FPMathTag, OpBundles) {
2671 SetInsertPoint(TheBB);
2672 }
2673
2674 explicit IRBuilder(Instruction *IP, MDNode *FPMathTag = nullptr,
2675 ArrayRef<OperandBundleDef> OpBundles = None)
2676 : IRBuilderBase(IP->getContext(), this->Folder, this->Inserter,
2677 FPMathTag, OpBundles) {
2678 SetInsertPoint(IP);
2679 }
2680
2681 IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP, FolderTy Folder,
2682 MDNode *FPMathTag = nullptr,
2683 ArrayRef<OperandBundleDef> OpBundles = None)
2684 : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
2685 FPMathTag, OpBundles), Folder(Folder) {
2686 SetInsertPoint(TheBB, IP);
2687 }
2688
2689 IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP,
2690 MDNode *FPMathTag = nullptr,
2691 ArrayRef<OperandBundleDef> OpBundles = None)
2692 : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
2693 FPMathTag, OpBundles) {
2694 SetInsertPoint(TheBB, IP);
2695 }
2696
2697 /// Avoid copying the full IRBuilder. Prefer using InsertPointGuard
2698 /// or FastMathFlagGuard instead.
2699 IRBuilder(const IRBuilder &) = delete;
2700
2701 InserterTy &getInserter() { return Inserter; }
2702};
2703
2704// Create wrappers for C Binding types (see CBindingWrapping.h).
2705DEFINE_SIMPLE_CONVERSION_FUNCTIONS(IRBuilder<>, LLVMBuilderRef)inline IRBuilder<> *unwrap(LLVMBuilderRef P) { return reinterpret_cast
<IRBuilder<>*>(P); } inline LLVMBuilderRef wrap(const
IRBuilder<> *P) { return reinterpret_cast<LLVMBuilderRef
>(const_cast<IRBuilder<>*>(P)); }
2706
2707} // end namespace llvm
2708
2709#endif // LLVM_IR_IRBUILDER_H

/usr/src/gnu/usr.bin/clang/libclangCodeGen/../../../llvm/llvm/include/llvm/IR/Instructions.h

1//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file exposes the class definitions of all of the subclasses of the
10// Instruction class. This is meant to be an easy way to get access to all
11// instruction subclasses.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_INSTRUCTIONS_H
16#define LLVM_IR_INSTRUCTIONS_H
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/Bitfields.h"
20#include "llvm/ADT/MapVector.h"
21#include "llvm/ADT/None.h"
22#include "llvm/ADT/STLExtras.h"
23#include "llvm/ADT/SmallVector.h"
24#include "llvm/ADT/StringRef.h"
25#include "llvm/ADT/Twine.h"
26#include "llvm/ADT/iterator.h"
27#include "llvm/ADT/iterator_range.h"
28#include "llvm/IR/Attributes.h"
29#include "llvm/IR/BasicBlock.h"
30#include "llvm/IR/CallingConv.h"
31#include "llvm/IR/CFG.h"
32#include "llvm/IR/Constant.h"
33#include "llvm/IR/DerivedTypes.h"
34#include "llvm/IR/Function.h"
35#include "llvm/IR/InstrTypes.h"
36#include "llvm/IR/Instruction.h"
37#include "llvm/IR/OperandTraits.h"
38#include "llvm/IR/Type.h"
39#include "llvm/IR/Use.h"
40#include "llvm/IR/User.h"
41#include "llvm/IR/Value.h"
42#include "llvm/Support/AtomicOrdering.h"
43#include "llvm/Support/Casting.h"
44#include "llvm/Support/ErrorHandling.h"
45#include <cassert>
46#include <cstddef>
47#include <cstdint>
48#include <iterator>
49
50namespace llvm {
51
52class APInt;
53class ConstantInt;
54class DataLayout;
55class LLVMContext;
56
57//===----------------------------------------------------------------------===//
58// AllocaInst Class
59//===----------------------------------------------------------------------===//
60
61/// an instruction to allocate memory on the stack
62class AllocaInst : public UnaryInstruction {
63 Type *AllocatedType;
64
65 using AlignmentField = AlignmentBitfieldElementT<0>;
66 using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
67 using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>;
68 static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
69 SwiftErrorField>(),
70 "Bitfields must be contiguous");
71
72protected:
73 // Note: Instruction needs to be a friend here to call cloneImpl.
74 friend class Instruction;
75
76 AllocaInst *cloneImpl() const;
77
78public:
79 explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
80 const Twine &Name, Instruction *InsertBefore);
81 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
82 const Twine &Name, BasicBlock *InsertAtEnd);
83
84 AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
85 Instruction *InsertBefore);
86 AllocaInst(Type *Ty, unsigned AddrSpace,
87 const Twine &Name, BasicBlock *InsertAtEnd);
88
89 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
90 const Twine &Name = "", Instruction *InsertBefore = nullptr);
91 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
92 const Twine &Name, BasicBlock *InsertAtEnd);
93
94 /// Return true if there is an allocation size parameter to the allocation
95 /// instruction that is not 1.
96 bool isArrayAllocation() const;
97
98 /// Get the number of elements allocated. For a simple allocation of a single
99 /// element, this will return a constant 1 value.
100 const Value *getArraySize() const { return getOperand(0); }
101 Value *getArraySize() { return getOperand(0); }
102
103 /// Overload to return most specific pointer type.
104 PointerType *getType() const {
105 return cast<PointerType>(Instruction::getType());
106 }
107
108 /// Get allocation size in bits. Returns None if size can't be determined,
109 /// e.g. in case of a VLA.
110 Optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const;
111
112 /// Return the type that is being allocated by the instruction.
113 Type *getAllocatedType() const { return AllocatedType; }
114 /// for use only in special circumstances that need to generically
115 /// transform a whole instruction (eg: IR linking and vectorization).
116 void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
117
118 /// Return the alignment of the memory that is being allocated by the
119 /// instruction.
120 Align getAlign() const {
121 return Align(1ULL << getSubclassData<AlignmentField>());
122 }
123
124 void setAlignment(Align Align) {
125 setSubclassData<AlignmentField>(Log2(Align));
126 }
127
128 // FIXME: Remove this one transition to Align is over.
129 unsigned getAlignment() const { return getAlign().value(); }
130
131 /// Return true if this alloca is in the entry block of the function and is a
132 /// constant size. If so, the code generator will fold it into the
133 /// prolog/epilog code, so it is basically free.
134 bool isStaticAlloca() const;
135
136 /// Return true if this alloca is used as an inalloca argument to a call. Such
137 /// allocas are never considered static even if they are in the entry block.
138 bool isUsedWithInAlloca() const {
139 return getSubclassData<UsedWithInAllocaField>();
140 }
141
142 /// Specify whether this alloca is used to represent the arguments to a call.
143 void setUsedWithInAlloca(bool V) {
144 setSubclassData<UsedWithInAllocaField>(V);
145 }
146
147 /// Return true if this alloca is used as a swifterror argument to a call.
148 bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
149 /// Specify whether this alloca is used to represent a swifterror.
150 void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }
151
152 // Methods for support type inquiry through isa, cast, and dyn_cast:
153 static bool classof(const Instruction *I) {
154 return (I->getOpcode() == Instruction::Alloca);
155 }
156 static bool classof(const Value *V) {
157 return isa<Instruction>(V) && classof(cast<Instruction>(V));
158 }
159
160private:
161 // Shadow Instruction::setInstructionSubclassData with a private forwarding
162 // method so that subclasses cannot accidentally use it.
163 template <typename Bitfield>
164 void setSubclassData(typename Bitfield::Type Value) {
165 Instruction::setSubclassData<Bitfield>(Value);
166 }
167};
168
169//===----------------------------------------------------------------------===//
170// LoadInst Class
171//===----------------------------------------------------------------------===//
172
173/// An instruction for reading from memory. This uses the SubclassData field in
174/// Value to store whether or not the load is volatile.
175class LoadInst : public UnaryInstruction {
176 using VolatileField = BoolBitfieldElementT<0>;
177 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
178 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
179 static_assert(
180 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
181 "Bitfields must be contiguous");
182
183 void AssertOK();
184
185protected:
186 // Note: Instruction needs to be a friend here to call cloneImpl.
187 friend class Instruction;
188
189 LoadInst *cloneImpl() const;
190
191public:
192 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
193 Instruction *InsertBefore);
194 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
195 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
196 Instruction *InsertBefore);
197 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
198 BasicBlock *InsertAtEnd);
199 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
200 Align Align, Instruction *InsertBefore = nullptr);
201 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
202 Align Align, BasicBlock *InsertAtEnd);
203 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
204 Align Align, AtomicOrdering Order,
205 SyncScope::ID SSID = SyncScope::System,
206 Instruction *InsertBefore = nullptr);
207 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
208 Align Align, AtomicOrdering Order, SyncScope::ID SSID,
209 BasicBlock *InsertAtEnd);
210
211 /// Return true if this is a load from a volatile memory location.
212 bool isVolatile() const { return getSubclassData<VolatileField>(); }
213
214 /// Specify whether this is a volatile load or not.
215 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
216
217 /// Return the alignment of the access that is being performed.
218 /// FIXME: Remove this function once transition to Align is over.
219 /// Use getAlign() instead.
220 unsigned getAlignment() const { return getAlign().value(); }
221
222 /// Return the alignment of the access that is being performed.
223 Align getAlign() const {
224 return Align(1ULL << (getSubclassData<AlignmentField>()));
225 }
226
227 void setAlignment(Align Align) {
228 setSubclassData<AlignmentField>(Log2(Align));
229 }
230
231 /// Returns the ordering constraint of this load instruction.
232 AtomicOrdering getOrdering() const {
233 return getSubclassData<OrderingField>();
234 }
235 /// Sets the ordering constraint of this load instruction. May not be Release
236 /// or AcquireRelease.
237 void setOrdering(AtomicOrdering Ordering) {
238 setSubclassData<OrderingField>(Ordering);
239 }
240
241 /// Returns the synchronization scope ID of this load instruction.
242 SyncScope::ID getSyncScopeID() const {
243 return SSID;
244 }
245
246 /// Sets the synchronization scope ID of this load instruction.
247 void setSyncScopeID(SyncScope::ID SSID) {
248 this->SSID = SSID;
249 }
250
251 /// Sets the ordering constraint and the synchronization scope ID of this load
252 /// instruction.
253 void setAtomic(AtomicOrdering Ordering,
254 SyncScope::ID SSID = SyncScope::System) {
255 setOrdering(Ordering);
256 setSyncScopeID(SSID);
257 }
258
259 bool isSimple() const { return !isAtomic() && !isVolatile(); }
260
261 bool isUnordered() const {
262 return (getOrdering() == AtomicOrdering::NotAtomic ||
263 getOrdering() == AtomicOrdering::Unordered) &&
264 !isVolatile();
265 }
266
267 Value *getPointerOperand() { return getOperand(0); }
268 const Value *getPointerOperand() const { return getOperand(0); }
269 static unsigned getPointerOperandIndex() { return 0U; }
270 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
271
272 /// Returns the address space of the pointer operand.
273 unsigned getPointerAddressSpace() const {
274 return getPointerOperandType()->getPointerAddressSpace();
275 }
276
277 // Methods for support type inquiry through isa, cast, and dyn_cast:
278 static bool classof(const Instruction *I) {
279 return I->getOpcode() == Instruction::Load;
280 }
281 static bool classof(const Value *V) {
282 return isa<Instruction>(V) && classof(cast<Instruction>(V));
283 }
284
285private:
286 // Shadow Instruction::setInstructionSubclassData with a private forwarding
287 // method so that subclasses cannot accidentally use it.
288 template <typename Bitfield>
289 void setSubclassData(typename Bitfield::Type Value) {
290 Instruction::setSubclassData<Bitfield>(Value);
291 }
292
293 /// The synchronization scope ID of this load instruction. Not quite enough
294 /// room in SubClassData for everything, so synchronization scope ID gets its
295 /// own field.
296 SyncScope::ID SSID;
297};
298
299//===----------------------------------------------------------------------===//
300// StoreInst Class
301//===----------------------------------------------------------------------===//
302
303/// An instruction for storing to memory.
304class StoreInst : public Instruction {
305 using VolatileField = BoolBitfieldElementT<0>;
306 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
307 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
308 static_assert(
309 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
310 "Bitfields must be contiguous");
311
312 void AssertOK();
313
314protected:
315 // Note: Instruction needs to be a friend here to call cloneImpl.
316 friend class Instruction;
317
318 StoreInst *cloneImpl() const;
319
320public:
321 StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
322 StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
323 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore);
324 StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
325 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
326 Instruction *InsertBefore = nullptr);
327 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
328 BasicBlock *InsertAtEnd);
329 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
330 AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
331 Instruction *InsertBefore = nullptr);
332 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
333 AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);
334
335 // allocate space for exactly two operands
336 void *operator new(size_t S) { return User::operator new(S, 2); }
337 void operator delete(void *Ptr) { User::operator delete(Ptr); }
338
339 /// Return true if this is a store to a volatile memory location.
340 bool isVolatile() const { return getSubclassData<VolatileField>(); }
341
342 /// Specify whether this is a volatile store or not.
343 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
344
345 /// Transparently provide more efficient getOperand methods.
346 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
347
348 /// Return the alignment of the access that is being performed
349 /// FIXME: Remove this function once transition to Align is over.
350 /// Use getAlign() instead.
351 unsigned getAlignment() const { return getAlign().value(); }
352
353 Align getAlign() const {
354 return Align(1ULL << (getSubclassData<AlignmentField>()));
355 }
356
357 void setAlignment(Align Align) {
358 setSubclassData<AlignmentField>(Log2(Align));
359 }
360
361 /// Returns the ordering constraint of this store instruction.
362 AtomicOrdering getOrdering() const {
363 return getSubclassData<OrderingField>();
364 }
365
366 /// Sets the ordering constraint of this store instruction. May not be
367 /// Acquire or AcquireRelease.
368 void setOrdering(AtomicOrdering Ordering) {
369 setSubclassData<OrderingField>(Ordering);
370 }
371
372 /// Returns the synchronization scope ID of this store instruction.
373 SyncScope::ID getSyncScopeID() const {
374 return SSID;
375 }
376
377 /// Sets the synchronization scope ID of this store instruction.
378 void setSyncScopeID(SyncScope::ID SSID) {
379 this->SSID = SSID;
380 }
381
382 /// Sets the ordering constraint and the synchronization scope ID of this
383 /// store instruction.
384 void setAtomic(AtomicOrdering Ordering,
385 SyncScope::ID SSID = SyncScope::System) {
386 setOrdering(Ordering);
387 setSyncScopeID(SSID);
388 }
389
390 bool isSimple() const { return !isAtomic() && !isVolatile(); }
391
392 bool isUnordered() const {
393 return (getOrdering() == AtomicOrdering::NotAtomic ||
394 getOrdering() == AtomicOrdering::Unordered) &&
395 !isVolatile();
396 }
397
398 Value *getValueOperand() { return getOperand(0); }
399 const Value *getValueOperand() const { return getOperand(0); }
400
401 Value *getPointerOperand() { return getOperand(1); }
402 const Value *getPointerOperand() const { return getOperand(1); }
403 static unsigned getPointerOperandIndex() { return 1U; }
404 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
405
406 /// Returns the address space of the pointer operand.
407 unsigned getPointerAddressSpace() const {
408 return getPointerOperandType()->getPointerAddressSpace();
409 }
410
411 // Methods for support type inquiry through isa, cast, and dyn_cast:
412 static bool classof(const Instruction *I) {
413 return I->getOpcode() == Instruction::Store;
414 }
415 static bool classof(const Value *V) {
416 return isa<Instruction>(V) && classof(cast<Instruction>(V));
417 }
418
419private:
420 // Shadow Instruction::setInstructionSubclassData with a private forwarding
421 // method so that subclasses cannot accidentally use it.
422 template <typename Bitfield>
423 void setSubclassData(typename Bitfield::Type Value) {
424 Instruction::setSubclassData<Bitfield>(Value);
425 }
426
427 /// The synchronization scope ID of this store instruction. Not quite enough
428 /// room in SubClassData for everything, so synchronization scope ID gets its
429 /// own field.
430 SyncScope::ID SSID;
431};
432
433template <>
434struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
435};
436
437DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)StoreInst::op_iterator StoreInst::op_begin() { return OperandTraits
<StoreInst>::op_begin(this); } StoreInst::const_op_iterator
StoreInst::op_begin() const { return OperandTraits<StoreInst
>::op_begin(const_cast<StoreInst*>(this)); } StoreInst
::op_iterator StoreInst::op_end() { return OperandTraits<StoreInst
>::op_end(this); } StoreInst::const_op_iterator StoreInst::
op_end() const { return OperandTraits<StoreInst>::op_end
(const_cast<StoreInst*>(this)); } Value *StoreInst::getOperand
(unsigned i_nocapture) const { ((void)0); return cast_or_null
<Value>( OperandTraits<StoreInst>::op_begin(const_cast
<StoreInst*>(this))[i_nocapture].get()); } void StoreInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { ((
void)0); OperandTraits<StoreInst>::op_begin(this)[i_nocapture
] = Val_nocapture; } unsigned StoreInst::getNumOperands() const
{ return OperandTraits<StoreInst>::operands(this); } template
<int Idx_nocapture> Use &StoreInst::Op() { return this
->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture
> const Use &StoreInst::Op() const { return this->OpFrom
<Idx_nocapture>(this); }
438
439//===----------------------------------------------------------------------===//
440// FenceInst Class
441//===----------------------------------------------------------------------===//
442
443/// An instruction for ordering other memory operations.
444class FenceInst : public Instruction {
445 using OrderingField = AtomicOrderingBitfieldElementT<0>;
446
447 void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
448
449protected:
450 // Note: Instruction needs to be a friend here to call cloneImpl.
451 friend class Instruction;
452
453 FenceInst *cloneImpl() const;
454
455public:
456 // Ordering may only be Acquire, Release, AcquireRelease, or
457 // SequentiallyConsistent.
458 FenceInst(LLVMContext &C, AtomicOrdering Ordering,
459 SyncScope::ID SSID = SyncScope::System,
460 Instruction *InsertBefore = nullptr);
461 FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
462 BasicBlock *InsertAtEnd);
463
464 // allocate space for exactly zero operands
465 void *operator new(size_t S) { return User::operator new(S, 0); }
466 void operator delete(void *Ptr) { User::operator delete(Ptr); }
467
468 /// Returns the ordering constraint of this fence instruction.
469 AtomicOrdering getOrdering() const {
470 return getSubclassData<OrderingField>();
471 }
472
473 /// Sets the ordering constraint of this fence instruction. May only be
474 /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
475 void setOrdering(AtomicOrdering Ordering) {
476 setSubclassData<OrderingField>(Ordering);
477 }
478
479 /// Returns the synchronization scope ID of this fence instruction.
480 SyncScope::ID getSyncScopeID() const {
481 return SSID;
482 }
483
484 /// Sets the synchronization scope ID of this fence instruction.
485 void setSyncScopeID(SyncScope::ID SSID) {
486 this->SSID = SSID;
487 }
488
489 // Methods for support type inquiry through isa, cast, and dyn_cast:
490 static bool classof(const Instruction *I) {
491 return I->getOpcode() == Instruction::Fence;
492 }
493 static bool classof(const Value *V) {
494 return isa<Instruction>(V) && classof(cast<Instruction>(V));
495 }
496
497private:
498 // Shadow Instruction::setInstructionSubclassData with a private forwarding
499 // method so that subclasses cannot accidentally use it.
500 template <typename Bitfield>
501 void setSubclassData(typename Bitfield::Type Value) {
502 Instruction::setSubclassData<Bitfield>(Value);
503 }
504
505 /// The synchronization scope ID of this fence instruction. Not quite enough
506 /// room in SubClassData for everything, so synchronization scope ID gets its
507 /// own field.
508 SyncScope::ID SSID;
509};
510
511//===----------------------------------------------------------------------===//
512// AtomicCmpXchgInst Class
513//===----------------------------------------------------------------------===//
514
515/// An instruction that atomically checks whether a
516/// specified value is in a memory location, and, if it is, stores a new value
517/// there. The value returned by this instruction is a pair containing the
518/// original value as first element, and an i1 indicating success (true) or
519/// failure (false) as second element.
520///
521class AtomicCmpXchgInst : public Instruction {
522 void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
523 AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
524 SyncScope::ID SSID);
525
526 template <unsigned Offset>
527 using AtomicOrderingBitfieldElement =
528 typename Bitfield::Element<AtomicOrdering, Offset, 3,
529 AtomicOrdering::LAST>;
530
531protected:
532 // Note: Instruction needs to be a friend here to call cloneImpl.
533 friend class Instruction;
534
535 AtomicCmpXchgInst *cloneImpl() const;
536
537public:
538 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
539 AtomicOrdering SuccessOrdering,
540 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
541 Instruction *InsertBefore = nullptr);
542 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
543 AtomicOrdering SuccessOrdering,
544 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
545 BasicBlock *InsertAtEnd);
546
547 // allocate space for exactly three operands
548 void *operator new(size_t S) { return User::operator new(S, 3); }
549 void operator delete(void *Ptr) { User::operator delete(Ptr); }
550
551 using VolatileField = BoolBitfieldElementT<0>;
552 using WeakField = BoolBitfieldElementT<VolatileField::NextBit>;
553 using SuccessOrderingField =
554 AtomicOrderingBitfieldElementT<WeakField::NextBit>;
555 using FailureOrderingField =
556 AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>;
557 using AlignmentField =
558 AlignmentBitfieldElementT<FailureOrderingField::NextBit>;
559 static_assert(
560 Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField,
561 FailureOrderingField, AlignmentField>(),
562 "Bitfields must be contiguous");
563
564 /// Return the alignment of the memory that is being allocated by the
565 /// instruction.
566 Align getAlign() const {
567 return Align(1ULL << getSubclassData<AlignmentField>());
568 }
569
570 void setAlignment(Align Align) {
571 setSubclassData<AlignmentField>(Log2(Align));
572 }
573
574 /// Return true if this is a cmpxchg from a volatile memory
575 /// location.
576 ///
577 bool isVolatile() const { return getSubclassData<VolatileField>(); }
578
579 /// Specify whether this is a volatile cmpxchg.
580 ///
581 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
582
583 /// Return true if this cmpxchg may spuriously fail.
584 bool isWeak() const { return getSubclassData<WeakField>(); }
585
586 void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }
587
588 /// Transparently provide more efficient getOperand methods.
589 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
590
591 static bool isValidSuccessOrdering(AtomicOrdering Ordering) {
592 return Ordering != AtomicOrdering::NotAtomic &&
593 Ordering != AtomicOrdering::Unordered;
594 }
595
596 static bool isValidFailureOrdering(AtomicOrdering Ordering) {
597 return Ordering != AtomicOrdering::NotAtomic &&
598 Ordering != AtomicOrdering::Unordered &&
599 Ordering != AtomicOrdering::AcquireRelease &&
600 Ordering != AtomicOrdering::Release;
601 }
602
603 /// Returns the success ordering constraint of this cmpxchg instruction.
604 AtomicOrdering getSuccessOrdering() const {
605 return getSubclassData<SuccessOrderingField>();
606 }
607
608 /// Sets the success ordering constraint of this cmpxchg instruction.
609 void setSuccessOrdering(AtomicOrdering Ordering) {
610 assert(isValidSuccessOrdering(Ordering) &&((void)0)
611 "invalid CmpXchg success ordering")((void)0);
612 setSubclassData<SuccessOrderingField>(Ordering);
613 }
614
615 /// Returns the failure ordering constraint of this cmpxchg instruction.
616 AtomicOrdering getFailureOrdering() const {
617 return getSubclassData<FailureOrderingField>();
618 }
619
620 /// Sets the failure ordering constraint of this cmpxchg instruction.
621 void setFailureOrdering(AtomicOrdering Ordering) {
622 assert(isValidFailureOrdering(Ordering) &&((void)0)
623 "invalid CmpXchg failure ordering")((void)0);
624 setSubclassData<FailureOrderingField>(Ordering);
625 }
626
627 /// Returns a single ordering which is at least as strong as both the
628 /// success and failure orderings for this cmpxchg.
629 AtomicOrdering getMergedOrdering() const {
630 if (getFailureOrdering() == AtomicOrdering::SequentiallyConsistent)
631 return AtomicOrdering::SequentiallyConsistent;
632 if (getFailureOrdering() == AtomicOrdering::Acquire) {
633 if (getSuccessOrdering() == AtomicOrdering::Monotonic)
634 return AtomicOrdering::Acquire;
635 if (getSuccessOrdering() == AtomicOrdering::Release)
636 return AtomicOrdering::AcquireRelease;
637 }
638 return getSuccessOrdering();
639 }
640
641 /// Returns the synchronization scope ID of this cmpxchg instruction.
642 SyncScope::ID getSyncScopeID() const {
643 return SSID;
644 }
645
646 /// Sets the synchronization scope ID of this cmpxchg instruction.
647 void setSyncScopeID(SyncScope::ID SSID) {
648 this->SSID = SSID;
649 }
650
651 Value *getPointerOperand() { return getOperand(0); }
652 const Value *getPointerOperand() const { return getOperand(0); }
653 static unsigned getPointerOperandIndex() { return 0U; }
654
655 Value *getCompareOperand() { return getOperand(1); }
656 const Value *getCompareOperand() const { return getOperand(1); }
657
658 Value *getNewValOperand() { return getOperand(2); }
659 const Value *getNewValOperand() const { return getOperand(2); }
660
661 /// Returns the address space of the pointer operand.
662 unsigned getPointerAddressSpace() const {
663 return getPointerOperand()->getType()->getPointerAddressSpace();
664 }
665
666 /// Returns the strongest permitted ordering on failure, given the
667 /// desired ordering on success.
668 ///
669 /// If the comparison in a cmpxchg operation fails, there is no atomic store
670 /// so release semantics cannot be provided. So this function drops explicit
671 /// Release requests from the AtomicOrdering. A SequentiallyConsistent
672 /// operation would remain SequentiallyConsistent.
673 static AtomicOrdering
674 getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
675 switch (SuccessOrdering) {
676 default:
677 llvm_unreachable("invalid cmpxchg success ordering")__builtin_unreachable();
678 case AtomicOrdering::Release:
679 case AtomicOrdering::Monotonic:
680 return AtomicOrdering::Monotonic;
681 case AtomicOrdering::AcquireRelease:
682 case AtomicOrdering::Acquire:
683 return AtomicOrdering::Acquire;
684 case AtomicOrdering::SequentiallyConsistent:
685 return AtomicOrdering::SequentiallyConsistent;
686 }
687 }
688
689 // Methods for support type inquiry through isa, cast, and dyn_cast:
690 static bool classof(const Instruction *I) {
691 return I->getOpcode() == Instruction::AtomicCmpXchg;
692 }
693 static bool classof(const Value *V) {
694 return isa<Instruction>(V) && classof(cast<Instruction>(V));
695 }
696
697private:
698 // Shadow Instruction::setInstructionSubclassData with a private forwarding
699 // method so that subclasses cannot accidentally use it.
700 template <typename Bitfield>
701 void setSubclassData(typename Bitfield::Type Value) {
702 Instruction::setSubclassData<Bitfield>(Value);
703 }
704
705 /// The synchronization scope ID of this cmpxchg instruction. Not quite
706 /// enough room in SubClassData for everything, so synchronization scope ID
707 /// gets its own field.
708 SyncScope::ID SSID;
709};
710
711template <>
712struct OperandTraits<AtomicCmpXchgInst> :
713 public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
714};
715
716DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)AtomicCmpXchgInst::op_iterator AtomicCmpXchgInst::op_begin() {
return OperandTraits<AtomicCmpXchgInst>::op_begin(this
); } AtomicCmpXchgInst::const_op_iterator AtomicCmpXchgInst::
op_begin() const { return OperandTraits<AtomicCmpXchgInst>
::op_begin(const_cast<AtomicCmpXchgInst*>(this)); } AtomicCmpXchgInst
::op_iterator AtomicCmpXchgInst::op_end() { return OperandTraits
<AtomicCmpXchgInst>::op_end(this); } AtomicCmpXchgInst::
const_op_iterator AtomicCmpXchgInst::op_end() const { return OperandTraits
<AtomicCmpXchgInst>::op_end(const_cast<AtomicCmpXchgInst
*>(this)); } Value *AtomicCmpXchgInst::getOperand(unsigned
i_nocapture) const { ((void)0); return cast_or_null<Value
>( OperandTraits<AtomicCmpXchgInst>::op_begin(const_cast
<AtomicCmpXchgInst*>(this))[i_nocapture].get()); } void
AtomicCmpXchgInst::setOperand(unsigned i_nocapture, Value *Val_nocapture
) { ((void)0); OperandTraits<AtomicCmpXchgInst>::op_begin
(this)[i_nocapture] = Val_nocapture; } unsigned AtomicCmpXchgInst
::getNumOperands() const { return OperandTraits<AtomicCmpXchgInst
>::operands(this); } template <int Idx_nocapture> Use
&AtomicCmpXchgInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
AtomicCmpXchgInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
717
718//===----------------------------------------------------------------------===//
719// AtomicRMWInst Class
720//===----------------------------------------------------------------------===//
721
722/// an instruction that atomically reads a memory location,
723/// combines it with another value, and then stores the result back. Returns
724/// the old value.
725///
726class AtomicRMWInst : public Instruction {
727protected:
728 // Note: Instruction needs to be a friend here to call cloneImpl.
729 friend class Instruction;
730
731 AtomicRMWInst *cloneImpl() const;
732
733public:
734 /// This enumeration lists the possible modifications atomicrmw can make. In
735 /// the descriptions, 'p' is the pointer to the instruction's memory location,
736 /// 'old' is the initial value of *p, and 'v' is the other value passed to the
737 /// instruction. These instructions always return 'old'.
738 enum BinOp : unsigned {
739 /// *p = v
740 Xchg,
741 /// *p = old + v
742 Add,
743 /// *p = old - v
744 Sub,
745 /// *p = old & v
746 And,
747 /// *p = ~(old & v)
748 Nand,
749 /// *p = old | v
750 Or,
751 /// *p = old ^ v
752 Xor,
753 /// *p = old >signed v ? old : v
754 Max,
755 /// *p = old <signed v ? old : v
756 Min,
757 /// *p = old >unsigned v ? old : v
758 UMax,
759 /// *p = old <unsigned v ? old : v
760 UMin,
761
762 /// *p = old + v
763 FAdd,
764
765 /// *p = old - v
766 FSub,
767
768 FIRST_BINOP = Xchg,
769 LAST_BINOP = FSub,
770 BAD_BINOP
771 };
772
773private:
774 template <unsigned Offset>
775 using AtomicOrderingBitfieldElement =
776 typename Bitfield::Element<AtomicOrdering, Offset, 3,
777 AtomicOrdering::LAST>;
778
779 template <unsigned Offset>
780 using BinOpBitfieldElement =
781 typename Bitfield::Element<BinOp, Offset, 4, BinOp::LAST_BINOP>;
782
783public:
784 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
785 AtomicOrdering Ordering, SyncScope::ID SSID,
786 Instruction *InsertBefore = nullptr);
787 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
788 AtomicOrdering Ordering, SyncScope::ID SSID,
789 BasicBlock *InsertAtEnd);
790
791 // allocate space for exactly two operands
792 void *operator new(size_t S) { return User::operator new(S, 2); }
793 void operator delete(void *Ptr) { User::operator delete(Ptr); }
794
795 using VolatileField = BoolBitfieldElementT<0>;
796 using AtomicOrderingField =
797 AtomicOrderingBitfieldElementT<VolatileField::NextBit>;
798 using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
799 using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>;
800 static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField,
801 OperationField, AlignmentField>(),
802 "Bitfields must be contiguous");
803
804 BinOp getOperation() const { return getSubclassData<OperationField>(); }
805
806 static StringRef getOperationName(BinOp Op);
807
808 static bool isFPOperation(BinOp Op) {
809 switch (Op) {
810 case AtomicRMWInst::FAdd:
811 case AtomicRMWInst::FSub:
812 return true;
813 default:
814 return false;
815 }
816 }
817
818 void setOperation(BinOp Operation) {
819 setSubclassData<OperationField>(Operation);
820 }
821
822 /// Return the alignment of the memory that is being allocated by the
823 /// instruction.
824 Align getAlign() const {
825 return Align(1ULL << getSubclassData<AlignmentField>());
826 }
827
828 void setAlignment(Align Align) {
829 setSubclassData<AlignmentField>(Log2(Align));
830 }
831
832 /// Return true if this is a RMW on a volatile memory location.
833 ///
834 bool isVolatile() const { return getSubclassData<VolatileField>(); }
835
836 /// Specify whether this is a volatile RMW or not.
837 ///
838 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
839
840 /// Transparently provide more efficient getOperand methods.
841 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
842
843 /// Returns the ordering constraint of this rmw instruction.
844 AtomicOrdering getOrdering() const {
845 return getSubclassData<AtomicOrderingField>();
846 }
847
848 /// Sets the ordering constraint of this rmw instruction.
849 void setOrdering(AtomicOrdering Ordering) {
850 assert(Ordering != AtomicOrdering::NotAtomic &&((void)0)
851 "atomicrmw instructions can only be atomic.")((void)0);
852 setSubclassData<AtomicOrderingField>(Ordering);
853 }
854
855 /// Returns the synchronization scope ID of this rmw instruction.
856 SyncScope::ID getSyncScopeID() const {
857 return SSID;
858 }
859
860 /// Sets the synchronization scope ID of this rmw instruction.
861 void setSyncScopeID(SyncScope::ID SSID) {
862 this->SSID = SSID;
863 }
864
865 Value *getPointerOperand() { return getOperand(0); }
866 const Value *getPointerOperand() const { return getOperand(0); }
867 static unsigned getPointerOperandIndex() { return 0U; }
868
869 Value *getValOperand() { return getOperand(1); }
870 const Value *getValOperand() const { return getOperand(1); }
871
872 /// Returns the address space of the pointer operand.
873 unsigned getPointerAddressSpace() const {
874 return getPointerOperand()->getType()->getPointerAddressSpace();
875 }
876
877 bool isFloatingPointOperation() const {
878 return isFPOperation(getOperation());
879 }
880
881 // Methods for support type inquiry through isa, cast, and dyn_cast:
882 static bool classof(const Instruction *I) {
883 return I->getOpcode() == Instruction::AtomicRMW;
884 }
885 static bool classof(const Value *V) {
886 return isa<Instruction>(V) && classof(cast<Instruction>(V));
887 }
888
889private:
890 void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
891 AtomicOrdering Ordering, SyncScope::ID SSID);
892
893 // Shadow Instruction::setInstructionSubclassData with a private forwarding
894 // method so that subclasses cannot accidentally use it.
895 template <typename Bitfield>
896 void setSubclassData(typename Bitfield::Type Value) {
897 Instruction::setSubclassData<Bitfield>(Value);
898 }
899
900 /// The synchronization scope ID of this rmw instruction. Not quite enough
901 /// room in SubClassData for everything, so synchronization scope ID gets its
902 /// own field.
903 SyncScope::ID SSID;
904};
905
906template <>
907struct OperandTraits<AtomicRMWInst>
908 : public FixedNumOperandTraits<AtomicRMWInst,2> {
909};
910
// Operand accessor bodies (op_begin/op_end/getOperand/setOperand/
// getNumOperands/Op<N>) generated by DEFINE_TRANSPARENT_OPERAND_ACCESSORS;
// the analyzer dump shows the macro expansion inline after the invocation.
// The ((void)0) tokens are NDEBUG-expanded asserts.
911DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)AtomicRMWInst::op_iterator AtomicRMWInst::op_begin() { return
OperandTraits<AtomicRMWInst>::op_begin(this); } AtomicRMWInst
::const_op_iterator AtomicRMWInst::op_begin() const { return OperandTraits
<AtomicRMWInst>::op_begin(const_cast<AtomicRMWInst*>
(this)); } AtomicRMWInst::op_iterator AtomicRMWInst::op_end()
{ return OperandTraits<AtomicRMWInst>::op_end(this); }
AtomicRMWInst::const_op_iterator AtomicRMWInst::op_end() const
{ return OperandTraits<AtomicRMWInst>::op_end(const_cast
<AtomicRMWInst*>(this)); } Value *AtomicRMWInst::getOperand
(unsigned i_nocapture) const { ((void)0); return cast_or_null
<Value>( OperandTraits<AtomicRMWInst>::op_begin(const_cast
<AtomicRMWInst*>(this))[i_nocapture].get()); } void AtomicRMWInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { ((
void)0); OperandTraits<AtomicRMWInst>::op_begin(this)[i_nocapture
] = Val_nocapture; } unsigned AtomicRMWInst::getNumOperands()
const { return OperandTraits<AtomicRMWInst>::operands(
this); } template <int Idx_nocapture> Use &AtomicRMWInst
::Op() { return this->OpFrom<Idx_nocapture>(this); }
template <int Idx_nocapture> const Use &AtomicRMWInst
::Op() const { return this->OpFrom<Idx_nocapture>(this
); }
912
913//===----------------------------------------------------------------------===//
914// GetElementPtrInst Class
915//===----------------------------------------------------------------------===//
916
917// checkGEPType - Simple wrapper function to give a better assertion failure
918// message on bad indexes for a gep instruction.
919//
// Identity pass-through that exists only to attach an assertion message:
// getIndexedType() returns null for invalid indices, and this turns that
// null into an assert failure in +Asserts builds. In this NDEBUG dump the
// assert is the ((void)0) artifact, so a null Ty flows through unchanged.
920inline Type *checkGEPType(Type *Ty) {
921 assert(Ty && "Invalid GetElementPtrInst indices for type!")((void)0);
922 return Ty;
923}
924
925/// an instruction for type-safe pointer arithmetic to
926/// access elements of arrays and structs
927///
928class GetElementPtrInst : public Instruction {
  // Explicitly cached source/result element types. The
  // isOpaqueOrPointeeTypeMatches asserts below suggest these are stored so
  // the types remain available independently of the pointer operand's
  // (possibly opaque) pointee type.
929 Type *SourceElementType;
930 Type *ResultElementType;
931
932 GetElementPtrInst(const GetElementPtrInst &GEPI);
933
934 /// Constructors - Create a getelementptr instruction with a base pointer and a
935 /// list of indices. The first ctor can optionally insert before an existing
936 /// instruction, the second appends the new instruction to the specified
937 /// BasicBlock.
938 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
939 ArrayRef<Value *> IdxList, unsigned Values,
940 const Twine &NameStr, Instruction *InsertBefore);
941 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
942 ArrayRef<Value *> IdxList, unsigned Values,
943 const Twine &NameStr, BasicBlock *InsertAtEnd);
944
945 void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
946
947protected:
948 // Note: Instruction needs to be a friend here to call cloneImpl.
949 friend class Instruction;
950
951 GetElementPtrInst *cloneImpl() const;
952
953public:
  // Factories: operand count is 1 (the base pointer) + the number of indices;
  // that count is passed to placement operator new so the Use array is
  // allocated alongside the instruction.
954 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
955 ArrayRef<Value *> IdxList,
956 const Twine &NameStr = "",
957 Instruction *InsertBefore = nullptr) {
958 unsigned Values = 1 + unsigned(IdxList.size());
959 assert(PointeeType && "Must specify element type")((void)0);
960 assert(cast<PointerType>(Ptr->getType()->getScalarType())((void)0)
961 ->isOpaqueOrPointeeTypeMatches(PointeeType))((void)0);
962 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
963 NameStr, InsertBefore);
964 }
965
966 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
967 ArrayRef<Value *> IdxList,
968 const Twine &NameStr,
969 BasicBlock *InsertAtEnd) {
970 unsigned Values = 1 + unsigned(IdxList.size());
971 assert(PointeeType && "Must specify element type")((void)0);
972 assert(cast<PointerType>(Ptr->getType()->getScalarType())((void)0)
973 ->isOpaqueOrPointeeTypeMatches(PointeeType))((void)0);
974 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
975 NameStr, InsertAtEnd);
976 }
977
  // Deprecated overload: recovers the element type from the pointer via
  // getPointerElementType() instead of taking it explicitly. (The dump shows
  // the LLVM_ATTRIBUTE_DEPRECATED expansion inline.)
978 LLVM_ATTRIBUTE_DEPRECATED(static GetElementPtrInst *CreateInBounds([[deprecated("Use the version with explicit element type instead"
)]] static GetElementPtrInst *CreateInBounds( Value *Ptr, ArrayRef
<Value *> IdxList, const Twine &NameStr = "", Instruction
*InsertBefore = nullptr)
979 Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr = "",[[deprecated("Use the version with explicit element type instead"
)]] static GetElementPtrInst *CreateInBounds( Value *Ptr, ArrayRef
<Value *> IdxList, const Twine &NameStr = "", Instruction
*InsertBefore = nullptr)
980 Instruction *InsertBefore = nullptr),[[deprecated("Use the version with explicit element type instead"
)]] static GetElementPtrInst *CreateInBounds( Value *Ptr, ArrayRef
<Value *> IdxList, const Twine &NameStr = "", Instruction
*InsertBefore = nullptr)
981 "Use the version with explicit element type instead")[[deprecated("Use the version with explicit element type instead"
)]] static GetElementPtrInst *CreateInBounds( Value *Ptr, ArrayRef
<Value *> IdxList, const Twine &NameStr = "", Instruction
*InsertBefore = nullptr)
{
982 return CreateInBounds(
983 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, IdxList,
984 NameStr, InsertBefore);
985 }
986
987 /// Create an "inbounds" getelementptr. See the documentation for the
988 /// "inbounds" flag in LangRef.html for details.
989 static GetElementPtrInst *
990 CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
991 const Twine &NameStr = "",
992 Instruction *InsertBefore = nullptr) {
993 GetElementPtrInst *GEP =
994 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
995 GEP->setIsInBounds(true);
996 return GEP;
997 }
998
  // Deprecated insert-at-end counterpart of the overload above.
999 LLVM_ATTRIBUTE_DEPRECATED(static GetElementPtrInst *CreateInBounds([[deprecated("Use the version with explicit element type instead"
)]] static GetElementPtrInst *CreateInBounds( Value *Ptr, ArrayRef
<Value *> IdxList, const Twine &NameStr, BasicBlock
*InsertAtEnd)
1000 Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr,[[deprecated("Use the version with explicit element type instead"
)]] static GetElementPtrInst *CreateInBounds( Value *Ptr, ArrayRef
<Value *> IdxList, const Twine &NameStr, BasicBlock
*InsertAtEnd)
1001 BasicBlock *InsertAtEnd),[[deprecated("Use the version with explicit element type instead"
)]] static GetElementPtrInst *CreateInBounds( Value *Ptr, ArrayRef
<Value *> IdxList, const Twine &NameStr, BasicBlock
*InsertAtEnd)
1002 "Use the version with explicit element type instead")[[deprecated("Use the version with explicit element type instead"
)]] static GetElementPtrInst *CreateInBounds( Value *Ptr, ArrayRef
<Value *> IdxList, const Twine &NameStr, BasicBlock
*InsertAtEnd)
{
1003 return CreateInBounds(
1004 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, IdxList,
1005 NameStr, InsertAtEnd);
1006 }
1007
1008 static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
1009 ArrayRef<Value *> IdxList,
1010 const Twine &NameStr,
1011 BasicBlock *InsertAtEnd) {
1012 GetElementPtrInst *GEP =
1013 Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
1014 GEP->setIsInBounds(true);
1015 return GEP;
1016 }
1017
1018 /// Transparently provide more efficient getOperand methods.
1019 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1020
1021 Type *getSourceElementType() const { return SourceElementType; }
1022
1023 void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
1024 void setResultElementType(Type *Ty) { ResultElementType = Ty; }
1025
1026 Type *getResultElementType() const {
1027 assert(cast<PointerType>(getType()->getScalarType())((void)0)
1028 ->isOpaqueOrPointeeTypeMatches(ResultElementType))((void)0);
1029 return ResultElementType;
1030 }
1031
1032 /// Returns the address space of this instruction's pointer type.
1033 unsigned getAddressSpace() const {
1034 // Note that this is always the same as the pointer operand's address space
1035 // and that is cheaper to compute, so cheat here.
1036 return getPointerAddressSpace();
1037 }
1038
1039 /// Returns the result type of a getelementptr with the given source
1040 /// element type and indexes.
1041 ///
1042 /// Null is returned if the indices are invalid for the specified
1043 /// source element type.
1044 static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
1045 static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
1046 static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
1047
1048 /// Return the type of the element at the given index of an indexable
1049 /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
1050 ///
1051 /// Returns null if the type can't be indexed, or the given index is not
1052 /// legal for the given type.
1053 static Type *getTypeAtIndex(Type *Ty, Value *Idx);
1054 static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);
1055
  // Operand 0 is the base pointer; operands [1, N) are the indices.
1056 inline op_iterator idx_begin() { return op_begin()+1; }
1057 inline const_op_iterator idx_begin() const { return op_begin()+1; }
1058 inline op_iterator idx_end() { return op_end(); }
1059 inline const_op_iterator idx_end() const { return op_end(); }
1060
1061 inline iterator_range<op_iterator> indices() {
1062 return make_range(idx_begin(), idx_end());
1063 }
1064
1065 inline iterator_range<const_op_iterator> indices() const {
1066 return make_range(idx_begin(), idx_end());
1067 }
1068
1069 Value *getPointerOperand() {
1070 return getOperand(0);
1071 }
1072 const Value *getPointerOperand() const {
1073 return getOperand(0);
1074 }
1075 static unsigned getPointerOperandIndex() {
1076 return 0U; // get index for modifying correct operand.
1077 }
1078
1079 /// Method to return the pointer operand as a
1080 /// PointerType.
1081 Type *getPointerOperandType() const {
1082 return getPointerOperand()->getType();
1083 }
1084
1085 /// Returns the address space of the pointer operand.
1086 unsigned getPointerAddressSpace() const {
1087 return getPointerOperandType()->getPointerAddressSpace();
1088 }
1089
1090 /// Returns the pointer type returned by the GEP
1091 /// instruction, which may be a vector of pointers.
  // The result is a vector of pointers when either the base pointer or any
  // index is a vector; otherwise it is a scalar pointer in the base's
  // address space.
1092 static Type *getGEPReturnType(Type *ElTy, Value *Ptr,
1093 ArrayRef<Value *> IdxList) {
1094 PointerType *OrigPtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
1095 unsigned AddrSpace = OrigPtrTy->getAddressSpace();
1096 Type *ResultElemTy = checkGEPType(getIndexedType(ElTy, IdxList));
1097 Type *PtrTy = OrigPtrTy->isOpaque()
1098 ? PointerType::get(OrigPtrTy->getContext(), AddrSpace)
1099 : PointerType::get(ResultElemTy, AddrSpace);
1100 // Vector GEP
1101 if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) {
1102 ElementCount EltCount = PtrVTy->getElementCount();
1103 return VectorType::get(PtrTy, EltCount);
1104 }
1105 for (Value *Index : IdxList)
1106 if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
1107 ElementCount EltCount = IndexVTy->getElementCount();
1108 return VectorType::get(PtrTy, EltCount);
1109 }
1110 // Scalar GEP
1111 return PtrTy;
1112 }
1113
1114 unsigned getNumIndices() const { // Note: always non-negative
1115 return getNumOperands() - 1;
1116 }
1117
1118 bool hasIndices() const {
1119 return getNumOperands() > 1;
1120 }
1121
1122 /// Return true if all of the indices of this GEP are
1123 /// zeros. If so, the result pointer and the first operand have the same
1124 /// value, just potentially different types.
1125 bool hasAllZeroIndices() const;
1126
1127 /// Return true if all of the indices of this GEP are
1128 /// constant integers. If so, the result pointer and the first operand have
1129 /// a constant offset between them.
1130 bool hasAllConstantIndices() const;
1131
1132 /// Set or clear the inbounds flag on this GEP instruction.
1133 /// See LangRef.html for the meaning of inbounds on a getelementptr.
1134 void setIsInBounds(bool b = true);
1135
1136 /// Determine whether the GEP has the inbounds flag.
1137 bool isInBounds() const;
1138
1139 /// Accumulate the constant address offset of this GEP if possible.
1140 ///
1141 /// This routine accepts an APInt into which it will accumulate the constant
1142 /// offset of this GEP if the GEP is in fact constant. If the GEP is not
1143 /// all-constant, it returns false and the value of the offset APInt is
1144 /// undefined (it is *not* preserved!). The APInt passed into this routine
1145 /// must be at least as wide as the IntPtr type for the address space of
1146 /// the base GEP pointer.
1147 bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
1148 bool collectOffset(const DataLayout &DL, unsigned BitWidth,
1149 MapVector<Value *, APInt> &VariableOffsets,
1150 APInt &ConstantOffset) const;
1151 // Methods for support type inquiry through isa, cast, and dyn_cast:
1152 static bool classof(const Instruction *I) {
1153 return (I->getOpcode() == Instruction::GetElementPtr);
1154 }
1155 static bool classof(const Value *V) {
1156 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1157 }
1158};
1159
// GEPs have a variable operand count with a minimum of 1 (the base pointer);
// indices follow as additional operands.
1160template <>
1161struct OperandTraits<GetElementPtrInst> :
1162 public VariadicOperandTraits<GetElementPtrInst, 1> {
1163};
1164
// Inline constructor (insert-before form). The operand Use array is placed
// immediately before the instruction object (note op_end(this) - Values);
// the base Instruction ctor gets the computed GEP result type, then init()
// stores Ptr and the indices into the operand list and sets the name.
1165GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1166 ArrayRef<Value *> IdxList, unsigned Values,
1167 const Twine &NameStr,
1168 Instruction *InsertBefore)
1169 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1170 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1171 Values, InsertBefore),
1172 SourceElementType(PointeeType),
1173 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1174 assert(cast<PointerType>(getType()->getScalarType())((void)0)
1175 ->isOpaqueOrPointeeTypeMatches(ResultElementType))((void)0);
1176 init(Ptr, IdxList, NameStr);
1177}
1178
// Inline constructor (append-to-block form); identical to the insert-before
// overload except the instruction is appended to InsertAtEnd.
1179GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1180 ArrayRef<Value *> IdxList, unsigned Values,
1181 const Twine &NameStr,
1182 BasicBlock *InsertAtEnd)
1183 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1184 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1185 Values, InsertAtEnd),
1186 SourceElementType(PointeeType),
1187 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1188 assert(cast<PointerType>(getType()->getScalarType())((void)0)
1189 ->isOpaqueOrPointeeTypeMatches(ResultElementType))((void)0);
1190 init(Ptr, IdxList, NameStr);
1191}
1192
// Operand accessor bodies generated by DEFINE_TRANSPARENT_OPERAND_ACCESSORS
// for GetElementPtrInst; the analyzer dump shows the macro expansion inline.
// ((void)0) tokens are NDEBUG-expanded asserts.
1193DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)GetElementPtrInst::op_iterator GetElementPtrInst::op_begin() {
return OperandTraits<GetElementPtrInst>::op_begin(this
); } GetElementPtrInst::const_op_iterator GetElementPtrInst::
op_begin() const { return OperandTraits<GetElementPtrInst>
::op_begin(const_cast<GetElementPtrInst*>(this)); } GetElementPtrInst
::op_iterator GetElementPtrInst::op_end() { return OperandTraits
<GetElementPtrInst>::op_end(this); } GetElementPtrInst::
const_op_iterator GetElementPtrInst::op_end() const { return OperandTraits
<GetElementPtrInst>::op_end(const_cast<GetElementPtrInst
*>(this)); } Value *GetElementPtrInst::getOperand(unsigned
i_nocapture) const { ((void)0); return cast_or_null<Value
>( OperandTraits<GetElementPtrInst>::op_begin(const_cast
<GetElementPtrInst*>(this))[i_nocapture].get()); } void
GetElementPtrInst::setOperand(unsigned i_nocapture, Value *Val_nocapture
) { ((void)0); OperandTraits<GetElementPtrInst>::op_begin
(this)[i_nocapture] = Val_nocapture; } unsigned GetElementPtrInst
::getNumOperands() const { return OperandTraits<GetElementPtrInst
>::operands(this); } template <int Idx_nocapture> Use
&GetElementPtrInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
GetElementPtrInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
1194
1195//===----------------------------------------------------------------------===//
1196// ICmpInst Class
1197//===----------------------------------------------------------------------===//
1198
1199/// This instruction compares its operands according to the predicate given
1200/// to the constructor. It only operates on integers or pointers. The operands
1201/// must be identical types.
1202/// Represent an integer comparison operator.
1203class ICmpInst: public CmpInst {
  // Sanity checks run from the constructors. In this NDEBUG build (-D NDEBUG
  // on the analyzer command line) every assert expands to ((void)0), so
  // AssertOK is a no-op at runtime.
1204 void AssertOK() {
1205 assert(isIntPredicate() &&((void)0)
1206 "Invalid ICmp predicate value")((void)0);
1207 assert(getOperand(0)->getType() == getOperand(1)->getType() &&((void)0)
1208 "Both operands to ICmp instruction are not of the same type!")((void)0);
1209 // Check that the operands are the right type
1210 assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||((void)0)
1211 getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&((void)0)
1212 "Invalid operand types for ICmp instruction")((void)0);
1213 }
1214
1215protected:
1216 // Note: Instruction needs to be a friend here to call cloneImpl.
1217 friend class Instruction;
1218
1219 /// Clone an identical ICmpInst
1220 ICmpInst *cloneImpl() const;
1221
1222public:
1223 /// Constructor with insert-before-instruction semantics.
1224 ICmpInst(
1225 Instruction *InsertBefore, ///< Where to insert
1226 Predicate pred, ///< The predicate to use for the comparison
1227 Value *LHS, ///< The left-hand-side of the expression
1228 Value *RHS, ///< The right-hand-side of the expression
1229 const Twine &NameStr = "" ///< Name of the instruction
1230 ) : CmpInst(makeCmpResultType(LHS->getType()),
1231 Instruction::ICmp, pred, LHS, RHS, NameStr,
1232 InsertBefore) {
1233#ifndef NDEBUG1
1234 AssertOK();
1235#endif
1236 }
1237
1238 /// Constructor with insert-at-end semantics.
1239 ICmpInst(
1240 BasicBlock &InsertAtEnd, ///< Block to insert into.
1241 Predicate pred, ///< The predicate to use for the comparison
1242 Value *LHS, ///< The left-hand-side of the expression
1243 Value *RHS, ///< The right-hand-side of the expression
1244 const Twine &NameStr = "" ///< Name of the instruction
1245 ) : CmpInst(makeCmpResultType(LHS->getType()),
1246 Instruction::ICmp, pred, LHS, RHS, NameStr,
1247 &InsertAtEnd) {
1248#ifndef NDEBUG1
1249 AssertOK();
1250#endif
1251 }
1252
1253 /// Constructor with no-insertion semantics
  // NOTE(review): every ICmpInst constructor dereferences LHS unconditionally
  // via LHS->getType(); the static analyzer reports "Called C++ object
  // pointer is null" here (file line 1259, column 33) for an analysis path
  // originating in CGExprScalar.cpp where LHS can be null. LHS and RHS must
  // be non-null — the real fix, if any, belongs at the flagged call site.
1254 ICmpInst(
1255 Predicate pred, ///< The predicate to use for the comparison
1256 Value *LHS, ///< The left-hand-side of the expression
1257 Value *RHS, ///< The right-hand-side of the expression
1258 const Twine &NameStr = "" ///< Name of the instruction
1259 ) : CmpInst(makeCmpResultType(LHS->getType()),
63
Called C++ object pointer is null
1260 Instruction::ICmp, pred, LHS, RHS, NameStr) {
1261#ifndef NDEBUG1
1262 AssertOK();
1263#endif
1264 }
1265
1266 /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1267 /// @returns the predicate that would be the result if the operand were
1268 /// regarded as signed.
1269 /// Return the signed version of the predicate
1270 Predicate getSignedPredicate() const {
1271 return getSignedPredicate(getPredicate());
1272 }
1273
1274 /// This is a static version that you can use without an instruction.
1275 /// Return the signed version of the predicate.
1276 static Predicate getSignedPredicate(Predicate pred);
1277
1278 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1279 /// @returns the predicate that would be the result if the operand were
1280 /// regarded as unsigned.
1281 /// Return the unsigned version of the predicate
1282 Predicate getUnsignedPredicate() const {
1283 return getUnsignedPredicate(getPredicate());
1284 }
1285
1286 /// This is a static version that you can use without an instruction.
1287 /// Return the unsigned version of the predicate.
1288 static Predicate getUnsignedPredicate(Predicate pred);
1289
1290 /// Return true if this predicate is either EQ or NE. This also
1291 /// tests for commutativity.
1292 static bool isEquality(Predicate P) {
1293 return P == ICMP_EQ || P == ICMP_NE;
1294 }
1295
1296 /// Return true if this predicate is either EQ or NE. This also
1297 /// tests for commutativity.
1298 bool isEquality() const {
1299 return isEquality(getPredicate());
1300 }
1301
1302 /// @returns true if the predicate of this ICmpInst is commutative
1303 /// Determine if this relation is commutative.
1304 bool isCommutative() const { return isEquality(); }
1305
1306 /// Return true if the predicate is relational (not EQ or NE).
1307 ///
1308 bool isRelational() const {
1309 return !isEquality();
1310 }
1311
1312 /// Return true if the predicate is relational (not EQ or NE).
1313 ///
1314 static bool isRelational(Predicate P) {
1315 return !isEquality(P);
1316 }
1317
1318 /// Return true if the predicate is SGT or UGT.
1319 ///
1320 static bool isGT(Predicate P) {
1321 return P == ICMP_SGT || P == ICMP_UGT;
1322 }
1323
1324 /// Return true if the predicate is SLT or ULT.
1325 ///
1326 static bool isLT(Predicate P) {
1327 return P == ICMP_SLT || P == ICMP_ULT;
1328 }
1329
1330 /// Return true if the predicate is SGE or UGE.
1331 ///
1332 static bool isGE(Predicate P) {
1333 return P == ICMP_SGE || P == ICMP_UGE;
1334 }
1335
1336 /// Return true if the predicate is SLE or ULE.
1337 ///
1338 static bool isLE(Predicate P) {
1339 return P == ICMP_SLE || P == ICMP_ULE;
1340 }
1341
1342 /// Exchange the two operands to this instruction in such a way that it does
1343 /// not modify the semantics of the instruction. The predicate value may be
1344 /// changed to retain the same result if the predicate is order dependent
1345 /// (e.g. ult).
1346 /// Swap operands and adjust predicate.
1347 void swapOperands() {
1348 setPredicate(getSwappedPredicate());
1349 Op<0>().swap(Op<1>());
1350 }
1351
1352 // Methods for support type inquiry through isa, cast, and dyn_cast:
1353 static bool classof(const Instruction *I) {
1354 return I->getOpcode() == Instruction::ICmp;
1355 }
1356 static bool classof(const Value *V) {
1357 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1358 }
1359};
1360
1361//===----------------------------------------------------------------------===//
1362// FCmpInst Class
1363//===----------------------------------------------------------------------===//
1364
1365/// This instruction compares its operands according to the predicate given
1366/// to the constructor. It only operates on floating point values or packed
1367/// vectors of floating point values. The operands must be identical types.
1368/// Represents a floating point comparison operator.
1369class FCmpInst: public CmpInst {
  // Constructor-time sanity checks; compiled out in this NDEBUG build (the
  // asserts show as ((void)0) expansions in the dump).
1370 void AssertOK() {
1371 assert(isFPPredicate() && "Invalid FCmp predicate value")((void)0);
1372 assert(getOperand(0)->getType() == getOperand(1)->getType() &&((void)0)
1373 "Both operands to FCmp instruction are not of the same type!")((void)0);
1374 // Check that the operands are the right type
1375 assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&((void)0)
1376 "Invalid operand types for FCmp instruction")((void)0);
1377 }
1378
1379protected:
1380 // Note: Instruction needs to be a friend here to call cloneImpl.
1381 friend class Instruction;
1382
1383 /// Clone an identical FCmpInst
1384 FCmpInst *cloneImpl() const;
1385
1386public:
  // NOTE(review): as with ICmpInst, all constructors dereference LHS
  // unconditionally via LHS->getType(); callers must pass non-null LHS/RHS.
1387 /// Constructor with insert-before-instruction semantics.
1388 FCmpInst(
1389 Instruction *InsertBefore, ///< Where to insert
1390 Predicate pred, ///< The predicate to use for the comparison
1391 Value *LHS, ///< The left-hand-side of the expression
1392 Value *RHS, ///< The right-hand-side of the expression
1393 const Twine &NameStr = "" ///< Name of the instruction
1394 ) : CmpInst(makeCmpResultType(LHS->getType()),
1395 Instruction::FCmp, pred, LHS, RHS, NameStr,
1396 InsertBefore) {
1397 AssertOK();
1398 }
1399
1400 /// Constructor with insert-at-end semantics.
1401 FCmpInst(
1402 BasicBlock &InsertAtEnd, ///< Block to insert into.
1403 Predicate pred, ///< The predicate to use for the comparison
1404 Value *LHS, ///< The left-hand-side of the expression
1405 Value *RHS, ///< The right-hand-side of the expression
1406 const Twine &NameStr = "" ///< Name of the instruction
1407 ) : CmpInst(makeCmpResultType(LHS->getType()),
1408 Instruction::FCmp, pred, LHS, RHS, NameStr,
1409 &InsertAtEnd) {
1410 AssertOK();
1411 }
1412
1413 /// Constructor with no-insertion semantics
1414 FCmpInst(
1415 Predicate Pred, ///< The predicate to use for the comparison
1416 Value *LHS, ///< The left-hand-side of the expression
1417 Value *RHS, ///< The right-hand-side of the expression
1418 const Twine &NameStr = "", ///< Name of the instruction
1419 Instruction *FlagsSource = nullptr
1420 ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
1421 RHS, NameStr, nullptr, FlagsSource) {
1422 AssertOK();
1423 }
1424
1425 /// @returns true if the predicate of this instruction is EQ or NE.
1426 /// Determine if this is an equality predicate.
1427 static bool isEquality(Predicate Pred) {
1428 return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
1429 Pred == FCMP_UNE;
1430 }
1431
1432 /// @returns true if the predicate of this instruction is EQ or NE.
1433 /// Determine if this is an equality predicate.
1434 bool isEquality() const { return isEquality(getPredicate()); }
1435
1436 /// @returns true if the predicate of this instruction is commutative.
1437 /// Determine if this is a commutative predicate.
1438 bool isCommutative() const {
1439 return isEquality() ||
1440 getPredicate() == FCMP_FALSE ||
1441 getPredicate() == FCMP_TRUE ||
1442 getPredicate() == FCMP_ORD ||
1443 getPredicate() == FCMP_UNO;
1444 }
1445
1446 /// @returns true if the predicate is relational (not EQ or NE).
1447 /// Determine if this a relational predicate.
1448 bool isRelational() const { return !isEquality(); }
1449
1450 /// Exchange the two operands to this instruction in such a way that it does
1451 /// not modify the semantics of the instruction. The predicate value may be
1452 /// changed to retain the same result if the predicate is order dependent
1453 /// (e.g. ult).
1454 /// Swap operands and adjust predicate.
1455 void swapOperands() {
1456 setPredicate(getSwappedPredicate());
1457 Op<0>().swap(Op<1>());
1458 }
1459
1460 /// Methods for support type inquiry through isa, cast, and dyn_cast:
1461 static bool classof(const Instruction *I) {
1462 return I->getOpcode() == Instruction::FCmp;
1463 }
1464 static bool classof(const Value *V) {
1465 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1466 }
1467};
1468
1469//===----------------------------------------------------------------------===//
1470/// This class represents a function call, abstracting a target
1471/// machine's calling convention. This class uses low bit of the SubClassData
1472/// field to indicate whether or not this is a tail call. The rest of the bits
1473/// hold the calling convention of the call.
1474///
1475class CallInst : public CallBase {
1476 CallInst(const CallInst &CI);
1477
1478 /// Construct a CallInst given a range of arguments.
1479 /// Construct a CallInst from a range of arguments
1480 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1481 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1482 Instruction *InsertBefore);
1483
1484 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1485 const Twine &NameStr, Instruction *InsertBefore)
1486 : CallInst(Ty, Func, Args, None, NameStr, InsertBefore) {}
1487
1488 /// Construct a CallInst given a range of arguments.
1489 /// Construct a CallInst from a range of arguments
1490 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1491 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1492 BasicBlock *InsertAtEnd);
1493
1494 explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
1495 Instruction *InsertBefore);
1496
1497 CallInst(FunctionType *ty, Value *F, const Twine &NameStr,
1498 BasicBlock *InsertAtEnd);
1499
1500 void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
1501 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
1502 void init(FunctionType *FTy, Value *Func, const Twine &NameStr);
1503
1504 /// Compute the number of operands to allocate.
1505 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
1506 // We need one operand for the called function, plus the input operand
1507 // counts provided.
1508 return 1 + NumArgs + NumBundleInputs;
1509 }
1510
1511protected:
1512 // Note: Instruction needs to be a friend here to call cloneImpl.
1513 friend class Instruction;
1514
1515 CallInst *cloneImpl() const;
1516
1517public:
1518 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
1519 Instruction *InsertBefore = nullptr) {
1520 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore);
1521 }
1522
1523 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1524 const Twine &NameStr,
1525 Instruction *InsertBefore = nullptr) {
1526 return new (ComputeNumOperands(Args.size()))
1527 CallInst(Ty, Func, Args, None, NameStr, InsertBefore);
1528 }
1529
1530 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1531 ArrayRef<OperandBundleDef> Bundles = None,
1532 const Twine &NameStr = "",
1533 Instruction *InsertBefore = nullptr) {
1534 const int NumOperands =
1535 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1536 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1537
1538 return new (NumOperands, DescriptorBytes)
1539 CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
1540 }
1541
1542 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
1543 BasicBlock *InsertAtEnd) {
1544 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd);
1545 }
1546
1547 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1548 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1549 return new (ComputeNumOperands(Args.size()))
1550 CallInst(Ty, Func, Args, None, NameStr, InsertAtEnd);
1551 }
1552
1553 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1554 ArrayRef<OperandBundleDef> Bundles,
1555 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1556 const int NumOperands =
1557 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1558 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1559
1560 return new (NumOperands, DescriptorBytes)
1561 CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
1562 }
1563
1564 static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
1565 Instruction *InsertBefore = nullptr) {
1566 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1567 InsertBefore);
1568 }
1569
1570 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1571 ArrayRef<OperandBundleDef> Bundles = None,
1572 const Twine &NameStr = "",
1573 Instruction *InsertBefore = nullptr) {
1574 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1575 NameStr, InsertBefore);
1576 }
1577
1578 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1579 const Twine &NameStr,
1580 Instruction *InsertBefore = nullptr) {
1581 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1582 InsertBefore);
1583 }
1584
1585 static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
1586 BasicBlock *InsertAtEnd) {
1587 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1588 InsertAtEnd);
1589 }
1590
1591 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1592 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1593 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1594 InsertAtEnd);
1595 }
1596
1597 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1598 ArrayRef<OperandBundleDef> Bundles,
1599 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1600 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1601 NameStr, InsertAtEnd);
1602 }
1603
  /// Create a clone of \p CI with a different set of operand bundles and
  /// insert it before \p InsertPt.
  ///
  /// The returned call instruction is identical \p CI in every way except that
  /// the operand bundles for the new instruction are set to the operand bundles
  /// in \p Bundles.
  static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
                          Instruction *InsertPt = nullptr);

  /// Generate the IR for a call to malloc:
  /// 1. Compute the malloc call's argument as the specified type's size,
  ///    possibly multiplied by the array size if the array size is not
  ///    constant 1.
  /// 2. Call malloc with that argument.
  /// 3. Bitcast the result of the malloc call to the specified type.
  ///
  /// Four overloads: insertion point is either an Instruction to insert
  /// before or a BasicBlock to append to, each with or without operand
  /// bundles. \p MallocF, if given, overrides the module's "malloc" function.
  static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
                                   Type *AllocTy, Value *AllocSize,
                                   Value *ArraySize = nullptr,
                                   Function *MallocF = nullptr,
                                   const Twine &Name = "");
  static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
                                   Type *AllocTy, Value *AllocSize,
                                   Value *ArraySize = nullptr,
                                   Function *MallocF = nullptr,
                                   const Twine &Name = "");
  static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
                                   Type *AllocTy, Value *AllocSize,
                                   Value *ArraySize = nullptr,
                                   ArrayRef<OperandBundleDef> Bundles = None,
                                   Function *MallocF = nullptr,
                                   const Twine &Name = "");
  static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
                                   Type *AllocTy, Value *AllocSize,
                                   Value *ArraySize = nullptr,
                                   ArrayRef<OperandBundleDef> Bundles = None,
                                   Function *MallocF = nullptr,
                                   const Twine &Name = "");

  /// Generate the IR for a call to the builtin free function.
  /// Overloads mirror CreateMalloc: insert-before vs. append, with or
  /// without operand bundles.
  static Instruction *CreateFree(Value *Source, Instruction *InsertBefore);
  static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd);
  static Instruction *CreateFree(Value *Source,
                                 ArrayRef<OperandBundleDef> Bundles,
                                 Instruction *InsertBefore);
  static Instruction *CreateFree(Value *Source,
                                 ArrayRef<OperandBundleDef> Bundles,
                                 BasicBlock *InsertAtEnd);
1650
  // Tail-call marker carried on a call instruction.
  // Note that 'musttail' implies 'tail'.
  enum TailCallKind : unsigned {
    TCK_None = 0,
    TCK_Tail = 1,
    TCK_MustTail = 2,
    TCK_NoTail = 3,
    TCK_LAST = TCK_NoTail
  };

  // The tail-call kind is packed into the low 2 bits of the instruction's
  // subclass data, directly below CallBase's calling-convention field.
  using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>;
  static_assert(
      Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
      "Bitfields must be contiguous");
1665 TailCallKind getTailCallKind() const {
1666 return getSubclassData<TailCallKindField>();
1667 }
1668
1669 bool isTailCall() const {
1670 TailCallKind Kind = getTailCallKind();
1671 return Kind == TCK_Tail || Kind == TCK_MustTail;
1672 }
1673
1674 bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }
1675
1676 bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }
1677
1678 void setTailCallKind(TailCallKind TCK) {
1679 setSubclassData<TailCallKindField>(TCK);
1680 }
1681
1682 void setTailCall(bool IsTc = true) {
1683 setTailCallKind(IsTc ? TCK_Tail : TCK_None);
1684 }
1685
  /// Return true if the call can return twice, i.e. it carries the
  /// 'returns_twice' function attribute (e.g. a call to setjmp).
  bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
  /// Mark this call as able to return twice by adding the 'returns_twice'
  /// attribute at the function index of the attribute list.
  void setCanReturnTwice() {
    addAttribute(AttributeList::FunctionIndex, Attribute::ReturnsTwice);
  }
1691
1692 // Methods for support type inquiry through isa, cast, and dyn_cast:
1693 static bool classof(const Instruction *I) {
1694 return I->getOpcode() == Instruction::Call;
1695 }
1696 static bool classof(const Value *V) {
1697 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1698 }
1699
1700 /// Updates profile metadata by scaling it by \p S / \p T.
1701 void updateProfWeight(uint64_t S, uint64_t T);
1702
private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }
};
1711
// Out-of-line inline constructor: build a call appended to \p InsertAtEnd.
CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                   ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                   BasicBlock *InsertAtEnd)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               // Operands are co-allocated immediately before the
               // instruction: the arguments, then the bundle inputs, then
               // the callee (the trailing +1).
               OperandTraits<CallBase>::op_end(this) -
                   (Args.size() + CountBundleInputs(Bundles) + 1),
               unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
               InsertAtEnd) {
  init(Ty, Func, Args, Bundles, NameStr);
}
1722
// Out-of-line inline constructor: build a call inserted before
// \p InsertBefore.
CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                   ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                   Instruction *InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               // Same co-allocated operand layout as the BasicBlock variant:
               // arguments, bundle inputs, then the callee (+1).
               OperandTraits<CallBase>::op_end(this) -
                   (Args.size() + CountBundleInputs(Bundles) + 1),
               unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
               InsertBefore) {
  init(Ty, Func, Args, Bundles, NameStr);
}
1733
1734//===----------------------------------------------------------------------===//
1735// SelectInst Class
1736//===----------------------------------------------------------------------===//
1737
1738/// This class represents the LLVM 'select' instruction.
1739///
class SelectInst : public Instruction {
  // Private constructors; use the static Create factories instead.
  SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
             Instruction *InsertBefore)
      : Instruction(S1->getType(), Instruction::Select,
                    &Op<0>(), 3, InsertBefore) {
    init(C, S1, S2);
    setName(NameStr);
  }

  SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
             BasicBlock *InsertAtEnd)
      : Instruction(S1->getType(), Instruction::Select,
                    &Op<0>(), 3, InsertAtEnd) {
    init(C, S1, S2);
    setName(NameStr);
  }

  // Wire up the three operands; callers must pass operands that satisfy
  // areInvalidOperands (checked here in asserts builds only).
  void init(Value *C, Value *S1, Value *S2) {
    assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
    Op<0>() = C;
    Op<1>() = S1;
    Op<2>() = S2;
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  SelectInst *cloneImpl() const;

public:
  static SelectInst *Create(Value *C, Value *S1, Value *S2,
                            const Twine &NameStr = "",
                            Instruction *InsertBefore = nullptr,
                            Instruction *MDFrom = nullptr) {
    // Optionally copy metadata (e.g. prof weights) from an existing
    // instruction onto the new select.
    SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
    if (MDFrom)
      Sel->copyMetadata(*MDFrom);
    return Sel;
  }

  static SelectInst *Create(Value *C, Value *S1, Value *S2,
                            const Twine &NameStr,
                            BasicBlock *InsertAtEnd) {
    return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
  }

  // Operand order: <0> = condition, <1> = true value, <2> = false value.
  const Value *getCondition() const { return Op<0>(); }
  const Value *getTrueValue() const { return Op<1>(); }
  const Value *getFalseValue() const { return Op<2>(); }
  Value *getCondition() { return Op<0>(); }
  Value *getTrueValue() { return Op<1>(); }
  Value *getFalseValue() { return Op<2>(); }

  void setCondition(Value *V) { Op<0>() = V; }
  void setTrueValue(Value *V) { Op<1>() = V; }
  void setFalseValue(Value *V) { Op<2>() = V; }

  /// Swap the true and false values of the select instruction.
  /// This doesn't swap prof metadata.
  void swapValues() { Op<1>().swap(Op<2>()); }

  /// Return a string if the specified operands are invalid
  /// for a select operation, otherwise return null.
  static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  OtherOps getOpcode() const {
    return static_cast<OtherOps>(Instruction::getOpcode());
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Select;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
1821
// SelectInst always has exactly three operands (condition, true value,
// false value).
template <>
struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
};
1825
// Macro-generated out-of-line definitions of SelectInst's operand accessors
// (op_begin/op_end, getOperand/setOperand, getNumOperands, Op<>).
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)
1827
1828//===----------------------------------------------------------------------===//
1829// VAArgInst Class
1830//===----------------------------------------------------------------------===//
1831
1832/// This class represents the va_arg llvm instruction, which returns
1833/// an argument of the specified type given a va_list and increments that list
1834///
1835class VAArgInst : public UnaryInstruction {
1836protected:
1837 // Note: Instruction needs to be a friend here to call cloneImpl.
1838 friend class Instruction;
1839
1840 VAArgInst *cloneImpl() const;
1841
1842public:
1843 VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
1844 Instruction *InsertBefore = nullptr)
1845 : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
1846 setName(NameStr);
1847 }
1848
1849 VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
1850 BasicBlock *InsertAtEnd)
1851 : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
1852 setName(NameStr);
1853 }
1854
1855 Value *getPointerOperand() { return getOperand(0); }
1856 const Value *getPointerOperand() const { return getOperand(0); }
1857 static unsigned getPointerOperandIndex() { return 0U; }
1858
1859 // Methods for support type inquiry through isa, cast, and dyn_cast:
1860 static bool classof(const Instruction *I) {
1861 return I->getOpcode() == VAArg;
1862 }
1863 static bool classof(const Value *V) {
1864 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1865 }
1866};
1867
1868//===----------------------------------------------------------------------===//
1869// ExtractElementInst Class
1870//===----------------------------------------------------------------------===//
1871
1872/// This instruction extracts a single (scalar)
1873/// element from a VectorType value
1874///
class ExtractElementInst : public Instruction {
  // Private constructors; use the static Create factories instead.
  ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
                     Instruction *InsertBefore = nullptr);
  ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
                     BasicBlock *InsertAtEnd);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  ExtractElementInst *cloneImpl() const;

public:
  static ExtractElementInst *Create(Value *Vec, Value *Idx,
                                    const Twine &NameStr = "",
                                    Instruction *InsertBefore = nullptr) {
    return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
  }

  static ExtractElementInst *Create(Value *Vec, Value *Idx,
                                    const Twine &NameStr,
                                    BasicBlock *InsertAtEnd) {
    return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
  }

  /// Return true if an extractelement instruction can be
  /// formed with the specified operands.
  static bool isValidOperands(const Value *Vec, const Value *Idx);

  // Operand order: <0> = source vector, <1> = element index.
  Value *getVectorOperand() { return Op<0>(); }
  Value *getIndexOperand() { return Op<1>(); }
  const Value *getVectorOperand() const { return Op<0>(); }
  const Value *getIndexOperand() const { return Op<1>(); }

  VectorType *getVectorOperandType() const {
    return cast<VectorType>(getVectorOperand()->getType());
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::ExtractElement;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
1924
// ExtractElementInst always has exactly two operands (vector, index).
template <>
struct OperandTraits<ExtractElementInst> :
  public FixedNumOperandTraits<ExtractElementInst, 2> {
};
1929
// Macro-generated out-of-line definitions of ExtractElementInst's operand
// accessors (op_begin/op_end, getOperand/setOperand, getNumOperands, Op<>).
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)
1931
1932//===----------------------------------------------------------------------===//
1933// InsertElementInst Class
1934//===----------------------------------------------------------------------===//
1935
1936/// This instruction inserts a single (scalar)
1937/// element into a VectorType value
1938///
class InsertElementInst : public Instruction {
  // Private constructors; use the static Create factories instead.
  InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
                    const Twine &NameStr = "",
                    Instruction *InsertBefore = nullptr);
  InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
                    BasicBlock *InsertAtEnd);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  InsertElementInst *cloneImpl() const;

public:
  static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
                                   const Twine &NameStr = "",
                                   Instruction *InsertBefore = nullptr) {
    return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
  }

  static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
                                   const Twine &NameStr,
                                   BasicBlock *InsertAtEnd) {
    return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
  }

  /// Return true if an insertelement instruction can be
  /// formed with the specified operands.
  static bool isValidOperands(const Value *Vec, const Value *NewElt,
                              const Value *Idx);

  /// Overload to return most specific vector type.
  ///
  VectorType *getType() const {
    return cast<VectorType>(Instruction::getType());
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::InsertElement;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
1987
// InsertElementInst always has exactly three operands (vector, new element,
// index).
template <>
struct OperandTraits<InsertElementInst> :
  public FixedNumOperandTraits<InsertElementInst, 3> {
};
1992
// Macro-generated out-of-line definitions of InsertElementInst's operand
// accessors (op_begin/op_end, getOperand/setOperand, getNumOperands, Op<>).
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)
1994
1995//===----------------------------------------------------------------------===//
1996// ShuffleVectorInst Class
1997//===----------------------------------------------------------------------===//
1998
1999constexpr int UndefMaskElem = -1;
2000
2001/// This instruction constructs a fixed permutation of two
2002/// input vectors.
2003///
2004/// For each element of the result vector, the shuffle mask selects an element
2005/// from one of the input vectors to copy to the result. Non-negative elements
2006/// in the mask represent an index into the concatenated pair of input vectors.
2007/// UndefMaskElem (-1) specifies that the result element is undefined.
2008///
2009/// For scalable vectors, all the elements of the mask must be 0 or -1. This
2010/// requirement may be relaxed in the future.
2011class ShuffleVectorInst : public Instruction {
2012 SmallVector<int, 4> ShuffleMask;
2013 Constant *ShuffleMaskForBitcode;
2014
2015protected:
2016 // Note: Instruction needs to be a friend here to call cloneImpl.
2017 friend class Instruction;
2018
2019 ShuffleVectorInst *cloneImpl() const;
2020
2021public:
2022 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2023 const Twine &NameStr = "",
2024 Instruction *InsertBefor = nullptr);
2025 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2026 const Twine &NameStr, BasicBlock *InsertAtEnd);
2027 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2028 const Twine &NameStr = "",
2029 Instruction *InsertBefor = nullptr);
2030 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2031 const Twine &NameStr, BasicBlock *InsertAtEnd);
2032
2033 void *operator new(size_t S) { return User::operator new(S, 2); }
2034 void operator delete(void *Ptr) { return User::operator delete(Ptr); }
2035
2036 /// Swap the operands and adjust the mask to preserve the semantics
2037 /// of the instruction.
2038 void commute();
2039
2040 /// Return true if a shufflevector instruction can be
2041 /// formed with the specified operands.
2042 static bool isValidOperands(const Value *V1, const Value *V2,
2043 const Value *Mask);
2044 static bool isValidOperands(const Value *V1, const Value *V2,
2045 ArrayRef<int> Mask);
2046
2047 /// Overload to return most specific vector type.
2048 ///
2049 VectorType *getType() const {
2050 return cast<VectorType>(Instruction::getType());
2051 }
2052
2053 /// Transparently provide more efficient getOperand methods.
2054 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2055
2056 /// Return the shuffle mask value of this instruction for the given element
2057 /// index. Return UndefMaskElem if the element is undef.
2058 int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }
2059
2060 /// Convert the input shuffle mask operand to a vector of integers. Undefined
2061 /// elements of the mask are returned as UndefMaskElem.
2062 static void getShuffleMask(const Constant *Mask,
2063 SmallVectorImpl<int> &Result);
2064
2065 /// Return the mask for this instruction as a vector of integers. Undefined
2066 /// elements of the mask are returned as UndefMaskElem.
2067 void getShuffleMask(SmallVectorImpl<int> &Result) const {
2068 Result.assign(ShuffleMask.begin(), ShuffleMask.end());
2069 }
2070
2071 /// Return the mask for this instruction, for use in bitcode.
2072 ///
2073 /// TODO: This is temporary until we decide a new bitcode encoding for
2074 /// shufflevector.
2075 Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }
2076
2077 static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
2078 Type *ResultTy);
2079
2080 void setShuffleMask(ArrayRef<int> Mask);
2081
2082 ArrayRef<int> getShuffleMask() const { return ShuffleMask; }
2083
2084 /// Return true if this shuffle returns a vector with a different number of
2085 /// elements than its source vectors.
2086 /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
2087 /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
2088 bool changesLength() const {
2089 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2090 ->getElementCount()
2091 .getKnownMinValue();
2092 unsigned NumMaskElts = ShuffleMask.size();
2093 return NumSourceElts != NumMaskElts;
2094 }
2095
2096 /// Return true if this shuffle returns a vector with a greater number of
2097 /// elements than its source vectors.
2098 /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
2099 bool increasesLength() const {
2100 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2101 ->getElementCount()
2102 .getKnownMinValue();
2103 unsigned NumMaskElts = ShuffleMask.size();
2104 return NumSourceElts < NumMaskElts;
2105 }
2106
2107 /// Return true if this shuffle mask chooses elements from exactly one source
2108 /// vector.
2109 /// Example: <7,5,undef,7>
2110 /// This assumes that vector operands are the same length as the mask.
2111 static bool isSingleSourceMask(ArrayRef<int> Mask);
2112 static bool isSingleSourceMask(const Constant *Mask) {
2113 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")((void)0);
2114 SmallVector<int, 16> MaskAsInts;
2115 getShuffleMask(Mask, MaskAsInts);
2116 return isSingleSourceMask(MaskAsInts);
2117 }
2118
2119 /// Return true if this shuffle chooses elements from exactly one source
2120 /// vector without changing the length of that vector.
2121 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
2122 /// TODO: Optionally allow length-changing shuffles.
2123 bool isSingleSource() const {
2124 return !changesLength() && isSingleSourceMask(ShuffleMask);
2125 }
2126
2127 /// Return true if this shuffle mask chooses elements from exactly one source
2128 /// vector without lane crossings. A shuffle using this mask is not
2129 /// necessarily a no-op because it may change the number of elements from its
2130 /// input vectors or it may provide demanded bits knowledge via undef lanes.
2131 /// Example: <undef,undef,2,3>
2132 static bool isIdentityMask(ArrayRef<int> Mask);
2133 static bool isIdentityMask(const Constant *Mask) {
2134 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")((void)0);
2135 SmallVector<int, 16> MaskAsInts;
2136 getShuffleMask(Mask, MaskAsInts);
2137 return isIdentityMask(MaskAsInts);
2138 }
2139
2140 /// Return true if this shuffle chooses elements from exactly one source
2141 /// vector without lane crossings and does not change the number of elements
2142 /// from its input vectors.
2143 /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
2144 bool isIdentity() const {
2145 return !changesLength() && isIdentityMask(ShuffleMask);
2146 }
2147
2148 /// Return true if this shuffle lengthens exactly one source vector with
2149 /// undefs in the high elements.
2150 bool isIdentityWithPadding() const;
2151
2152 /// Return true if this shuffle extracts the first N elements of exactly one
2153 /// source vector.
2154 bool isIdentityWithExtract() const;
2155
2156 /// Return true if this shuffle concatenates its 2 source vectors. This
2157 /// returns false if either input is undefined. In that case, the shuffle is
2158 /// is better classified as an identity with padding operation.
2159 bool isConcat() const;
2160
2161 /// Return true if this shuffle mask chooses elements from its source vectors
2162 /// without lane crossings. A shuffle using this mask would be
2163 /// equivalent to a vector select with a constant condition operand.
2164 /// Example: <4,1,6,undef>
2165 /// This returns false if the mask does not choose from both input vectors.
2166 /// In that case, the shuffle is better classified as an identity shuffle.
2167 /// This assumes that vector operands are the same length as the mask
2168 /// (a length-changing shuffle can never be equivalent to a vector select).
2169 static bool isSelectMask(ArrayRef<int> Mask);
2170 static bool isSelectMask(const Constant *Mask) {
2171 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")((void)0);
2172 SmallVector<int, 16> MaskAsInts;
2173 getShuffleMask(Mask, MaskAsInts);
2174 return isSelectMask(MaskAsInts);
2175 }
2176
2177 /// Return true if this shuffle chooses elements from its source vectors
2178 /// without lane crossings and all operands have the same number of elements.
2179 /// In other words, this shuffle is equivalent to a vector select with a
2180 /// constant condition operand.
2181 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
2182 /// This returns false if the mask does not choose from both input vectors.
2183 /// In that case, the shuffle is better classified as an identity shuffle.
2184 /// TODO: Optionally allow length-changing shuffles.
2185 bool isSelect() const {
2186 return !changesLength() && isSelectMask(ShuffleMask);
2187 }
2188
2189 /// Return true if this shuffle mask swaps the order of elements from exactly
2190 /// one source vector.
2191 /// Example: <7,6,undef,4>
2192 /// This assumes that vector operands are the same length as the mask.
2193 static bool isReverseMask(ArrayRef<int> Mask);
2194 static bool isReverseMask(const Constant *Mask) {
2195 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")((void)0);
2196 SmallVector<int, 16> MaskAsInts;
2197 getShuffleMask(Mask, MaskAsInts);
2198 return isReverseMask(MaskAsInts);
2199 }
2200
2201 /// Return true if this shuffle swaps the order of elements from exactly
2202 /// one source vector.
2203 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
2204 /// TODO: Optionally allow length-changing shuffles.
2205 bool isReverse() const {
2206 return !changesLength() && isReverseMask(ShuffleMask);
2207 }
2208
2209 /// Return true if this shuffle mask chooses all elements with the same value
2210 /// as the first element of exactly one source vector.
2211 /// Example: <4,undef,undef,4>
2212 /// This assumes that vector operands are the same length as the mask.
2213 static bool isZeroEltSplatMask(ArrayRef<int> Mask);
2214 static bool isZeroEltSplatMask(const Constant *Mask) {
2215 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")((void)0);
2216 SmallVector<int, 16> MaskAsInts;
2217 getShuffleMask(Mask, MaskAsInts);
2218 return isZeroEltSplatMask(MaskAsInts);
2219 }
2220
2221 /// Return true if all elements of this shuffle are the same value as the
2222 /// first element of exactly one source vector without changing the length
2223 /// of that vector.
2224 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
2225 /// TODO: Optionally allow length-changing shuffles.
2226 /// TODO: Optionally allow splats from other elements.
2227 bool isZeroEltSplat() const {
2228 return !changesLength() && isZeroEltSplatMask(ShuffleMask);
2229 }
2230
2231 /// Return true if this shuffle mask is a transpose mask.
2232 /// Transpose vector masks transpose a 2xn matrix. They read corresponding
2233 /// even- or odd-numbered vector elements from two n-dimensional source
2234 /// vectors and write each result into consecutive elements of an
2235 /// n-dimensional destination vector. Two shuffles are necessary to complete
2236 /// the transpose, one for the even elements and another for the odd elements.
2237 /// This description closely follows how the TRN1 and TRN2 AArch64
2238 /// instructions operate.
2239 ///
2240 /// For example, a simple 2x2 matrix can be transposed with:
2241 ///
2242 /// ; Original matrix
2243 /// m0 = < a, b >
2244 /// m1 = < c, d >
2245 ///
2246 /// ; Transposed matrix
2247 /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
2248 /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
2249 ///
2250 /// For matrices having greater than n columns, the resulting nx2 transposed
2251 /// matrix is stored in two result vectors such that one vector contains
2252 /// interleaved elements from all the even-numbered rows and the other vector
2253 /// contains interleaved elements from all the odd-numbered rows. For example,
2254 /// a 2x4 matrix can be transposed with:
2255 ///
2256 /// ; Original matrix
2257 /// m0 = < a, b, c, d >
2258 /// m1 = < e, f, g, h >
2259 ///
2260 /// ; Transposed matrix
2261 /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
2262 /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
2263 static bool isTransposeMask(ArrayRef<int> Mask);
2264 static bool isTransposeMask(const Constant *Mask) {
2265 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")((void)0);
2266 SmallVector<int, 16> MaskAsInts;
2267 getShuffleMask(Mask, MaskAsInts);
2268 return isTransposeMask(MaskAsInts);
2269 }
2270
2271 /// Return true if this shuffle transposes the elements of its inputs without
2272 /// changing the length of the vectors. This operation may also be known as a
2273 /// merge or interleave. See the description for isTransposeMask() for the
2274 /// exact specification.
2275 /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
2276 bool isTranspose() const {
2277 return !changesLength() && isTransposeMask(ShuffleMask);
2278 }
2279
2280 /// Return true if this shuffle mask is an extract subvector mask.
2281 /// A valid extract subvector mask returns a smaller vector from a single
2282 /// source operand. The base extraction index is returned as well.
2283 static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2284 int &Index);
2285 static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
2286 int &Index) {
2287 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")((void)0);
2288 // Not possible to express a shuffle mask for a scalable vector for this
2289 // case.
2290 if (isa<ScalableVectorType>(Mask->getType()))
2291 return false;
2292 SmallVector<int, 16> MaskAsInts;
2293 getShuffleMask(Mask, MaskAsInts);
2294 return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
2295 }
2296
  /// Return true if this shuffle mask is an extract subvector mask.
  /// On success \p Index receives the base extraction index.
  bool isExtractSubvectorMask(int &Index) const {
    // Not possible to express a shuffle mask for a scalable vector for this
    // case.
    if (isa<ScalableVectorType>(getType()))
      return false;

    // Both source operands share the same type, so operand 0 gives the
    // source element count.
    int NumSrcElts =
        cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
    return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
  }
2308
2309 /// Change values in a shuffle permute mask assuming the two vector operands
2310 /// of length InVecNumElts have swapped position.
2311 static void commuteShuffleMask(MutableArrayRef<int> Mask,
2312 unsigned InVecNumElts) {
2313 for (int &Idx : Mask) {
2314 if (Idx == -1)
2315 continue;
2316 Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
2317 assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&((void)0)
2318 "shufflevector mask index out of range")((void)0);
2319 }
2320 }
2321
  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    // An instruction is a ShuffleVectorInst exactly when it carries the
    // ShuffleVector opcode.
    return I->getOpcode() == Instruction::ShuffleVector;
  }
  static bool classof(const Value *V) {
    // A generic Value qualifies only if it is an Instruction of the right
    // opcode; delegate to the Instruction overload above.
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
2329};
2330
// ShuffleVectorInst always has exactly two Value operands (the two input
// vectors); the shuffle mask is stored separately, not as an operand.
template <>
struct OperandTraits<ShuffleVectorInst>
    : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};
2334
// Expands to the out-of-line definitions of ShuffleVectorInst's operand
// accessors (op_begin/op_end/getOperand/setOperand/getNumOperands/Op<>),
// matching the DECLARE_TRANSPARENT_OPERAND_ACCESSORS in the class body.
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)
2336
2337//===----------------------------------------------------------------------===//
2338// ExtractValueInst Class
2339//===----------------------------------------------------------------------===//
2340
/// This instruction extracts a struct member or array
/// element value from an aggregate value.
///
class ExtractValueInst : public UnaryInstruction {
  /// The compile-time-constant indices selecting the extracted element.
  /// Stored out-of-line (not as operands) because they are not Values.
  SmallVector<unsigned, 4> Indices;

  ExtractValueInst(const ExtractValueInst &EVI);

  /// Constructors - Create a extractvalue instruction with a base aggregate
  /// value and a list of indices. The first ctor can optionally insert before
  /// an existing instruction, the second appends the new instruction to the
  /// specified BasicBlock.
  inline ExtractValueInst(Value *Agg,
                          ArrayRef<unsigned> Idxs,
                          const Twine &NameStr,
                          Instruction *InsertBefore);
  inline ExtractValueInst(Value *Agg,
                          ArrayRef<unsigned> Idxs,
                          const Twine &NameStr, BasicBlock *InsertAtEnd);

  /// Record the index list and set the instruction's name.
  void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  ExtractValueInst *cloneImpl() const;

public:
  /// Factory: allocate an extractvalue instruction, optionally inserting it
  /// before an existing instruction.
  static ExtractValueInst *Create(Value *Agg,
                                  ArrayRef<unsigned> Idxs,
                                  const Twine &NameStr = "",
                                  Instruction *InsertBefore = nullptr) {
    return new
      ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
  }

  /// Factory: allocate an extractvalue instruction appended to \p InsertAtEnd.
  static ExtractValueInst *Create(Value *Agg,
                                  ArrayRef<unsigned> Idxs,
                                  const Twine &NameStr,
                                  BasicBlock *InsertAtEnd) {
    return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
  }

  /// Returns the type of the element that would be extracted
  /// with an extractvalue instruction with the specified parameters.
  ///
  /// Null is returned if the indices are invalid for the specified type.
  static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);

  using idx_iterator = const unsigned*;

  // Iteration over the constant index list.
  inline idx_iterator idx_begin() const { return Indices.begin(); }
  inline idx_iterator idx_end() const { return Indices.end(); }
  inline iterator_range<idx_iterator> indices() const {
    return make_range(idx_begin(), idx_end());
  }

  // The aggregate being indexed is the sole operand (operand 0).
  Value *getAggregateOperand() {
    return getOperand(0);
  }
  const Value *getAggregateOperand() const {
    return getOperand(0);
  }
  static unsigned getAggregateOperandIndex() {
    return 0U; // get index for modifying correct operand
  }

  ArrayRef<unsigned> getIndices() const {
    return Indices;
  }

  unsigned getNumIndices() const {
    return (unsigned)Indices.size();
  }

  // An extractvalue instruction always has at least one index.
  bool hasIndices() const {
    return true;
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::ExtractValue;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
2429
// Inline constructor: the result type is the indexed element type computed by
// getIndexedType; checkGEPType asserts it is non-null (i.e. the indices were
// valid for the aggregate's type) before handing it to UnaryInstruction.
ExtractValueInst::ExtractValueInst(Value *Agg,
                                   ArrayRef<unsigned> Idxs,
                                   const Twine &NameStr,
                                   Instruction *InsertBefore)
  : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
                     ExtractValue, Agg, InsertBefore) {
  init(Idxs, NameStr);
}
2438
// Inline constructor (append-to-block variant): identical to the
// insert-before form except the instruction is appended to \p InsertAtEnd.
ExtractValueInst::ExtractValueInst(Value *Agg,
                                   ArrayRef<unsigned> Idxs,
                                   const Twine &NameStr,
                                   BasicBlock *InsertAtEnd)
  : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
                     ExtractValue, Agg, InsertAtEnd) {
  init(Idxs, NameStr);
}
2447
2448//===----------------------------------------------------------------------===//
2449// InsertValueInst Class
2450//===----------------------------------------------------------------------===//
2451
/// This instruction inserts a struct field of array element
/// value into an aggregate value.
///
class InsertValueInst : public Instruction {
  /// The compile-time-constant indices selecting the insertion position.
  /// Stored out-of-line (not as operands) because they are not Values.
  SmallVector<unsigned, 4> Indices;

  InsertValueInst(const InsertValueInst &IVI);

  /// Constructors - Create a insertvalue instruction with a base aggregate
  /// value, a value to insert, and a list of indices. The first ctor can
  /// optionally insert before an existing instruction, the second appends
  /// the new instruction to the specified BasicBlock.
  inline InsertValueInst(Value *Agg, Value *Val,
                         ArrayRef<unsigned> Idxs,
                         const Twine &NameStr,
                         Instruction *InsertBefore);
  inline InsertValueInst(Value *Agg, Value *Val,
                         ArrayRef<unsigned> Idxs,
                         const Twine &NameStr, BasicBlock *InsertAtEnd);

  /// Constructors - These two constructors are convenience methods because one
  /// and two index insertvalue instructions are so common.
  InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
                  const Twine &NameStr = "",
                  Instruction *InsertBefore = nullptr);
  InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
                  BasicBlock *InsertAtEnd);

  /// Set the two operands, record the index list, and set the name.
  void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
            const Twine &NameStr);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  InsertValueInst *cloneImpl() const;

public:
  // allocate space for exactly two operands
  void *operator new(size_t S) { return User::operator new(S, 2); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  /// Factory: allocate an insertvalue instruction, optionally inserting it
  /// before an existing instruction.
  static InsertValueInst *Create(Value *Agg, Value *Val,
                                 ArrayRef<unsigned> Idxs,
                                 const Twine &NameStr = "",
                                 Instruction *InsertBefore = nullptr) {
    return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
  }

  /// Factory: allocate an insertvalue instruction appended to \p InsertAtEnd.
  static InsertValueInst *Create(Value *Agg, Value *Val,
                                 ArrayRef<unsigned> Idxs,
                                 const Twine &NameStr,
                                 BasicBlock *InsertAtEnd) {
    return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  using idx_iterator = const unsigned*;

  // Iteration over the constant index list.
  inline idx_iterator idx_begin() const { return Indices.begin(); }
  inline idx_iterator idx_end() const { return Indices.end(); }
  inline iterator_range<idx_iterator> indices() const {
    return make_range(idx_begin(), idx_end());
  }

  // Operand 0 is the aggregate being updated.
  Value *getAggregateOperand() {
    return getOperand(0);
  }
  const Value *getAggregateOperand() const {
    return getOperand(0);
  }
  static unsigned getAggregateOperandIndex() {
    return 0U; // get index for modifying correct operand
  }

  // Operand 1 is the value being inserted into the aggregate.
  Value *getInsertedValueOperand() {
    return getOperand(1);
  }
  const Value *getInsertedValueOperand() const {
    return getOperand(1);
  }
  static unsigned getInsertedValueOperandIndex() {
    return 1U; // get index for modifying correct operand
  }

  ArrayRef<unsigned> getIndices() const {
    return Indices;
  }

  unsigned getNumIndices() const {
    return (unsigned)Indices.size();
  }

  // An insertvalue instruction always has at least one index.
  bool hasIndices() const {
    return true;
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::InsertValue;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
2559
// InsertValueInst always has exactly two Value operands: the aggregate and
// the value being inserted; the index list is stored separately.
template <>
struct OperandTraits<InsertValueInst> :
  public FixedNumOperandTraits<InsertValueInst, 2> {
};
2564
// Inline constructor: the result has the same type as the aggregate operand,
// and the two operands live in the fixed two-slot area provided by
// OperandTraits (allocated by this class's operator new).
InsertValueInst::InsertValueInst(Value *Agg,
                                 Value *Val,
                                 ArrayRef<unsigned> Idxs,
                                 const Twine &NameStr,
                                 Instruction *InsertBefore)
  : Instruction(Agg->getType(), InsertValue,
                OperandTraits<InsertValueInst>::op_begin(this),
                2, InsertBefore) {
  init(Agg, Val, Idxs, NameStr);
}
2575
// Inline constructor (append-to-block variant): identical to the
// insert-before form except the instruction is appended to \p InsertAtEnd.
InsertValueInst::InsertValueInst(Value *Agg,
                                 Value *Val,
                                 ArrayRef<unsigned> Idxs,
                                 const Twine &NameStr,
                                 BasicBlock *InsertAtEnd)
  : Instruction(Agg->getType(), InsertValue,
                OperandTraits<InsertValueInst>::op_begin(this),
                2, InsertAtEnd) {
  init(Agg, Val, Idxs, NameStr);
}
2586
// Expands to the out-of-line definitions of InsertValueInst's operand
// accessors declared by DECLARE_TRANSPARENT_OPERAND_ACCESSORS in the class.
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)
2588
2589//===----------------------------------------------------------------------===//
2590// PHINode Class
2591//===----------------------------------------------------------------------===//
2592
// PHINode - The PHINode class is used to represent the magical mystical PHI
// node, that can not exist in nature, but can be synthesized in a computer
// scientist's overactive imagination.
//
class PHINode : public Instruction {
  /// The number of operands actually allocated. NumOperands is
  /// the number actually in use.
  unsigned ReservedSpace;

  PHINode(const PHINode &PN);

  // PHI operands are "hung off" in a separate allocation (nullptr/0 passed to
  // the Instruction base) because the incoming-edge count can grow.
  explicit PHINode(Type *Ty, unsigned NumReservedValues,
                   const Twine &NameStr = "",
                   Instruction *InsertBefore = nullptr)
    : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
      ReservedSpace(NumReservedValues) {
    assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
    setName(NameStr);
    allocHungoffUses(ReservedSpace);
  }

  PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
          BasicBlock *InsertAtEnd)
    : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
      ReservedSpace(NumReservedValues) {
    assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
    setName(NameStr);
    allocHungoffUses(ReservedSpace);
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  PHINode *cloneImpl() const;

  // allocHungoffUses - this is more complicated than the generic
  // User::allocHungoffUses, because we have to allocate Uses for the incoming
  // values and pointers to the incoming blocks, all in one allocation.
  void allocHungoffUses(unsigned N) {
    User::allocHungoffUses(N, /* IsPhi */ true);
  }

public:
  /// Constructors - NumReservedValues is a hint for the number of incoming
  /// edges that this phi node will have (use 0 if you really have no idea).
  static PHINode *Create(Type *Ty, unsigned NumReservedValues,
                         const Twine &NameStr = "",
                         Instruction *InsertBefore = nullptr) {
    return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
  }

  static PHINode *Create(Type *Ty, unsigned NumReservedValues,
                         const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // Block iterator interface. This provides access to the list of incoming
  // basic blocks, which parallels the list of incoming values.

  using block_iterator = BasicBlock **;
  using const_block_iterator = BasicBlock * const *;

  // The incoming-block pointer array is laid out immediately after the
  // ReservedSpace operand slots in the hung-off allocation; hence the
  // reinterpret_cast of op_begin() + ReservedSpace.
  block_iterator block_begin() {
    return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace);
  }

  const_block_iterator block_begin() const {
    return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
  }

  block_iterator block_end() {
    return block_begin() + getNumOperands();
  }

  const_block_iterator block_end() const {
    return block_begin() + getNumOperands();
  }

  iterator_range<block_iterator> blocks() {
    return make_range(block_begin(), block_end());
  }

  iterator_range<const_block_iterator> blocks() const {
    return make_range(block_begin(), block_end());
  }

  // Every operand of a PHI is an incoming value.
  op_range incoming_values() { return operands(); }

  const_op_range incoming_values() const { return operands(); }

  /// Return the number of incoming edges
  ///
  unsigned getNumIncomingValues() const { return getNumOperands(); }

  /// Return incoming value number x
  ///
  Value *getIncomingValue(unsigned i) const {
    return getOperand(i);
  }
  void setIncomingValue(unsigned i, Value *V) {
    assert(V && "PHI node got a null value!");
    assert(getType() == V->getType() &&
           "All operands to PHI node must be the same type as the PHI node!");
    setOperand(i, V);
  }

  // Incoming value i is simply operand i; these two helpers make that
  // mapping explicit for generic code.
  static unsigned getOperandNumForIncomingValue(unsigned i) {
    return i;
  }

  static unsigned getIncomingValueNumForOperand(unsigned i) {
    return i;
  }

  /// Return incoming basic block number @p i.
  ///
  BasicBlock *getIncomingBlock(unsigned i) const {
    return block_begin()[i];
  }

  /// Return incoming basic block corresponding
  /// to an operand of the PHI.
  ///
  BasicBlock *getIncomingBlock(const Use &U) const {
    assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
    // The Use's position in the operand list is the incoming-value index.
    return getIncomingBlock(unsigned(&U - op_begin()));
  }

  /// Return incoming basic block corresponding
  /// to value use iterator.
  ///
  BasicBlock *getIncomingBlock(Value::const_user_iterator I) const {
    return getIncomingBlock(I.getUse());
  }

  void setIncomingBlock(unsigned i, BasicBlock *BB) {
    assert(BB && "PHI node got a null basic block!");
    block_begin()[i] = BB;
  }

  /// Replace every incoming basic block \p Old to basic block \p New.
  void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) {
    assert(New && Old && "PHI node got a null basic block!");
    for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
      if (getIncomingBlock(Op) == Old)
        setIncomingBlock(Op, New);
  }

  /// Add an incoming value to the end of the PHI list
  ///
  void addIncoming(Value *V, BasicBlock *BB) {
    if (getNumOperands() == ReservedSpace)
      growOperands(); // Get more space!
    // Initialize some new operands.
    setNumHungOffUseOperands(getNumOperands() + 1);
    setIncomingValue(getNumOperands() - 1, V);
    setIncomingBlock(getNumOperands() - 1, BB);
  }

  /// Remove an incoming value. This is useful if a
  /// predecessor basic block is deleted. The value removed is returned.
  ///
  /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
  /// is true), the PHI node is destroyed and any uses of it are replaced with
  /// dummy values. The only time there should be zero incoming values to a PHI
  /// node is when the block is dead, so this strategy is sound.
  ///
  Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);

  Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
    int Idx = getBasicBlockIndex(BB);
    assert(Idx >= 0 && "Invalid basic block argument to remove!");
    return removeIncomingValue(Idx, DeletePHIIfEmpty);
  }

  /// Return the first index of the specified basic
  /// block in the value list for this PHI. Returns -1 if no instance.
  ///
  int getBasicBlockIndex(const BasicBlock *BB) const {
    for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
      if (block_begin()[i] == BB)
        return i;
    return -1;
  }

  Value *getIncomingValueForBlock(const BasicBlock *BB) const {
    int Idx = getBasicBlockIndex(BB);
    assert(Idx >= 0 && "Invalid basic block argument!");
    return getIncomingValue(Idx);
  }

  /// Set every incoming value(s) for block \p BB to \p V.
  void setIncomingValueForBlock(const BasicBlock *BB, Value *V) {
    assert(BB && "PHI node got a null basic block!");
    bool Found = false;
    for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
      if (getIncomingBlock(Op) == BB) {
        Found = true;
        setIncomingValue(Op, V);
      }
    (void)Found;
    assert(Found && "Invalid basic block argument to set!");
  }

  /// If the specified PHI node always merges together the
  /// same value, return the value, otherwise return null.
  Value *hasConstantValue() const;

  /// Whether the specified PHI node always merges
  /// together the same value, assuming undefs are equal to a unique
  /// non-undef value.
  bool hasConstantOrUndefValue() const;

  /// If the PHI node is complete which means all of its parent's predecessors
  /// have incoming value in this PHI, return true, otherwise return false.
  bool isComplete() const {
    return llvm::all_of(predecessors(getParent()),
                        [this](const BasicBlock *Pred) {
                          return getBasicBlockIndex(Pred) >= 0;
                        });
  }

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::PHI;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Double the reserved operand space (implemented out of line).
  void growOperands();
};
2830
// PHI nodes use hung-off uses (operands allocated separately from the
// instruction) because their incoming-edge count can grow after creation.
template <>
struct OperandTraits<PHINode> : public HungoffOperandTraits<2> {
};
2834
// Expands to the out-of-line definitions of PHINode's operand accessors
// declared by DECLARE_TRANSPARENT_OPERAND_ACCESSORS in the class.
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)
2836
2837//===----------------------------------------------------------------------===//
2838// LandingPadInst Class
2839//===----------------------------------------------------------------------===//
2840
//===---------------------------------------------------------------------------
/// The landingpad instruction holds all of the information
/// necessary to generate correct exception handling. The landingpad instruction
/// cannot be moved from the top of a landing pad block, which itself is
/// accessible only from the 'unwind' edge of an invoke. This uses the
/// SubclassData field in Value to store whether or not the landingpad is a
/// cleanup.
///
class LandingPadInst : public Instruction {
  // The "is cleanup" flag lives in bit 0 of the instruction's subclass data.
  using CleanupField = BoolBitfieldElementT<0>;

  /// The number of operands actually allocated. NumOperands is
  /// the number actually in use.
  unsigned ReservedSpace;

  LandingPadInst(const LandingPadInst &LP);

public:
  enum ClauseType { Catch, Filter };

private:
  explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                          const Twine &NameStr, Instruction *InsertBefore);
  explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                          const Twine &NameStr, BasicBlock *InsertAtEnd);

  // Allocate space for exactly zero operands.
  // Clause operands are hung off separately and grown on demand.
  void *operator new(size_t S) { return User::operator new(S); }

  void growOperands(unsigned Size);
  void init(unsigned NumReservedValues, const Twine &NameStr);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  LandingPadInst *cloneImpl() const;

public:
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  /// Constructors - NumReservedClauses is a hint for the number of incoming
  /// clauses that this landingpad will have (use 0 if you really have no idea).
  static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
                                const Twine &NameStr = "",
                                Instruction *InsertBefore = nullptr);
  static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
                                const Twine &NameStr, BasicBlock *InsertAtEnd);

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Return 'true' if this landingpad instruction is a
  /// cleanup. I.e., it should be run when unwinding even if its landing pad
  /// doesn't catch the exception.
  bool isCleanup() const { return getSubclassData<CleanupField>(); }

  /// Indicate that this landingpad instruction is a cleanup.
  void setCleanup(bool V) { setSubclassData<CleanupField>(V); }

  /// Add a catch or filter clause to the landing pad.
  void addClause(Constant *ClauseVal);

  /// Get the value of the clause at index Idx. Use isCatch/isFilter to
  /// determine what type of clause this is.
  Constant *getClause(unsigned Idx) const {
    return cast<Constant>(getOperandList()[Idx]);
  }

  /// Return 'true' if the clause and index Idx is a catch clause.
  // Filter clauses have array type; anything else is a catch clause.
  bool isCatch(unsigned Idx) const {
    return !isa<ArrayType>(getOperandList()[Idx]->getType());
  }

  /// Return 'true' if the clause and index Idx is a filter clause.
  bool isFilter(unsigned Idx) const {
    return isa<ArrayType>(getOperandList()[Idx]->getType());
  }

  /// Get the number of clauses for this landing pad.
  unsigned getNumClauses() const { return getNumOperands(); }

  /// Grow the size of the operand list to accommodate the new
  /// number of clauses.
  void reserveClauses(unsigned Size) { growOperands(Size); }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::LandingPad;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
2935
// LandingPadInst clauses are hung-off uses so the clause list can grow
// after the instruction is created (see reserveClauses/growOperands).
template <>
struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> {
};
2939
// Expands to the out-of-line definitions of LandingPadInst's operand
// accessors declared by DECLARE_TRANSPARENT_OPERAND_ACCESSORS in the class.
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)
2941
2942//===----------------------------------------------------------------------===//
2943// ReturnInst Class
2944//===----------------------------------------------------------------------===//
2945
2946//===---------------------------------------------------------------------------
2947/// Return a value (possibly void), from a function. Execution
2948/// does not continue in this function any longer.
2949///
2950class ReturnInst : public Instruction {
2951 ReturnInst(const ReturnInst &RI);
2952
2953private:
2954 // ReturnInst constructors:
2955 // ReturnInst() - 'ret void' instruction
2956 // ReturnInst( null) - 'ret void' instruction
2957 // ReturnInst(Value* X) - 'ret X' instruction
2958 // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I
2959 // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I
2960 // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B
2961 // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B
2962 //
2963 // NOTE: If the Value* passed is of type void then the constructor behaves as
2964 // if it was passed NULL.
2965 explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
2966 Instruction *InsertBefore = nullptr);
2967 ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
2968 explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);
2969
2970protected:
2971 // Note: Instruction needs to be a friend here to call cloneImpl.
2972 friend class Instruction;
2973
2974 ReturnInst *cloneImpl() const;
2975
2976public:
2977 static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
2978 Instruction *InsertBefore = nullptr) {
2979 return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
2980 }
2981
2982 static ReturnInst* Create(LLVMContext &C, Value *retVal,
2983 BasicBlock *InsertAtEnd) {
2984 return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
2985 }
2986
2987 static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
2988 return new(0) ReturnInst(C, InsertAtEnd);
2989 }
2990
2991 /// Provide fast operand accessors
2992 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2993
2994 /// Convenience accessor. Returns null if there is no return value.
2995 Value *getReturnValue() const {
2996 return getNumOperands() != 0 ? getOperand(0) : nullptr;
2997 }
2998
2999 unsigned getNumSuccessors() const { return 0; }
3000
3001 // Methods for support type inquiry through isa, cast, and dyn_cast:
3002 static bool classof(const Instruction *I) {
3003 return (I->getOpcode() == Instruction::Ret);
3004 }
3005 static bool classof(const Value *V) {
3006 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3007 }
3008
3009private:
3010 BasicBlock *getSuccessor(unsigned idx) const {
3011 llvm_unreachable("ReturnInst has no successors!")__builtin_unreachable();
3012 }
3013
3014 void setSuccessor(unsigned idx, BasicBlock *B) {
3015 llvm_unreachable("ReturnInst has no successors!")__builtin_unreachable();
3016 }
3017};
3018
3019template <>
3020struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
3021};
3022
3023DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)ReturnInst::op_iterator ReturnInst::op_begin() { return OperandTraits
<ReturnInst>::op_begin(this); } ReturnInst::const_op_iterator
ReturnInst::op_begin() const { return OperandTraits<ReturnInst
>::op_begin(const_cast<ReturnInst*>(this)); } ReturnInst
::op_iterator ReturnInst::op_end() { return OperandTraits<
ReturnInst>::op_end(this); } ReturnInst::const_op_iterator
ReturnInst::op_end() const { return OperandTraits<ReturnInst
>::op_end(const_cast<ReturnInst*>(this)); } Value *ReturnInst
::getOperand(unsigned i_nocapture) const { ((void)0); return cast_or_null
<Value>( OperandTraits<ReturnInst>::op_begin(const_cast
<ReturnInst*>(this))[i_nocapture].get()); } void ReturnInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { ((
void)0); OperandTraits<ReturnInst>::op_begin(this)[i_nocapture
] = Val_nocapture; } unsigned ReturnInst::getNumOperands() const
{ return OperandTraits<ReturnInst>::operands(this); } template
<int Idx_nocapture> Use &ReturnInst::Op() { return
this->OpFrom<Idx_nocapture>(this); } template <int
Idx_nocapture> const Use &ReturnInst::Op() const { return
this->OpFrom<Idx_nocapture>(this); }
3024
3025//===----------------------------------------------------------------------===//
3026// BranchInst Class
3027//===----------------------------------------------------------------------===//
3028
3029//===---------------------------------------------------------------------------
3030/// Conditional or Unconditional Branch instruction.
3031///
3032class BranchInst : public Instruction {
3033 /// Ops list - Branches are strange. The operands are ordered:
3034 /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because
3035 /// they don't have to check for cond/uncond branchness. These are mostly
3036 /// accessed relative from op_end().
3037 BranchInst(const BranchInst &BI);
3038 // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
3039 // BranchInst(BB *B) - 'br B'
3040 // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F'
3041 // BranchInst(BB* B, Inst *I) - 'br B' insert before I
3042 // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
3043 // BranchInst(BB* B, BB *I) - 'br B' insert at end
3044 // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end
3045 explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
3046 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3047 Instruction *InsertBefore = nullptr);
3048 BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
3049 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3050 BasicBlock *InsertAtEnd);
3051
3052 void AssertOK();
3053
3054protected:
3055 // Note: Instruction needs to be a friend here to call cloneImpl.
3056 friend class Instruction;
3057
3058 BranchInst *cloneImpl() const;
3059
3060public:
3061 /// Iterator type that casts an operand to a basic block.
3062 ///
3063 /// This only makes sense because the successors are stored as adjacent
3064 /// operands for branch instructions.
3065 struct succ_op_iterator
3066 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3067 std::random_access_iterator_tag, BasicBlock *,
3068 ptrdiff_t, BasicBlock *, BasicBlock *> {
3069 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3070
3071 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3072 BasicBlock *operator->() const { return operator*(); }
3073 };
3074
3075 /// The const version of `succ_op_iterator`.
3076 struct const_succ_op_iterator
3077 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3078 std::random_access_iterator_tag,
3079 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3080 const BasicBlock *> {
3081 explicit const_succ_op_iterator(const_value_op_iterator I)
3082 : iterator_adaptor_base(I) {}
3083
3084 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3085 const BasicBlock *operator->() const { return operator*(); }
3086 };
3087
3088 static BranchInst *Create(BasicBlock *IfTrue,
3089 Instruction *InsertBefore = nullptr) {
3090 return new(1) BranchInst(IfTrue, InsertBefore);
3091 }
3092
3093 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3094 Value *Cond, Instruction *InsertBefore = nullptr) {
3095 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
3096 }
3097
3098 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
3099 return new(1) BranchInst(IfTrue, InsertAtEnd);
3100 }
3101
3102 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3103 Value *Cond, BasicBlock *InsertAtEnd) {
3104 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
3105 }
3106
3107 /// Transparently provide more efficient getOperand methods.
3108 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
3109
3110 bool isUnconditional() const { return getNumOperands() == 1; }
3111 bool isConditional() const { return getNumOperands() == 3; }
3112
3113 Value *getCondition() const {
3114 assert(isConditional() && "Cannot get condition of an uncond branch!")((void)0);
3115 return Op<-3>();
3116 }
3117
3118 void setCondition(Value *V) {
3119 assert(isConditional() && "Cannot set condition of unconditional branch!")((void)0);
3120 Op<-3>() = V;
3121 }
3122
3123 unsigned getNumSuccessors() const { return 1+isConditional(); }
3124
3125 BasicBlock *getSuccessor(unsigned i) const {
3126 assert(i < getNumSuccessors() && "Successor # out of range for Branch!")((void)0);
3127 return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
3128 }
3129
3130 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3131 assert(idx < getNumSuccessors() && "Successor # out of range for Branch!")((void)0);
3132 *(&Op<-1>() - idx) = NewSucc;
3133 }
3134
3135 /// Swap the successors of this branch instruction.
3136 ///
3137 /// Swaps the successors of the branch instruction. This also swaps any
3138 /// branch weight metadata associated with the instruction so that it
3139 /// continues to map correctly to each operand.
3140 void swapSuccessors();
3141
3142 iterator_range<succ_op_iterator> successors() {
3143 return make_range(
3144 succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
3145 succ_op_iterator(value_op_end()));
3146 }
3147
3148 iterator_range<const_succ_op_iterator> successors() const {
3149 return make_range(const_succ_op_iterator(
3150 std::next(value_op_begin(), isConditional() ? 1 : 0)),
3151 const_succ_op_iterator(value_op_end()));
3152 }
3153
3154 // Methods for support type inquiry through isa, cast, and dyn_cast:
3155 static bool classof(const Instruction *I) {
3156 return (I->getOpcode() == Instruction::Br);
3157 }
3158 static bool classof(const Value *V) {
3159 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3160 }
3161};
3162
3163template <>
3164struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
3165};
3166
3167DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)BranchInst::op_iterator BranchInst::op_begin() { return OperandTraits
<BranchInst>::op_begin(this); } BranchInst::const_op_iterator
BranchInst::op_begin() const { return OperandTraits<BranchInst
>::op_begin(const_cast<BranchInst*>(this)); } BranchInst
::op_iterator BranchInst::op_end() { return OperandTraits<
BranchInst>::op_end(this); } BranchInst::const_op_iterator
BranchInst::op_end() const { return OperandTraits<BranchInst
>::op_end(const_cast<BranchInst*>(this)); } Value *BranchInst
::getOperand(unsigned i_nocapture) const { ((void)0); return cast_or_null
<Value>( OperandTraits<BranchInst>::op_begin(const_cast
<BranchInst*>(this))[i_nocapture].get()); } void BranchInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { ((
void)0); OperandTraits<BranchInst>::op_begin(this)[i_nocapture
] = Val_nocapture; } unsigned BranchInst::getNumOperands() const
{ return OperandTraits<BranchInst>::operands(this); } template
<int Idx_nocapture> Use &BranchInst::Op() { return
this->OpFrom<Idx_nocapture>(this); } template <int
Idx_nocapture> const Use &BranchInst::Op() const { return
this->OpFrom<Idx_nocapture>(this); }
3168
3169//===----------------------------------------------------------------------===//
3170// SwitchInst Class
3171//===----------------------------------------------------------------------===//
3172
3173//===---------------------------------------------------------------------------
3174/// Multiway switch
3175///
3176class SwitchInst : public Instruction {
3177 unsigned ReservedSpace;
3178
3179 // Operand[0] = Value to switch on
3180 // Operand[1] = Default basic block destination
3181 // Operand[2n ] = Value to match
3182 // Operand[2n+1] = BasicBlock to go to on match
3183 SwitchInst(const SwitchInst &SI);
3184
3185 /// Create a new switch instruction, specifying a value to switch on and a
3186 /// default destination. The number of additional cases can be specified here
3187 /// to make memory allocation more efficient. This constructor can also
3188 /// auto-insert before another instruction.
3189 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3190 Instruction *InsertBefore);
3191
3192 /// Create a new switch instruction, specifying a value to switch on and a
3193 /// default destination. The number of additional cases can be specified here
3194 /// to make memory allocation more efficient. This constructor also
3195 /// auto-inserts at the end of the specified BasicBlock.
3196 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3197 BasicBlock *InsertAtEnd);
3198
3199 // allocate space for exactly zero operands
3200 void *operator new(size_t S) { return User::operator new(S); }
3201
3202 void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
3203 void growOperands();
3204
3205protected:
3206 // Note: Instruction needs to be a friend here to call cloneImpl.
3207 friend class Instruction;
3208
3209 SwitchInst *cloneImpl() const;
3210
3211public:
3212 void operator delete(void *Ptr) { User::operator delete(Ptr); }
3213
3214 // -2
3215 static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
3216
3217 template <typename CaseHandleT> class CaseIteratorImpl;
3218
3219 /// A handle to a particular switch case. It exposes a convenient interface
3220 /// to both the case value and the successor block.
3221 ///
3222 /// We define this as a template and instantiate it to form both a const and
3223 /// non-const handle.
3224 template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
3225 class CaseHandleImpl {
3226 // Directly befriend both const and non-const iterators.
3227 friend class SwitchInst::CaseIteratorImpl<
3228 CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;
3229
3230 protected:
3231 // Expose the switch type we're parameterized with to the iterator.
3232 using SwitchInstType = SwitchInstT;
3233
3234 SwitchInstT *SI;
3235 ptrdiff_t Index;
3236
3237 CaseHandleImpl() = default;
3238 CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {}
3239
3240 public:
3241 /// Resolves case value for current case.
3242 ConstantIntT *getCaseValue() const {
3243 assert((unsigned)Index < SI->getNumCases() &&((void)0)
3244 "Index out the number of cases.")((void)0);
3245 return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
3246 }
3247
3248 /// Resolves successor for current case.
3249 BasicBlockT *getCaseSuccessor() const {
3250 assert(((unsigned)Index < SI->getNumCases() ||((void)0)
3251 (unsigned)Index == DefaultPseudoIndex) &&((void)0)
3252 "Index out the number of cases.")((void)0);
3253 return SI->getSuccessor(getSuccessorIndex());
3254 }
3255
3256 /// Returns number of current case.
3257 unsigned getCaseIndex() const { return Index; }
3258
3259 /// Returns successor index for current case successor.
3260 unsigned getSuccessorIndex() const {
3261 assert(((unsigned)Index == DefaultPseudoIndex ||((void)0)
3262 (unsigned)Index < SI->getNumCases()) &&((void)0)
3263 "Index out the number of cases.")((void)0);
3264 return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
3265 }
3266
3267 bool operator==(const CaseHandleImpl &RHS) const {
3268 assert(SI == RHS.SI && "Incompatible operators.")((void)0);
3269 return Index == RHS.Index;
3270 }
3271 };
3272
3273 using ConstCaseHandle =
3274 CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>;
3275
3276 class CaseHandle
3277 : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
3278 friend class SwitchInst::CaseIteratorImpl<CaseHandle>;
3279
3280 public:
3281 CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {}
3282
3283 /// Sets the new value for current case.
3284 void setValue(ConstantInt *V) {
3285 assert((unsigned)Index < SI->getNumCases() &&((void)0)
3286 "Index out the number of cases.")((void)0);
3287 SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
3288 }
3289
3290 /// Sets the new successor for current case.
3291 void setSuccessor(BasicBlock *S) {
3292 SI->setSuccessor(getSuccessorIndex(), S);
3293 }
3294 };
3295
3296 template <typename CaseHandleT>
3297 class CaseIteratorImpl
3298 : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
3299 std::random_access_iterator_tag,
3300 CaseHandleT> {
3301 using SwitchInstT = typename CaseHandleT::SwitchInstType;
3302
3303 CaseHandleT Case;
3304
3305 public:
3306 /// Default constructed iterator is in an invalid state until assigned to
3307 /// a case for a particular switch.
3308 CaseIteratorImpl() = default;
3309
3310 /// Initializes case iterator for given SwitchInst and for given
3311 /// case number.
3312 CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}
3313
3314 /// Initializes case iterator for given SwitchInst and for given
3315 /// successor index.
3316 static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI,
3317 unsigned SuccessorIndex) {
3318 assert(SuccessorIndex < SI->getNumSuccessors() &&((void)0)
3319 "Successor index # out of range!")((void)0);
3320 return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
3321 : CaseIteratorImpl(SI, DefaultPseudoIndex);
3322 }
3323
3324 /// Support converting to the const variant. This will be a no-op for const
3325 /// variant.
3326 operator CaseIteratorImpl<ConstCaseHandle>() const {
3327 return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
3328 }
3329
3330 CaseIteratorImpl &operator+=(ptrdiff_t N) {
3331 // Check index correctness after addition.
3332 // Note: Index == getNumCases() means end().
3333 assert(Case.Index + N >= 0 &&((void)0)
3334 (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&((void)0)
3335 "Case.Index out the number of cases.")((void)0);
3336 Case.Index += N;
3337 return *this;
3338 }
3339 CaseIteratorImpl &operator-=(ptrdiff_t N) {
3340 // Check index correctness after subtraction.
3341 // Note: Case.Index == getNumCases() means end().
3342 assert(Case.Index - N >= 0 &&((void)0)
3343 (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&((void)0)
3344 "Case.Index out the number of cases.")((void)0);
3345 Case.Index -= N;
3346 return *this;
3347 }
3348 ptrdiff_t operator-(const CaseIteratorImpl &RHS) const {
3349 assert(Case.SI == RHS.Case.SI && "Incompatible operators.")((void)0);
3350 return Case.Index - RHS.Case.Index;
3351 }
3352 bool operator==(const CaseIteratorImpl &RHS) const {
3353 return Case == RHS.Case;
3354 }
3355 bool operator<(const CaseIteratorImpl &RHS) const {
3356 assert(Case.SI == RHS.Case.SI && "Incompatible operators.")((void)0);
3357 return Case.Index < RHS.Case.Index;
3358 }
3359 CaseHandleT &operator*() { return Case; }
3360 const CaseHandleT &operator*() const { return Case; }
3361 };
3362
3363 using CaseIt = CaseIteratorImpl<CaseHandle>;
3364 using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>;
3365
3366 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3367 unsigned NumCases,
3368 Instruction *InsertBefore = nullptr) {
3369 return new SwitchInst(Value, Default, NumCases, InsertBefore);
3370 }
3371
3372 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3373 unsigned NumCases, BasicBlock *InsertAtEnd) {
3374 return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
3375 }
3376
3377 /// Provide fast operand accessors
3378 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
3379
3380 // Accessor Methods for Switch stmt
3381 Value *getCondition() const { return getOperand(0); }
3382 void setCondition(Value *V) { setOperand(0, V); }
3383
3384 BasicBlock *getDefaultDest() const {
3385 return cast<BasicBlock>(getOperand(1));
3386 }
3387
3388 void setDefaultDest(BasicBlock *DefaultCase) {
3389 setOperand(1, reinterpret_cast<Value*>(DefaultCase));
3390 }
3391
3392 /// Return the number of 'cases' in this switch instruction, excluding the
3393 /// default case.
3394 unsigned getNumCases() const {
3395 return getNumOperands()/2 - 1;
3396 }
3397
3398 /// Returns a read/write iterator that points to the first case in the
3399 /// SwitchInst.
3400 CaseIt case_begin() {
3401 return CaseIt(this, 0);
3402 }
3403
3404 /// Returns a read-only iterator that points to the first case in the
3405 /// SwitchInst.
3406 ConstCaseIt case_begin() const {
3407 return ConstCaseIt(this, 0);
3408 }
3409
3410 /// Returns a read/write iterator that points one past the last in the
3411 /// SwitchInst.
3412 CaseIt case_end() {
3413 return CaseIt(this, getNumCases());
3414 }
3415
3416 /// Returns a read-only iterator that points one past the last in the
3417 /// SwitchInst.
3418 ConstCaseIt case_end() const {
3419 return ConstCaseIt(this, getNumCases());
3420 }
3421
3422 /// Iteration adapter for range-for loops.
3423 iterator_range<CaseIt> cases() {
3424 return make_range(case_begin(), case_end());
3425 }
3426
3427 /// Constant iteration adapter for range-for loops.
3428 iterator_range<ConstCaseIt> cases() const {
3429 return make_range(case_begin(), case_end());
3430 }
3431
3432 /// Returns an iterator that points to the default case.
3433 /// Note: this iterator allows to resolve successor only. Attempt
3434 /// to resolve case value causes an assertion.
3435 /// Also note, that increment and decrement also causes an assertion and
3436 /// makes iterator invalid.
3437 CaseIt case_default() {
3438 return CaseIt(this, DefaultPseudoIndex);
3439 }
3440 ConstCaseIt case_default() const {
3441 return ConstCaseIt(this, DefaultPseudoIndex);
3442 }
3443
3444 /// Search all of the case values for the specified constant. If it is
3445 /// explicitly handled, return the case iterator of it, otherwise return
3446 /// default case iterator to indicate that it is handled by the default
3447 /// handler.
3448 CaseIt findCaseValue(const ConstantInt *C) {
3449 CaseIt I = llvm::find_if(
3450 cases(), [C](CaseHandle &Case) { return Case.getCaseValue() == C; });
3451 if (I != case_end())
3452 return I;
3453
3454 return case_default();
3455 }
3456 ConstCaseIt findCaseValue(const ConstantInt *C) const {
3457 ConstCaseIt I = llvm::find_if(cases(), [C](ConstCaseHandle &Case) {
3458 return Case.getCaseValue() == C;
3459 });
3460 if (I != case_end())
3461 return I;
3462
3463 return case_default();
3464 }
3465
3466 /// Finds the unique case value for a given successor. Returns null if the
3467 /// successor is not found, not unique, or is the default case.
3468 ConstantInt *findCaseDest(BasicBlock *BB) {
3469 if (BB == getDefaultDest())
3470 return nullptr;
3471
3472 ConstantInt *CI = nullptr;
3473 for (auto Case : cases()) {
3474 if (Case.getCaseSuccessor() != BB)
3475 continue;
3476
3477 if (CI)
3478 return nullptr; // Multiple cases lead to BB.
3479
3480 CI = Case.getCaseValue();
3481 }
3482
3483 return CI;
3484 }
3485
3486 /// Add an entry to the switch instruction.
3487 /// Note:
3488 /// This action invalidates case_end(). Old case_end() iterator will
3489 /// point to the added case.
3490 void addCase(ConstantInt *OnVal, BasicBlock *Dest);
3491
3492 /// This method removes the specified case and its successor from the switch
3493 /// instruction. Note that this operation may reorder the remaining cases at
3494 /// index idx and above.
3495 /// Note:
3496 /// This action invalidates iterators for all cases following the one removed,
3497 /// including the case_end() iterator. It returns an iterator for the next
3498 /// case.
3499 CaseIt removeCase(CaseIt I);
3500
3501 unsigned getNumSuccessors() const { return getNumOperands()/2; }
3502 BasicBlock *getSuccessor(unsigned idx) const {
3503 assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!")((void)0);
3504 return cast<BasicBlock>(getOperand(idx*2+1));
3505 }
3506 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3507 assert(idx < getNumSuccessors() && "Successor # out of range for switch!")((void)0);
3508 setOperand(idx * 2 + 1, NewSucc);
3509 }
3510
3511 // Methods for support type inquiry through isa, cast, and dyn_cast:
3512 static bool classof(const Instruction *I) {
3513 return I->getOpcode() == Instruction::Switch;
3514 }
3515 static bool classof(const Value *V) {
3516 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3517 }
3518};
3519
3520/// A wrapper class to simplify modification of SwitchInst cases along with
3521/// their prof branch_weights metadata.
3522class SwitchInstProfUpdateWrapper {
3523 SwitchInst &SI;
3524 Optional<SmallVector<uint32_t, 8> > Weights = None;
3525 bool Changed = false;
3526
3527protected:
3528 static MDNode *getProfBranchWeightsMD(const SwitchInst &SI);
3529
3530 MDNode *buildProfBranchWeightsMD();
3531
3532 void init();
3533
3534public:
3535 using CaseWeightOpt = Optional<uint32_t>;
3536 SwitchInst *operator->() { return &SI; }
3537 SwitchInst &operator*() { return SI; }
3538 operator SwitchInst *() { return &SI; }
3539
3540 SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); }
3541
3542 ~SwitchInstProfUpdateWrapper() {
3543 if (Changed)
3544 SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
3545 }
3546
3547 /// Delegate the call to the underlying SwitchInst::removeCase() and remove
3548 /// correspondent branch weight.
3549 SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I);
3550
3551 /// Delegate the call to the underlying SwitchInst::addCase() and set the
3552 /// specified branch weight for the added case.
3553 void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W);
3554
3555 /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark
3556 /// this object to not touch the underlying SwitchInst in destructor.
3557 SymbolTableList<Instruction>::iterator eraseFromParent();
3558
3559 void setSuccessorWeight(unsigned idx, CaseWeightOpt W);
3560 CaseWeightOpt getSuccessorWeight(unsigned idx);
3561
3562 static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx);
3563};
3564
3565template <>
3566struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> {
3567};
3568
3569DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)SwitchInst::op_iterator SwitchInst::op_begin() { return OperandTraits
<SwitchInst>::op_begin(this); } SwitchInst::const_op_iterator
SwitchInst::op_begin() const { return OperandTraits<SwitchInst
>::op_begin(const_cast<SwitchInst*>(this)); } SwitchInst
::op_iterator SwitchInst::op_end() { return OperandTraits<
SwitchInst>::op_end(this); } SwitchInst::const_op_iterator
SwitchInst::op_end() const { return OperandTraits<SwitchInst
>::op_end(const_cast<SwitchInst*>(this)); } Value *SwitchInst
::getOperand(unsigned i_nocapture) const { ((void)0); return cast_or_null
<Value>( OperandTraits<SwitchInst>::op_begin(const_cast
<SwitchInst*>(this))[i_nocapture].get()); } void SwitchInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { ((
void)0); OperandTraits<SwitchInst>::op_begin(this)[i_nocapture
] = Val_nocapture; } unsigned SwitchInst::getNumOperands() const
{ return OperandTraits<SwitchInst>::operands(this); } template
<int Idx_nocapture> Use &SwitchInst::Op() { return
this->OpFrom<Idx_nocapture>(this); } template <int
Idx_nocapture> const Use &SwitchInst::Op() const { return
this->OpFrom<Idx_nocapture>(this); }
3570
3571//===----------------------------------------------------------------------===//
3572// IndirectBrInst Class
3573//===----------------------------------------------------------------------===//
3574
3575//===---------------------------------------------------------------------------
3576/// Indirect Branch Instruction.
3577///
3578class IndirectBrInst : public Instruction {
3579 unsigned ReservedSpace;
3580
3581 // Operand[0] = Address to jump to
3582 // Operand[n+1] = n-th destination
3583 IndirectBrInst(const IndirectBrInst &IBI);
3584
3585 /// Create a new indirectbr instruction, specifying an
3586 /// Address to jump to. The number of expected destinations can be specified
3587 /// here to make memory allocation more efficient. This constructor can also
3588 /// autoinsert before another instruction.
3589 IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);
3590
3591 /// Create a new indirectbr instruction, specifying an
3592 /// Address to jump to. The number of expected destinations can be specified
3593 /// here to make memory allocation more efficient. This constructor also
3594 /// autoinserts at the end of the specified BasicBlock.
3595 IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);
3596
3597 // allocate space for exactly zero operands
3598 void *operator new(size_t S) { return User::operator new(S); }
3599
3600 void init(Value *Address, unsigned NumDests);
3601 void growOperands();
3602
3603protected:
3604 // Note: Instruction needs to be a friend here to call cloneImpl.
3605 friend class Instruction;
3606
3607 IndirectBrInst *cloneImpl() const;
3608
3609public:
3610 void operator delete(void *Ptr) { User::operator delete(Ptr); }
3611
3612 /// Iterator type that casts an operand to a basic block.
3613 ///
3614 /// This only makes sense because the successors are stored as adjacent
3615 /// operands for indirectbr instructions.
3616 struct succ_op_iterator
3617 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3618 std::random_access_iterator_tag, BasicBlock *,
3619 ptrdiff_t, BasicBlock *, BasicBlock *> {
3620 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3621
3622 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3623 BasicBlock *operator->() const { return operator*(); }
3624 };
3625
3626 /// The const version of `succ_op_iterator`.
3627 struct const_succ_op_iterator
3628 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3629 std::random_access_iterator_tag,
3630 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3631 const BasicBlock *> {
3632 explicit const_succ_op_iterator(const_value_op_iterator I)
3633 : iterator_adaptor_base(I) {}
3634
3635 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3636 const BasicBlock *operator->() const { return operator*(); }
3637 };
3638
3639 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3640 Instruction *InsertBefore = nullptr) {
3641 return new IndirectBrInst(Address, NumDests, InsertBefore);
3642 }
3643
3644 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3645 BasicBlock *InsertAtEnd) {
3646 return new IndirectBrInst(Address, NumDests, InsertAtEnd);
3647 }
3648
3649 /// Provide fast operand accessors.
3650 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
3651
3652 // Accessor Methods for IndirectBrInst instruction.
3653 Value *getAddress() { return getOperand(0); }
3654 const Value *getAddress() const { return getOperand(0); }
3655 void setAddress(Value *V) { setOperand(0, V); }
3656
3657 /// return the number of possible destinations in this
3658 /// indirectbr instruction.
3659 unsigned getNumDestinations() const { return getNumOperands()-1; }
3660
3661 /// Return the specified destination.
3662 BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
3663 const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }
3664
3665 /// Add a destination.
3666 ///
3667 void addDestination(BasicBlock *Dest);
3668
3669 /// This method removes the specified successor from the
3670 /// indirectbr instruction.
3671 void removeDestination(unsigned i);
3672
3673 unsigned getNumSuccessors() const { return getNumOperands()-1; }
3674 BasicBlock *getSuccessor(unsigned i) const {
3675 return cast<BasicBlock>(getOperand(i+1));
3676 }
3677 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3678 setOperand(i + 1, NewSucc);
3679 }
3680
3681 iterator_range<succ_op_iterator> successors() {
3682 return make_range(succ_op_iterator(std::next(value_op_begin())),
3683 succ_op_iterator(value_op_end()));
3684 }
3685
3686 iterator_range<const_succ_op_iterator> successors() const {
3687 return make_range(const_succ_op_iterator(std::next(value_op_begin())),
3688 const_succ_op_iterator(value_op_end()));
3689 }
3690
3691 // Methods for support type inquiry through isa, cast, and dyn_cast:
3692 static bool classof(const Instruction *I) {
3693 return I->getOpcode() == Instruction::IndirectBr;
3694 }
3695 static bool classof(const Value *V) {
3696 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3697 }
3698};
3699
3700template <>
3701struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> {
3702};
3703
// Analyzer-rendered macro expansion of DEFINE_TRANSPARENT_OPERAND_ACCESSORS:
// it defines IndirectBrInst's op_begin/op_end (mutable and const),
// getOperand/setOperand, getNumOperands, and the Op<Idx>() helpers, all in
// terms of OperandTraits<IndirectBrInst>. The ((void)0) occurrences are
// asserts compiled out by -DNDEBUG (see the build invocation in this report).
3704DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)IndirectBrInst::op_iterator IndirectBrInst::op_begin() { return
OperandTraits<IndirectBrInst>::op_begin(this); } IndirectBrInst
::const_op_iterator IndirectBrInst::op_begin() const { return
OperandTraits<IndirectBrInst>::op_begin(const_cast<
IndirectBrInst*>(this)); } IndirectBrInst::op_iterator IndirectBrInst
::op_end() { return OperandTraits<IndirectBrInst>::op_end
(this); } IndirectBrInst::const_op_iterator IndirectBrInst::op_end
() const { return OperandTraits<IndirectBrInst>::op_end
(const_cast<IndirectBrInst*>(this)); } Value *IndirectBrInst
::getOperand(unsigned i_nocapture) const { ((void)0); return cast_or_null
<Value>( OperandTraits<IndirectBrInst>::op_begin(
const_cast<IndirectBrInst*>(this))[i_nocapture].get());
} void IndirectBrInst::setOperand(unsigned i_nocapture, Value
*Val_nocapture) { ((void)0); OperandTraits<IndirectBrInst
>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
IndirectBrInst::getNumOperands() const { return OperandTraits
<IndirectBrInst>::operands(this); } template <int Idx_nocapture
> Use &IndirectBrInst::Op() { return this->OpFrom<
Idx_nocapture>(this); } template <int Idx_nocapture>
const Use &IndirectBrInst::Op() const { return this->
OpFrom<Idx_nocapture>(this); }
3705
3706//===----------------------------------------------------------------------===//
3707// InvokeInst Class
3708//===----------------------------------------------------------------------===//
3709
3710/// Invoke instruction. The SubclassData field is used to hold the
3711/// calling convention of the call.
3712///
// The 'invoke' instruction: a call with two successor blocks. Its operand
// array ends with [normal dest][unwind dest][callee] (see the negative
// end-relative indices below). Asserts in this rendering appear as
// ((void)0) because the TU was preprocessed with -DNDEBUG.
3713class InvokeInst : public CallBase {
3714 /// The number of operands for this call beyond the called function,
3715 /// arguments, and operand bundles.
3716 static constexpr int NumExtraOperands = 2;
3717
3718 /// The index from the end of the operand array to the normal destination.
3719 static constexpr int NormalDestOpEndIdx = -3;
3720
3721 /// The index from the end of the operand array to the unwind destination.
3722 static constexpr int UnwindDestOpEndIdx = -2;
3723
3724 InvokeInst(const InvokeInst &BI);
3725
3726 /// Construct an InvokeInst given a range of arguments.
3727 ///
3728 /// Construct an InvokeInst from a range of arguments
3729 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3730 BasicBlock *IfException, ArrayRef<Value *> Args,
3731 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3732 const Twine &NameStr, Instruction *InsertBefore);
3733
3734 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3735 BasicBlock *IfException, ArrayRef<Value *> Args,
3736 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3737 const Twine &NameStr, BasicBlock *InsertAtEnd);
3738
3739 void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3740 BasicBlock *IfException, ArrayRef<Value *> Args,
3741 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3742
3743 /// Compute the number of operands to allocate.
3744 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
3745 // We need one operand for the called function, plus our extra operands and
3746 // the input operand counts provided.
3747 return 1 + NumExtraOperands + NumArgs + NumBundleInputs;
3748 }
3749
3750protected:
3751 // Note: Instruction needs to be a friend here to call cloneImpl.
3752 friend class Instruction;
3753
3754 InvokeInst *cloneImpl() const;
3755
3756public:
3757 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3758 BasicBlock *IfException, ArrayRef<Value *> Args,
3759 const Twine &NameStr,
3760 Instruction *InsertBefore = nullptr) {
3761 int NumOperands = ComputeNumOperands(Args.size());
3762 return new (NumOperands)
3763 InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
3764 NameStr, InsertBefore);
3765 }
3766
3767 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3768 BasicBlock *IfException, ArrayRef<Value *> Args,
3769 ArrayRef<OperandBundleDef> Bundles = None,
3770 const Twine &NameStr = "",
3771 Instruction *InsertBefore = nullptr) {
3772 int NumOperands =
3773 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
     // Bundle descriptors need extra raw bytes beyond the Use array; the
     // two-argument placement new reserves them.
3774 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3775
3776 return new (NumOperands, DescriptorBytes)
3777 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3778 NameStr, InsertBefore);
3779 }
3780
3781 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3782 BasicBlock *IfException, ArrayRef<Value *> Args,
3783 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3784 int NumOperands = ComputeNumOperands(Args.size());
3785 return new (NumOperands)
3786 InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
3787 NameStr, InsertAtEnd);
3788 }
3789
3790 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3791 BasicBlock *IfException, ArrayRef<Value *> Args,
3792 ArrayRef<OperandBundleDef> Bundles,
3793 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3794 int NumOperands =
3795 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3796 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3797
3798 return new (NumOperands, DescriptorBytes)
3799 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3800 NameStr, InsertAtEnd);
3801 }
3802
     // FunctionCallee convenience overloads: unpack the (type, callee) pair
     // and forward to the FunctionType* overloads above.
3803 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3804 BasicBlock *IfException, ArrayRef<Value *> Args,
3805 const Twine &NameStr,
3806 Instruction *InsertBefore = nullptr) {
3807 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3808 IfException, Args, None, NameStr, InsertBefore);
3809 }
3810
3811 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3812 BasicBlock *IfException, ArrayRef<Value *> Args,
3813 ArrayRef<OperandBundleDef> Bundles = None,
3814 const Twine &NameStr = "",
3815 Instruction *InsertBefore = nullptr) {
3816 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3817 IfException, Args, Bundles, NameStr, InsertBefore);
3818 }
3819
3820 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3821 BasicBlock *IfException, ArrayRef<Value *> Args,
3822 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3823 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3824 IfException, Args, NameStr, InsertAtEnd);
3825 }
3826
3827 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3828 BasicBlock *IfException, ArrayRef<Value *> Args,
3829 ArrayRef<OperandBundleDef> Bundles,
3830 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3831 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3832 IfException, Args, Bundles, NameStr, InsertAtEnd);
3833 }
3834
3835 /// Create a clone of \p II with a different set of operand bundles and
3836 /// insert it before \p InsertPt.
3837 ///
3838 /// The returned invoke instruction is identical to \p II in every way except
3839 /// that the operand bundles for the new instruction are set to the operand
3840 /// bundles in \p Bundles.
3841 static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles,
3842 Instruction *InsertPt = nullptr);
3843
3844 // get*Dest - Return the destination basic blocks...
3845 BasicBlock *getNormalDest() const {
3846 return cast<BasicBlock>(Op<NormalDestOpEndIdx>());
3847 }
3848 BasicBlock *getUnwindDest() const {
3849 return cast<BasicBlock>(Op<UnwindDestOpEndIdx>());
3850 }
     // NOTE(review): the block pointer is stored via reinterpret_cast rather
     // than an implicit derived-to-base conversion — presumably because
     // BasicBlock is not a complete type at this point in the header; confirm.
3851 void setNormalDest(BasicBlock *B) {
3852 Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3853 }
3854 void setUnwindDest(BasicBlock *B) {
3855 Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3856 }
3857
3858 /// Get the landingpad instruction from the landing pad
3859 /// block (the unwind destination).
3860 LandingPadInst *getLandingPadInst() const;
3861
     // Successor 0 is the normal destination; successor 1 is the unwind
     // destination. The asserts below render as ((void)0) under -DNDEBUG.
3862 BasicBlock *getSuccessor(unsigned i) const {
3863 assert(i < 2 && "Successor # out of range for invoke!")((void)0);
3864 return i == 0 ? getNormalDest() : getUnwindDest();
3865 }
3866
3867 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3868 assert(i < 2 && "Successor # out of range for invoke!")((void)0);
3869 if (i == 0)
3870 setNormalDest(NewSucc);
3871 else
3872 setUnwindDest(NewSucc);
3873 }
3874
3875 unsigned getNumSuccessors() const { return 2; }
3876
3877 // Methods for support type inquiry through isa, cast, and dyn_cast:
3878 static bool classof(const Instruction *I) {
3879 return (I->getOpcode() == Instruction::Invoke);
3880 }
3881 static bool classof(const Value *V) {
3882 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3883 }
3884
3885private:
3886 // Shadow Instruction::setInstructionSubclassData with a private forwarding
3887 // method so that subclasses cannot accidentally use it.
3888 template <typename Bitfield>
3889 void setSubclassData(typename Bitfield::Type Value) {
3890 Instruction::setSubclassData<Bitfield>(Value);
3891 }
3892};
3893
// Out-of-line inline constructor (insert-before form). The operand array
// start is computed as op_end(this) - NumOperands, i.e. the Uses are
// co-allocated with the instruction by the placement new in Create().
// NOTE(review): standard Use-array-precedes-User layout — confirm in User.h.
3894InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3895 BasicBlock *IfException, ArrayRef<Value *> Args,
3896 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3897 const Twine &NameStr, Instruction *InsertBefore)
3898 : CallBase(Ty->getReturnType(), Instruction::Invoke,
3899 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
3900 InsertBefore) {
3901 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
3902}
3903
// Out-of-line inline constructor (insert-at-block-end form); identical to the
// insert-before form above except for the insertion point parameter.
3904InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3905 BasicBlock *IfException, ArrayRef<Value *> Args,
3906 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3907 const Twine &NameStr, BasicBlock *InsertAtEnd)
3908 : CallBase(Ty->getReturnType(), Instruction::Invoke,
3909 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
3910 InsertAtEnd) {
3911 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
3912}
3913
3914//===----------------------------------------------------------------------===//
3915// CallBrInst Class
3916//===----------------------------------------------------------------------===//
3917
3918/// CallBr instruction, tracking function calls that may not return control but
3919/// instead transfer it to a third location. The SubclassData field is used to
3920/// hold the calling convention of the call.
3921///
3922class CallBrInst : public CallBase {
3923
     // Number of indirect-destination block operands held by this callbr
     // (see getNumIndirectDests / the end-relative operand arithmetic below).
3924 unsigned NumIndirectDests;
3925
3926 CallBrInst(const CallBrInst &BI);
3927
3928 /// Construct a CallBrInst given a range of arguments.
3929 ///
3930 /// Construct a CallBrInst from a range of arguments
3931 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3932 ArrayRef<BasicBlock *> IndirectDests,
3933 ArrayRef<Value *> Args,
3934 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3935 const Twine &NameStr, Instruction *InsertBefore);
3936
3937 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3938 ArrayRef<BasicBlock *> IndirectDests,
3939 ArrayRef<Value *> Args,
3940 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3941 const Twine &NameStr, BasicBlock *InsertAtEnd);
3942
3943 void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest,
3944 ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
3945 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3946
3947 /// Should the Indirect Destinations change, scan + update the Arg list.
3948 void updateArgBlockAddresses(unsigned i, BasicBlock *B);
3949
3950 /// Compute the number of operands to allocate.
3951 static int ComputeNumOperands(int NumArgs, int NumIndirectDests,
3952 int NumBundleInputs = 0) {
3953 // We need one operand for the called function, plus our extra operands and
3954 // the input operand counts provided.
     // (The constant 2 covers the callee and the default destination.)
3955 return 2 + NumIndirectDests + NumArgs + NumBundleInputs;
3956 }
3957
3958protected:
3959 // Note: Instruction needs to be a friend here to call cloneImpl.
3960 friend class Instruction;
3961
3962 CallBrInst *cloneImpl() const;
3963
3964public:
3965 static CallBrInst *Create(FunctionType *Ty, Value *Func,
3966 BasicBlock *DefaultDest,
3967 ArrayRef<BasicBlock *> IndirectDests,
3968 ArrayRef<Value *> Args, const Twine &NameStr,
3969 Instruction *InsertBefore = nullptr) {
3970 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
3971 return new (NumOperands)
3972 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None,
3973 NumOperands, NameStr, InsertBefore);
3974 }
3975
3976 static CallBrInst *Create(FunctionType *Ty, Value *Func,
3977 BasicBlock *DefaultDest,
3978 ArrayRef<BasicBlock *> IndirectDests,
3979 ArrayRef<Value *> Args,
3980 ArrayRef<OperandBundleDef> Bundles = None,
3981 const Twine &NameStr = "",
3982 Instruction *InsertBefore = nullptr) {
3983 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
3984 CountBundleInputs(Bundles));
     // Extra raw bytes for the bundle descriptors, reserved by the
     // two-argument placement new.
3985 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3986
3987 return new (NumOperands, DescriptorBytes)
3988 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
3989 NumOperands, NameStr, InsertBefore);
3990 }
3991
3992 static CallBrInst *Create(FunctionType *Ty, Value *Func,
3993 BasicBlock *DefaultDest,
3994 ArrayRef<BasicBlock *> IndirectDests,
3995 ArrayRef<Value *> Args, const Twine &NameStr,
3996 BasicBlock *InsertAtEnd) {
3997 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
3998 return new (NumOperands)
3999 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None,
4000 NumOperands, NameStr, InsertAtEnd);
4001 }
4002
4003 static CallBrInst *Create(FunctionType *Ty, Value *Func,
4004 BasicBlock *DefaultDest,
4005 ArrayRef<BasicBlock *> IndirectDests,
4006 ArrayRef<Value *> Args,
4007 ArrayRef<OperandBundleDef> Bundles,
4008 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4009 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
4010 CountBundleInputs(Bundles));
4011 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4012
4013 return new (NumOperands, DescriptorBytes)
4014 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
4015 NumOperands, NameStr, InsertAtEnd);
4016 }
4017
     // FunctionCallee convenience overloads: unpack (type, callee) and forward.
4018 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4019 ArrayRef<BasicBlock *> IndirectDests,
4020 ArrayRef<Value *> Args, const Twine &NameStr,
4021 Instruction *InsertBefore = nullptr) {
4022 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4023 IndirectDests, Args, NameStr, InsertBefore);
4024 }
4025
4026 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4027 ArrayRef<BasicBlock *> IndirectDests,
4028 ArrayRef<Value *> Args,
4029 ArrayRef<OperandBundleDef> Bundles = None,
4030 const Twine &NameStr = "",
4031 Instruction *InsertBefore = nullptr) {
4032 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4033 IndirectDests, Args, Bundles, NameStr, InsertBefore);
4034 }
4035
4036 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4037 ArrayRef<BasicBlock *> IndirectDests,
4038 ArrayRef<Value *> Args, const Twine &NameStr,
4039 BasicBlock *InsertAtEnd) {
4040 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4041 IndirectDests, Args, NameStr, InsertAtEnd);
4042 }
4043
4044 static CallBrInst *Create(FunctionCallee Func,
4045 BasicBlock *DefaultDest,
4046 ArrayRef<BasicBlock *> IndirectDests,
4047 ArrayRef<Value *> Args,
4048 ArrayRef<OperandBundleDef> Bundles,
4049 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4050 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4051 IndirectDests, Args, Bundles, NameStr, InsertAtEnd);
4052 }
4053
4054 /// Create a clone of \p CBI with a different set of operand bundles and
4055 /// insert it before \p InsertPt.
4056 ///
4057 /// The returned callbr instruction is identical to \p CBI in every way
4058 /// except that the operand bundles for the new instruction are set to the
4059 /// operand bundles in \p Bundles.
4060 static CallBrInst *Create(CallBrInst *CBI,
4061 ArrayRef<OperandBundleDef> Bundles,
4062 Instruction *InsertPt = nullptr);
4063
4064 /// Return the number of callbr indirect dest labels.
4065 ///
4066 unsigned getNumIndirectDests() const { return NumIndirectDests; }
4067
4068 /// getIndirectDestLabel - Return the i-th indirect dest label.
4069 ///
4070 Value *getIndirectDestLabel(unsigned i) const {
4071 assert(i < getNumIndirectDests() && "Out of bounds!")((void)0);
4072 return getOperand(i + getNumArgOperands() + getNumTotalBundleOperands() +
4073 1);
4074 }
4075
4076 Value *getIndirectDestLabelUse(unsigned i) const {
4077 assert(i < getNumIndirectDests() && "Out of bounds!")((void)0);
4078 return getOperandUse(i + getNumArgOperands() + getNumTotalBundleOperands() +
4079 1);
4080 }
4081
4082 // Return the destination basic blocks...
     // Layout relative to the last operand Op<-1>: the NumIndirectDests
     // indirect destinations occupy the slots just before Op<-1>, and the
     // default destination sits immediately before them.
4083 BasicBlock *getDefaultDest() const {
4084 return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1));
4085 }
4086 BasicBlock *getIndirectDest(unsigned i) const {
4087 return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i));
4088 }
4089 SmallVector<BasicBlock *, 16> getIndirectDests() const {
4090 SmallVector<BasicBlock *, 16> IndirectDests;
4091 for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i)
4092 IndirectDests.push_back(getIndirectDest(i));
4093 return IndirectDests;
4094 }
4095 void setDefaultDest(BasicBlock *B) {
4096 *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B);
4097 }
4098 void setIndirectDest(unsigned i, BasicBlock *B) {
     // Keep any blockaddress() arguments referring to the old block in sync.
4099 updateArgBlockAddresses(i, B);
4100 *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B);
4101 }
4102
     // NOTE(review): this bound (getNumSuccessors() + 1, i.e.
     // getNumIndirectDests() + 2) is one looser than setSuccessor's below and
     // would admit i == getNumSuccessors(), reading past the last indirect
     // dest. It has no effect in this build because asserts compile to
     // ((void)0) under -DNDEBUG, but the bounds should match — confirm
     // against upstream.
4103 BasicBlock *getSuccessor(unsigned i) const {
4104 assert(i < getNumSuccessors() + 1 &&((void)0)
4105 "Successor # out of range for callbr!")((void)0);
4106 return i == 0 ? getDefaultDest() : getIndirectDest(i - 1);
4107 }
4108
4109 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
4110 assert(i < getNumIndirectDests() + 1 &&((void)0)
4111 "Successor # out of range for callbr!")((void)0);
4112 return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc);
4113 }
4114
4115 unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; }
4116
4117 // Methods for support type inquiry through isa, cast, and dyn_cast:
4118 static bool classof(const Instruction *I) {
4119 return (I->getOpcode() == Instruction::CallBr);
4120 }
4121 static bool classof(const Value *V) {
4122 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4123 }
4124
4125private:
4126 // Shadow Instruction::setInstructionSubclassData with a private forwarding
4127 // method so that subclasses cannot accidentally use it.
4128 template <typename Bitfield>
4129 void setSubclassData(typename Bitfield::Type Value) {
4130 Instruction::setSubclassData<Bitfield>(Value);
4131 }
4132};
4133
// Out-of-line inline constructor (insert-before form). As with InvokeInst,
// the operand array start is op_end(this) - NumOperands: the Uses are
// co-allocated with the instruction by the placement new in Create().
4134CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4135 ArrayRef<BasicBlock *> IndirectDests,
4136 ArrayRef<Value *> Args,
4137 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4138 const Twine &NameStr, Instruction *InsertBefore)
4139 : CallBase(Ty->getReturnType(), Instruction::CallBr,
4140 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4141 InsertBefore) {
4142 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4143}
4144
// Out-of-line inline constructor (insert-at-block-end form); identical to
// the insert-before form above except for the insertion point parameter.
4145CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4146 ArrayRef<BasicBlock *> IndirectDests,
4147 ArrayRef<Value *> Args,
4148 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4149 const Twine &NameStr, BasicBlock *InsertAtEnd)
4150 : CallBase(Ty->getReturnType(), Instruction::CallBr,
4151 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4152 InsertAtEnd) {
4153 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4154}
4155
4156//===----------------------------------------------------------------------===//
4157// ResumeInst Class
4158//===----------------------------------------------------------------------===//
4159
4160//===---------------------------------------------------------------------------
4161/// Resume the propagation of an exception.
4162///
// The 'resume' terminator: carries exactly one operand (the exception value,
// see getValue()) and has no successors. The getSuccessor/setSuccessor
// overrides are private and unreachable so callers cannot misuse them.
4163class ResumeInst : public Instruction {
4164 ResumeInst(const ResumeInst &RI);
4165
4166 explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr);
4167 ResumeInst(Value *Exn, BasicBlock *InsertAtEnd);
4168
4169protected:
4170 // Note: Instruction needs to be a friend here to call cloneImpl.
4171 friend class Instruction;
4172
4173 ResumeInst *cloneImpl() const;
4174
4175public:
     // new(1): allocate space for the single exception-value operand.
4176 static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) {
4177 return new(1) ResumeInst(Exn, InsertBefore);
4178 }
4179
4180 static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) {
4181 return new(1) ResumeInst(Exn, InsertAtEnd);
4182 }
4183
4184 /// Provide fast operand accessors
     // (Analyzer-rendered expansion of DECLARE_TRANSPARENT_OPERAND_ACCESSORS:
     // declares getOperand/setOperand, op_begin/op_end, Op<> and
     // getNumOperands, defined by the DEFINE_ macro after the class.)
4185 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
4186
4187 /// Convenience accessor.
4188 Value *getValue() const { return Op<0>(); }
4189
4190 unsigned getNumSuccessors() const { return 0; }
4191
4192 // Methods for support type inquiry through isa, cast, and dyn_cast:
4193 static bool classof(const Instruction *I) {
4194 return I->getOpcode() == Instruction::Resume;
4195 }
4196 static bool classof(const Value *V) {
4197 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4198 }
4199
4200private:
4201 BasicBlock *getSuccessor(unsigned idx) const {
4202 llvm_unreachable("ResumeInst has no successors!")__builtin_unreachable();
4203 }
4204
4205 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
4206 llvm_unreachable("ResumeInst has no successors!")__builtin_unreachable();
4207 }
4208};
4209
// OperandTraits specialization for ResumeInst: exactly one operand (the
// exception value), allocated with the instruction (fixed, not hung off).
4210template <>
4211struct OperandTraits<ResumeInst> :
4212 public FixedNumOperandTraits<ResumeInst, 1> {
4213};
4214
// Analyzer-rendered macro expansion: out-of-line definitions of the operand
// accessors declared inside ResumeInst by DECLARE_TRANSPARENT_OPERAND_ACCESSORS,
// all delegating to OperandTraits<ResumeInst>. ((void)0) = compiled-out assert.
4215DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value)ResumeInst::op_iterator ResumeInst::op_begin() { return OperandTraits
<ResumeInst>::op_begin(this); } ResumeInst::const_op_iterator
ResumeInst::op_begin() const { return OperandTraits<ResumeInst
>::op_begin(const_cast<ResumeInst*>(this)); } ResumeInst
::op_iterator ResumeInst::op_end() { return OperandTraits<
ResumeInst>::op_end(this); } ResumeInst::const_op_iterator
ResumeInst::op_end() const { return OperandTraits<ResumeInst
>::op_end(const_cast<ResumeInst*>(this)); } Value *ResumeInst
::getOperand(unsigned i_nocapture) const { ((void)0); return cast_or_null
<Value>( OperandTraits<ResumeInst>::op_begin(const_cast
<ResumeInst*>(this))[i_nocapture].get()); } void ResumeInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { ((
void)0); OperandTraits<ResumeInst>::op_begin(this)[i_nocapture
] = Val_nocapture; } unsigned ResumeInst::getNumOperands() const
{ return OperandTraits<ResumeInst>::operands(this); } template
<int Idx_nocapture> Use &ResumeInst::Op() { return
this->OpFrom<Idx_nocapture>(this); } template <int
Idx_nocapture> const Use &ResumeInst::Op() const { return
this->OpFrom<Idx_nocapture>(this); }
4216
4217//===----------------------------------------------------------------------===//
4218// CatchSwitchInst Class
4219//===----------------------------------------------------------------------===//
// The 'catchswitch' instruction: holds a parent scope pad (operand 0), an
// optional unwind destination (operand 1 when hasUnwindDest()), and a list
// of handler blocks. Operands are "hung off" and can grow (growOperands),
// which is why operator new allocates zero co-located operands.
4220class CatchSwitchInst : public Instruction {
     // Whether an unwind destination is present is kept in the instruction's
     // subclass-data bitfield, not in a member.
4221 using UnwindDestField = BoolBitfieldElementT<0>;
4222
4223 /// The number of operands actually allocated. NumOperands is
4224 /// the number actually in use.
4225 unsigned ReservedSpace;
4226
4227 // Operand[0] = Outer scope
4228 // Operand[1] = Unwind block destination
4229 // Operand[n] = BasicBlock to go to on match
4230 CatchSwitchInst(const CatchSwitchInst &CSI);
4231
4232 /// Create a new switch instruction, specifying a
4233 /// default destination. The number of additional handlers can be specified
4234 /// here to make memory allocation more efficient.
4235 /// This constructor can also autoinsert before another instruction.
4236 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4237 unsigned NumHandlers, const Twine &NameStr,
4238 Instruction *InsertBefore);
4239
4240 /// Create a new switch instruction, specifying a
4241 /// default destination. The number of additional handlers can be specified
4242 /// here to make memory allocation more efficient.
4243 /// This constructor also autoinserts at the end of the specified BasicBlock.
4244 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4245 unsigned NumHandlers, const Twine &NameStr,
4246 BasicBlock *InsertAtEnd);
4247
4248 // allocate space for exactly zero operands
4249 void *operator new(size_t S) { return User::operator new(S); }
4250
4251 void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved);
4252 void growOperands(unsigned Size);
4253
4254protected:
4255 // Note: Instruction needs to be a friend here to call cloneImpl.
4256 friend class Instruction;
4257
4258 CatchSwitchInst *cloneImpl() const;
4259
4260public:
4261 void operator delete(void *Ptr) { return User::operator delete(Ptr); }
4262
4263 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4264 unsigned NumHandlers,
4265 const Twine &NameStr = "",
4266 Instruction *InsertBefore = nullptr) {
4267 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4268 InsertBefore);
4269 }
4270
4271 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4272 unsigned NumHandlers, const Twine &NameStr,
4273 BasicBlock *InsertAtEnd) {
4274 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4275 InsertAtEnd);
4276 }
4277
4278 /// Provide fast operand accessors
     // (Analyzer-rendered expansion of DECLARE_TRANSPARENT_OPERAND_ACCESSORS;
     // definitions come from the DEFINE_ macro after the class.)
4279 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
4280
4281 // Accessor Methods for CatchSwitch stmt
4282 Value *getParentPad() const { return getOperand(0); }
4283 void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); }
4284
4285 // Accessor Methods for CatchSwitch stmt
4286 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4287 bool unwindsToCaller() const { return !hasUnwindDest(); }
4288 BasicBlock *getUnwindDest() const {
4289 if (hasUnwindDest())
4290 return cast<BasicBlock>(getOperand(1));
4291 return nullptr;
4292 }
4293 void setUnwindDest(BasicBlock *UnwindDest) {
     // Asserts render as ((void)0) in this -DNDEBUG preprocessed dump.
4294 assert(UnwindDest)((void)0);
4295 assert(hasUnwindDest())((void)0);
4296 setOperand(1, UnwindDest);
4297 }
4298
4299 /// return the number of 'handlers' in this catchswitch
4300 /// instruction, except the default handler
     // Handlers = all operands minus the parent pad and, when present, the
     // unwind destination.
4301 unsigned getNumHandlers() const {
4302 if (hasUnwindDest())
4303 return getNumOperands() - 2;
4304 return getNumOperands() - 1;
4305 }
4306
4307private:
4308 static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); }
4309 static const BasicBlock *handler_helper(const Value *V) {
4310 return cast<BasicBlock>(V);
4311 }
4312
4313public:
4314 using DerefFnTy = BasicBlock *(*)(Value *);
4315 using handler_iterator = mapped_iterator<op_iterator, DerefFnTy>;
4316 using handler_range = iterator_range<handler_iterator>;
4317 using ConstDerefFnTy = const BasicBlock *(*)(const Value *);
4318 using const_handler_iterator =
4319 mapped_iterator<const_op_iterator, ConstDerefFnTy>;
4320 using const_handler_range = iterator_range<const_handler_iterator>;
4321
4322 /// Returns an iterator that points to the first handler in CatchSwitchInst.
4323 handler_iterator handler_begin() {
     // Skip the parent pad (operand 0) and, if present, the unwind dest.
4324 op_iterator It = op_begin() + 1;
4325 if (hasUnwindDest())
4326 ++It;
4327 return handler_iterator(It, DerefFnTy(handler_helper));
4328 }
4329
4330 /// Returns an iterator that points to the first handler in the
4331 /// CatchSwitchInst.
4332 const_handler_iterator handler_begin() const {
4333 const_op_iterator It = op_begin() + 1;
4334 if (hasUnwindDest())
4335 ++It;
4336 return const_handler_iterator(It, ConstDerefFnTy(handler_helper));
4337 }
4338
4339 /// Returns a read-only iterator that points one past the last
4340 /// handler in the CatchSwitchInst.
4341 handler_iterator handler_end() {
4342 return handler_iterator(op_end(), DerefFnTy(handler_helper));
4343 }
4344
4345 /// Returns an iterator that points one past the last handler in the
4346 /// CatchSwitchInst.
4347 const_handler_iterator handler_end() const {
4348 return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper));
4349 }
4350
4351 /// iteration adapter for range-for loops.
4352 handler_range handlers() {
4353 return make_range(handler_begin(), handler_end());
4354 }
4355
4356 /// iteration adapter for range-for loops.
4357 const_handler_range handlers() const {
4358 return make_range(handler_begin(), handler_end());
4359 }
4360
4361 /// Add an entry to the switch instruction...
4362 /// Note:
4363 /// This action invalidates handler_end(). Old handler_end() iterator will
4364 /// point to the added handler.
4365 void addHandler(BasicBlock *Dest);
4366
4367 void removeHandler(handler_iterator HI);
4368
     // Every operand except the parent pad (operand 0) is a successor block:
     // the unwind dest (when present) followed by the handlers.
4369 unsigned getNumSuccessors() const { return getNumOperands() - 1; }
4370 BasicBlock *getSuccessor(unsigned Idx) const {
4371 assert(Idx < getNumSuccessors() &&((void)0)
4372 "Successor # out of range for catchswitch!")((void)0);
4373 return cast<BasicBlock>(getOperand(Idx + 1));
4374 }
4375 void setSuccessor(unsigned Idx, BasicBlock *NewSucc) {
4376 assert(Idx < getNumSuccessors() &&((void)0)
4377 "Successor # out of range for catchswitch!")((void)0);
4378 setOperand(Idx + 1, NewSucc);
4379 }
4380
4381 // Methods for support type inquiry through isa, cast, and dyn_cast:
4382 static bool classof(const Instruction *I) {
4383 return I->getOpcode() == Instruction::CatchSwitch;
4384 }
4385 static bool classof(const Value *V) {
4386 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4387 }
4388};
4389
// OperandTraits specialization for CatchSwitchInst: hung-off operand storage,
// consistent with its zero-operand operator new and growOperands() above.
// NOTE(review): the template argument is presumably the minimum reserved
// operand count — confirm against OperandTraits.h.
4390template <>
4391struct OperandTraits<CatchSwitchInst> : public HungoffOperandTraits<2> {};
4392
// Analyzer-rendered macro expansion: out-of-line definitions of the operand
// accessors declared inside CatchSwitchInst, delegating to
// OperandTraits<CatchSwitchInst>. ((void)0) = assert compiled out by -DNDEBUG.
4393DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchSwitchInst, Value)CatchSwitchInst::op_iterator CatchSwitchInst::op_begin() { return
OperandTraits<CatchSwitchInst>::op_begin(this); } CatchSwitchInst
::const_op_iterator CatchSwitchInst::op_begin() const { return
OperandTraits<CatchSwitchInst>::op_begin(const_cast<
CatchSwitchInst*>(this)); } CatchSwitchInst::op_iterator CatchSwitchInst
::op_end() { return OperandTraits<CatchSwitchInst>::op_end
(this); } CatchSwitchInst::const_op_iterator CatchSwitchInst::
op_end() const { return OperandTraits<CatchSwitchInst>::
op_end(const_cast<CatchSwitchInst*>(this)); } Value *CatchSwitchInst
::getOperand(unsigned i_nocapture) const { ((void)0); return cast_or_null
<Value>( OperandTraits<CatchSwitchInst>::op_begin
(const_cast<CatchSwitchInst*>(this))[i_nocapture].get()
); } void CatchSwitchInst::setOperand(unsigned i_nocapture, Value
*Val_nocapture) { ((void)0); OperandTraits<CatchSwitchInst
>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
CatchSwitchInst::getNumOperands() const { return OperandTraits
<CatchSwitchInst>::operands(this); } template <int Idx_nocapture
> Use &CatchSwitchInst::Op() { return this->OpFrom<
Idx_nocapture>(this); } template <int Idx_nocapture>
const Use &CatchSwitchInst::Op() const { return this->
OpFrom<Idx_nocapture>(this); }
4394
4395//===----------------------------------------------------------------------===//
4396// CleanupPadInst Class
4397//===----------------------------------------------------------------------===//
4398class CleanupPadInst : public FuncletPadInst {
4399private:
4400 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4401 unsigned Values, const Twine &NameStr,
4402 Instruction *InsertBefore)
4403 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4404 NameStr, InsertBefore) {}
4405 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4406 unsigned Values, const Twine &NameStr,
4407 BasicBlock *InsertAtEnd)
4408 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4409 NameStr, InsertAtEnd) {}
4410
4411public:
4412 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args = None,
4413 const Twine &NameStr = "",
4414 Instruction *InsertBefore = nullptr) {
4415 unsigned Values = 1 + Args.size();
4416 return new (Values)
4417 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore);
4418 }
4419
4420 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args,
4421 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4422 unsigned Values = 1 + Args.size();
4423 return new (Values)
4424 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd);
4425 }
4426
4427 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4428 static bool classof(const Instruction *I) {
4429 return I->getOpcode() == Instruction::CleanupPad;
4430 }
4431 static bool classof(const Value *V) {
4432 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4433 }
4434};
4435
4436//===----------------------------------------------------------------------===//
4437// CatchPadInst Class
4438//===----------------------------------------------------------------------===//
4439class CatchPadInst : public FuncletPadInst {
4440private:
4441 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4442 unsigned Values, const Twine &NameStr,
4443 Instruction *InsertBefore)
4444 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4445 NameStr, InsertBefore) {}
4446 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4447 unsigned Values, const Twine &NameStr,
4448 BasicBlock *InsertAtEnd)
4449 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4450 NameStr, InsertAtEnd) {}
4451
4452public:
4453 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
4454 const Twine &NameStr = "",
4455 Instruction *InsertBefore = nullptr) {
4456 unsigned Values = 1 + Args.size();
4457 return new (Values)
4458 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore);
4459 }
4460
4461 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
4462 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4463 unsigned Values = 1 + Args.size();
4464 return new (Values)
4465 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd);
4466 }
4467
4468 /// Convenience accessors
4469 CatchSwitchInst *getCatchSwitch() const {
4470 return cast<CatchSwitchInst>(Op<-1>());
4471 }
4472 void setCatchSwitch(Value *CatchSwitch) {
4473 assert(CatchSwitch)((void)0);
4474 Op<-1>() = CatchSwitch;
4475 }
4476
4477 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4478 static bool classof(const Instruction *I) {
4479 return I->getOpcode() == Instruction::CatchPad;
4480 }
4481 static bool classof(const Value *V) {
4482 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4483 }
4484};
4485
4486//===----------------------------------------------------------------------===//
4487// CatchReturnInst Class
4488//===----------------------------------------------------------------------===//
4489
4490class CatchReturnInst : public Instruction {
4491 CatchReturnInst(const CatchReturnInst &RI);
4492 CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore);
4493 CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd);
4494
4495 void init(Value *CatchPad, BasicBlock *BB);
4496
4497protected:
4498 // Note: Instruction needs to be a friend here to call cloneImpl.
4499 friend class Instruction;
4500
4501 CatchReturnInst *cloneImpl() const;
4502
4503public:
4504 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
4505 Instruction *InsertBefore = nullptr) {
4506 assert(CatchPad)((void)0);
4507 assert(BB)((void)0);
4508 return new (2) CatchReturnInst(CatchPad, BB, InsertBefore);
4509 }
4510
4511 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
4512 BasicBlock *InsertAtEnd) {
4513 assert(CatchPad)((void)0);
4514 assert(BB)((void)0);
4515 return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd);
4516 }
4517
4518 /// Provide fast operand accessors
4519 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
4520
4521 /// Convenience accessors.
4522 CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); }
4523 void setCatchPad(CatchPadInst *CatchPad) {
4524 assert(CatchPad)((void)0);
4525 Op<0>() = CatchPad;
4526 }
4527
4528 BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); }
4529 void setSuccessor(BasicBlock *NewSucc) {
4530 assert(NewSucc)((void)0);
4531 Op<1>() = NewSucc;
4532 }
4533 unsigned getNumSuccessors() const { return 1; }
4534
4535 /// Get the parentPad of this catchret's catchpad's catchswitch.
4536 /// The successor block is implicitly a member of this funclet.
4537 Value *getCatchSwitchParentPad() const {
4538 return getCatchPad()->getCatchSwitch()->getParentPad();
4539 }
4540
4541 // Methods for support type inquiry through isa, cast, and dyn_cast:
4542 static bool classof(const Instruction *I) {
4543 return (I->getOpcode() == Instruction::CatchRet);
4544 }
4545 static bool classof(const Value *V) {
4546 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4547 }
4548
4549private:
4550 BasicBlock *getSuccessor(unsigned Idx) const {
4551 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!")((void)0);
4552 return getSuccessor();
4553 }
4554
4555 void setSuccessor(unsigned Idx, BasicBlock *B) {
4556 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!")((void)0);
4557 setSuccessor(B);
4558 }
4559};
4560
4561template <>
4562struct OperandTraits<CatchReturnInst>
4563 : public FixedNumOperandTraits<CatchReturnInst, 2> {};
4564
4565DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchReturnInst, Value)CatchReturnInst::op_iterator CatchReturnInst::op_begin() { return
OperandTraits<CatchReturnInst>::op_begin(this); } CatchReturnInst
::const_op_iterator CatchReturnInst::op_begin() const { return
OperandTraits<CatchReturnInst>::op_begin(const_cast<
CatchReturnInst*>(this)); } CatchReturnInst::op_iterator CatchReturnInst
::op_end() { return OperandTraits<CatchReturnInst>::op_end
(this); } CatchReturnInst::const_op_iterator CatchReturnInst::
op_end() const { return OperandTraits<CatchReturnInst>::
op_end(const_cast<CatchReturnInst*>(this)); } Value *CatchReturnInst
::getOperand(unsigned i_nocapture) const { ((void)0); return cast_or_null
<Value>( OperandTraits<CatchReturnInst>::op_begin
(const_cast<CatchReturnInst*>(this))[i_nocapture].get()
); } void CatchReturnInst::setOperand(unsigned i_nocapture, Value
*Val_nocapture) { ((void)0); OperandTraits<CatchReturnInst
>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
CatchReturnInst::getNumOperands() const { return OperandTraits
<CatchReturnInst>::operands(this); } template <int Idx_nocapture
> Use &CatchReturnInst::Op() { return this->OpFrom<
Idx_nocapture>(this); } template <int Idx_nocapture>
const Use &CatchReturnInst::Op() const { return this->
OpFrom<Idx_nocapture>(this); }
4566
4567//===----------------------------------------------------------------------===//
4568// CleanupReturnInst Class
4569//===----------------------------------------------------------------------===//
4570
4571class CleanupReturnInst : public Instruction {
4572 using UnwindDestField = BoolBitfieldElementT<0>;
4573
4574private:
4575 CleanupReturnInst(const CleanupReturnInst &RI);
4576 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
4577 Instruction *InsertBefore = nullptr);
4578 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
4579 BasicBlock *InsertAtEnd);
4580
4581 void init(Value *CleanupPad, BasicBlock *UnwindBB);
4582
4583protected:
4584 // Note: Instruction needs to be a friend here to call cloneImpl.
4585 friend class Instruction;
4586
4587 CleanupReturnInst *cloneImpl() const;
4588
4589public:
4590 static CleanupReturnInst *Create(Value *CleanupPad,
4591 BasicBlock *UnwindBB = nullptr,
4592 Instruction *InsertBefore = nullptr) {
4593 assert(CleanupPad)((void)0);
4594 unsigned Values = 1;
4595 if (UnwindBB)
4596 ++Values;
4597 return new (Values)
4598 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore);
4599 }
4600
4601 static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB,
4602 BasicBlock *InsertAtEnd) {
4603 assert(CleanupPad)((void)0);
4604 unsigned Values = 1;
4605 if (UnwindBB)
4606 ++Values;
4607 return new (Values)
4608 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertAtEnd);
4609 }
4610
4611 /// Provide fast operand accessors
4612 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
4613
4614 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4615 bool unwindsToCaller() const { return !hasUnwindDest(); }
4616
4617 /// Convenience accessor.
4618 CleanupPadInst *getCleanupPad() const {
4619 return cast<CleanupPadInst>(Op<0>());
4620 }
4621 void setCleanupPad(CleanupPadInst *CleanupPad) {
4622 assert(CleanupPad)((void)0);
4623 Op<0>() = CleanupPad;
4624 }
4625
4626 unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; }
4627
4628 BasicBlock *getUnwindDest() const {
4629 return hasUnwindDest() ? cast<BasicBlock>(Op<1>()) : nullptr;
4630 }
4631 void setUnwindDest(BasicBlock *NewDest) {
4632 assert(NewDest)((void)0);
4633 assert(hasUnwindDest())((void)0);
4634 Op<1>() = NewDest;
4635 }
4636
4637 // Methods for support type inquiry through isa, cast, and dyn_cast:
4638 static bool classof(const Instruction *I) {
4639 return (I->getOpcode() == Instruction::CleanupRet);
4640 }
4641 static bool classof(const Value *V) {
4642 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4643 }
4644
4645private:
4646 BasicBlock *getSuccessor(unsigned Idx) const {
4647 assert(Idx == 0)((void)0);
4648 return getUnwindDest();
4649 }
4650
4651 void setSuccessor(unsigned Idx, BasicBlock *B) {
4652 assert(Idx == 0)((void)0);
4653 setUnwindDest(B);
4654 }
4655
4656 // Shadow Instruction::setInstructionSubclassData with a private forwarding
4657 // method so that subclasses cannot accidentally use it.
4658 template <typename Bitfield>
4659 void setSubclassData(typename Bitfield::Type Value) {
4660 Instruction::setSubclassData<Bitfield>(Value);
4661 }
4662};
4663
4664template <>
4665struct OperandTraits<CleanupReturnInst>
4666 : public VariadicOperandTraits<CleanupReturnInst, /*MINARITY=*/1> {};
4667
4668DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CleanupReturnInst, Value)CleanupReturnInst::op_iterator CleanupReturnInst::op_begin() {
return OperandTraits<CleanupReturnInst>::op_begin(this
); } CleanupReturnInst::const_op_iterator CleanupReturnInst::
op_begin() const { return OperandTraits<CleanupReturnInst>
::op_begin(const_cast<CleanupReturnInst*>(this)); } CleanupReturnInst
::op_iterator CleanupReturnInst::op_end() { return OperandTraits
<CleanupReturnInst>::op_end(this); } CleanupReturnInst::
const_op_iterator CleanupReturnInst::op_end() const { return OperandTraits
<CleanupReturnInst>::op_end(const_cast<CleanupReturnInst
*>(this)); } Value *CleanupReturnInst::getOperand(unsigned
i_nocapture) const { ((void)0); return cast_or_null<Value
>( OperandTraits<CleanupReturnInst>::op_begin(const_cast
<CleanupReturnInst*>(this))[i_nocapture].get()); } void
CleanupReturnInst::setOperand(unsigned i_nocapture, Value *Val_nocapture
) { ((void)0); OperandTraits<CleanupReturnInst>::op_begin
(this)[i_nocapture] = Val_nocapture; } unsigned CleanupReturnInst
::getNumOperands() const { return OperandTraits<CleanupReturnInst
>::operands(this); } template <int Idx_nocapture> Use
&CleanupReturnInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
CleanupReturnInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
4669
4670//===----------------------------------------------------------------------===//
4671// UnreachableInst Class
4672//===----------------------------------------------------------------------===//
4673
4674//===---------------------------------------------------------------------------
4675/// This function has undefined behavior. In particular, the
4676/// presence of this instruction indicates some higher level knowledge that the
4677/// end of the block cannot be reached.
4678///
4679class UnreachableInst : public Instruction {
4680protected:
4681 // Note: Instruction needs to be a friend here to call cloneImpl.
4682 friend class Instruction;
4683
4684 UnreachableInst *cloneImpl() const;
4685
4686public:
4687 explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = nullptr);
4688 explicit UnreachableInst(LLVMContext &C, BasicBlock *InsertAtEnd);
4689
4690 // allocate space for exactly zero operands
4691 void *operator new(size_t S) { return User::operator new(S, 0); }
4692 void operator delete(void *Ptr) { User::operator delete(Ptr); }
4693
4694 unsigned getNumSuccessors() const { return 0; }
4695
4696 // Methods for support type inquiry through isa, cast, and dyn_cast:
4697 static bool classof(const Instruction *I) {
4698 return I->getOpcode() == Instruction::Unreachable;
4699 }
4700 static bool classof(const Value *V) {
4701 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4702 }
4703
4704private:
4705 BasicBlock *getSuccessor(unsigned idx) const {
4706 llvm_unreachable("UnreachableInst has no successors!")__builtin_unreachable();
4707 }
4708
4709 void setSuccessor(unsigned idx, BasicBlock *B) {
4710 llvm_unreachable("UnreachableInst has no successors!")__builtin_unreachable();
4711 }
4712};
4713
4714//===----------------------------------------------------------------------===//
4715// TruncInst Class
4716//===----------------------------------------------------------------------===//
4717
4718/// This class represents a truncation of integer types.
4719class TruncInst : public CastInst {
4720protected:
4721 // Note: Instruction needs to be a friend here to call cloneImpl.
4722 friend class Instruction;
4723
4724 /// Clone an identical TruncInst
4725 TruncInst *cloneImpl() const;
4726
4727public:
4728 /// Constructor with insert-before-instruction semantics
4729 TruncInst(
4730 Value *S, ///< The value to be truncated
4731 Type *Ty, ///< The (smaller) type to truncate to
4732 const Twine &NameStr = "", ///< A name for the new instruction
4733 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4734 );
4735
4736 /// Constructor with insert-at-end-of-block semantics
4737 TruncInst(
4738 Value *S, ///< The value to be truncated
4739 Type *Ty, ///< The (smaller) type to truncate to
4740 const Twine &NameStr, ///< A name for the new instruction
4741 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4742 );
4743
4744 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4745 static bool classof(const Instruction *I) {
4746 return I->getOpcode() == Trunc;
4747 }
4748 static bool classof(const Value *V) {
4749 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4750 }
4751};
4752
4753//===----------------------------------------------------------------------===//
4754// ZExtInst Class
4755//===----------------------------------------------------------------------===//
4756
4757/// This class represents zero extension of integer types.
4758class ZExtInst : public CastInst {
4759protected:
4760 // Note: Instruction needs to be a friend here to call cloneImpl.
4761 friend class Instruction;
4762
4763 /// Clone an identical ZExtInst
4764 ZExtInst *cloneImpl() const;
4765
4766public:
4767 /// Constructor with insert-before-instruction semantics
4768 ZExtInst(
4769 Value *S, ///< The value to be zero extended
4770 Type *Ty, ///< The type to zero extend to
4771 const Twine &NameStr = "", ///< A name for the new instruction
4772 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4773 );
4774
4775 /// Constructor with insert-at-end semantics.
4776 ZExtInst(
4777 Value *S, ///< The value to be zero extended
4778 Type *Ty, ///< The type to zero extend to
4779 const Twine &NameStr, ///< A name for the new instruction
4780 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4781 );
4782
4783 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4784 static bool classof(const Instruction *I) {
4785 return I->getOpcode() == ZExt;
4786 }
4787 static bool classof(const Value *V) {
4788 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4789 }
4790};
4791
4792//===----------------------------------------------------------------------===//
4793// SExtInst Class
4794//===----------------------------------------------------------------------===//
4795
4796/// This class represents a sign extension of integer types.
4797class SExtInst : public CastInst {
4798protected:
4799 // Note: Instruction needs to be a friend here to call cloneImpl.
4800 friend class Instruction;
4801
4802 /// Clone an identical SExtInst
4803 SExtInst *cloneImpl() const;
4804
4805public:
4806 /// Constructor with insert-before-instruction semantics
4807 SExtInst(
4808 Value *S, ///< The value to be sign extended
4809 Type *Ty, ///< The type to sign extend to
4810 const Twine &NameStr = "", ///< A name for the new instruction
4811 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4812 );
4813
4814 /// Constructor with insert-at-end-of-block semantics
4815 SExtInst(
4816 Value *S, ///< The value to be sign extended
4817 Type *Ty, ///< The type to sign extend to
4818 const Twine &NameStr, ///< A name for the new instruction
4819 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4820 );
4821
4822 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4823 static bool classof(const Instruction *I) {
4824 return I->getOpcode() == SExt;
4825 }
4826 static bool classof(const Value *V) {
4827 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4828 }
4829};
4830
4831//===----------------------------------------------------------------------===//
4832// FPTruncInst Class
4833//===----------------------------------------------------------------------===//
4834
4835/// This class represents a truncation of floating point types.
4836class FPTruncInst : public CastInst {
4837protected:
4838 // Note: Instruction needs to be a friend here to call cloneImpl.
4839 friend class Instruction;
4840
4841 /// Clone an identical FPTruncInst
4842 FPTruncInst *cloneImpl() const;
4843
4844public:
4845 /// Constructor with insert-before-instruction semantics
4846 FPTruncInst(
4847 Value *S, ///< The value to be truncated
4848 Type *Ty, ///< The type to truncate to
4849 const Twine &NameStr = "", ///< A name for the new instruction
4850 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4851 );
4852
4853 /// Constructor with insert-before-instruction semantics
4854 FPTruncInst(
4855 Value *S, ///< The value to be truncated
4856 Type *Ty, ///< The type to truncate to
4857 const Twine &NameStr, ///< A name for the new instruction
4858 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4859 );
4860
4861 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4862 static bool classof(const Instruction *I) {
4863 return I->getOpcode() == FPTrunc;
4864 }
4865 static bool classof(const Value *V) {
4866 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4867 }
4868};
4869
4870//===----------------------------------------------------------------------===//
4871// FPExtInst Class
4872//===----------------------------------------------------------------------===//
4873
4874/// This class represents an extension of floating point types.
4875class FPExtInst : public CastInst {
4876protected:
4877 // Note: Instruction needs to be a friend here to call cloneImpl.
4878 friend class Instruction;
4879
4880 /// Clone an identical FPExtInst
4881 FPExtInst *cloneImpl() const;
4882
4883public:
4884 /// Constructor with insert-before-instruction semantics
4885 FPExtInst(
4886 Value *S, ///< The value to be extended
4887 Type *Ty, ///< The type to extend to
4888 const Twine &NameStr = "", ///< A name for the new instruction
4889 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4890 );
4891
4892 /// Constructor with insert-at-end-of-block semantics
4893 FPExtInst(
4894 Value *S, ///< The value to be extended
4895 Type *Ty, ///< The type to extend to
4896 const Twine &NameStr, ///< A name for the new instruction
4897 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4898 );
4899
4900 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4901 static bool classof(const Instruction *I) {
4902 return I->getOpcode() == FPExt;
4903 }
4904 static bool classof(const Value *V) {
4905 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4906 }
4907};
4908
4909//===----------------------------------------------------------------------===//
4910// UIToFPInst Class
4911//===----------------------------------------------------------------------===//
4912
4913/// This class represents a cast unsigned integer to floating point.
4914class UIToFPInst : public CastInst {
4915protected:
4916 // Note: Instruction needs to be a friend here to call cloneImpl.
4917 friend class Instruction;
4918
4919 /// Clone an identical UIToFPInst
4920 UIToFPInst *cloneImpl() const;
4921
4922public:
4923 /// Constructor with insert-before-instruction semantics
4924 UIToFPInst(
4925 Value *S, ///< The value to be converted
4926 Type *Ty, ///< The type to convert to
4927 const Twine &NameStr = "", ///< A name for the new instruction
4928 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4929 );
4930
4931 /// Constructor with insert-at-end-of-block semantics
4932 UIToFPInst(
4933 Value *S, ///< The value to be converted
4934 Type *Ty, ///< The type to convert to
4935 const Twine &NameStr, ///< A name for the new instruction
4936 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4937 );
4938
4939 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4940 static bool classof(const Instruction *I) {
4941 return I->getOpcode() == UIToFP;
4942 }
4943 static bool classof(const Value *V) {
4944 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4945 }
4946};
4947
4948//===----------------------------------------------------------------------===//
4949// SIToFPInst Class
4950//===----------------------------------------------------------------------===//
4951
4952/// This class represents a cast from signed integer to floating point.
4953class SIToFPInst : public CastInst {
4954protected:
4955 // Note: Instruction needs to be a friend here to call cloneImpl.
4956 friend class Instruction;
4957
4958 /// Clone an identical SIToFPInst
4959 SIToFPInst *cloneImpl() const;
4960
4961public:
4962 /// Constructor with insert-before-instruction semantics
4963 SIToFPInst(
4964 Value *S, ///< The value to be converted
4965 Type *Ty, ///< The type to convert to
4966 const Twine &NameStr = "", ///< A name for the new instruction
4967 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4968 );
4969
4970 /// Constructor with insert-at-end-of-block semantics
4971 SIToFPInst(
4972 Value *S, ///< The value to be converted
4973 Type *Ty, ///< The type to convert to
4974 const Twine &NameStr, ///< A name for the new instruction
4975 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4976 );
4977
4978 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4979 static bool classof(const Instruction *I) {
4980 return I->getOpcode() == SIToFP;
4981 }
4982 static bool classof(const Value *V) {
4983 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4984 }
4985};
4986
4987//===----------------------------------------------------------------------===//
4988// FPToUIInst Class
4989//===----------------------------------------------------------------------===//
4990
4991/// This class represents a cast from floating point to unsigned integer
4992class FPToUIInst : public CastInst {
4993protected:
4994 // Note: Instruction needs to be a friend here to call cloneImpl.
4995 friend class Instruction;
4996
4997 /// Clone an identical FPToUIInst
4998 FPToUIInst *cloneImpl() const;
4999
5000public:
5001 /// Constructor with insert-before-instruction semantics
5002 FPToUIInst(
5003 Value *S, ///< The value to be converted
5004 Type *Ty, ///< The type to convert to
5005 const Twine &NameStr = "", ///< A name for the new instruction
5006 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5007 );
5008
5009 /// Constructor with insert-at-end-of-block semantics
5010 FPToUIInst(
5011 Value *S, ///< The value to be converted
5012 Type *Ty, ///< The type to convert to
5013 const Twine &NameStr, ///< A name for the new instruction
5014 BasicBlock *InsertAtEnd ///< Where to insert the new instruction
5015 );
5016
5017 /// Methods for support type inquiry through isa, cast, and dyn_cast:
5018 static bool classof(const Instruction *I) {
5019 return I->getOpcode() == FPToUI;
5020 }
5021 static bool classof(const Value *V) {
5022 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5023 }
5024};
5025
5026//===----------------------------------------------------------------------===//
5027// FPToSIInst Class
5028//===----------------------------------------------------------------------===//
5029
5030/// This class represents a cast from floating point to signed integer.
5031class FPToSIInst : public CastInst {
5032protected:
5033 // Note: Instruction needs to be a friend here to call cloneImpl.
5034 friend class Instruction;
5035
5036 /// Clone an identical FPToSIInst
5037 FPToSIInst *cloneImpl() const;
5038
5039public:
5040 /// Constructor with insert-before-instruction semantics
5041 FPToSIInst(
5042 Value *S, ///< The value to be converted
5043 Type *Ty, ///< The type to convert to
5044 const Twine &NameStr = "", ///< A name for the new instruction
5045 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5046 );
5047
5048 /// Constructor with insert-at-end-of-block semantics
5049 FPToSIInst(
5050 Value *S, ///< The value to be converted
5051 Type *Ty, ///< The type to convert to
5052 const Twine &NameStr, ///< A name for the new instruction
5053 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5054 );
5055
5056 /// Methods for support type inquiry through isa, cast, and dyn_cast:
5057 static bool classof(const Instruction *I) {
5058 return I->getOpcode() == FPToSI;
5059 }
5060 static bool classof(const Value *V) {
5061 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5062 }
5063};
5064
5065//===----------------------------------------------------------------------===//
5066// IntToPtrInst Class
5067//===----------------------------------------------------------------------===//
5068
5069/// This class represents a cast from an integer to a pointer.
5070class IntToPtrInst : public CastInst {
5071public:
5072 // Note: Instruction needs to be a friend here to call cloneImpl.
5073 friend class Instruction;
5074
5075 /// Constructor with insert-before-instruction semantics
5076 IntToPtrInst(
5077 Value *S, ///< The value to be converted
5078 Type *Ty, ///< The type to convert to
5079 const Twine &NameStr = "", ///< A name for the new instruction
5080 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5081 );
5082
5083 /// Constructor with insert-at-end-of-block semantics
5084 IntToPtrInst(
5085 Value *S, ///< The value to be converted
5086 Type *Ty, ///< The type to convert to
5087 const Twine &NameStr, ///< A name for the new instruction
5088 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5089 );
5090
5091 /// Clone an identical IntToPtrInst.
5092 IntToPtrInst *cloneImpl() const;
5093
5094 /// Returns the address space of this instruction's pointer type.
5095 unsigned getAddressSpace() const {
5096 return getType()->getPointerAddressSpace();
5097 }
5098
5099 // Methods for support type inquiry through isa, cast, and dyn_cast:
5100 static bool classof(const Instruction *I) {
5101 return I->getOpcode() == IntToPtr;
5102 }
5103 static bool classof(const Value *V) {
5104 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5105 }
5106};
5107
5108//===----------------------------------------------------------------------===//
5109// PtrToIntInst Class
5110//===----------------------------------------------------------------------===//
5111
5112/// This class represents a cast from a pointer to an integer.
5113class PtrToIntInst : public CastInst {
5114protected:
5115 // Note: Instruction needs to be a friend here to call cloneImpl.
5116 friend class Instruction;
5117
5118 /// Clone an identical PtrToIntInst.
5119 PtrToIntInst *cloneImpl() const;
5120
5121public:
5122 /// Constructor with insert-before-instruction semantics
5123 PtrToIntInst(
5124 Value *S, ///< The value to be converted
5125 Type *Ty, ///< The type to convert to
5126 const Twine &NameStr = "", ///< A name for the new instruction
5127 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5128 );
5129
5130 /// Constructor with insert-at-end-of-block semantics
5131 PtrToIntInst(
5132 Value *S, ///< The value to be converted
5133 Type *Ty, ///< The type to convert to
5134 const Twine &NameStr, ///< A name for the new instruction
5135 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5136 );
5137
5138 /// Gets the pointer operand.
5139 Value *getPointerOperand() { return getOperand(0); }
5140 /// Gets the pointer operand.
5141 const Value *getPointerOperand() const { return getOperand(0); }
5142 /// Gets the operand index of the pointer operand.
5143 static unsigned getPointerOperandIndex() { return 0U; }
5144
5145 /// Returns the address space of the pointer operand.
5146 unsigned getPointerAddressSpace() const {
5147 return getPointerOperand()->getType()->getPointerAddressSpace();
5148 }
5149
5150 // Methods for support type inquiry through isa, cast, and dyn_cast:
5151 static bool classof(const Instruction *I) {
5152 return I->getOpcode() == PtrToInt;
5153 }
5154 static bool classof(const Value *V) {
5155 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5156 }
5157};
5158
5159//===----------------------------------------------------------------------===//
5160// BitCastInst Class
5161//===----------------------------------------------------------------------===//
5162
/// This class represents a no-op cast from one type to another
/// (the LLVM `bitcast` instruction); the bit pattern is unchanged.
class BitCastInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical BitCastInst.
  BitCastInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics.
  BitCastInst(
    Value *S,                  ///< The value to be cast
    Type *Ty,                  ///< The type to cast to
    const Twine &NameStr = "", ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics.
  BitCastInst(
    Value *S,               ///< The value to be cast
    Type *Ty,               ///< The type to cast to
    const Twine &NameStr,   ///< A name for the new instruction
    BasicBlock *InsertAtEnd ///< The block to insert the instruction into
  );

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == BitCast;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
5197
5198//===----------------------------------------------------------------------===//
5199// AddrSpaceCastInst Class
5200//===----------------------------------------------------------------------===//
5201
/// This class represents a conversion between pointers from one address space
/// to another (the LLVM `addrspacecast` instruction).
class AddrSpaceCastInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical AddrSpaceCastInst.
  AddrSpaceCastInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics.
  AddrSpaceCastInst(
    Value *S,                  ///< The pointer value to be cast
    Type *Ty,                  ///< The pointer type to cast to
    const Twine &NameStr = "", ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics.
  AddrSpaceCastInst(
    Value *S,               ///< The pointer value to be cast
    Type *Ty,               ///< The pointer type to cast to
    const Twine &NameStr,   ///< A name for the new instruction
    BasicBlock *InsertAtEnd ///< The block to insert the instruction into
  );

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == AddrSpaceCast;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

  /// Gets the pointer operand (operand 0).
  Value *getPointerOperand() {
    return getOperand(0);
  }

  /// Gets the pointer operand (operand 0).
  const Value *getPointerOperand() const {
    return getOperand(0);
  }

  /// Gets the operand index of the pointer operand.
  static unsigned getPointerOperandIndex() {
    return 0U;
  }

  /// Returns the address space of the pointer operand (the source of the
  /// cast), derived from the operand's pointer type.
  unsigned getSrcAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  /// Returns the address space of the result, derived from this
  /// instruction's own pointer type.
  unsigned getDestAddressSpace() const {
    return getType()->getPointerAddressSpace();
  }
};
5262
5263/// A helper function that returns the pointer operand of a load or store
5264/// instruction. Returns nullptr if not load or store.
5265inline const Value *getLoadStorePointerOperand(const Value *V) {
5266 if (auto *Load = dyn_cast<LoadInst>(V))
5267 return Load->getPointerOperand();
5268 if (auto *Store = dyn_cast<StoreInst>(V))
5269 return Store->getPointerOperand();
5270 return nullptr;
5271}
5272inline Value *getLoadStorePointerOperand(Value *V) {
5273 return const_cast<Value *>(
5274 getLoadStorePointerOperand(static_cast<const Value *>(V)));
5275}
5276
5277/// A helper function that returns the pointer operand of a load, store
5278/// or GEP instruction. Returns nullptr if not load, store, or GEP.
5279inline const Value *getPointerOperand(const Value *V) {
5280 if (auto *Ptr = getLoadStorePointerOperand(V))
5281 return Ptr;
5282 if (auto *Gep = dyn_cast<GetElementPtrInst>(V))
5283 return Gep->getPointerOperand();
5284 return nullptr;
5285}
5286inline Value *getPointerOperand(Value *V) {
5287 return const_cast<Value *>(getPointerOperand(static_cast<const Value *>(V)));
5288}
5289
5290/// A helper function that returns the alignment of load or store instruction.
5291inline Align getLoadStoreAlignment(Value *I) {
5292 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&((void)0)
5293 "Expected Load or Store instruction")((void)0);
5294 if (auto *LI = dyn_cast<LoadInst>(I))
5295 return LI->getAlign();
5296 return cast<StoreInst>(I)->getAlign();
5297}
5298
5299/// A helper function that returns the address space of the pointer operand of
5300/// load or store instruction.
5301inline unsigned getLoadStoreAddressSpace(Value *I) {
5302 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&((void)0)
5303 "Expected Load or Store instruction")((void)0);
5304 if (auto *LI = dyn_cast<LoadInst>(I))
5305 return LI->getPointerAddressSpace();
5306 return cast<StoreInst>(I)->getPointerAddressSpace();
5307}
5308
5309/// A helper function that returns the type of a load or store instruction.
5310inline Type *getLoadStoreType(Value *I) {
5311 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&((void)0)
5312 "Expected Load or Store instruction")((void)0);
5313 if (auto *LI = dyn_cast<LoadInst>(I))
5314 return LI->getType();
5315 return cast<StoreInst>(I)->getValueOperand()->getType();
5316}
5317
5318//===----------------------------------------------------------------------===//
5319// FreezeInst Class
5320//===----------------------------------------------------------------------===//
5321
/// This class represents a freeze function that returns a random concrete
/// value if the operand is either a poison value or an undef value
/// (the LLVM `freeze` instruction).
class FreezeInst : public UnaryInstruction {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FreezeInst.
  FreezeInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics.
  explicit FreezeInst(Value *S,                  ///< The value to be frozen
                      const Twine &NameStr = "", ///< A name for the new instruction
                      Instruction *InsertBefore = nullptr); ///< Where to insert
  /// Constructor with insert-at-end-of-block semantics.
  FreezeInst(Value *S, const Twine &NameStr, BasicBlock *InsertAtEnd);

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static inline bool classof(const Instruction *I) {
    return I->getOpcode() == Freeze;
  }
  static inline bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
5346
5347} // end namespace llvm
5348
5349#endif // LLVM_IR_INSTRUCTIONS_H