Bug Summary

File: src/gnu/usr.bin/clang/libclangAST/../../../llvm/clang/lib/AST/ASTContext.cpp
Warning: line 3241, column 3
Value stored to 'AT' is never read
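
This diagnostic comes from the analyzer's dead-store detection (the deadcode checker group, i.e. deadcode.DeadStores): a value is stored into a variable but never read before it is overwritten or goes out of scope. Line 3241 itself lies outside this excerpt, so the snippet below is only a minimal, hypothetical C++ sketch of the pattern behind such a report, not the actual code in ASTContext.cpp:

    void example(int x, int &out) {
      int AT;
      AT = x + 1;   // flagged: "Value stored to 'AT' is never read"
      AT = x * 2;   // only this store is ever read
      out = AT;
    }

The usual remedy is to drop the dead assignment or fold it into the expression that is actually used.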

Annotated Source Code

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name ASTContext.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/gnu/usr.bin/clang/libclangAST/obj -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/gnu/usr.bin/clang/libclangAST/obj/../include/clang/AST -I /usr/src/gnu/usr.bin/clang/libclangAST/../../../llvm/clang/include -I /usr/src/gnu/usr.bin/clang/libclangAST/../../../llvm/llvm/include -I /usr/src/gnu/usr.bin/clang/libclangAST/../include -I /usr/src/gnu/usr.bin/clang/libclangAST/obj -I /usr/src/gnu/usr.bin/clang/libclangAST/obj/../include -D NDEBUG -D __STDC_LIMIT_MACROS -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D LLVM_PREFIX="/usr" -internal-isystem /usr/include/c++/v1 -internal-isystem /usr/local/lib/clang/13.0.0/include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/usr/src/gnu/usr.bin/clang/libclangAST/obj -ferror-limit 19 -fvisibility-inlines-hidden -fwrapv -stack-protector 2 -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/vmm/scan-build/2022-01-12-194120-40624-1 -x c++ /usr/src/gnu/usr.bin/clang/libclangAST/../../../llvm/clang/lib/AST/ASTContext.cpp
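
The -cc1 line above is the exact invocation scan-build recorded for this translation unit. A rough driver-level equivalent for re-checking just this file (paths and include directories abbreviated; assumes the same -std=c++14 and -I settings shown above) would look something like:

    clang++ --analyze -Xclang -analyzer-checker=deadcode \
        -std=c++14 -I <include dirs as above> \
        /usr/src/gnu/usr.bin/clang/libclangAST/../../../llvm/clang/lib/AST/ASTContext.cpp
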
1//===- ASTContext.cpp - Context to hold long-lived AST nodes --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the ASTContext interface.
10//
11//===----------------------------------------------------------------------===//
12
13#include "clang/AST/ASTContext.h"
14#include "CXXABI.h"
15#include "Interp/Context.h"
16#include "clang/AST/APValue.h"
17#include "clang/AST/ASTConcept.h"
18#include "clang/AST/ASTMutationListener.h"
19#include "clang/AST/ASTTypeTraits.h"
20#include "clang/AST/Attr.h"
21#include "clang/AST/AttrIterator.h"
22#include "clang/AST/CharUnits.h"
23#include "clang/AST/Comment.h"
24#include "clang/AST/Decl.h"
25#include "clang/AST/DeclBase.h"
26#include "clang/AST/DeclCXX.h"
27#include "clang/AST/DeclContextInternals.h"
28#include "clang/AST/DeclObjC.h"
29#include "clang/AST/DeclOpenMP.h"
30#include "clang/AST/DeclTemplate.h"
31#include "clang/AST/DeclarationName.h"
32#include "clang/AST/DependenceFlags.h"
33#include "clang/AST/Expr.h"
34#include "clang/AST/ExprCXX.h"
35#include "clang/AST/ExprConcepts.h"
36#include "clang/AST/ExternalASTSource.h"
37#include "clang/AST/Mangle.h"
38#include "clang/AST/MangleNumberingContext.h"
39#include "clang/AST/NestedNameSpecifier.h"
40#include "clang/AST/ParentMapContext.h"
41#include "clang/AST/RawCommentList.h"
42#include "clang/AST/RecordLayout.h"
43#include "clang/AST/Stmt.h"
44#include "clang/AST/TemplateBase.h"
45#include "clang/AST/TemplateName.h"
46#include "clang/AST/Type.h"
47#include "clang/AST/TypeLoc.h"
48#include "clang/AST/UnresolvedSet.h"
49#include "clang/AST/VTableBuilder.h"
50#include "clang/Basic/AddressSpaces.h"
51#include "clang/Basic/Builtins.h"
52#include "clang/Basic/CommentOptions.h"
53#include "clang/Basic/ExceptionSpecificationType.h"
54#include "clang/Basic/IdentifierTable.h"
55#include "clang/Basic/LLVM.h"
56#include "clang/Basic/LangOptions.h"
57#include "clang/Basic/Linkage.h"
58#include "clang/Basic/Module.h"
59#include "clang/Basic/NoSanitizeList.h"
60#include "clang/Basic/ObjCRuntime.h"
61#include "clang/Basic/SourceLocation.h"
62#include "clang/Basic/SourceManager.h"
63#include "clang/Basic/Specifiers.h"
64#include "clang/Basic/TargetCXXABI.h"
65#include "clang/Basic/TargetInfo.h"
66#include "clang/Basic/XRayLists.h"
67#include "llvm/ADT/APFixedPoint.h"
68#include "llvm/ADT/APInt.h"
69#include "llvm/ADT/APSInt.h"
70#include "llvm/ADT/ArrayRef.h"
71#include "llvm/ADT/DenseMap.h"
72#include "llvm/ADT/DenseSet.h"
73#include "llvm/ADT/FoldingSet.h"
74#include "llvm/ADT/None.h"
75#include "llvm/ADT/Optional.h"
76#include "llvm/ADT/PointerUnion.h"
77#include "llvm/ADT/STLExtras.h"
78#include "llvm/ADT/SmallPtrSet.h"
79#include "llvm/ADT/SmallVector.h"
80#include "llvm/ADT/StringExtras.h"
81#include "llvm/ADT/StringRef.h"
82#include "llvm/ADT/Triple.h"
83#include "llvm/Support/Capacity.h"
84#include "llvm/Support/Casting.h"
85#include "llvm/Support/Compiler.h"
86#include "llvm/Support/ErrorHandling.h"
87#include "llvm/Support/MD5.h"
88#include "llvm/Support/MathExtras.h"
89#include "llvm/Support/raw_ostream.h"
90#include <algorithm>
91#include <cassert>
92#include <cstddef>
93#include <cstdint>
94#include <cstdlib>
95#include <map>
96#include <memory>
97#include <string>
98#include <tuple>
99#include <utility>
100
101using namespace clang;
102
103enum FloatingRank {
104 BFloat16Rank, Float16Rank, HalfRank, FloatRank, DoubleRank, LongDoubleRank, Float128Rank
105};
106
107/// \returns location that is relevant when searching for Doc comments related
108/// to \p D.
109static SourceLocation getDeclLocForCommentSearch(const Decl *D,
110 SourceManager &SourceMgr) {
111 assert(D);
112
113 // User can not attach documentation to implicit declarations.
114 if (D->isImplicit())
115 return {};
116
117 // User can not attach documentation to implicit instantiations.
118 if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
119 if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
120 return {};
121 }
122
123 if (const auto *VD = dyn_cast<VarDecl>(D)) {
124 if (VD->isStaticDataMember() &&
125 VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
126 return {};
127 }
128
129 if (const auto *CRD = dyn_cast<CXXRecordDecl>(D)) {
130 if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
131 return {};
132 }
133
134 if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
135 TemplateSpecializationKind TSK = CTSD->getSpecializationKind();
136 if (TSK == TSK_ImplicitInstantiation ||
137 TSK == TSK_Undeclared)
138 return {};
139 }
140
141 if (const auto *ED = dyn_cast<EnumDecl>(D)) {
142 if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
143 return {};
144 }
145 if (const auto *TD = dyn_cast<TagDecl>(D)) {
146 // When tag declaration (but not definition!) is part of the
147 // decl-specifier-seq of some other declaration, it doesn't get comment
148 if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition())
149 return {};
150 }
151 // TODO: handle comments for function parameters properly.
152 if (isa<ParmVarDecl>(D))
153 return {};
154
155 // TODO: we could look up template parameter documentation in the template
156 // documentation.
157 if (isa<TemplateTypeParmDecl>(D) ||
158 isa<NonTypeTemplateParmDecl>(D) ||
159 isa<TemplateTemplateParmDecl>(D))
160 return {};
161
162 // Find declaration location.
163 // For Objective-C declarations we generally don't expect to have multiple
164 // declarators, thus use declaration starting location as the "declaration
165 // location".
166 // For all other declarations multiple declarators are used quite frequently,
167 // so we use the location of the identifier as the "declaration location".
168 if (isa<ObjCMethodDecl>(D) || isa<ObjCContainerDecl>(D) ||
169 isa<ObjCPropertyDecl>(D) ||
170 isa<RedeclarableTemplateDecl>(D) ||
171 isa<ClassTemplateSpecializationDecl>(D) ||
172 // Allow association with Y across {} in `typedef struct X {} Y`.
173 isa<TypedefDecl>(D))
174 return D->getBeginLoc();
175 else {
176 const SourceLocation DeclLoc = D->getLocation();
177 if (DeclLoc.isMacroID()) {
178 if (isa<TypedefDecl>(D)) {
179 // If location of the typedef name is in a macro, it is because being
180 // declared via a macro. Try using declaration's starting location as
181 // the "declaration location".
182 return D->getBeginLoc();
183 } else if (const auto *TD = dyn_cast<TagDecl>(D)) {
184 // If location of the tag decl is inside a macro, but the spelling of
185 // the tag name comes from a macro argument, it looks like a special
186 // macro like NS_ENUM is being used to define the tag decl. In that
187 // case, adjust the source location to the expansion loc so that we can
188 // attach the comment to the tag decl.
189 if (SourceMgr.isMacroArgExpansion(DeclLoc) &&
190 TD->isCompleteDefinition())
191 return SourceMgr.getExpansionLoc(DeclLoc);
192 }
193 }
194 return DeclLoc;
195 }
196
197 return {};
198}
199
200RawComment *ASTContext::getRawCommentForDeclNoCacheImpl(
201 const Decl *D, const SourceLocation RepresentativeLocForDecl,
202 const std::map<unsigned, RawComment *> &CommentsInTheFile) const {
203 // If the declaration doesn't map directly to a location in a file, we
204 // can't find the comment.
205 if (RepresentativeLocForDecl.isInvalid() ||
206 !RepresentativeLocForDecl.isFileID())
207 return nullptr;
208
209 // If there are no comments anywhere, we won't find anything.
210 if (CommentsInTheFile.empty())
211 return nullptr;
212
213 // Decompose the location for the declaration and find the beginning of the
214 // file buffer.
215 const std::pair<FileID, unsigned> DeclLocDecomp =
216 SourceMgr.getDecomposedLoc(RepresentativeLocForDecl);
217
218 // Slow path.
219 auto OffsetCommentBehindDecl =
220 CommentsInTheFile.lower_bound(DeclLocDecomp.second);
221
222 // First check whether we have a trailing comment.
223 if (OffsetCommentBehindDecl != CommentsInTheFile.end()) {
224 RawComment *CommentBehindDecl = OffsetCommentBehindDecl->second;
225 if ((CommentBehindDecl->isDocumentation() ||
226 LangOpts.CommentOpts.ParseAllComments) &&
227 CommentBehindDecl->isTrailingComment() &&
228 (isa<FieldDecl>(D) || isa<EnumConstantDecl>(D) || isa<VarDecl>(D) ||
229 isa<ObjCMethodDecl>(D) || isa<ObjCPropertyDecl>(D))) {
230
231 // Check that Doxygen trailing comment comes after the declaration, starts
232 // on the same line and in the same file as the declaration.
233 if (SourceMgr.getLineNumber(DeclLocDecomp.first, DeclLocDecomp.second) ==
234 Comments.getCommentBeginLine(CommentBehindDecl, DeclLocDecomp.first,
235 OffsetCommentBehindDecl->first)) {
236 return CommentBehindDecl;
237 }
238 }
239 }
240
241 // The comment just after the declaration was not a trailing comment.
242 // Let's look at the previous comment.
243 if (OffsetCommentBehindDecl == CommentsInTheFile.begin())
244 return nullptr;
245
246 auto OffsetCommentBeforeDecl = --OffsetCommentBehindDecl;
247 RawComment *CommentBeforeDecl = OffsetCommentBeforeDecl->second;
248
249 // Check that we actually have a non-member Doxygen comment.
250 if (!(CommentBeforeDecl->isDocumentation() ||
251 LangOpts.CommentOpts.ParseAllComments) ||
252 CommentBeforeDecl->isTrailingComment())
253 return nullptr;
254
255 // Decompose the end of the comment.
256 const unsigned CommentEndOffset =
257 Comments.getCommentEndOffset(CommentBeforeDecl);
258
259 // Get the corresponding buffer.
260 bool Invalid = false;
261 const char *Buffer = SourceMgr.getBufferData(DeclLocDecomp.first,
262 &Invalid).data();
263 if (Invalid)
264 return nullptr;
265
266 // Extract text between the comment and declaration.
267 StringRef Text(Buffer + CommentEndOffset,
268 DeclLocDecomp.second - CommentEndOffset);
269
270 // There should be no other declarations or preprocessor directives between
271 // comment and declaration.
272 if (Text.find_first_of(";{}#@") != StringRef::npos)
273 return nullptr;
274
275 return CommentBeforeDecl;
276}
277
278RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
279 const SourceLocation DeclLoc = getDeclLocForCommentSearch(D, SourceMgr);
280
281 // If the declaration doesn't map directly to a location in a file, we
282 // can't find the comment.
283 if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
284 return nullptr;
285
286 if (ExternalSource && !CommentsLoaded) {
287 ExternalSource->ReadComments();
288 CommentsLoaded = true;
289 }
290
291 if (Comments.empty())
292 return nullptr;
293
294 const FileID File = SourceMgr.getDecomposedLoc(DeclLoc).first;
295 const auto CommentsInThisFile = Comments.getCommentsInFile(File);
296 if (!CommentsInThisFile || CommentsInThisFile->empty())
297 return nullptr;
298
299 return getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile);
300}
301
302void ASTContext::addComment(const RawComment &RC) {
303 assert(LangOpts.RetainCommentsFromSystemHeaders ||
304 !SourceMgr.isInSystemHeader(RC.getSourceRange().getBegin()));
305 Comments.addComment(RC, LangOpts.CommentOpts, BumpAlloc);
306}
307
308/// If we have a 'templated' declaration for a template, adjust 'D' to
309/// refer to the actual template.
310/// If we have an implicit instantiation, adjust 'D' to refer to template.
311static const Decl &adjustDeclToTemplate(const Decl &D) {
312 if (const auto *FD = dyn_cast<FunctionDecl>(&D)) {
313 // Is this function declaration part of a function template?
314 if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate())
315 return *FTD;
316
317 // Nothing to do if function is not an implicit instantiation.
318 if (FD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation)
319 return D;
320
321 // Function is an implicit instantiation of a function template?
322 if (const FunctionTemplateDecl *FTD = FD->getPrimaryTemplate())
323 return *FTD;
324
325 // Function is instantiated from a member definition of a class template?
326 if (const FunctionDecl *MemberDecl =
327 FD->getInstantiatedFromMemberFunction())
328 return *MemberDecl;
329
330 return D;
331 }
332 if (const auto *VD = dyn_cast<VarDecl>(&D)) {
333 // Static data member is instantiated from a member definition of a class
334 // template?
335 if (VD->isStaticDataMember())
336 if (const VarDecl *MemberDecl = VD->getInstantiatedFromStaticDataMember())
337 return *MemberDecl;
338
339 return D;
340 }
341 if (const auto *CRD = dyn_cast<CXXRecordDecl>(&D)) {
342 // Is this class declaration part of a class template?
343 if (const ClassTemplateDecl *CTD = CRD->getDescribedClassTemplate())
344 return *CTD;
345
346 // Class is an implicit instantiation of a class template or partial
347 // specialization?
348 if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(CRD)) {
349 if (CTSD->getSpecializationKind() != TSK_ImplicitInstantiation)
350 return D;
351 llvm::PointerUnion<ClassTemplateDecl *,
352 ClassTemplatePartialSpecializationDecl *>
353 PU = CTSD->getSpecializedTemplateOrPartial();
354 return PU.is<ClassTemplateDecl *>()
355 ? *static_cast<const Decl *>(PU.get<ClassTemplateDecl *>())
356 : *static_cast<const Decl *>(
357 PU.get<ClassTemplatePartialSpecializationDecl *>());
358 }
359
360 // Class is instantiated from a member definition of a class template?
361 if (const MemberSpecializationInfo *Info =
362 CRD->getMemberSpecializationInfo())
363 return *Info->getInstantiatedFrom();
364
365 return D;
366 }
367 if (const auto *ED = dyn_cast<EnumDecl>(&D)) {
368 // Enum is instantiated from a member definition of a class template?
369 if (const EnumDecl *MemberDecl = ED->getInstantiatedFromMemberEnum())
370 return *MemberDecl;
371
372 return D;
373 }
374 // FIXME: Adjust alias templates?
375 return D;
376}
377
378const RawComment *ASTContext::getRawCommentForAnyRedecl(
379 const Decl *D,
380 const Decl **OriginalDecl) const {
381 if (!D) {
382 if (OriginalDecl)
383 OriginalDecl = nullptr;
384 return nullptr;
385 }
386
387 D = &adjustDeclToTemplate(*D);
388
389 // Any comment directly attached to D?
390 {
391 auto DeclComment = DeclRawComments.find(D);
392 if (DeclComment != DeclRawComments.end()) {
393 if (OriginalDecl)
394 *OriginalDecl = D;
395 return DeclComment->second;
396 }
397 }
398
399 // Any comment attached to any redeclaration of D?
400 const Decl *CanonicalD = D->getCanonicalDecl();
401 if (!CanonicalD)
402 return nullptr;
403
404 {
405 auto RedeclComment = RedeclChainComments.find(CanonicalD);
406 if (RedeclComment != RedeclChainComments.end()) {
407 if (OriginalDecl)
408 *OriginalDecl = RedeclComment->second;
409 auto CommentAtRedecl = DeclRawComments.find(RedeclComment->second);
410 assert(CommentAtRedecl != DeclRawComments.end() &&
411 "This decl is supposed to have comment attached.");
412 return CommentAtRedecl->second;
413 }
414 }
415
416 // Any redeclarations of D that we haven't checked for comments yet?
417 // We can't use DenseMap::iterator directly since it'd get invalid.
418 auto LastCheckedRedecl = [this, CanonicalD]() -> const Decl * {
419 auto LookupRes = CommentlessRedeclChains.find(CanonicalD);
420 if (LookupRes != CommentlessRedeclChains.end())
421 return LookupRes->second;
422 return nullptr;
423 }();
424
425 for (const auto Redecl : D->redecls()) {
426 assert(Redecl);
427 // Skip all redeclarations that have been checked previously.
428 if (LastCheckedRedecl) {
429 if (LastCheckedRedecl == Redecl) {
430 LastCheckedRedecl = nullptr;
431 }
432 continue;
433 }
434 const RawComment *RedeclComment = getRawCommentForDeclNoCache(Redecl);
435 if (RedeclComment) {
436 cacheRawCommentForDecl(*Redecl, *RedeclComment);
437 if (OriginalDecl)
438 *OriginalDecl = Redecl;
439 return RedeclComment;
440 }
441 CommentlessRedeclChains[CanonicalD] = Redecl;
442 }
443
444 if (OriginalDecl)
445 *OriginalDecl = nullptr;
446 return nullptr;
447}
448
449void ASTContext::cacheRawCommentForDecl(const Decl &OriginalD,
450 const RawComment &Comment) const {
451 assert(Comment.isDocumentation() || LangOpts.CommentOpts.ParseAllComments);
452 DeclRawComments.try_emplace(&OriginalD, &Comment);
453 const Decl *const CanonicalDecl = OriginalD.getCanonicalDecl();
454 RedeclChainComments.try_emplace(CanonicalDecl, &OriginalD);
455 CommentlessRedeclChains.erase(CanonicalDecl);
456}
457
458static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod,
459 SmallVectorImpl<const NamedDecl *> &Redeclared) {
460 const DeclContext *DC = ObjCMethod->getDeclContext();
461 if (const auto *IMD = dyn_cast<ObjCImplDecl>(DC)) {
462 const ObjCInterfaceDecl *ID = IMD->getClassInterface();
463 if (!ID)
464 return;
465 // Add redeclared method here.
466 for (const auto *Ext : ID->known_extensions()) {
467 if (ObjCMethodDecl *RedeclaredMethod =
468 Ext->getMethod(ObjCMethod->getSelector(),
469 ObjCMethod->isInstanceMethod()))
470 Redeclared.push_back(RedeclaredMethod);
471 }
472 }
473}
474
475void ASTContext::attachCommentsToJustParsedDecls(ArrayRef<Decl *> Decls,
476 const Preprocessor *PP) {
477 if (Comments.empty() || Decls.empty())
478 return;
479
480 FileID File;
481 for (Decl *D : Decls) {
482 SourceLocation Loc = D->getLocation();
483 if (Loc.isValid()) {
484 // See if there are any new comments that are not attached to a decl.
485 // The location doesn't have to be precise - we care only about the file.
486 File = SourceMgr.getDecomposedLoc(Loc).first;
487 break;
488 }
489 }
490
491 if (File.isInvalid())
492 return;
493
494 auto CommentsInThisFile = Comments.getCommentsInFile(File);
495 if (!CommentsInThisFile || CommentsInThisFile->empty() ||
496 CommentsInThisFile->rbegin()->second->isAttached())
497 return;
498
499 // There is at least one comment not attached to a decl.
500 // Maybe it should be attached to one of Decls?
501 //
502 // Note that this way we pick up not only comments that precede the
503 // declaration, but also comments that *follow* the declaration -- thanks to
504 // the lookahead in the lexer: we've consumed the semicolon and looked
505 // ahead through comments.
506
507 for (const Decl *D : Decls) {
508 assert(D);
509 if (D->isInvalidDecl())
510 continue;
511
512 D = &adjustDeclToTemplate(*D);
513
514 const SourceLocation DeclLoc = getDeclLocForCommentSearch(D, SourceMgr);
515
516 if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
517 continue;
518
519 if (DeclRawComments.count(D) > 0)
520 continue;
521
522 if (RawComment *const DocComment =
523 getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile)) {
524 cacheRawCommentForDecl(*D, *DocComment);
525 comments::FullComment *FC = DocComment->parse(*this, PP, D);
526 ParsedComments[D->getCanonicalDecl()] = FC;
527 }
528 }
529}
530
531comments::FullComment *ASTContext::cloneFullComment(comments::FullComment *FC,
532 const Decl *D) const {
533 auto *ThisDeclInfo = new (*this) comments::DeclInfo;
534 ThisDeclInfo->CommentDecl = D;
535 ThisDeclInfo->IsFilled = false;
536 ThisDeclInfo->fill();
537 ThisDeclInfo->CommentDecl = FC->getDecl();
538 if (!ThisDeclInfo->TemplateParameters)
539 ThisDeclInfo->TemplateParameters = FC->getDeclInfo()->TemplateParameters;
540 comments::FullComment *CFC =
541 new (*this) comments::FullComment(FC->getBlocks(),
542 ThisDeclInfo);
543 return CFC;
544}
545
546comments::FullComment *ASTContext::getLocalCommentForDeclUncached(const Decl *D) const {
547 const RawComment *RC = getRawCommentForDeclNoCache(D);
548 return RC ? RC->parse(*this, nullptr, D) : nullptr;
549}
550
551comments::FullComment *ASTContext::getCommentForDecl(
552 const Decl *D,
553 const Preprocessor *PP) const {
554 if (!D || D->isInvalidDecl())
555 return nullptr;
556 D = &adjustDeclToTemplate(*D);
557
558 const Decl *Canonical = D->getCanonicalDecl();
559 llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos =
560 ParsedComments.find(Canonical);
561
562 if (Pos != ParsedComments.end()) {
563 if (Canonical != D) {
564 comments::FullComment *FC = Pos->second;
565 comments::FullComment *CFC = cloneFullComment(FC, D);
566 return CFC;
567 }
568 return Pos->second;
569 }
570
571 const Decl *OriginalDecl = nullptr;
572
573 const RawComment *RC = getRawCommentForAnyRedecl(D, &OriginalDecl);
574 if (!RC) {
575 if (isa<ObjCMethodDecl>(D) || isa<FunctionDecl>(D)) {
576 SmallVector<const NamedDecl*, 8> Overridden;
577 const auto *OMD = dyn_cast<ObjCMethodDecl>(D);
578 if (OMD && OMD->isPropertyAccessor())
579 if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
580 if (comments::FullComment *FC = getCommentForDecl(PDecl, PP))
581 return cloneFullComment(FC, D);
582 if (OMD)
583 addRedeclaredMethods(OMD, Overridden);
584 getOverriddenMethods(dyn_cast<NamedDecl>(D), Overridden);
585 for (unsigned i = 0, e = Overridden.size(); i < e; i++)
586 if (comments::FullComment *FC = getCommentForDecl(Overridden[i], PP))
587 return cloneFullComment(FC, D);
588 }
589 else if (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
590 // Attach any tag type's documentation to its typedef if latter
591 // does not have one of its own.
592 QualType QT = TD->getUnderlyingType();
593 if (const auto *TT = QT->getAs<TagType>())
594 if (const Decl *TD = TT->getDecl())
595 if (comments::FullComment *FC = getCommentForDecl(TD, PP))
596 return cloneFullComment(FC, D);
597 }
598 else if (const auto *IC = dyn_cast<ObjCInterfaceDecl>(D)) {
599 while (IC->getSuperClass()) {
600 IC = IC->getSuperClass();
601 if (comments::FullComment *FC = getCommentForDecl(IC, PP))
602 return cloneFullComment(FC, D);
603 }
604 }
605 else if (const auto *CD = dyn_cast<ObjCCategoryDecl>(D)) {
606 if (const ObjCInterfaceDecl *IC = CD->getClassInterface())
607 if (comments::FullComment *FC = getCommentForDecl(IC, PP))
608 return cloneFullComment(FC, D);
609 }
610 else if (const auto *RD = dyn_cast<CXXRecordDecl>(D)) {
611 if (!(RD = RD->getDefinition()))
612 return nullptr;
613 // Check non-virtual bases.
614 for (const auto &I : RD->bases()) {
615 if (I.isVirtual() || (I.getAccessSpecifier() != AS_public))
616 continue;
617 QualType Ty = I.getType();
618 if (Ty.isNull())
619 continue;
620 if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) {
621 if (!(NonVirtualBase= NonVirtualBase->getDefinition()))
622 continue;
623
624 if (comments::FullComment *FC = getCommentForDecl((NonVirtualBase), PP))
625 return cloneFullComment(FC, D);
626 }
627 }
628 // Check virtual bases.
629 for (const auto &I : RD->vbases()) {
630 if (I.getAccessSpecifier() != AS_public)
631 continue;
632 QualType Ty = I.getType();
633 if (Ty.isNull())
634 continue;
635 if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) {
636 if (!(VirtualBase= VirtualBase->getDefinition()))
637 continue;
638 if (comments::FullComment *FC = getCommentForDecl((VirtualBase), PP))
639 return cloneFullComment(FC, D);
640 }
641 }
642 }
643 return nullptr;
644 }
645
646 // If the RawComment was attached to other redeclaration of this Decl, we
647 // should parse the comment in context of that other Decl. This is important
648 // because comments can contain references to parameter names which can be
649 // different across redeclarations.
650 if (D != OriginalDecl && OriginalDecl)
651 return getCommentForDecl(OriginalDecl, PP);
652
653 comments::FullComment *FC = RC->parse(*this, PP, D);
654 ParsedComments[Canonical] = FC;
655 return FC;
656}
657
658void
659ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
660 const ASTContext &C,
661 TemplateTemplateParmDecl *Parm) {
662 ID.AddInteger(Parm->getDepth());
663 ID.AddInteger(Parm->getPosition());
664 ID.AddBoolean(Parm->isParameterPack());
665
666 TemplateParameterList *Params = Parm->getTemplateParameters();
667 ID.AddInteger(Params->size());
668 for (TemplateParameterList::const_iterator P = Params->begin(),
669 PEnd = Params->end();
670 P != PEnd; ++P) {
671 if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
672 ID.AddInteger(0);
673 ID.AddBoolean(TTP->isParameterPack());
674 const TypeConstraint *TC = TTP->getTypeConstraint();
675 ID.AddBoolean(TC != nullptr);
676 if (TC)
677 TC->getImmediatelyDeclaredConstraint()->Profile(ID, C,
678 /*Canonical=*/true);
679 if (TTP->isExpandedParameterPack()) {
680 ID.AddBoolean(true);
681 ID.AddInteger(TTP->getNumExpansionParameters());
682 } else
683 ID.AddBoolean(false);
684 continue;
685 }
686
687 if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
688 ID.AddInteger(1);
689 ID.AddBoolean(NTTP->isParameterPack());
690 ID.AddPointer(NTTP->getType().getCanonicalType().getAsOpaquePtr());
691 if (NTTP->isExpandedParameterPack()) {
692 ID.AddBoolean(true);
693 ID.AddInteger(NTTP->getNumExpansionTypes());
694 for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
695 QualType T = NTTP->getExpansionType(I);
696 ID.AddPointer(T.getCanonicalType().getAsOpaquePtr());
697 }
698 } else
699 ID.AddBoolean(false);
700 continue;
701 }
702
703 auto *TTP = cast<TemplateTemplateParmDecl>(*P);
704 ID.AddInteger(2);
705 Profile(ID, C, TTP);
706 }
707 Expr *RequiresClause = Parm->getTemplateParameters()->getRequiresClause();
708 ID.AddBoolean(RequiresClause != nullptr);
709 if (RequiresClause)
710 RequiresClause->Profile(ID, C, /*Canonical=*/true);
711}
712
713static Expr *
714canonicalizeImmediatelyDeclaredConstraint(const ASTContext &C, Expr *IDC,
715 QualType ConstrainedType) {
716 // This is a bit ugly - we need to form a new immediately-declared
717 // constraint that references the new parameter; this would ideally
718 // require semantic analysis (e.g. template<C T> struct S {}; - the
719 // converted arguments of C<T> could be an argument pack if C is
720 // declared as template<typename... T> concept C = ...).
721 // We don't have semantic analysis here so we dig deep into the
722 // ready-made constraint expr and change the thing manually.
723 ConceptSpecializationExpr *CSE;
724 if (const auto *Fold = dyn_cast<CXXFoldExpr>(IDC))
725 CSE = cast<ConceptSpecializationExpr>(Fold->getLHS());
726 else
727 CSE = cast<ConceptSpecializationExpr>(IDC);
728 ArrayRef<TemplateArgument> OldConverted = CSE->getTemplateArguments();
729 SmallVector<TemplateArgument, 3> NewConverted;
730 NewConverted.reserve(OldConverted.size());
731 if (OldConverted.front().getKind() == TemplateArgument::Pack) {
732 // The case:
733 // template<typename... T> concept C = true;
734 // template<C<int> T> struct S; -> constraint is C<{T, int}>
735 NewConverted.push_back(ConstrainedType);
736 for (auto &Arg : OldConverted.front().pack_elements().drop_front(1))
737 NewConverted.push_back(Arg);
738 TemplateArgument NewPack(NewConverted);
739
740 NewConverted.clear();
741 NewConverted.push_back(NewPack);
742 assert(OldConverted.size() == 1 &&
743 "Template parameter pack should be the last parameter");
744 } else {
745 assert(OldConverted.front().getKind() == TemplateArgument::Type &&
746 "Unexpected first argument kind for immediately-declared "
747 "constraint");
748 NewConverted.push_back(ConstrainedType);
749 for (auto &Arg : OldConverted.drop_front(1))
750 NewConverted.push_back(Arg);
751 }
752 Expr *NewIDC = ConceptSpecializationExpr::Create(
753 C, CSE->getNamedConcept(), NewConverted, nullptr,
754 CSE->isInstantiationDependent(), CSE->containsUnexpandedParameterPack());
755
756 if (auto *OrigFold = dyn_cast<CXXFoldExpr>(IDC))
757 NewIDC = new (C) CXXFoldExpr(
758 OrigFold->getType(), /*Callee*/nullptr, SourceLocation(), NewIDC,
759 BinaryOperatorKind::BO_LAnd, SourceLocation(), /*RHS=*/nullptr,
760 SourceLocation(), /*NumExpansions=*/None);
761 return NewIDC;
762}
763
764TemplateTemplateParmDecl *
765ASTContext::getCanonicalTemplateTemplateParmDecl(
766 TemplateTemplateParmDecl *TTP) const {
767 // Check if we already have a canonical template template parameter.
768 llvm::FoldingSetNodeID ID;
769 CanonicalTemplateTemplateParm::Profile(ID, *this, TTP);
770 void *InsertPos = nullptr;
771 CanonicalTemplateTemplateParm *Canonical
772 = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
773 if (Canonical)
774 return Canonical->getParam();
775
776 // Build a canonical template parameter list.
777 TemplateParameterList *Params = TTP->getTemplateParameters();
778 SmallVector<NamedDecl *, 4> CanonParams;
779 CanonParams.reserve(Params->size());
780 for (TemplateParameterList::const_iterator P = Params->begin(),
781 PEnd = Params->end();
782 P != PEnd; ++P) {
783 if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
784 TemplateTypeParmDecl *NewTTP = TemplateTypeParmDecl::Create(*this,
785 getTranslationUnitDecl(), SourceLocation(), SourceLocation(),
786 TTP->getDepth(), TTP->getIndex(), nullptr, false,
787 TTP->isParameterPack(), TTP->hasTypeConstraint(),
788 TTP->isExpandedParameterPack() ?
789 llvm::Optional<unsigned>(TTP->getNumExpansionParameters()) : None);
790 if (const auto *TC = TTP->getTypeConstraint()) {
791 QualType ParamAsArgument(NewTTP->getTypeForDecl(), 0);
792 Expr *NewIDC = canonicalizeImmediatelyDeclaredConstraint(
793 *this, TC->getImmediatelyDeclaredConstraint(),
794 ParamAsArgument);
795 TemplateArgumentListInfo CanonArgsAsWritten;
796 if (auto *Args = TC->getTemplateArgsAsWritten())
797 for (const auto &ArgLoc : Args->arguments())
798 CanonArgsAsWritten.addArgument(
799 TemplateArgumentLoc(ArgLoc.getArgument(),
800 TemplateArgumentLocInfo()));
801 NewTTP->setTypeConstraint(
802 NestedNameSpecifierLoc(),
803 DeclarationNameInfo(TC->getNamedConcept()->getDeclName(),
804 SourceLocation()), /*FoundDecl=*/nullptr,
805 // Actually canonicalizing a TemplateArgumentLoc is difficult so we
806 // simply omit the ArgsAsWritten
807 TC->getNamedConcept(), /*ArgsAsWritten=*/nullptr, NewIDC);
808 }
809 CanonParams.push_back(NewTTP);
810 } else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
811 QualType T = getCanonicalType(NTTP->getType());
812 TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
813 NonTypeTemplateParmDecl *Param;
814 if (NTTP->isExpandedParameterPack()) {
815 SmallVector<QualType, 2> ExpandedTypes;
816 SmallVector<TypeSourceInfo *, 2> ExpandedTInfos;
817 for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
818 ExpandedTypes.push_back(getCanonicalType(NTTP->getExpansionType(I)));
819 ExpandedTInfos.push_back(
820 getTrivialTypeSourceInfo(ExpandedTypes.back()));
821 }
822
823 Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
824 SourceLocation(),
825 SourceLocation(),
826 NTTP->getDepth(),
827 NTTP->getPosition(), nullptr,
828 T,
829 TInfo,
830 ExpandedTypes,
831 ExpandedTInfos);
832 } else {
833 Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
834 SourceLocation(),
835 SourceLocation(),
836 NTTP->getDepth(),
837 NTTP->getPosition(), nullptr,
838 T,
839 NTTP->isParameterPack(),
840 TInfo);
841 }
842 if (AutoType *AT = T->getContainedAutoType()) {
843 if (AT->isConstrained()) {
844 Param->setPlaceholderTypeConstraint(
845 canonicalizeImmediatelyDeclaredConstraint(
846 *this, NTTP->getPlaceholderTypeConstraint(), T));
847 }
848 }
849 CanonParams.push_back(Param);
850
851 } else
852 CanonParams.push_back(getCanonicalTemplateTemplateParmDecl(
853 cast<TemplateTemplateParmDecl>(*P)));
854 }
855
856 Expr *CanonRequiresClause = nullptr;
857 if (Expr *RequiresClause = TTP->getTemplateParameters()->getRequiresClause())
858 CanonRequiresClause = RequiresClause;
859
860 TemplateTemplateParmDecl *CanonTTP
861 = TemplateTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
862 SourceLocation(), TTP->getDepth(),
863 TTP->getPosition(),
864 TTP->isParameterPack(),
865 nullptr,
866 TemplateParameterList::Create(*this, SourceLocation(),
867 SourceLocation(),
868 CanonParams,
869 SourceLocation(),
870 CanonRequiresClause));
871
872 // Get the new insert position for the node we care about.
873 Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
874 assert(!Canonical && "Shouldn't be in the map!");
875 (void)Canonical;
876
877 // Create the canonical template template parameter entry.
878 Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
879 CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos);
880 return CanonTTP;
881}
882
883TargetCXXABI::Kind ASTContext::getCXXABIKind() const {
884 auto Kind = getTargetInfo().getCXXABI().getKind();
885 return getLangOpts().CXXABI.getValueOr(Kind);
886}
887
888CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
889 if (!LangOpts.CPlusPlus) return nullptr;
890
891 switch (getCXXABIKind()) {
892 case TargetCXXABI::AppleARM64:
893 case TargetCXXABI::Fuchsia:
894 case TargetCXXABI::GenericARM: // Same as Itanium at this level
895 case TargetCXXABI::iOS:
896 case TargetCXXABI::WatchOS:
897 case TargetCXXABI::GenericAArch64:
898 case TargetCXXABI::GenericMIPS:
899 case TargetCXXABI::GenericItanium:
900 case TargetCXXABI::WebAssembly:
901 case TargetCXXABI::XL:
902 return CreateItaniumCXXABI(*this);
903 case TargetCXXABI::Microsoft:
904 return CreateMicrosoftCXXABI(*this);
905 }
906 llvm_unreachable("Invalid CXXABI type!");
907}
908
909interp::Context &ASTContext::getInterpContext() {
910 if (!InterpContext) {
911 InterpContext.reset(new interp::Context(*this));
912 }
913 return *InterpContext.get();
914}
915
916ParentMapContext &ASTContext::getParentMapContext() {
917 if (!ParentMapCtx)
918 ParentMapCtx.reset(new ParentMapContext(*this));
919 return *ParentMapCtx.get();
920}
921
922static const LangASMap *getAddressSpaceMap(const TargetInfo &T,
923 const LangOptions &LOpts) {
924 if (LOpts.FakeAddressSpaceMap) {
925 // The fake address space map must have a distinct entry for each
926 // language-specific address space.
927 static const unsigned FakeAddrSpaceMap[] = {
928 0, // Default
929 1, // opencl_global
930 3, // opencl_local
931 2, // opencl_constant
932 0, // opencl_private
933 4, // opencl_generic
934 5, // opencl_global_device
935 6, // opencl_global_host
936 7, // cuda_device
937 8, // cuda_constant
938 9, // cuda_shared
939 1, // sycl_global
940 5, // sycl_global_device
941 6, // sycl_global_host
942 3, // sycl_local
943 0, // sycl_private
944 10, // ptr32_sptr
945 11, // ptr32_uptr
946 12 // ptr64
947 };
948 return &FakeAddrSpaceMap;
949 } else {
950 return &T.getAddressSpaceMap();
951 }
952}
953
954static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI,
955 const LangOptions &LangOpts) {
956 switch (LangOpts.getAddressSpaceMapMangling()) {
957 case LangOptions::ASMM_Target:
958 return TI.useAddressSpaceMapMangling();
959 case LangOptions::ASMM_On:
960 return true;
961 case LangOptions::ASMM_Off:
962 return false;
963 }
964 llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything.");
965}
966
967ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
968 IdentifierTable &idents, SelectorTable &sels,
969 Builtin::Context &builtins, TranslationUnitKind TUKind)
970 : ConstantArrayTypes(this_()), FunctionProtoTypes(this_()),
971 TemplateSpecializationTypes(this_()),
972 DependentTemplateSpecializationTypes(this_()), AutoTypes(this_()),
973 SubstTemplateTemplateParmPacks(this_()),
974 CanonTemplateTemplateParms(this_()), SourceMgr(SM), LangOpts(LOpts),
975 NoSanitizeL(new NoSanitizeList(LangOpts.NoSanitizeFiles, SM)),
976 XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles,
977 LangOpts.XRayNeverInstrumentFiles,
978 LangOpts.XRayAttrListFiles, SM)),
979 ProfList(new ProfileList(LangOpts.ProfileListFiles, SM)),
980 PrintingPolicy(LOpts), Idents(idents), Selectors(sels),
981 BuiltinInfo(builtins), TUKind(TUKind), DeclarationNames(*this),
982 Comments(SM), CommentCommandTraits(BumpAlloc, LOpts.CommentOpts),
983 CompCategories(this_()), LastSDM(nullptr, 0) {
984 addTranslationUnitDecl();
985}
986
987ASTContext::~ASTContext() {
988 // Release the DenseMaps associated with DeclContext objects.
989 // FIXME: Is this the ideal solution?
990 ReleaseDeclContextMaps();
991
992 // Call all of the deallocation functions on all of their targets.
993 for (auto &Pair : Deallocations)
994 (Pair.first)(Pair.second);
995
996 // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
997 // because they can contain DenseMaps.
998 for (llvm::DenseMap<const ObjCContainerDecl*,
999 const ASTRecordLayout*>::iterator
1000 I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; )
1001 // Increment in loop to prevent using deallocated memory.
1002 if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
1003 R->Destroy(*this);
1004
1005 for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
1006 I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
1007 // Increment in loop to prevent using deallocated memory.
1008 if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
1009 R->Destroy(*this);
1010 }
1011
1012 for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(),
1013 AEnd = DeclAttrs.end();
1014 A != AEnd; ++A)
1015 A->second->~AttrVec();
1016
1017 for (const auto &Value : ModuleInitializers)
1018 Value.second->~PerModuleInitializers();
1019}
1020
1021void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) {
1022 TraversalScope = TopLevelDecls;
1023 getParentMapContext().clear();
1024}
1025
1026void ASTContext::AddDeallocation(void (*Callback)(void *), void *Data) const {
1027 Deallocations.push_back({Callback, Data});
1028}
1029
1030void
1031ASTContext::setExternalSource(IntrusiveRefCntPtr<ExternalASTSource> Source) {
1032 ExternalSource = std::move(Source);
1033}
1034
1035void ASTContext::PrintStats() const {
1036 llvm::errs() << "\n*** AST Context Stats:\n";
1037 llvm::errs() << " " << Types.size() << " types total.\n";
1038
1039 unsigned counts[] = {
1040#define TYPE(Name, Parent) 0,
1041#define ABSTRACT_TYPE(Name, Parent)
1042#include "clang/AST/TypeNodes.inc"
1043 0 // Extra
1044 };
1045
1046 for (unsigned i = 0, e = Types.size(); i != e; ++i) {
1047 Type *T = Types[i];
1048 counts[(unsigned)T->getTypeClass()]++;
1049 }
1050
1051 unsigned Idx = 0;
1052 unsigned TotalBytes = 0;
1053#define TYPE(Name, Parent) \
1054 if (counts[Idx]) \
1055 llvm::errs() << " " << counts[Idx] << " " << #Name \
1056 << " types, " << sizeof(Name##Type) << " each " \
1057 << "(" << counts[Idx] * sizeof(Name##Type) \
1058 << " bytes)\n"; \
1059 TotalBytes += counts[Idx] * sizeof(Name##Type); \
1060 ++Idx;
1061#define ABSTRACT_TYPE(Name, Parent)
1062#include "clang/AST/TypeNodes.inc"
1063
1064 llvm::errs() << "Total bytes = " << TotalBytes << "\n";
1065
1066 // Implicit special member functions.
1067 llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/"
1068 << NumImplicitDefaultConstructors
1069 << " implicit default constructors created\n";
1070 llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
1071 << NumImplicitCopyConstructors
1072 << " implicit copy constructors created\n";
1073 if (getLangOpts().CPlusPlus)
1074 llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
1075 << NumImplicitMoveConstructors
1076 << " implicit move constructors created\n";
1077 llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
1078 << NumImplicitCopyAssignmentOperators
1079 << " implicit copy assignment operators created\n";
1080 if (getLangOpts().CPlusPlus)
1081 llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
1082 << NumImplicitMoveAssignmentOperators
1083 << " implicit move assignment operators created\n";
1084 llvm::errs() << NumImplicitDestructorsDeclared << "/"
1085 << NumImplicitDestructors
1086 << " implicit destructors created\n";
1087
1088 if (ExternalSource) {
1089 llvm::errs() << "\n";
1090 ExternalSource->PrintStats();
1091 }
1092
1093 BumpAlloc.PrintStats();
1094}
1095
1096void ASTContext::mergeDefinitionIntoModule(NamedDecl *ND, Module *M,
1097 bool NotifyListeners) {
1098 if (NotifyListeners)
1099 if (auto *Listener = getASTMutationListener())
1100 Listener->RedefinedHiddenDefinition(ND, M);
1101
1102 MergedDefModules[cast<NamedDecl>(ND->getCanonicalDecl())].push_back(M);
1103}
1104
1105void ASTContext::deduplicateMergedDefinitonsFor(NamedDecl *ND) {
1106 auto It = MergedDefModules.find(cast<NamedDecl>(ND->getCanonicalDecl()));
1107 if (It == MergedDefModules.end())
1108 return;
1109
1110 auto &Merged = It->second;
1111 llvm::DenseSet<Module*> Found;
1112 for (Module *&M : Merged)
1113 if (!Found.insert(M).second)
1114 M = nullptr;
1115 Merged.erase(std::remove(Merged.begin(), Merged.end(), nullptr), Merged.end());
1116}
1117
1118ArrayRef<Module *>
1119ASTContext::getModulesWithMergedDefinition(const NamedDecl *Def) {
1120 auto MergedIt =
1121 MergedDefModules.find(cast<NamedDecl>(Def->getCanonicalDecl()));
1122 if (MergedIt == MergedDefModules.end())
1123 return None;
1124 return MergedIt->second;
1125}
1126
1127void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) {
1128 if (LazyInitializers.empty())
1129 return;
1130
1131 auto *Source = Ctx.getExternalSource();
1132 assert(Source && "lazy initializers but no external source");
1133
1134 auto LazyInits = std::move(LazyInitializers);
1135 LazyInitializers.clear();
1136
1137 for (auto ID : LazyInits)
1138 Initializers.push_back(Source->GetExternalDecl(ID));
1139
1140 assert(LazyInitializers.empty() &&
1141 "GetExternalDecl for lazy module initializer added more inits");
1142}
1143
1144void ASTContext::addModuleInitializer(Module *M, Decl *D) {
1145 // One special case: if we add a module initializer that imports another
1146 // module, and that module's only initializer is an ImportDecl, simplify.
1147 if (const auto *ID = dyn_cast<ImportDecl>(D)) {
1148 auto It = ModuleInitializers.find(ID->getImportedModule());
1149
1150 // Maybe the ImportDecl does nothing at all. (Common case.)
1151 if (It == ModuleInitializers.end())
1152 return;
1153
1154 // Maybe the ImportDecl only imports another ImportDecl.
1155 auto &Imported = *It->second;
1156 if (Imported.Initializers.size() + Imported.LazyInitializers.size() == 1) {
1157 Imported.resolve(*this);
1158 auto *OnlyDecl = Imported.Initializers.front();
1159 if (isa<ImportDecl>(OnlyDecl))
1160 D = OnlyDecl;
1161 }
1162 }
1163
1164 auto *&Inits = ModuleInitializers[M];
1165 if (!Inits)
1166 Inits = new (*this) PerModuleInitializers;
1167 Inits->Initializers.push_back(D);
1168}
1169
1170void ASTContext::addLazyModuleInitializers(Module *M, ArrayRef<uint32_t> IDs) {
1171 auto *&Inits = ModuleInitializers[M];
1172 if (!Inits)
1173 Inits = new (*this) PerModuleInitializers;
1174 Inits->LazyInitializers.insert(Inits->LazyInitializers.end(),
1175 IDs.begin(), IDs.end());
1176}
1177
1178ArrayRef<Decl *> ASTContext::getModuleInitializers(Module *M) {
1179 auto It = ModuleInitializers.find(M);
1180 if (It == ModuleInitializers.end())
1181 return None;
1182
1183 auto *Inits = It->second;
1184 Inits->resolve(*this);
1185 return Inits->Initializers;
1186}
1187
1188ExternCContextDecl *ASTContext::getExternCContextDecl() const {
1189 if (!ExternCContext)
1190 ExternCContext = ExternCContextDecl::Create(*this, getTranslationUnitDecl());
1191
1192 return ExternCContext;
1193}
1194
1195BuiltinTemplateDecl *
1196ASTContext::buildBuiltinTemplateDecl(BuiltinTemplateKind BTK,
1197 const IdentifierInfo *II) const {
1198 auto *BuiltinTemplate =
1199 BuiltinTemplateDecl::Create(*this, getTranslationUnitDecl(), II, BTK);
1200 BuiltinTemplate->setImplicit();
1201 getTranslationUnitDecl()->addDecl(BuiltinTemplate);
1202
1203 return BuiltinTemplate;
1204}
1205
1206BuiltinTemplateDecl *
1207ASTContext::getMakeIntegerSeqDecl() const {
1208 if (!MakeIntegerSeqDecl)
1209 MakeIntegerSeqDecl = buildBuiltinTemplateDecl(BTK__make_integer_seq,
1210 getMakeIntegerSeqName());
1211 return MakeIntegerSeqDecl;
1212}
1213
1214BuiltinTemplateDecl *
1215ASTContext::getTypePackElementDecl() const {
1216 if (!TypePackElementDecl)
1217 TypePackElementDecl = buildBuiltinTemplateDecl(BTK__type_pack_element,
1218 getTypePackElementName());
1219 return TypePackElementDecl;
1220}
1221
1222RecordDecl *ASTContext::buildImplicitRecord(StringRef Name,
1223 RecordDecl::TagKind TK) const {
1224 SourceLocation Loc;
1225 RecordDecl *NewDecl;
1226 if (getLangOpts().CPlusPlus)
1227 NewDecl = CXXRecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc,
1228 Loc, &Idents.get(Name));
1229 else
1230 NewDecl = RecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, Loc,
1231 &Idents.get(Name));
1232 NewDecl->setImplicit();
1233 NewDecl->addAttr(TypeVisibilityAttr::CreateImplicit(
1234 const_cast<ASTContext &>(*this), TypeVisibilityAttr::Default));
1235 return NewDecl;
1236}
1237
1238TypedefDecl *ASTContext::buildImplicitTypedef(QualType T,
1239 StringRef Name) const {
1240 TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
1241 TypedefDecl *NewDecl = TypedefDecl::Create(
1242 const_cast<ASTContext &>(*this), getTranslationUnitDecl(),
1243 SourceLocation(), SourceLocation(), &Idents.get(Name), TInfo);
1244 NewDecl->setImplicit();
1245 return NewDecl;
1246}
1247
1248TypedefDecl *ASTContext::getInt128Decl() const {
1249 if (!Int128Decl)
1250 Int128Decl = buildImplicitTypedef(Int128Ty, "__int128_t");
1251 return Int128Decl;
1252}
1253
1254TypedefDecl *ASTContext::getUInt128Decl() const {
1255 if (!UInt128Decl)
1256 UInt128Decl = buildImplicitTypedef(UnsignedInt128Ty, "__uint128_t");
1257 return UInt128Decl;
1258}
1259
1260void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
1261 auto *Ty = new (*this, TypeAlignment) BuiltinType(K);
1262 R = CanQualType::CreateUnsafe(QualType(Ty, 0));
1263 Types.push_back(Ty);
1264}
1265
1266void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
1267 const TargetInfo *AuxTarget) {
1268 assert((!this->Target || this->Target == &Target) &&
1269 "Incorrect target reinitialization");
1270 assert(VoidTy.isNull() && "Context reinitialized?");
1271
1272 this->Target = &Target;
1273 this->AuxTarget = AuxTarget;
1274
1275 ABI.reset(createCXXABI(Target));
1276 AddrSpaceMap = getAddressSpaceMap(Target, LangOpts);
1277 AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(Target, LangOpts);
1278
1279 // C99 6.2.5p19.
1280 InitBuiltinType(VoidTy, BuiltinType::Void);
1281
1282 // C99 6.2.5p2.
1283 InitBuiltinType(BoolTy, BuiltinType::Bool);
1284 // C99 6.2.5p3.
1285 if (LangOpts.CharIsSigned)
1286 InitBuiltinType(CharTy, BuiltinType::Char_S);
1287 else
1288 InitBuiltinType(CharTy, BuiltinType::Char_U);
1289 // C99 6.2.5p4.
1290 InitBuiltinType(SignedCharTy, BuiltinType::SChar);
1291 InitBuiltinType(ShortTy, BuiltinType::Short);
1292 InitBuiltinType(IntTy, BuiltinType::Int);
1293 InitBuiltinType(LongTy, BuiltinType::Long);
1294 InitBuiltinType(LongLongTy, BuiltinType::LongLong);
1295
1296 // C99 6.2.5p6.
1297 InitBuiltinType(UnsignedCharTy, BuiltinType::UChar);
1298 InitBuiltinType(UnsignedShortTy, BuiltinType::UShort);
1299 InitBuiltinType(UnsignedIntTy, BuiltinType::UInt);
1300 InitBuiltinType(UnsignedLongTy, BuiltinType::ULong);
1301 InitBuiltinType(UnsignedLongLongTy, BuiltinType::ULongLong);
1302
1303 // C99 6.2.5p10.
1304 InitBuiltinType(FloatTy, BuiltinType::Float);
1305 InitBuiltinType(DoubleTy, BuiltinType::Double);
1306 InitBuiltinType(LongDoubleTy, BuiltinType::LongDouble);
1307
1308 // GNU extension, __float128 for IEEE quadruple precision
1309 InitBuiltinType(Float128Ty, BuiltinType::Float128);
1310
1311 // C11 extension ISO/IEC TS 18661-3
1312 InitBuiltinType(Float16Ty, BuiltinType::Float16);
1313
1314 // ISO/IEC JTC1 SC22 WG14 N1169 Extension
1315 InitBuiltinType(ShortAccumTy, BuiltinType::ShortAccum);
1316 InitBuiltinType(AccumTy, BuiltinType::Accum);
1317 InitBuiltinType(LongAccumTy, BuiltinType::LongAccum);
1318 InitBuiltinType(UnsignedShortAccumTy, BuiltinType::UShortAccum);
1319 InitBuiltinType(UnsignedAccumTy, BuiltinType::UAccum);
1320 InitBuiltinType(UnsignedLongAccumTy, BuiltinType::ULongAccum);
1321 InitBuiltinType(ShortFractTy, BuiltinType::ShortFract);
1322 InitBuiltinType(FractTy, BuiltinType::Fract);
1323 InitBuiltinType(LongFractTy, BuiltinType::LongFract);
1324 InitBuiltinType(UnsignedShortFractTy, BuiltinType::UShortFract);
1325 InitBuiltinType(UnsignedFractTy, BuiltinType::UFract);
1326 InitBuiltinType(UnsignedLongFractTy, BuiltinType::ULongFract);
1327 InitBuiltinType(SatShortAccumTy, BuiltinType::SatShortAccum);
1328 InitBuiltinType(SatAccumTy, BuiltinType::SatAccum);
1329 InitBuiltinType(SatLongAccumTy, BuiltinType::SatLongAccum);
1330 InitBuiltinType(SatUnsignedShortAccumTy, BuiltinType::SatUShortAccum);
1331 InitBuiltinType(SatUnsignedAccumTy, BuiltinType::SatUAccum);
1332 InitBuiltinType(SatUnsignedLongAccumTy, BuiltinType::SatULongAccum);
1333 InitBuiltinType(SatShortFractTy, BuiltinType::SatShortFract);
1334 InitBuiltinType(SatFractTy, BuiltinType::SatFract);
1335 InitBuiltinType(SatLongFractTy, BuiltinType::SatLongFract);
1336 InitBuiltinType(SatUnsignedShortFractTy, BuiltinType::SatUShortFract);
1337 InitBuiltinType(SatUnsignedFractTy, BuiltinType::SatUFract);
1338 InitBuiltinType(SatUnsignedLongFractTy, BuiltinType::SatULongFract);
1339
1340 // GNU extension, 128-bit integers.
1341 InitBuiltinType(Int128Ty, BuiltinType::Int128);
1342 InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128);
1343
1344 // C++ 3.9.1p5
1345 if (TargetInfo::isTypeSigned(Target.getWCharType()))
1346 InitBuiltinType(WCharTy, BuiltinType::WChar_S);
1347 else // -fshort-wchar makes wchar_t be unsigned.
1348 InitBuiltinType(WCharTy, BuiltinType::WChar_U);
1349 if (LangOpts.CPlusPlus && LangOpts.WChar)
1350 WideCharTy = WCharTy;
1351 else {
1352 // C99 (or C++ using -fno-wchar).
1353 WideCharTy = getFromTargetType(Target.getWCharType());
1354 }
1355
1356 WIntTy = getFromTargetType(Target.getWIntType());
1357
1358 // C++20 (proposed)
1359 InitBuiltinType(Char8Ty, BuiltinType::Char8);
1360
1361 if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
1362 InitBuiltinType(Char16Ty, BuiltinType::Char16);
1363 else // C99
1364 Char16Ty = getFromTargetType(Target.getChar16Type());
1365
1366 if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
1367 InitBuiltinType(Char32Ty, BuiltinType::Char32);
1368 else // C99
1369 Char32Ty = getFromTargetType(Target.getChar32Type());
1370
1371 // Placeholder type for type-dependent expressions whose type is
1372 // completely unknown. No code should ever check a type against
1373 // DependentTy and users should never see it; however, it is here to
1374 // help diagnose failures to properly check for type-dependent
1375 // expressions.
1376 InitBuiltinType(DependentTy, BuiltinType::Dependent);
1377
1378 // Placeholder type for functions.
1379 InitBuiltinType(OverloadTy, BuiltinType::Overload);
1380
1381 // Placeholder type for bound members.
1382 InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember);
1383
1384 // Placeholder type for pseudo-objects.
1385 InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject);
1386
1387 // "any" type; useful for debugger-like clients.
1388 InitBuiltinType(UnknownAnyTy, BuiltinType::UnknownAny);
1389
1390 // Placeholder type for unbridged ARC casts.
1391 InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast);
1392
1393 // Placeholder type for builtin functions.
1394 InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn);
1395
1396 // Placeholder type for OMP array sections.
1397 if (LangOpts.OpenMP) {
1398 InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection);
1399 InitBuiltinType(OMPArrayShapingTy, BuiltinType::OMPArrayShaping);
1400 InitBuiltinType(OMPIteratorTy, BuiltinType::OMPIterator);
1401 }
1402 if (LangOpts.MatrixTypes)
1403 InitBuiltinType(IncompleteMatrixIdxTy, BuiltinType::IncompleteMatrixIdx);
1404
1405 // C99 6.2.5p11.
1406 FloatComplexTy = getComplexType(FloatTy);
1407 DoubleComplexTy = getComplexType(DoubleTy);
1408 LongDoubleComplexTy = getComplexType(LongDoubleTy);
1409 Float128ComplexTy = getComplexType(Float128Ty);
1410
1411 // Builtin types for 'id', 'Class', and 'SEL'.
1412 InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId);
1413 InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass);
1414 InitBuiltinType(ObjCBuiltinSelTy, BuiltinType::ObjCSel);
1415
1416 if (LangOpts.OpenCL) {
1417#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
1418 InitBuiltinType(SingletonId, BuiltinType::Id);
1419#include "clang/Basic/OpenCLImageTypes.def"
1420
1421 InitBuiltinType(OCLSamplerTy, BuiltinType::OCLSampler);
1422 InitBuiltinType(OCLEventTy, BuiltinType::OCLEvent);
1423 InitBuiltinType(OCLClkEventTy, BuiltinType::OCLClkEvent);
1424 InitBuiltinType(OCLQueueTy, BuiltinType::OCLQueue);
1425 InitBuiltinType(OCLReserveIDTy, BuiltinType::OCLReserveID);
1426
1427#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
1428 InitBuiltinType(Id##Ty, BuiltinType::Id);
1429#include "clang/Basic/OpenCLExtensionTypes.def"
1430 }
1431
1432 if (Target.hasAArch64SVETypes()) {
1433#define SVE_TYPE(Name, Id, SingletonId) \
1434 InitBuiltinType(SingletonId, BuiltinType::Id);
1435#include "clang/Basic/AArch64SVEACLETypes.def"
1436 }
1437
1438 if (Target.getTriple().isPPC64() &&
1439 Target.hasFeature("paired-vector-memops")) {
1440 if (Target.hasFeature("mma")) {
1441#define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \
1442 InitBuiltinType(Id##Ty, BuiltinType::Id);
1443#include "clang/Basic/PPCTypes.def"
1444 }
1445#define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \
1446 InitBuiltinType(Id##Ty, BuiltinType::Id);
1447#include "clang/Basic/PPCTypes.def"
1448 }
1449
1450 if (Target.hasRISCVVTypes()) {
1451#define RVV_TYPE(Name, Id, SingletonId) \
1452 InitBuiltinType(SingletonId, BuiltinType::Id);
1453#include "clang/Basic/RISCVVTypes.def"
1454 }
1455
1456 // Builtin type for __objc_yes and __objc_no
1457 ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?
1458 SignedCharTy : BoolTy);
1459
1460 ObjCConstantStringType = QualType();
1461
1462 ObjCSuperType = QualType();
1463
1464 // void * type
1465 if (LangOpts.OpenCLGenericAddressSpace) {
1466 auto Q = VoidTy.getQualifiers();
1467 Q.setAddressSpace(LangAS::opencl_generic);
1468 VoidPtrTy = getPointerType(getCanonicalType(
1469 getQualifiedType(VoidTy.getUnqualifiedType(), Q)));
1470 } else {
1471 VoidPtrTy = getPointerType(VoidTy);
1472 }
1473
1474 // nullptr type (C++0x 2.14.7)
1475 InitBuiltinType(NullPtrTy, BuiltinType::NullPtr);
1476
1477 // half type (OpenCL 6.1.1.1) / ARM NEON __fp16
1478 InitBuiltinType(HalfTy, BuiltinType::Half);
1479
1480 InitBuiltinType(BFloat16Ty, BuiltinType::BFloat16);
1481
1482 // Builtin type used to help define __builtin_va_list.
1483 VaListTagDecl = nullptr;
1484
1485 // MSVC predeclares struct _GUID, and we need it to create MSGuidDecls.
1486 if (LangOpts.MicrosoftExt || LangOpts.Borland) {
1487 MSGuidTagDecl = buildImplicitRecord("_GUID");
1488 getTranslationUnitDecl()->addDecl(MSGuidTagDecl);
1489 }
1490}
1491
1492DiagnosticsEngine &ASTContext::getDiagnostics() const {
1493 return SourceMgr.getDiagnostics();
1494}
1495
1496AttrVec& ASTContext::getDeclAttrs(const Decl *D) {
1497 AttrVec *&Result = DeclAttrs[D];
1498 if (!Result) {
1499 void *Mem = Allocate(sizeof(AttrVec));
1500 Result = new (Mem) AttrVec;
1501 }
1502
1503 return *Result;
1504}
1505
1506/// Erase the attributes corresponding to the given declaration.
1507void ASTContext::eraseDeclAttrs(const Decl *D) {
1508 llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(D);
1509 if (Pos != DeclAttrs.end()) {
1510 Pos->second->~AttrVec();
1511 DeclAttrs.erase(Pos);
1512 }
1513}
1514
1515// FIXME: Remove ?
1516MemberSpecializationInfo *
1517ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) {
1518 assert(Var->isStaticDataMember() && "Not a static data member");
1519 return getTemplateOrSpecializationInfo(Var)
1520 .dyn_cast<MemberSpecializationInfo *>();
1521}
1522
1523ASTContext::TemplateOrSpecializationInfo
1524ASTContext::getTemplateOrSpecializationInfo(const VarDecl *Var) {
1525 llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>::iterator Pos =
1526 TemplateOrInstantiation.find(Var);
1527 if (Pos == TemplateOrInstantiation.end())
1528 return {};
1529
1530 return Pos->second;
1531}
1532
1533void
1534ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl,
1535 TemplateSpecializationKind TSK,
1536 SourceLocation PointOfInstantiation) {
1537 assert(Inst->isStaticDataMember() && "Not a static data member");
1538 assert(Tmpl->isStaticDataMember() && "Not a static data member");
1539 setTemplateOrSpecializationInfo(Inst, new (*this) MemberSpecializationInfo(
1540 Tmpl, TSK, PointOfInstantiation));
1541}
1542
1543void
1544ASTContext::setTemplateOrSpecializationInfo(VarDecl *Inst,
1545 TemplateOrSpecializationInfo TSI) {
1546 assert(!TemplateOrInstantiation[Inst] &&
1547 "Already noted what the variable was instantiated from");
1548 TemplateOrInstantiation[Inst] = TSI;
1549}
1550
1551NamedDecl *
1552ASTContext::getInstantiatedFromUsingDecl(NamedDecl *UUD) {
1553 auto Pos = InstantiatedFromUsingDecl.find(UUD);
1554 if (Pos == InstantiatedFromUsingDecl.end())
1555 return nullptr;
1556
1557 return Pos->second;
1558}
1559
1560void
1561ASTContext::setInstantiatedFromUsingDecl(NamedDecl *Inst, NamedDecl *Pattern) {
1562 assert((isa<UsingDecl>(Pattern) ||
1563 isa<UnresolvedUsingValueDecl>(Pattern) ||
1564 isa<UnresolvedUsingTypenameDecl>(Pattern)) &&
1565 "pattern decl is not a using decl");
1566 assert((isa<UsingDecl>(Inst) ||
1567 isa<UnresolvedUsingValueDecl>(Inst) ||
1568 isa<UnresolvedUsingTypenameDecl>(Inst)) &&
1569 "instantiation did not produce a using decl");
1570 assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists");
1571 InstantiatedFromUsingDecl[Inst] = Pattern;
1572}
1573
1574UsingEnumDecl *
1575ASTContext::getInstantiatedFromUsingEnumDecl(UsingEnumDecl *UUD) {
1576 auto Pos = InstantiatedFromUsingEnumDecl.find(UUD);
1577 if (Pos == InstantiatedFromUsingEnumDecl.end())
1578 return nullptr;
1579
1580 return Pos->second;
1581}
1582
1583void ASTContext::setInstantiatedFromUsingEnumDecl(UsingEnumDecl *Inst,
1584 UsingEnumDecl *Pattern) {
1585 assert(!InstantiatedFromUsingEnumDecl[Inst] && "pattern already exists");
1586 InstantiatedFromUsingEnumDecl[Inst] = Pattern;
1587}
1588
1589UsingShadowDecl *
1590ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) {
1591 llvm::DenseMap<UsingShadowDecl*, UsingShadowDecl*>::const_iterator Pos
1592 = InstantiatedFromUsingShadowDecl.find(Inst);
1593 if (Pos == InstantiatedFromUsingShadowDecl.end())
1594 return nullptr;
1595
1596 return Pos->second;
1597}
1598
1599void
1600ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst,
1601 UsingShadowDecl *Pattern) {
1602 assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists");
1603 InstantiatedFromUsingShadowDecl[Inst] = Pattern;
1604}
1605
1606FieldDecl *ASTContext::getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field) {
1607 llvm::DenseMap<FieldDecl *, FieldDecl *>::iterator Pos
1608 = InstantiatedFromUnnamedFieldDecl.find(Field);
1609 if (Pos == InstantiatedFromUnnamedFieldDecl.end())
1610 return nullptr;
1611
1612 return Pos->second;
1613}
1614
1615void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst,
1616 FieldDecl *Tmpl) {
1617 assert(!Inst->getDeclName() && "Instantiated field decl is not unnamed");
1618 assert(!Tmpl->getDeclName() && "Template field decl is not unnamed");
1619 assert(!InstantiatedFromUnnamedFieldDecl[Inst] &&
1620 "Already noted what unnamed field was instantiated from");
1621
1622 InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl;
1623}
1624
1625ASTContext::overridden_cxx_method_iterator
1626ASTContext::overridden_methods_begin(const CXXMethodDecl *Method) const {
1627 return overridden_methods(Method).begin();
1628}
1629
1630ASTContext::overridden_cxx_method_iterator
1631ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const {
1632 return overridden_methods(Method).end();
1633}
1634
1635unsigned
1636ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const {
1637 auto Range = overridden_methods(Method);
1638 return Range.end() - Range.begin();
1639}
1640
1641ASTContext::overridden_method_range
1642ASTContext::overridden_methods(const CXXMethodDecl *Method) const {
1643 llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos =
1644 OverriddenMethods.find(Method->getCanonicalDecl());
1645 if (Pos == OverriddenMethods.end())
1646 return overridden_method_range(nullptr, nullptr);
1647 return overridden_method_range(Pos->second.begin(), Pos->second.end());
1648}
1649
1650void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method,
1651 const CXXMethodDecl *Overridden) {
1652 assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl());
1653 OverriddenMethods[Method].push_back(Overridden);
1654}
1655
1656void ASTContext::getOverriddenMethods(
1657 const NamedDecl *D,
1658 SmallVectorImpl<const NamedDecl *> &Overridden) const {
1659 assert(D);
1660
1661 if (const auto *CXXMethod = dyn_cast<CXXMethodDecl>(D)) {
1662 Overridden.append(overridden_methods_begin(CXXMethod),
1663 overridden_methods_end(CXXMethod));
1664 return;
1665 }
1666
1667 const auto *Method = dyn_cast<ObjCMethodDecl>(D);
1668 if (!Method)
1669 return;
1670
1671 SmallVector<const ObjCMethodDecl *, 8> OverDecls;
1672 Method->getOverriddenMethods(OverDecls);
1673 Overridden.append(OverDecls.begin(), OverDecls.end());
1674}
1675
1676void ASTContext::addedLocalImportDecl(ImportDecl *Import) {
1677 assert(!Import->getNextLocalImport() &&
1678 "Import declaration already in the chain");
1679 assert(!Import->isFromASTFile() && "Non-local import declaration");
1680 if (!FirstLocalImport) {
1681 FirstLocalImport = Import;
1682 LastLocalImport = Import;
1683 return;
1684 }
1685
1686 LastLocalImport->setNextLocalImport(Import);
1687 LastLocalImport = Import;
1688}
1689
1690//===----------------------------------------------------------------------===//
1691// Type Sizing and Analysis
1692//===----------------------------------------------------------------------===//
1693
1694/// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified
1695/// scalar floating point type.
1696const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
1697 switch (T->castAs<BuiltinType>()->getKind()) {
1698 default:
1699 llvm_unreachable("Not a floating point type!");
1700 case BuiltinType::BFloat16:
1701 return Target->getBFloat16Format();
1702 case BuiltinType::Float16:
1703 case BuiltinType::Half:
1704 return Target->getHalfFormat();
1705 case BuiltinType::Float: return Target->getFloatFormat();
1706 case BuiltinType::Double: return Target->getDoubleFormat();
1707 case BuiltinType::LongDouble:
1708 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice)
1709 return AuxTarget->getLongDoubleFormat();
1710 return Target->getLongDoubleFormat();
1711 case BuiltinType::Float128:
1712 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice)
1713 return AuxTarget->getFloat128Format();
1714 return Target->getFloat128Format();
1715 }
1716}
1717
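The fltSemantics descriptor returned above is the same kind of object that llvm::APFloat consumes, which is how Clang folds floating-point constants with the target's (or, for OpenMP device compilation, the host's) precision. A minimal standalone sketch — not part of ASTContext.cpp, using only the public llvm::APFloat API — of how such a semantics object drives the arithmetic:

    #include "llvm/ADT/APFloat.h"
    #include "llvm/ADT/SmallString.h"
    #include "llvm/Support/raw_ostream.h"

    void foldWithHalfSemantics() {
      // Same role as the value returned by getFloatTypeSemantics() for 'half'.
      const llvm::fltSemantics &Sem = llvm::APFloat::IEEEhalf();
      llvm::APFloat A(Sem, "1.5");
      llvm::APFloat B(Sem, "2.25");
      A.add(B, llvm::APFloat::rmNearestTiesToEven); // arithmetic obeys half precision
      llvm::SmallString<16> Str;
      A.toString(Str);
      llvm::outs() << Str << "\n";                  // 3.75, exactly representable in half
    }
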
1718CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
1719 unsigned Align = Target->getCharWidth();
1720
1721 bool UseAlignAttrOnly = false;
1722 if (unsigned AlignFromAttr = D->getMaxAlignment()) {
1723 Align = AlignFromAttr;
1724
1725 // __attribute__((aligned)) can increase or decrease alignment
1726 // *except* on a struct or struct member, where it only increases
1727 // alignment unless 'packed' is also specified.
1728 //
1729 // It is an error for alignas to decrease alignment, so we can
1730 // ignore that possibility; Sema should diagnose it.
1731 if (isa<FieldDecl>(D)) {
1732 UseAlignAttrOnly = D->hasAttr<PackedAttr>() ||
1733 cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();
1734 } else {
1735 UseAlignAttrOnly = true;
1736 }
1737 }
1738 else if (isa<FieldDecl>(D))
1739 UseAlignAttrOnly =
1740 D->hasAttr<PackedAttr>() ||
1741 cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();
1742
1743 // If we're using the align attribute only, just ignore everything
1744 // else about the declaration and its type.
1745 if (UseAlignAttrOnly) {
1746 // do nothing
1747 } else if (const auto *VD = dyn_cast<ValueDecl>(D)) {
1748 QualType T = VD->getType();
1749 if (const auto *RT = T->getAs<ReferenceType>()) {
1750 if (ForAlignof)
1751 T = RT->getPointeeType();
1752 else
1753 T = getPointerType(RT->getPointeeType());
1754 }
1755 QualType BaseT = getBaseElementType(T);
1756 if (T->isFunctionType())
1757 Align = getTypeInfoImpl(T.getTypePtr()).Align;
1758 else if (!BaseT->isIncompleteType()) {
1759 // Adjust alignments of declarations with array type by the
1760 // large-array alignment on the target.
1761 if (const ArrayType *arrayType = getAsArrayType(T)) {
1762 unsigned MinWidth = Target->getLargeArrayMinWidth();
1763 if (!ForAlignof && MinWidth) {
1764 if (isa<VariableArrayType>(arrayType))
1765 Align = std::max(Align, Target->getLargeArrayAlign());
1766 else if (isa<ConstantArrayType>(arrayType) &&
1767 MinWidth <= getTypeSize(cast<ConstantArrayType>(arrayType)))
1768 Align = std::max(Align, Target->getLargeArrayAlign());
1769 }
1770 }
1771 Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr()));
1772 if (BaseT.getQualifiers().hasUnaligned())
1773 Align = Target->getCharWidth();
1774 if (const auto *VD = dyn_cast<VarDecl>(D)) {
1775 if (VD->hasGlobalStorage() && !ForAlignof) {
1776 uint64_t TypeSize = getTypeSize(T.getTypePtr());
1777 Align = std::max(Align, getTargetInfo().getMinGlobalAlign(TypeSize));
1778 }
1779 }
1780 }
1781
1782 // Fields can be subject to extra alignment constraints, like if
1783 // the field is packed, the struct is packed, or the struct has a
1784 // max-field-alignment constraint (#pragma pack). So calculate
1785 // the actual alignment of the field within the struct, and then
1786 // (as we're expected to) constrain that by the alignment of the type.
1787 if (const auto *Field = dyn_cast<FieldDecl>(VD)) {
1788 const RecordDecl *Parent = Field->getParent();
1789 // We can only produce a sensible answer if the record is valid.
1790 if (!Parent->isInvalidDecl()) {
1791 const ASTRecordLayout &Layout = getASTRecordLayout(Parent);
1792
1793 // Start with the record's overall alignment.
1794 unsigned FieldAlign = toBits(Layout.getAlignment());
1795
1796 // Use the GCD of that and the offset within the record.
1797 uint64_t Offset = Layout.getFieldOffset(Field->getFieldIndex());
1798 if (Offset > 0) {
1799 // Alignment is always a power of 2, so the GCD will be a power of 2,
1800 // which means we get to do this crazy thing instead of Euclid's.
1801 uint64_t LowBitOfOffset = Offset & (~Offset + 1);
1802 if (LowBitOfOffset < FieldAlign)
1803 FieldAlign = static_cast<unsigned>(LowBitOfOffset);
1804 }
1805
1806 Align = std::min(Align, FieldAlign);
1807 }
1808 }
1809 }
1810
1811 // Some targets have hard limitation on the maximum requestable alignment in
1812 // aligned attribute for static variables.
1813 const unsigned MaxAlignedAttr = getTargetInfo().getMaxAlignedAttribute();
1814 const auto *VD = dyn_cast<VarDecl>(D);
1815 if (MaxAlignedAttr && VD && VD->getStorageClass() == SC_Static)
1816 Align = std::min(Align, MaxAlignedAttr);
1817
1818 return toCharUnitsFromBits(Align);
1819}
1820
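The field-alignment clamp in getDeclAlign() above relies on the fact that, when the record alignment is a power of two, gcd(alignment, offset) is simply the lowest set bit of the offset. A small standalone sketch of that arithmetic (illustration only, not part of the listing):

    // Why Offset & (~Offset + 1) works: it isolates the lowest set bit, and for
    // power-of-two alignments that bit is exactly gcd(alignment, offset).
    #include <cassert>
    #include <cstdint>

    uint64_t alignFromOffset(uint64_t RecordAlign, uint64_t OffsetBits) {
      if (OffsetBits == 0)
        return RecordAlign;                              // offset 0 imposes no limit
      uint64_t LowBit = OffsetBits & (~OffsetBits + 1);  // lowest set bit
      return LowBit < RecordAlign ? LowBit : RecordAlign;
    }

    int main() {
      // A field at bit offset 96 inside a 128-bit-aligned record is only
      // guaranteed 32-bit alignment: gcd(128, 96) == 32.
      assert(alignFromOffset(128, 96) == 32);
      assert(alignFromOffset(128, 256) == 128);          // multiples don't reduce it
    }
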
1821CharUnits ASTContext::getExnObjectAlignment() const {
1822 return toCharUnitsFromBits(Target->getExnObjectAlignment());
1823}
1824
1825// getTypeInfoDataSizeInChars - Return the size of a type, in
1826// chars. If the type is a record, its data size is returned. This is
1827// the size of the memcpy that's performed when assigning this type
1828// using a trivial copy/move assignment operator.
1829TypeInfoChars ASTContext::getTypeInfoDataSizeInChars(QualType T) const {
1830 TypeInfoChars Info = getTypeInfoInChars(T);
1831
1832 // In C++, objects can sometimes be allocated into the tail padding
1833 // of a base-class subobject. We decide whether that's possible
1834 // during class layout, so here we can just trust the layout results.
1835 if (getLangOpts().CPlusPlus) {
1836 if (const auto *RT = T->getAs<RecordType>()) {
1837 const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl());
1838 Info.Width = layout.getDataSize();
1839 }
1840 }
1841
1842 return Info;
1843}
1844
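The data size used above can be strictly smaller than the full size when a class leaves tail padding that a derived class may reuse. A standalone sketch, assuming a typical Itanium C++ ABI target such as x86-64 (the exact numbers are layout-dependent):

    // B has a user-provided constructor, so it is not a POD for layout purposes
    // and its 3 bytes of tail padding may be reused by a derived class.  Assigning
    // a B therefore only copies its data size (5 bytes), not the full sizeof (8).
    #include <cstdio>

    struct B { int i; char c; B() : i(0), c(0) {} };
    struct D : B { char d; };   // d typically lands at offset 5, inside B's tail padding

    int main() {
      std::printf("sizeof(B)=%zu sizeof(D)=%zu\n", sizeof(B), sizeof(D)); // commonly 8 and 8
    }
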
1845/// getConstantArrayInfoInChars - Performing the computation in CharUnits
1846/// instead of in bits prevents overflowing the uint64_t for some large arrays.
1847TypeInfoChars
1848static getConstantArrayInfoInChars(const ASTContext &Context,
1849 const ConstantArrayType *CAT) {
1850 TypeInfoChars EltInfo = Context.getTypeInfoInChars(CAT->getElementType());
1851 uint64_t Size = CAT->getSize().getZExtValue();
1852 assert((Size == 0 || static_cast<uint64_t>(EltInfo.Width.getQuantity()) <=
1853 (uint64_t)(-1)/Size) &&
1854 "Overflow in array type char size evaluation");
1855 uint64_t Width = EltInfo.Width.getQuantity() * Size;
1856 unsigned Align = EltInfo.Align.getQuantity();
1857 if (!Context.getTargetInfo().getCXXABI().isMicrosoft() ||
1858 Context.getTargetInfo().getPointerWidth(0) == 64)
1859 Width = llvm::alignTo(Width, Align);
1860 return TypeInfoChars(CharUnits::fromQuantity(Width),
1861 CharUnits::fromQuantity(Align),
1862 EltInfo.AlignIsRequired);
1863}
1864
1865TypeInfoChars ASTContext::getTypeInfoInChars(const Type *T) const {
1866 if (const auto *CAT = dyn_cast<ConstantArrayType>(T))
1867 return getConstantArrayInfoInChars(*this, CAT);
1868 TypeInfo Info = getTypeInfo(T);
1869 return TypeInfoChars(toCharUnitsFromBits(Info.Width),
1870 toCharUnitsFromBits(Info.Align),
1871 Info.AlignIsRequired);
1872}
1873
1874TypeInfoChars ASTContext::getTypeInfoInChars(QualType T) const {
1875 return getTypeInfoInChars(T.getTypePtr());
1876}
1877
1878bool ASTContext::isAlignmentRequired(const Type *T) const {
1879 return getTypeInfo(T).AlignIsRequired;
1880}
1881
1882bool ASTContext::isAlignmentRequired(QualType T) const {
1883 return isAlignmentRequired(T.getTypePtr());
1884}
1885
1886unsigned ASTContext::getTypeAlignIfKnown(QualType T,
1887 bool NeedsPreferredAlignment) const {
1888 // An alignment on a typedef overrides anything else.
1889 if (const auto *TT = T->getAs<TypedefType>())
1890 if (unsigned Align = TT->getDecl()->getMaxAlignment())
1891 return Align;
1892
1893 // If we have an (array of) complete type, we're done.
1894 T = getBaseElementType(T);
1895 if (!T->isIncompleteType())
1896 return NeedsPreferredAlignment ? getPreferredTypeAlign(T) : getTypeAlign(T);
1897
1898 // If we had an array type, its element type might be a typedef
1899 // type with an alignment attribute.
1900 if (const auto *TT = T->getAs<TypedefType>())
1901 if (unsigned Align = TT->getDecl()->getMaxAlignment())
1902 return Align;
1903
1904 // Otherwise, see if the declaration of the type had an attribute.
1905 if (const auto *TT = T->getAs<TagType>())
1906 return TT->getDecl()->getMaxAlignment();
1907
1908 return 0;
1909}
1910
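As the first check above notes, an alignment attribute on a typedef wins over everything else, and (unlike the documented GCC rule for variables) it may even lower the alignment. A standalone sketch, assuming a typical x86-64 target and the GNU aligned attribute; the printed values are what this code path is expected to produce, not a guarantee:

    #include <cstdio>

    // The attribute on the typedef replaces the computed alignment outright.
    typedef double underaligned_double __attribute__((aligned(2)));

    int main() {
      std::printf("alignof(double)              = %zu\n", alignof(double));              // usually 8
      std::printf("alignof(underaligned_double) = %zu\n", alignof(underaligned_double)); // expected 2
    }
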
1911TypeInfo ASTContext::getTypeInfo(const Type *T) const {
1912 TypeInfoMap::iterator I = MemoizedTypeInfo.find(T);
1913 if (I != MemoizedTypeInfo.end())
1914 return I->second;
1915
1916 // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup.
1917 TypeInfo TI = getTypeInfoImpl(T);
1918 MemoizedTypeInfo[T] = TI;
1919 return TI;
1920}
1921
1922/// getTypeInfoImpl - Return the size of the specified type, in bits. This
1923/// method does not work on incomplete types.
1924///
1925/// FIXME: Pointers into different addr spaces could have different sizes and
1926/// alignment requirements: getPointerInfo should take an AddrSpace, this
1927/// should take a QualType, &c.
1928TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
1929 uint64_t Width = 0;
1930 unsigned Align = 8;
1931 bool AlignIsRequired = false;
1932 unsigned AS = 0;
1933 switch (T->getTypeClass()) {
1934#define TYPE(Class, Base)
1935#define ABSTRACT_TYPE(Class, Base)
1936#define NON_CANONICAL_TYPE(Class, Base)
1937#define DEPENDENT_TYPE(Class, Base) case Type::Class:
1938#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) \
1939 case Type::Class: \
1940 assert(!T->isDependentType() && "should not see dependent types here"); \
1941 return getTypeInfo(cast<Class##Type>(T)->desugar().getTypePtr());
1942#include "clang/AST/TypeNodes.inc"
1943 llvm_unreachable("Should not see dependent types");
1944
1945 case Type::FunctionNoProto:
1946 case Type::FunctionProto:
1947 // GCC extension: alignof(function) = 32 bits
1948 Width = 0;
1949 Align = 32;
1950 break;
1951
1952 case Type::IncompleteArray:
1953 case Type::VariableArray:
1954 case Type::ConstantArray: {
1955 // Model non-constant sized arrays as size zero, but track the alignment.
1956 uint64_t Size = 0;
1957 if (const auto *CAT = dyn_cast<ConstantArrayType>(T))
1958 Size = CAT->getSize().getZExtValue();
1959
1960 TypeInfo EltInfo = getTypeInfo(cast<ArrayType>(T)->getElementType());
1961 assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) &&
1962 "Overflow in array type bit size evaluation");
1963 Width = EltInfo.Width * Size;
1964 Align = EltInfo.Align;
1965 AlignIsRequired = EltInfo.AlignIsRequired;
1966 if (!getTargetInfo().getCXXABI().isMicrosoft() ||
1967 getTargetInfo().getPointerWidth(0) == 64)
1968 Width = llvm::alignTo(Width, Align);
1969 break;
1970 }
1971
1972 case Type::ExtVector:
1973 case Type::Vector: {
1974 const auto *VT = cast<VectorType>(T);
1975 TypeInfo EltInfo = getTypeInfo(VT->getElementType());
1976 Width = EltInfo.Width * VT->getNumElements();
1977 Align = Width;
1978 // If the alignment is not a power of 2, round up to the next power of 2.
1979 // This happens for non-power-of-2 length vectors.
1980 if (Align & (Align-1)) {
1981 Align = llvm::NextPowerOf2(Align);
1982 Width = llvm::alignTo(Width, Align);
1983 }
1984 // Adjust the alignment based on the target max.
1985 uint64_t TargetVectorAlign = Target->getMaxVectorAlign();
1986 if (TargetVectorAlign && TargetVectorAlign < Align)
1987 Align = TargetVectorAlign;
1988 if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector)
1989 // Adjust the alignment for fixed-length SVE vectors. This is important
1990 // for non-power-of-2 vector lengths.
1991 Align = 128;
1992 else if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector)
1993 // Adjust the alignment for fixed-length SVE predicates.
1994 Align = 16;
1995 break;
1996 }
1997
1998 case Type::ConstantMatrix: {
1999 const auto *MT = cast<ConstantMatrixType>(T);
2000 TypeInfo ElementInfo = getTypeInfo(MT->getElementType());
2001 // The internal layout of a matrix value is implementation defined.
2002 // Initially be ABI compatible with arrays with respect to alignment and
2003 // size.
2004 Width = ElementInfo.Width * MT->getNumRows() * MT->getNumColumns();
2005 Align = ElementInfo.Align;
2006 break;
2007 }
2008
2009 case Type::Builtin:
2010 switch (cast<BuiltinType>(T)->getKind()) {
2011 default: llvm_unreachable("Unknown builtin type!");
2012 case BuiltinType::Void:
2013 // GCC extension: alignof(void) = 8 bits.
2014 Width = 0;
2015 Align = 8;
2016 break;
2017 case BuiltinType::Bool:
2018 Width = Target->getBoolWidth();
2019 Align = Target->getBoolAlign();
2020 break;
2021 case BuiltinType::Char_S:
2022 case BuiltinType::Char_U:
2023 case BuiltinType::UChar:
2024 case BuiltinType::SChar:
2025 case BuiltinType::Char8:
2026 Width = Target->getCharWidth();
2027 Align = Target->getCharAlign();
2028 break;
2029 case BuiltinType::WChar_S:
2030 case BuiltinType::WChar_U:
2031 Width = Target->getWCharWidth();
2032 Align = Target->getWCharAlign();
2033 break;
2034 case BuiltinType::Char16:
2035 Width = Target->getChar16Width();
2036 Align = Target->getChar16Align();
2037 break;
2038 case BuiltinType::Char32:
2039 Width = Target->getChar32Width();
2040 Align = Target->getChar32Align();
2041 break;
2042 case BuiltinType::UShort:
2043 case BuiltinType::Short:
2044 Width = Target->getShortWidth();
2045 Align = Target->getShortAlign();
2046 break;
2047 case BuiltinType::UInt:
2048 case BuiltinType::Int:
2049 Width = Target->getIntWidth();
2050 Align = Target->getIntAlign();
2051 break;
2052 case BuiltinType::ULong:
2053 case BuiltinType::Long:
2054 Width = Target->getLongWidth();
2055 Align = Target->getLongAlign();
2056 break;
2057 case BuiltinType::ULongLong:
2058 case BuiltinType::LongLong:
2059 Width = Target->getLongLongWidth();
2060 Align = Target->getLongLongAlign();
2061 break;
2062 case BuiltinType::Int128:
2063 case BuiltinType::UInt128:
2064 Width = 128;
2065 Align = 128; // int128_t is 128-bit aligned on all targets.
2066 break;
2067 case BuiltinType::ShortAccum:
2068 case BuiltinType::UShortAccum:
2069 case BuiltinType::SatShortAccum:
2070 case BuiltinType::SatUShortAccum:
2071 Width = Target->getShortAccumWidth();
2072 Align = Target->getShortAccumAlign();
2073 break;
2074 case BuiltinType::Accum:
2075 case BuiltinType::UAccum:
2076 case BuiltinType::SatAccum:
2077 case BuiltinType::SatUAccum:
2078 Width = Target->getAccumWidth();
2079 Align = Target->getAccumAlign();
2080 break;
2081 case BuiltinType::LongAccum:
2082 case BuiltinType::ULongAccum:
2083 case BuiltinType::SatLongAccum:
2084 case BuiltinType::SatULongAccum:
2085 Width = Target->getLongAccumWidth();
2086 Align = Target->getLongAccumAlign();
2087 break;
2088 case BuiltinType::ShortFract:
2089 case BuiltinType::UShortFract:
2090 case BuiltinType::SatShortFract:
2091 case BuiltinType::SatUShortFract:
2092 Width = Target->getShortFractWidth();
2093 Align = Target->getShortFractAlign();
2094 break;
2095 case BuiltinType::Fract:
2096 case BuiltinType::UFract:
2097 case BuiltinType::SatFract:
2098 case BuiltinType::SatUFract:
2099 Width = Target->getFractWidth();
2100 Align = Target->getFractAlign();
2101 break;
2102 case BuiltinType::LongFract:
2103 case BuiltinType::ULongFract:
2104 case BuiltinType::SatLongFract:
2105 case BuiltinType::SatULongFract:
2106 Width = Target->getLongFractWidth();
2107 Align = Target->getLongFractAlign();
2108 break;
2109 case BuiltinType::BFloat16:
2110 Width = Target->getBFloat16Width();
2111 Align = Target->getBFloat16Align();
2112 break;
2113 case BuiltinType::Float16:
2114 case BuiltinType::Half:
2115 if (Target->hasFloat16Type() || !getLangOpts().OpenMP ||
2116 !getLangOpts().OpenMPIsDevice) {
2117 Width = Target->getHalfWidth();
2118 Align = Target->getHalfAlign();
2119 } else {
2120 assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
2121 "Expected OpenMP device compilation.");
2122 Width = AuxTarget->getHalfWidth();
2123 Align = AuxTarget->getHalfAlign();
2124 }
2125 break;
2126 case BuiltinType::Float:
2127 Width = Target->getFloatWidth();
2128 Align = Target->getFloatAlign();
2129 break;
2130 case BuiltinType::Double:
2131 Width = Target->getDoubleWidth();
2132 Align = Target->getDoubleAlign();
2133 break;
2134 case BuiltinType::LongDouble:
2135 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
2136 (Target->getLongDoubleWidth() != AuxTarget->getLongDoubleWidth() ||
2137 Target->getLongDoubleAlign() != AuxTarget->getLongDoubleAlign())) {
2138 Width = AuxTarget->getLongDoubleWidth();
2139 Align = AuxTarget->getLongDoubleAlign();
2140 } else {
2141 Width = Target->getLongDoubleWidth();
2142 Align = Target->getLongDoubleAlign();
2143 }
2144 break;
2145 case BuiltinType::Float128:
2146 if (Target->hasFloat128Type() || !getLangOpts().OpenMP ||
2147 !getLangOpts().OpenMPIsDevice) {
2148 Width = Target->getFloat128Width();
2149 Align = Target->getFloat128Align();
2150 } else {
2151 assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
2152 "Expected OpenMP device compilation.");
2153 Width = AuxTarget->getFloat128Width();
2154 Align = AuxTarget->getFloat128Align();
2155 }
2156 break;
2157 case BuiltinType::NullPtr:
2158 Width = Target->getPointerWidth(0); // C++ 3.9.1p11: sizeof(nullptr_t)
2159 Align = Target->getPointerAlign(0); // == sizeof(void*)
2160 break;
2161 case BuiltinType::ObjCId:
2162 case BuiltinType::ObjCClass:
2163 case BuiltinType::ObjCSel:
2164 Width = Target->getPointerWidth(0);
2165 Align = Target->getPointerAlign(0);
2166 break;
2167 case BuiltinType::OCLSampler:
2168 case BuiltinType::OCLEvent:
2169 case BuiltinType::OCLClkEvent:
2170 case BuiltinType::OCLQueue:
2171 case BuiltinType::OCLReserveID:
2172#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
2173 case BuiltinType::Id:
2174#include "clang/Basic/OpenCLImageTypes.def"
2175#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
2176 case BuiltinType::Id:
2177#include "clang/Basic/OpenCLExtensionTypes.def"
2178 AS = getTargetAddressSpace(
2179 Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T)));
2180 Width = Target->getPointerWidth(AS);
2181 Align = Target->getPointerAlign(AS);
2182 break;
2183 // The SVE types are effectively target-specific. The length of an
2184 // SVE_VECTOR_TYPE is only known at runtime, but it is always a multiple
2185 // of 128 bits. There is one predicate bit for each vector byte, so the
2186 // length of an SVE_PREDICATE_TYPE is always a multiple of 16 bits.
2187 //
2188 // Because the length is only known at runtime, we use a dummy value
2189 // of 0 for the static length. The alignment values are those defined
2190 // by the Procedure Call Standard for the Arm Architecture.
2191#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \
2192 IsSigned, IsFP, IsBF) \
2193 case BuiltinType::Id: \
2194 Width = 0; \
2195 Align = 128; \
2196 break;
2197#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \
2198 case BuiltinType::Id: \
2199 Width = 0; \
2200 Align = 16; \
2201 break;
2202#include "clang/Basic/AArch64SVEACLETypes.def"
2203#define PPC_VECTOR_TYPE(Name, Id, Size) \
2204 case BuiltinType::Id: \
2205 Width = Size; \
2206 Align = Size; \
2207 break;
2208#include "clang/Basic/PPCTypes.def"
2209#define RVV_VECTOR_TYPE(Name, Id, SingletonId, ElKind, ElBits, NF, IsSigned, \
2210 IsFP) \
2211 case BuiltinType::Id: \
2212 Width = 0; \
2213 Align = ElBits; \
2214 break;
2215#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, ElKind) \
2216 case BuiltinType::Id: \
2217 Width = 0; \
2218 Align = 8; \
2219 break;
2220#include "clang/Basic/RISCVVTypes.def"
2221 }
2222 break;
2223 case Type::ObjCObjectPointer:
2224 Width = Target->getPointerWidth(0);
2225 Align = Target->getPointerAlign(0);
2226 break;
2227 case Type::BlockPointer:
2228 AS = getTargetAddressSpace(cast<BlockPointerType>(T)->getPointeeType());
2229 Width = Target->getPointerWidth(AS);
2230 Align = Target->getPointerAlign(AS);
2231 break;
2232 case Type::LValueReference:
2233 case Type::RValueReference:
2234 // alignof and sizeof should never enter this code path here, so we go
2235 // the pointer route.
2236 AS = getTargetAddressSpace(cast<ReferenceType>(T)->getPointeeType());
2237 Width = Target->getPointerWidth(AS);
2238 Align = Target->getPointerAlign(AS);
2239 break;
2240 case Type::Pointer:
2241 AS = getTargetAddressSpace(cast<PointerType>(T)->getPointeeType());
2242 Width = Target->getPointerWidth(AS);
2243 Align = Target->getPointerAlign(AS);
2244 break;
2245 case Type::MemberPointer: {
2246 const auto *MPT = cast<MemberPointerType>(T);
2247 CXXABI::MemberPointerInfo MPI = ABI->getMemberPointerInfo(MPT);
2248 Width = MPI.Width;
2249 Align = MPI.Align;
2250 break;
2251 }
2252 case Type::Complex: {
2253 // Complex types have the same alignment as their elements, but twice the
2254 // size.
2255 TypeInfo EltInfo = getTypeInfo(cast<ComplexType>(T)->getElementType());
2256 Width = EltInfo.Width * 2;
2257 Align = EltInfo.Align;
2258 break;
2259 }
2260 case Type::ObjCObject:
2261 return getTypeInfo(cast<ObjCObjectType>(T)->getBaseType().getTypePtr());
2262 case Type::Adjusted:
2263 case Type::Decayed:
2264 return getTypeInfo(cast<AdjustedType>(T)->getAdjustedType().getTypePtr());
2265 case Type::ObjCInterface: {
2266 const auto *ObjCI = cast<ObjCInterfaceType>(T);
2267 if (ObjCI->getDecl()->isInvalidDecl()) {
2268 Width = 8;
2269 Align = 8;
2270 break;
2271 }
2272 const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
2273 Width = toBits(Layout.getSize());
2274 Align = toBits(Layout.getAlignment());
2275 break;
2276 }
2277 case Type::ExtInt: {
2278 const auto *EIT = cast<ExtIntType>(T);
2279 Align =
2280 std::min(static_cast<unsigned>(std::max(
2281 getCharWidth(), llvm::PowerOf2Ceil(EIT->getNumBits()))),
2282 Target->getLongLongAlign());
2283 Width = llvm::alignTo(EIT->getNumBits(), Align);
2284 break;
2285 }
2286 case Type::Record:
2287 case Type::Enum: {
2288 const auto *TT = cast<TagType>(T);
2289
2290 if (TT->getDecl()->isInvalidDecl()) {
2291 Width = 8;
2292 Align = 8;
2293 break;
2294 }
2295
2296 if (const auto *ET = dyn_cast<EnumType>(TT)) {
2297 const EnumDecl *ED = ET->getDecl();
2298 TypeInfo Info =
2299 getTypeInfo(ED->getIntegerType()->getUnqualifiedDesugaredType());
2300 if (unsigned AttrAlign = ED->getMaxAlignment()) {
2301 Info.Align = AttrAlign;
2302 Info.AlignIsRequired = true;
2303 }
2304 return Info;
2305 }
2306
2307 const auto *RT = cast<RecordType>(TT);
2308 const RecordDecl *RD = RT->getDecl();
2309 const ASTRecordLayout &Layout = getASTRecordLayout(RD);
2310 Width = toBits(Layout.getSize());
2311 Align = toBits(Layout.getAlignment());
2312 AlignIsRequired = RD->hasAttr<AlignedAttr>();
2313 break;
2314 }
2315
2316 case Type::SubstTemplateTypeParm:
2317 return getTypeInfo(cast<SubstTemplateTypeParmType>(T)->
2318 getReplacementType().getTypePtr());
2319
2320 case Type::Auto:
2321 case Type::DeducedTemplateSpecialization: {
2322 const auto *A = cast<DeducedType>(T);
2323 assert(!A->getDeducedType().isNull() &&
2324 "cannot request the size of an undeduced or dependent auto type");
2325 return getTypeInfo(A->getDeducedType().getTypePtr());
2326 }
2327
2328 case Type::Paren:
2329 return getTypeInfo(cast<ParenType>(T)->getInnerType().getTypePtr());
2330
2331 case Type::MacroQualified:
2332 return getTypeInfo(
2333 cast<MacroQualifiedType>(T)->getUnderlyingType().getTypePtr());
2334
2335 case Type::ObjCTypeParam:
2336 return getTypeInfo(cast<ObjCTypeParamType>(T)->desugar().getTypePtr());
2337
2338 case Type::Typedef: {
2339 const TypedefNameDecl *Typedef = cast<TypedefType>(T)->getDecl();
2340 TypeInfo Info = getTypeInfo(Typedef->getUnderlyingType().getTypePtr());
2341 // If the typedef has an aligned attribute on it, it overrides any computed
2342 // alignment we have. This violates the GCC documentation (which says that
2343 // attribute(aligned) can only round up) but matches its implementation.
2344 if (unsigned AttrAlign = Typedef->getMaxAlignment()) {
2345 Align = AttrAlign;
2346 AlignIsRequired = true;
2347 } else {
2348 Align = Info.Align;
2349 AlignIsRequired = Info.AlignIsRequired;
2350 }
2351 Width = Info.Width;
2352 break;
2353 }
2354
2355 case Type::Elaborated:
2356 return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr());
2357
2358 case Type::Attributed:
2359 return getTypeInfo(
2360 cast<AttributedType>(T)->getEquivalentType().getTypePtr());
2361
2362 case Type::Atomic: {
2363 // Start with the base type information.
2364 TypeInfo Info = getTypeInfo(cast<AtomicType>(T)->getValueType());
2365 Width = Info.Width;
2366 Align = Info.Align;
2367
2368 if (!Width) {
2369 // An otherwise zero-sized type should still generate an
2370 // atomic operation.
2371 Width = Target->getCharWidth();
2372 assert(Align);
2373 } else if (Width <= Target->getMaxAtomicPromoteWidth()) {
2374 // If the size of the type doesn't exceed the platform's max
2375 // atomic promotion width, make the size and alignment more
2376 // favorable to atomic operations:
2377
2378 // Round the size up to a power of 2.
2379 if (!llvm::isPowerOf2_64(Width))
2380 Width = llvm::NextPowerOf2(Width);
2381
2382 // Set the alignment equal to the size.
2383 Align = static_cast<unsigned>(Width);
2384 }
2385 }
2386 break;
2387
2388 case Type::Pipe:
2389 Width = Target->getPointerWidth(getTargetAddressSpace(LangAS::opencl_global));
2390 Align = Target->getPointerAlign(getTargetAddressSpace(LangAS::opencl_global));
2391 break;
2392 }
2393
2394 assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
2395 return TypeInfo(Width, Align, AlignIsRequired);
2396}
2397
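Several cases above (odd-length vectors, small _Atomic types) round the width up so the alignment can remain a power of two. A standalone sketch of that rounding, using plain integer arithmetic in place of llvm::NextPowerOf2 and llvm::alignTo (illustration only):

    #include <cassert>
    #include <cstdint>

    // Next power of two strictly greater than V (same contract as llvm::NextPowerOf2).
    uint64_t nextPowerOf2(uint64_t V) {
      uint64_t P = 1;
      while (P <= V) P <<= 1;
      return P;
    }

    int main() {
      // A hypothetical 3 x float vector: 96 bits of data.
      uint64_t Width = 32 * 3, Align = Width;
      if (Align & (Align - 1)) {                        // not a power of two
        Align = nextPowerOf2(Align);                    // 96 -> 128
        Width = (Width + Align - 1) / Align * Align;    // alignTo: 96 -> 128
      }
      assert(Width == 128 && Align == 128);
    }
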
2398unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const {
2399 UnadjustedAlignMap::iterator I = MemoizedUnadjustedAlign.find(T);
2400 if (I != MemoizedUnadjustedAlign.end())
2401 return I->second;
2402
2403 unsigned UnadjustedAlign;
2404 if (const auto *RT = T->getAs<RecordType>()) {
2405 const RecordDecl *RD = RT->getDecl();
2406 const ASTRecordLayout &Layout = getASTRecordLayout(RD);
2407 UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
2408 } else if (const auto *ObjCI = T->getAs<ObjCInterfaceType>()) {
2409 const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
2410 UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
2411 } else {
2412 UnadjustedAlign = getTypeAlign(T->getUnqualifiedDesugaredType());
2413 }
2414
2415 MemoizedUnadjustedAlign[T] = UnadjustedAlign;
2416 return UnadjustedAlign;
2417}
2418
2419unsigned ASTContext::getOpenMPDefaultSimdAlign(QualType T) const {
2420 unsigned SimdAlign = getTargetInfo().getSimdDefaultAlign();
2421 return SimdAlign;
2422}
2423
2424/// toCharUnitsFromBits - Convert a size in bits to a size in characters.
2425CharUnits ASTContext::toCharUnitsFromBits(int64_t BitSize) const {
2426 return CharUnits::fromQuantity(BitSize / getCharWidth());
2427}
2428
2429/// toBits - Convert a size in characters to a size in bits.
2430int64_t ASTContext::toBits(CharUnits CharSize) const {
2431 return CharSize.getQuantity() * getCharWidth();
2432}
2433
2434/// getTypeSizeInChars - Return the size of the specified type, in characters.
2435/// This method does not work on incomplete types.
2436CharUnits ASTContext::getTypeSizeInChars(QualType T) const {
2437 return getTypeInfoInChars(T).Width;
2438}
2439CharUnits ASTContext::getTypeSizeInChars(const Type *T) const {
2440 return getTypeInfoInChars(T).Width;
2441}
2442
2443/// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
2444/// characters. This method does not work on incomplete types.
2445CharUnits ASTContext::getTypeAlignInChars(QualType T) const {
2446 return toCharUnitsFromBits(getTypeAlign(T));
2447}
2448CharUnits ASTContext::getTypeAlignInChars(const Type *T) const {
2449 return toCharUnitsFromBits(getTypeAlign(T));
2450}
2451
2452/// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a
2453/// type, in characters, before alignment adjustments. This method does
2454/// not work on incomplete types.
2455CharUnits ASTContext::getTypeUnadjustedAlignInChars(QualType T) const {
2456 return toCharUnitsFromBits(getTypeUnadjustedAlign(T));
2457}
2458CharUnits ASTContext::getTypeUnadjustedAlignInChars(const Type *T) const {
2459 return toCharUnitsFromBits(getTypeUnadjustedAlign(T));
2460}
2461
2462/// getPreferredTypeAlign - Return the "preferred" alignment of the specified
2463/// type for the current target in bits. This can be different than the ABI
2464/// alignment in cases where it is beneficial for performance or backwards
2465/// compatibility preserving to overalign a data type. (Note: despite the name,
2466/// the preferred alignment is ABI-impacting, and not an optimization.)
2467unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
2468 TypeInfo TI = getTypeInfo(T);
2469 unsigned ABIAlign = TI.Align;
2470
2471 T = T->getBaseElementTypeUnsafe();
2472
2473 // The preferred alignment of member pointers is that of a pointer.
2474 if (T->isMemberPointerType())
2475 return getPreferredTypeAlign(getPointerDiffType().getTypePtr());
2476
2477 if (!Target->allowsLargerPreferedTypeAlignment())
2478 return ABIAlign;
2479
2480 if (const auto *RT = T->getAs<RecordType>()) {
2481 if (TI.AlignIsRequired || RT->getDecl()->isInvalidDecl())
2482 return ABIAlign;
2483
2484 unsigned PreferredAlign = static_cast<unsigned>(
2485 toBits(getASTRecordLayout(RT->getDecl()).PreferredAlignment));
2486 assert(PreferredAlign >= ABIAlign &&
2487 "PreferredAlign should be at least as large as ABIAlign.");
2488 return PreferredAlign;
2489 }
2490
2491 // Double (and, for targets supporting AIX `power` alignment, long double) and
2492 // long long should be naturally aligned (despite requiring less alignment) if
2493 // possible.
2494 if (const auto *CT = T->getAs<ComplexType>())
2495 T = CT->getElementType().getTypePtr();
2496 if (const auto *ET = T->getAs<EnumType>())
2497 T = ET->getDecl()->getIntegerType().getTypePtr();
2498 if (T->isSpecificBuiltinType(BuiltinType::Double) ||
2499 T->isSpecificBuiltinType(BuiltinType::LongLong) ||
2500 T->isSpecificBuiltinType(BuiltinType::ULongLong) ||
2501 (T->isSpecificBuiltinType(BuiltinType::LongDouble) &&
2502 Target->defaultsToAIXPowerAlignment()))
2503 // Don't increase the alignment if an alignment attribute was specified on a
2504 // typedef declaration.
2505 if (!TI.AlignIsRequired)
2506 return std::max(ABIAlign, (unsigned)getTypeSize(T));
2507
2508 return ABIAlign;
2509}
2510
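A concrete case where the preferred alignment computed above exceeds the ABI alignment is double on 32-bit x86 System V: struct layout only guarantees 4 bytes, while the preferred (natural) alignment reported by GNU __alignof__ is 8. A standalone sketch, assuming an i386 System V target (build with -m32); on x86-64 both values are 8:

    #include <cstddef>
    #include <cstdio>

    struct S { char c; double d; };

    int main() {
      std::printf("alignof(double)     = %zu\n", alignof(double));                      // 4: ABI alignment
      std::printf("__alignof__(double) = %zu\n", (std::size_t)__alignof__(double));     // 8: preferred alignment
      std::printf("offsetof(S, d)      = %zu\n", offsetof(S, d));                       // 4: layout uses the ABI value
    }
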
2511/// getTargetDefaultAlignForAttributeAligned - Return the default alignment
2512/// for __attribute__((aligned)) on this target, to be used if no alignment
2513/// value is specified.
2514unsigned ASTContext::getTargetDefaultAlignForAttributeAligned() const {
2515 return getTargetInfo().getDefaultAlignForAttributeAligned();
2516}
2517
2518/// getAlignOfGlobalVar - Return the alignment in bits that should be given
2519/// to a global variable of the specified type.
2520unsigned ASTContext::getAlignOfGlobalVar(QualType T) const {
2521 uint64_t TypeSize = getTypeSize(T.getTypePtr());
2522 return std::max(getPreferredTypeAlign(T),
2523 getTargetInfo().getMinGlobalAlign(TypeSize));
2524}
2525
2526/// getAlignOfGlobalVarInChars - Return the alignment in characters that
2527/// should be given to a global variable of the specified type.
2528CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T) const {
2529 return toCharUnitsFromBits(getAlignOfGlobalVar(T));
2530}
2531
2532CharUnits ASTContext::getOffsetOfBaseWithVBPtr(const CXXRecordDecl *RD) const {
2533 CharUnits Offset = CharUnits::Zero();
2534 const ASTRecordLayout *Layout = &getASTRecordLayout(RD);
2535 while (const CXXRecordDecl *Base = Layout->getBaseSharingVBPtr()) {
2536 Offset += Layout->getBaseClassOffset(Base);
2537 Layout = &getASTRecordLayout(Base);
2538 }
2539 return Offset;
2540}
2541
2542CharUnits ASTContext::getMemberPointerPathAdjustment(const APValue &MP) const {
2543 const ValueDecl *MPD = MP.getMemberPointerDecl();
2544 CharUnits ThisAdjustment = CharUnits::Zero();
2545 ArrayRef<const CXXRecordDecl*> Path = MP.getMemberPointerPath();
2546 bool DerivedMember = MP.isMemberPointerToDerivedMember();
2547 const CXXRecordDecl *RD = cast<CXXRecordDecl>(MPD->getDeclContext());
2548 for (unsigned I = 0, N = Path.size(); I != N; ++I) {
2549 const CXXRecordDecl *Base = RD;
2550 const CXXRecordDecl *Derived = Path[I];
2551 if (DerivedMember)
2552 std::swap(Base, Derived);
2553 ThisAdjustment += getASTRecordLayout(Derived).getBaseClassOffset(Base);
2554 RD = Path[I];
2555 }
2556 if (DerivedMember)
2557 ThisAdjustment = -ThisAdjustment;
2558 return ThisAdjustment;
2559}
2560
2561/// DeepCollectObjCIvars -
2562/// This routine first collects all declared, but not synthesized, ivars in
2563/// super class and then collects all ivars, including those synthesized for
2564/// current class. This routine is used for implementation of current class
2565/// when all ivars, declared and synthesized are known.
2566void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI,
2567 bool leafClass,
2568 SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const {
2569 if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass())
2570 DeepCollectObjCIvars(SuperClass, false, Ivars);
2571 if (!leafClass) {
2572 for (const auto *I : OI->ivars())
2573 Ivars.push_back(I);
2574 } else {
2575 auto *IDecl = const_cast<ObjCInterfaceDecl *>(OI);
2576 for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv;
2577 Iv= Iv->getNextIvar())
2578 Ivars.push_back(Iv);
2579 }
2580}
2581
2582/// CollectInheritedProtocols - Collect all protocols in current class and
2583/// those inherited by it.
2584void ASTContext::CollectInheritedProtocols(const Decl *CDecl,
2585 llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols) {
2586 if (const auto *OI = dyn_cast<ObjCInterfaceDecl>(CDecl)) {
2587 // We can use protocol_iterator here instead of
2588 // all_referenced_protocol_iterator since we are walking all categories.
2589 for (auto *Proto : OI->all_referenced_protocols()) {
2590 CollectInheritedProtocols(Proto, Protocols);
2591 }
2592
2593 // Categories of this Interface.
2594 for (const auto *Cat : OI->visible_categories())
2595 CollectInheritedProtocols(Cat, Protocols);
2596
2597 if (ObjCInterfaceDecl *SD = OI->getSuperClass())
2598 while (SD) {
2599 CollectInheritedProtocols(SD, Protocols);
2600 SD = SD->getSuperClass();
2601 }
2602 } else if (const auto *OC = dyn_cast<ObjCCategoryDecl>(CDecl)) {
2603 for (auto *Proto : OC->protocols()) {
2604 CollectInheritedProtocols(Proto, Protocols);
2605 }
2606 } else if (const auto *OP = dyn_cast<ObjCProtocolDecl>(CDecl)) {
2607 // Insert the protocol.
2608 if (!Protocols.insert(
2609 const_cast<ObjCProtocolDecl *>(OP->getCanonicalDecl())).second)
2610 return;
2611
2612 for (auto *Proto : OP->protocols())
2613 CollectInheritedProtocols(Proto, Protocols);
2614 }
2615}
2616
2617static bool unionHasUniqueObjectRepresentations(const ASTContext &Context,
2618 const RecordDecl *RD) {
2619 assert(RD->isUnion() && "Must be union type");
2620 CharUnits UnionSize = Context.getTypeSizeInChars(RD->getTypeForDecl());
2621
2622 for (const auto *Field : RD->fields()) {
2623 if (!Context.hasUniqueObjectRepresentations(Field->getType()))
2624 return false;
2625 CharUnits FieldSize = Context.getTypeSizeInChars(Field->getType());
2626 if (FieldSize != UnionSize)
2627 return false;
2628 }
2629 return !RD->field_empty();
2630}
2631
2632static bool isStructEmpty(QualType Ty) {
2633 const RecordDecl *RD = Ty->castAs<RecordType>()->getDecl();
2634
2635 if (!RD->field_empty())
2636 return false;
2637
2638 if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD))
2639 return ClassDecl->isEmpty();
2640
2641 return true;
2642}
2643
2644static llvm::Optional<int64_t>
2645structHasUniqueObjectRepresentations(const ASTContext &Context,
2646 const RecordDecl *RD) {
2647 assert(!RD->isUnion() && "Must be struct/class type");
2648 const auto &Layout = Context.getASTRecordLayout(RD);
2649
2650 int64_t CurOffsetInBits = 0;
2651 if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD)) {
2652 if (ClassDecl->isDynamicClass())
2653 return llvm::None;
2654
2655 SmallVector<std::pair<QualType, int64_t>, 4> Bases;
2656 for (const auto &Base : ClassDecl->bases()) {
2657 // Empty types can be inherited from, and non-empty types can potentially
2658 // have tail padding, so just make sure there isn't an error.
2659 if (!isStructEmpty(Base.getType())) {
2660 llvm::Optional<int64_t> Size = structHasUniqueObjectRepresentations(
2661 Context, Base.getType()->castAs<RecordType>()->getDecl());
2662 if (!Size)
2663 return llvm::None;
2664 Bases.emplace_back(Base.getType(), Size.getValue());
2665 }
2666 }
2667
2668 llvm::sort(Bases, [&](const std::pair<QualType, int64_t> &L,
2669 const std::pair<QualType, int64_t> &R) {
2670 return Layout.getBaseClassOffset(L.first->getAsCXXRecordDecl()) <
2671 Layout.getBaseClassOffset(R.first->getAsCXXRecordDecl());
2672 });
2673
2674 for (const auto &Base : Bases) {
2675 int64_t BaseOffset = Context.toBits(
2676 Layout.getBaseClassOffset(Base.first->getAsCXXRecordDecl()));
2677 int64_t BaseSize = Base.second;
2678 if (BaseOffset != CurOffsetInBits)
2679 return llvm::None;
2680 CurOffsetInBits = BaseOffset + BaseSize;
2681 }
2682 }
2683
2684 for (const auto *Field : RD->fields()) {
2685 if (!Field->getType()->isReferenceType() &&
2686 !Context.hasUniqueObjectRepresentations(Field->getType()))
2687 return llvm::None;
2688
2689 int64_t FieldSizeInBits =
2690 Context.toBits(Context.getTypeSizeInChars(Field->getType()));
2691 if (Field->isBitField()) {
2692 int64_t BitfieldSize = Field->getBitWidthValue(Context);
2693
2694 if (BitfieldSize > FieldSizeInBits)
2695 return llvm::None;
2696 FieldSizeInBits = BitfieldSize;
2697 }
2698
2699 int64_t FieldOffsetInBits = Context.getFieldOffset(Field);
2700
2701 if (FieldOffsetInBits != CurOffsetInBits)
2702 return llvm::None;
2703
2704 CurOffsetInBits = FieldSizeInBits + FieldOffsetInBits;
2705 }
2706
2707 return CurOffsetInBits;
2708}
2709
2710bool ASTContext::hasUniqueObjectRepresentations(QualType Ty) const {
2711 // C++17 [meta.unary.prop]:
2712 // The predicate condition for a template specialization
2713 // has_unique_object_representations<T> shall be
2714 // satisfied if and only if:
2715 // (9.1) - T is trivially copyable, and
2716 // (9.2) - any two objects of type T with the same value have the same
2717 // object representation, where two objects
2718 // of array or non-union class type are considered to have the same value
2719 // if their respective sequences of
2720 // direct subobjects have the same values, and two objects of union type
2721 // are considered to have the same
2722 // value if they have the same active member and the corresponding members
2723 // have the same value.
2724 // The set of scalar types for which this condition holds is
2725 // implementation-defined. [ Note: If a type has padding
2726 // bits, the condition does not hold; otherwise, the condition holds true
2727 // for unsigned integral types. -- end note ]
2728 assert(!Ty.isNull() && "Null QualType sent to unique object rep check");
2729
2730 // Arrays are unique only if their element type is unique.
2731 if (Ty->isArrayType())
2732 return hasUniqueObjectRepresentations(getBaseElementType(Ty));
2733
2734 // (9.1) - T is trivially copyable...
2735 if (!Ty.isTriviallyCopyableType(*this))
2736 return false;
2737
2738 // All integrals and enums are unique.
2739 if (Ty->isIntegralOrEnumerationType())
2740 return true;
2741
2742 // All other pointers are unique.
2743 if (Ty->isPointerType())
2744 return true;
2745
2746 if (Ty->isMemberPointerType()) {
2747 const auto *MPT = Ty->getAs<MemberPointerType>();
2748 return !ABI->getMemberPointerInfo(MPT).HasPadding;
2749 }
2750
2751 if (Ty->isRecordType()) {
2752 const RecordDecl *Record = Ty->castAs<RecordType>()->getDecl();
2753
2754 if (Record->isInvalidDecl())
2755 return false;
2756
2757 if (Record->isUnion())
2758 return unionHasUniqueObjectRepresentations(*this, Record);
2759
2760 Optional<int64_t> StructSize =
2761 structHasUniqueObjectRepresentations(*this, Record);
2762
2763 return StructSize &&
2764 StructSize.getValue() == static_cast<int64_t>(getTypeSize(Ty));
2765 }
2766
2767 // FIXME: More cases to handle here (list by rsmith):
2768 // vectors (careful about, eg, vector of 3 foo)
2769 // _Complex int and friends
2770 // _Atomic T
2771 // Obj-C block pointers
2772 // Obj-C object pointers
2773 // and perhaps OpenCL's various builtin types (pipe, sampler_t, event_t,
2774 // clk_event_t, queue_t, reserve_id_t)
2775 // There're also Obj-C class types and the Obj-C selector type, but I think it
2776 // makes sense for those to return false here.
2777
2778 return false;
2779}
2780
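This function is the semantic core of the C++17 std::has_unique_object_representations trait: padding bits, or types whose equal values can have different representations (floating point falls through to the final return false above), defeat it. A standalone sketch, assuming a typical target where std::uint32_t is 4-byte aligned:

    #include <cstdint>
    #include <type_traits>

    struct Packed { std::uint32_t a; std::uint32_t b; };   // no padding bits
    struct Padded { char c; std::uint32_t i; };            // 3 padding bytes after c

    static_assert(std::has_unique_object_representations_v<Packed>, "");
    static_assert(!std::has_unique_object_representations_v<Padded>, "");
    // std::has_unique_object_representations_v<float> is also false with GCC/Clang,
    // matching the fall-through at the end of the function above.
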
2781unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const {
2782 unsigned count = 0;
2783 // Count ivars declared in class extension.
2784 for (const auto *Ext : OI->known_extensions())
2785 count += Ext->ivar_size();
2786
2787 // Count ivar defined in this class's implementation. This
2788 // includes synthesized ivars.
2789 if (ObjCImplementationDecl *ImplDecl = OI->getImplementation())
2790 count += ImplDecl->ivar_size();
2791
2792 return count;
2793}
2794
2795bool ASTContext::isSentinelNullExpr(const Expr *E) {
2796 if (!E)
2797 return false;
2798
2799 // nullptr_t is always treated as null.
2800 if (E->getType()->isNullPtrType()) return true;
2801
2802 if (E->getType()->isAnyPointerType() &&
2803 E->IgnoreParenCasts()->isNullPointerConstant(*this,
2804 Expr::NPC_ValueDependentIsNull))
2805 return true;
2806
2807 // Unfortunately, __null has type 'int'.
2808 if (isa<GNUNullExpr>(E)) return true;
2809
2810 return false;
2811}
2812
2813/// Get the implementation of ObjCInterfaceDecl, or nullptr if none
2814/// exists.
2815ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) {
2816 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
2817 I = ObjCImpls.find(D);
2818 if (I != ObjCImpls.end())
2819 return cast<ObjCImplementationDecl>(I->second);
2820 return nullptr;
2821}
2822
2823/// Get the implementation of ObjCCategoryDecl, or nullptr if none
2824/// exists.
2825ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) {
2826 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
2827 I = ObjCImpls.find(D);
2828 if (I != ObjCImpls.end())
2829 return cast<ObjCCategoryImplDecl>(I->second);
2830 return nullptr;
2831}
2832
2833/// Set the implementation of ObjCInterfaceDecl.
2834void ASTContext::setObjCImplementation(ObjCInterfaceDecl *IFaceD,
2835 ObjCImplementationDecl *ImplD) {
2836 assert(IFaceD && ImplD && "Passed null params");
2837 ObjCImpls[IFaceD] = ImplD;
2838}
2839
2840/// Set the implementation of ObjCCategoryDecl.
2841void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD,
2842 ObjCCategoryImplDecl *ImplD) {
2843 assert(CatD && ImplD && "Passed null params");
2844 ObjCImpls[CatD] = ImplD;
2845}
2846
2847const ObjCMethodDecl *
2848ASTContext::getObjCMethodRedeclaration(const ObjCMethodDecl *MD) const {
2849 return ObjCMethodRedecls.lookup(MD);
2850}
2851
2852void ASTContext::setObjCMethodRedeclaration(const ObjCMethodDecl *MD,
2853 const ObjCMethodDecl *Redecl) {
2854 assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration");
2855 ObjCMethodRedecls[MD] = Redecl;
2856}
2857
2858const ObjCInterfaceDecl *ASTContext::getObjContainingInterface(
2859 const NamedDecl *ND) const {
2860 if (const auto *ID = dyn_cast<ObjCInterfaceDecl>(ND->getDeclContext()))
2861 return ID;
2862 if (const auto *CD = dyn_cast<ObjCCategoryDecl>(ND->getDeclContext()))
2863 return CD->getClassInterface();
2864 if (const auto *IMD = dyn_cast<ObjCImplDecl>(ND->getDeclContext()))
2865 return IMD->getClassInterface();
2866
2867 return nullptr;
2868}
2869
2870/// Get the copy initialization expression of VarDecl, or nullptr if
2871/// none exists.
2872BlockVarCopyInit ASTContext::getBlockVarCopyInit(const VarDecl *VD) const {
2873 assert(VD && "Passed null params");
2874 assert(VD->hasAttr<BlocksAttr>() &&
2875 "getBlockVarCopyInits - not __block var");
2876 auto I = BlockVarCopyInits.find(VD);
2877 if (I != BlockVarCopyInits.end())
2878 return I->second;
2879 return {nullptr, false};
2880}
2881
2882/// Set the copy initialization expression of a block var decl.
2883void ASTContext::setBlockVarCopyInit(const VarDecl*VD, Expr *CopyExpr,
2884 bool CanThrow) {
2885 assert(VD && CopyExpr && "Passed null params");
2886 assert(VD->hasAttr<BlocksAttr>() &&
2887 "setBlockVarCopyInits - not __block var");
2888 BlockVarCopyInits[VD].setExprAndFlag(CopyExpr, CanThrow);
2889}
2890
2891TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T,
2892 unsigned DataSize) const {
2893 if (!DataSize)
2894 DataSize = TypeLoc::getFullDataSizeForType(T);
2895 else
2896 assert(DataSize == TypeLoc::getFullDataSizeForType(T) &&
2897 "incorrect data size provided to CreateTypeSourceInfo!");
2898
2899 auto *TInfo =
2900 (TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8);
2901 new (TInfo) TypeSourceInfo(T);
2902 return TInfo;
2903}
2904
2905TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T,
2906 SourceLocation L) const {
2907 TypeSourceInfo *DI = CreateTypeSourceInfo(T);
2908 DI->getTypeLoc().initialize(const_cast<ASTContext &>(*this), L);
2909 return DI;
2910}
2911
2912const ASTRecordLayout &
2913ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) const {
2914 return getObjCLayout(D, nullptr);
2915}
2916
2917const ASTRecordLayout &
2918ASTContext::getASTObjCImplementationLayout(
2919 const ObjCImplementationDecl *D) const {
2920 return getObjCLayout(D->getClassInterface(), D);
2921}
2922
2923//===----------------------------------------------------------------------===//
2924// Type creation/memoization methods
2925//===----------------------------------------------------------------------===//
2926
2927QualType
2928ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const {
2929 unsigned fastQuals = quals.getFastQualifiers();
2930 quals.removeFastQualifiers();
2931
2932 // Check if we've already instantiated this type.
2933 llvm::FoldingSetNodeID ID;
2934 ExtQuals::Profile(ID, baseType, quals);
2935 void *insertPos = nullptr;
2936 if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, insertPos)) {
2937 assert(eq->getQualifiers() == quals);
2938 return QualType(eq, fastQuals);
2939 }
2940
2941 // If the base type is not canonical, make the appropriate canonical type.
2942 QualType canon;
2943 if (!baseType->isCanonicalUnqualified()) {
2944 SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split();
2945 canonSplit.Quals.addConsistentQualifiers(quals);
2946 canon = getExtQualType(canonSplit.Ty, canonSplit.Quals);
2947
2948 // Re-find the insert position.
2949 (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos);
2950 }
2951
2952 auto *eq = new (*this, TypeAlignment) ExtQuals(baseType, canon, quals);
2953 ExtQualNodes.InsertNode(eq, insertPos);
2954 return QualType(eq, fastQuals);
2955}
2956
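The Profile / FindNodeOrInsertPos / InsertNode sequence above is the standard llvm::FoldingSet memoization pattern that all of these get*Type() factories follow. A minimal standalone sketch of the same pattern with a made-up node type (IntPairNode is hypothetical, for illustration only; ASTContext would bump-allocate the node instead of using new):

    #include "llvm/ADT/FoldingSet.h"

    struct IntPairNode : llvm::FoldingSetNode {
      int First, Second;
      IntPairNode(int F, int S) : First(F), Second(S) {}
      void Profile(llvm::FoldingSetNodeID &ID) const {
        ID.AddInteger(First);
        ID.AddInteger(Second);
      }
    };

    IntPairNode *getOrCreate(llvm::FoldingSet<IntPairNode> &Set, int F, int S) {
      llvm::FoldingSetNodeID ID;
      ID.AddInteger(F);
      ID.AddInteger(S);
      void *InsertPos = nullptr;
      if (IntPairNode *N = Set.FindNodeOrInsertPos(ID, InsertPos))
        return N;                           // already uniqued: return the canonical node
      auto *N = new IntPairNode(F, S);
      Set.InsertNode(N, InsertPos);         // remember it at the probed position
      return N;
    }
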
2957QualType ASTContext::getAddrSpaceQualType(QualType T,
2958 LangAS AddressSpace) const {
2959 QualType CanT = getCanonicalType(T);
2960 if (CanT.getAddressSpace() == AddressSpace)
2961 return T;
2962
2963 // If we are composing extended qualifiers together, merge together
2964 // into one ExtQuals node.
2965 QualifierCollector Quals;
2966 const Type *TypeNode = Quals.strip(T);
2967
2968 // If this type already has an address space specified, it cannot get
2969 // another one.
2970 assert(!Quals.hasAddressSpace() &&
2971 "Type cannot be in multiple addr spaces!");
2972 Quals.addAddressSpace(AddressSpace);
2973
2974 return getExtQualType(TypeNode, Quals);
2975}
2976
2977QualType ASTContext::removeAddrSpaceQualType(QualType T) const {
2978 // If the type is not qualified with an address space, just return it
2979 // immediately.
2980 if (!T.hasAddressSpace())
2981 return T;
2982
2983 // If we are composing extended qualifiers together, merge together
2984 // into one ExtQuals node.
2985 QualifierCollector Quals;
2986 const Type *TypeNode;
2987
2988 while (T.hasAddressSpace()) {
2989 TypeNode = Quals.strip(T);
2990
2991 // If the type no longer has an address space after stripping qualifiers,
2992 // jump out.
2993 if (!QualType(TypeNode, 0).hasAddressSpace())
2994 break;
2995
2996 // There might be sugar in the way. Strip it and try again.
2997 T = T.getSingleStepDesugaredType(*this);
2998 }
2999
3000 Quals.removeAddressSpace();
3001
3002 // Removal of the address space can mean there are no longer any
3003 // non-fast qualifiers, so creating an ExtQualType isn't possible (asserts)
3004 // or required.
3005 if (Quals.hasNonFastQualifiers())
3006 return getExtQualType(TypeNode, Quals);
3007 else
3008 return QualType(TypeNode, Quals.getFastQualifiers());
3009}
3010
3011QualType ASTContext::getObjCGCQualType(QualType T,
3012 Qualifiers::GC GCAttr) const {
3013 QualType CanT = getCanonicalType(T);
3014 if (CanT.getObjCGCAttr() == GCAttr)
3015 return T;
3016
3017 if (const auto *ptr = T->getAs<PointerType>()) {
3018 QualType Pointee = ptr->getPointeeType();
3019 if (Pointee->isAnyPointerType()) {
3020 QualType ResultType = getObjCGCQualType(Pointee, GCAttr);
3021 return getPointerType(ResultType);
3022 }
3023 }
3024
3025 // If we are composing extended qualifiers together, merge together
3026 // into one ExtQuals node.
3027 QualifierCollector Quals;
3028 const Type *TypeNode = Quals.strip(T);
3029
3030 // If this type already has an ObjCGC specified, it cannot get
3031 // another one.
3032 assert(!Quals.hasObjCGCAttr() &&((void)0)
3033 "Type cannot have multiple ObjCGCs!")((void)0);
3034 Quals.addObjCGCAttr(GCAttr);
3035
3036 return getExtQualType(TypeNode, Quals);
3037}
3038
3039QualType ASTContext::removePtrSizeAddrSpace(QualType T) const {
3040 if (const PointerType *Ptr = T->getAs<PointerType>()) {
3041 QualType Pointee = Ptr->getPointeeType();
3042 if (isPtrSizeAddressSpace(Pointee.getAddressSpace())) {
3043 return getPointerType(removeAddrSpaceQualType(Pointee));
3044 }
3045 }
3046 return T;
3047}
3048
3049const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T,
3050 FunctionType::ExtInfo Info) {
3051 if (T->getExtInfo() == Info)
3052 return T;
3053
3054 QualType Result;
3055 if (const auto *FNPT = dyn_cast<FunctionNoProtoType>(T)) {
3056 Result = getFunctionNoProtoType(FNPT->getReturnType(), Info);
3057 } else {
3058 const auto *FPT = cast<FunctionProtoType>(T);
3059 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
3060 EPI.ExtInfo = Info;
3061 Result = getFunctionType(FPT->getReturnType(), FPT->getParamTypes(), EPI);
3062 }
3063
3064 return cast<FunctionType>(Result.getTypePtr());
3065}
3066
3067void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD,
3068 QualType ResultType) {
3069 FD = FD->getMostRecentDecl();
3070 while (true) {
3071 const auto *FPT = FD->getType()->castAs<FunctionProtoType>();
3072 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
3073 FD->setType(getFunctionType(ResultType, FPT->getParamTypes(), EPI));
3074 if (FunctionDecl *Next = FD->getPreviousDecl())
3075 FD = Next;
3076 else
3077 break;
3078 }
3079 if (ASTMutationListener *L = getASTMutationListener())
3080 L->DeducedReturnType(FD, ResultType);
3081}
3082
3083/// Get a function type and produce the equivalent function type with the
3084/// specified exception specification. Type sugar that can be present on a
3085/// declaration of a function with an exception specification is permitted
3086/// and preserved. Other type sugar (for instance, typedefs) is not.
3087QualType ASTContext::getFunctionTypeWithExceptionSpec(
3088 QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) {
3089 // Might have some parens.
3090 if (const auto *PT = dyn_cast<ParenType>(Orig))
3091 return getParenType(
3092 getFunctionTypeWithExceptionSpec(PT->getInnerType(), ESI));
3093
3094 // Might be wrapped in a macro qualified type.
3095 if (const auto *MQT = dyn_cast<MacroQualifiedType>(Orig))
3096 return getMacroQualifiedType(
3097 getFunctionTypeWithExceptionSpec(MQT->getUnderlyingType(), ESI),
3098 MQT->getMacroIdentifier());
3099
3100 // Might have a calling-convention attribute.
3101 if (const auto *AT = dyn_cast<AttributedType>(Orig))
3102 return getAttributedType(
3103 AT->getAttrKind(),
3104 getFunctionTypeWithExceptionSpec(AT->getModifiedType(), ESI),
3105 getFunctionTypeWithExceptionSpec(AT->getEquivalentType(), ESI));
3106
3107 // Anything else must be a function type. Rebuild it with the new exception
3108 // specification.
3109 const auto *Proto = Orig->castAs<FunctionProtoType>();
3110 return getFunctionType(
3111 Proto->getReturnType(), Proto->getParamTypes(),
3112 Proto->getExtProtoInfo().withExceptionSpec(ESI));
3113}
3114
3115bool ASTContext::hasSameFunctionTypeIgnoringExceptionSpec(QualType T,
3116 QualType U) {
3117 return hasSameType(T, U) ||
3118 (getLangOpts().CPlusPlus17 &&
3119 hasSameType(getFunctionTypeWithExceptionSpec(T, EST_None),
3120 getFunctionTypeWithExceptionSpec(U, EST_None)));
3121}
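Since C++17 the exception specification participates in the function type itself, which is why the hasSameType check above is only relaxed under getLangOpts().CPlusPlus17. A minimal standalone C++17 sketch of that language rule (illustrative only, not part of ASTContext.cpp):

// Compile with -std=c++17: noexcept is part of the function type.
#include <type_traits>
void f() noexcept {}
void g() {}
static_assert(!std::is_same_v<decltype(f), decltype(g)>,
              "distinct types since C++17");
void (*p)() = f;              // OK: a noexcept pointer converts to non-noexcept
// void (*q)() noexcept = g;  // error: would silently drop the guarantee
int main() { p(); }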
3122
3123QualType ASTContext::getFunctionTypeWithoutPtrSizes(QualType T) {
3124 if (const auto *Proto = T->getAs<FunctionProtoType>()) {
3125 QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType());
3126 SmallVector<QualType, 16> Args(Proto->param_types());
3127 for (unsigned i = 0, n = Args.size(); i != n; ++i)
3128 Args[i] = removePtrSizeAddrSpace(Args[i]);
3129 return getFunctionType(RetTy, Args, Proto->getExtProtoInfo());
3130 }
3131
3132 if (const FunctionNoProtoType *Proto = T->getAs<FunctionNoProtoType>()) {
3133 QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType());
3134 return getFunctionNoProtoType(RetTy, Proto->getExtInfo());
3135 }
3136
3137 return T;
3138}
3139
3140bool ASTContext::hasSameFunctionTypeIgnoringPtrSizes(QualType T, QualType U) {
3141 return hasSameType(T, U) ||
3142 hasSameType(getFunctionTypeWithoutPtrSizes(T),
3143 getFunctionTypeWithoutPtrSizes(U));
3144}
3145
3146void ASTContext::adjustExceptionSpec(
3147 FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI,
3148 bool AsWritten) {
3149 // Update the type.
3150 QualType Updated =
3151 getFunctionTypeWithExceptionSpec(FD->getType(), ESI);
3152 FD->setType(Updated);
3153
3154 if (!AsWritten)
3155 return;
3156
3157 // Update the type in the type source information too.
3158 if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) {
3159 // If the type and the type-as-written differ, we may need to update
3160 // the type-as-written too.
3161 if (TSInfo->getType() != FD->getType())
3162 Updated = getFunctionTypeWithExceptionSpec(TSInfo->getType(), ESI);
3163
3164 // FIXME: When we get proper type location information for exceptions,
3165 // we'll also have to rebuild the TypeSourceInfo. For now, we just patch
3166 // up the TypeSourceInfo.
3167 assert(TypeLoc::getFullDataSizeForType(Updated) ==((void)0)
3168 TypeLoc::getFullDataSizeForType(TSInfo->getType()) &&((void)0)
3169 "TypeLoc size mismatch from updating exception specification")((void)0);
3170 TSInfo->overrideType(Updated);
3171 }
3172}
3173
3174/// getComplexType - Return the uniqued reference to the type for a complex
3175/// number with the specified element type.
3176QualType ASTContext::getComplexType(QualType T) const {
3177 // Unique pointers, to guarantee there is only one pointer of a particular
3178 // structure.
3179 llvm::FoldingSetNodeID ID;
3180 ComplexType::Profile(ID, T);
3181
3182 void *InsertPos = nullptr;
3183 if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos))
3184 return QualType(CT, 0);
3185
3186 // If the pointee type isn't canonical, this won't be a canonical type either,
3187 // so fill in the canonical type field.
3188 QualType Canonical;
3189 if (!T.isCanonical()) {
3190 Canonical = getComplexType(getCanonicalType(T));
3191
3192 // Get the new insert position for the node we care about.
3193 ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos);
3194 assert(!NewIP && "Shouldn't be in the map!")((void)0); (void)NewIP;
3195 }
3196 auto *New = new (*this, TypeAlignment) ComplexType(T, Canonical);
3197 Types.push_back(New);
3198 ComplexTypes.InsertNode(New, InsertPos);
3199 return QualType(New, 0);
3200}
3201
3202/// getPointerType - Return the uniqued reference to the type for a pointer to
3203/// the specified type.
3204QualType ASTContext::getPointerType(QualType T) const {
3205 // Unique pointers, to guarantee there is only one pointer of a particular
3206 // structure.
3207 llvm::FoldingSetNodeID ID;
3208 PointerType::Profile(ID, T);
3209
3210 void *InsertPos = nullptr;
3211 if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos))
3212 return QualType(PT, 0);
3213
3214 // If the pointee type isn't canonical, this won't be a canonical type either,
3215 // so fill in the canonical type field.
3216 QualType Canonical;
3217 if (!T.isCanonical()) {
3218 Canonical = getPointerType(getCanonicalType(T));
3219
3220 // Get the new insert position for the node we care about.
3221 PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos);
3222 assert(!NewIP && "Shouldn't be in the map!")((void)0); (void)NewIP;
3223 }
3224 auto *New = new (*this, TypeAlignment) PointerType(T, Canonical);
3225 Types.push_back(New);
3226 PointerTypes.InsertNode(New, InsertPos);
3227 return QualType(New, 0);
3228}
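getComplexType and getPointerType above are instances of the memoization idiom used throughout this section: Profile the node into a FoldingSetNodeID, probe the set, and only allocate on a miss, re-finding the insert position after any recursive canonicalization that may have touched the set. A minimal standalone sketch of that idiom with a made-up node type (assumes LLVM's ADT headers are available; illustrative only):

#include "llvm/ADT/FoldingSet.h"

struct DemoNode : llvm::FoldingSetNode {
  int Key;
  explicit DemoNode(int K) : Key(K) {}
  void Profile(llvm::FoldingSetNodeID &ID) { ID.AddInteger(Key); }
};

static DemoNode *getOrCreate(llvm::FoldingSet<DemoNode> &Uniqued, int Key) {
  llvm::FoldingSetNodeID ID;
  ID.AddInteger(Key);                        // same fields as DemoNode::Profile
  void *InsertPos = nullptr;
  if (DemoNode *Existing = Uniqued.FindNodeOrInsertPos(ID, InsertPos))
    return Existing;                         // hit: reuse the uniqued node
  auto *New = new DemoNode(Key);             // miss: build it exactly once
  Uniqued.InsertNode(New, InsertPos);        // remember it at the saved position
  return New;
}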
3229
3230QualType ASTContext::getAdjustedType(QualType Orig, QualType New) const {
3231 llvm::FoldingSetNodeID ID;
3232 AdjustedType::Profile(ID, Orig, New);
3233 void *InsertPos = nullptr;
3234 AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
3235 if (AT)
3236 return QualType(AT, 0);
3237
3238 QualType Canonical = getCanonicalType(New);
3239
3240 // Get the new insert position for the node we care about.
3241 AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
Value stored to 'AT' is never read
3242 assert(!AT && "Shouldn't be in the map!")((void)0);
3243
3244 AT = new (*this, TypeAlignment)
3245 AdjustedType(Type::Adjusted, Orig, New, Canonical);
3246 Types.push_back(AT);
3247 AdjustedTypes.InsertNode(AT, InsertPos);
3248 return QualType(AT, 0);
3249}
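The dead store reported at line 3241 only shows up because asserts compile away in this build (every assert in the listing expands to ((void)0)), so the value re-stored into 'AT' is never read before it is overwritten. The neighbouring getters avoid this by asserting on a separate local and explicitly voiding it; a hedged sketch of that shape applied to getAdjustedType (not the committed upstream patch) would be:

  QualType Canonical = getCanonicalType(New);

  // Get the new insert position for the node we care about.
  AdjustedType *NewIP = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  assert(!NewIP && "Shouldn't be in the map!");
  (void)NewIP;   // keep the result "read" even when NDEBUG removes the assert

  AT = new (*this, TypeAlignment)
      AdjustedType(Type::Adjusted, Orig, New, Canonical);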
3250
3251QualType ASTContext::getDecayedType(QualType T) const {
3252 assert((T->isArrayType() || T->isFunctionType()) && "T does not decay")((void)0);
3253
3254 QualType Decayed;
3255
3256 // C99 6.7.5.3p7:
3257 // A declaration of a parameter as "array of type" shall be
3258 // adjusted to "qualified pointer to type", where the type
3259 // qualifiers (if any) are those specified within the [ and ] of
3260 // the array type derivation.
3261 if (T->isArrayType())
3262 Decayed = getArrayDecayedType(T);
3263
3264 // C99 6.7.5.3p8:
3265 // A declaration of a parameter as "function returning type"
3266 // shall be adjusted to "pointer to function returning type", as
3267 // in 6.3.2.1.
3268 if (T->isFunctionType())
3269 Decayed = getPointerType(T);
3270
3271 llvm::FoldingSetNodeID ID;
3272 AdjustedType::Profile(ID, T, Decayed);
3273 void *InsertPos = nullptr;
3274 AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
3275 if (AT)
3276 return QualType(AT, 0);
3277
3278 QualType Canonical = getCanonicalType(Decayed);
3279
3280 // Get the new insert position for the node we care about.
3281 AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
3282 assert(!AT && "Shouldn't be in the map!")((void)0);
3283
3284 AT = new (*this, TypeAlignment) DecayedType(T, Decayed, Canonical);
3285 Types.push_back(AT);
3286 AdjustedTypes.InsertNode(AT, InsertPos);
3287 return QualType(AT, 0);
3288}
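The two C99 rules quoted in the comments are the usual parameter adjustments: an array parameter decays to a pointer to its element type, and a function-typed parameter decays to a function pointer. A small standalone C++ illustration of the resulting types (not part of this file; std::decay applies the same adjustments):

#include <type_traits>

void takes_array(int a[10]);    // adjusted to: void takes_array(int *a)
void takes_func(int f(char));   // adjusted to: void takes_func(int (*f)(char))

static_assert(std::is_same<std::decay_t<int[10]>, int *>::value,
              "array of T decays to pointer to T");
static_assert(std::is_same<std::decay_t<int(char)>, int (*)(char)>::value,
              "function type decays to pointer to function");

int main() {}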
3289
3290/// getBlockPointerType - Return the uniqued reference to the type for
3291/// a pointer to the specified block.
3292QualType ASTContext::getBlockPointerType(QualType T) const {
3293 assert(T->isFunctionType() && "block of function types only")((void)0);
3294 // Unique pointers, to guarantee there is only one block of a particular
3295 // structure.
3296 llvm::FoldingSetNodeID ID;
3297 BlockPointerType::Profile(ID, T);
3298
3299 void *InsertPos = nullptr;
3300 if (BlockPointerType *PT =
3301 BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
3302 return QualType(PT, 0);
3303
3304 // If the block pointee type isn't canonical, this won't be a canonical
3305 // type either so fill in the canonical type field.
3306 QualType Canonical;
3307 if (!T.isCanonical()) {
3308 Canonical = getBlockPointerType(getCanonicalType(T));
3309
3310 // Get the new insert position for the node we care about.
3311 BlockPointerType *NewIP =
3312 BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
3313 assert(!NewIP && "Shouldn't be in the map!")((void)0); (void)NewIP;
3314 }
3315 auto *New = new (*this, TypeAlignment) BlockPointerType(T, Canonical);
3316 Types.push_back(New);
3317 BlockPointerTypes.InsertNode(New, InsertPos);
3318 return QualType(New, 0);
3319}
3320
3321/// getLValueReferenceType - Return the uniqued reference to the type for an
3322/// lvalue reference to the specified type.
3323QualType
3324ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const {
3325 assert(getCanonicalType(T) != OverloadTy &&((void)0)
3326 "Unresolved overloaded function type")((void)0);
3327
3328 // Unique pointers, to guarantee there is only one pointer of a particular
3329 // structure.
3330 llvm::FoldingSetNodeID ID;
3331 ReferenceType::Profile(ID, T, SpelledAsLValue);
3332
3333 void *InsertPos = nullptr;
3334 if (LValueReferenceType *RT =
3335 LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
3336 return QualType(RT, 0);
3337
3338 const auto *InnerRef = T->getAs<ReferenceType>();
3339
3340 // If the referencee type isn't canonical, this won't be a canonical type
3341 // either, so fill in the canonical type field.
3342 QualType Canonical;
3343 if (!SpelledAsLValue || InnerRef || !T.isCanonical()) {
3344 QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
3345 Canonical = getLValueReferenceType(getCanonicalType(PointeeType));
3346
3347 // Get the new insert position for the node we care about.
3348 LValueReferenceType *NewIP =
3349 LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
3350 assert(!NewIP && "Shouldn't be in the map!")((void)0); (void)NewIP;
3351 }
3352
3353 auto *New = new (*this, TypeAlignment) LValueReferenceType(T, Canonical,
3354 SpelledAsLValue);
3355 Types.push_back(New);
3356 LValueReferenceTypes.InsertNode(New, InsertPos);
3357
3358 return QualType(New, 0);
3359}
3360
3361/// getRValueReferenceType - Return the uniqued reference to the type for an
3362/// rvalue reference to the specified type.
3363QualType ASTContext::getRValueReferenceType(QualType T) const {
3364 // Unique pointers, to guarantee there is only one pointer of a particular
3365 // structure.
3366 llvm::FoldingSetNodeID ID;
3367 ReferenceType::Profile(ID, T, false);
3368
3369 void *InsertPos = nullptr;
3370 if (RValueReferenceType *RT =
3371 RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
3372 return QualType(RT, 0);
3373
3374 const auto *InnerRef = T->getAs<ReferenceType>();
3375
3376 // If the referencee type isn't canonical, this won't be a canonical type
3377 // either, so fill in the canonical type field.
3378 QualType Canonical;
3379 if (InnerRef || !T.isCanonical()) {
3380 QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
3381 Canonical = getRValueReferenceType(getCanonicalType(PointeeType));
3382
3383 // Get the new insert position for the node we care about.
3384 RValueReferenceType *NewIP =
3385 RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
3386 assert(!NewIP && "Shouldn't be in the map!")((void)0); (void)NewIP;
3387 }
3388
3389 auto *New = new (*this, TypeAlignment) RValueReferenceType(T, Canonical);
3390 Types.push_back(New);
3391 RValueReferenceTypes.InsertNode(New, InsertPos);
3392 return QualType(New, 0);
3393}
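Both reference getters strip an inner reference before forming the canonical type, so the AST never holds a canonical reference-to-reference. In the language itself such types can only arise through typedefs or template parameters and then collapse per [dcl.ref]; a standalone C++ sketch of those collapsing rules (background only, not a claim about this function's exact canonicalization):

#include <type_traits>

using LRef = int &;
using RRef = int &&;
static_assert(std::is_same<LRef &, int &>::value,   "& of & collapses to &");
static_assert(std::is_same<LRef &&, int &>::value,  "&& of & collapses to &");
static_assert(std::is_same<RRef &, int &>::value,   "& of && collapses to &");
static_assert(std::is_same<RRef &&, int &&>::value, "&& of && stays &&");

int main() {}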
3394
3395/// getMemberPointerType - Return the uniqued reference to the type for a
3396/// member pointer to the specified type, in the specified class.
3397QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) const {
3398 // Unique pointers, to guarantee there is only one pointer of a particular
3399 // structure.
3400 llvm::FoldingSetNodeID ID;
3401 MemberPointerType::Profile(ID, T, Cls);
3402
3403 void *InsertPos = nullptr;
3404 if (MemberPointerType *PT =
3405 MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
3406 return QualType(PT, 0);
3407
3408 // If the pointee or class type isn't canonical, this won't be a canonical
3409 // type either, so fill in the canonical type field.
3410 QualType Canonical;
3411 if (!T.isCanonical() || !Cls->isCanonicalUnqualified()) {
3412 Canonical = getMemberPointerType(getCanonicalType(T),getCanonicalType(Cls));
3413
3414 // Get the new insert position for the node we care about.
3415 MemberPointerType *NewIP =
3416 MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
3417 assert(!NewIP && "Shouldn't be in the map!")((void)0); (void)NewIP;
3418 }
3419 auto *New = new (*this, TypeAlignment) MemberPointerType(T, Cls, Canonical);
3420 Types.push_back(New);
3421 MemberPointerTypes.InsertNode(New, InsertPos);
3422 return QualType(New, 0);
3423}
3424
3425/// getConstantArrayType - Return the unique reference to the type for an
3426/// array of the specified element type.
3427QualType ASTContext::getConstantArrayType(QualType EltTy,
3428 const llvm::APInt &ArySizeIn,
3429 const Expr *SizeExpr,
3430 ArrayType::ArraySizeModifier ASM,
3431 unsigned IndexTypeQuals) const {
3432 assert((EltTy->isDependentType() ||((void)0)
3433 EltTy->isIncompleteType() || EltTy->isConstantSizeType()) &&((void)0)
3434 "Constant array of VLAs is illegal!")((void)0);
3435
3436 // We only need the size as part of the type if it's instantiation-dependent.
3437 if (SizeExpr && !SizeExpr->isInstantiationDependent())
3438 SizeExpr = nullptr;
3439
3440 // Convert the array size into a canonical width matching the pointer size for
3441 // the target.
3442 llvm::APInt ArySize(ArySizeIn);
3443 ArySize = ArySize.zextOrTrunc(Target->getMaxPointerWidth());
3444
3445 llvm::FoldingSetNodeID ID;
3446 ConstantArrayType::Profile(ID, *this, EltTy, ArySize, SizeExpr, ASM,
3447 IndexTypeQuals);
3448
3449 void *InsertPos = nullptr;
3450 if (ConstantArrayType *ATP =
3451 ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos))
3452 return QualType(ATP, 0);
3453
3454 // If the element type isn't canonical or has qualifiers, or the array bound
3455 // is instantiation-dependent, this won't be a canonical type either, so fill
3456 // in the canonical type field.
3457 QualType Canon;
3458 if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers() || SizeExpr) {
3459 SplitQualType canonSplit = getCanonicalType(EltTy).split();
3460 Canon = getConstantArrayType(QualType(canonSplit.Ty, 0), ArySize, nullptr,
3461 ASM, IndexTypeQuals);
3462 Canon = getQualifiedType(Canon, canonSplit.Quals);
3463
3464 // Get the new insert position for the node we care about.
3465 ConstantArrayType *NewIP =
3466 ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos);
3467 assert(!NewIP && "Shouldn't be in the map!")((void)0); (void)NewIP;
3468 }
3469
3470 void *Mem = Allocate(
3471 ConstantArrayType::totalSizeToAlloc<const Expr *>(SizeExpr ? 1 : 0),
3472 TypeAlignment);
3473 auto *New = new (Mem)
3474 ConstantArrayType(EltTy, Canon, ArySize, SizeExpr, ASM, IndexTypeQuals);
3475 ConstantArrayTypes.InsertNode(New, InsertPos);
3476 Types.push_back(New);
3477 return QualType(New, 0);
3478}
3479
3480/// getVariableArrayDecayedType - Turns the given type, which may be
3481/// variably-modified, into the corresponding type with all the known
3482/// sizes replaced with [*].
3483QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
3484 // By far the most common case.
3485 if (!type->isVariablyModifiedType()) return type;
3486
3487 QualType result;
3488
3489 SplitQualType split = type.getSplitDesugaredType();
3490 const Type *ty = split.Ty;
3491 switch (ty->getTypeClass()) {
3492#define TYPE(Class, Base)
3493#define ABSTRACT_TYPE(Class, Base)
3494#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3495#include "clang/AST/TypeNodes.inc"
3496 llvm_unreachable("didn't desugar past all non-canonical types?")__builtin_unreachable();
3497
3498 // These types should never be variably-modified.
3499 case Type::Builtin:
3500 case Type::Complex:
3501 case Type::Vector:
3502 case Type::DependentVector:
3503 case Type::ExtVector:
3504 case Type::DependentSizedExtVector:
3505 case Type::ConstantMatrix:
3506 case Type::DependentSizedMatrix:
3507 case Type::DependentAddressSpace:
3508 case Type::ObjCObject:
3509 case Type::ObjCInterface:
3510 case Type::ObjCObjectPointer:
3511 case Type::Record:
3512 case Type::Enum:
3513 case Type::UnresolvedUsing:
3514 case Type::TypeOfExpr:
3515 case Type::TypeOf:
3516 case Type::Decltype:
3517 case Type::UnaryTransform:
3518 case Type::DependentName:
3519 case Type::InjectedClassName:
3520 case Type::TemplateSpecialization:
3521 case Type::DependentTemplateSpecialization:
3522 case Type::TemplateTypeParm:
3523 case Type::SubstTemplateTypeParmPack:
3524 case Type::Auto:
3525 case Type::DeducedTemplateSpecialization:
3526 case Type::PackExpansion:
3527 case Type::ExtInt:
3528 case Type::DependentExtInt:
3529 llvm_unreachable("type should never be variably-modified")__builtin_unreachable();
3530
3531 // These types can be variably-modified but should never need to
3532 // further decay.
3533 case Type::FunctionNoProto:
3534 case Type::FunctionProto:
3535 case Type::BlockPointer:
3536 case Type::MemberPointer:
3537 case Type::Pipe:
3538 return type;
3539
3540 // These types can be variably-modified. All these modifications
3541 // preserve structure except as noted by comments.
3542 // TODO: if we ever care about optimizing VLAs, there are no-op
3543 // optimizations available here.
3544 case Type::Pointer:
3545 result = getPointerType(getVariableArrayDecayedType(
3546 cast<PointerType>(ty)->getPointeeType()));
3547 break;
3548
3549 case Type::LValueReference: {
3550 const auto *lv = cast<LValueReferenceType>(ty);
3551 result = getLValueReferenceType(
3552 getVariableArrayDecayedType(lv->getPointeeType()),
3553 lv->isSpelledAsLValue());
3554 break;
3555 }
3556
3557 case Type::RValueReference: {
3558 const auto *lv = cast<RValueReferenceType>(ty);
3559 result = getRValueReferenceType(
3560 getVariableArrayDecayedType(lv->getPointeeType()));
3561 break;
3562 }
3563
3564 case Type::Atomic: {
3565 const auto *at = cast<AtomicType>(ty);
3566 result = getAtomicType(getVariableArrayDecayedType(at->getValueType()));
3567 break;
3568 }
3569
3570 case Type::ConstantArray: {
3571 const auto *cat = cast<ConstantArrayType>(ty);
3572 result = getConstantArrayType(
3573 getVariableArrayDecayedType(cat->getElementType()),
3574 cat->getSize(),
3575 cat->getSizeExpr(),
3576 cat->getSizeModifier(),
3577 cat->getIndexTypeCVRQualifiers());
3578 break;
3579 }
3580
3581 case Type::DependentSizedArray: {
3582 const auto *dat = cast<DependentSizedArrayType>(ty);
3583 result = getDependentSizedArrayType(
3584 getVariableArrayDecayedType(dat->getElementType()),
3585 dat->getSizeExpr(),
3586 dat->getSizeModifier(),
3587 dat->getIndexTypeCVRQualifiers(),
3588 dat->getBracketsRange());
3589 break;
3590 }
3591
3592 // Turn incomplete types into [*] types.
3593 case Type::IncompleteArray: {
3594 const auto *iat = cast<IncompleteArrayType>(ty);
3595 result = getVariableArrayType(
3596 getVariableArrayDecayedType(iat->getElementType()),
3597 /*size*/ nullptr,
3598 ArrayType::Normal,
3599 iat->getIndexTypeCVRQualifiers(),
3600 SourceRange());
3601 break;
3602 }
3603
3604 // Turn VLA types into [*] types.
3605 case Type::VariableArray: {
3606 const auto *vat = cast<VariableArrayType>(ty);
3607 result = getVariableArrayType(
3608 getVariableArrayDecayedType(vat->getElementType()),
3609 /*size*/ nullptr,
3610 ArrayType::Star,
3611 vat->getIndexTypeCVRQualifiers(),
3612 vat->getBracketsRange());
3613 break;
3614 }
3615 }
3616
3617 // Apply the top-level qualifiers from the original.
3618 return getQualifiedType(result, split.Quals);
3619}
3620
3621/// getVariableArrayType - Returns a non-unique reference to the type for a
3622/// variable array of the specified element type.
3623QualType ASTContext::getVariableArrayType(QualType EltTy,
3624 Expr *NumElts,
3625 ArrayType::ArraySizeModifier ASM,
3626 unsigned IndexTypeQuals,
3627 SourceRange Brackets) const {
3628 // Since we don't unique expressions, it isn't possible to unique VLAs
3629 // that have an expression provided for their size.
3630 QualType Canon;
3631
3632 // Be sure to pull qualifiers off the element type.
3633 if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
3634 SplitQualType canonSplit = getCanonicalType(EltTy).split();
3635 Canon = getVariableArrayType(QualType(canonSplit.Ty, 0), NumElts, ASM,
3636 IndexTypeQuals, Brackets);
3637 Canon = getQualifiedType(Canon, canonSplit.Quals);
3638 }
3639
3640 auto *New = new (*this, TypeAlignment)
3641 VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets);
3642
3643 VariableArrayTypes.push_back(New);
3644 Types.push_back(New);
3645 return QualType(New, 0);
3646}
3647
3648/// getDependentSizedArrayType - Returns a non-unique reference to
3649/// the type for a dependently-sized array of the specified element
3650/// type.
3651QualType ASTContext::getDependentSizedArrayType(QualType elementType,
3652 Expr *numElements,
3653 ArrayType::ArraySizeModifier ASM,
3654 unsigned elementTypeQuals,
3655 SourceRange brackets) const {
3656 assert((!numElements || numElements->isTypeDependent() ||((void)0)
3657 numElements->isValueDependent()) &&((void)0)
3658 "Size must be type- or value-dependent!")((void)0);
3659
3660 // Dependently-sized array types that do not have a specified number
3661 // of elements will have their sizes deduced from a dependent
3662 // initializer. We do no canonicalization here at all, which is okay
3663 // because they can't be used in most locations.
3664 if (!numElements) {
3665 auto *newType
3666 = new (*this, TypeAlignment)
3667 DependentSizedArrayType(*this, elementType, QualType(),
3668 numElements, ASM, elementTypeQuals,
3669 brackets);
3670 Types.push_back(newType);
3671 return QualType(newType, 0);
3672 }
3673
3674 // Otherwise, we actually build a new type every time, but we
3675 // also build a canonical type.
3676
3677 SplitQualType canonElementType = getCanonicalType(elementType).split();
3678
3679 void *insertPos = nullptr;
3680 llvm::FoldingSetNodeID ID;
3681 DependentSizedArrayType::Profile(ID, *this,
3682 QualType(canonElementType.Ty, 0),
3683 ASM, elementTypeQuals, numElements);
3684
3685 // Look for an existing type with these properties.
3686 DependentSizedArrayType *canonTy =
3687 DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos);
3688
3689 // If we don't have one, build one.
3690 if (!canonTy) {
3691 canonTy = new (*this, TypeAlignment)
3692 DependentSizedArrayType(*this, QualType(canonElementType.Ty, 0),
3693 QualType(), numElements, ASM, elementTypeQuals,
3694 brackets);
3695 DependentSizedArrayTypes.InsertNode(canonTy, insertPos);
3696 Types.push_back(canonTy);
3697 }
3698
3699 // Apply qualifiers from the element type to the array.
3700 QualType canon = getQualifiedType(QualType(canonTy,0),
3701 canonElementType.Quals);
3702
3703 // If we didn't need extra canonicalization for the element type or the size
3704 // expression, then just use that as our result.
3705 if (QualType(canonElementType.Ty, 0) == elementType &&
3706 canonTy->getSizeExpr() == numElements)
3707 return canon;
3708
3709 // Otherwise, we need to build a type which follows the spelling
3710 // of the element type.
3711 auto *sugaredType
3712 = new (*this, TypeAlignment)
3713 DependentSizedArrayType(*this, elementType, canon, numElements,
3714 ASM, elementTypeQuals, brackets);
3715 Types.push_back(sugaredType);
3716 return QualType(sugaredType, 0);
3717}
3718
3719QualType ASTContext::getIncompleteArrayType(QualType elementType,
3720 ArrayType::ArraySizeModifier ASM,
3721 unsigned elementTypeQuals) const {
3722 llvm::FoldingSetNodeID ID;
3723 IncompleteArrayType::Profile(ID, elementType, ASM, elementTypeQuals);
3724
3725 void *insertPos = nullptr;
3726 if (IncompleteArrayType *iat =
3727 IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos))
3728 return QualType(iat, 0);
3729
3730 // If the element type isn't canonical, this won't be a canonical type
3731 // either, so fill in the canonical type field. We also have to pull
3732 // qualifiers off the element type.
3733 QualType canon;
3734
3735 if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) {
3736 SplitQualType canonSplit = getCanonicalType(elementType).split();
3737 canon = getIncompleteArrayType(QualType(canonSplit.Ty, 0),
3738 ASM, elementTypeQuals);
3739 canon = getQualifiedType(canon, canonSplit.Quals);
3740
3741 // Get the new insert position for the node we care about.
3742 IncompleteArrayType *existing =
3743 IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos);
3744 assert(!existing && "Shouldn't be in the map!")((void)0); (void) existing;
3745 }
3746
3747 auto *newType = new (*this, TypeAlignment)
3748 IncompleteArrayType(elementType, canon, ASM, elementTypeQuals);
3749
3750 IncompleteArrayTypes.InsertNode(newType, insertPos);
3751 Types.push_back(newType);
3752 return QualType(newType, 0);
3753}
3754
3755ASTContext::BuiltinVectorTypeInfo
3756ASTContext::getBuiltinVectorTypeInfo(const BuiltinType *Ty) const {
3757#define SVE_INT_ELTTY(BITS, ELTS, SIGNED, NUMVECTORS){getIntTypeForBitwidth(BITS, SIGNED), llvm::ElementCount::getScalable(ELTS), NUMVECTORS}; \
3758 {getIntTypeForBitwidth(BITS, SIGNED), llvm::ElementCount::getScalable(ELTS), \
3759 NUMVECTORS};
3760
3761#define SVE_ELTTY(ELTTY, ELTS, NUMVECTORS){ELTTY, llvm::ElementCount::getScalable(ELTS), NUMVECTORS}; \
3762 {ELTTY, llvm::ElementCount::getScalable(ELTS), NUMVECTORS};
3763
3764 switch (Ty->getKind()) {
3765 default:
3766 llvm_unreachable("Unsupported builtin vector type")__builtin_unreachable();
3767 case BuiltinType::SveInt8:
3768 return SVE_INT_ELTTY(8, 16, true, 1){getIntTypeForBitwidth(8, true), llvm::ElementCount::getScalable(16), 1};;
3769 case BuiltinType::SveUint8:
3770 return SVE_INT_ELTTY(8, 16, false, 1){getIntTypeForBitwidth(8, false), llvm::ElementCount::getScalable(16), 1};;
3771 case BuiltinType::SveInt8x2:
3772 return SVE_INT_ELTTY(8, 16, true, 2){getIntTypeForBitwidth(8, true), llvm::ElementCount::getScalable(16), 2};;
3773 case BuiltinType::SveUint8x2:
3774 return SVE_INT_ELTTY(8, 16, false, 2){getIntTypeForBitwidth(8, false), llvm::ElementCount::getScalable(16), 2};;
3775 case BuiltinType::SveInt8x3:
3776 return SVE_INT_ELTTY(8, 16, true, 3){getIntTypeForBitwidth(8, true), llvm::ElementCount::getScalable(16), 3};;
3777 case BuiltinType::SveUint8x3:
3778 return SVE_INT_ELTTY(8, 16, false, 3){getIntTypeForBitwidth(8, false), llvm::ElementCount::getScalable(16), 3};;
3779 case BuiltinType::SveInt8x4:
3780 return SVE_INT_ELTTY(8, 16, true, 4){getIntTypeForBitwidth(8, true), llvm::ElementCount::getScalable(16), 4};;
3781 case BuiltinType::SveUint8x4:
3782 return SVE_INT_ELTTY(8, 16, false, 4){getIntTypeForBitwidth(8, false), llvm::ElementCount::getScalable(16), 4};;
3783 case BuiltinType::SveInt16:
3784 return SVE_INT_ELTTY(16, 8, true, 1){getIntTypeForBitwidth(16, true), llvm::ElementCount::getScalable(8), 1};;
3785 case BuiltinType::SveUint16:
3786 return SVE_INT_ELTTY(16, 8, false, 1){getIntTypeForBitwidth(16, false), llvm::ElementCount::getScalable(8), 1};;
3787 case BuiltinType::SveInt16x2:
3788 return SVE_INT_ELTTY(16, 8, true, 2){getIntTypeForBitwidth(16, true), llvm::ElementCount::getScalable(8), 2};;
3789 case BuiltinType::SveUint16x2:
3790 return SVE_INT_ELTTY(16, 8, false, 2){getIntTypeForBitwidth(16, false), llvm::ElementCount::getScalable(8), 2};;
3791 case BuiltinType::SveInt16x3:
3792 return SVE_INT_ELTTY(16, 8, true, 3){getIntTypeForBitwidth(16, true), llvm::ElementCount::getScalable(8), 3};;
3793 case BuiltinType::SveUint16x3:
3794 return SVE_INT_ELTTY(16, 8, false, 3){getIntTypeForBitwidth(16, false), llvm::ElementCount::getScalable(8), 3};;
3795 case BuiltinType::SveInt16x4:
3796 return SVE_INT_ELTTY(16, 8, true, 4){getIntTypeForBitwidth(16, true), llvm::ElementCount::getScalable(8), 4};;
3797 case BuiltinType::SveUint16x4:
3798 return SVE_INT_ELTTY(16, 8, false, 4){getIntTypeForBitwidth(16, false), llvm::ElementCount::getScalable(8), 4};;
3799 case BuiltinType::SveInt32:
3800 return SVE_INT_ELTTY(32, 4, true, 1){getIntTypeForBitwidth(32, true), llvm::ElementCount::getScalable(4), 1};;
3801 case BuiltinType::SveUint32:
3802 return SVE_INT_ELTTY(32, 4, false, 1){getIntTypeForBitwidth(32, false), llvm::ElementCount::getScalable(4), 1};;
3803 case BuiltinType::SveInt32x2:
3804 return SVE_INT_ELTTY(32, 4, true, 2){getIntTypeForBitwidth(32, true), llvm::ElementCount::getScalable(4), 2};;
3805 case BuiltinType::SveUint32x2:
3806 return SVE_INT_ELTTY(32, 4, false, 2){getIntTypeForBitwidth(32, false), llvm::ElementCount::getScalable(4), 2};;
3807 case BuiltinType::SveInt32x3:
3808 return SVE_INT_ELTTY(32, 4, true, 3){getIntTypeForBitwidth(32, true), llvm::ElementCount::getScalable(4), 3};;
3809 case BuiltinType::SveUint32x3:
3810 return SVE_INT_ELTTY(32, 4, false, 3){getIntTypeForBitwidth(32, false), llvm::ElementCount::getScalable(4), 3};;
3811 case BuiltinType::SveInt32x4:
3812 return SVE_INT_ELTTY(32, 4, true, 4){getIntTypeForBitwidth(32, true), llvm::ElementCount::getScalable(4), 4};;
3813 case BuiltinType::SveUint32x4:
3814 return SVE_INT_ELTTY(32, 4, false, 4){getIntTypeForBitwidth(32, false), llvm::ElementCount::getScalable(4), 4};;
3815 case BuiltinType::SveInt64:
3816 return SVE_INT_ELTTY(64, 2, true, 1){getIntTypeForBitwidth(64, true), llvm::ElementCount::getScalable(2), 1};;
3817 case BuiltinType::SveUint64:
3818 return SVE_INT_ELTTY(64, 2, false, 1){getIntTypeForBitwidth(64, false), llvm::ElementCount::getScalable(2), 1};;
3819 case BuiltinType::SveInt64x2:
3820 return SVE_INT_ELTTY(64, 2, true, 2){getIntTypeForBitwidth(64, true), llvm::ElementCount::getScalable(2), 2};;
3821 case BuiltinType::SveUint64x2:
3822 return SVE_INT_ELTTY(64, 2, false, 2){getIntTypeForBitwidth(64, false), llvm::ElementCount::getScalable(2), 2};;
3823 case BuiltinType::SveInt64x3:
3824 return SVE_INT_ELTTY(64, 2, true, 3){getIntTypeForBitwidth(64, true), llvm::ElementCount::getScalable(2), 3};;
3825 case BuiltinType::SveUint64x3:
3826 return SVE_INT_ELTTY(64, 2, false, 3){getIntTypeForBitwidth(64, false), llvm::ElementCount::getScalable(2), 3};;
3827 case BuiltinType::SveInt64x4:
3828 return SVE_INT_ELTTY(64, 2, true, 4){getIntTypeForBitwidth(64, true), llvm::ElementCount::getScalable(2), 4};;
3829 case BuiltinType::SveUint64x4:
3830 return SVE_INT_ELTTY(64, 2, false, 4){getIntTypeForBitwidth(64, false), llvm::ElementCount::getScalable(2), 4};;
3831 case BuiltinType::SveBool:
3832 return SVE_ELTTY(BoolTy, 16, 1){BoolTy, llvm::ElementCount::getScalable(16), 1};;
3833 case BuiltinType::SveFloat16:
3834 return SVE_ELTTY(HalfTy, 8, 1){HalfTy, llvm::ElementCount::getScalable(8), 1};;
3835 case BuiltinType::SveFloat16x2:
3836 return SVE_ELTTY(HalfTy, 8, 2){HalfTy, llvm::ElementCount::getScalable(8), 2};;
3837 case BuiltinType::SveFloat16x3:
3838 return SVE_ELTTY(HalfTy, 8, 3){HalfTy, llvm::ElementCount::getScalable(8), 3};;
3839 case BuiltinType::SveFloat16x4:
3840 return SVE_ELTTY(HalfTy, 8, 4){HalfTy, llvm::ElementCount::getScalable(8), 4};;
3841 case BuiltinType::SveFloat32:
3842 return SVE_ELTTY(FloatTy, 4, 1){FloatTy, llvm::ElementCount::getScalable(4), 1};;
3843 case BuiltinType::SveFloat32x2:
3844 return SVE_ELTTY(FloatTy, 4, 2){FloatTy, llvm::ElementCount::getScalable(4), 2};;
3845 case BuiltinType::SveFloat32x3:
3846 return SVE_ELTTY(FloatTy, 4, 3){FloatTy, llvm::ElementCount::getScalable(4), 3};;
3847 case BuiltinType::SveFloat32x4:
3848 return SVE_ELTTY(FloatTy, 4, 4){FloatTy, llvm::ElementCount::getScalable(4), 4};;
3849 case BuiltinType::SveFloat64:
3850 return SVE_ELTTY(DoubleTy, 2, 1){DoubleTy, llvm::ElementCount::getScalable(2), 1};;
3851 case BuiltinType::SveFloat64x2:
3852 return SVE_ELTTY(DoubleTy, 2, 2){DoubleTy, llvm::ElementCount::getScalable(2), 2};;
3853 case BuiltinType::SveFloat64x3:
3854 return SVE_ELTTY(DoubleTy, 2, 3){DoubleTy, llvm::ElementCount::getScalable(2), 3};;
3855 case BuiltinType::SveFloat64x4:
3856 return SVE_ELTTY(DoubleTy, 2, 4){DoubleTy, llvm::ElementCount::getScalable(2), 4};;
3857 case BuiltinType::SveBFloat16:
3858 return SVE_ELTTY(BFloat16Ty, 8, 1){BFloat16Ty, llvm::ElementCount::getScalable(8), 1};;
3859 case BuiltinType::SveBFloat16x2:
3860 return SVE_ELTTY(BFloat16Ty, 8, 2){BFloat16Ty, llvm::ElementCount::getScalable(8), 2};;
3861 case BuiltinType::SveBFloat16x3:
3862 return SVE_ELTTY(BFloat16Ty, 8, 3){BFloat16Ty, llvm::ElementCount::getScalable(8), 3};;
3863 case BuiltinType::SveBFloat16x4:
3864 return SVE_ELTTY(BFloat16Ty, 8, 4){BFloat16Ty, llvm::ElementCount::getScalable(8), 4};;
3865#define RVV_VECTOR_TYPE_INT(Name, Id, SingletonId, NumEls, ElBits, NF, \
3866 IsSigned) \
3867 case BuiltinType::Id: \
3868 return {getIntTypeForBitwidth(ElBits, IsSigned), \
3869 llvm::ElementCount::getScalable(NumEls), NF};
3870#define RVV_VECTOR_TYPE_FLOAT(Name, Id, SingletonId, NumEls, ElBits, NF) \
3871 case BuiltinType::Id: \
3872 return {ElBits == 16 ? Float16Ty : (ElBits == 32 ? FloatTy : DoubleTy), \
3873 llvm::ElementCount::getScalable(NumEls), NF};
3874#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \
3875 case BuiltinType::Id: \
3876 return {BoolTy, llvm::ElementCount::getScalable(NumEls), 1};
3877#include "clang/Basic/RISCVVTypes.def"
3878 }
3879}
3880
3881/// getScalableVectorType - Return the unique reference to a scalable vector
3882/// type of the specified element type and size. VectorType must be a built-in
3883/// type.
3884QualType ASTContext::getScalableVectorType(QualType EltTy,
3885 unsigned NumElts) const {
3886 if (Target->hasAArch64SVETypes()) {
3887 uint64_t EltTySize = getTypeSize(EltTy);
3888#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \
3889 IsSigned, IsFP, IsBF) \
3890 if (!EltTy->isBooleanType() && \
3891 ((EltTy->hasIntegerRepresentation() && \
3892 EltTy->hasSignedIntegerRepresentation() == IsSigned) || \
3893 (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() && \
3894 IsFP && !IsBF) || \
3895 (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() && \
3896 IsBF && !IsFP)) && \
3897 EltTySize == ElBits && NumElts == NumEls) { \
3898 return SingletonId; \
3899 }
3900#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \
3901 if (EltTy->isBooleanType() && NumElts == NumEls) \
3902 return SingletonId;
3903#include "clang/Basic/AArch64SVEACLETypes.def"
3904 } else if (Target->hasRISCVVTypes()) {
3905 uint64_t EltTySize = getTypeSize(EltTy);
3906#define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned, \
3907 IsFP) \
3908 if (!EltTy->isBooleanType() && \
3909 ((EltTy->hasIntegerRepresentation() && \
3910 EltTy->hasSignedIntegerRepresentation() == IsSigned) || \
3911 (EltTy->hasFloatingRepresentation() && IsFP)) && \
3912 EltTySize == ElBits && NumElts == NumEls) \
3913 return SingletonId;
3914#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \
3915 if (EltTy->isBooleanType() && NumElts == NumEls) \
3916 return SingletonId;
3917#include "clang/Basic/RISCVVTypes.def"
3918 }
3919 return QualType();
3920}
3921
3922/// getVectorType - Return the unique reference to a vector type of
3923/// the specified element type and size. VectorType must be a built-in type.
3924QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
3925 VectorType::VectorKind VecKind) const {
3926 assert(vecType->isBuiltinType())((void)0);
3927
3928 // Check if we've already instantiated a vector of this type.
3929 llvm::FoldingSetNodeID ID;
3930 VectorType::Profile(ID, vecType, NumElts, Type::Vector, VecKind);
3931
3932 void *InsertPos = nullptr;
3933 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
3934 return QualType(VTP, 0);
3935
3936 // If the element type isn't canonical, this won't be a canonical type either,
3937 // so fill in the canonical type field.
3938 QualType Canonical;
3939 if (!vecType.isCanonical()) {
3940 Canonical = getVectorType(getCanonicalType(vecType), NumElts, VecKind);
3941
3942 // Get the new insert position for the node we care about.
3943 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
3944 assert(!NewIP && "Shouldn't be in the map!")((void)0); (void)NewIP;
3945 }
3946 auto *New = new (*this, TypeAlignment)
3947 VectorType(vecType, NumElts, Canonical, VecKind);
3948 VectorTypes.InsertNode(New, InsertPos);
3949 Types.push_back(New);
3950 return QualType(New, 0);
3951}
3952
3953QualType
3954ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr,
3955 SourceLocation AttrLoc,
3956 VectorType::VectorKind VecKind) const {
3957 llvm::FoldingSetNodeID ID;
3958 DependentVectorType::Profile(ID, *this, getCanonicalType(VecType), SizeExpr,
3959 VecKind);
3960 void *InsertPos = nullptr;
3961 DependentVectorType *Canon =
3962 DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
3963 DependentVectorType *New;
3964
3965 if (Canon) {
3966 New = new (*this, TypeAlignment) DependentVectorType(
3967 *this, VecType, QualType(Canon, 0), SizeExpr, AttrLoc, VecKind);
3968 } else {
3969 QualType CanonVecTy = getCanonicalType(VecType);
3970 if (CanonVecTy == VecType) {
3971 New = new (*this, TypeAlignment) DependentVectorType(
3972 *this, VecType, QualType(), SizeExpr, AttrLoc, VecKind);
3973
3974 DependentVectorType *CanonCheck =
3975 DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
3976 assert(!CanonCheck &&((void)0)
3977 "Dependent-sized vector_size canonical type broken")((void)0);
3978 (void)CanonCheck;
3979 DependentVectorTypes.InsertNode(New, InsertPos);
3980 } else {
3981 QualType CanonTy = getDependentVectorType(CanonVecTy, SizeExpr,
3982 SourceLocation(), VecKind);
3983 New = new (*this, TypeAlignment) DependentVectorType(
3984 *this, VecType, CanonTy, SizeExpr, AttrLoc, VecKind);
3985 }
3986 }
3987
3988 Types.push_back(New);
3989 return QualType(New, 0);
3990}
3991
3992/// getExtVectorType - Return the unique reference to an extended vector type of
3993/// the specified element type and size. VectorType must be a built-in type.
3994QualType
3995ASTContext::getExtVectorType(QualType vecType, unsigned NumElts) const {
3996 assert(vecType->isBuiltinType() || vecType->isDependentType())((void)0);
3997
3998 // Check if we've already instantiated a vector of this type.
3999 llvm::FoldingSetNodeID ID;
4000 VectorType::Profile(ID, vecType, NumElts, Type::ExtVector,
4001 VectorType::GenericVector);
4002 void *InsertPos = nullptr;
4003 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
4004 return QualType(VTP, 0);
4005
4006 // If the element type isn't canonical, this won't be a canonical type either,
4007 // so fill in the canonical type field.
4008 QualType Canonical;
4009 if (!vecType.isCanonical()) {
4010 Canonical = getExtVectorType(getCanonicalType(vecType), NumElts);
4011
4012 // Get the new insert position for the node we care about.
4013 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
4014 assert(!NewIP && "Shouldn't be in the map!")((void)0); (void)NewIP;
4015 }
4016 auto *New = new (*this, TypeAlignment)
4017 ExtVectorType(vecType, NumElts, Canonical);
4018 VectorTypes.InsertNode(New, InsertPos);
4019 Types.push_back(New);
4020 return QualType(New, 0);
4021}
4022
4023QualType
4024ASTContext::getDependentSizedExtVectorType(QualType vecType,
4025 Expr *SizeExpr,
4026 SourceLocation AttrLoc) const {
4027 llvm::FoldingSetNodeID ID;
4028 DependentSizedExtVectorType::Profile(ID, *this, getCanonicalType(vecType),
4029 SizeExpr);
4030
4031 void *InsertPos = nullptr;
4032 DependentSizedExtVectorType *Canon
4033 = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
4034 DependentSizedExtVectorType *New;
4035 if (Canon) {
4036 // We already have a canonical version of this array type; use it as
4037 // the canonical type for a newly-built type.
4038 New = new (*this, TypeAlignment)
4039 DependentSizedExtVectorType(*this, vecType, QualType(Canon, 0),
4040 SizeExpr, AttrLoc);
4041 } else {
4042 QualType CanonVecTy = getCanonicalType(vecType);
4043 if (CanonVecTy == vecType) {
4044 New = new (*this, TypeAlignment)
4045 DependentSizedExtVectorType(*this, vecType, QualType(), SizeExpr,
4046 AttrLoc);
4047
4048 DependentSizedExtVectorType *CanonCheck
4049 = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
4050 assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken")((void)0);
4051 (void)CanonCheck;
4052 DependentSizedExtVectorTypes.InsertNode(New, InsertPos);
4053 } else {
4054 QualType CanonExtTy = getDependentSizedExtVectorType(CanonVecTy, SizeExpr,
4055 SourceLocation());
4056 New = new (*this, TypeAlignment) DependentSizedExtVectorType(
4057 *this, vecType, CanonExtTy, SizeExpr, AttrLoc);
4058 }
4059 }
4060
4061 Types.push_back(New);
4062 return QualType(New, 0);
4063}
4064
4065QualType ASTContext::getConstantMatrixType(QualType ElementTy, unsigned NumRows,
4066 unsigned NumColumns) const {
4067 llvm::FoldingSetNodeID ID;
4068 ConstantMatrixType::Profile(ID, ElementTy, NumRows, NumColumns,
4069 Type::ConstantMatrix);
4070
4071 assert(MatrixType::isValidElementType(ElementTy) &&((void)0)
4072 "need a valid element type")((void)0);
4073 assert(ConstantMatrixType::isDimensionValid(NumRows) &&((void)0)
4074 ConstantMatrixType::isDimensionValid(NumColumns) &&((void)0)
4075 "need valid matrix dimensions")((void)0);
4076 void *InsertPos = nullptr;
4077 if (ConstantMatrixType *MTP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos))
4078 return QualType(MTP, 0);
4079
4080 QualType Canonical;
4081 if (!ElementTy.isCanonical()) {
4082 Canonical =
4083 getConstantMatrixType(getCanonicalType(ElementTy), NumRows, NumColumns);
4084
4085 ConstantMatrixType *NewIP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
4086 assert(!NewIP && "Matrix type shouldn't already exist in the map")((void)0);
4087 (void)NewIP;
4088 }
4089
4090 auto *New = new (*this, TypeAlignment)
4091 ConstantMatrixType(ElementTy, NumRows, NumColumns, Canonical);
4092 MatrixTypes.InsertNode(New, InsertPos);
4093 Types.push_back(New);
4094 return QualType(New, 0);
4095}
4096
4097QualType ASTContext::getDependentSizedMatrixType(QualType ElementTy,
4098 Expr *RowExpr,
4099 Expr *ColumnExpr,
4100 SourceLocation AttrLoc) const {
4101 QualType CanonElementTy = getCanonicalType(ElementTy);
4102 llvm::FoldingSetNodeID ID;
4103 DependentSizedMatrixType::Profile(ID, *this, CanonElementTy, RowExpr,
4104 ColumnExpr);
4105
4106 void *InsertPos = nullptr;
4107 DependentSizedMatrixType *Canon =
4108 DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
4109
4110 if (!Canon) {
4111 Canon = new (*this, TypeAlignment) DependentSizedMatrixType(
4112 *this, CanonElementTy, QualType(), RowExpr, ColumnExpr, AttrLoc);
4113#ifndef NDEBUG1
4114 DependentSizedMatrixType *CanonCheck =
4115 DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
4116 assert(!CanonCheck && "Dependent-sized matrix canonical type broken")((void)0);
4117#endif
4118 DependentSizedMatrixTypes.InsertNode(Canon, InsertPos);
4119 Types.push_back(Canon);
4120 }
4121
4122 // Already have a canonical version of the matrix type
4123 //
4124 // If it exactly matches the requested type, use it directly.
4125 if (Canon->getElementType() == ElementTy && Canon->getRowExpr() == RowExpr &&
4126 Canon->getColumnExpr() == ColumnExpr)
4127 return QualType(Canon, 0);
4128
4129 // Use Canon as the canonical type for newly-built type.
4130 DependentSizedMatrixType *New = new (*this, TypeAlignment)
4131 DependentSizedMatrixType(*this, ElementTy, QualType(Canon, 0), RowExpr,
4132 ColumnExpr, AttrLoc);
4133 Types.push_back(New);
4134 return QualType(New, 0);
4135}
4136
4137QualType ASTContext::getDependentAddressSpaceType(QualType PointeeType,
4138 Expr *AddrSpaceExpr,
4139 SourceLocation AttrLoc) const {
4140 assert(AddrSpaceExpr->isInstantiationDependent())((void)0);
4141
4142 QualType canonPointeeType = getCanonicalType(PointeeType);
4143
4144 void *insertPos = nullptr;
4145 llvm::FoldingSetNodeID ID;
4146 DependentAddressSpaceType::Profile(ID, *this, canonPointeeType,
4147 AddrSpaceExpr);
4148
4149 DependentAddressSpaceType *canonTy =
4150 DependentAddressSpaceTypes.FindNodeOrInsertPos(ID, insertPos);
4151
4152 if (!canonTy) {
4153 canonTy = new (*this, TypeAlignment)
4154 DependentAddressSpaceType(*this, canonPointeeType,
4155 QualType(), AddrSpaceExpr, AttrLoc);
4156 DependentAddressSpaceTypes.InsertNode(canonTy, insertPos);
4157 Types.push_back(canonTy);
4158 }
4159
4160 if (canonPointeeType == PointeeType &&
4161 canonTy->getAddrSpaceExpr() == AddrSpaceExpr)
4162 return QualType(canonTy, 0);
4163
4164 auto *sugaredType
4165 = new (*this, TypeAlignment)
4166 DependentAddressSpaceType(*this, PointeeType, QualType(canonTy, 0),
4167 AddrSpaceExpr, AttrLoc);
4168 Types.push_back(sugaredType);
4169 return QualType(sugaredType, 0);
4170}
4171
4172/// Determine whether \p T is canonical as the result type of a function.
4173static bool isCanonicalResultType(QualType T) {
4174 return T.isCanonical() &&
4175 (T.getObjCLifetime() == Qualifiers::OCL_None ||
4176 T.getObjCLifetime() == Qualifiers::OCL_ExplicitNone);
4177}
4178
4179/// getFunctionNoProtoType - Return a K&R style C function type like 'int()'.
4180QualType
4181ASTContext::getFunctionNoProtoType(QualType ResultTy,
4182 const FunctionType::ExtInfo &Info) const {
4183 // Unique functions, to guarantee there is only one function of a particular
4184 // structure.
4185 llvm::FoldingSetNodeID ID;
4186 FunctionNoProtoType::Profile(ID, ResultTy, Info);
4187
4188 void *InsertPos = nullptr;
4189 if (FunctionNoProtoType *FT =
4190 FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos))
4191 return QualType(FT, 0);
4192
4193 QualType Canonical;
4194 if (!isCanonicalResultType(ResultTy)) {
4195 Canonical =
4196 getFunctionNoProtoType(getCanonicalFunctionResultType(ResultTy), Info);
4197
4198 // Get the new insert position for the node we care about.
4199 FunctionNoProtoType *NewIP =
4200 FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
4201 assert(!NewIP && "Shouldn't be in the map!")((void)0); (void)NewIP;
4202 }
4203
4204 auto *New = new (*this, TypeAlignment)
4205 FunctionNoProtoType(ResultTy, Canonical, Info);
4206 Types.push_back(New);
4207 FunctionNoProtoTypes.InsertNode(New, InsertPos);
4208 return QualType(New, 0);
4209}
4210
4211CanQualType
4212ASTContext::getCanonicalFunctionResultType(QualType ResultType) const {
4213 CanQualType CanResultType = getCanonicalType(ResultType);
4214
4215 // Canonical result types do not have ARC lifetime qualifiers.
4216 if (CanResultType.getQualifiers().hasObjCLifetime()) {
4217 Qualifiers Qs = CanResultType.getQualifiers();
4218 Qs.removeObjCLifetime();
4219 return CanQualType::CreateUnsafe(
4220 getQualifiedType(CanResultType.getUnqualifiedType(), Qs));
4221 }
4222
4223 return CanResultType;
4224}
4225
4226static bool isCanonicalExceptionSpecification(
4227 const FunctionProtoType::ExceptionSpecInfo &ESI, bool NoexceptInType) {
4228 if (ESI.Type == EST_None)
4229 return true;
4230 if (!NoexceptInType)
4231 return false;
4232
4233 // C++17 onwards: exception specification is part of the type, as a simple
4234 // boolean "can this function type throw".
4235 if (ESI.Type == EST_BasicNoexcept)
4236 return true;
4237
4238 // A noexcept(expr) specification is (possibly) canonical if expr is
4239 // value-dependent.
4240 if (ESI.Type == EST_DependentNoexcept)
4241 return true;
4242
4243 // A dynamic exception specification is canonical if it only contains pack
4244 // expansions (so we can't tell whether it's non-throwing) and all its
4245 // contained types are canonical.
4246 if (ESI.Type == EST_Dynamic) {
4247 bool AnyPackExpansions = false;
4248 for (QualType ET : ESI.Exceptions) {
4249 if (!ET.isCanonical())
4250 return false;
4251 if (ET->getAs<PackExpansionType>())
4252 AnyPackExpansions = true;
4253 }
4254 return AnyPackExpansions;
4255 }
4256
4257 return false;
4258}
4259
4260QualType ASTContext::getFunctionTypeInternal(
4261 QualType ResultTy, ArrayRef<QualType> ArgArray,
4262 const FunctionProtoType::ExtProtoInfo &EPI, bool OnlyWantCanonical) const {
4263 size_t NumArgs = ArgArray.size();
4264
4265 // Unique functions, to guarantee there is only one function of a particular
4266 // structure.
4267 llvm::FoldingSetNodeID ID;
4268 FunctionProtoType::Profile(ID, ResultTy, ArgArray.begin(), NumArgs, EPI,
4269 *this, true);
4270
4271 QualType Canonical;
4272 bool Unique = false;
4273
4274 void *InsertPos = nullptr;
4275 if (FunctionProtoType *FPT =
4276 FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) {
4277 QualType Existing = QualType(FPT, 0);
4278
4279 // If we find a pre-existing equivalent FunctionProtoType, we can just reuse
4280 // it so long as our exception specification doesn't contain a dependent
4281 // noexcept expression, or we're just looking for a canonical type.
4282 // Otherwise, we're going to need to create a type
4283 // sugar node to hold the concrete expression.
4284 if (OnlyWantCanonical || !isComputedNoexcept(EPI.ExceptionSpec.Type) ||
4285 EPI.ExceptionSpec.NoexceptExpr == FPT->getNoexceptExpr())
4286 return Existing;
4287
4288 // We need a new type sugar node for this one, to hold the new noexcept
4289 // expression. We do no canonicalization here, but that's OK since we don't
4290 // expect to see the same noexcept expression much more than once.
4291 Canonical = getCanonicalType(Existing);
4292 Unique = true;
4293 }
4294
4295 bool NoexceptInType = getLangOpts().CPlusPlus17;
4296 bool IsCanonicalExceptionSpec =
4297 isCanonicalExceptionSpecification(EPI.ExceptionSpec, NoexceptInType);
4298
4299 // Determine whether the type being created is already canonical or not.
4300 bool isCanonical = !Unique && IsCanonicalExceptionSpec &&
4301 isCanonicalResultType(ResultTy) && !EPI.HasTrailingReturn;
4302 for (unsigned i = 0; i != NumArgs && isCanonical; ++i)
4303 if (!ArgArray[i].isCanonicalAsParam())
4304 isCanonical = false;
4305
4306 if (OnlyWantCanonical)
4307 assert(isCanonical &&((void)0)
4308 "given non-canonical parameters constructing canonical type")((void)0);
4309
4310 // If this type isn't canonical, get the canonical version of it if we don't
4311 // already have it. The exception spec is only partially part of the
4312 // canonical type, and only in C++17 onwards.
4313 if (!isCanonical && Canonical.isNull()) {
4314 SmallVector<QualType, 16> CanonicalArgs;
4315 CanonicalArgs.reserve(NumArgs);
4316 for (unsigned i = 0; i != NumArgs; ++i)
4317 CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i]));
4318
4319 llvm::SmallVector<QualType, 8> ExceptionTypeStorage;
4320 FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI;
4321 CanonicalEPI.HasTrailingReturn = false;
4322
4323 if (IsCanonicalExceptionSpec) {
4324 // Exception spec is already OK.
4325 } else if (NoexceptInType) {
4326 switch (EPI.ExceptionSpec.Type) {
4327 case EST_Unparsed: case EST_Unevaluated: case EST_Uninstantiated:
4328 // We don't know yet. It shouldn't matter what we pick here; no-one
4329 // should ever look at this.
4330 LLVM_FALLTHROUGH[[gnu::fallthrough]];
4331 case EST_None: case EST_MSAny: case EST_NoexceptFalse:
4332 CanonicalEPI.ExceptionSpec.Type = EST_None;
4333 break;
4334
4335 // A dynamic exception specification is almost always "not noexcept",
4336 // with the exception that a pack expansion might expand to no types.
4337 case EST_Dynamic: {
4338 bool AnyPacks = false;
4339 for (QualType ET : EPI.ExceptionSpec.Exceptions) {
4340 if (ET->getAs<PackExpansionType>())
4341 AnyPacks = true;
4342 ExceptionTypeStorage.push_back(getCanonicalType(ET));
4343 }
4344 if (!AnyPacks)
4345 CanonicalEPI.ExceptionSpec.Type = EST_None;
4346 else {
4347 CanonicalEPI.ExceptionSpec.Type = EST_Dynamic;
4348 CanonicalEPI.ExceptionSpec.Exceptions = ExceptionTypeStorage;
4349 }
4350 break;
4351 }
4352
4353 case EST_DynamicNone:
4354 case EST_BasicNoexcept:
4355 case EST_NoexceptTrue:
4356 case EST_NoThrow:
4357 CanonicalEPI.ExceptionSpec.Type = EST_BasicNoexcept;
4358 break;
4359
4360 case EST_DependentNoexcept:
4361 llvm_unreachable("dependent noexcept is already canonical")__builtin_unreachable();
4362 }
4363 } else {
4364 CanonicalEPI.ExceptionSpec = FunctionProtoType::ExceptionSpecInfo();
4365 }
4366
4367 // Adjust the canonical function result type.
4368 CanQualType CanResultTy = getCanonicalFunctionResultType(ResultTy);
4369 Canonical =
4370 getFunctionTypeInternal(CanResultTy, CanonicalArgs, CanonicalEPI, true);
4371
4372 // Get the new insert position for the node we care about.
4373 FunctionProtoType *NewIP =
4374 FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
4375 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
4376 }
4377
4378 // Compute the needed size to hold this FunctionProtoType and the
4379 // various trailing objects.
4380 auto ESH = FunctionProtoType::getExceptionSpecSize(
4381 EPI.ExceptionSpec.Type, EPI.ExceptionSpec.Exceptions.size());
4382 size_t Size = FunctionProtoType::totalSizeToAlloc<
4383 QualType, SourceLocation, FunctionType::FunctionTypeExtraBitfields,
4384 FunctionType::ExceptionType, Expr *, FunctionDecl *,
4385 FunctionProtoType::ExtParameterInfo, Qualifiers>(
4386 NumArgs, EPI.Variadic,
4387 FunctionProtoType::hasExtraBitfields(EPI.ExceptionSpec.Type),
4388 ESH.NumExceptionType, ESH.NumExprPtr, ESH.NumFunctionDeclPtr,
4389 EPI.ExtParameterInfos ? NumArgs : 0,
4390 EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0);
4391
4392 auto *FTP = (FunctionProtoType *)Allocate(Size, TypeAlignment);
4393 FunctionProtoType::ExtProtoInfo newEPI = EPI;
4394 new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI);
4395 Types.push_back(FTP);
4396 if (!Unique)
4397 FunctionProtoTypes.InsertNode(FTP, InsertPos);
4398 return QualType(FTP, 0);
4399}
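Illustrative aside (not part of ASTContext.cpp): getFunctionTypeInternal follows the usual ASTContext uniquing shape — profile the inputs, look the node up, build a canonical sibling if needed, then placement-allocate the final node and insert it. A minimal self-contained sketch of that pattern, with invented names (ProtoNode, UniqueTable) standing in for the FoldingSet machinery:

// Sketch of the profile / find-or-insert / allocate pattern used above.
// ProtoNode and UniqueTable are invented for illustration; the real code
// uses llvm::FoldingSet plus bump-pointer allocation.
#include <map>
#include <memory>
#include <tuple>

struct ProtoNode {
  bool Variadic;
  unsigned NumParams;
};

class UniqueTable {
  using ParamKey = std::tuple<bool, unsigned>;   // stands in for FoldingSetNodeID
  std::map<ParamKey, std::unique_ptr<ProtoNode>> Nodes;

public:
  ProtoNode *get(bool Variadic, unsigned NumParams) {
    ParamKey Key{Variadic, NumParams};
    auto It = Nodes.find(Key);                   // FindNodeOrInsertPos
    if (It != Nodes.end())
      return It->second.get();                   // reuse the existing node
    auto Fresh = std::make_unique<ProtoNode>(ProtoNode{Variadic, NumParams});
    ProtoNode *Raw = Fresh.get();
    Nodes.emplace(Key, std::move(Fresh));        // InsertNode
    return Raw;                                  // one node per distinct key
  }
};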
4400
4401QualType ASTContext::getPipeType(QualType T, bool ReadOnly) const {
4402 llvm::FoldingSetNodeID ID;
4403 PipeType::Profile(ID, T, ReadOnly);
4404
4405 void *InsertPos = nullptr;
4406 if (PipeType *PT = PipeTypes.FindNodeOrInsertPos(ID, InsertPos))
4407 return QualType(PT, 0);
4408
4409 // If the pipe element type isn't canonical, this won't be a canonical type
4410 // either, so fill in the canonical type field.
4411 QualType Canonical;
4412 if (!T.isCanonical()) {
4413 Canonical = getPipeType(getCanonicalType(T), ReadOnly);
4414
4415 // Get the new insert position for the node we care about.
4416 PipeType *NewIP = PipeTypes.FindNodeOrInsertPos(ID, InsertPos);
4417 assert(!NewIP && "Shouldn't be in the map!");
4418 (void)NewIP;
4419 }
4420 auto *New = new (*this, TypeAlignment) PipeType(T, Canonical, ReadOnly);
4421 Types.push_back(New);
4422 PipeTypes.InsertNode(New, InsertPos);
4423 return QualType(New, 0);
4424}
4425
4426QualType ASTContext::adjustStringLiteralBaseType(QualType Ty) const {
4427 // OpenCL v1.1 s6.5.3: a string literal is in the constant address space.
4428 return LangOpts.OpenCL ? getAddrSpaceQualType(Ty, LangAS::opencl_constant)
4429 : Ty;
4430}
4431
4432QualType ASTContext::getReadPipeType(QualType T) const {
4433 return getPipeType(T, true);
4434}
4435
4436QualType ASTContext::getWritePipeType(QualType T) const {
4437 return getPipeType(T, false);
4438}
4439
4440QualType ASTContext::getExtIntType(bool IsUnsigned, unsigned NumBits) const {
4441 llvm::FoldingSetNodeID ID;
4442 ExtIntType::Profile(ID, IsUnsigned, NumBits);
4443
4444 void *InsertPos = nullptr;
4445 if (ExtIntType *EIT = ExtIntTypes.FindNodeOrInsertPos(ID, InsertPos))
4446 return QualType(EIT, 0);
4447
4448 auto *New = new (*this, TypeAlignment) ExtIntType(IsUnsigned, NumBits);
4449 ExtIntTypes.InsertNode(New, InsertPos);
4450 Types.push_back(New);
4451 return QualType(New, 0);
4452}
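Illustrative aside: getExtIntType models Clang's _ExtInt(N) extension (later standardized as _BitInt). Assuming that reading, these are the kinds of source-level types it uniques:

// Clang extension; each distinct (signedness, bit-width) pair is one node.
_ExtInt(37) a;           // getExtIntType(/*IsUnsigned=*/false, /*NumBits=*/37)
unsigned _ExtInt(7) b;   // getExtIntType(/*IsUnsigned=*/true,  /*NumBits=*/7)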
4453
4454QualType ASTContext::getDependentExtIntType(bool IsUnsigned,
4455 Expr *NumBitsExpr) const {
4456 assert(NumBitsExpr->isInstantiationDependent() && "Only good for dependent");
4457 llvm::FoldingSetNodeID ID;
4458 DependentExtIntType::Profile(ID, *this, IsUnsigned, NumBitsExpr);
4459
4460 void *InsertPos = nullptr;
4461 if (DependentExtIntType *Existing =
4462 DependentExtIntTypes.FindNodeOrInsertPos(ID, InsertPos))
4463 return QualType(Existing, 0);
4464
4465 auto *New = new (*this, TypeAlignment)
4466 DependentExtIntType(*this, IsUnsigned, NumBitsExpr);
4467 DependentExtIntTypes.InsertNode(New, InsertPos);
4468
4469 Types.push_back(New);
4470 return QualType(New, 0);
4471}
4472
4473#ifndef NDEBUG
4474static bool NeedsInjectedClassNameType(const RecordDecl *D) {
4475 if (!isa<CXXRecordDecl>(D)) return false;
4476 const auto *RD = cast<CXXRecordDecl>(D);
4477 if (isa<ClassTemplatePartialSpecializationDecl>(RD))
4478 return true;
4479 if (RD->getDescribedClassTemplate() &&
4480 !isa<ClassTemplateSpecializationDecl>(RD))
4481 return true;
4482 return false;
4483}
4484#endif
4485
4486/// getInjectedClassNameType - Return the unique reference to the
4487/// injected class name type for the specified templated declaration.
4488QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl,
4489 QualType TST) const {
4490 assert(NeedsInjectedClassNameType(Decl));
4491 if (Decl->TypeForDecl) {
4492 assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
4493 } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDecl()) {
4494 assert(PrevDecl->TypeForDecl && "previous declaration has no type");
4495 Decl->TypeForDecl = PrevDecl->TypeForDecl;
4496 assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
4497 } else {
4498 Type *newType =
4499 new (*this, TypeAlignment) InjectedClassNameType(Decl, TST);
4500 Decl->TypeForDecl = newType;
4501 Types.push_back(newType);
4502 }
4503 return QualType(Decl->TypeForDecl, 0);
4504}
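Illustrative aside: the injected-class-name is the C++ rule that, inside a class template's own definition, the template's name denotes the current specialization:

// Inside the definition of a class template, 'List' names List<T> itself.
template <class T> struct List {
  List *Next;      // injected-class-name: the same type as List<T> *
  List<T> *Prev;   // equivalent explicit spelling
};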
4505
4506/// getTypeDeclType - Return the unique reference to the type for the
4507/// specified type declaration.
4508QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const {
4509 assert(Decl && "Passed null for Decl param");
4510 assert(!Decl->TypeForDecl && "TypeForDecl present in slow case");
4511
4512 if (const auto *Typedef = dyn_cast<TypedefNameDecl>(Decl))
4513 return getTypedefType(Typedef);
4514
4515 assert(!isa<TemplateTypeParmDecl>(Decl) &&
4516 "Template type parameter types are always available.");
4517
4518 if (const auto *Record = dyn_cast<RecordDecl>(Decl)) {
4519 assert(Record->isFirstDecl() && "struct/union has previous declaration");
4520 assert(!NeedsInjectedClassNameType(Record));
4521 return getRecordType(Record);
4522 } else if (const auto *Enum = dyn_cast<EnumDecl>(Decl)) {
4523 assert(Enum->isFirstDecl() && "enum has previous declaration");
4524 return getEnumType(Enum);
4525 } else if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) {
4526 Type *newType = new (*this, TypeAlignment) UnresolvedUsingType(Using);
4527 Decl->TypeForDecl = newType;
4528 Types.push_back(newType);
4529 } else
4530 llvm_unreachable("TypeDecl without a type?");
4531
4532 return QualType(Decl->TypeForDecl, 0);
4533}
4534
4535/// getTypedefType - Return the unique reference to the type for the
4536/// specified typedef name decl.
4537QualType ASTContext::getTypedefType(const TypedefNameDecl *Decl,
4538 QualType Underlying) const {
4539 if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
4540
4541 if (Underlying.isNull())
4542 Underlying = Decl->getUnderlyingType();
4543 QualType Canonical = getCanonicalType(Underlying);
4544 auto *newType = new (*this, TypeAlignment)
4545 TypedefType(Type::Typedef, Decl, Underlying, Canonical);
4546 Decl->TypeForDecl = newType;
4547 Types.push_back(newType);
4548 return QualType(newType, 0);
4549}
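Illustrative aside: the node keeps both the sugared typedef and the canonical underlying type, for example:

typedef unsigned long size_type;  // TypedefType sugar over 'unsigned long'
size_type n = 0;                  // written type: size_type
                                  // canonical type: unsigned long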
4550
4551QualType ASTContext::getRecordType(const RecordDecl *Decl) const {
4552 if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
4553
4554 if (const RecordDecl *PrevDecl = Decl->getPreviousDecl())
4555 if (PrevDecl->TypeForDecl)
4556 return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
4557
4558 auto *newType = new (*this, TypeAlignment) RecordType(Decl);
4559 Decl->TypeForDecl = newType;
4560 Types.push_back(newType);
4561 return QualType(newType, 0);
4562}
4563
4564QualType ASTContext::getEnumType(const EnumDecl *Decl) const {
4565 if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
4566
4567 if (const EnumDecl *PrevDecl = Decl->getPreviousDecl())
4568 if (PrevDecl->TypeForDecl)
4569 return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
4570
4571 auto *newType = new (*this, TypeAlignment) EnumType(Decl);
4572 Decl->TypeForDecl = newType;
4573 Types.push_back(newType);
4574 return QualType(newType, 0);
4575}
4576
4577QualType ASTContext::getAttributedType(attr::Kind attrKind,
4578 QualType modifiedType,
4579 QualType equivalentType) {
4580 llvm::FoldingSetNodeID id;
4581 AttributedType::Profile(id, attrKind, modifiedType, equivalentType);
4582
4583 void *insertPos = nullptr;
4584 AttributedType *type = AttributedTypes.FindNodeOrInsertPos(id, insertPos);
4585 if (type) return QualType(type, 0);
4586
4587 QualType canon = getCanonicalType(equivalentType);
4588 type = new (*this, TypeAlignment)
4589 AttributedType(canon, attrKind, modifiedType, equivalentType);
4590
4591 Types.push_back(type);
4592 AttributedTypes.InsertNode(type, insertPos);
4593
4594 return QualType(type, 0);
4595}
4596
4597/// Retrieve a substitution-result type.
4598QualType
4599ASTContext::getSubstTemplateTypeParmType(const TemplateTypeParmType *Parm,
4600 QualType Replacement) const {
4601 assert(Replacement.isCanonical()
4602 && "replacement types must always be canonical");
4603
4604 llvm::FoldingSetNodeID ID;
4605 SubstTemplateTypeParmType::Profile(ID, Parm, Replacement);
4606 void *InsertPos = nullptr;
4607 SubstTemplateTypeParmType *SubstParm
4608 = SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
4609
4610 if (!SubstParm) {
4611 SubstParm = new (*this, TypeAlignment)
4612 SubstTemplateTypeParmType(Parm, Replacement);
4613 Types.push_back(SubstParm);
4614 SubstTemplateTypeParmTypes.InsertNode(SubstParm, InsertPos);
4615 }
4616
4617 return QualType(SubstParm, 0);
4618}
4619
4620/// Retrieve a substitution-result type for a template parameter pack.
4621QualType ASTContext::getSubstTemplateTypeParmPackType(
4622 const TemplateTypeParmType *Parm,
4623 const TemplateArgument &ArgPack) {
4624#ifndef NDEBUG
4625 for (const auto &P : ArgPack.pack_elements()) {
4626 assert(P.getKind() == TemplateArgument::Type && "Pack contains a non-type");
4627 assert(P.getAsType().isCanonical() && "Pack contains non-canonical type");
4628 }
4629#endif
4630
4631 llvm::FoldingSetNodeID ID;
4632 SubstTemplateTypeParmPackType::Profile(ID, Parm, ArgPack);
4633 void *InsertPos = nullptr;
4634 if (SubstTemplateTypeParmPackType *SubstParm
4635 = SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos))
4636 return QualType(SubstParm, 0);
4637
4638 QualType Canon;
4639 if (!Parm->isCanonicalUnqualified()) {
4640 Canon = getCanonicalType(QualType(Parm, 0));
4641 Canon = getSubstTemplateTypeParmPackType(cast<TemplateTypeParmType>(Canon),
4642 ArgPack);
4643 SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos);
4644 }
4645
4646 auto *SubstParm
4647 = new (*this, TypeAlignment) SubstTemplateTypeParmPackType(Parm, Canon,
4648 ArgPack);
4649 Types.push_back(SubstParm);
4650 SubstTemplateTypeParmPackTypes.InsertNode(SubstParm, InsertPos);
4651 return QualType(SubstParm, 0);
4652}
4653
4654/// Retrieve the template type parameter type for a template
4655/// parameter or parameter pack with the given depth, index, and (optionally)
4656/// name.
4657QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index,
4658 bool ParameterPack,
4659 TemplateTypeParmDecl *TTPDecl) const {
4660 llvm::FoldingSetNodeID ID;
4661 TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl);
4662 void *InsertPos = nullptr;
4663 TemplateTypeParmType *TypeParm
4664 = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
4665
4666 if (TypeParm)
4667 return QualType(TypeParm, 0);
4668
4669 if (TTPDecl) {
4670 QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack);
4671 TypeParm = new (*this, TypeAlignment) TemplateTypeParmType(TTPDecl, Canon);
4672
4673 TemplateTypeParmType *TypeCheck
4674 = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
4675 assert(!TypeCheck && "Template type parameter canonical type broken");
4676 (void)TypeCheck;
4677 } else
4678 TypeParm = new (*this, TypeAlignment)
4679 TemplateTypeParmType(Depth, Index, ParameterPack);
4680
4681 Types.push_back(TypeParm);
4682 TemplateTypeParmTypes.InsertNode(TypeParm, InsertPos);
4683
4684 return QualType(TypeParm, 0);
4685}
4686
4687TypeSourceInfo *
4688ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name,
4689 SourceLocation NameLoc,
4690 const TemplateArgumentListInfo &Args,
4691 QualType Underlying) const {
4692 assert(!Name.getAsDependentTemplateName() &&
4693 "No dependent template names here!");
4694 QualType TST = getTemplateSpecializationType(Name, Args, Underlying);
4695
4696 TypeSourceInfo *DI = CreateTypeSourceInfo(TST);
4697 TemplateSpecializationTypeLoc TL =
4698 DI->getTypeLoc().castAs<TemplateSpecializationTypeLoc>();
4699 TL.setTemplateKeywordLoc(SourceLocation());
4700 TL.setTemplateNameLoc(NameLoc);
4701 TL.setLAngleLoc(Args.getLAngleLoc());
4702 TL.setRAngleLoc(Args.getRAngleLoc());
4703 for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
4704 TL.setArgLocInfo(i, Args[i].getLocInfo());
4705 return DI;
4706}
4707
4708QualType
4709ASTContext::getTemplateSpecializationType(TemplateName Template,
4710 const TemplateArgumentListInfo &Args,
4711 QualType Underlying) const {
4712 assert(!Template.getAsDependentTemplateName() &&
4713 "No dependent template names here!");
4714
4715 SmallVector<TemplateArgument, 4> ArgVec;
4716 ArgVec.reserve(Args.size());
4717 for (const TemplateArgumentLoc &Arg : Args.arguments())
4718 ArgVec.push_back(Arg.getArgument());
4719
4720 return getTemplateSpecializationType(Template, ArgVec, Underlying);
4721}
4722
4723#ifndef NDEBUG
4724static bool hasAnyPackExpansions(ArrayRef<TemplateArgument> Args) {
4725 for (const TemplateArgument &Arg : Args)
4726 if (Arg.isPackExpansion())
4727 return true;
4728
4729 return false;
4730}
4731#endif
4732
4733QualType
4734ASTContext::getTemplateSpecializationType(TemplateName Template,
4735 ArrayRef<TemplateArgument> Args,
4736 QualType Underlying) const {
4737 assert(!Template.getAsDependentTemplateName() &&
4738 "No dependent template names here!");
4739 // Look through qualified template names.
4740 if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
4741 Template = TemplateName(QTN->getTemplateDecl());
4742
4743 bool IsTypeAlias =
4744 Template.getAsTemplateDecl() &&
4745 isa<TypeAliasTemplateDecl>(Template.getAsTemplateDecl());
4746 QualType CanonType;
4747 if (!Underlying.isNull())
4748 CanonType = getCanonicalType(Underlying);
4749 else {
4750 // We can get here with an alias template when the specialization contains
4751 // a pack expansion that does not match up with a parameter pack.
4752 assert((!IsTypeAlias || hasAnyPackExpansions(Args)) &&
4753 "Caller must compute aliased type");
4754 IsTypeAlias = false;
4755 CanonType = getCanonicalTemplateSpecializationType(Template, Args);
4756 }
4757
4758 // Allocate the (non-canonical) template specialization type, but don't
4759 // try to unique it: these types typically have location information that
4760 // we don't unique and don't want to lose.
4761 void *Mem = Allocate(sizeof(TemplateSpecializationType) +
4762 sizeof(TemplateArgument) * Args.size() +
4763 (IsTypeAlias? sizeof(QualType) : 0),
4764 TypeAlignment);
4765 auto *Spec
4766 = new (Mem) TemplateSpecializationType(Template, Args, CanonType,
4767 IsTypeAlias ? Underlying : QualType());
4768
4769 Types.push_back(Spec);
4770 return QualType(Spec, 0);
4771}
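Illustrative aside (simplified): the non-canonical node preserves the specialization as written, while its canonical type looks through alias templates:

template <class T> struct Box {};
template <class T> using BoxAlias = Box<T>;
BoxAlias<int> b;  // written type: the alias specialization 'BoxAlias<int>'
                  // canonical type: 'Box<int>'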
4772
4773QualType ASTContext::getCanonicalTemplateSpecializationType(
4774 TemplateName Template, ArrayRef<TemplateArgument> Args) const {
4775 assert(!Template.getAsDependentTemplateName() &&
4776 "No dependent template names here!");
4777
4778 // Look through qualified template names.
4779 if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
4780 Template = TemplateName(QTN->getTemplateDecl());
4781
4782 // Build the canonical template specialization type.
4783 TemplateName CanonTemplate = getCanonicalTemplateName(Template);
4784 SmallVector<TemplateArgument, 4> CanonArgs;
4785 unsigned NumArgs = Args.size();
4786 CanonArgs.reserve(NumArgs);
4787 for (const TemplateArgument &Arg : Args)
4788 CanonArgs.push_back(getCanonicalTemplateArgument(Arg));
4789
4790 // Determine whether this canonical template specialization type already
4791 // exists.
4792 llvm::FoldingSetNodeID ID;
4793 TemplateSpecializationType::Profile(ID, CanonTemplate,
4794 CanonArgs, *this);
4795
4796 void *InsertPos = nullptr;
4797 TemplateSpecializationType *Spec
4798 = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
4799
4800 if (!Spec) {
4801 // Allocate a new canonical template specialization type.
4802 void *Mem = Allocate((sizeof(TemplateSpecializationType) +
4803 sizeof(TemplateArgument) * NumArgs),
4804 TypeAlignment);
4805 Spec = new (Mem) TemplateSpecializationType(CanonTemplate,
4806 CanonArgs,
4807 QualType(), QualType());
4808 Types.push_back(Spec);
4809 TemplateSpecializationTypes.InsertNode(Spec, InsertPos);
4810 }
4811
4812 assert(Spec->isDependentType() &&
4813 "Non-dependent template-id type must have a canonical type");
4814 return QualType(Spec, 0);
4815}
4816
4817QualType ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword,
4818 NestedNameSpecifier *NNS,
4819 QualType NamedType,
4820 TagDecl *OwnedTagDecl) const {
4821 llvm::FoldingSetNodeID ID;
4822 ElaboratedType::Profile(ID, Keyword, NNS, NamedType, OwnedTagDecl);
4823
4824 void *InsertPos = nullptr;
4825 ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
4826 if (T)
4827 return QualType(T, 0);
4828
4829 QualType Canon = NamedType;
4830 if (!Canon.isCanonical()) {
4831 Canon = getCanonicalType(NamedType);
4832 ElaboratedType *CheckT = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
4833 assert(!CheckT && "Elaborated canonical type broken");
4834 (void)CheckT;
4835 }
4836
4837 void *Mem = Allocate(ElaboratedType::totalSizeToAlloc<TagDecl *>(!!OwnedTagDecl),
4838 TypeAlignment);
4839 T = new (Mem) ElaboratedType(Keyword, NNS, NamedType, Canon, OwnedTagDecl);
4840
4841 Types.push_back(T);
4842 ElaboratedTypes.InsertNode(T, InsertPos);
4843 return QualType(T, 0);
4844}
4845
4846QualType
4847ASTContext::getParenType(QualType InnerType) const {
4848 llvm::FoldingSetNodeID ID;
4849 ParenType::Profile(ID, InnerType);
4850
4851 void *InsertPos = nullptr;
4852 ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
4853 if (T)
4854 return QualType(T, 0);
4855
4856 QualType Canon = InnerType;
4857 if (!Canon.isCanonical()) {
4858 Canon = getCanonicalType(InnerType);
4859 ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
4860 assert(!CheckT && "Paren canonical type broken");
4861 (void)CheckT;
4862 }
4863
4864 T = new (*this, TypeAlignment) ParenType(InnerType, Canon);
4865 Types.push_back(T);
4866 ParenTypes.InsertNode(T, InsertPos);
4867 return QualType(T, 0);
4868}
4869
4870QualType
4871ASTContext::getMacroQualifiedType(QualType UnderlyingTy,
4872 const IdentifierInfo *MacroII) const {
4873 QualType Canon = UnderlyingTy;
4874 if (!Canon.isCanonical())
4875 Canon = getCanonicalType(UnderlyingTy);
4876
4877 auto *newType = new (*this, TypeAlignment)
4878 MacroQualifiedType(UnderlyingTy, Canon, MacroII);
4879 Types.push_back(newType);
4880 return QualType(newType, 0);
4881}
4882
4883QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword,
4884 NestedNameSpecifier *NNS,
4885 const IdentifierInfo *Name,
4886 QualType Canon) const {
4887 if (Canon.isNull()) {
4888 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
4889 if (CanonNNS != NNS)
4890 Canon = getDependentNameType(Keyword, CanonNNS, Name);
4891 }
4892
4893 llvm::FoldingSetNodeID ID;
4894 DependentNameType::Profile(ID, Keyword, NNS, Name);
4895
4896 void *InsertPos = nullptr;
4897 DependentNameType *T
4898 = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos);
4899 if (T)
4900 return QualType(T, 0);
4901
4902 T = new (*this, TypeAlignment) DependentNameType(Keyword, NNS, Name, Canon);
4903 Types.push_back(T);
4904 DependentNameTypes.InsertNode(T, InsertPos);
4905 return QualType(T, 0);
4906}
4907
4908QualType
4909ASTContext::getDependentTemplateSpecializationType(
4910 ElaboratedTypeKeyword Keyword,
4911 NestedNameSpecifier *NNS,
4912 const IdentifierInfo *Name,
4913 const TemplateArgumentListInfo &Args) const {
4914 // TODO: avoid this copy
4915 SmallVector<TemplateArgument, 16> ArgCopy;
4916 for (unsigned I = 0, E = Args.size(); I != E; ++I)
4917 ArgCopy.push_back(Args[I].getArgument());
4918 return getDependentTemplateSpecializationType(Keyword, NNS, Name, ArgCopy);
4919}
4920
4921QualType
4922ASTContext::getDependentTemplateSpecializationType(
4923 ElaboratedTypeKeyword Keyword,
4924 NestedNameSpecifier *NNS,
4925 const IdentifierInfo *Name,
4926 ArrayRef<TemplateArgument> Args) const {
4927 assert((!NNS || NNS->isDependent()) &&
4928 "nested-name-specifier must be dependent");
4929
4930 llvm::FoldingSetNodeID ID;
4931 DependentTemplateSpecializationType::Profile(ID, *this, Keyword, NNS,
4932 Name, Args);
4933
4934 void *InsertPos = nullptr;
4935 DependentTemplateSpecializationType *T
4936 = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
4937 if (T)
4938 return QualType(T, 0);
4939
4940 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
4941
4942 ElaboratedTypeKeyword CanonKeyword = Keyword;
4943 if (Keyword == ETK_None) CanonKeyword = ETK_Typename;
4944
4945 bool AnyNonCanonArgs = false;
4946 unsigned NumArgs = Args.size();
4947 SmallVector<TemplateArgument, 16> CanonArgs(NumArgs);
4948 for (unsigned I = 0; I != NumArgs; ++I) {
4949 CanonArgs[I] = getCanonicalTemplateArgument(Args[I]);
4950 if (!CanonArgs[I].structurallyEquals(Args[I]))
4951 AnyNonCanonArgs = true;
4952 }
4953
4954 QualType Canon;
4955 if (AnyNonCanonArgs || CanonNNS != NNS || CanonKeyword != Keyword) {
4956 Canon = getDependentTemplateSpecializationType(CanonKeyword, CanonNNS,
4957 Name,
4958 CanonArgs);
4959
4960 // Find the insert position again.
4961 DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
4962 }
4963
4964 void *Mem = Allocate((sizeof(DependentTemplateSpecializationType) +
4965 sizeof(TemplateArgument) * NumArgs),
4966 TypeAlignment);
4967 T = new (Mem) DependentTemplateSpecializationType(Keyword, NNS,
4968 Name, Args, Canon);
4969 Types.push_back(T);
4970 DependentTemplateSpecializationTypes.InsertNode(T, InsertPos);
4971 return QualType(T, 0);
4972}
4973
4974TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) {
4975 TemplateArgument Arg;
4976 if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) {
4977 QualType ArgType = getTypeDeclType(TTP);
4978 if (TTP->isParameterPack())
4979 ArgType = getPackExpansionType(ArgType, None);
4980
4981 Arg = TemplateArgument(ArgType);
4982 } else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
4983 QualType T =
4984 NTTP->getType().getNonPackExpansionType().getNonLValueExprType(*this);
4985 // For class NTTPs, ensure we include the 'const' so the type matches that
4986 // of a real template argument.
4987 // FIXME: It would be more faithful to model this as something like an
4988 // lvalue-to-rvalue conversion applied to a const-qualified lvalue.
4989 if (T->isRecordType())
4990 T.addConst();
4991 Expr *E = new (*this) DeclRefExpr(
4992 *this, NTTP, /*enclosing*/ false, T,
4993 Expr::getValueKindForType(NTTP->getType()), NTTP->getLocation());
4994
4995 if (NTTP->isParameterPack())
4996 E = new (*this) PackExpansionExpr(DependentTy, E, NTTP->getLocation(),
4997 None);
4998 Arg = TemplateArgument(E);
4999 } else {
5000 auto *TTP = cast<TemplateTemplateParmDecl>(Param);
5001 if (TTP->isParameterPack())
5002 Arg = TemplateArgument(TemplateName(TTP), Optional<unsigned>());
5003 else
5004 Arg = TemplateArgument(TemplateName(TTP));
5005 }
5006
5007 if (Param->isTemplateParameterPack())
5008 Arg = TemplateArgument::CreatePackCopy(*this, Arg);
5009
5010 return Arg;
5011}
5012
5013void
5014ASTContext::getInjectedTemplateArgs(const TemplateParameterList *Params,
5015 SmallVectorImpl<TemplateArgument> &Args) {
5016 Args.reserve(Args.size() + Params->size());
5017
5018 for (NamedDecl *Param : *Params)
5019 Args.push_back(getInjectedTemplateArg(Param));
5020}
5021
5022QualType ASTContext::getPackExpansionType(QualType Pattern,
5023 Optional<unsigned> NumExpansions,
5024 bool ExpectPackInType) {
5025 assert((!ExpectPackInType || Pattern->containsUnexpandedParameterPack()) &&
5026 "Pack expansions must expand one or more parameter packs");
5027
5028 llvm::FoldingSetNodeID ID;
5029 PackExpansionType::Profile(ID, Pattern, NumExpansions);
5030
5031 void *InsertPos = nullptr;
5032 PackExpansionType *T = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
5033 if (T)
5034 return QualType(T, 0);
5035
5036 QualType Canon;
5037 if (!Pattern.isCanonical()) {
5038 Canon = getPackExpansionType(getCanonicalType(Pattern), NumExpansions,
5039 /*ExpectPackInType=*/false);
5040
5041 // Find the insert position again, in case we inserted an element into
5042 // PackExpansionTypes and invalidated our insert position.
5043 PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
5044 }
5045
5046 T = new (*this, TypeAlignment)
5047 PackExpansionType(Pattern, Canon, NumExpansions);
5048 Types.push_back(T);
5049 PackExpansionTypes.InsertNode(T, InsertPos);
5050 return QualType(T, 0);
5051}
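Illustrative aside: a source-level example of the pack-expansion types built here:

template <class... Ts>
struct Holder {
  // 'Ts...' is a PackExpansionType whose pattern is 'Ts'.
  static void take(Ts... args);
};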
5052
5053/// CmpProtocolNames - Comparison predicate for sorting protocols
5054/// alphabetically.
5055static int CmpProtocolNames(ObjCProtocolDecl *const *LHS,
5056 ObjCProtocolDecl *const *RHS) {
5057 return DeclarationName::compare((*LHS)->getDeclName(), (*RHS)->getDeclName());
5058}
5059
5060static bool areSortedAndUniqued(ArrayRef<ObjCProtocolDecl *> Protocols) {
5061 if (Protocols.empty()) return true;
5062
5063 if (Protocols[0]->getCanonicalDecl() != Protocols[0])
5064 return false;
5065
5066 for (unsigned i = 1; i != Protocols.size(); ++i)
5067 if (CmpProtocolNames(&Protocols[i - 1], &Protocols[i]) >= 0 ||
5068 Protocols[i]->getCanonicalDecl() != Protocols[i])
5069 return false;
5070 return true;
5071}
5072
5073static void
5074SortAndUniqueProtocols(SmallVectorImpl<ObjCProtocolDecl *> &Protocols) {
5075 // Sort protocols, keyed by name.
5076 llvm::array_pod_sort(Protocols.begin(), Protocols.end(), CmpProtocolNames);
5077
5078 // Canonicalize.
5079 for (ObjCProtocolDecl *&P : Protocols)
5080 P = P->getCanonicalDecl();
5081
5082 // Remove duplicates.
5083 auto ProtocolsEnd = std::unique(Protocols.begin(), Protocols.end());
5084 Protocols.erase(ProtocolsEnd, Protocols.end());
5085}
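Illustrative aside: the same sort / canonicalize / unique shape on a plain container, with std::string standing in for ObjCProtocolDecl*:

#include <algorithm>
#include <string>
#include <vector>

void sortAndUnique(std::vector<std::string> &Names) {
  std::sort(Names.begin(), Names.end());  // sort, keyed by name
  // (the canonicalization step is a no-op here: strings are their own
  // canonical form, unlike redeclared protocols)
  Names.erase(std::unique(Names.begin(), Names.end()), Names.end());  // dedupe
}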
5086
5087QualType ASTContext::getObjCObjectType(QualType BaseType,
5088 ObjCProtocolDecl * const *Protocols,
5089 unsigned NumProtocols) const {
5090 return getObjCObjectType(BaseType, {},
5091 llvm::makeArrayRef(Protocols, NumProtocols),
5092 /*isKindOf=*/false);
5093}
5094
5095QualType ASTContext::getObjCObjectType(
5096 QualType baseType,
5097 ArrayRef<QualType> typeArgs,
5098 ArrayRef<ObjCProtocolDecl *> protocols,
5099 bool isKindOf) const {
5100 // If the base type is an interface and there aren't any protocols or
5101 // type arguments to add, then the interface type will do just fine.
5102 if (typeArgs.empty() && protocols.empty() && !isKindOf &&
5103 isa<ObjCInterfaceType>(baseType))
5104 return baseType;
5105
5106 // Look in the folding set for an existing type.
5107 llvm::FoldingSetNodeID ID;
5108 ObjCObjectTypeImpl::Profile(ID, baseType, typeArgs, protocols, isKindOf);
5109 void *InsertPos = nullptr;
5110 if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos))
5111 return QualType(QT, 0);
5112
5113 // Determine the type arguments to be used for canonicalization,
5114 // which may be explicitly specified here or written on the base
5115 // type.
5116 ArrayRef<QualType> effectiveTypeArgs = typeArgs;
5117 if (effectiveTypeArgs.empty()) {
5118 if (const auto *baseObject = baseType->getAs<ObjCObjectType>())
5119 effectiveTypeArgs = baseObject->getTypeArgs();
5120 }
5121
5122 // Build the canonical type, which has the canonical base type and a
5123 // sorted-and-uniqued list of protocols and the type arguments
5124 // canonicalized.
5125 QualType canonical;
5126 bool typeArgsAreCanonical = std::all_of(effectiveTypeArgs.begin(),
5127 effectiveTypeArgs.end(),
5128 [&](QualType type) {
5129 return type.isCanonical();
5130 });
5131 bool protocolsSorted = areSortedAndUniqued(protocols);
5132 if (!typeArgsAreCanonical || !protocolsSorted || !baseType.isCanonical()) {
5133 // Determine the canonical type arguments.
5134 ArrayRef<QualType> canonTypeArgs;
5135 SmallVector<QualType, 4> canonTypeArgsVec;
5136 if (!typeArgsAreCanonical) {
5137 canonTypeArgsVec.reserve(effectiveTypeArgs.size());
5138 for (auto typeArg : effectiveTypeArgs)
5139 canonTypeArgsVec.push_back(getCanonicalType(typeArg));
5140 canonTypeArgs = canonTypeArgsVec;
5141 } else {
5142 canonTypeArgs = effectiveTypeArgs;
5143 }
5144
5145 ArrayRef<ObjCProtocolDecl *> canonProtocols;
5146 SmallVector<ObjCProtocolDecl*, 8> canonProtocolsVec;
5147 if (!protocolsSorted) {
5148 canonProtocolsVec.append(protocols.begin(), protocols.end());
5149 SortAndUniqueProtocols(canonProtocolsVec);
5150 canonProtocols = canonProtocolsVec;
5151 } else {
5152 canonProtocols = protocols;
5153 }
5154
5155 canonical = getObjCObjectType(getCanonicalType(baseType), canonTypeArgs,
5156 canonProtocols, isKindOf);
5157
5158 // Regenerate InsertPos.
5159 ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos);
5160 }
5161
5162 unsigned size = sizeof(ObjCObjectTypeImpl);
5163 size += typeArgs.size() * sizeof(QualType);
5164 size += protocols.size() * sizeof(ObjCProtocolDecl *);
5165 void *mem = Allocate(size, TypeAlignment);
5166 auto *T =
5167 new (mem) ObjCObjectTypeImpl(canonical, baseType, typeArgs, protocols,
5168 isKindOf);
5169
5170 Types.push_back(T);
5171 ObjCObjectTypes.InsertNode(T, InsertPos);
5172 return QualType(T, 0);
5173}
5174
5175/// Apply Objective-C protocol qualifiers to the given type.
5176/// If this is for the canonical type of a type parameter, we can apply
5177/// protocol qualifiers on the ObjCObjectPointerType.
5178QualType
5179ASTContext::applyObjCProtocolQualifiers(QualType type,
5180 ArrayRef<ObjCProtocolDecl *> protocols, bool &hasError,
5181 bool allowOnPointerType) const {
5182 hasError = false;
5183
5184 if (const auto *objT = dyn_cast<ObjCTypeParamType>(type.getTypePtr())) {
5185 return getObjCTypeParamType(objT->getDecl(), protocols);
5186 }
5187
5188 // Apply protocol qualifiers to ObjCObjectPointerType.
5189 if (allowOnPointerType) {
5190 if (const auto *objPtr =
5191 dyn_cast<ObjCObjectPointerType>(type.getTypePtr())) {
5192 const ObjCObjectType *objT = objPtr->getObjectType();
5193 // Merge protocol lists and construct ObjCObjectType.
5194 SmallVector<ObjCProtocolDecl*, 8> protocolsVec;
5195 protocolsVec.append(objT->qual_begin(),
5196 objT->qual_end());
5197 protocolsVec.append(protocols.begin(), protocols.end());
5198 ArrayRef<ObjCProtocolDecl *> protocols = protocolsVec;
5199 type = getObjCObjectType(
5200 objT->getBaseType(),
5201 objT->getTypeArgsAsWritten(),
5202 protocols,
5203 objT->isKindOfTypeAsWritten());
5204 return getObjCObjectPointerType(type);
5205 }
5206 }
5207
5208 // Apply protocol qualifiers to ObjCObjectType.
5209 if (const auto *objT = dyn_cast<ObjCObjectType>(type.getTypePtr())){
5210 // FIXME: Check for protocols to which the class type is already
5211 // known to conform.
5212
5213 return getObjCObjectType(objT->getBaseType(),
5214 objT->getTypeArgsAsWritten(),
5215 protocols,
5216 objT->isKindOfTypeAsWritten());
5217 }
5218
5219 // If the canonical type is ObjCObjectType, ...
5220 if (type->isObjCObjectType()) {
5221 // Silently overwrite any existing protocol qualifiers.
5222 // TODO: determine whether that's the right thing to do.
5223
5224 // FIXME: Check for protocols to which the class type is already
5225 // known to conform.
5226 return getObjCObjectType(type, {}, protocols, false);
5227 }
5228
5229 // id<protocol-list>
5230 if (type->isObjCIdType()) {
5231 const auto *objPtr = type->castAs<ObjCObjectPointerType>();
5232 type = getObjCObjectType(ObjCBuiltinIdTy, {}, protocols,
5233 objPtr->isKindOfType());
5234 return getObjCObjectPointerType(type);
5235 }
5236
5237 // Class<protocol-list>
5238 if (type->isObjCClassType()) {
5239 const auto *objPtr = type->castAs<ObjCObjectPointerType>();
5240 type = getObjCObjectType(ObjCBuiltinClassTy, {}, protocols,
5241 objPtr->isKindOfType());
5242 return getObjCObjectPointerType(type);
5243 }
5244
5245 hasError = true;
5246 return type;
5247}
5248
5249QualType
5250ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl,
5251 ArrayRef<ObjCProtocolDecl *> protocols) const {
5252 // Look in the folding set for an existing type.
5253 llvm::FoldingSetNodeID ID;
5254 ObjCTypeParamType::Profile(ID, Decl, Decl->getUnderlyingType(), protocols);
5255 void *InsertPos = nullptr;
5256 if (ObjCTypeParamType *TypeParam =
5257 ObjCTypeParamTypes.FindNodeOrInsertPos(ID, InsertPos))
5258 return QualType(TypeParam, 0);
5259
5260 // We canonicalize to the underlying type.
5261 QualType Canonical = getCanonicalType(Decl->getUnderlyingType());
5262 if (!protocols.empty()) {
5263 // Apply the protocol qualifiers.
5264 bool hasError;
5265 Canonical = getCanonicalType(applyObjCProtocolQualifiers(
5266 Canonical, protocols, hasError, true /*allowOnPointerType*/));
5267 assert(!hasError && "Error when applying protocol qualifiers to bound type");
5268 }
5269
5270 unsigned size = sizeof(ObjCTypeParamType);
5271 size += protocols.size() * sizeof(ObjCProtocolDecl *);
5272 void *mem = Allocate(size, TypeAlignment);
5273 auto *newType = new (mem) ObjCTypeParamType(Decl, Canonical, protocols);
5274
5275 Types.push_back(newType);
5276 ObjCTypeParamTypes.InsertNode(newType, InsertPos);
5277 return QualType(newType, 0);
5278}
5279
5280void ASTContext::adjustObjCTypeParamBoundType(const ObjCTypeParamDecl *Orig,
5281 ObjCTypeParamDecl *New) const {
5282 New->setTypeSourceInfo(getTrivialTypeSourceInfo(Orig->getUnderlyingType()));
5283 // Update TypeForDecl after updating TypeSourceInfo.
5284 auto NewTypeParamTy = cast<ObjCTypeParamType>(New->getTypeForDecl());
5285 SmallVector<ObjCProtocolDecl *, 8> protocols;
5286 protocols.append(NewTypeParamTy->qual_begin(), NewTypeParamTy->qual_end());
5287 QualType UpdatedTy = getObjCTypeParamType(New, protocols);
5288 New->setTypeForDecl(UpdatedTy.getTypePtr());
5289}
5290
5291/// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's
5292/// protocol list adopt all protocols in QT's qualified-id protocol
5293/// list.
5294bool ASTContext::ObjCObjectAdoptsQTypeProtocols(QualType QT,
5295 ObjCInterfaceDecl *IC) {
5296 if (!QT->isObjCQualifiedIdType())
5297 return false;
5298
5299 if (const auto *OPT = QT->getAs<ObjCObjectPointerType>()) {
5300 // If both the right and left sides have qualifiers.
5301 for (auto *Proto : OPT->quals()) {
5302 if (!IC->ClassImplementsProtocol(Proto, false))
5303 return false;
5304 }
5305 return true;
5306 }
5307 return false;
5308}
5309
5310/// QIdProtocolsAdoptObjCObjectProtocols - Checks that protocols in
5311/// QT's qualified-id protocol list adopt all protocols in IDecl's list
5312/// of protocols.
5313bool ASTContext::QIdProtocolsAdoptObjCObjectProtocols(QualType QT,
5314 ObjCInterfaceDecl *IDecl) {
5315 if (!QT->isObjCQualifiedIdType())
5316 return false;
5317 const auto *OPT = QT->getAs<ObjCObjectPointerType>();
5318 if (!OPT)
5319 return false;
5320 if (!IDecl->hasDefinition())
5321 return false;
5322 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> InheritedProtocols;
5323 CollectInheritedProtocols(IDecl, InheritedProtocols);
5324 if (InheritedProtocols.empty())
5325 return false;
5326 // Check that if every protocol in list of id<plist> conforms to a protocol
5327 // of IDecl's, then bridge casting is ok.
5328 bool Conforms = false;
5329 for (auto *Proto : OPT->quals()) {
5330 Conforms = false;
5331 for (auto *PI : InheritedProtocols) {
5332 if (ProtocolCompatibleWithProtocol(Proto, PI)) {
5333 Conforms = true;
5334 break;
5335 }
5336 }
5337 if (!Conforms)
5338 break;
5339 }
5340 if (Conforms)
5341 return true;
5342
5343 for (auto *PI : InheritedProtocols) {
5344 // If both the right and left sides have qualifiers.
5345 bool Adopts = false;
5346 for (auto *Proto : OPT->quals()) {
5347 // return 'true' if 'PI' is in the inheritance hierarchy of Proto
5348 if ((Adopts = ProtocolCompatibleWithProtocol(PI, Proto)))
5349 break;
5350 }
5351 if (!Adopts)
5352 return false;
5353 }
5354 return true;
5355}
5356
5357/// getObjCObjectPointerType - Return a ObjCObjectPointerType type for
5358/// the given object type.
5359QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const {
5360 llvm::FoldingSetNodeID ID;
5361 ObjCObjectPointerType::Profile(ID, ObjectT);
5362
5363 void *InsertPos = nullptr;
5364 if (ObjCObjectPointerType *QT =
5365 ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
5366 return QualType(QT, 0);
5367
5368 // Find the canonical object type.
5369 QualType Canonical;
5370 if (!ObjectT.isCanonical()) {
5371 Canonical = getObjCObjectPointerType(getCanonicalType(ObjectT));
5372
5373 // Regenerate InsertPos.
5374 ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
5375 }
5376
5377 // No match.
5378 void *Mem = Allocate(sizeof(ObjCObjectPointerType), TypeAlignment);
5379 auto *QType =
5380 new (Mem) ObjCObjectPointerType(Canonical, ObjectT);
5381
5382 Types.push_back(QType);
5383 ObjCObjectPointerTypes.InsertNode(QType, InsertPos);
5384 return QualType(QType, 0);
5385}
5386
5387/// getObjCInterfaceType - Return the unique reference to the type for the
5388/// specified ObjC interface decl. The list of protocols is optional.
5389QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl,
5390 ObjCInterfaceDecl *PrevDecl) const {
5391 if (Decl->TypeForDecl)
5392 return QualType(Decl->TypeForDecl, 0);
5393
5394 if (PrevDecl) {
5395 assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl");
5396 Decl->TypeForDecl = PrevDecl->TypeForDecl;
5397 return QualType(PrevDecl->TypeForDecl, 0);
5398 }
5399
5400 // Prefer the definition, if there is one.
5401 if (const ObjCInterfaceDecl *Def = Decl->getDefinition())
5402 Decl = Def;
5403
5404 void *Mem = Allocate(sizeof(ObjCInterfaceType), TypeAlignment);
5405 auto *T = new (Mem) ObjCInterfaceType(Decl);
5406 Decl->TypeForDecl = T;
5407 Types.push_back(T);
5408 return QualType(T, 0);
5409}
5410
5411/// getTypeOfExprType - Unlike many "get<Type>" functions, we can't unique
5412/// TypeOfExprType ASTs (since expressions are never shared). For example,
5413/// multiple declarations that refer to "typeof(x)" all contain different
5414/// DeclRefExprs. This doesn't affect the type checker, since it operates
5415/// on canonical types (which are always unique).
5416QualType ASTContext::getTypeOfExprType(Expr *tofExpr) const {
5417 TypeOfExprType *toe;
5418 if (tofExpr->isTypeDependent()) {
5419 llvm::FoldingSetNodeID ID;
5420 DependentTypeOfExprType::Profile(ID, *this, tofExpr);
5421
5422 void *InsertPos = nullptr;
5423 DependentTypeOfExprType *Canon
5424 = DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos);
5425 if (Canon) {
5426 // We already have a "canonical" version of an identical, dependent
5427 // typeof(expr) type. Use that as our canonical type.
5428 toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr,
5429 QualType((TypeOfExprType*)Canon, 0));
5430 } else {
5431 // Build a new, canonical typeof(expr) type.
5432 Canon
5433 = new (*this, TypeAlignment) DependentTypeOfExprType(*this, tofExpr);
5434 DependentTypeOfExprTypes.InsertNode(Canon, InsertPos);
5435 toe = Canon;
5436 }
5437 } else {
5438 QualType Canonical = getCanonicalType(tofExpr->getType());
5439 toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr, Canonical);
5440 }
5441 Types.push_back(toe);
5442 return QualType(toe, 0);
5443}
5444
5445/// getTypeOfType - Unlike many "get<Type>" functions, we don't unique
5446/// TypeOfType nodes. The only motivation to unique these nodes would be
5447/// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be
5448/// an issue. This doesn't affect the type checker, since it operates
5449/// on canonical types (which are always unique).
5450QualType ASTContext::getTypeOfType(QualType tofType) const {
5451 QualType Canonical = getCanonicalType(tofType);
5452 auto *tot = new (*this, TypeAlignment) TypeOfType(tofType, Canonical);
5453 Types.push_back(tot);
5454 return QualType(tot, 0);
5455}
5456
5457/// Unlike many "get<Type>" functions, we don't unique DecltypeType
5458/// nodes. This would never be helpful, since each such type has its own
5459/// expression, and would not give a significant memory saving, since there
5460/// is an Expr tree under each such type.
5461QualType ASTContext::getDecltypeType(Expr *e, QualType UnderlyingType) const {
5462 DecltypeType *dt;
5463
5464 // C++11 [temp.type]p2:
5465 // If an expression e involves a template parameter, decltype(e) denotes a
5466 // unique dependent type. Two such decltype-specifiers refer to the same
5467 // type only if their expressions are equivalent (14.5.6.1).
5468 if (e->isInstantiationDependent()) {
5469 llvm::FoldingSetNodeID ID;
5470 DependentDecltypeType::Profile(ID, *this, e);
5471
5472 void *InsertPos = nullptr;
5473 DependentDecltypeType *Canon
5474 = DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos);
5475 if (!Canon) {
5476 // Build a new, canonical decltype(expr) type.
5477 Canon = new (*this, TypeAlignment) DependentDecltypeType(*this, e);
5478 DependentDecltypeTypes.InsertNode(Canon, InsertPos);
5479 }
5480 dt = new (*this, TypeAlignment)
5481 DecltypeType(e, UnderlyingType, QualType((DecltypeType *)Canon, 0));
5482 } else {
5483 dt = new (*this, TypeAlignment)
5484 DecltypeType(e, UnderlyingType, getCanonicalType(UnderlyingType));
5485 }
5486 Types.push_back(dt);
5487 return QualType(dt, 0);
5488}
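Illustrative aside: an example of the C++11 [temp.type]p2 rule cited above — the two decltype-specifiers involve a template parameter and denote the same dependent type only because their expressions are equivalent:

template <class T>
void f(T t, decltype(t + t) a, decltype(t + t) b);  // a and b have one type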
5489
5490/// getUnaryTransformationType - We don't unique these, since the memory
5491/// savings are minimal and these are rare.
5492QualType ASTContext::getUnaryTransformType(QualType BaseType,
5493 QualType UnderlyingType,
5494 UnaryTransformType::UTTKind Kind)
5495 const {
5496 UnaryTransformType *ut = nullptr;
5497
5498 if (BaseType->isDependentType()) {
5499 // Look in the folding set for an existing type.
5500 llvm::FoldingSetNodeID ID;
5501 DependentUnaryTransformType::Profile(ID, getCanonicalType(BaseType), Kind);
5502
5503 void *InsertPos = nullptr;
5504 DependentUnaryTransformType *Canon
5505 = DependentUnaryTransformTypes.FindNodeOrInsertPos(ID, InsertPos);
5506
5507 if (!Canon) {
5508 // Build a new, canonical __underlying_type(type) type.
5509 Canon = new (*this, TypeAlignment)
5510 DependentUnaryTransformType(*this, getCanonicalType(BaseType),
5511 Kind);
5512 DependentUnaryTransformTypes.InsertNode(Canon, InsertPos);
5513 }
5514 ut = new (*this, TypeAlignment) UnaryTransformType (BaseType,
5515 QualType(), Kind,
5516 QualType(Canon, 0));
5517 } else {
5518 QualType CanonType = getCanonicalType(UnderlyingType);
5519 ut = new (*this, TypeAlignment) UnaryTransformType (BaseType,
5520 UnderlyingType, Kind,
5521 CanonType);
5522 }
5523 Types.push_back(ut);
5524 return QualType(ut, 0);
5525}
5526
5527/// getAutoType - Return the uniqued reference to the 'auto' type which has been
5528/// deduced to the given type, or to the canonical undeduced 'auto' type, or the
5529/// canonical deduced-but-dependent 'auto' type.
5530QualType
5531ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword,
5532 bool IsDependent, bool IsPack,
5533 ConceptDecl *TypeConstraintConcept,
5534 ArrayRef<TemplateArgument> TypeConstraintArgs) const {
5535 assert((!IsPack || IsDependent) && "only use IsPack for a dependent pack");
5536 if (DeducedType.isNull() && Keyword == AutoTypeKeyword::Auto &&
5537 !TypeConstraintConcept && !IsDependent)
5538 return getAutoDeductType();
5539
5540 // Look in the folding set for an existing type.
5541 void *InsertPos = nullptr;
5542 llvm::FoldingSetNodeID ID;
5543 AutoType::Profile(ID, *this, DeducedType, Keyword, IsDependent,
5544 TypeConstraintConcept, TypeConstraintArgs);
5545 if (AutoType *AT = AutoTypes.FindNodeOrInsertPos(ID, InsertPos))
5546 return QualType(AT, 0);
5547
5548 void *Mem = Allocate(sizeof(AutoType) +
5549 sizeof(TemplateArgument) * TypeConstraintArgs.size(),
5550 TypeAlignment);
5551 auto *AT = new (Mem) AutoType(
5552 DeducedType, Keyword,
5553 (IsDependent ? TypeDependence::DependentInstantiation
5554 : TypeDependence::None) |
5555 (IsPack ? TypeDependence::UnexpandedPack : TypeDependence::None),
5556 TypeConstraintConcept, TypeConstraintArgs);
5557 Types.push_back(AT);
5558 if (InsertPos)
5559 AutoTypes.InsertNode(AT, InsertPos);
5560 return QualType(AT, 0);
5561}
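Illustrative aside (a rough mapping; the constrained form requires C++20):

auto x = 42;             // deduced AutoType; the deduced type is int
decltype(auto) y = x;    // AutoTypeKeyword::DecltypeAuto
template <class T> concept Small = sizeof(T) <= 8;
Small auto z = 1;        // constrained auto: TypeConstraintConcept is Small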
5562
5563/// Return the uniqued reference to the deduced template specialization type
5564/// which has been deduced to the given type, or to the canonical undeduced
5565/// such type, or the canonical deduced-but-dependent such type.
5566QualType ASTContext::getDeducedTemplateSpecializationType(
5567 TemplateName Template, QualType DeducedType, bool IsDependent) const {
5568 // Look in the folding set for an existing type.
5569 void *InsertPos = nullptr;
5570 llvm::FoldingSetNodeID ID;
5571 DeducedTemplateSpecializationType::Profile(ID, Template, DeducedType,
5572 IsDependent);
5573 if (DeducedTemplateSpecializationType *DTST =
5574 DeducedTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos))
5575 return QualType(DTST, 0);
5576
5577 auto *DTST = new (*this, TypeAlignment)
5578 DeducedTemplateSpecializationType(Template, DeducedType, IsDependent);
5579 Types.push_back(DTST);
5580 if (InsertPos)
5581 DeducedTemplateSpecializationTypes.InsertNode(DTST, InsertPos);
5582 return QualType(DTST, 0);
5583}
5584
5585/// getAtomicType - Return the uniqued reference to the atomic type for
5586/// the given value type.
5587QualType ASTContext::getAtomicType(QualType T) const {
5588 // Unique pointers, to guarantee there is only one pointer of a particular
5589 // structure.
5590 llvm::FoldingSetNodeID ID;
5591 AtomicType::Profile(ID, T);
5592
5593 void *InsertPos = nullptr;
5594 if (AtomicType *AT = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos))
5595 return QualType(AT, 0);
5596
5597 // If the atomic value type isn't canonical, this won't be a canonical type
5598 // either, so fill in the canonical type field.
5599 QualType Canonical;
5600 if (!T.isCanonical()) {
5601 Canonical = getAtomicType(getCanonicalType(T));
5602
5603 // Get the new insert position for the node we care about.
5604 AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos);
5605 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
5606 }
5607 auto *New = new (*this, TypeAlignment) AtomicType(T, Canonical);
5608 Types.push_back(New);
5609 AtomicTypes.InsertNode(New, InsertPos);
5610 return QualType(New, 0);
5611}
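Illustrative aside: the value types wrapped here use C11 _Atomic syntax, which Clang also accepts in C++ as an extension:

_Atomic(int) counter;       // AtomicType whose value type is int
_Atomic(long long) total;   // uniqued separately, keyed by the value type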
5612
5613/// getAutoDeductType - Get type pattern for deducing against 'auto'.
5614QualType ASTContext::getAutoDeductType() const {
5615 if (AutoDeductTy.isNull())
5616 AutoDeductTy = QualType(new (*this, TypeAlignment)
5617 AutoType(QualType(), AutoTypeKeyword::Auto,
5618 TypeDependence::None,
5619 /*concept*/ nullptr, /*args*/ {}),
5620 0);
5621 return AutoDeductTy;
5622}
5623
5624/// getAutoRRefDeductType - Get type pattern for deducing against 'auto &&'.
5625QualType ASTContext::getAutoRRefDeductType() const {
5626 if (AutoRRefDeductTy.isNull())
5627 AutoRRefDeductTy = getRValueReferenceType(getAutoDeductType());
5628 assert(!AutoRRefDeductTy.isNull() && "can't build 'auto &&' pattern");
5629 return AutoRRefDeductTy;
5630}
5631
5632/// getTagDeclType - Return the unique reference to the type for the
5633/// specified TagDecl (struct/union/class/enum) decl.
5634QualType ASTContext::getTagDeclType(const TagDecl *Decl) const {
5635 assert(Decl);
5636 // FIXME: What is the design on getTagDeclType when it requires casting
5637 // away const? mutable?
5638 return getTypeDeclType(const_cast<TagDecl*>(Decl));
5639}
5640
5641/// getSizeType - Return the unique type for "size_t" (C99 7.17), the result
5642/// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and
5643/// needs to agree with the definition in <stddef.h>.
5644CanQualType ASTContext::getSizeType() const {
5645 return getFromTargetType(Target->getSizeType());
5646}
5647
5648/// Return the unique signed counterpart of the integer type
5649/// corresponding to size_t.
5650CanQualType ASTContext::getSignedSizeType() const {
5651 return getFromTargetType(Target->getSignedSizeType());
5652}
5653
5654/// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5).
5655CanQualType ASTContext::getIntMaxType() const {
5656 return getFromTargetType(Target->getIntMaxType());
5657}
5658
5659/// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5).
5660CanQualType ASTContext::getUIntMaxType() const {
5661 return getFromTargetType(Target->getUIntMaxType());
5662}
5663
5664/// getSignedWCharType - Return the type of "signed wchar_t".
5665/// Used when in C++, as a GCC extension.
5666QualType ASTContext::getSignedWCharType() const {
5667 // FIXME: derive from "Target" ?
5668 return WCharTy;
5669}
5670
5671/// getUnsignedWCharType - Return the type of "unsigned wchar_t".
5672/// Used when in C++, as a GCC extension.
5673QualType ASTContext::getUnsignedWCharType() const {
5674 // FIXME: derive from "Target" ?
5675 return UnsignedIntTy;
5676}
5677
5678QualType ASTContext::getIntPtrType() const {
5679 return getFromTargetType(Target->getIntPtrType());
5680}
5681
5682QualType ASTContext::getUIntPtrType() const {
5683 return getCorrespondingUnsignedType(getIntPtrType());
5684}
5685
5686/// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17)
5687/// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9).
5688QualType ASTContext::getPointerDiffType() const {
5689 return getFromTargetType(Target->getPtrDiffType(0));
5690}
5691
5692/// Return the unique unsigned counterpart of "ptrdiff_t"
5693/// integer type. The standard (C11 7.21.6.1p7) refers to this type
5694/// in the definition of %tu format specifier.
5695QualType ASTContext::getUnsignedPointerDiffType() const {
5696 return getFromTargetType(Target->getUnsignedPtrDiffType(0));
5697}
5698
5699/// Return the unique type for "pid_t" defined in
5700/// <sys/types.h>. We need this to compute the correct type for vfork().
5701QualType ASTContext::getProcessIDType() const {
5702 return getFromTargetType(Target->getProcessIDType());
5703}
5704
5705//===----------------------------------------------------------------------===//
5706// Type Operators
5707//===----------------------------------------------------------------------===//
5708
5709CanQualType ASTContext::getCanonicalParamType(QualType T) const {
5710 // Push qualifiers into arrays, and then discard any remaining
5711 // qualifiers.
5712 T = getCanonicalType(T);
5713 T = getVariableArrayDecayedType(T);
5714 const Type *Ty = T.getTypePtr();
5715 QualType Result;
5716 if (isa<ArrayType>(Ty)) {
5717 Result = getArrayDecayedType(QualType(Ty,0));
5718 } else if (isa<FunctionType>(Ty)) {
5719 Result = getPointerType(QualType(Ty, 0));
5720 } else {
5721 Result = QualType(Ty, 0);
5722 }
5723
5724 return CanQualType::CreateUnsafe(Result);
5725}
5726
5727QualType ASTContext::getUnqualifiedArrayType(QualType type,
5728 Qualifiers &quals) {
5729 SplitQualType splitType = type.getSplitUnqualifiedType();
5730
5731 // FIXME: getSplitUnqualifiedType() actually walks all the way to
5732 // the unqualified desugared type and then drops it on the floor.
5733 // We then have to strip that sugar back off with
5734 // getUnqualifiedDesugaredType(), which is silly.
5735 const auto *AT =
5736 dyn_cast<ArrayType>(splitType.Ty->getUnqualifiedDesugaredType());
5737
5738 // If we don't have an array, just use the results in splitType.
5739 if (!AT) {
5740 quals = splitType.Quals;
5741 return QualType(splitType.Ty, 0);
5742 }
5743
5744 // Otherwise, recurse on the array's element type.
5745 QualType elementType = AT->getElementType();
5746 QualType unqualElementType = getUnqualifiedArrayType(elementType, quals);
5747
5748 // If that didn't change the element type, AT has no qualifiers, so we
5749 // can just use the results in splitType.
5750 if (elementType == unqualElementType) {
5751 assert(quals.empty()); // from the recursive call
5752 quals = splitType.Quals;
5753 return QualType(splitType.Ty, 0);
5754 }
5755
5756 // Otherwise, add in the qualifiers from the outermost type, then
5757 // build the type back up.
5758 quals.addConsistentQualifiers(splitType.Quals);
5759
5760 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) {
5761 return getConstantArrayType(unqualElementType, CAT->getSize(),
5762 CAT->getSizeExpr(), CAT->getSizeModifier(), 0);
5763 }
5764
5765 if (const auto *IAT = dyn_cast<IncompleteArrayType>(AT)) {
5766 return getIncompleteArrayType(unqualElementType, IAT->getSizeModifier(), 0);
5767 }
5768
5769 if (const auto *VAT = dyn_cast<VariableArrayType>(AT)) {
5770 return getVariableArrayType(unqualElementType,
5771 VAT->getSizeExpr(),
5772 VAT->getSizeModifier(),
5773 VAT->getIndexTypeCVRQualifiers(),
5774 VAT->getBracketsRange());
5775 }
5776
5777 const auto *DSAT = cast<DependentSizedArrayType>(AT);
5778 return getDependentSizedArrayType(unqualElementType, DSAT->getSizeExpr(),
5779 DSAT->getSizeModifier(), 0,
5780 SourceRange());
5781}
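Illustrative aside: the recursion strips qualifiers from the innermost element type and reports them through 'quals', e.g.:

// For 'const volatile int[4][2]' the function rebuilds 'int[4][2]' and
// returns quals = {const, volatile}.
using Qualified   = const volatile int[4][2];
using Unqualified = int[4][2];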
5782
5783/// Attempt to unwrap two types that may both be array types with the same bound
5784/// (or both be array types of unknown bound) for the purpose of comparing the
5785/// cv-decomposition of two types per C++ [conv.qual].
5786void ASTContext::UnwrapSimilarArrayTypes(QualType &T1, QualType &T2) {
5787 while (true) {
5788 auto *AT1 = getAsArrayType(T1);
5789 if (!AT1)
5790 return;
5791
5792 auto *AT2 = getAsArrayType(T2);
5793 if (!AT2)
5794 return;
5795
5796 // If we don't have two array types with the same constant bound nor two
5797 // incomplete array types, we've unwrapped everything we can.
5798 if (auto *CAT1 = dyn_cast<ConstantArrayType>(AT1)) {
5799 auto *CAT2 = dyn_cast<ConstantArrayType>(AT2);
5800 if (!CAT2 || CAT1->getSize() != CAT2->getSize())
5801 return;
5802 } else if (!isa<IncompleteArrayType>(AT1) ||
5803 !isa<IncompleteArrayType>(AT2)) {
5804 return;
5805 }
5806
5807 T1 = AT1->getElementType();
5808 T2 = AT2->getElementType();
5809 }
5810}
5811
5812/// Attempt to unwrap two types that may be similar (C++ [conv.qual]).
5813///
5814/// If T1 and T2 are both pointer types of the same kind, or both array types
5815/// with the same bound, unwraps layers from T1 and T2 until a pointer type is
5816/// unwrapped. Top-level qualifiers on T1 and T2 are ignored.
5817///
5818/// This function will typically be called in a loop that successively
5819/// "unwraps" pointer and pointer-to-member types to compare them at each
5820/// level.
5821///
5822/// \return \c true if a pointer type was unwrapped, \c false if we reached a
5823/// pair of types that can't be unwrapped further.
5824bool ASTContext::UnwrapSimilarTypes(QualType &T1, QualType &T2) {
5825 UnwrapSimilarArrayTypes(T1, T2);
5826
5827 const auto *T1PtrType = T1->getAs<PointerType>();
5828 const auto *T2PtrType = T2->getAs<PointerType>();
5829 if (T1PtrType && T2PtrType) {
5830 T1 = T1PtrType->getPointeeType();
5831 T2 = T2PtrType->getPointeeType();
5832 return true;
5833 }
5834
5835 const auto *T1MPType = T1->getAs<MemberPointerType>();
5836 const auto *T2MPType = T2->getAs<MemberPointerType>();
5837 if (T1MPType && T2MPType &&
5838 hasSameUnqualifiedType(QualType(T1MPType->getClass(), 0),
5839 QualType(T2MPType->getClass(), 0))) {
5840 T1 = T1MPType->getPointeeType();
5841 T2 = T2MPType->getPointeeType();
5842 return true;
5843 }
5844
5845 if (getLangOpts().ObjC) {
5846 const auto *T1OPType = T1->getAs<ObjCObjectPointerType>();
5847 const auto *T2OPType = T2->getAs<ObjCObjectPointerType>();
5848 if (T1OPType && T2OPType) {
5849 T1 = T1OPType->getPointeeType();
5850 T2 = T2OPType->getPointeeType();
5851 return true;
5852 }
5853 }
5854
5855 // FIXME: Block pointers, too?
5856
5857 return false;
5858}
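Illustrative aside: a source-level view of the similar-type walk per C++ [conv.qual]:

int **pp = nullptr;
const int *const *cpp = pp;  // OK: int** and const int* const* are similar
// UnwrapSimilarTypes would peel (int**, const int* const*)
//   -> (int*, const int*) -> (int, const int), then find no more levels.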
5859
5860bool ASTContext::hasSimilarType(QualType T1, QualType T2) {
5861 while (true) {
5862 Qualifiers Quals;
5863 T1 = getUnqualifiedArrayType(T1, Quals);
5864 T2 = getUnqualifiedArrayType(T2, Quals);
5865 if (hasSameType(T1, T2))
5866 return true;
5867 if (!UnwrapSimilarTypes(T1, T2))
5868 return false;
5869 }
5870}
5871
5872bool ASTContext::hasCvrSimilarType(QualType T1, QualType T2) {
5873 while (true) {
5874 Qualifiers Quals1, Quals2;
5875 T1 = getUnqualifiedArrayType(T1, Quals1);
5876 T2 = getUnqualifiedArrayType(T2, Quals2);
5877
5878 Quals1.removeCVRQualifiers();
5879 Quals2.removeCVRQualifiers();
5880 if (Quals1 != Quals2)
5881 return false;
5882
5883 if (hasSameType(T1, T2))
5884 return true;
5885
5886 if (!UnwrapSimilarTypes(T1, T2))
5887 return false;
5888 }
5889}
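// Illustrative example (not part of ASTContext.cpp): what the [conv.qual]
// similarity predicates above accept, assuming a valid ASTContext &Ctx.
//
//   QualType IntPP  = Ctx.getPointerType(Ctx.getPointerType(Ctx.IntTy));
//   QualType CIntPP = Ctx.getPointerType(
//       Ctx.getPointerType(Ctx.IntTy.withConst()));
//   Ctx.hasSimilarType(IntPP, CIntPP);        // true: differ only in qualifiers
//   Ctx.hasSimilarType(IntPP, Ctx.VoidPtrTy); // false: pointee types differ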
5890
5891DeclarationNameInfo
5892ASTContext::getNameForTemplate(TemplateName Name,
5893 SourceLocation NameLoc) const {
5894 switch (Name.getKind()) {
5895 case TemplateName::QualifiedTemplate:
5896 case TemplateName::Template:
5897 // DNInfo work in progress: CHECKME: what about DNLoc?
5898 return DeclarationNameInfo(Name.getAsTemplateDecl()->getDeclName(),
5899 NameLoc);
5900
5901 case TemplateName::OverloadedTemplate: {
5902 OverloadedTemplateStorage *Storage = Name.getAsOverloadedTemplate();
5903 // DNInfo work in progress: CHECKME: what about DNLoc?
5904 return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc);
5905 }
5906
5907 case TemplateName::AssumedTemplate: {
5908 AssumedTemplateStorage *Storage = Name.getAsAssumedTemplateName();
5909 return DeclarationNameInfo(Storage->getDeclName(), NameLoc);
5910 }
5911
5912 case TemplateName::DependentTemplate: {
5913 DependentTemplateName *DTN = Name.getAsDependentTemplateName();
5914 DeclarationName DName;
5915 if (DTN->isIdentifier()) {
5916 DName = DeclarationNames.getIdentifier(DTN->getIdentifier());
5917 return DeclarationNameInfo(DName, NameLoc);
5918 } else {
5919 DName = DeclarationNames.getCXXOperatorName(DTN->getOperator());
5920 // DNInfo work in progress: FIXME: source locations?
5921 DeclarationNameLoc DNLoc =
5922 DeclarationNameLoc::makeCXXOperatorNameLoc(SourceRange());
5923 return DeclarationNameInfo(DName, NameLoc, DNLoc);
5924 }
5925 }
5926
5927 case TemplateName::SubstTemplateTemplateParm: {
5928 SubstTemplateTemplateParmStorage *subst
5929 = Name.getAsSubstTemplateTemplateParm();
5930 return DeclarationNameInfo(subst->getParameter()->getDeclName(),
5931 NameLoc);
5932 }
5933
5934 case TemplateName::SubstTemplateTemplateParmPack: {
5935 SubstTemplateTemplateParmPackStorage *subst
5936 = Name.getAsSubstTemplateTemplateParmPack();
5937 return DeclarationNameInfo(subst->getParameterPack()->getDeclName(),
5938 NameLoc);
5939 }
5940 }
5941
5942 llvm_unreachable("bad template name kind!");
5943}
5944
5945TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name) const {
5946 switch (Name.getKind()) {
5947 case TemplateName::QualifiedTemplate:
5948 case TemplateName::Template: {
5949 TemplateDecl *Template = Name.getAsTemplateDecl();
5950 if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(Template))
5951 Template = getCanonicalTemplateTemplateParmDecl(TTP);
5952
5953 // The canonical template name is the canonical template declaration.
5954 return TemplateName(cast<TemplateDecl>(Template->getCanonicalDecl()));
5955 }
5956
5957 case TemplateName::OverloadedTemplate:
5958 case TemplateName::AssumedTemplate:
5959 llvm_unreachable("cannot canonicalize unresolved template");
5960
5961 case TemplateName::DependentTemplate: {
5962 DependentTemplateName *DTN = Name.getAsDependentTemplateName();
5963 assert(DTN && "Non-dependent template names must refer to template decls.");
5964 return DTN->CanonicalTemplateName;
5965 }
5966
5967 case TemplateName::SubstTemplateTemplateParm: {
5968 SubstTemplateTemplateParmStorage *subst
5969 = Name.getAsSubstTemplateTemplateParm();
5970 return getCanonicalTemplateName(subst->getReplacement());
5971 }
5972
5973 case TemplateName::SubstTemplateTemplateParmPack: {
5974 SubstTemplateTemplateParmPackStorage *subst
5975 = Name.getAsSubstTemplateTemplateParmPack();
5976 TemplateTemplateParmDecl *canonParameter
5977 = getCanonicalTemplateTemplateParmDecl(subst->getParameterPack());
5978 TemplateArgument canonArgPack
5979 = getCanonicalTemplateArgument(subst->getArgumentPack());
5980 return getSubstTemplateTemplateParmPack(canonParameter, canonArgPack);
5981 }
5982 }
5983
5984 llvm_unreachable("bad template name!");
5985}
5986
5987bool ASTContext::hasSameTemplateName(TemplateName X, TemplateName Y) {
5988 X = getCanonicalTemplateName(X);
5989 Y = getCanonicalTemplateName(Y);
5990 return X.getAsVoidPointer() == Y.getAsVoidPointer();
5991}
5992
5993TemplateArgument
5994ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const {
5995 switch (Arg.getKind()) {
5996 case TemplateArgument::Null:
5997 return Arg;
5998
5999 case TemplateArgument::Expression:
6000 return Arg;
6001
6002 case TemplateArgument::Declaration: {
6003 auto *D = cast<ValueDecl>(Arg.getAsDecl()->getCanonicalDecl());
6004 return TemplateArgument(D, Arg.getParamTypeForDecl());
6005 }
6006
6007 case TemplateArgument::NullPtr:
6008 return TemplateArgument(getCanonicalType(Arg.getNullPtrType()),
6009 /*isNullPtr*/true);
6010
6011 case TemplateArgument::Template:
6012 return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate()));
6013
6014 case TemplateArgument::TemplateExpansion:
6015 return TemplateArgument(getCanonicalTemplateName(
6016 Arg.getAsTemplateOrTemplatePattern()),
6017 Arg.getNumTemplateExpansions());
6018
6019 case TemplateArgument::Integral:
6020 return TemplateArgument(Arg, getCanonicalType(Arg.getIntegralType()));
6021
6022 case TemplateArgument::Type:
6023 return TemplateArgument(getCanonicalType(Arg.getAsType()));
6024
6025 case TemplateArgument::Pack: {
6026 if (Arg.pack_size() == 0)
6027 return Arg;
6028
6029 auto *CanonArgs = new (*this) TemplateArgument[Arg.pack_size()];
6030 unsigned Idx = 0;
6031 for (TemplateArgument::pack_iterator A = Arg.pack_begin(),
6032 AEnd = Arg.pack_end();
6033 A != AEnd; (void)++A, ++Idx)
6034 CanonArgs[Idx] = getCanonicalTemplateArgument(*A);
6035
6036 return TemplateArgument(llvm::makeArrayRef(CanonArgs, Arg.pack_size()));
6037 }
6038 }
6039
6040 // Silence GCC warning
6041 llvm_unreachable("Unhandled template argument kind");
6042}
6043
6044NestedNameSpecifier *
6045ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const {
6046 if (!NNS)
6047 return nullptr;
6048
6049 switch (NNS->getKind()) {
6050 case NestedNameSpecifier::Identifier:
6051 // Canonicalize the prefix but keep the identifier the same.
6052 return NestedNameSpecifier::Create(*this,
6053 getCanonicalNestedNameSpecifier(NNS->getPrefix()),
6054 NNS->getAsIdentifier());
6055
6056 case NestedNameSpecifier::Namespace:
6057 // A namespace is canonical; build a nested-name-specifier with
6058 // this namespace and no prefix.
6059 return NestedNameSpecifier::Create(*this, nullptr,
6060 NNS->getAsNamespace()->getOriginalNamespace());
6061
6062 case NestedNameSpecifier::NamespaceAlias:
6063 // A namespace is canonical; build a nested-name-specifier with
6064 // this namespace and no prefix.
6065 return NestedNameSpecifier::Create(*this, nullptr,
6066 NNS->getAsNamespaceAlias()->getNamespace()
6067 ->getOriginalNamespace());
6068
6069 // The difference between TypeSpec and TypeSpecWithTemplate is that the
6070 // latter will have the 'template' keyword when printed.
6071 case NestedNameSpecifier::TypeSpec:
6072 case NestedNameSpecifier::TypeSpecWithTemplate: {
6073 const Type *T = getCanonicalType(NNS->getAsType());
6074
6075 // If we have some kind of dependent-named type (e.g., "typename T::type"),
6076 // break it apart into its prefix and identifier, then reconstitute those
6077 // as the canonical nested-name-specifier. This is required to canonicalize
6078 // a dependent nested-name-specifier involving typedefs of dependent-name
6079 // types, e.g.,
6080 // typedef typename T::type T1;
6081 // typedef typename T1::type T2;
6082 if (const auto *DNT = T->getAs<DependentNameType>())
6083 return NestedNameSpecifier::Create(
6084 *this, DNT->getQualifier(),
6085 const_cast<IdentifierInfo *>(DNT->getIdentifier()));
6086 if (const auto *DTST = T->getAs<DependentTemplateSpecializationType>())
6087 return NestedNameSpecifier::Create(*this, DTST->getQualifier(), true,
6088 const_cast<Type *>(T));
6089
6090 // TODO: Set 'Template' parameter to true for other template types.
6091 return NestedNameSpecifier::Create(*this, nullptr, false,
6092 const_cast<Type *>(T));
6093 }
6094
6095 case NestedNameSpecifier::Global:
6096 case NestedNameSpecifier::Super:
6097 // The global specifier and __super specifier are canonical and unique.
6098 return NNS;
6099 }
6100
6101 llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
6102}
6103
6104const ArrayType *ASTContext::getAsArrayType(QualType T) const {
6105 // Handle the non-qualified case efficiently.
6106 if (!T.hasLocalQualifiers()) {
6107 // Handle the common positive case fast.
6108 if (const auto *AT = dyn_cast<ArrayType>(T))
6109 return AT;
6110 }
6111
6112 // Handle the common negative case fast.
6113 if (!isa<ArrayType>(T.getCanonicalType()))
6114 return nullptr;
6115
6116 // Apply any qualifiers from the array type to the element type. This
6117 // implements C99 6.7.3p8: "If the specification of an array type includes
6118 // any type qualifiers, the element type is so qualified, not the array type."
6119
6120 // If we get here, we either have type qualifiers on the type, or we have
6121 // sugar such as a typedef in the way. If we have type qualifiers on the type
6122 // we must propagate them down into the element type.
6123
6124 SplitQualType split = T.getSplitDesugaredType();
6125 Qualifiers qs = split.Quals;
6126
6127 // If we have a simple case, just return now.
6128 const auto *ATy = dyn_cast<ArrayType>(split.Ty);
6129 if (!ATy || qs.empty())
6130 return ATy;
6131
6132 // Otherwise, we have an array and we have qualifiers on it. Push the
6133 // qualifiers into the array element type and return a new array type.
6134 QualType NewEltTy = getQualifiedType(ATy->getElementType(), qs);
6135
6136 if (const auto *CAT = dyn_cast<ConstantArrayType>(ATy))
6137 return cast<ArrayType>(getConstantArrayType(NewEltTy, CAT->getSize(),
6138 CAT->getSizeExpr(),
6139 CAT->getSizeModifier(),
6140 CAT->getIndexTypeCVRQualifiers()));
6141 if (const auto *IAT = dyn_cast<IncompleteArrayType>(ATy))
6142 return cast<ArrayType>(getIncompleteArrayType(NewEltTy,
6143 IAT->getSizeModifier(),
6144 IAT->getIndexTypeCVRQualifiers()));
6145
6146 if (const auto *DSAT = dyn_cast<DependentSizedArrayType>(ATy))
6147 return cast<ArrayType>(
6148 getDependentSizedArrayType(NewEltTy,
6149 DSAT->getSizeExpr(),
6150 DSAT->getSizeModifier(),
6151 DSAT->getIndexTypeCVRQualifiers(),
6152 DSAT->getBracketsRange()));
6153
6154 const auto *VAT = cast<VariableArrayType>(ATy);
6155 return cast<ArrayType>(getVariableArrayType(NewEltTy,
6156 VAT->getSizeExpr(),
6157 VAT->getSizeModifier(),
6158 VAT->getIndexTypeCVRQualifiers(),
6159 VAT->getBracketsRange()));
6160}
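// Illustrative example (not part of ASTContext.cpp) of the qualifier
// propagation performed above. Given
//
//   typedef int A[5];
//   const A x;
//
// getAsArrayType() applied to the type of 'x' yields "const int [5]": the
// 'const' written on the typedef'd array is pushed down onto the element
// type, as C99 6.7.3p8 requires.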
6161
6162QualType ASTContext::getAdjustedParameterType(QualType T) const {
6163 if (T->isArrayType() || T->isFunctionType())
6164 return getDecayedType(T);
6165 return T;
6166}
6167
6168QualType ASTContext::getSignatureParameterType(QualType T) const {
6169 T = getVariableArrayDecayedType(T);
6170 T = getAdjustedParameterType(T);
6171 return T.getUnqualifiedType();
6172}
6173
6174QualType ASTContext::getExceptionObjectType(QualType T) const {
6175 // C++ [except.throw]p3:
6176 // A throw-expression initializes a temporary object, called the exception
6177 // object, the type of which is determined by removing any top-level
6178 // cv-qualifiers from the static type of the operand of throw and adjusting
6179 // the type from "array of T" or "function returning T" to "pointer to T"
6180 // or "pointer to function returning T", [...]
6181 T = getVariableArrayDecayedType(T);
6182 if (T->isArrayType() || T->isFunctionType())
6183 T = getDecayedType(T);
6184 return T.getUnqualifiedType();
6185}
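// Worked example (not from this file): for `throw "hi"` the operand has
// static type `const char[3]`; after the array-to-pointer adjustment and
// removal of top-level cv-qualifiers above, the exception object type is
// `const char *`.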
6186
6187/// getArrayDecayedType - Return the properly qualified result of decaying the
6188/// specified array type to a pointer. This operation is non-trivial when
6189/// handling typedefs etc. The canonical type of "T" must be an array type,
6190/// this returns a pointer to a properly qualified element of the array.
6191///
6192/// See C99 6.7.5.3p7 and C99 6.3.2.1p3.
6193QualType ASTContext::getArrayDecayedType(QualType Ty) const {
6194 // Get the element type with 'getAsArrayType' so that we don't lose any
6195 // typedefs in the element type of the array. This also handles propagation
6196 // of type qualifiers from the array type into the element type if present
6197 // (C99 6.7.3p8).
6198 const ArrayType *PrettyArrayType = getAsArrayType(Ty);
6199 assert(PrettyArrayType && "Not an array type!");
6200
6201 QualType PtrTy = getPointerType(PrettyArrayType->getElementType());
6202
6203 // int x[restrict 4] -> int *restrict
6204 QualType Result = getQualifiedType(PtrTy,
6205 PrettyArrayType->getIndexTypeQualifiers());
6206
6207 // int x[_Nullable] -> int * _Nullable
6208 if (auto Nullability = Ty->getNullability(*this)) {
6209 Result = const_cast<ASTContext *>(this)->getAttributedType(
6210 AttributedType::getNullabilityAttrKind(*Nullability), Result, Result);
6211 }
6212 return Result;
6213}
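// Minimal usage sketch (hypothetical helper, not part of ASTContext.cpp),
// assuming a valid ASTContext &Ctx: decay a declared array type to the
// pointer type it is actually passed as, guarding the call because
// getArrayDecayedType asserts that its argument is (canonically) an array.
static QualType decayIfArray(ASTContext &Ctx, QualType T) {
  // Index-type qualifiers such as `int x[restrict 4]` end up on the pointer,
  // per the comments above.
  return T->isArrayType() ? Ctx.getArrayDecayedType(T) : T;
}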
6214
6215QualType ASTContext::getBaseElementType(const ArrayType *array) const {
6216 return getBaseElementType(array->getElementType());
6217}
6218
6219QualType ASTContext::getBaseElementType(QualType type) const {
6220 Qualifiers qs;
6221 while (true) {
6222 SplitQualType split = type.getSplitDesugaredType();
6223 const ArrayType *array = split.Ty->getAsArrayTypeUnsafe();
6224 if (!array) break;
6225
6226 type = array->getElementType();
6227 qs.addConsistentQualifiers(split.Quals);
6228 }
6229
6230 return getQualifiedType(type, qs);
6231}
6232
6233/// getConstantArrayElementCount - Returns number of constant array elements.
6234uint64_t
6235ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA) const {
6236 uint64_t ElementCount = 1;
6237 do {
6238 ElementCount *= CA->getSize().getZExtValue();
6239 CA = dyn_cast_or_null<ConstantArrayType>(
6240 CA->getElementType()->getAsArrayTypeUnsafe());
6241 } while (CA);
6242 return ElementCount;
6243}
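// Worked example (not from this file): for `int a[2][3][4]`, the loop above
// multiplies the bound of each nested ConstantArrayType, so
// getConstantArrayElementCount returns 2 * 3 * 4 = 24.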
6244
6245/// getFloatingRank - Return a relative rank for floating point types.
6246/// This routine will assert if passed a built-in type that isn't a float.
6247static FloatingRank getFloatingRank(QualType T) {
6248 if (const auto *CT = T->getAs<ComplexType>())
6249 return getFloatingRank(CT->getElementType());
6250
6251 switch (T->castAs<BuiltinType>()->getKind()) {
6252 default: llvm_unreachable("getFloatingRank(): not a floating type");
6253 case BuiltinType::Float16: return Float16Rank;
6254 case BuiltinType::Half: return HalfRank;
6255 case BuiltinType::Float: return FloatRank;
6256 case BuiltinType::Double: return DoubleRank;
6257 case BuiltinType::LongDouble: return LongDoubleRank;
6258 case BuiltinType::Float128: return Float128Rank;
6259 case BuiltinType::BFloat16: return BFloat16Rank;
6260 }
6261}
6262
6263/// getFloatingTypeOfSizeWithinDomain - Returns a real floating
6264/// point or a complex type (based on typeDomain/typeSize).
6265/// 'typeDomain' is a real floating point or complex type.
6266/// 'typeSize' is a real floating point or complex type.
6267QualType ASTContext::getFloatingTypeOfSizeWithinDomain(QualType Size,
6268 QualType Domain) const {
6269 FloatingRank EltRank = getFloatingRank(Size);
6270 if (Domain->isComplexType()) {
6271 switch (EltRank) {
6272 case BFloat16Rank: llvm_unreachable("Complex bfloat16 is not supported");
6273 case Float16Rank:
6274 case HalfRank: llvm_unreachable("Complex half is not supported");
6275 case FloatRank: return FloatComplexTy;
6276 case DoubleRank: return DoubleComplexTy;
6277 case LongDoubleRank: return LongDoubleComplexTy;
6278 case Float128Rank: return Float128ComplexTy;
6279 }
6280 }
6281
6282 assert(Domain->isRealFloatingType() && "Unknown domain!");
6283 switch (EltRank) {
6284 case Float16Rank: return HalfTy;
6285 case BFloat16Rank: return BFloat16Ty;
6286 case HalfRank: return HalfTy;
6287 case FloatRank: return FloatTy;
6288 case DoubleRank: return DoubleTy;
6289 case LongDoubleRank: return LongDoubleTy;
6290 case Float128Rank: return Float128Ty;
6291 }
6292 llvm_unreachable("getFloatingRank(): illegal value for rank");
6293}
6294
6295/// getFloatingTypeOrder - Compare the rank of the two specified floating
6296/// point types, ignoring the domain of the type (i.e. 'double' ==
6297/// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If
6298/// LHS < RHS, return -1.
6299int ASTContext::getFloatingTypeOrder(QualType LHS, QualType RHS) const {
6300 FloatingRank LHSR = getFloatingRank(LHS);
6301 FloatingRank RHSR = getFloatingRank(RHS);
6302
6303 if (LHSR == RHSR)
6304 return 0;
6305 if (LHSR > RHSR)
6306 return 1;
6307 return -1;
6308}
6309
6310int ASTContext::getFloatingTypeSemanticOrder(QualType LHS, QualType RHS) const {
6311 if (&getFloatTypeSemantics(LHS) == &getFloatTypeSemantics(RHS))
6312 return 0;
6313 return getFloatingTypeOrder(LHS, RHS);
6314}
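// Illustrative helper (hypothetical, not part of ASTContext.cpp), assuming a
// valid ASTContext &Ctx: pick the wider of two real floating-point types
// using the rank comparison defined above.
static QualType widerFloatType(ASTContext &Ctx, QualType LHS, QualType RHS) {
  // getFloatingTypeOrder returns 1 if LHS outranks RHS, 0 if the ranks are
  // equal, and -1 otherwise; e.g. order(DoubleTy, FloatTy) == 1.
  return Ctx.getFloatingTypeOrder(LHS, RHS) >= 0 ? LHS : RHS;
}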
6315
6316/// getIntegerRank - Return an integer conversion rank (C99 6.3.1.1p1). This
6317/// routine will assert if passed a built-in type that isn't an integer or enum,
6318/// or if it is not canonicalized.
6319unsigned ASTContext::getIntegerRank(const Type *T) const {
6320 assert(T->isCanonicalUnqualified() && "T should be canonicalized");
6321
6322 // Results in this 'losing' to any type of the same size, but winning if
6323 // larger.
6324 if (const auto *EIT = dyn_cast<ExtIntType>(T))
6325 return 0 + (EIT->getNumBits() << 3);
6326
6327 switch (cast<BuiltinType>(T)->getKind()) {
6328 default: llvm_unreachable("getIntegerRank(): not a built-in integer");
6329 case BuiltinType::Bool:
6330 return 1 + (getIntWidth(BoolTy) << 3);
6331 case BuiltinType::Char_S:
6332 case BuiltinType::Char_U:
6333 case BuiltinType::SChar:
6334 case BuiltinType::UChar:
6335 return 2 + (getIntWidth(CharTy) << 3);
6336 case BuiltinType::Short:
6337 case BuiltinType::UShort:
6338 return 3 + (getIntWidth(ShortTy) << 3);
6339 case BuiltinType::Int:
6340 case BuiltinType::UInt:
6341 return 4 + (getIntWidth(IntTy) << 3);
6342 case BuiltinType::Long:
6343 case BuiltinType::ULong:
6344 return 5 + (getIntWidth(LongTy) << 3);
6345 case BuiltinType::LongLong:
6346 case BuiltinType::ULongLong:
6347 return 6 + (getIntWidth(LongLongTy) << 3);
6348 case BuiltinType::Int128:
6349 case BuiltinType::UInt128:
6350 return 7 + (getIntWidth(Int128Ty) << 3);
6351 }
6352}
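// Worked example (not from this file): on a typical 64-bit target where int
// is 32 bits and long is 64 bits, the formula above gives
//   rank(int)         = 4 + (32 << 3) = 260
//   rank(long)        = 5 + (64 << 3) = 517
//   rank(_ExtInt(32)) = 0 + (32 << 3) = 256
// so _ExtInt(32) loses to int of the same width but beats any narrower
// standard type, as the comment above intends.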
6353
6354/// Whether this is a promotable bitfield reference according
6355/// to C99 6.3.1.1p2, bullet 2 (and GCC extensions).
6356///
6357/// \returns the type this bit-field will promote to, or NULL if no
6358/// promotion occurs.
6359QualType ASTContext::isPromotableBitField(Expr *E) const {
6360 if (E->isTypeDependent() || E->isValueDependent())
6361 return {};
6362
6363 // C++ [conv.prom]p5:
6364 // If the bit-field has an enumerated type, it is treated as any other
6365 // value of that type for promotion purposes.
6366 if (getLangOpts().CPlusPlus && E->getType()->isEnumeralType())
6367 return {};
6368
6369 // FIXME: We should not do this unless E->refersToBitField() is true. This
6370 // matters in C where getSourceBitField() will find bit-fields for various
6371 // cases where the source expression is not a bit-field designator.
6372
6373 FieldDecl *Field = E->getSourceBitField(); // FIXME: conditional bit-fields?
6374 if (!Field)
6375 return {};
6376
6377 QualType FT = Field->getType();
6378
6379 uint64_t BitWidth = Field->getBitWidthValue(*this);
6380 uint64_t IntSize = getTypeSize(IntTy);
6381 // C++ [conv.prom]p5:
6382 // A prvalue for an integral bit-field can be converted to a prvalue of type
6383 // int if int can represent all the values of the bit-field; otherwise, it
6384 // can be converted to unsigned int if unsigned int can represent all the
6385 // values of the bit-field. If the bit-field is larger yet, no integral
6386 // promotion applies to it.
6387 // C11 6.3.1.1/2:
6388 // [For a bit-field of type _Bool, int, signed int, or unsigned int:]
6389 // If an int can represent all values of the original type (as restricted by
6390 // the width, for a bit-field), the value is converted to an int; otherwise,
6391 // it is converted to an unsigned int.
6392 //
6393 // FIXME: C does not permit promotion of a 'long : 3' bitfield to int.
6394 // We perform that promotion here to match GCC and C++.
6395 // FIXME: C does not permit promotion of an enum bit-field whose rank is
6396 // greater than that of 'int'. We perform that promotion to match GCC.
6397 if (BitWidth < IntSize)
6398 return IntTy;
6399
6400 if (BitWidth == IntSize)
6401 return FT->isSignedIntegerType() ? IntTy : UnsignedIntTy;
6402
6403 // Bit-fields wider than int are not subject to promotions, and therefore act
6404 // like the base type. GCC has some weird bugs in this area that we
6405 // deliberately do not follow (GCC follows a pre-standard resolution to
6406 // C's DR315 which treats bit-width as being part of the type, and this leaks
6407 // into their semantics in some cases).
6408 return {};
6409}
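// Worked example (not from this file), assuming a 32-bit int: given
//   struct S { unsigned u : 8; int n : 32; long l : 40; };
// reading `u` promotes to int (8 < 32), reading `n` promotes to int (the
// width equals int and the field is signed), and `l` is wider than int, so
// no bit-field promotion applies and an empty QualType is returned.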
6410
6411/// getPromotedIntegerType - Returns the type that Promotable will
6412/// promote to: C99 6.3.1.1p2, assuming that Promotable is a promotable
6413/// integer type.
6414QualType ASTContext::getPromotedIntegerType(QualType Promotable) const {
6415 assert(!Promotable.isNull());
6416 assert(Promotable->isPromotableIntegerType());
6417 if (const auto *ET = Promotable->getAs<EnumType>())
6418 return ET->getDecl()->getPromotionType();
6419
6420 if (const auto *BT = Promotable->getAs<BuiltinType>()) {
6421 // C++ [conv.prom]: A prvalue of type char16_t, char32_t, or wchar_t
6422 // (3.9.1) can be converted to a prvalue of the first of the following
6423 // types that can represent all the values of its underlying type:
6424 // int, unsigned int, long int, unsigned long int, long long int, or
6425 // unsigned long long int [...]
6426 // FIXME: Is there some better way to compute this?
6427 if (BT->getKind() == BuiltinType::WChar_S ||
6428 BT->getKind() == BuiltinType::WChar_U ||
6429 BT->getKind() == BuiltinType::Char8 ||
6430 BT->getKind() == BuiltinType::Char16 ||
6431 BT->getKind() == BuiltinType::Char32) {
6432 bool FromIsSigned = BT->getKind() == BuiltinType::WChar_S;
6433 uint64_t FromSize = getTypeSize(BT);
6434 QualType PromoteTypes[] = { IntTy, UnsignedIntTy, LongTy, UnsignedLongTy,
6435 LongLongTy, UnsignedLongLongTy };
6436 for (size_t Idx = 0; Idx < llvm::array_lengthof(PromoteTypes); ++Idx) {
6437 uint64_t ToSize = getTypeSize(PromoteTypes[Idx]);
6438 if (FromSize < ToSize ||
6439 (FromSize == ToSize &&
6440 FromIsSigned == PromoteTypes[Idx]->isSignedIntegerType()))
6441 return PromoteTypes[Idx];
6442 }
6443 llvm_unreachable("char type should fit into long long");
6444 }
6445 }
6446
6447 // At this point, we should have a signed or unsigned integer type.
6448 if (Promotable->isSignedIntegerType())
6449 return IntTy;
6450 uint64_t PromotableSize = getIntWidth(Promotable);
6451 uint64_t IntSize = getIntWidth(IntTy);
6452 assert(Promotable->isUnsignedIntegerType() && PromotableSize <= IntSize);
6453 return (PromotableSize != IntSize) ? IntTy : UnsignedIntTy;
6454}
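// Worked example (not from this file): on a target where wchar_t is a signed
// 32-bit type, the loop above promotes it to int (the first listed type that
// is wide enough and has matching signedness); char16_t, being unsigned and
// only 16 bits, also promotes to int because int is strictly wider.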
6455
6456/// Recurses in pointer/array types until it finds an objc retainable
6457/// type and returns its ownership.
6458Qualifiers::ObjCLifetime ASTContext::getInnerObjCOwnership(QualType T) const {
6459 while (!T.isNull()) {
6460 if (T.getObjCLifetime() != Qualifiers::OCL_None)
6461 return T.getObjCLifetime();
6462 if (T->isArrayType())
6463 T = getBaseElementType(T);
6464 else if (const auto *PT = T->getAs<PointerType>())
6465 T = PT->getPointeeType();
6466 else if (const auto *RT = T->getAs<ReferenceType>())
6467 T = RT->getPointeeType();
6468 else
6469 break;
6470 }
6471
6472 return Qualifiers::OCL_None;
6473}
6474
6475static const Type *getIntegerTypeForEnum(const EnumType *ET) {
6476 // Incomplete enum types are not treated as integer types.
6477 // FIXME: In C++, enum types are never integer types.
6478 if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped())
6479 return ET->getDecl()->getIntegerType().getTypePtr();
6480 return nullptr;
6481}
6482
6483/// getIntegerTypeOrder - Returns the highest ranked integer type:
6484/// C99 6.3.1.8p1. If LHS > RHS, return 1. If LHS == RHS, return 0. If
6485/// LHS < RHS, return -1.
6486int ASTContext::getIntegerTypeOrder(QualType LHS, QualType RHS) const {
6487 const Type *LHSC = getCanonicalType(LHS).getTypePtr();
6488 const Type *RHSC = getCanonicalType(RHS).getTypePtr();
6489
6490 // Unwrap enums to their underlying type.
6491 if (const auto *ET = dyn_cast<EnumType>(LHSC))
6492 LHSC = getIntegerTypeForEnum(ET);
6493 if (const auto *ET = dyn_cast<EnumType>(RHSC))
6494 RHSC = getIntegerTypeForEnum(ET);
6495
6496 if (LHSC == RHSC) return 0;
6497
6498 bool LHSUnsigned = LHSC->isUnsignedIntegerType();
6499 bool RHSUnsigned = RHSC->isUnsignedIntegerType();
6500
6501 unsigned LHSRank = getIntegerRank(LHSC);
6502 unsigned RHSRank = getIntegerRank(RHSC);
6503
6504 if (LHSUnsigned == RHSUnsigned) { // Both signed or both unsigned.
6505 if (LHSRank == RHSRank) return 0;
6506 return LHSRank > RHSRank ? 1 : -1;
6507 }
6508
6509 // Otherwise, the LHS is signed and the RHS is unsigned or vice versa.
6510 if (LHSUnsigned) {
6511 // If the unsigned [LHS] type is larger, return it.
6512 if (LHSRank >= RHSRank)
6513 return 1;
6514
6515 // If the signed type can represent all values of the unsigned type, it
6516 // wins. Because we are dealing with 2's complement and types that are
6517 // powers of two larger than each other, this is always safe.
6518 return -1;
6519 }
6520
6521 // If the unsigned [RHS] type is larger, return it.
6522 if (RHSRank >= LHSRank)
6523 return -1;
6524
6525 // If the signed type can represent all values of the unsigned type, it
6526 // wins. Because we are dealing with 2's complement and types that are
6527 // powers of two larger than each other, this is always safe.
6528 return 1;
6529}
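// Worked example (not from this file): getIntegerTypeOrder(IntTy,
// UnsignedIntTy) returns -1. The ranks are equal, so the signed-LHS /
// unsigned-RHS branch is taken and the unsigned type wins, matching the
// usual arithmetic conversions.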
6530
6531TypedefDecl *ASTContext::getCFConstantStringDecl() const {
6532 if (CFConstantStringTypeDecl)
6533 return CFConstantStringTypeDecl;
6534
6535 assert(!CFConstantStringTagDecl &&
6536 "tag and typedef should be initialized together");
6537 CFConstantStringTagDecl = buildImplicitRecord("__NSConstantString_tag");
6538 CFConstantStringTagDecl->startDefinition();
6539
6540 struct {
6541 QualType Type;
6542 const char *Name;
6543 } Fields[5];
6544 unsigned Count = 0;
6545
6546 /// Objective-C ABI
6547 ///
6548 /// typedef struct __NSConstantString_tag {
6549 /// const int *isa;
6550 /// int flags;
6551 /// const char *str;
6552 /// long length;
6553 /// } __NSConstantString;
6554 ///
6555 /// Swift ABI (4.1, 4.2)
6556 ///
6557 /// typedef struct __NSConstantString_tag {
6558 /// uintptr_t _cfisa;
6559 /// uintptr_t _swift_rc;
6560 /// _Atomic(uint64_t) _cfinfoa;
6561 /// const char *_ptr;
6562 /// uint32_t _length;
6563 /// } __NSConstantString;
6564 ///
6565 /// Swift ABI (5.0)
6566 ///
6567 /// typedef struct __NSConstantString_tag {
6568 /// uintptr_t _cfisa;
6569 /// uintptr_t _swift_rc;
6570 /// _Atomic(uint64_t) _cfinfoa;
6571 /// const char *_ptr;
6572 /// uintptr_t _length;
6573 /// } __NSConstantString;
6574
6575 const auto CFRuntime = getLangOpts().CFRuntime;
6576 if (static_cast<unsigned>(CFRuntime) <
6577 static_cast<unsigned>(LangOptions::CoreFoundationABI::Swift)) {
6578 Fields[Count++] = { getPointerType(IntTy.withConst()), "isa" };
6579 Fields[Count++] = { IntTy, "flags" };
6580 Fields[Count++] = { getPointerType(CharTy.withConst()), "str" };
6581 Fields[Count++] = { LongTy, "length" };
6582 } else {
6583 Fields[Count++] = { getUIntPtrType(), "_cfisa" };
6584 Fields[Count++] = { getUIntPtrType(), "_swift_rc" };
6585 Fields[Count++] = { getFromTargetType(Target->getUInt64Type()), "_swift_rc" };
6586 Fields[Count++] = { getPointerType(CharTy.withConst()), "_ptr" };
6587 if (CFRuntime == LangOptions::CoreFoundationABI::Swift4_1 ||
6588 CFRuntime == LangOptions::CoreFoundationABI::Swift4_2)
6589 Fields[Count++] = { IntTy, "_ptr" };
6590 else
6591 Fields[Count++] = { getUIntPtrType(), "_ptr" };
6592 }
6593
6594 // Create fields
6595 for (unsigned i = 0; i < Count; ++i) {
6596 FieldDecl *Field =
6597 FieldDecl::Create(*this, CFConstantStringTagDecl, SourceLocation(),
6598 SourceLocation(), &Idents.get(Fields[i].Name),
6599 Fields[i].Type, /*TInfo=*/nullptr,
6600 /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit);
6601 Field->setAccess(AS_public);
6602 CFConstantStringTagDecl->addDecl(Field);
6603 }
6604
6605 CFConstantStringTagDecl->completeDefinition();
6606 // This type is designed to be compatible with NSConstantString, but cannot
6607 // use the same name, since NSConstantString is an interface.
6608 auto tagType = getTagDeclType(CFConstantStringTagDecl);
6609 CFConstantStringTypeDecl =
6610 buildImplicitTypedef(tagType, "__NSConstantString");
6611
6612 return CFConstantStringTypeDecl;
6613}
6614
6615RecordDecl *ASTContext::getCFConstantStringTagDecl() const {
6616 if (!CFConstantStringTagDecl)
6617 getCFConstantStringDecl(); // Build the tag and the typedef.
6618 return CFConstantStringTagDecl;
6619}
6620
6621// getCFConstantStringType - Return the type used for constant CFStrings.
6622QualType ASTContext::getCFConstantStringType() const {
6623 return getTypedefType(getCFConstantStringDecl());
6624}
6625
6626QualType ASTContext::getObjCSuperType() const {
6627 if (ObjCSuperType.isNull()) {
6628 RecordDecl *ObjCSuperTypeDecl = buildImplicitRecord("objc_super");
6629 getTranslationUnitDecl()->addDecl(ObjCSuperTypeDecl);
6630 ObjCSuperType = getTagDeclType(ObjCSuperTypeDecl);
6631 }
6632 return ObjCSuperType;
6633}
6634
6635void ASTContext::setCFConstantStringType(QualType T) {
6636 const auto *TD = T->castAs<TypedefType>();
6637 CFConstantStringTypeDecl = cast<TypedefDecl>(TD->getDecl());
6638 const auto *TagType =
6639 CFConstantStringTypeDecl->getUnderlyingType()->castAs<RecordType>();
6640 CFConstantStringTagDecl = TagType->getDecl();
6641}
6642
6643QualType ASTContext::getBlockDescriptorType() const {
6644 if (BlockDescriptorType)
6645 return getTagDeclType(BlockDescriptorType);
6646
6647 RecordDecl *RD;
6648 // FIXME: Needs the FlagAppleBlock bit.
6649 RD = buildImplicitRecord("__block_descriptor");
6650 RD->startDefinition();
6651
6652 QualType FieldTypes[] = {
6653 UnsignedLongTy,
6654 UnsignedLongTy,
6655 };
6656
6657 static const char *const FieldNames[] = {
6658 "reserved",
6659 "Size"
6660 };
6661
6662 for (size_t i = 0; i < 2; ++i) {
6663 FieldDecl *Field = FieldDecl::Create(
6664 *this, RD, SourceLocation(), SourceLocation(),
6665 &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr,
6666 /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit);
6667 Field->setAccess(AS_public);
6668 RD->addDecl(Field);
6669 }
6670
6671 RD->completeDefinition();
6672
6673 BlockDescriptorType = RD;
6674
6675 return getTagDeclType(BlockDescriptorType);
6676}
6677
6678QualType ASTContext::getBlockDescriptorExtendedType() const {
6679 if (BlockDescriptorExtendedType)
6680 return getTagDeclType(BlockDescriptorExtendedType);
6681
6682 RecordDecl *RD;
6683 // FIXME: Needs the FlagAppleBlock bit.
6684 RD = buildImplicitRecord("__block_descriptor_withcopydispose");
6685 RD->startDefinition();
6686
6687 QualType FieldTypes[] = {
6688 UnsignedLongTy,
6689 UnsignedLongTy,
6690 getPointerType(VoidPtrTy),
6691 getPointerType(VoidPtrTy)
6692 };
6693
6694 static const char *const FieldNames[] = {
6695 "reserved",
6696 "Size",
6697 "CopyFuncPtr",
6698 "DestroyFuncPtr"
6699 };
6700
6701 for (size_t i = 0; i < 4; ++i) {
6702 FieldDecl *Field = FieldDecl::Create(
6703 *this, RD, SourceLocation(), SourceLocation(),
6704 &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr,
6705 /*BitWidth=*/nullptr,
6706 /*Mutable=*/false, ICIS_NoInit);
6707 Field->setAccess(AS_public);
6708 RD->addDecl(Field);
6709 }
6710
6711 RD->completeDefinition();
6712
6713 BlockDescriptorExtendedType = RD;
6714 return getTagDeclType(BlockDescriptorExtendedType);
6715}
6716
6717OpenCLTypeKind ASTContext::getOpenCLTypeKind(const Type *T) const {
6718 const auto *BT = dyn_cast<BuiltinType>(T);
6719
6720 if (!BT) {
6721 if (isa<PipeType>(T))
6722 return OCLTK_Pipe;
6723
6724 return OCLTK_Default;
6725 }
6726
6727 switch (BT->getKind()) {
6728#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
6729 case BuiltinType::Id: \
6730 return OCLTK_Image;
6731#include "clang/Basic/OpenCLImageTypes.def"
6732
6733 case BuiltinType::OCLClkEvent:
6734 return OCLTK_ClkEvent;
6735
6736 case BuiltinType::OCLEvent:
6737 return OCLTK_Event;
6738
6739 case BuiltinType::OCLQueue:
6740 return OCLTK_Queue;
6741
6742 case BuiltinType::OCLReserveID:
6743 return OCLTK_ReserveID;
6744
6745 case BuiltinType::OCLSampler:
6746 return OCLTK_Sampler;
6747
6748 default:
6749 return OCLTK_Default;
6750 }
6751}
6752
6753LangAS ASTContext::getOpenCLTypeAddrSpace(const Type *T) const {
6754 return Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T));
6755}
6756
6757/// BlockRequiresCopying - Returns true if byref variable "D" of type "Ty"
6758/// requires copy/dispose. Note that this must match the logic
6759/// in buildByrefHelpers.
6760bool ASTContext::BlockRequiresCopying(QualType Ty,
6761 const VarDecl *D) {
6762 if (const CXXRecordDecl *record = Ty->getAsCXXRecordDecl()) {
6763 const Expr *copyExpr = getBlockVarCopyInit(D).getCopyExpr();
6764 if (!copyExpr && record->hasTrivialDestructor()) return false;
6765
6766 return true;
6767 }
6768
6769 // The block needs copy/destroy helpers if Ty is non-trivial to destructively
6770 // move or destroy.
6771 if (Ty.isNonTrivialToPrimitiveDestructiveMove() || Ty.isDestructedType())
6772 return true;
6773
6774 if (!Ty->isObjCRetainableType()) return false;
6775
6776 Qualifiers qs = Ty.getQualifiers();
6777
6778 // If we have lifetime, that dominates.
6779 if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) {
6780 switch (lifetime) {
6781 case Qualifiers::OCL_None: llvm_unreachable("impossible");
6782
6783 // These are just bits as far as the runtime is concerned.
6784 case Qualifiers::OCL_ExplicitNone:
6785 case Qualifiers::OCL_Autoreleasing:
6786 return false;
6787
6788 // These cases should have been taken care of when checking the type's
6789 // non-triviality.
6790 case Qualifiers::OCL_Weak:
6791 case Qualifiers::OCL_Strong:
6792 llvm_unreachable("impossible");
6793 }
6794 llvm_unreachable("fell out of lifetime switch!");
6795 }
6796 return (Ty->isBlockPointerType() || isObjCNSObjectType(Ty) ||
6797 Ty->isObjCObjectPointerType());
6798}
6799
6800bool ASTContext::getByrefLifetime(QualType Ty,
6801 Qualifiers::ObjCLifetime &LifeTime,
6802 bool &HasByrefExtendedLayout) const {
6803 if (!getLangOpts().ObjC ||
6804 getLangOpts().getGC() != LangOptions::NonGC)
6805 return false;
6806
6807 HasByrefExtendedLayout = false;
6808 if (Ty->isRecordType()) {
6809 HasByrefExtendedLayout = true;
6810 LifeTime = Qualifiers::OCL_None;
6811 } else if ((LifeTime = Ty.getObjCLifetime())) {
6812 // Honor the ARC qualifiers.
6813 } else if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) {
6814 // The MRR rule.
6815 LifeTime = Qualifiers::OCL_ExplicitNone;
6816 } else {
6817 LifeTime = Qualifiers::OCL_None;
6818 }
6819 return true;
6820}
6821
6822CanQualType ASTContext::getNSUIntegerType() const {
6823 assert(Target && "Expected target to be initialized");
6824 const llvm::Triple &T = Target->getTriple();
6825 // Windows is LLP64 rather than LP64
6826 if (T.isOSWindows() && T.isArch64Bit())
6827 return UnsignedLongLongTy;
6828 return UnsignedLongTy;
6829}
6830
6831CanQualType ASTContext::getNSIntegerType() const {
6832 assert(Target && "Expected target to be initialized");
6833 const llvm::Triple &T = Target->getTriple();
6834 // Windows is LLP64 rather than LP64
6835 if (T.isOSWindows() && T.isArch64Bit())
6836 return LongLongTy;
6837 return LongTy;
6838}
6839
6840TypedefDecl *ASTContext::getObjCInstanceTypeDecl() {
6841 if (!ObjCInstanceTypeDecl)
6842 ObjCInstanceTypeDecl =
6843 buildImplicitTypedef(getObjCIdType(), "instancetype");
6844 return ObjCInstanceTypeDecl;
6845}
6846
6847// This returns true if a type has been typedefed to BOOL:
6848// typedef <type> BOOL;
6849static bool isTypeTypedefedAsBOOL(QualType T) {
6850 if (const auto *TT = dyn_cast<TypedefType>(T))
6851 if (IdentifierInfo *II = TT->getDecl()->getIdentifier())
6852 return II->isStr("BOOL");
6853
6854 return false;
6855}
6856
6857/// getObjCEncodingTypeSize returns size of type for objective-c encoding
6858/// purpose.
6859CharUnits ASTContext::getObjCEncodingTypeSize(QualType type) const {
6860 if (!type->isIncompleteArrayType() && type->isIncompleteType())
6861 return CharUnits::Zero();
6862
6863 CharUnits sz = getTypeSizeInChars(type);
6864
6865 // Make all integer and enum types at least as large as an int
6866 if (sz.isPositive() && type->isIntegralOrEnumerationType())
6867 sz = std::max(sz, getTypeSizeInChars(IntTy));
6868 // Treat arrays as pointers, since that's how they're passed in.
6869 else if (type->isArrayType())
6870 sz = getTypeSizeInChars(VoidPtrTy);
6871 return sz;
6872}
6873
6874bool ASTContext::isMSStaticDataMemberInlineDefinition(const VarDecl *VD) const {
6875 return getTargetInfo().getCXXABI().isMicrosoft() &&
6876 VD->isStaticDataMember() &&
6877 VD->getType()->isIntegralOrEnumerationType() &&
6878 !VD->getFirstDecl()->isOutOfLine() && VD->getFirstDecl()->hasInit();
6879}
6880
6881ASTContext::InlineVariableDefinitionKind
6882ASTContext::getInlineVariableDefinitionKind(const VarDecl *VD) const {
6883 if (!VD->isInline())
6884 return InlineVariableDefinitionKind::None;
6885
6886 // In almost all cases, it's a weak definition.
6887 auto *First = VD->getFirstDecl();
6888 if (First->isInlineSpecified() || !First->isStaticDataMember())
6889 return InlineVariableDefinitionKind::Weak;
6890
6891 // If there's a file-context declaration in this translation unit, it's a
6892 // non-discardable definition.
6893 for (auto *D : VD->redecls())
6894 if (D->getLexicalDeclContext()->isFileContext() &&
6895 !D->isInlineSpecified() && (D->isConstexpr() || First->isConstexpr()))
6896 return InlineVariableDefinitionKind::Strong;
6897
6898 // If we've not seen one yet, we don't know.
6899 return InlineVariableDefinitionKind::WeakUnknown;
6900}
6901
6902static std::string charUnitsToString(const CharUnits &CU) {
6903 return llvm::itostr(CU.getQuantity());
6904}
6905
6906/// getObjCEncodingForBlock - Return the encoded type for this block
6907/// declaration.
6908std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const {
6909 std::string S;
6910
6911 const BlockDecl *Decl = Expr->getBlockDecl();
6912 QualType BlockTy =
6913 Expr->getType()->castAs<BlockPointerType>()->getPointeeType();
6914 QualType BlockReturnTy = BlockTy->castAs<FunctionType>()->getReturnType();
6915 // Encode result type.
6916 if (getLangOpts().EncodeExtendedBlockSig)
6917 getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, BlockReturnTy, S,
6918 true /*Extended*/);
6919 else
6920 getObjCEncodingForType(BlockReturnTy, S);
6921 // Compute size of all parameters.
6922 // Start with computing size of a pointer in number of bytes.
6923 // FIXME: There might(should) be a better way of doing this computation!
6924 CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy);
6925 CharUnits ParmOffset = PtrSize;
6926 for (auto PI : Decl->parameters()) {
6927 QualType PType = PI->getType();
6928 CharUnits sz = getObjCEncodingTypeSize(PType);
6929 if (sz.isZero())
6930 continue;
6931 assert(sz.isPositive() && "BlockExpr - Incomplete param type");
6932 ParmOffset += sz;
6933 }
6934 // Size of the argument frame
6935 S += charUnitsToString(ParmOffset);
6936 // Block pointer and offset.
6937 S += "@?0";
6938
6939 // Argument types.
6940 ParmOffset = PtrSize;
6941 for (auto PVDecl : Decl->parameters()) {
6942 QualType PType = PVDecl->getOriginalType();
6943 if (const auto *AT =
6944 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
6945 // Use array's original type only if it has known number of
6946 // elements.
6947 if (!isa<ConstantArrayType>(AT))
6948 PType = PVDecl->getType();
6949 } else if (PType->isFunctionType())
6950 PType = PVDecl->getType();
6951 if (getLangOpts().EncodeExtendedBlockSig)
6952 getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, PType,
6953 S, true /*Extended*/);
6954 else
6955 getObjCEncodingForType(PType, S);
6956 S += charUnitsToString(ParmOffset);
6957 ParmOffset += getObjCEncodingTypeSize(PType);
6958 }
6959
6960 return S;
6961}
6962
6963std::string
6964ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl) const {
6965 std::string S;
6966 // Encode result type.
6967 getObjCEncodingForType(Decl->getReturnType(), S);
6968 CharUnits ParmOffset;
6969 // Compute size of all parameters.
6970 for (auto PI : Decl->parameters()) {
6971 QualType PType = PI->getType();
6972 CharUnits sz = getObjCEncodingTypeSize(PType);
6973 if (sz.isZero())
6974 continue;
6975
6976 assert(sz.isPositive() &&
6977 "getObjCEncodingForFunctionDecl - Incomplete param type");
6978 ParmOffset += sz;
6979 }
6980 S += charUnitsToString(ParmOffset);
6981 ParmOffset = CharUnits::Zero();
6982
6983 // Argument types.
6984 for (auto PVDecl : Decl->parameters()) {
6985 QualType PType = PVDecl->getOriginalType();
6986 if (const auto *AT =
6987 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
6988 // Use array's original type only if it has known number of
6989 // elements.
6990 if (!isa<ConstantArrayType>(AT))
6991 PType = PVDecl->getType();
6992 } else if (PType->isFunctionType())
6993 PType = PVDecl->getType();
6994 getObjCEncodingForType(PType, S);
6995 S += charUnitsToString(ParmOffset);
6996 ParmOffset += getObjCEncodingTypeSize(PType);
6997 }
6998
6999 return S;
7000}
7001
7002/// getObjCEncodingForMethodParameter - Return the encoded type for a single
7003/// method parameter or return type. If Extended, include class names and
7004/// block object types.
7005void ASTContext::getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT,
7006 QualType T, std::string& S,
7007 bool Extended) const {
7008 // Encode type qualifier, 'in', 'inout', etc. for the parameter.
7009 getObjCEncodingForTypeQualifier(QT, S);
7010 // Encode parameter type.
7011 ObjCEncOptions Options = ObjCEncOptions()
7012 .setExpandPointedToStructures()
7013 .setExpandStructures()
7014 .setIsOutermostType();
7015 if (Extended)
7016 Options.setEncodeBlockParameters().setEncodeClassNames();
7017 getObjCEncodingForTypeImpl(T, S, Options, /*Field=*/nullptr);
7018}
7019
7020/// getObjCEncodingForMethodDecl - Return the encoded type for this method
7021/// declaration.
7022std::string ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl,
7023 bool Extended) const {
7024 // FIXME: This is not very efficient.
7025 // Encode return type.
7026 std::string S;
7027 getObjCEncodingForMethodParameter(Decl->getObjCDeclQualifier(),
7028 Decl->getReturnType(), S, Extended);
7029 // Compute size of all parameters.
7030 // Start with computing size of a pointer in number of bytes.
7031 // FIXME: There might(should) be a better way of doing this computation!
7032 CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy);
7033 // The first two arguments (self and _cmd) are pointers; account for
7034 // their size.
7035 CharUnits ParmOffset = 2 * PtrSize;
7036 for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(),
7037 E = Decl->sel_param_end(); PI != E; ++PI) {
7038 QualType PType = (*PI)->getType();
7039 CharUnits sz = getObjCEncodingTypeSize(PType);
7040 if (sz.isZero())
7041 continue;
7042
7043 assert(sz.isPositive() &&
7044 "getObjCEncodingForMethodDecl - Incomplete param type");
7045 ParmOffset += sz;
7046 }
7047 S += charUnitsToString(ParmOffset);
7048 S += "@0:";
7049 S += charUnitsToString(PtrSize);
7050
7051 // Argument types.
7052 ParmOffset = 2 * PtrSize;
7053 for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(),
7054 E = Decl->sel_param_end(); PI != E; ++PI) {
7055 const ParmVarDecl *PVDecl = *PI;
7056 QualType PType = PVDecl->getOriginalType();
7057 if (const auto *AT =
7058 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
7059 // Use array's original type only if it has known number of
7060 // elements.
7061 if (!isa<ConstantArrayType>(AT))
7062 PType = PVDecl->getType();
7063 } else if (PType->isFunctionType())
7064 PType = PVDecl->getType();
7065 getObjCEncodingForMethodParameter(PVDecl->getObjCDeclQualifier(),
7066 PType, S, Extended);
7067 S += charUnitsToString(ParmOffset);
7068 ParmOffset += getObjCEncodingTypeSize(PType);
7069 }
7070
7071 return S;
7072}
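// Worked example (not from this file), assuming 8-byte pointers and a 4-byte
// int: for `- (void)setValue:(int)v;` the non-extended encoding built above
// is "v20@0:8i16" -- return type 'v', total frame size 20 (self + _cmd + the
// int), self '@' at offset 0, _cmd ':' at offset 8, and the int 'i' at
// offset 16.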
7073
7074ObjCPropertyImplDecl *
7075ASTContext::getObjCPropertyImplDeclForPropertyDecl(
7076 const ObjCPropertyDecl *PD,
7077 const Decl *Container) const {
7078 if (!Container)
7079 return nullptr;
7080 if (const auto *CID = dyn_cast<ObjCCategoryImplDecl>(Container)) {
7081 for (auto *PID : CID->property_impls())
7082 if (PID->getPropertyDecl() == PD)
7083 return PID;
7084 } else {
7085 const auto *OID = cast<ObjCImplementationDecl>(Container);
7086 for (auto *PID : OID->property_impls())
7087 if (PID->getPropertyDecl() == PD)
7088 return PID;
7089 }
7090 return nullptr;
7091}
7092
7093/// getObjCEncodingForPropertyDecl - Return the encoded type for this
7094/// property declaration. If non-NULL, Container must be either an
7095/// ObjCCategoryImplDecl or ObjCImplementationDecl; it should only be
7096/// NULL when getting encodings for protocol properties.
7097/// Property attributes are stored as a comma-delimited C string. The simple
7098/// attributes readonly and bycopy are encoded as single characters. The
7099/// parametrized attributes, getter=name, setter=name, and ivar=name, are
7100/// encoded as single characters, followed by an identifier. Property types
7101/// are also encoded as a parametrized attribute. The characters used to encode
7102/// these attributes are defined by the following enumeration:
7103/// @code
7104/// enum PropertyAttributes {
7105/// kPropertyReadOnly = 'R', // property is read-only.
7106/// kPropertyBycopy = 'C', // property is a copy of the value last assigned
7107/// kPropertyByref = '&', // property is a reference to the value last assigned
7108/// kPropertyDynamic = 'D', // property is dynamic
7109/// kPropertyGetter = 'G', // followed by getter selector name
7110/// kPropertySetter = 'S', // followed by setter selector name
7111/// kPropertyInstanceVariable = 'V' // followed by instance variable name
7112/// kPropertyType = 'T' // followed by old-style type encoding.
7113/// kPropertyWeak = 'W' // 'weak' property
7114/// kPropertyStrong = 'P' // property GC'able
7115/// kPropertyNonAtomic = 'N' // property non-atomic
7116/// };
7117/// @endcode
7118std::string
7119ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD,
7120 const Decl *Container) const {
7121 // Collect information from the property implementation decl(s).
7122 bool Dynamic = false;
7123 ObjCPropertyImplDecl *SynthesizePID = nullptr;
7124
7125 if (ObjCPropertyImplDecl *PropertyImpDecl =
7126 getObjCPropertyImplDeclForPropertyDecl(PD, Container)) {
7127 if (PropertyImpDecl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
7128 Dynamic = true;
7129 else
7130 SynthesizePID = PropertyImpDecl;
7131 }
7132
7133 // FIXME: This is not very efficient.
7134 std::string S = "T";
7135
7136 // Encode result type.
7137 // GCC has some special rules regarding encoding of properties which
7138 // closely resemble encoding of ivars.
7139 getObjCEncodingForPropertyType(PD->getType(), S);
7140
7141 if (PD->isReadOnly()) {
7142 S += ",R";
7143 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_copy)
7144 S += ",C";
7145 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_retain)
7146 S += ",&";
7147 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_weak)
7148 S += ",W";
7149 } else {
7150 switch (PD->getSetterKind()) {
7151 case ObjCPropertyDecl::Assign: break;
7152 case ObjCPropertyDecl::Copy: S += ",C"; break;
7153 case ObjCPropertyDecl::Retain: S += ",&"; break;
7154 case ObjCPropertyDecl::Weak: S += ",W"; break;
7155 }
7156 }
7157
7158 // It really isn't clear at all what this means, since properties
7159 // are "dynamic by default".
7160 if (Dynamic)
7161 S += ",D";
7162
7163 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_nonatomic)
7164 S += ",N";
7165
7166 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_getter) {
7167 S += ",G";
7168 S += PD->getGetterName().getAsString();
7169 }
7170
7171 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_setter) {
7172 S += ",S";
7173 S += PD->getSetterName().getAsString();
7174 }
7175
7176 if (SynthesizePID) {
7177 const ObjCIvarDecl *OID = SynthesizePID->getPropertyIvarDecl();
7178 S += ",V";
7179 S += OID->getNameAsString();
7180 }
7181
7182 // FIXME: OBJCGC: weak & strong
7183 return S;
7184}
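// Worked example (not from this file): for
//   @property (nonatomic, readonly) int count;
// backed by a synthesized ivar `_count`, the code above typically produces
// "Ti,R,N,V_count": 'T' plus the type encoding, ",R" for readonly, ",N" for
// nonatomic, and ",V_count" naming the instance variable.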
7185
7186/// getLegacyIntegralTypeEncoding -
7187/// Another legacy compatibility encoding: 32-bit longs are encoded as
7188/// 'l' or 'L' , but not always. For typedefs, we need to use
7189/// 'i' or 'I' instead if encoding a struct field, or a pointer!
7190void ASTContext::getLegacyIntegralTypeEncoding (QualType &PointeeTy) const {
7191 if (isa<TypedefType>(PointeeTy.getTypePtr())) {
7192 if (const auto *BT = PointeeTy->getAs<BuiltinType>()) {
7193 if (BT->getKind() == BuiltinType::ULong && getIntWidth(PointeeTy) == 32)
7194 PointeeTy = UnsignedIntTy;
7195 else
7196 if (BT->getKind() == BuiltinType::Long && getIntWidth(PointeeTy) == 32)
7197 PointeeTy = IntTy;
7198 }
7199 }
7200}
7201
7202void ASTContext::getObjCEncodingForType(QualType T, std::string& S,
7203 const FieldDecl *Field,
7204 QualType *NotEncodedT) const {
7205 // We follow the behavior of gcc, expanding structures which are
7206 // directly pointed to, and expanding embedded structures. Note that
7207 // these rules are sufficient to prevent recursive encoding of the
7208 // same type.
7209 getObjCEncodingForTypeImpl(T, S,
7210 ObjCEncOptions()
7211 .setExpandPointedToStructures()
7212 .setExpandStructures()
7213 .setIsOutermostType(),
7214 Field, NotEncodedT);
7215}
7216
7217void ASTContext::getObjCEncodingForPropertyType(QualType T,
7218 std::string& S) const {
7219 // Encode result type.
7220 // GCC has some special rules regarding encoding of properties which
7221 // closely resemble encoding of ivars.
7222 getObjCEncodingForTypeImpl(T, S,
7223 ObjCEncOptions()
7224 .setExpandPointedToStructures()
7225 .setExpandStructures()
7226 .setIsOutermostType()
7227 .setEncodingProperty(),
7228 /*Field=*/nullptr);
7229}
7230
7231static char getObjCEncodingForPrimitiveType(const ASTContext *C,
7232 const BuiltinType *BT) {
7233 BuiltinType::Kind kind = BT->getKind();
7234 switch (kind) {
7235 case BuiltinType::Void: return 'v';
7236 case BuiltinType::Bool: return 'B';
7237 case BuiltinType::Char8:
7238 case BuiltinType::Char_U:
7239 case BuiltinType::UChar: return 'C';
7240 case BuiltinType::Char16:
7241 case BuiltinType::UShort: return 'S';
7242 case BuiltinType::Char32:
7243 case BuiltinType::UInt: return 'I';
7244 case BuiltinType::ULong:
7245 return C->getTargetInfo().getLongWidth() == 32 ? 'L' : 'Q';
7246 case BuiltinType::UInt128: return 'T';
7247 case BuiltinType::ULongLong: return 'Q';
7248 case BuiltinType::Char_S:
7249 case BuiltinType::SChar: return 'c';
7250 case BuiltinType::Short: return 's';
7251 case BuiltinType::WChar_S:
7252 case BuiltinType::WChar_U:
7253 case BuiltinType::Int: return 'i';
7254 case BuiltinType::Long:
7255 return C->getTargetInfo().getLongWidth() == 32 ? 'l' : 'q';
7256 case BuiltinType::LongLong: return 'q';
7257 case BuiltinType::Int128: return 't';
7258 case BuiltinType::Float: return 'f';
7259 case BuiltinType::Double: return 'd';
7260 case BuiltinType::LongDouble: return 'D';
7261 case BuiltinType::NullPtr: return '*'; // like char*
7262
7263 case BuiltinType::BFloat16:
7264 case BuiltinType::Float16:
7265 case BuiltinType::Float128:
7266 case BuiltinType::Half:
7267 case BuiltinType::ShortAccum:
7268 case BuiltinType::Accum:
7269 case BuiltinType::LongAccum:
7270 case BuiltinType::UShortAccum:
7271 case BuiltinType::UAccum:
7272 case BuiltinType::ULongAccum:
7273 case BuiltinType::ShortFract:
7274 case BuiltinType::Fract:
7275 case BuiltinType::LongFract:
7276 case BuiltinType::UShortFract:
7277 case BuiltinType::UFract:
7278 case BuiltinType::ULongFract:
7279 case BuiltinType::SatShortAccum:
7280 case BuiltinType::SatAccum:
7281 case BuiltinType::SatLongAccum:
7282 case BuiltinType::SatUShortAccum:
7283 case BuiltinType::SatUAccum:
7284 case BuiltinType::SatULongAccum:
7285 case BuiltinType::SatShortFract:
7286 case BuiltinType::SatFract:
7287 case BuiltinType::SatLongFract:
7288 case BuiltinType::SatUShortFract:
7289 case BuiltinType::SatUFract:
7290 case BuiltinType::SatULongFract:
7291 // FIXME: potentially need @encodes for these!
7292 return ' ';
7293
7294#define SVE_TYPE(Name, Id, SingletonId) \
7295 case BuiltinType::Id:
7296#include "clang/Basic/AArch64SVEACLETypes.def"
7297#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
7298#include "clang/Basic/RISCVVTypes.def"
7299 {
7300 DiagnosticsEngine &Diags = C->getDiagnostics();
7301 unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
7302 "cannot yet @encode type %0");
7303 Diags.Report(DiagID) << BT->getName(C->getPrintingPolicy());
7304 return ' ';
7305 }
7306
7307 case BuiltinType::ObjCId:
7308 case BuiltinType::ObjCClass:
7309 case BuiltinType::ObjCSel:
7310 llvm_unreachable("@encoding ObjC primitive type");
7311
7312 // OpenCL and placeholder types don't need @encodings.
7313#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
7314 case BuiltinType::Id:
7315#include "clang/Basic/OpenCLImageTypes.def"
7316#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
7317 case BuiltinType::Id:
7318#include "clang/Basic/OpenCLExtensionTypes.def"
7319 case BuiltinType::OCLEvent:
7320 case BuiltinType::OCLClkEvent:
7321 case BuiltinType::OCLQueue:
7322 case BuiltinType::OCLReserveID:
7323 case BuiltinType::OCLSampler:
7324 case BuiltinType::Dependent:
7325#define PPC_VECTOR_TYPE(Name, Id, Size) \
7326 case BuiltinType::Id:
7327#include "clang/Basic/PPCTypes.def"
7328#define BUILTIN_TYPE(KIND, ID)
7329#define PLACEHOLDER_TYPE(KIND, ID) \
7330 case BuiltinType::KIND:
7331#include "clang/AST/BuiltinTypes.def"
7332 llvm_unreachable("invalid builtin type for @encode");
7333 }
7334 llvm_unreachable("invalid BuiltinType::Kind value");
7335}
7336
7337static char ObjCEncodingForEnumType(const ASTContext *C, const EnumType *ET) {
7338 EnumDecl *Enum = ET->getDecl();
7339
7340 // The encoding of a non-fixed enum type is always 'i', regardless of size.
7341 if (!Enum->isFixed())
7342 return 'i';
7343
7344 // The encoding of a fixed enum type matches its fixed underlying type.
7345 const auto *BT = Enum->getIntegerType()->castAs<BuiltinType>();
7346 return getObjCEncodingForPrimitiveType(C, BT);
7347}
7348
7349static void EncodeBitField(const ASTContext *Ctx, std::string& S,
7350 QualType T, const FieldDecl *FD) {
7351 assert(FD->isBitField() && "not a bitfield - getObjCEncodingForTypeImpl");
7352 S += 'b';
7353 // The NeXT runtime encodes bit fields as b followed by the number of bits.
7354 // The GNU runtime requires more information; bitfields are encoded as b,
7355 // then the offset (in bits) of the first element, then the type of the
7356 // bitfield, then the size in bits. For example, in this structure:
7357 //
7358 // struct
7359 // {
7360 // int integer;
7361 // int flags:2;
7362 // };
7363 // On a 32-bit system, the encoding for flags would be b2 for the NeXT
7364 // runtime, but b32i2 for the GNU runtime. The reason for this extra
7365 // information is not especially sensible, but we're stuck with it for
7366 // compatibility with GCC, although providing it breaks anything that
7367 // actually uses runtime introspection and wants to work on both runtimes...
7368 if (Ctx->getLangOpts().ObjCRuntime.isGNUFamily()) {
7369 uint64_t Offset;
7370
7371 if (const auto *IVD = dyn_cast<ObjCIvarDecl>(FD)) {
7372 Offset = Ctx->lookupFieldBitOffset(IVD->getContainingInterface(), nullptr,
7373 IVD);
7374 } else {
7375 const RecordDecl *RD = FD->getParent();
7376 const ASTRecordLayout &RL = Ctx->getASTRecordLayout(RD);
7377 Offset = RL.getFieldOffset(FD->getFieldIndex());
7378 }
7379
7380 S += llvm::utostr(Offset);
7381
7382 if (const auto *ET = T->getAs<EnumType>())
7383 S += ObjCEncodingForEnumType(Ctx, ET);
7384 else {
7385 const auto *BT = T->castAs<BuiltinType>();
7386 S += getObjCEncodingForPrimitiveType(Ctx, BT);
7387 }
7388 }
7389 S += llvm::utostr(FD->getBitWidthValue(*Ctx));
7390}
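// Illustrative sketch (not part of ASTContext.cpp): the strings EncodeBitField
// produces for the `int flags : 2;` example in the comment above, assuming a
// 32-bit int so that `flags` starts at bit offset 32. The helper name is
// invented for the example.
static std::string SketchFlagsBitFieldEncoding(bool GNURuntime) {
  std::string S = "b";   // every bit-field encoding starts with 'b'
  if (GNURuntime) {
    S += "32";           // GNU runtime: bit offset of the field...
    S += "i";            // ...then the code of its underlying type (int)
  }
  S += "2";              // both runtimes end with the width in bits
  return S;              // NeXT: "b2", GNU: "b32i2"
}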
7391
7392// Helper function for determining whether the encoded type string would include
7393// a template specialization type.
7394static bool hasTemplateSpecializationInEncodedString(const Type *T,
7395 bool VisitBasesAndFields) {
7396 T = T->getBaseElementTypeUnsafe();
7397
7398 if (auto *PT = T->getAs<PointerType>())
7399 return hasTemplateSpecializationInEncodedString(
7400 PT->getPointeeType().getTypePtr(), false);
7401
7402 auto *CXXRD = T->getAsCXXRecordDecl();
7403
7404 if (!CXXRD)
7405 return false;
7406
7407 if (isa<ClassTemplateSpecializationDecl>(CXXRD))
7408 return true;
7409
7410 if (!CXXRD->hasDefinition() || !VisitBasesAndFields)
7411 return false;
7412
7413 for (auto B : CXXRD->bases())
7414 if (hasTemplateSpecializationInEncodedString(B.getType().getTypePtr(),
7415 true))
7416 return true;
7417
7418 for (auto *FD : CXXRD->fields())
7419 if (hasTemplateSpecializationInEncodedString(FD->getType().getTypePtr(),
7420 true))
7421 return true;
7422
7423 return false;
7424}
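// Illustrative sketch (not part of ASTContext.cpp): the effect of the check
// above on @encode output when LangOpts.EncodeCXXClassTemplateSpec is false
// (the default). The type names are invented for the example.
//
//   struct Plain { int x; };
//   template <class T> struct Box { T v; };
//
//   @encode(Plain *)    ->  "^{Plain=i}"  (pointee struct expanded as usual)
//   @encode(Box<int> *) ->  "^v"          (a template specialization appears
//                                          in the pointee, so it is hidden)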
7425
7426// FIXME: Use SmallString for accumulating string.
7427void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S,
7428 const ObjCEncOptions Options,
7429 const FieldDecl *FD,
7430 QualType *NotEncodedT) const {
7431 CanQualType CT = getCanonicalType(T);
7432 switch (CT->getTypeClass()) {
7433 case Type::Builtin:
7434 case Type::Enum:
7435 if (FD && FD->isBitField())
7436 return EncodeBitField(this, S, T, FD);
7437 if (const auto *BT = dyn_cast<BuiltinType>(CT))
7438 S += getObjCEncodingForPrimitiveType(this, BT);
7439 else
7440 S += ObjCEncodingForEnumType(this, cast<EnumType>(CT));
7441 return;
7442
7443 case Type::Complex:
7444 S += 'j';
7445 getObjCEncodingForTypeImpl(T->castAs<ComplexType>()->getElementType(), S,
7446 ObjCEncOptions(),
7447 /*Field=*/nullptr);
7448 return;
7449
7450 case Type::Atomic:
7451 S += 'A';
7452 getObjCEncodingForTypeImpl(T->castAs<AtomicType>()->getValueType(), S,
7453 ObjCEncOptions(),
7454 /*Field=*/nullptr);
7455 return;
7456
7457 // encoding for pointer or reference types.
7458 case Type::Pointer:
7459 case Type::LValueReference:
7460 case Type::RValueReference: {
7461 QualType PointeeTy;
7462 if (isa<PointerType>(CT)) {
7463 const auto *PT = T->castAs<PointerType>();
7464 if (PT->isObjCSelType()) {
7465 S += ':';
7466 return;
7467 }
7468 PointeeTy = PT->getPointeeType();
7469 } else {
7470 PointeeTy = T->castAs<ReferenceType>()->getPointeeType();
7471 }
7472
7473 bool isReadOnly = false;
7474 // For historical/compatibility reasons, the read-only qualifier of the
7475 // pointee gets emitted _before_ the '^'. The read-only qualifier of
7476 // the pointer itself gets ignored, _unless_ we are looking at a typedef!
7477 // Also, do not emit the 'r' for anything but the outermost type!
7478 if (isa<TypedefType>(T.getTypePtr())) {
7479 if (Options.IsOutermostType() && T.isConstQualified()) {
7480 isReadOnly = true;
7481 S += 'r';
7482 }
7483 } else if (Options.IsOutermostType()) {
7484 QualType P = PointeeTy;
7485 while (auto PT = P->getAs<PointerType>())
7486 P = PT->getPointeeType();
7487 if (P.isConstQualified()) {
7488 isReadOnly = true;
7489 S += 'r';
7490 }
7491 }
7492 if (isReadOnly) {
7493 // Another legacy compatibility encoding. Some ObjC qualifier and type
7494 // combinations need to be rearranged.
7495 // Rewrite "in const" from "nr" to "rn"
7496 if (StringRef(S).endswith("nr"))
7497 S.replace(S.end()-2, S.end(), "rn");
7498 }
7499
7500 if (PointeeTy->isCharType()) {
7501 // char pointer types should be encoded as '*' unless the pointee is a
7502 // type that has been typedef'd to 'BOOL'.
7503 if (!isTypeTypedefedAsBOOL(PointeeTy)) {
7504 S += '*';
7505 return;
7506 }
7507 } else if (const auto *RTy = PointeeTy->getAs<RecordType>()) {
7508 // GCC binary compat: Need to convert "struct objc_class *" to "#".
7509 if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_class")) {
7510 S += '#';
7511 return;
7512 }
7513 // GCC binary compat: Need to convert "struct objc_object *" to "@".
7514 if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_object")) {
7515 S += '@';
7516 return;
7517 }
7518 // If the encoded string for the class includes template names, just emit
7519 // "^v" for pointers to the class.
7520 if (getLangOpts().CPlusPlus &&
7521 (!getLangOpts().EncodeCXXClassTemplateSpec &&
7522 hasTemplateSpecializationInEncodedString(
7523 RTy, Options.ExpandPointedToStructures()))) {
7524 S += "^v";
7525 return;
7526 }
7527 // fall through...
7528 }
7529 S += '^';
7530 getLegacyIntegralTypeEncoding(PointeeTy);
7531
7532 ObjCEncOptions NewOptions;
7533 if (Options.ExpandPointedToStructures())
7534 NewOptions.setExpandStructures();
7535 getObjCEncodingForTypeImpl(PointeeTy, S, NewOptions,
7536 /*Field=*/nullptr, NotEncodedT);
7537 return;
7538 }
7539
7540 case Type::ConstantArray:
7541 case Type::IncompleteArray:
7542 case Type::VariableArray: {
7543 const auto *AT = cast<ArrayType>(CT);
7544
7545 if (isa<IncompleteArrayType>(AT) && !Options.IsStructField()) {
7546 // Incomplete arrays are encoded as a pointer to the array element.
7547 S += '^';
7548
7549 getObjCEncodingForTypeImpl(
7550 AT->getElementType(), S,
7551 Options.keepingOnly(ObjCEncOptions().setExpandStructures()), FD);
7552 } else {
7553 S += '[';
7554
7555 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
7556 S += llvm::utostr(CAT->getSize().getZExtValue());
7557 else {
7558 // Variable-length arrays are encoded as a regular array with 0 elements.
7559 assert((isa<VariableArrayType>(AT) || isa<IncompleteArrayType>(AT)) &&((void)0)
7560 "Unknown array type!")((void)0);
7561 S += '0';
7562 }
7563
7564 getObjCEncodingForTypeImpl(
7565 AT->getElementType(), S,
7566 Options.keepingOnly(ObjCEncOptions().setExpandStructures()), FD,
7567 NotEncodedT);
7568 S += ']';
7569 }
7570 return;
7571 }
7572
7573 case Type::FunctionNoProto:
7574 case Type::FunctionProto:
7575 S += '?';
7576 return;
7577
7578 case Type::Record: {
7579 RecordDecl *RDecl = cast<RecordType>(CT)->getDecl();
7580 S += RDecl->isUnion() ? '(' : '{';
7581 // Anonymous structures print as '?'
7582 if (const IdentifierInfo *II = RDecl->getIdentifier()) {
7583 S += II->getName();
7584 if (const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(RDecl)) {
7585 const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
7586 llvm::raw_string_ostream OS(S);
7587 printTemplateArgumentList(OS, TemplateArgs.asArray(),
7588 getPrintingPolicy());
7589 }
7590 } else {
7591 S += '?';
7592 }
7593 if (Options.ExpandStructures()) {
7594 S += '=';
7595 if (!RDecl->isUnion()) {
7596 getObjCEncodingForStructureImpl(RDecl, S, FD, true, NotEncodedT);
7597 } else {
7598 for (const auto *Field : RDecl->fields()) {
7599 if (FD) {
7600 S += '"';
7601 S += Field->getNameAsString();
7602 S += '"';
7603 }
7604
7605 // Special case bit-fields.
7606 if (Field->isBitField()) {
7607 getObjCEncodingForTypeImpl(Field->getType(), S,
7608 ObjCEncOptions().setExpandStructures(),
7609 Field);
7610 } else {
7611 QualType qt = Field->getType();
7612 getLegacyIntegralTypeEncoding(qt);
7613 getObjCEncodingForTypeImpl(
7614 qt, S,
7615 ObjCEncOptions().setExpandStructures().setIsStructField(), FD,
7616 NotEncodedT);
7617 }
7618 }
7619 }
7620 }
7621 S += RDecl->isUnion() ? ')' : '}';
7622 return;
7623 }
7624
7625 case Type::BlockPointer: {
7626 const auto *BT = T->castAs<BlockPointerType>();
7627 S += "@?"; // Unlike a pointer-to-function, which is "^?".
7628 if (Options.EncodeBlockParameters()) {
7629 const auto *FT = BT->getPointeeType()->castAs<FunctionType>();
7630
7631 S += '<';
7632 // Block return type
7633 getObjCEncodingForTypeImpl(FT->getReturnType(), S,
7634 Options.forComponentType(), FD, NotEncodedT);
7635 // Block self
7636 S += "@?";
7637 // Block parameters
7638 if (const auto *FPT = dyn_cast<FunctionProtoType>(FT)) {
7639 for (const auto &I : FPT->param_types())
7640 getObjCEncodingForTypeImpl(I, S, Options.forComponentType(), FD,
7641 NotEncodedT);
7642 }
7643 S += '>';
7644 }
7645 return;
7646 }
7647
7648 case Type::ObjCObject: {
7649 // hack to match legacy encoding of *id and *Class
7650 QualType Ty = getObjCObjectPointerType(CT);
7651 if (Ty->isObjCIdType()) {
7652 S += "{objc_object=}";
7653 return;
7654 }
7655 else if (Ty->isObjCClassType()) {
7656 S += "{objc_class=}";
7657 return;
7658 }
7659 // TODO: Double check to make sure this intentionally falls through.
7660 LLVM_FALLTHROUGH[[gnu::fallthrough]];
7661 }
7662
7663 case Type::ObjCInterface: {
7664 // Ignore protocol qualifiers when mangling at this level.
7665 // @encode(class_name)
7666 ObjCInterfaceDecl *OI = T->castAs<ObjCObjectType>()->getInterface();
7667 S += '{';
7668 S += OI->getObjCRuntimeNameAsString();
7669 if (Options.ExpandStructures()) {
7670 S += '=';
7671 SmallVector<const ObjCIvarDecl*, 32> Ivars;
7672 DeepCollectObjCIvars(OI, true, Ivars);
7673 for (unsigned i = 0, e = Ivars.size(); i != e; ++i) {
7674 const FieldDecl *Field = Ivars[i];
7675 if (Field->isBitField())
7676 getObjCEncodingForTypeImpl(Field->getType(), S,
7677 ObjCEncOptions().setExpandStructures(),
7678 Field);
7679 else
7680 getObjCEncodingForTypeImpl(Field->getType(), S,
7681 ObjCEncOptions().setExpandStructures(), FD,
7682 NotEncodedT);
7683 }
7684 }
7685 S += '}';
7686 return;
7687 }
7688
7689 case Type::ObjCObjectPointer: {
7690 const auto *OPT = T->castAs<ObjCObjectPointerType>();
7691 if (OPT->isObjCIdType()) {
7692 S += '@';
7693 return;
7694 }
7695
7696 if (OPT->isObjCClassType() || OPT->isObjCQualifiedClassType()) {
7697 // FIXME: Consider if we need to output qualifiers for 'Class<p>'.
7698 // Since this is a binary compatibility issue, need to consult with
7699 // runtime folks. Fortunately, this is a *very* obscure construct.
7700 S += '#';
7701 return;
7702 }
7703
7704 if (OPT->isObjCQualifiedIdType()) {
7705 getObjCEncodingForTypeImpl(
7706 getObjCIdType(), S,
7707 Options.keepingOnly(ObjCEncOptions()
7708 .setExpandPointedToStructures()
7709 .setExpandStructures()),
7710 FD);
7711 if (FD || Options.EncodingProperty() || Options.EncodeClassNames()) {
7712 // Note that we do extended encoding of the protocol qualifier list
7713 // only when doing ivar or property encoding.
7714 S += '"';
7715 for (const auto *I : OPT->quals()) {
7716 S += '<';
7717 S += I->getObjCRuntimeNameAsString();
7718 S += '>';
7719 }
7720 S += '"';
7721 }
7722 return;
7723 }
7724
7725 S += '@';
7726 if (OPT->getInterfaceDecl() &&
7727 (FD || Options.EncodingProperty() || Options.EncodeClassNames())) {
7728 S += '"';
7729 S += OPT->getInterfaceDecl()->getObjCRuntimeNameAsString();
7730 for (const auto *I : OPT->quals()) {
7731 S += '<';
7732 S += I->getObjCRuntimeNameAsString();
7733 S += '>';
7734 }
7735 S += '"';
7736 }
7737 return;
7738 }
7739
7740 // gcc just blithely ignores member pointers.
7741 // FIXME: we should do better than that. 'M' is available.
7742 case Type::MemberPointer:
7743 // This matches gcc's encoding, even though technically it is insufficient.
7744 // FIXME: We should do a better job than gcc.
7745 case Type::Vector:
7746 case Type::ExtVector:
7747 // Until we have a coherent encoding of these three types, issue warning.
7748 if (NotEncodedT)
7749 *NotEncodedT = T;
7750 return;
7751
7752 case Type::ConstantMatrix:
7753 if (NotEncodedT)
7754 *NotEncodedT = T;
7755 return;
7756
7757 // We could see an undeduced auto type here during error recovery.
7758 // Just ignore it.
7759 case Type::Auto:
7760 case Type::DeducedTemplateSpecialization:
7761 return;
7762
7763 case Type::Pipe:
7764 case Type::ExtInt:
7765#define ABSTRACT_TYPE(KIND, BASE)
7766#define TYPE(KIND, BASE)
7767#define DEPENDENT_TYPE(KIND, BASE) \
7768 case Type::KIND:
7769#define NON_CANONICAL_TYPE(KIND, BASE) \
7770 case Type::KIND:
7771#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(KIND, BASE) \
7772 case Type::KIND:
7773#include "clang/AST/TypeNodes.inc"
7774 llvm_unreachable("@encode for dependent type!")__builtin_unreachable();
7775 }
7776 llvm_unreachable("bad type kind!")__builtin_unreachable();
7777}
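// Illustrative sketch (not part of ASTContext.cpp): representative strings the
// dispatcher above produces, assuming a typical 64-bit Objective-C target and
// with `struct P { int x, y; }` and `union U { int i; float f; }` declared
// beforehand for the example.
//
//   @encode(int)            ->  "i"        @encode(id)              ->  "@"
//   @encode(float *)        ->  "^f"       @encode(Class)           ->  "#"
//   @encode(const char *)   ->  "r*"       @encode(SEL)             ->  ":"
//   @encode(char [4])       ->  "[4c]"     @encode(void (^)(void))  ->  "@?"
//   @encode(struct P)       ->  "{P=ii}"   @encode(int (*)(void))   ->  "^?"
//   @encode(union U)        ->  "(U=if)"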
7778
7779void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl,
7780 std::string &S,
7781 const FieldDecl *FD,
7782 bool includeVBases,
7783 QualType *NotEncodedT) const {
7784 assert(RDecl && "Expected non-null RecordDecl")((void)0);
7785 assert(!RDecl->isUnion() && "Should not be called for unions")((void)0);
7786 if (!RDecl->getDefinition() || RDecl->getDefinition()->isInvalidDecl())
7787 return;
7788
7789 const auto *CXXRec = dyn_cast<CXXRecordDecl>(RDecl);
7790 std::multimap<uint64_t, NamedDecl *> FieldOrBaseOffsets;
7791 const ASTRecordLayout &layout = getASTRecordLayout(RDecl);
7792
7793 if (CXXRec) {
7794 for (const auto &BI : CXXRec->bases()) {
7795 if (!BI.isVirtual()) {
7796 CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl();
7797 if (base->isEmpty())
7798 continue;
7799 uint64_t offs = toBits(layout.getBaseClassOffset(base));
7800 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs),
7801 std::make_pair(offs, base));
7802 }
7803 }
7804 }
7805
7806 unsigned i = 0;
7807 for (FieldDecl *Field : RDecl->fields()) {
7808 if (!Field->isZeroLengthBitField(*this) && Field->isZeroSize(*this))
7809 continue;
7810 uint64_t offs = layout.getFieldOffset(i);
7811 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs),
7812 std::make_pair(offs, Field));
7813 ++i;
7814 }
7815
7816 if (CXXRec && includeVBases) {
7817 for (const auto &BI : CXXRec->vbases()) {
7818 CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl();
7819 if (base->isEmpty())
7820 continue;
7821 uint64_t offs = toBits(layout.getVBaseClassOffset(base));
7822 if (offs >= uint64_t(toBits(layout.getNonVirtualSize())) &&
7823 FieldOrBaseOffsets.find(offs) == FieldOrBaseOffsets.end())
7824 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.end(),
7825 std::make_pair(offs, base));
7826 }
7827 }
7828
7829 CharUnits size;
7830 if (CXXRec) {
7831 size = includeVBases ? layout.getSize() : layout.getNonVirtualSize();
7832 } else {
7833 size = layout.getSize();
7834 }
7835
7836#ifndef NDEBUG1
7837 uint64_t CurOffs = 0;
7838#endif
7839 std::multimap<uint64_t, NamedDecl *>::iterator
7840 CurLayObj = FieldOrBaseOffsets.begin();
7841
7842 if (CXXRec && CXXRec->isDynamicClass() &&
7843 (CurLayObj == FieldOrBaseOffsets.end() || CurLayObj->first != 0)) {
7844 if (FD) {
7845 S += "\"_vptr$";
7846 std::string recname = CXXRec->getNameAsString();
7847 if (recname.empty()) recname = "?";
7848 S += recname;
7849 S += '"';
7850 }
7851 S += "^^?";
7852#ifndef NDEBUG1
7853 CurOffs += getTypeSize(VoidPtrTy);
7854#endif
7855 }
7856
7857 if (!RDecl->hasFlexibleArrayMember()) {
7858 // Mark the end of the structure.
7859 uint64_t offs = toBits(size);
7860 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs),
7861 std::make_pair(offs, nullptr));
7862 }
7863
7864 for (; CurLayObj != FieldOrBaseOffsets.end(); ++CurLayObj) {
7865#ifndef NDEBUG1
7866 assert(CurOffs <= CurLayObj->first)((void)0);
7867 if (CurOffs < CurLayObj->first) {
7868 uint64_t padding = CurLayObj->first - CurOffs;
7869 // FIXME: There doesn't seem to be a way to indicate in the encoding that
7870 // packing/alignment of members is different than normal, in which case
7871 // the encoding will be out-of-sync with the real layout.
7872 // If the runtime switches to just consider the size of types without
7873 // taking into account alignment, we could make padding explicit in the
7874 // encoding (e.g. using arrays of chars). The encoding strings would be
7875 // longer then, though.
7876 CurOffs += padding;
7877 }
7878#endif
7879
7880 NamedDecl *dcl = CurLayObj->second;
7881 if (!dcl)
7882 break; // reached end of structure.
7883
7884 if (auto *base = dyn_cast<CXXRecordDecl>(dcl)) {
7885 // We expand the bases without their virtual bases since those are going
7886 // in the initial structure. Note that this differs from gcc which
7887 // expands virtual bases each time one is encountered in the hierarchy,
7888 // making the encoding type bigger than it really is.
7889 getObjCEncodingForStructureImpl(base, S, FD, /*includeVBases*/false,
7890 NotEncodedT);
7891 assert(!base->isEmpty())((void)0);
7892#ifndef NDEBUG1
7893 CurOffs += toBits(getASTRecordLayout(base).getNonVirtualSize());
7894#endif
7895 } else {
7896 const auto *field = cast<FieldDecl>(dcl);
7897 if (FD) {
7898 S += '"';
7899 S += field->getNameAsString();
7900 S += '"';
7901 }
7902
7903 if (field->isBitField()) {
7904 EncodeBitField(this, S, field->getType(), field);
7905#ifndef NDEBUG1
7906 CurOffs += field->getBitWidthValue(*this);
7907#endif
7908 } else {
7909 QualType qt = field->getType();
7910 getLegacyIntegralTypeEncoding(qt);
7911 getObjCEncodingForTypeImpl(
7912 qt, S, ObjCEncOptions().setExpandStructures().setIsStructField(),
7913 FD, NotEncodedT);
7914#ifndef NDEBUG1
7915 CurOffs += getTypeSize(field->getType());
7916#endif
7917 }
7918 }
7919 }
7920}
7921
7922void ASTContext::getObjCEncodingForTypeQualifier(Decl::ObjCDeclQualifier QT,
7923 std::string& S) const {
7924 if (QT & Decl::OBJC_TQ_In)
7925 S += 'n';
7926 if (QT & Decl::OBJC_TQ_Inout)
7927 S += 'N';
7928 if (QT & Decl::OBJC_TQ_Out)
7929 S += 'o';
7930 if (QT & Decl::OBJC_TQ_Bycopy)
7931 S += 'O';
7932 if (QT & Decl::OBJC_TQ_Byref)
7933 S += 'R';
7934 if (QT & Decl::OBJC_TQ_Oneway)
7935 S += 'V';
7936}
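// Illustrative sketch (not part of ASTContext.cpp): the characters the method
// above appends for each Objective-C distributed-object qualifier, in the
// order they are tested.
//
//   in -> 'n'   inout -> 'N'   out -> 'o'   bycopy -> 'O'   byref -> 'R'
//   oneway -> 'V'
//
// The qualifier characters precede the type's own encoding, so an `out`
// parameter of type `id *` contributes "o^@".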
7937
7938TypedefDecl *ASTContext::getObjCIdDecl() const {
7939 if (!ObjCIdDecl) {
7940 QualType T = getObjCObjectType(ObjCBuiltinIdTy, {}, {});
7941 T = getObjCObjectPointerType(T);
7942 ObjCIdDecl = buildImplicitTypedef(T, "id");
7943 }
7944 return ObjCIdDecl;
7945}
7946
7947TypedefDecl *ASTContext::getObjCSelDecl() const {
7948 if (!ObjCSelDecl) {
7949 QualType T = getPointerType(ObjCBuiltinSelTy);
7950 ObjCSelDecl = buildImplicitTypedef(T, "SEL");
7951 }
7952 return ObjCSelDecl;
7953}
7954
7955TypedefDecl *ASTContext::getObjCClassDecl() const {
7956 if (!ObjCClassDecl) {
7957 QualType T = getObjCObjectType(ObjCBuiltinClassTy, {}, {});
7958 T = getObjCObjectPointerType(T);
7959 ObjCClassDecl = buildImplicitTypedef(T, "Class");
7960 }
7961 return ObjCClassDecl;
7962}
7963
7964ObjCInterfaceDecl *ASTContext::getObjCProtocolDecl() const {
7965 if (!ObjCProtocolClassDecl) {
7966 ObjCProtocolClassDecl
7967 = ObjCInterfaceDecl::Create(*this, getTranslationUnitDecl(),
7968 SourceLocation(),
7969 &Idents.get("Protocol"),
7970 /*typeParamList=*/nullptr,
7971 /*PrevDecl=*/nullptr,
7972 SourceLocation(), true);
7973 }
7974
7975 return ObjCProtocolClassDecl;
7976}
7977
7978//===----------------------------------------------------------------------===//
7979// __builtin_va_list Construction Functions
7980//===----------------------------------------------------------------------===//
7981
7982static TypedefDecl *CreateCharPtrNamedVaListDecl(const ASTContext *Context,
7983 StringRef Name) {
7984 // typedef char* __builtin[_ms]_va_list;
7985 QualType T = Context->getPointerType(Context->CharTy);
7986 return Context->buildImplicitTypedef(T, Name);
7987}
7988
7989static TypedefDecl *CreateMSVaListDecl(const ASTContext *Context) {
7990 return CreateCharPtrNamedVaListDecl(Context, "__builtin_ms_va_list");
7991}
7992
7993static TypedefDecl *CreateCharPtrBuiltinVaListDecl(const ASTContext *Context) {
7994 return CreateCharPtrNamedVaListDecl(Context, "__builtin_va_list");
7995}
7996
7997static TypedefDecl *CreateVoidPtrBuiltinVaListDecl(const ASTContext *Context) {
7998 // typedef void* __builtin_va_list;
7999 QualType T = Context->getPointerType(Context->VoidTy);
8000 return Context->buildImplicitTypedef(T, "__builtin_va_list");
8001}
8002
8003static TypedefDecl *
8004CreateAArch64ABIBuiltinVaListDecl(const ASTContext *Context) {
8005 RecordDecl *VaListTagDecl = Context->buildImplicitRecord("__va_list");
8006 // namespace std { struct __va_list {
8007 // Note that we create the namespace even in C. This is intentional so that
8008 // the type is consistent between C and C++, which is important in cases where
8009 // the types need to match between translation units (e.g. with
8010 // -fsanitize=cfi-icall). Ideally we wouldn't have created this namespace at
8011 // all, but it's now part of the ABI (e.g. in mangled names), so we can't
8012 // change it.
8013 auto *NS = NamespaceDecl::Create(
8014 const_cast<ASTContext &>(*Context), Context->getTranslationUnitDecl(),
8015 /*Inline*/ false, SourceLocation(), SourceLocation(),
8016 &Context->Idents.get("std"),
8017 /*PrevDecl*/ nullptr);
8018 NS->setImplicit();
8019 VaListTagDecl->setDeclContext(NS);
8020
8021 VaListTagDecl->startDefinition();
8022
8023 const size_t NumFields = 5;
8024 QualType FieldTypes[NumFields];
8025 const char *FieldNames[NumFields];
8026
8027 // void *__stack;
8028 FieldTypes[0] = Context->getPointerType(Context->VoidTy);
8029 FieldNames[0] = "__stack";
8030
8031 // void *__gr_top;
8032 FieldTypes[1] = Context->getPointerType(Context->VoidTy);
8033 FieldNames[1] = "__gr_top";
8034
8035 // void *__vr_top;
8036 FieldTypes[2] = Context->getPointerType(Context->VoidTy);
8037 FieldNames[2] = "__vr_top";
8038
8039 // int __gr_offs;
8040 FieldTypes[3] = Context->IntTy;
8041 FieldNames[3] = "__gr_offs";
8042
8043 // int __vr_offs;
8044 FieldTypes[4] = Context->IntTy;
8045 FieldNames[4] = "__vr_offs";
8046
8047 // Create fields
8048 for (unsigned i = 0; i < NumFields; ++i) {
8049 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context),
8050 VaListTagDecl,
8051 SourceLocation(),
8052 SourceLocation(),
8053 &Context->Idents.get(FieldNames[i]),
8054 FieldTypes[i], /*TInfo=*/nullptr,
8055 /*BitWidth=*/nullptr,
8056 /*Mutable=*/false,
8057 ICIS_NoInit);
8058 Field->setAccess(AS_public);
8059 VaListTagDecl->addDecl(Field);
8060 }
8061 VaListTagDecl->completeDefinition();
8062 Context->VaListTagDecl = VaListTagDecl;
8063 QualType VaListTagType = Context->getRecordType(VaListTagDecl);
8064
8065 // } __builtin_va_list;
8066 return Context->buildImplicitTypedef(VaListTagType, "__builtin_va_list");
8067}
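// Illustrative sketch (not part of ASTContext.cpp): the implicit declarations
// that the AArch64 helper above corresponds to, written out as ordinary C++
// (the AAPCS64 va_list layout).
namespace std {
struct __va_list {
  void *__stack;
  void *__gr_top;
  void *__vr_top;
  int __gr_offs;
  int __vr_offs;
};
} // namespace std
typedef std::__va_list __builtin_va_list; // sketch of the typedef built above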
8068
8069static TypedefDecl *CreatePowerABIBuiltinVaListDecl(const ASTContext *Context) {
8070 // typedef struct __va_list_tag {
8071 RecordDecl *VaListTagDecl;
8072
8073 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag");
8074 VaListTagDecl->startDefinition();
8075
8076 const size_t NumFields = 5;
8077 QualType FieldTypes[NumFields];
8078 const char *FieldNames[NumFields];
8079
8080 // unsigned char gpr;
8081 FieldTypes[0] = Context->UnsignedCharTy;
8082 FieldNames[0] = "gpr";
8083
8084 // unsigned char fpr;
8085 FieldTypes[1] = Context->UnsignedCharTy;
8086 FieldNames[1] = "fpr";
8087
8088 // unsigned short reserved;
8089 FieldTypes[2] = Context->UnsignedShortTy;
8090 FieldNames[2] = "reserved";
8091
8092 // void* overflow_arg_area;
8093 FieldTypes[3] = Context->getPointerType(Context->VoidTy);
8094 FieldNames[3] = "overflow_arg_area";
8095
8096 // void* reg_save_area;
8097 FieldTypes[4] = Context->getPointerType(Context->VoidTy);
8098 FieldNames[4] = "reg_save_area";
8099
8100 // Create fields
8101 for (unsigned i = 0; i < NumFields; ++i) {
8102 FieldDecl *Field = FieldDecl::Create(*Context, VaListTagDecl,
8103 SourceLocation(),
8104 SourceLocation(),
8105 &Context->Idents.get(FieldNames[i]),
8106 FieldTypes[i], /*TInfo=*/nullptr,
8107 /*BitWidth=*/nullptr,
8108 /*Mutable=*/false,
8109 ICIS_NoInit);
8110 Field->setAccess(AS_public);
8111 VaListTagDecl->addDecl(Field);
8112 }
8113 VaListTagDecl->completeDefinition();
8114 Context->VaListTagDecl = VaListTagDecl;
8115 QualType VaListTagType = Context->getRecordType(VaListTagDecl);
8116
8117 // } __va_list_tag;
8118 TypedefDecl *VaListTagTypedefDecl =
8119 Context->buildImplicitTypedef(VaListTagType, "__va_list_tag");
8120
8121 QualType VaListTagTypedefType =
8122 Context->getTypedefType(VaListTagTypedefDecl);
8123
8124 // typedef __va_list_tag __builtin_va_list[1];
8125 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1);
8126 QualType VaListTagArrayType
8127 = Context->getConstantArrayType(VaListTagTypedefType,
8128 Size, nullptr, ArrayType::Normal, 0);
8129 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list");
8130}
8131
8132static TypedefDecl *
8133CreateX86_64ABIBuiltinVaListDecl(const ASTContext *Context) {
8134 // struct __va_list_tag {
8135 RecordDecl *VaListTagDecl;
8136 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag");
8137 VaListTagDecl->startDefinition();
8138
8139 const size_t NumFields = 4;
8140 QualType FieldTypes[NumFields];
8141 const char *FieldNames[NumFields];
8142
8143 // unsigned gp_offset;
8144 FieldTypes[0] = Context->UnsignedIntTy;
8145 FieldNames[0] = "gp_offset";
8146
8147 // unsigned fp_offset;
8148 FieldTypes[1] = Context->UnsignedIntTy;
8149 FieldNames[1] = "fp_offset";
8150
8151 // void* overflow_arg_area;
8152 FieldTypes[2] = Context->getPointerType(Context->VoidTy);
8153 FieldNames[2] = "overflow_arg_area";
8154
8155 // void* reg_save_area;
8156 FieldTypes[3] = Context->getPointerType(Context->VoidTy);
8157 FieldNames[3] = "reg_save_area";
8158
8159 // Create fields
8160 for (unsigned i = 0; i < NumFields; ++i) {
8161 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context),
8162 VaListTagDecl,
8163 SourceLocation(),
8164 SourceLocation(),
8165 &Context->Idents.get(FieldNames[i]),
8166 FieldTypes[i], /*TInfo=*/nullptr,
8167 /*BitWidth=*/nullptr,
8168 /*Mutable=*/false,
8169 ICIS_NoInit);
8170 Field->setAccess(AS_public);
8171 VaListTagDecl->addDecl(Field);
8172 }
8173 VaListTagDecl->completeDefinition();
8174 Context->VaListTagDecl = VaListTagDecl;
8175 QualType VaListTagType = Context->getRecordType(VaListTagDecl);
8176
8177 // };
8178
8179 // typedef struct __va_list_tag __builtin_va_list[1];
8180 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1);
8181 QualType VaListTagArrayType = Context->getConstantArrayType(
8182 VaListTagType, Size, nullptr, ArrayType::Normal, 0);
8183 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list");
8184}
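// Illustrative sketch (not part of ASTContext.cpp): the implicit declarations
// that the x86-64 helper above corresponds to, matching the SysV AMD64 ABI
// va_list layout.
struct __va_list_tag {
  unsigned int gp_offset;  // bytes consumed from the GP register save area
  unsigned int fp_offset;  // bytes consumed from the FP register save area
  void *overflow_arg_area; // next stack-passed argument
  void *reg_save_area;     // start of the saved register area
};
typedef struct __va_list_tag __builtin_va_list[1]; // array-of-one, as above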
8185
8186static TypedefDecl *CreatePNaClABIBuiltinVaListDecl(const ASTContext *Context) {
8187 // typedef int __builtin_va_list[4];
8188 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 4);
8189 QualType IntArrayType = Context->getConstantArrayType(
8190 Context->IntTy, Size, nullptr, ArrayType::Normal, 0);
8191 return Context->buildImplicitTypedef(IntArrayType, "__builtin_va_list");
8192}
8193
8194static TypedefDecl *
8195CreateAAPCSABIBuiltinVaListDecl(const ASTContext *Context) {
8196 // struct __va_list
8197 RecordDecl *VaListDecl = Context->buildImplicitRecord("__va_list");
8198 if (Context->getLangOpts().CPlusPlus) {
8199 // namespace std { struct __va_list {
8200 NamespaceDecl *NS;
8201 NS = NamespaceDecl::Create(const_cast<ASTContext &>(*Context),
8202 Context->getTranslationUnitDecl(),
8203 /*Inline*/false, SourceLocation(),
8204 SourceLocation(), &Context->Idents.get("std"),
8205 /*PrevDecl*/ nullptr);
8206 NS->setImplicit();
8207 VaListDecl->setDeclContext(NS);
8208 }
8209
8210 VaListDecl->startDefinition();
8211
8212 // void * __ap;
8213 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context),
8214 VaListDecl,
8215 SourceLocation(),
8216 SourceLocation(),
8217 &Context->Idents.get("__ap"),
8218 Context->getPointerType(Context->VoidTy),
8219 /*TInfo=*/nullptr,
8220 /*BitWidth=*/nullptr,
8221 /*Mutable=*/false,
8222 ICIS_NoInit);
8223 Field->setAccess(AS_public);
8224 VaListDecl->addDecl(Field);
8225
8226 // };
8227 VaListDecl->completeDefinition();
8228 Context->VaListTagDecl = VaListDecl;
8229
8230 // typedef struct __va_list __builtin_va_list;
8231 QualType T = Context->getRecordType(VaListDecl);
8232 return Context->buildImplicitTypedef(T, "__builtin_va_list");
8233}
8234
8235static TypedefDecl *
8236CreateSystemZBuiltinVaListDecl(const ASTContext *Context) {
8237 // struct __va_list_tag {
8238 RecordDecl *VaListTagDecl;
8239 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag");
8240 VaListTagDecl->startDefinition();
8241
8242 const size_t NumFields = 4;
8243 QualType FieldTypes[NumFields];
8244 const char *FieldNames[NumFields];
8245
8246 // long __gpr;
8247 FieldTypes[0] = Context->LongTy;
8248 FieldNames[0] = "__gpr";
8249
8250 // long __fpr;
8251 FieldTypes[1] = Context->LongTy;
8252 FieldNames[1] = "__fpr";
8253
8254 // void *__overflow_arg_area;
8255 FieldTypes[2] = Context->getPointerType(Context->VoidTy);
8256 FieldNames[2] = "__overflow_arg_area";
8257
8258 // void *__reg_save_area;
8259 FieldTypes[3] = Context->getPointerType(Context->VoidTy);
8260 FieldNames[3] = "__reg_save_area";
8261
8262 // Create fields
8263 for (unsigned i = 0; i < NumFields; ++i) {
8264 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context),
8265 VaListTagDecl,
8266 SourceLocation(),
8267 SourceLocation(),
8268 &Context->Idents.get(FieldNames[i]),
8269 FieldTypes[i], /*TInfo=*/nullptr,
8270 /*BitWidth=*/nullptr,
8271 /*Mutable=*/false,
8272 ICIS_NoInit);
8273 Field->setAccess(AS_public);
8274 VaListTagDecl->addDecl(Field);
8275 }
8276 VaListTagDecl->completeDefinition();
8277 Context->VaListTagDecl = VaListTagDecl;
8278 QualType VaListTagType = Context->getRecordType(VaListTagDecl);
8279
8280 // };
8281
8282 // typedef __va_list_tag __builtin_va_list[1];
8283 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1);
8284 QualType VaListTagArrayType = Context->getConstantArrayType(
8285 VaListTagType, Size, nullptr, ArrayType::Normal, 0);
8286
8287 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list");
8288}
8289
8290static TypedefDecl *CreateHexagonBuiltinVaListDecl(const ASTContext *Context) {
8291 // typedef struct __va_list_tag {
8292 RecordDecl *VaListTagDecl;
8293 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag");
8294 VaListTagDecl->startDefinition();
8295
8296 const size_t NumFields = 3;
8297 QualType FieldTypes[NumFields];
8298 const char *FieldNames[NumFields];
8299
8300 // void *CurrentSavedRegisterArea;
8301 FieldTypes[0] = Context->getPointerType(Context->VoidTy);
8302 FieldNames[0] = "__current_saved_reg_area_pointer";
8303
8304 // void *SavedRegAreaEnd;
8305 FieldTypes[1] = Context->getPointerType(Context->VoidTy);
8306 FieldNames[1] = "__saved_reg_area_end_pointer";
8307
8308 // void *OverflowArea;
8309 FieldTypes[2] = Context->getPointerType(Context->VoidTy);
8310 FieldNames[2] = "__overflow_area_pointer";
8311
8312 // Create fields
8313 for (unsigned i = 0; i < NumFields; ++i) {
8314 FieldDecl *Field = FieldDecl::Create(
8315 const_cast<ASTContext &>(*Context), VaListTagDecl, SourceLocation(),
8316 SourceLocation(), &Context->Idents.get(FieldNames[i]), FieldTypes[i],
8317 /*TInfo=*/0,
8318 /*BitWidth=*/0,
8319 /*Mutable=*/false, ICIS_NoInit);
8320 Field->setAccess(AS_public);
8321 VaListTagDecl->addDecl(Field);
8322 }
8323 VaListTagDecl->completeDefinition();
8324 Context->VaListTagDecl = VaListTagDecl;
8325 QualType VaListTagType = Context->getRecordType(VaListTagDecl);
8326
8327 // } __va_list_tag;
8328 TypedefDecl *VaListTagTypedefDecl =
8329 Context->buildImplicitTypedef(VaListTagType, "__va_list_tag");
8330
8331 QualType VaListTagTypedefType = Context->getTypedefType(VaListTagTypedefDecl);
8332
8333 // typedef __va_list_tag __builtin_va_list[1];
8334 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1);
8335 QualType VaListTagArrayType = Context->getConstantArrayType(
8336 VaListTagTypedefType, Size, nullptr, ArrayType::Normal, 0);
8337
8338 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list");
8339}
8340
8341static TypedefDecl *CreateVaListDecl(const ASTContext *Context,
8342 TargetInfo::BuiltinVaListKind Kind) {
8343 switch (Kind) {
8344 case TargetInfo::CharPtrBuiltinVaList:
8345 return CreateCharPtrBuiltinVaListDecl(Context);
8346 case TargetInfo::VoidPtrBuiltinVaList:
8347 return CreateVoidPtrBuiltinVaListDecl(Context);
8348 case TargetInfo::AArch64ABIBuiltinVaList:
8349 return CreateAArch64ABIBuiltinVaListDecl(Context);
8350 case TargetInfo::PowerABIBuiltinVaList:
8351 return CreatePowerABIBuiltinVaListDecl(Context);
8352 case TargetInfo::X86_64ABIBuiltinVaList:
8353 return CreateX86_64ABIBuiltinVaListDecl(Context);
8354 case TargetInfo::PNaClABIBuiltinVaList:
8355 return CreatePNaClABIBuiltinVaListDecl(Context);
8356 case TargetInfo::AAPCSABIBuiltinVaList:
8357 return CreateAAPCSABIBuiltinVaListDecl(Context);
8358 case TargetInfo::SystemZBuiltinVaList:
8359 return CreateSystemZBuiltinVaListDecl(Context);
8360 case TargetInfo::HexagonBuiltinVaList:
8361 return CreateHexagonBuiltinVaListDecl(Context);
8362 }
8363
8364 llvm_unreachable("Unhandled __builtin_va_list type kind")__builtin_unreachable();
8365}
8366
8367TypedefDecl *ASTContext::getBuiltinVaListDecl() const {
8368 if (!BuiltinVaListDecl) {
8369 BuiltinVaListDecl = CreateVaListDecl(this, Target->getBuiltinVaListKind());
8370 assert(BuiltinVaListDecl->isImplicit())((void)0);
8371 }
8372
8373 return BuiltinVaListDecl;
8374}
8375
8376Decl *ASTContext::getVaListTagDecl() const {
8377 // Force the creation of VaListTagDecl by building the __builtin_va_list
8378 // declaration.
8379 if (!VaListTagDecl)
8380 (void)getBuiltinVaListDecl();
8381
8382 return VaListTagDecl;
8383}
8384
8385TypedefDecl *ASTContext::getBuiltinMSVaListDecl() const {
8386 if (!BuiltinMSVaListDecl)
8387 BuiltinMSVaListDecl = CreateMSVaListDecl(this);
8388
8389 return BuiltinMSVaListDecl;
8390}
8391
8392bool ASTContext::canBuiltinBeRedeclared(const FunctionDecl *FD) const {
8393 return BuiltinInfo.canBeRedeclared(FD->getBuiltinID());
8394}
8395
8396void ASTContext::setObjCConstantStringInterface(ObjCInterfaceDecl *Decl) {
8397 assert(ObjCConstantStringType.isNull() &&((void)0)
8398 "'NSConstantString' type already set!")((void)0);
8399
8400 ObjCConstantStringType = getObjCInterfaceType(Decl);
8401}
8402
8403/// Retrieve the template name that corresponds to a non-empty
8404/// lookup.
8405TemplateName
8406ASTContext::getOverloadedTemplateName(UnresolvedSetIterator Begin,
8407 UnresolvedSetIterator End) const {
8408 unsigned size = End - Begin;
8409 assert(size > 1 && "set is not overloaded!")((void)0);
8410
8411 void *memory = Allocate(sizeof(OverloadedTemplateStorage) +
8412 size * sizeof(FunctionTemplateDecl*));
8413 auto *OT = new (memory) OverloadedTemplateStorage(size);
8414
8415 NamedDecl **Storage = OT->getStorage();
8416 for (UnresolvedSetIterator I = Begin; I != End; ++I) {
8417 NamedDecl *D = *I;
8418 assert(isa<FunctionTemplateDecl>(D) ||((void)0)
8419 isa<UnresolvedUsingValueDecl>(D) ||((void)0)
8420 (isa<UsingShadowDecl>(D) &&((void)0)
8421 isa<FunctionTemplateDecl>(D->getUnderlyingDecl())))((void)0);
8422 *Storage++ = D;
8423 }
8424
8425 return TemplateName(OT);
8426}
8427
8428/// Retrieve a template name representing an unqualified-id that has been
8429/// assumed to name a template for ADL purposes.
8430TemplateName ASTContext::getAssumedTemplateName(DeclarationName Name) const {
8431 auto *OT = new (*this) AssumedTemplateStorage(Name);
8432 return TemplateName(OT);
8433}
8434
8435/// Retrieve the template name that represents a qualified
8436/// template name such as \c std::vector.
8437TemplateName
8438ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS,
8439 bool TemplateKeyword,
8440 TemplateDecl *Template) const {
8441 assert(NNS && "Missing nested-name-specifier in qualified template name")((void)0);
8442
8443 // FIXME: Canonicalization?
8444 llvm::FoldingSetNodeID ID;
8445 QualifiedTemplateName::Profile(ID, NNS, TemplateKeyword, Template);
8446
8447 void *InsertPos = nullptr;
8448 QualifiedTemplateName *QTN =
8449 QualifiedTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
8450 if (!QTN) {
8451 QTN = new (*this, alignof(QualifiedTemplateName))
8452 QualifiedTemplateName(NNS, TemplateKeyword, Template);
8453 QualifiedTemplateNames.InsertNode(QTN, InsertPos);
8454 }
8455
8456 return TemplateName(QTN);
8457}
8458
8459/// Retrieve the template name that represents a dependent
8460/// template name such as \c MetaFun::template apply.
8461TemplateName
8462ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS,
8463 const IdentifierInfo *Name) const {
8464 assert((!NNS || NNS->isDependent()) &&((void)0)
8465 "Nested name specifier must be dependent")((void)0);
8466
8467 llvm::FoldingSetNodeID ID;
8468 DependentTemplateName::Profile(ID, NNS, Name);
8469
8470 void *InsertPos = nullptr;
8471 DependentTemplateName *QTN =
8472 DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
8473
8474 if (QTN)
8475 return TemplateName(QTN);
8476
8477 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
8478 if (CanonNNS == NNS) {
8479 QTN = new (*this, alignof(DependentTemplateName))
8480 DependentTemplateName(NNS, Name);
8481 } else {
8482 TemplateName Canon = getDependentTemplateName(CanonNNS, Name);
8483 QTN = new (*this, alignof(DependentTemplateName))
8484 DependentTemplateName(NNS, Name, Canon);
8485 DependentTemplateName *CheckQTN =
8486 DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
8487 assert(!CheckQTN && "Dependent type name canonicalization broken")((void)0);
8488 (void)CheckQTN;
8489 }
8490
8491 DependentTemplateNames.InsertNode(QTN, InsertPos);
8492 return TemplateName(QTN);
8493}
8494
8495/// Retrieve the template name that represents a dependent
8496/// template name such as \c MetaFun::template operator+.
8497TemplateName
8498ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS,
8499 OverloadedOperatorKind Operator) const {
8500 assert((!NNS || NNS->isDependent()) &&((void)0)
8501 "Nested name specifier must be dependent")((void)0);
8502
8503 llvm::FoldingSetNodeID ID;
8504 DependentTemplateName::Profile(ID, NNS, Operator);
8505
8506 void *InsertPos = nullptr;
8507 DependentTemplateName *QTN
8508 = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
8509
8510 if (QTN)
8511 return TemplateName(QTN);
8512
8513 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
8514 if (CanonNNS == NNS) {
8515 QTN = new (*this, alignof(DependentTemplateName))
8516 DependentTemplateName(NNS, Operator);
8517 } else {
8518 TemplateName Canon = getDependentTemplateName(CanonNNS, Operator);
8519 QTN = new (*this, alignof(DependentTemplateName))
8520 DependentTemplateName(NNS, Operator, Canon);
8521
8522 DependentTemplateName *CheckQTN
8523 = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
8524 assert(!CheckQTN && "Dependent template name canonicalization broken")((void)0);
8525 (void)CheckQTN;
8526 }
8527
8528 DependentTemplateNames.InsertNode(QTN, InsertPos);
8529 return TemplateName(QTN);
8530}
8531
8532TemplateName
8533ASTContext::getSubstTemplateTemplateParm(TemplateTemplateParmDecl *param,
8534 TemplateName replacement) const {
8535 llvm::FoldingSetNodeID ID;
8536 SubstTemplateTemplateParmStorage::Profile(ID, param, replacement);
8537
8538 void *insertPos = nullptr;
8539 SubstTemplateTemplateParmStorage *subst
8540 = SubstTemplateTemplateParms.FindNodeOrInsertPos(ID, insertPos);
8541
8542 if (!subst) {
8543 subst = new (*this) SubstTemplateTemplateParmStorage(param, replacement);
8544 SubstTemplateTemplateParms.InsertNode(subst, insertPos);
8545 }
8546
8547 return TemplateName(subst);
8548}
8549
8550TemplateName
8551ASTContext::getSubstTemplateTemplateParmPack(TemplateTemplateParmDecl *Param,
8552 const TemplateArgument &ArgPack) const {
8553 auto &Self = const_cast<ASTContext &>(*this);
8554 llvm::FoldingSetNodeID ID;
8555 SubstTemplateTemplateParmPackStorage::Profile(ID, Self, Param, ArgPack);
8556
8557 void *InsertPos = nullptr;
8558 SubstTemplateTemplateParmPackStorage *Subst
8559 = SubstTemplateTemplateParmPacks.FindNodeOrInsertPos(ID, InsertPos);
8560
8561 if (!Subst) {
8562 Subst = new (*this) SubstTemplateTemplateParmPackStorage(Param,
8563 ArgPack.pack_size(),
8564 ArgPack.pack_begin());
8565 SubstTemplateTemplateParmPacks.InsertNode(Subst, InsertPos);
8566 }
8567
8568 return TemplateName(Subst);
8569}
8570
8571/// getFromTargetType - Given one of the integer types provided by
8572/// TargetInfo, produce the corresponding type. The unsigned @p Type
8573/// is actually a value of type @c TargetInfo::IntType.
8574CanQualType ASTContext::getFromTargetType(unsigned Type) const {
8575 switch (Type) {
8576 case TargetInfo::NoInt: return {};
8577 case TargetInfo::SignedChar: return SignedCharTy;
8578 case TargetInfo::UnsignedChar: return UnsignedCharTy;
8579 case TargetInfo::SignedShort: return ShortTy;
8580 case TargetInfo::UnsignedShort: return UnsignedShortTy;
8581 case TargetInfo::SignedInt: return IntTy;
8582 case TargetInfo::UnsignedInt: return UnsignedIntTy;
8583 case TargetInfo::SignedLong: return LongTy;
8584 case TargetInfo::UnsignedLong: return UnsignedLongTy;
8585 case TargetInfo::SignedLongLong: return LongLongTy;
8586 case TargetInfo::UnsignedLongLong: return UnsignedLongLongTy;
8587 }
8588
8589 llvm_unreachable("Unhandled TargetInfo::IntType value")__builtin_unreachable();
8590}
8591
8592//===----------------------------------------------------------------------===//
8593// Type Predicates.
8594//===----------------------------------------------------------------------===//
8595
8596/// getObjCGCAttr - Returns one of GCNone, Weak or Strong objc's
8597/// garbage collection attribute.
8598///
8599Qualifiers::GC ASTContext::getObjCGCAttrKind(QualType Ty) const {
8600 if (getLangOpts().getGC() == LangOptions::NonGC)
8601 return Qualifiers::GCNone;
8602
8603 assert(getLangOpts().ObjC)((void)0);
8604 Qualifiers::GC GCAttrs = Ty.getObjCGCAttr();
8605
8606 // Default behaviour under Objective-C's GC is for ObjC pointers
8607 // (or pointers to them) to be treated as though they were declared
8608 // as __strong.
8609 if (GCAttrs == Qualifiers::GCNone) {
8610 if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType())
8611 return Qualifiers::Strong;
8612 else if (Ty->isPointerType())
8613 return getObjCGCAttrKind(Ty->castAs<PointerType>()->getPointeeType());
8614 } else {
8615 // It's not valid to set GC attributes on anything that isn't a
8616 // pointer.
8617#ifndef NDEBUG1
8618 QualType CT = Ty->getCanonicalTypeInternal();
8619 while (const auto *AT = dyn_cast<ArrayType>(CT))
8620 CT = AT->getElementType();
8621 assert(CT->isAnyPointerType() || CT->isBlockPointerType())((void)0);
8622#endif
8623 }
8624 return GCAttrs;
8625}
8626
8627//===----------------------------------------------------------------------===//
8628// Type Compatibility Testing
8629//===----------------------------------------------------------------------===//
8630
8631/// areCompatVectorTypes - Return true if the two specified vector types are
8632/// compatible.
8633static bool areCompatVectorTypes(const VectorType *LHS,
8634 const VectorType *RHS) {
8635 assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified())((void)0);
8636 return LHS->getElementType() == RHS->getElementType() &&
8637 LHS->getNumElements() == RHS->getNumElements();
8638}
8639
8640/// areCompatMatrixTypes - Return true if the two specified matrix types are
8641/// compatible.
8642static bool areCompatMatrixTypes(const ConstantMatrixType *LHS,
8643 const ConstantMatrixType *RHS) {
8644 assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified())((void)0);
8645 return LHS->getElementType() == RHS->getElementType() &&
8646 LHS->getNumRows() == RHS->getNumRows() &&
8647 LHS->getNumColumns() == RHS->getNumColumns();
8648}
8649
8650bool ASTContext::areCompatibleVectorTypes(QualType FirstVec,
8651 QualType SecondVec) {
8652 assert(FirstVec->isVectorType() && "FirstVec should be a vector type")((void)0);
8653 assert(SecondVec->isVectorType() && "SecondVec should be a vector type")((void)0);
8654
8655 if (hasSameUnqualifiedType(FirstVec, SecondVec))
8656 return true;
8657
8658 // Treat Neon vector types and most AltiVec vector types as if they are the
8659 // equivalent GCC vector types.
8660 const auto *First = FirstVec->castAs<VectorType>();
8661 const auto *Second = SecondVec->castAs<VectorType>();
8662 if (First->getNumElements() == Second->getNumElements() &&
8663 hasSameType(First->getElementType(), Second->getElementType()) &&
8664 First->getVectorKind() != VectorType::AltiVecPixel &&
8665 First->getVectorKind() != VectorType::AltiVecBool &&
8666 Second->getVectorKind() != VectorType::AltiVecPixel &&
8667 Second->getVectorKind() != VectorType::AltiVecBool &&
8668 First->getVectorKind() != VectorType::SveFixedLengthDataVector &&
8669 First->getVectorKind() != VectorType::SveFixedLengthPredicateVector &&
8670 Second->getVectorKind() != VectorType::SveFixedLengthDataVector &&
8671 Second->getVectorKind() != VectorType::SveFixedLengthPredicateVector)
8672 return true;
8673
8674 return false;
8675}
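// Illustrative sketch (not part of ASTContext.cpp): two vector types that the
// check above treats as compatible because they have the same element type and
// element count, even though they were declared via different extensions. The
// typedef names are invented, and neon_vector_type requires an ARM/AArch64
// target with NEON.
typedef __attribute__((neon_vector_type(4))) int NeonInt4; // NEON int32x4
typedef __attribute__((vector_size(16))) int GccInt4;      // generic GCC vector
// areCompatibleVectorTypes(NeonInt4, GccInt4) is true: four ints each, and
// neither side is an AltiVec pixel/bool or an SVE fixed-length vector.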
8676
8677/// getSVETypeSize - Return SVE vector or predicate register size.
8678static uint64_t getSVETypeSize(ASTContext &Context, const BuiltinType *Ty) {
8679 assert(Ty->isVLSTBuiltinType() && "Invalid SVE Type")((void)0);
8680 return Ty->getKind() == BuiltinType::SveBool
8681 ? Context.getLangOpts().ArmSveVectorBits / Context.getCharWidth()
8682 : Context.getLangOpts().ArmSveVectorBits;
8683}
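// Illustrative sketch (not part of ASTContext.cpp): the sizes getSVETypeSize
// reports when the TU is built with LangOpts.ArmSveVectorBits == 512 (e.g. via
// -msve-vector-bits=512).
//
//   data vectors (svint32_t, svfloat64_t, ...) -> 512 bits
//   predicates   (svbool_t)                    -> 512 / 8 = 64 bits
//
// i.e. a predicate carries one bit per byte of the data vector.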
8684
8685bool ASTContext::areCompatibleSveTypes(QualType FirstType,
8686 QualType SecondType) {
8687 assert(((FirstType->isSizelessBuiltinType() && SecondType->isVectorType()) ||((void)0)
8688 (FirstType->isVectorType() && SecondType->isSizelessBuiltinType())) &&((void)0)
8689 "Expected SVE builtin type and vector type!")((void)0);
8690
8691 auto IsValidCast = [this](QualType FirstType, QualType SecondType) {
8692 if (const auto *BT = FirstType->getAs<BuiltinType>()) {
8693 if (const auto *VT = SecondType->getAs<VectorType>()) {
8694 // Predicates have the same representation as uint8 so we also have to
8695 // check the kind to make these types incompatible.
8696 if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector)
8697 return BT->getKind() == BuiltinType::SveBool;
8698 else if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector)
8699 return VT->getElementType().getCanonicalType() ==
8700 FirstType->getSveEltType(*this);
8701 else if (VT->getVectorKind() == VectorType::GenericVector)
8702 return getTypeSize(SecondType) == getSVETypeSize(*this, BT) &&
8703 hasSameType(VT->getElementType(),
8704 getBuiltinVectorTypeInfo(BT).ElementType);
8705 }
8706 }
8707 return false;
8708 };
8709
8710 return IsValidCast(FirstType, SecondType) ||
8711 IsValidCast(SecondType, FirstType);
8712}
8713
8714bool ASTContext::areLaxCompatibleSveTypes(QualType FirstType,
8715 QualType SecondType) {
8716 assert(((FirstType->isSizelessBuiltinType() && SecondType->isVectorType()) ||((void)0)
8717 (FirstType->isVectorType() && SecondType->isSizelessBuiltinType())) &&((void)0)
8718 "Expected SVE builtin type and vector type!")((void)0);
8719
8720 auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) {
8721 const auto *BT = FirstType->getAs<BuiltinType>();
8722 if (!BT)
8723 return false;
8724
8725 const auto *VecTy = SecondType->getAs<VectorType>();
8726 if (VecTy &&
8727 (VecTy->getVectorKind() == VectorType::SveFixedLengthDataVector ||
8728 VecTy->getVectorKind() == VectorType::GenericVector)) {
8729 const LangOptions::LaxVectorConversionKind LVCKind =
8730 getLangOpts().getLaxVectorConversions();
8731
8732 // Cannot convert between SVE predicates and SVE vectors because of
8733 // their different sizes.
8734 if (BT->getKind() == BuiltinType::SveBool &&
8735 VecTy->getVectorKind() == VectorType::SveFixedLengthDataVector)
8736 return false;
8737
8738 // If __ARM_FEATURE_SVE_BITS != N do not allow GNU vector lax conversion.
8739 // "Whenever __ARM_FEATURE_SVE_BITS==N, GNUT implicitly
8740 // converts to VLAT and VLAT implicitly converts to GNUT."
8741 // ACLE Spec Version 00bet6, 3.7.3.2. Behavior common to vectors and
8742 // predicates.
8743 if (VecTy->getVectorKind() == VectorType::GenericVector &&
8744 getTypeSize(SecondType) != getSVETypeSize(*this, BT))
8745 return false;
8746
8747 // If -flax-vector-conversions=all is specified, the types are
8748 // certainly compatible.
8749 if (LVCKind == LangOptions::LaxVectorConversionKind::All)
8750 return true;
8751
8752 // If -flax-vector-conversions=integer is specified, the types are
8753 // compatible if the elements are integer types.
8754 if (LVCKind == LangOptions::LaxVectorConversionKind::Integer)
8755 return VecTy->getElementType().getCanonicalType()->isIntegerType() &&
8756 FirstType->getSveEltType(*this)->isIntegerType();
8757 }
8758
8759 return false;
8760 };
8761
8762 return IsLaxCompatible(FirstType, SecondType) ||
8763 IsLaxCompatible(SecondType, FirstType);
8764}
8765
8766bool ASTContext::hasDirectOwnershipQualifier(QualType Ty) const {
8767 while (true) {
8768 // __strong id
8769 if (const AttributedType *Attr = dyn_cast<AttributedType>(Ty)) {
8770 if (Attr->getAttrKind() == attr::ObjCOwnership)
8771 return true;
8772
8773 Ty = Attr->getModifiedType();
8774
8775 // X *__strong (...)
8776 } else if (const ParenType *Paren = dyn_cast<ParenType>(Ty)) {
8777 Ty = Paren->getInnerType();
8778
8779 // We do not want to look through typedefs, typeof(expr),
8780 // typeof(type), or any other way that the type is somehow
8781 // abstracted.
8782 } else {
8783 return false;
8784 }
8785 }
8786}
8787
8788//===----------------------------------------------------------------------===//
8789// ObjCQualifiedIdTypesAreCompatible - Compatibility testing for qualified id's.
8790//===----------------------------------------------------------------------===//
8791
8792/// ProtocolCompatibleWithProtocol - return 'true' if 'lProto' is in the
8793/// inheritance hierarchy of 'rProto'.
8794bool
8795ASTContext::ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto,
8796 ObjCProtocolDecl *rProto) const {
8797 if (declaresSameEntity(lProto, rProto))
8798 return true;
8799 for (auto *PI : rProto->protocols())
8800 if (ProtocolCompatibleWithProtocol(lProto, PI))
8801 return true;
8802 return false;
8803}
8804
8805/// ObjCQualifiedClassTypesAreCompatible - compare Class<pr,...> and
8806/// Class<pr1, ...>.
8807bool ASTContext::ObjCQualifiedClassTypesAreCompatible(
8808 const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs) {
8809 for (auto *lhsProto : lhs->quals()) {
8810 bool match = false;
8811 for (auto *rhsProto : rhs->quals()) {
8812 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto)) {
8813 match = true;
8814 break;
8815 }
8816 }
8817 if (!match)
8818 return false;
8819 }
8820 return true;
8821}
8822
8823/// ObjCQualifiedIdTypesAreCompatible - We know that one of lhs/rhs is an
8824/// ObjCQualifiedIDType.
8825bool ASTContext::ObjCQualifiedIdTypesAreCompatible(
8826 const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs,
8827 bool compare) {
8828 // Allow id<P..> and an 'id' in all cases.
8829 if (lhs->isObjCIdType() || rhs->isObjCIdType())
8830 return true;
8831
8832 // Don't allow id<P..> to convert to Class or Class<P..> in either direction.
8833 if (lhs->isObjCClassType() || lhs->isObjCQualifiedClassType() ||
8834 rhs->isObjCClassType() || rhs->isObjCQualifiedClassType())
8835 return false;
8836
8837 if (lhs->isObjCQualifiedIdType()) {
8838 if (rhs->qual_empty()) {
8839 // If the RHS is an unqualified interface pointer "NSString*",
8840 // make sure we check the class hierarchy.
8841 if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) {
8842 for (auto *I : lhs->quals()) {
8843 // when comparing an id<P> on lhs with a static type on rhs,
8844 // see if static class implements all of id's protocols, directly or
8845 // through its super class and categories.
8846 if (!rhsID->ClassImplementsProtocol(I, true))
8847 return false;
8848 }
8849 }
8850 // If there are no qualifiers and no interface, we have an 'id'.
8851 return true;
8852 }
8853 // Both the right and left sides have qualifiers.
8854 for (auto *lhsProto : lhs->quals()) {
8855 bool match = false;
8856
8857 // when comparing an id<P> on lhs with a static type on rhs,
8858 // see if static class implements all of id's protocols, directly or
8859 // through its super class and categories.
8860 for (auto *rhsProto : rhs->quals()) {
8861 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) ||
8862 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) {
8863 match = true;
8864 break;
8865 }
8866 }
8867 // If the RHS is a qualified interface pointer "NSString<P>*",
8868 // make sure we check the class hierarchy.
8869 if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) {
8870 for (auto *I : lhs->quals()) {
8871 // when comparing an id<P> on lhs with a static type on rhs,
8872 // see if static class implements all of id's protocols, directly or
8873 // through its super class and categories.
8874 if (rhsID->ClassImplementsProtocol(I, true)) {
8875 match = true;
8876 break;
8877 }
8878 }
8879 }
8880 if (!match)
8881 return false;
8882 }
8883
8884 return true;
8885 }
8886
8887 assert(rhs->isObjCQualifiedIdType() && "One of the LHS/RHS should be id<x>")((void)0);
8888
8889 if (lhs->getInterfaceType()) {
8890 // If both the right and left sides have qualifiers.
8891 for (auto *lhsProto : lhs->quals()) {
8892 bool match = false;
8893
8894 // when comparing an id<P> on rhs with a static type on lhs,
8895 // see if static class implements all of id's protocols, directly or
8896 // through its super class and categories.
8897 // First, lhs protocols in the qualifier list must be found, direct
8898 // or indirect in rhs's qualifier list or it is a mismatch.
8899 for (auto *rhsProto : rhs->quals()) {
8900 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) ||
8901 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) {
8902 match = true;
8903 break;
8904 }
8905 }
8906 if (!match)
8907 return false;
8908 }
8909
8910 // Static class's protocols, or its super class or category protocols
8911 // must be found, directly or indirectly, in rhs's qualifier list or it is a mismatch.
8912 if (ObjCInterfaceDecl *lhsID = lhs->getInterfaceDecl()) {
8913 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSInheritedProtocols;
8914 CollectInheritedProtocols(lhsID, LHSInheritedProtocols);
8915 // This is rather dubious but matches gcc's behavior. If lhs has
8916 // no type qualifier and its class has no static protocol(s),
8917 // assume that it is a mismatch.
8918 if (LHSInheritedProtocols.empty() && lhs->qual_empty())
8919 return false;
8920 for (auto *lhsProto : LHSInheritedProtocols) {
8921 bool match = false;
8922 for (auto *rhsProto : rhs->quals()) {
8923 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) ||
8924 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) {
8925 match = true;
8926 break;
8927 }
8928 }
8929 if (!match)
8930 return false;
8931 }
8932 }
8933 return true;
8934 }
8935 return false;
8936}
8937
8938/// canAssignObjCInterfaces - Return true if the two interface types are
8939/// compatible for assignment from RHS to LHS. This handles validation of any
8940/// protocol qualifiers on the LHS or RHS.
8941bool ASTContext::canAssignObjCInterfaces(const ObjCObjectPointerType *LHSOPT,
8942 const ObjCObjectPointerType *RHSOPT) {
8943 const ObjCObjectType* LHS = LHSOPT->getObjectType();
8944 const ObjCObjectType* RHS = RHSOPT->getObjectType();
8945
8946 // If either type represents the built-in 'id' type, return true.
8947 if (LHS->isObjCUnqualifiedId() || RHS->isObjCUnqualifiedId())
8948 return true;
8949
8950 // Function object that propagates a successful result or handles
8951 // __kindof types.
8952 auto finish = [&](bool succeeded) -> bool {
8953 if (succeeded)
8954 return true;
8955
8956 if (!RHS->isKindOfType())
8957 return false;
8958
8959 // Strip off __kindof and protocol qualifiers, then check whether
8960 // we can assign the other way.
8961 return canAssignObjCInterfaces(RHSOPT->stripObjCKindOfTypeAndQuals(*this),
8962 LHSOPT->stripObjCKindOfTypeAndQuals(*this));
8963 };
8964
8965 // Casts from or to id<P> are allowed when the other side has compatible
8966 // protocols.
8967 if (LHS->isObjCQualifiedId() || RHS->isObjCQualifiedId()) {
8968 return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT, false));
8969 }
8970
8971 // Verify protocol compatibility for casts from Class<P1> to Class<P2>.
8972 if (LHS->isObjCQualifiedClass() && RHS->isObjCQualifiedClass()) {
8973 return finish(ObjCQualifiedClassTypesAreCompatible(LHSOPT, RHSOPT));
8974 }
8975
8976 // Casts from Class to Class<Foo>, or vice-versa, are allowed.
8977 if (LHS->isObjCClass() && RHS->isObjCClass()) {
8978 return true;
8979 }
8980
8981 // If we have 2 user-defined types, fall into that path.
8982 if (LHS->getInterface() && RHS->getInterface()) {
8983 return finish(canAssignObjCInterfaces(LHS, RHS));
8984 }
8985
8986 return false;
8987}
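// Illustrative sketch (not part of the original source): roughly what the cases
// above accept, assuming hypothetical declarations Base, Sub : Base and protocol P.
//
//   id anything = (Sub *)ptr;           // unqualified 'id' on either side always matches
//   id<P> q     = (Base<P> *)ptr;       // ok if Base (or its qualifiers) conforms to P
//   Base *b     = (Sub *)ptr;           // ok via canAssignObjCInterfaces(LHS, RHS)
//   Sub *s      = (__kindof Base *)ptr; // ok via the 'finish' lambda, which strips
//                                       // __kindof and retries in the other direction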
8988
8989/// canAssignObjCInterfacesInBlockPointer - This routine is specifically written
8990/// for providing type-safety for objective-c pointers used to pass/return
8991/// arguments in block literals. When passed as arguments, passing 'A*' where
8992/// 'id' is expected is not OK. Passing 'Sub *' where 'Super *' is expected is
8993/// not OK. For the return type, the opposite direction is not OK.
8994bool ASTContext::canAssignObjCInterfacesInBlockPointer(
8995 const ObjCObjectPointerType *LHSOPT,
8996 const ObjCObjectPointerType *RHSOPT,
8997 bool BlockReturnType) {
8998
8999 // Function object that propagates a successful result or handles
9000 // __kindof types.
9001 auto finish = [&](bool succeeded) -> bool {
9002 if (succeeded)
9003 return true;
9004
9005 const ObjCObjectPointerType *Expected = BlockReturnType ? RHSOPT : LHSOPT;
9006 if (!Expected->isKindOfType())
9007 return false;
9008
9009 // Strip off __kindof and protocol qualifiers, then check whether
9010 // we can assign the other way.
9011 return canAssignObjCInterfacesInBlockPointer(
9012 RHSOPT->stripObjCKindOfTypeAndQuals(*this),
9013 LHSOPT->stripObjCKindOfTypeAndQuals(*this),
9014 BlockReturnType);
9015 };
9016
9017 if (RHSOPT->isObjCBuiltinType() || LHSOPT->isObjCIdType())
9018 return true;
9019
9020 if (LHSOPT->isObjCBuiltinType()) {
9021 return finish(RHSOPT->isObjCBuiltinType() ||
9022 RHSOPT->isObjCQualifiedIdType());
9023 }
9024
9025 if (LHSOPT->isObjCQualifiedIdType() || RHSOPT->isObjCQualifiedIdType()) {
9026 if (getLangOpts().CompatibilityQualifiedIdBlockParamTypeChecking)
9027 // For block parameters, use the previous type checking for compatibility.
9028 return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT, false) ||
9029 // Or corrected type checking as in non-compat mode.
9030 (!BlockReturnType &&
9031 ObjCQualifiedIdTypesAreCompatible(RHSOPT, LHSOPT, false)));
9032 else
9033 return finish(ObjCQualifiedIdTypesAreCompatible(
9034 (BlockReturnType ? LHSOPT : RHSOPT),
9035 (BlockReturnType ? RHSOPT : LHSOPT), false));
9036 }
9037
9038 const ObjCInterfaceType* LHS = LHSOPT->getInterfaceType();
9039 const ObjCInterfaceType* RHS = RHSOPT->getInterfaceType();
9040 if (LHS && RHS) { // We have 2 user-defined types.
9041 if (LHS != RHS) {
9042 if (LHS->getDecl()->isSuperClassOf(RHS->getDecl()))
9043 return finish(BlockReturnType);
9044 if (RHS->getDecl()->isSuperClassOf(LHS->getDecl()))
9045 return finish(!BlockReturnType);
9046 }
9047 else
9048 return true;
9049 }
9050 return false;
9051}
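// Illustrative sketch (not part of the original source), assuming hypothetical
// classes Super and Sub : Super. Block parameters behave contravariantly and the
// return type covariantly, which is what the BlockReturnType flag distinguishes:
//
//   void (^takesSub)(Sub *)     = ^(Super *s) {}; // ok: a Super* handler accepts any Sub*
//   void (^takesSuper)(Super *) = ^(Sub *s) {};   // rejected: handler could receive a non-Sub
//   Sub *(^makeSub)(void)       = ...;
//   Super *(^makeSuper)(void)   = makeSub;        // ok: covariant return
//   makeSub = makeSuper;                          // rejected: would narrow the return type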
9052
9053/// Comparison routine for Objective-C protocols to be used with
9054/// llvm::array_pod_sort.
9055static int compareObjCProtocolsByName(ObjCProtocolDecl * const *lhs,
9056 ObjCProtocolDecl * const *rhs) {
9057 return (*lhs)->getName().compare((*rhs)->getName());
9058}
9059
9060/// getIntersectionOfProtocols - This routine finds the intersection of the sets
9061/// of protocols inherited from two distinct objective-c pointer objects with
9062/// the given common base.
9063/// It is used to build the composite qualifier list of the composite type of
9064/// the conditional expression involving two objective-c pointer objects.
9065static
9066void getIntersectionOfProtocols(ASTContext &Context,
9067 const ObjCInterfaceDecl *CommonBase,
9068 const ObjCObjectPointerType *LHSOPT,
9069 const ObjCObjectPointerType *RHSOPT,
9070 SmallVectorImpl<ObjCProtocolDecl *> &IntersectionSet) {
9071
9072 const ObjCObjectType* LHS = LHSOPT->getObjectType();
9073 const ObjCObjectType* RHS = RHSOPT->getObjectType();
9074 assert(LHS->getInterface() && "LHS must have an interface base")((void)0);
9075 assert(RHS->getInterface() && "RHS must have an interface base")((void)0);
9076
9077 // Add all of the protocols for the LHS.
9078 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSProtocolSet;
9079
9080 // Start with the protocol qualifiers.
9081 for (auto proto : LHS->quals()) {
9082 Context.CollectInheritedProtocols(proto, LHSProtocolSet);
9083 }
9084
9085 // Also add the protocols associated with the LHS interface.
9086 Context.CollectInheritedProtocols(LHS->getInterface(), LHSProtocolSet);
9087
9088 // Add all of the protocols for the RHS.
9089 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> RHSProtocolSet;
9090
9091 // Start with the protocol qualifiers.
9092 for (auto proto : RHS->quals()) {
9093 Context.CollectInheritedProtocols(proto, RHSProtocolSet);
9094 }
9095
9096 // Also add the protocols associated with the RHS interface.
9097 Context.CollectInheritedProtocols(RHS->getInterface(), RHSProtocolSet);
9098
9099 // Compute the intersection of the collected protocol sets.
9100 for (auto proto : LHSProtocolSet) {
9101 if (RHSProtocolSet.count(proto))
9102 IntersectionSet.push_back(proto);
9103 }
9104
9105 // Compute the set of protocols that is implied by either the common type or
9106 // the protocols within the intersection.
9107 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> ImpliedProtocols;
9108 Context.CollectInheritedProtocols(CommonBase, ImpliedProtocols);
9109
9110 // Remove any implied protocols from the list of inherited protocols.
9111 if (!ImpliedProtocols.empty()) {
9112 IntersectionSet.erase(
9113 std::remove_if(IntersectionSet.begin(),
9114 IntersectionSet.end(),
9115 [&](ObjCProtocolDecl *proto) -> bool {
9116 return ImpliedProtocols.count(proto) > 0;
9117 }),
9118 IntersectionSet.end());
9119 }
9120
9121 // Sort the remaining protocols by name.
9122 llvm::array_pod_sort(IntersectionSet.begin(), IntersectionSet.end(),
9123 compareObjCProtocolsByName);
9124}
9125
9126/// Determine whether the first type is a subtype of the second.
9127static bool canAssignObjCObjectTypes(ASTContext &ctx, QualType lhs,
9128 QualType rhs) {
9129 // Common case: two object pointers.
9130 const auto *lhsOPT = lhs->getAs<ObjCObjectPointerType>();
9131 const auto *rhsOPT = rhs->getAs<ObjCObjectPointerType>();
9132 if (lhsOPT && rhsOPT)
9133 return ctx.canAssignObjCInterfaces(lhsOPT, rhsOPT);
9134
9135 // Two block pointers.
9136 const auto *lhsBlock = lhs->getAs<BlockPointerType>();
9137 const auto *rhsBlock = rhs->getAs<BlockPointerType>();
9138 if (lhsBlock && rhsBlock)
9139 return ctx.typesAreBlockPointerCompatible(lhs, rhs);
9140
9141 // If either is an unqualified 'id' and the other is a block, it's
9142 // acceptable.
9143 if ((lhsOPT && lhsOPT->isObjCIdType() && rhsBlock) ||
9144 (rhsOPT && rhsOPT->isObjCIdType() && lhsBlock))
9145 return true;
9146
9147 return false;
9148}
9149
9150// Check that the given Objective-C type argument lists are equivalent.
9151static bool sameObjCTypeArgs(ASTContext &ctx,
9152 const ObjCInterfaceDecl *iface,
9153 ArrayRef<QualType> lhsArgs,
9154 ArrayRef<QualType> rhsArgs,
9155 bool stripKindOf) {
9156 if (lhsArgs.size() != rhsArgs.size())
9157 return false;
9158
9159 ObjCTypeParamList *typeParams = iface->getTypeParamList();
9160 for (unsigned i = 0, n = lhsArgs.size(); i != n; ++i) {
9161 if (ctx.hasSameType(lhsArgs[i], rhsArgs[i]))
9162 continue;
9163
9164 switch (typeParams->begin()[i]->getVariance()) {
9165 case ObjCTypeParamVariance::Invariant:
9166 if (!stripKindOf ||
9167 !ctx.hasSameType(lhsArgs[i].stripObjCKindOfType(ctx),
9168 rhsArgs[i].stripObjCKindOfType(ctx))) {
9169 return false;
9170 }
9171 break;
9172
9173 case ObjCTypeParamVariance::Covariant:
9174 if (!canAssignObjCObjectTypes(ctx, lhsArgs[i], rhsArgs[i]))
9175 return false;
9176 break;
9177
9178 case ObjCTypeParamVariance::Contravariant:
9179 if (!canAssignObjCObjectTypes(ctx, rhsArgs[i], lhsArgs[i]))
9180 return false;
9181 break;
9182 }
9183 }
9184
9185 return true;
9186}
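// Illustrative sketch (not part of the original source) of the variance check for
// lightweight generics, assuming Foundation's NSArray, whose ObjectType parameter
// is declared __covariant (type parameters are invariant unless annotated):
//
//   NSArray<NSString *> *strings = ...;
//   NSArray<NSObject *> *objects = strings;   // ok: covariant type argument
//   // Invariant parameters require identical arguments (modulo __kindof when
//   // stripKindOf is set); __contravariant parameters reverse the direction.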
9187
9188QualType ASTContext::areCommonBaseCompatible(
9189 const ObjCObjectPointerType *Lptr,
9190 const ObjCObjectPointerType *Rptr) {
9191 const ObjCObjectType *LHS = Lptr->getObjectType();
9192 const ObjCObjectType *RHS = Rptr->getObjectType();
9193 const ObjCInterfaceDecl* LDecl = LHS->getInterface();
9194 const ObjCInterfaceDecl* RDecl = RHS->getInterface();
9195
9196 if (!LDecl || !RDecl)
9197 return {};
9198
9199 // When either LHS or RHS is a kindof type, we should return a kindof type.
9200 // For example, for common base of kindof(ASub1) and kindof(ASub2), we return
9201 // kindof(A).
9202 bool anyKindOf = LHS->isKindOfType() || RHS->isKindOfType();
9203
9204 // Follow the left-hand side up the class hierarchy until we either hit a
9205 // root or find the RHS. Record the ancestors in case we don't find it.
9206 llvm::SmallDenseMap<const ObjCInterfaceDecl *, const ObjCObjectType *, 4>
9207 LHSAncestors;
9208 while (true) {
9209 // Record this ancestor. We'll need this if the common type isn't in the
9210 // path from the LHS to the root.
9211 LHSAncestors[LHS->getInterface()->getCanonicalDecl()] = LHS;
9212
9213 if (declaresSameEntity(LHS->getInterface(), RDecl)) {
9214 // Get the type arguments.
9215 ArrayRef<QualType> LHSTypeArgs = LHS->getTypeArgsAsWritten();
9216 bool anyChanges = false;
9217 if (LHS->isSpecialized() && RHS->isSpecialized()) {
9218 // Both have type arguments, compare them.
9219 if (!sameObjCTypeArgs(*this, LHS->getInterface(),
9220 LHS->getTypeArgs(), RHS->getTypeArgs(),
9221 /*stripKindOf=*/true))
9222 return {};
9223 } else if (LHS->isSpecialized() != RHS->isSpecialized()) {
9224 // If only one has type arguments, the result will not have type
9225 // arguments.
9226 LHSTypeArgs = {};
9227 anyChanges = true;
9228 }
9229
9230 // Compute the intersection of protocols.
9231 SmallVector<ObjCProtocolDecl *, 8> Protocols;
9232 getIntersectionOfProtocols(*this, LHS->getInterface(), Lptr, Rptr,
9233 Protocols);
9234 if (!Protocols.empty())
9235 anyChanges = true;
9236
9237 // If anything in the LHS will have changed, build a new result type.
9238 // If we need to return a kindof type but LHS is not a kindof type, we
9239 // build a new result type.
9240 if (anyChanges || LHS->isKindOfType() != anyKindOf) {
9241 QualType Result = getObjCInterfaceType(LHS->getInterface());
9242 Result = getObjCObjectType(Result, LHSTypeArgs, Protocols,
9243 anyKindOf || LHS->isKindOfType());
9244 return getObjCObjectPointerType(Result);
9245 }
9246
9247 return getObjCObjectPointerType(QualType(LHS, 0));
9248 }
9249
9250 // Find the superclass.
9251 QualType LHSSuperType = LHS->getSuperClassType();
9252 if (LHSSuperType.isNull())
9253 break;
9254
9255 LHS = LHSSuperType->castAs<ObjCObjectType>();
9256 }
9257
9258 // We didn't find anything by following the LHS to its root; now check
9259 // the RHS against the cached set of ancestors.
9260 while (true) {
9261 auto KnownLHS = LHSAncestors.find(RHS->getInterface()->getCanonicalDecl());
9262 if (KnownLHS != LHSAncestors.end()) {
9263 LHS = KnownLHS->second;
9264
9265 // Get the type arguments.
9266 ArrayRef<QualType> RHSTypeArgs = RHS->getTypeArgsAsWritten();
9267 bool anyChanges = false;
9268 if (LHS->isSpecialized() && RHS->isSpecialized()) {
9269 // Both have type arguments, compare them.
9270 if (!sameObjCTypeArgs(*this, LHS->getInterface(),
9271 LHS->getTypeArgs(), RHS->getTypeArgs(),
9272 /*stripKindOf=*/true))
9273 return {};
9274 } else if (LHS->isSpecialized() != RHS->isSpecialized()) {
9275 // If only one has type arguments, the result will not have type
9276 // arguments.
9277 RHSTypeArgs = {};
9278 anyChanges = true;
9279 }
9280
9281 // Compute the intersection of protocols.
9282 SmallVector<ObjCProtocolDecl *, 8> Protocols;
9283 getIntersectionOfProtocols(*this, RHS->getInterface(), Lptr, Rptr,
9284 Protocols);
9285 if (!Protocols.empty())
9286 anyChanges = true;
9287
9288 // If we need to return a kindof type but RHS is not a kindof type, we
9289 // build a new result type.
9290 if (anyChanges || RHS->isKindOfType() != anyKindOf) {
9291 QualType Result = getObjCInterfaceType(RHS->getInterface());
9292 Result = getObjCObjectType(Result, RHSTypeArgs, Protocols,
9293 anyKindOf || RHS->isKindOfType());
9294 return getObjCObjectPointerType(Result);
9295 }
9296
9297 return getObjCObjectPointerType(QualType(RHS, 0));
9298 }
9299
9300 // Find the superclass of the RHS.
9301 QualType RHSSuperType = RHS->getSuperClassType();
9302 if (RHSSuperType.isNull())
9303 break;
9304
9305 RHS = RHSSuperType->castAs<ObjCObjectType>();
9306 }
9307
9308 return {};
9309}
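// Illustrative sketch (not part of the original source): this is what gives a
// conditional expression on two related object pointers its composite type,
// assuming hypothetical classes Sub1 : Base<P1, P2> and Sub2 : Base<P1, P3>:
//
//   id r = flag ? sub1 : sub2;
//   // The composite type is roughly 'Base<P1> *': the nearest common ancestor,
//   // qualified with the protocol intersection from getIntersectionOfProtocols()
//   // (minus protocols Base already adopts), and '__kindof' is kept if either
//   // operand had it.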
9310
9311bool ASTContext::canAssignObjCInterfaces(const ObjCObjectType *LHS,
9312 const ObjCObjectType *RHS) {
9313 assert(LHS->getInterface() && "LHS is not an interface type")((void)0);
9314 assert(RHS->getInterface() && "RHS is not an interface type")((void)0);
9315
9316 // Verify that the base decls are compatible: the RHS must be a subclass of
9317 // the LHS.
9318 ObjCInterfaceDecl *LHSInterface = LHS->getInterface();
9319 bool IsSuperClass = LHSInterface->isSuperClassOf(RHS->getInterface());
9320 if (!IsSuperClass)
9321 return false;
9322
9323 // If the LHS has protocol qualifiers, determine whether all of them are
9324 // satisfied by the RHS (i.e., the RHS has a superset of the protocols in the
9325 // LHS).
9326 if (LHS->getNumProtocols() > 0) {
9327 // OK if conversion of LHS to SuperClass results in narrowing of types;
9328 // i.e., SuperClass may implement at least one of the protocols
9329 // in LHS's protocol list. For example, SuperObj<P1> = lhs<P1,P2> is OK,
9330 // but SuperObj<P1,P2,P3> = lhs<P1,P2> is not.
9331 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> SuperClassInheritedProtocols;
9332 CollectInheritedProtocols(RHS->getInterface(), SuperClassInheritedProtocols);
9333 // Also, if RHS has explicit qualifiers, include them for comparing with LHS's
9334 // qualifiers.
9335 for (auto *RHSPI : RHS->quals())
9336 CollectInheritedProtocols(RHSPI, SuperClassInheritedProtocols);
9337 // If there are no protocols associated with RHS, it is not a match.
9338 if (SuperClassInheritedProtocols.empty())
9339 return false;
9340
9341 for (const auto *LHSProto : LHS->quals()) {
9342 bool SuperImplementsProtocol = false;
9343 for (auto *SuperClassProto : SuperClassInheritedProtocols)
9344 if (SuperClassProto->lookupProtocolNamed(LHSProto->getIdentifier())) {
9345 SuperImplementsProtocol = true;
9346 break;
9347 }
9348 if (!SuperImplementsProtocol)
9349 return false;
9350 }
9351 }
9352
9353 // If the LHS is specialized, we may need to check type arguments.
9354 if (LHS->isSpecialized()) {
9355 // Follow the superclass chain until we've matched the LHS class in the
9356 // hierarchy. This substitutes type arguments through.
9357 const ObjCObjectType *RHSSuper = RHS;
9358 while (!declaresSameEntity(RHSSuper->getInterface(), LHSInterface))
9359 RHSSuper = RHSSuper->getSuperClassType()->castAs<ObjCObjectType>();
9360
9361 // If the RHS is specialized, compare type arguments.
9362 if (RHSSuper->isSpecialized() &&
9363 !sameObjCTypeArgs(*this, LHS->getInterface(),
9364 LHS->getTypeArgs(), RHSSuper->getTypeArgs(),
9365 /*stripKindOf=*/true)) {
9366 return false;
9367 }
9368 }
9369
9370 return true;
9371}
9372
9373bool ASTContext::areComparableObjCPointerTypes(QualType LHS, QualType RHS) {
9374 // get the "pointed to" types
9375 const auto *LHSOPT = LHS->getAs<ObjCObjectPointerType>();
9376 const auto *RHSOPT = RHS->getAs<ObjCObjectPointerType>();
9377
9378 if (!LHSOPT || !RHSOPT)
9379 return false;
9380
9381 return canAssignObjCInterfaces(LHSOPT, RHSOPT) ||
9382 canAssignObjCInterfaces(RHSOPT, LHSOPT);
9383}
9384
9385bool ASTContext::canBindObjCObjectType(QualType To, QualType From) {
9386 return canAssignObjCInterfaces(
9387 getObjCObjectPointerType(To)->castAs<ObjCObjectPointerType>(),
9388 getObjCObjectPointerType(From)->castAs<ObjCObjectPointerType>());
9389}
9390
9391/// typesAreCompatible - C99 6.7.3p9: For two qualified types to be compatible,
9392/// both shall have the identically qualified version of a compatible type.
9393/// C99 6.2.7p1: Two types have compatible types if their types are the
9394/// same. See 6.7.[2,3,5] for additional rules.
9395bool ASTContext::typesAreCompatible(QualType LHS, QualType RHS,
9396 bool CompareUnqualified) {
9397 if (getLangOpts().CPlusPlus)
9398 return hasSameType(LHS, RHS);
9399
9400 return !mergeTypes(LHS, RHS, false, CompareUnqualified).isNull();
9401}
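// Illustrative sketch (not part of the original source) of C compatibility as
// used here (C99 6.2.7, 6.7.3):
//
//   typedef int T;
//   extern T x;          extern int x;       // compatible: same type via a typedef
//   extern const int y;  extern int y;       // NOT compatible: qualifiers differ
//   extern int a[];      extern int a[10];   // compatible: composite type is int[10]
//
// In C++ this reduces to hasSameType(), since C++ has no separate notion of
// compatible-but-distinct types for this purpose.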
9402
9403bool ASTContext::propertyTypesAreCompatible(QualType LHS, QualType RHS) {
9404 return typesAreCompatible(LHS, RHS);
9405}
9406
9407bool ASTContext::typesAreBlockPointerCompatible(QualType LHS, QualType RHS) {
9408 return !mergeTypes(LHS, RHS, true).isNull();
9409}
9410
9411/// mergeTransparentUnionType - if T is a transparent union type and a member
9412/// of T is compatible with SubType, return the merged type, else return
9413/// QualType()
9414QualType ASTContext::mergeTransparentUnionType(QualType T, QualType SubType,
9415 bool OfBlockPointer,
9416 bool Unqualified) {
9417 if (const RecordType *UT = T->getAsUnionType()) {
9418 RecordDecl *UD = UT->getDecl();
9419 if (UD->hasAttr<TransparentUnionAttr>()) {
9420 for (const auto *I : UD->fields()) {
9421 QualType ET = I->getType().getUnqualifiedType();
9422 QualType MT = mergeTypes(ET, SubType, OfBlockPointer, Unqualified);
9423 if (!MT.isNull())
9424 return MT;
9425 }
9426 }
9427 }
9428
9429 return {};
9430}
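// Illustrative sketch (not part of the original source) of the GNU
// transparent_union extension handled above, using a hypothetical union:
//
//   typedef union {
//     int        *ip;
//     const char *cp;
//   } arg_t __attribute__((transparent_union));
//
//   void f(arg_t a);
//   // f(&some_int) and f("text") are both accepted: for compatibility purposes a
//   // parameter of type arg_t merges with either member's type, which is what
//   // mergeTransparentUnionType() computes.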
9431
9432/// mergeFunctionParameterTypes - merge two types which appear as function
9433/// parameter types
9434QualType ASTContext::mergeFunctionParameterTypes(QualType lhs, QualType rhs,
9435 bool OfBlockPointer,
9436 bool Unqualified) {
9437 // GNU extension: two types are compatible if they appear as a function
9438 // argument, one of the types is a transparent union type and the other
9439 // type is compatible with a union member
9440 QualType lmerge = mergeTransparentUnionType(lhs, rhs, OfBlockPointer,
9441 Unqualified);
9442 if (!lmerge.isNull())
9443 return lmerge;
9444
9445 QualType rmerge = mergeTransparentUnionType(rhs, lhs, OfBlockPointer,
9446 Unqualified);
9447 if (!rmerge.isNull())
9448 return rmerge;
9449
9450 return mergeTypes(lhs, rhs, OfBlockPointer, Unqualified);
9451}
9452
9453QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
9454 bool OfBlockPointer, bool Unqualified,
9455 bool AllowCXX) {
9456 const auto *lbase = lhs->castAs<FunctionType>();
9457 const auto *rbase = rhs->castAs<FunctionType>();
9458 const auto *lproto = dyn_cast<FunctionProtoType>(lbase);
9459 const auto *rproto = dyn_cast<FunctionProtoType>(rbase);
9460 bool allLTypes = true;
9461 bool allRTypes = true;
9462
9463 // Check return type
9464 QualType retType;
9465 if (OfBlockPointer) {
9466 QualType RHS = rbase->getReturnType();
9467 QualType LHS = lbase->getReturnType();
9468 bool UnqualifiedResult = Unqualified;
9469 if (!UnqualifiedResult)
9470 UnqualifiedResult = (!RHS.hasQualifiers() && LHS.hasQualifiers());
9471 retType = mergeTypes(LHS, RHS, true, UnqualifiedResult, true);
9472 }
9473 else
9474 retType = mergeTypes(lbase->getReturnType(), rbase->getReturnType(), false,
9475 Unqualified);
9476 if (retType.isNull())
9477 return {};
9478
9479 if (Unqualified)
9480 retType = retType.getUnqualifiedType();
9481
9482 CanQualType LRetType = getCanonicalType(lbase->getReturnType());
9483 CanQualType RRetType = getCanonicalType(rbase->getReturnType());
9484 if (Unqualified) {
9485 LRetType = LRetType.getUnqualifiedType();
9486 RRetType = RRetType.getUnqualifiedType();
9487 }
9488
9489 if (getCanonicalType(retType) != LRetType)
9490 allLTypes = false;
9491 if (getCanonicalType(retType) != RRetType)
9492 allRTypes = false;
9493
9494 // FIXME: double check this
9495 // FIXME: should we error if lbase->getRegParmAttr() != 0 &&
9496 // rbase->getRegParmAttr() != 0 &&
9497 // lbase->getRegParmAttr() != rbase->getRegParmAttr()?
9498 FunctionType::ExtInfo lbaseInfo = lbase->getExtInfo();
9499 FunctionType::ExtInfo rbaseInfo = rbase->getExtInfo();
9500
9501 // Compatible functions must have compatible calling conventions
9502 if (lbaseInfo.getCC() != rbaseInfo.getCC())
9503 return {};
9504
9505 // Regparm is part of the calling convention.
9506 if (lbaseInfo.getHasRegParm() != rbaseInfo.getHasRegParm())
9507 return {};
9508 if (lbaseInfo.getRegParm() != rbaseInfo.getRegParm())
9509 return {};
9510
9511 if (lbaseInfo.getProducesResult() != rbaseInfo.getProducesResult())
9512 return {};
9513 if (lbaseInfo.getNoCallerSavedRegs() != rbaseInfo.getNoCallerSavedRegs())
9514 return {};
9515 if (lbaseInfo.getNoCfCheck() != rbaseInfo.getNoCfCheck())
9516 return {};
9517
9518 // FIXME: some uses, e.g. conditional exprs, really want this to be 'both'.
9519 bool NoReturn = lbaseInfo.getNoReturn() || rbaseInfo.getNoReturn();
9520
9521 if (lbaseInfo.getNoReturn() != NoReturn)
9522 allLTypes = false;
9523 if (rbaseInfo.getNoReturn() != NoReturn)
9524 allRTypes = false;
9525
9526 FunctionType::ExtInfo einfo = lbaseInfo.withNoReturn(NoReturn);
9527
9528 if (lproto && rproto) { // two C99 style function prototypes
9529 assert((AllowCXX ||
9530         (!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec())) &&
9531        "C++ shouldn't be here");
9532 // Compatible functions must have the same number of parameters
9533 if (lproto->getNumParams() != rproto->getNumParams())
9534 return {};
9535
9536 // Variadic and non-variadic functions aren't compatible
9537 if (lproto->isVariadic() != rproto->isVariadic())
9538 return {};
9539
9540 if (lproto->getMethodQuals() != rproto->getMethodQuals())
9541 return {};
9542
9543 SmallVector<FunctionProtoType::ExtParameterInfo, 4> newParamInfos;
9544 bool canUseLeft, canUseRight;
9545 if (!mergeExtParameterInfo(lproto, rproto, canUseLeft, canUseRight,
9546 newParamInfos))
9547 return {};
9548
9549 if (!canUseLeft)
9550 allLTypes = false;
9551 if (!canUseRight)
9552 allRTypes = false;
9553
9554 // Check parameter type compatibility
9555 SmallVector<QualType, 10> types;
9556 for (unsigned i = 0, n = lproto->getNumParams(); i < n; i++) {
9557 QualType lParamType = lproto->getParamType(i).getUnqualifiedType();
9558 QualType rParamType = rproto->getParamType(i).getUnqualifiedType();
9559 QualType paramType = mergeFunctionParameterTypes(
9560 lParamType, rParamType, OfBlockPointer, Unqualified);
9561 if (paramType.isNull())
9562 return {};
9563
9564 if (Unqualified)
9565 paramType = paramType.getUnqualifiedType();
9566
9567 types.push_back(paramType);
9568 if (Unqualified) {
9569 lParamType = lParamType.getUnqualifiedType();
9570 rParamType = rParamType.getUnqualifiedType();
9571 }
9572
9573 if (getCanonicalType(paramType) != getCanonicalType(lParamType))
9574 allLTypes = false;
9575 if (getCanonicalType(paramType) != getCanonicalType(rParamType))
9576 allRTypes = false;
9577 }
9578
9579 if (allLTypes) return lhs;
9580 if (allRTypes) return rhs;
9581
9582 FunctionProtoType::ExtProtoInfo EPI = lproto->getExtProtoInfo();
9583 EPI.ExtInfo = einfo;
9584 EPI.ExtParameterInfos =
9585 newParamInfos.empty() ? nullptr : newParamInfos.data();
9586 return getFunctionType(retType, types, EPI);
9587 }
9588
9589 if (lproto) allRTypes = false;
9590 if (rproto) allLTypes = false;
9591
9592 const FunctionProtoType *proto = lproto ? lproto : rproto;
9593 if (proto) {
9594 assert((AllowCXX || !proto->hasExceptionSpec()) && "C++ shouldn't be here")((void)0);
9595 if (proto->isVariadic())
9596 return {};
9597 // Check that the types are compatible with the types that
9598 // would result from default argument promotions (C99 6.7.5.3p15).
9599 // The only types actually affected are promotable integer
9600 // types and floats, which would be passed as a different
9601 // type depending on whether the prototype is visible.
9602 for (unsigned i = 0, n = proto->getNumParams(); i < n; ++i) {
9603 QualType paramTy = proto->getParamType(i);
9604
9605 // Look at the converted type of enum types, since that is the type used
9606 // to pass enum values.
9607 if (const auto *Enum = paramTy->getAs<EnumType>()) {
9608 paramTy = Enum->getDecl()->getIntegerType();
9609 if (paramTy.isNull())
9610 return {};
9611 }
9612
9613 if (paramTy->isPromotableIntegerType() ||
9614 getCanonicalType(paramTy).getUnqualifiedType() == FloatTy)
9615 return {};
9616 }
9617
9618 if (allLTypes) return lhs;
9619 if (allRTypes) return rhs;
9620
9621 FunctionProtoType::ExtProtoInfo EPI = proto->getExtProtoInfo();
9622 EPI.ExtInfo = einfo;
9623 return getFunctionType(retType, proto->getParamTypes(), EPI);
9624 }
9625
9626 if (allLTypes) return lhs;
9627 if (allRTypes) return rhs;
9628 return getFunctionNoProtoType(retType, einfo);
9629}
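// Illustrative sketch (not part of the original source) of the prototype /
// no-prototype merging above, in C:
//
//   int f();            // no prototype
//   int f(int x);       // ok: composite type is int(int); 'int' survives promotion
//   int g();
//   int g(short s);     // rejected: 'short' is changed by default argument promotions
//   int h();
//   int h(int n, ...);  // rejected: a variadic prototype cannot merge with no prototype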
9630
9631/// Given that we have an enum type and a non-enum type, try to merge them.
9632static QualType mergeEnumWithInteger(ASTContext &Context, const EnumType *ET,
9633 QualType other, bool isBlockReturnType) {
9634 // C99 6.7.2.2p4: Each enumerated type shall be compatible with char,
9635 // a signed integer type, or an unsigned integer type.
9636 // Compatibility is based on the underlying type, not the promotion
9637 // type.
9638 QualType underlyingType = ET->getDecl()->getIntegerType();
9639 if (underlyingType.isNull())
9640 return {};
9641 if (Context.hasSameType(underlyingType, other))
9642 return other;
9643
9644 // In block return types, we're more permissive and accept any
9645 // integral type of the same size.
9646 if (isBlockReturnType && other->isIntegerType() &&
9647 Context.getTypeSize(underlyingType) == Context.getTypeSize(other))
9648 return other;
9649
9650 return {};
9651}
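// Illustrative sketch (not part of the original source) of enum/integer merging
// in C, where the underlying type is implementation-defined:
//
//   enum E { A, B };
//   extern enum E e;
//   extern unsigned int e;   // accepted only if 'unsigned int' is E's underlying
//                            // type on this target; the check tracks the underlying
//                            // type, not the promoted type.
//
// For block return types the rule is relaxed to any integer type of the same width.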
9652
9653QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
9654 bool OfBlockPointer,
9655 bool Unqualified, bool BlockReturnType) {
9656 // For C++ we will not reach this code with reference types (see below),
9657 // for OpenMP variant call overloading we might.
9658 //
9659 // C++ [expr]: If an expression initially has the type "reference to T", the
9660 // type is adjusted to "T" prior to any further analysis, the expression
9661 // designates the object or function denoted by the reference, and the
9662 // expression is an lvalue unless the reference is an rvalue reference and
9663 // the expression is a function call (possibly inside parentheses).
9664 if (LangOpts.OpenMP && LHS->getAs<ReferenceType>() &&
9665 RHS->getAs<ReferenceType>() && LHS->getTypeClass() == RHS->getTypeClass())
9666 return mergeTypes(LHS->getAs<ReferenceType>()->getPointeeType(),
9667 RHS->getAs<ReferenceType>()->getPointeeType(),
9668 OfBlockPointer, Unqualified, BlockReturnType);
9669 if (LHS->getAs<ReferenceType>() || RHS->getAs<ReferenceType>())
9670 return {};
9671
9672 if (Unqualified) {
9673 LHS = LHS.getUnqualifiedType();
9674 RHS = RHS.getUnqualifiedType();
9675 }
9676
9677 QualType LHSCan = getCanonicalType(LHS),
9678 RHSCan = getCanonicalType(RHS);
9679
9680 // If two types are identical, they are compatible.
9681 if (LHSCan == RHSCan)
9682 return LHS;
9683
9684 // If the qualifiers are different, the types aren't compatible... mostly.
9685 Qualifiers LQuals = LHSCan.getLocalQualifiers();
9686 Qualifiers RQuals = RHSCan.getLocalQualifiers();
9687 if (LQuals != RQuals) {
9688 // If any of these qualifiers are different, we have a type
9689 // mismatch.
9690 if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() ||
9691 LQuals.getAddressSpace() != RQuals.getAddressSpace() ||
9692 LQuals.getObjCLifetime() != RQuals.getObjCLifetime() ||
9693 LQuals.hasUnaligned() != RQuals.hasUnaligned())
9694 return {};
9695
9696 // Exactly one GC qualifier difference is allowed: __strong is
9697 // okay if the other type has no GC qualifier but is an Objective
9698 // C object pointer (i.e. implicitly strong by default). We fix
9699 // this by pretending that the unqualified type was actually
9700 // qualified __strong.
9701 Qualifiers::GC GC_L = LQuals.getObjCGCAttr();
9702 Qualifiers::GC GC_R = RQuals.getObjCGCAttr();
9703 assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements")((void)0);
9704
9705 if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak)
9706 return {};
9707
9708 if (GC_L == Qualifiers::Strong && RHSCan->isObjCObjectPointerType()) {
9709 return mergeTypes(LHS, getObjCGCQualType(RHS, Qualifiers::Strong));
9710 }
9711 if (GC_R == Qualifiers::Strong && LHSCan->isObjCObjectPointerType()) {
9712 return mergeTypes(getObjCGCQualType(LHS, Qualifiers::Strong), RHS);
9713 }
9714 return {};
9715 }
9716
9717 // Okay, qualifiers are equal.
9718
9719 Type::TypeClass LHSClass = LHSCan->getTypeClass();
9720 Type::TypeClass RHSClass = RHSCan->getTypeClass();
9721
9722 // We want to consider the two function types to be the same for these
9723 // comparisons, just force one to the other.
9724 if (LHSClass == Type::FunctionProto) LHSClass = Type::FunctionNoProto;
9725 if (RHSClass == Type::FunctionProto) RHSClass = Type::FunctionNoProto;
9726
9727 // Same as above for arrays
9728 if (LHSClass == Type::VariableArray || LHSClass == Type::IncompleteArray)
9729 LHSClass = Type::ConstantArray;
9730 if (RHSClass == Type::VariableArray || RHSClass == Type::IncompleteArray)
9731 RHSClass = Type::ConstantArray;
9732
9733 // ObjCInterfaces are just specialized ObjCObjects.
9734 if (LHSClass == Type::ObjCInterface) LHSClass = Type::ObjCObject;
9735 if (RHSClass == Type::ObjCInterface) RHSClass = Type::ObjCObject;
9736
9737 // Canonicalize ExtVector -> Vector.
9738 if (LHSClass == Type::ExtVector) LHSClass = Type::Vector;
9739 if (RHSClass == Type::ExtVector) RHSClass = Type::Vector;
9740
9741 // If the canonical type classes don't match.
9742 if (LHSClass != RHSClass) {
9743 // Note that we only have special rules for turning block enum
9744 // returns into block int returns, not vice-versa.
9745 if (const auto *ETy = LHS->getAs<EnumType>()) {
9746 return mergeEnumWithInteger(*this, ETy, RHS, false);
9747 }
9748 if (const EnumType* ETy = RHS->getAs<EnumType>()) {
9749 return mergeEnumWithInteger(*this, ETy, LHS, BlockReturnType);
9750 }
9751 // allow block pointer type to match an 'id' type.
9752 if (OfBlockPointer && !BlockReturnType) {
9753 if (LHS->isObjCIdType() && RHS->isBlockPointerType())
9754 return LHS;
9755 if (RHS->isObjCIdType() && LHS->isBlockPointerType())
9756 return RHS;
9757 }
9758
9759 return {};
9760 }
9761
9762 // The canonical type classes match.
9763 switch (LHSClass) {
9764#define TYPE(Class, Base)
9765#define ABSTRACT_TYPE(Class, Base)
9766#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
9767#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
9768#define DEPENDENT_TYPE(Class, Base) case Type::Class:
9769#include "clang/AST/TypeNodes.inc"
9770 llvm_unreachable("Non-canonical and dependent types shouldn't get here")__builtin_unreachable();
9771
9772 case Type::Auto:
9773 case Type::DeducedTemplateSpecialization:
9774 case Type::LValueReference:
9775 case Type::RValueReference:
9776 case Type::MemberPointer:
9777 llvm_unreachable("C++ should never be in mergeTypes")__builtin_unreachable();
9778
9779 case Type::ObjCInterface:
9780 case Type::IncompleteArray:
9781 case Type::VariableArray:
9782 case Type::FunctionProto:
9783 case Type::ExtVector:
9784 llvm_unreachable("Types are eliminated above")__builtin_unreachable();
9785
9786 case Type::Pointer:
9787 {
9788 // Merge two pointer types, while trying to preserve typedef info
9789 QualType LHSPointee = LHS->castAs<PointerType>()->getPointeeType();
9790 QualType RHSPointee = RHS->castAs<PointerType>()->getPointeeType();
9791 if (Unqualified) {
9792 LHSPointee = LHSPointee.getUnqualifiedType();
9793 RHSPointee = RHSPointee.getUnqualifiedType();
9794 }
9795 QualType ResultType = mergeTypes(LHSPointee, RHSPointee, false,
9796 Unqualified);
9797 if (ResultType.isNull())
9798 return {};
9799 if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType))
9800 return LHS;
9801 if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType))
9802 return RHS;
9803 return getPointerType(ResultType);
9804 }
9805 case Type::BlockPointer:
9806 {
9807 // Merge two block pointer types, while trying to preserve typedef info
9808 QualType LHSPointee = LHS->castAs<BlockPointerType>()->getPointeeType();
9809 QualType RHSPointee = RHS->castAs<BlockPointerType>()->getPointeeType();
9810 if (Unqualified) {
9811 LHSPointee = LHSPointee.getUnqualifiedType();
9812 RHSPointee = RHSPointee.getUnqualifiedType();
9813 }
9814 if (getLangOpts().OpenCL) {
9815 Qualifiers LHSPteeQual = LHSPointee.getQualifiers();
9816 Qualifiers RHSPteeQual = RHSPointee.getQualifiers();
9817 // Blocks can't be an expression in a ternary operator (OpenCL v2.0
9818 // 6.12.5) thus the following check is asymmetric.
9819 if (!LHSPteeQual.isAddressSpaceSupersetOf(RHSPteeQual))
9820 return {};
9821 LHSPteeQual.removeAddressSpace();
9822 RHSPteeQual.removeAddressSpace();
9823 LHSPointee =
9824 QualType(LHSPointee.getTypePtr(), LHSPteeQual.getAsOpaqueValue());
9825 RHSPointee =
9826 QualType(RHSPointee.getTypePtr(), RHSPteeQual.getAsOpaqueValue());
9827 }
9828 QualType ResultType = mergeTypes(LHSPointee, RHSPointee, OfBlockPointer,
9829 Unqualified);
9830 if (ResultType.isNull())
9831 return {};
9832 if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType))
9833 return LHS;
9834 if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType))
9835 return RHS;
9836 return getBlockPointerType(ResultType);
9837 }
9838 case Type::Atomic:
9839 {
9840 // Merge two pointer types, while trying to preserve typedef info
9841 QualType LHSValue = LHS->castAs<AtomicType>()->getValueType();
9842 QualType RHSValue = RHS->castAs<AtomicType>()->getValueType();
9843 if (Unqualified) {
9844 LHSValue = LHSValue.getUnqualifiedType();
9845 RHSValue = RHSValue.getUnqualifiedType();
9846 }
9847 QualType ResultType = mergeTypes(LHSValue, RHSValue, false,
9848 Unqualified);
9849 if (ResultType.isNull())
9850 return {};
9851 if (getCanonicalType(LHSValue) == getCanonicalType(ResultType))
9852 return LHS;
9853 if (getCanonicalType(RHSValue) == getCanonicalType(ResultType))
9854 return RHS;
9855 return getAtomicType(ResultType);
9856 }
9857 case Type::ConstantArray:
9858 {
9859 const ConstantArrayType* LCAT = getAsConstantArrayType(LHS);
9860 const ConstantArrayType* RCAT = getAsConstantArrayType(RHS);
9861 if (LCAT && RCAT && RCAT->getSize() != LCAT->getSize())
9862 return {};
9863
9864 QualType LHSElem = getAsArrayType(LHS)->getElementType();
9865 QualType RHSElem = getAsArrayType(RHS)->getElementType();
9866 if (Unqualified) {
9867 LHSElem = LHSElem.getUnqualifiedType();
9868 RHSElem = RHSElem.getUnqualifiedType();
9869 }
9870
9871 QualType ResultType = mergeTypes(LHSElem, RHSElem, false, Unqualified);
9872 if (ResultType.isNull())
9873 return {};
9874
9875 const VariableArrayType* LVAT = getAsVariableArrayType(LHS);
9876 const VariableArrayType* RVAT = getAsVariableArrayType(RHS);
9877
9878 // If either side is a variable array, and both are complete, check whether
9879 // the current dimension is definite.
9880 if (LVAT || RVAT) {
9881 auto SizeFetch = [this](const VariableArrayType* VAT,
9882 const ConstantArrayType* CAT)
9883 -> std::pair<bool,llvm::APInt> {
9884 if (VAT) {
9885 Optional<llvm::APSInt> TheInt;
9886 Expr *E = VAT->getSizeExpr();
9887 if (E && (TheInt = E->getIntegerConstantExpr(*this)))
9888 return std::make_pair(true, *TheInt);
9889 return std::make_pair(false, llvm::APSInt());
9890 }
9891 if (CAT)
9892 return std::make_pair(true, CAT->getSize());
9893 return std::make_pair(false, llvm::APInt());
9894 };
9895
9896 bool HaveLSize, HaveRSize;
9897 llvm::APInt LSize, RSize;
9898 std::tie(HaveLSize, LSize) = SizeFetch(LVAT, LCAT);
9899 std::tie(HaveRSize, RSize) = SizeFetch(RVAT, RCAT);
9900 if (HaveLSize && HaveRSize && !llvm::APInt::isSameValue(LSize, RSize))
9901 return {}; // Definite, but unequal, array dimension
9902 }
9903
9904 if (LCAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType))
9905 return LHS;
9906 if (RCAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType))
9907 return RHS;
9908 if (LCAT)
9909 return getConstantArrayType(ResultType, LCAT->getSize(),
9910 LCAT->getSizeExpr(),
9911 ArrayType::ArraySizeModifier(), 0);
9912 if (RCAT)
9913 return getConstantArrayType(ResultType, RCAT->getSize(),
9914 RCAT->getSizeExpr(),
9915 ArrayType::ArraySizeModifier(), 0);
9916 if (LVAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType))
9917 return LHS;
9918 if (RVAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType))
9919 return RHS;
9920 if (LVAT) {
9921 // FIXME: This isn't correct! But tricky to implement because
9922 // the array's size has to be the size of LHS, but the type
9923 // has to be different.
9924 return LHS;
9925 }
9926 if (RVAT) {
9927 // FIXME: This isn't correct! But tricky to implement because
9928 // the array's size has to be the size of RHS, but the type
9929 // has to be different.
9930 return RHS;
9931 }
9932 if (getCanonicalType(LHSElem) == getCanonicalType(ResultType)) return LHS;
9933 if (getCanonicalType(RHSElem) == getCanonicalType(ResultType)) return RHS;
9934 return getIncompleteArrayType(ResultType,
9935 ArrayType::ArraySizeModifier(), 0);
9936 }
9937 case Type::FunctionNoProto:
9938 return mergeFunctionTypes(LHS, RHS, OfBlockPointer, Unqualified);
9939 case Type::Record:
9940 case Type::Enum:
9941 return {};
9942 case Type::Builtin:
9943 // Only exactly equal builtin types are compatible, which is tested above.
9944 return {};
9945 case Type::Complex:
9946 // Distinct complex types are incompatible.
9947 return {};
9948 case Type::Vector:
9949 // FIXME: The merged type should be an ExtVector!
9950 if (areCompatVectorTypes(LHSCan->castAs<VectorType>(),
9951 RHSCan->castAs<VectorType>()))
9952 return LHS;
9953 return {};
9954 case Type::ConstantMatrix:
9955 if (areCompatMatrixTypes(LHSCan->castAs<ConstantMatrixType>(),
9956 RHSCan->castAs<ConstantMatrixType>()))
9957 return LHS;
9958 return {};
9959 case Type::ObjCObject: {
9960 // Check if the types are assignment compatible.
9961 // FIXME: This should be type compatibility, e.g. whether
9962 // "LHS x; RHS x;" at global scope is legal.
9963 if (canAssignObjCInterfaces(LHS->castAs<ObjCObjectType>(),
9964 RHS->castAs<ObjCObjectType>()))
9965 return LHS;
9966 return {};
9967 }
9968 case Type::ObjCObjectPointer:
9969 if (OfBlockPointer) {
9970 if (canAssignObjCInterfacesInBlockPointer(
9971 LHS->castAs<ObjCObjectPointerType>(),
9972 RHS->castAs<ObjCObjectPointerType>(), BlockReturnType))
9973 return LHS;
9974 return {};
9975 }
9976 if (canAssignObjCInterfaces(LHS->castAs<ObjCObjectPointerType>(),
9977 RHS->castAs<ObjCObjectPointerType>()))
9978 return LHS;
9979 return {};
9980 case Type::Pipe:
9981 assert(LHS != RHS &&
9982        "Equivalent pipe types should have already been handled!");
9983 return {};
9984 case Type::ExtInt: {
9985 // Merge two ext-int types, while trying to preserve typedef info.
9986 bool LHSUnsigned = LHS->castAs<ExtIntType>()->isUnsigned();
9987 bool RHSUnsigned = RHS->castAs<ExtIntType>()->isUnsigned();
9988 unsigned LHSBits = LHS->castAs<ExtIntType>()->getNumBits();
9989 unsigned RHSBits = RHS->castAs<ExtIntType>()->getNumBits();
9990
9991 // Like unsigned vs. int, there shouldn't be a merged type if they don't match.
9992 if (LHSUnsigned != RHSUnsigned)
9993 return {};
9994
9995 if (LHSBits != RHSBits)
9996 return {};
9997 return LHS;
9998 }
9999 }
10000
10001 llvm_unreachable("Invalid Type::Class!")__builtin_unreachable();
10002}
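// Illustrative sketch (not part of the original source) of composite types
// produced by the pointer, array, and function cases above, in C:
//
//   extern int a[];          // incomplete array
//   extern int a[10];        // merges to int[10]
//   extern int (*fp)();      // pointer to unprototyped function
//   extern int (*fp)(int);   // merges to int (*)(int) via mergeFunctionTypes()
//   extern const int *p;
//   extern const int *p;     // identical; mergeTypes() returns the LHS unchanged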
10003
10004bool ASTContext::mergeExtParameterInfo(
10005 const FunctionProtoType *FirstFnType, const FunctionProtoType *SecondFnType,
10006 bool &CanUseFirst, bool &CanUseSecond,
10007 SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &NewParamInfos) {
10008 assert(NewParamInfos.empty() && "param info list not empty")((void)0);
10009 CanUseFirst = CanUseSecond = true;
10010 bool FirstHasInfo = FirstFnType->hasExtParameterInfos();
10011 bool SecondHasInfo = SecondFnType->hasExtParameterInfos();
10012
10013 // Fast path: if the first type doesn't have ext parameter infos,
10014 // we match if and only if the second type also doesn't have them.
10015 if (!FirstHasInfo && !SecondHasInfo)
10016 return true;
10017
10018 bool NeedParamInfo = false;
10019 size_t E = FirstHasInfo ? FirstFnType->getExtParameterInfos().size()
10020 : SecondFnType->getExtParameterInfos().size();
10021
10022 for (size_t I = 0; I < E; ++I) {
10023 FunctionProtoType::ExtParameterInfo FirstParam, SecondParam;
10024 if (FirstHasInfo)
10025 FirstParam = FirstFnType->getExtParameterInfo(I);
10026 if (SecondHasInfo)
10027 SecondParam = SecondFnType->getExtParameterInfo(I);
10028
10029 // Cannot merge unless everything except the noescape flag matches.
10030 if (FirstParam.withIsNoEscape(false) != SecondParam.withIsNoEscape(false))
10031 return false;
10032
10033 bool FirstNoEscape = FirstParam.isNoEscape();
10034 bool SecondNoEscape = SecondParam.isNoEscape();
10035 bool IsNoEscape = FirstNoEscape && SecondNoEscape;
10036 NewParamInfos.push_back(FirstParam.withIsNoEscape(IsNoEscape));
10037 if (NewParamInfos.back().getOpaqueValue())
10038 NeedParamInfo = true;
10039 if (FirstNoEscape != IsNoEscape)
10040 CanUseFirst = false;
10041 if (SecondNoEscape != IsNoEscape)
10042 CanUseSecond = false;
10043 }
10044
10045 if (!NeedParamInfo)
10046 NewParamInfos.clear();
10047
10048 return true;
10049}
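// Illustrative sketch (not part of the original source): the only per-parameter
// flag allowed to differ here is __attribute__((noescape)), and the merged
// parameter keeps it only when both declarations agree:
//
//   void run(__attribute__((noescape)) void (^block)(void));
//   void run(void (^block)(void));
//   // The merged parameter is not noescape, so the declaration that had the
//   // attribute can no longer be reused as-is (its CanUse flag is cleared).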
10050
10051void ASTContext::ResetObjCLayout(const ObjCContainerDecl *CD) {
10052 ObjCLayouts[CD] = nullptr;
10053}
10054
10055/// mergeObjCGCQualifiers - This routine merges the ObjC GC attributes of 'LHS'
10056/// and 'RHS' and returns the merged version, including for function
10057/// return types.
10058QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) {
10059 QualType LHSCan = getCanonicalType(LHS),
10060 RHSCan = getCanonicalType(RHS);
10061 // If two types are identical, they are compatible.
10062 if (LHSCan == RHSCan)
10063 return LHS;
10064 if (RHSCan->isFunctionType()) {
10065 if (!LHSCan->isFunctionType())
10066 return {};
10067 QualType OldReturnType =
10068 cast<FunctionType>(RHSCan.getTypePtr())->getReturnType();
10069 QualType NewReturnType =
10070 cast<FunctionType>(LHSCan.getTypePtr())->getReturnType();
10071 QualType ResReturnType =
10072 mergeObjCGCQualifiers(NewReturnType, OldReturnType);
10073 if (ResReturnType.isNull())
10074 return {};
10075 if (ResReturnType == NewReturnType || ResReturnType == OldReturnType) {
10076 // id foo(); ... __strong id foo(); or: __strong id foo(); ... id foo();
10077 // In either case, use OldReturnType to build the new function type.
10078 const auto *F = LHS->castAs<FunctionType>();
10079 if (const auto *FPT = cast<FunctionProtoType>(F)) {
10080 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
10081 EPI.ExtInfo = getFunctionExtInfo(LHS);
10082 QualType ResultType =
10083 getFunctionType(OldReturnType, FPT->getParamTypes(), EPI);
10084 return ResultType;
10085 }
10086 }
10087 return {};
10088 }
10089
10090 // If the qualifiers are different, the types can still be merged.
10091 Qualifiers LQuals = LHSCan.getLocalQualifiers();
10092 Qualifiers RQuals = RHSCan.getLocalQualifiers();
10093 if (LQuals != RQuals) {
10094 // If any of these qualifiers are different, we have a type mismatch.
10095 if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() ||
10096 LQuals.getAddressSpace() != RQuals.getAddressSpace())
10097 return {};
10098
10099 // Exactly one GC qualifier difference is allowed: __strong is
10100 // okay if the other type has no GC qualifier but is an Objective
10101 // C object pointer (i.e. implicitly strong by default). We fix
10102 // this by pretending that the unqualified type was actually
10103 // qualified __strong.
10104 Qualifiers::GC GC_L = LQuals.getObjCGCAttr();
10105 Qualifiers::GC GC_R = RQuals.getObjCGCAttr();
10106 assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements")((void)0);
10107
10108 if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak)
10109 return {};
10110
10111 if (GC_L == Qualifiers::Strong)
10112 return LHS;
10113 if (GC_R == Qualifiers::Strong)
10114 return RHS;
10115 return {};
10116 }
10117
10118 if (LHSCan->isObjCObjectPointerType() && RHSCan->isObjCObjectPointerType()) {
10119 QualType LHSBaseQT = LHS->castAs<ObjCObjectPointerType>()->getPointeeType();
10120 QualType RHSBaseQT = RHS->castAs<ObjCObjectPointerType>()->getPointeeType();
10121 QualType ResQT = mergeObjCGCQualifiers(LHSBaseQT, RHSBaseQT);
10122 if (ResQT == LHSBaseQT)
10123 return LHS;
10124 if (ResQT == RHSBaseQT)
10125 return RHS;
10126 }
10127 return {};
10128}
10129
10130//===----------------------------------------------------------------------===//
10131// Integer Predicates
10132//===----------------------------------------------------------------------===//
10133
10134unsigned ASTContext::getIntWidth(QualType T) const {
10135 if (const auto *ET = T->getAs<EnumType>())
10136 T = ET->getDecl()->getIntegerType();
10137 if (T->isBooleanType())
10138 return 1;
10139 if(const auto *EIT = T->getAs<ExtIntType>())
10140 return EIT->getNumBits();
10141 // For builtin types, just use the standard type sizing method
10142 return (unsigned)getTypeSize(T);
10143}
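// Illustrative sketch (not part of the original source) of the widths reported
// above on a typical target:
//
//   getIntWidth(BoolTy)            == 1    // the value width of _Bool, not its size
//   getIntWidth(an enum type)      == width of its underlying integer type
//   getIntWidth(_ExtInt(17))       == 17
//   getIntWidth(IntTy)             == 32   // falls through to getTypeSize()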
10144
10145QualType ASTContext::getCorrespondingUnsignedType(QualType T) const {
10146 assert((T->hasSignedIntegerRepresentation() || T->isSignedFixedPointType()) &&
10147        "Unexpected type");
10148
10149 // Turn <4 x signed int> -> <4 x unsigned int>
10150 if (const auto *VTy = T->getAs<VectorType>())
10151 return getVectorType(getCorrespondingUnsignedType(VTy->getElementType()),
10152 VTy->getNumElements(), VTy->getVectorKind());
10153
10154 // For _ExtInt, return an unsigned _ExtInt with same width.
10155 if (const auto *EITy = T->getAs<ExtIntType>())
10156 return getExtIntType(/*IsUnsigned=*/true, EITy->getNumBits());
10157
10158 // For enums, get the underlying integer type of the enum, and let the general
10159 // integer type sign-changing code handle it.
10160 if (const auto *ETy = T->getAs<EnumType>())
10161 T = ETy->getDecl()->getIntegerType();
10162
10163 switch (T->castAs<BuiltinType>()->getKind()) {
10164 case BuiltinType::Char_S:
10165 case BuiltinType::SChar:
10166 return UnsignedCharTy;
10167 case BuiltinType::Short:
10168 return UnsignedShortTy;
10169 case BuiltinType::Int:
10170 return UnsignedIntTy;
10171 case BuiltinType::Long:
10172 return UnsignedLongTy;
10173 case BuiltinType::LongLong:
10174 return UnsignedLongLongTy;
10175 case BuiltinType::Int128:
10176 return UnsignedInt128Ty;
10177 // wchar_t is special. It is either signed or not, but when it's signed,
10178 // there's no matching "unsigned wchar_t". Therefore we return the unsigned
10179 // version of its underlying type instead.
10180 case BuiltinType::WChar_S:
10181 return getUnsignedWCharType();
10182
10183 case BuiltinType::ShortAccum:
10184 return UnsignedShortAccumTy;
10185 case BuiltinType::Accum:
10186 return UnsignedAccumTy;
10187 case BuiltinType::LongAccum:
10188 return UnsignedLongAccumTy;
10189 case BuiltinType::SatShortAccum:
10190 return SatUnsignedShortAccumTy;
10191 case BuiltinType::SatAccum:
10192 return SatUnsignedAccumTy;
10193 case BuiltinType::SatLongAccum:
10194 return SatUnsignedLongAccumTy;
10195 case BuiltinType::ShortFract:
10196 return UnsignedShortFractTy;
10197 case BuiltinType::Fract:
10198 return UnsignedFractTy;
10199 case BuiltinType::LongFract:
10200 return UnsignedLongFractTy;
10201 case BuiltinType::SatShortFract:
10202 return SatUnsignedShortFractTy;
10203 case BuiltinType::SatFract:
10204 return SatUnsignedFractTy;
10205 case BuiltinType::SatLongFract:
10206 return SatUnsignedLongFractTy;
10207 default:
10208 llvm_unreachable("Unexpected signed integer or fixed point type")__builtin_unreachable();
10209 }
10210}
10211
10212QualType ASTContext::getCorrespondingSignedType(QualType T) const {
10213 assert((T->hasUnsignedIntegerRepresentation() ||
10214         T->isUnsignedFixedPointType()) &&
10215        "Unexpected type");
10216
10217 // Turn <4 x unsigned int> -> <4 x signed int>
10218 if (const auto *VTy = T->getAs<VectorType>())
10219 return getVectorType(getCorrespondingSignedType(VTy->getElementType()),
10220 VTy->getNumElements(), VTy->getVectorKind());
10221
10222 // For _ExtInt, return a signed _ExtInt with same width.
10223 if (const auto *EITy = T->getAs<ExtIntType>())
10224 return getExtIntType(/*IsUnsigned=*/false, EITy->getNumBits());
10225
10226 // For enums, get the underlying integer type of the enum, and let the general
10227 // integer type sign-changing code handle it.
10228 if (const auto *ETy = T->getAs<EnumType>())
10229 T = ETy->getDecl()->getIntegerType();
10230
10231 switch (T->castAs<BuiltinType>()->getKind()) {
10232 case BuiltinType::Char_U:
10233 case BuiltinType::UChar:
10234 return SignedCharTy;
10235 case BuiltinType::UShort:
10236 return ShortTy;
10237 case BuiltinType::UInt:
10238 return IntTy;
10239 case BuiltinType::ULong:
10240 return LongTy;
10241 case BuiltinType::ULongLong:
10242 return LongLongTy;
10243 case BuiltinType::UInt128:
10244 return Int128Ty;
10245 // wchar_t is special. It is either unsigned or not, but when it's unsigned,
10246 // there's no matching "signed wchar_t". Therefore we return the signed
10247 // version of its underlying type instead.
10248 case BuiltinType::WChar_U:
10249 return getSignedWCharType();
10250
10251 case BuiltinType::UShortAccum:
10252 return ShortAccumTy;
10253 case BuiltinType::UAccum:
10254 return AccumTy;
10255 case BuiltinType::ULongAccum:
10256 return LongAccumTy;
10257 case BuiltinType::SatUShortAccum:
10258 return SatShortAccumTy;
10259 case BuiltinType::SatUAccum:
10260 return SatAccumTy;
10261 case BuiltinType::SatULongAccum:
10262 return SatLongAccumTy;
10263 case BuiltinType::UShortFract:
10264 return ShortFractTy;
10265 case BuiltinType::UFract:
10266 return FractTy;
10267 case BuiltinType::ULongFract:
10268 return LongFractTy;
10269 case BuiltinType::SatUShortFract:
10270 return SatShortFractTy;
10271 case BuiltinType::SatUFract:
10272 return SatFractTy;
10273 case BuiltinType::SatULongFract:
10274 return SatLongFractTy;
10275 default:
10276 llvm_unreachable("Unexpected unsigned integer or fixed point type")__builtin_unreachable();
10277 }
10278}
10279
10280ASTMutationListener::~ASTMutationListener() = default;
10281
10282void ASTMutationListener::DeducedReturnType(const FunctionDecl *FD,
10283 QualType ReturnType) {}
10284
10285//===----------------------------------------------------------------------===//
10286// Builtin Type Computation
10287//===----------------------------------------------------------------------===//
10288
10289/// DecodeTypeFromStr - This decodes one type descriptor from Str, advancing the
10290/// pointer over the consumed characters. This returns the resultant type. If
10291/// AllowTypeModifiers is false then modifiers like * are not parsed, just basic
10292/// types. This allows "v2i*" to be parsed as a pointer to a v2i instead of
10293/// a vector of "i*".
10294///
10295/// RequiresICE is filled in on return to indicate whether the value is required
10296/// to be an Integer Constant Expression.
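// Illustrative note (not part of the original source): the letters decoded below
// are the ones used in clang's Builtins.def signature strings, read one type at a
// time (return type first, then parameters). For example, assuming the
// conventional spellings there:
//
//   "v"    -> void                 "i"    -> int            "Ui" -> unsigned int
//   "LLi"  -> long long            "zcC*" -> size_t (const char *), as in __builtin_strlen
//   "V4f"  -> vector of 4 floats ('V', an element count, then the element type)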
10297static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
10298 ASTContext::GetBuiltinTypeError &Error,
10299 bool &RequiresICE,
10300 bool AllowTypeModifiers) {
10301 // Modifiers.
10302 int HowLong = 0;
10303 bool Signed = false, Unsigned = false;
10304 RequiresICE = false;
10305
10306 // Read the prefixed modifiers first.
10307 bool Done = false;
10308 #ifndef NDEBUG
10309 bool IsSpecial = false;
10310 #endif
10311 while (!Done) {
10312 switch (*Str++) {
10313 default: Done = true; --Str; break;
10314 case 'I':
10315 RequiresICE = true;
10316 break;
10317 case 'S':
10318 assert(!Unsigned && "Can't use both 'S' and 'U' modifiers!")((void)0);
10319 assert(!Signed && "Can't use 'S' modifier multiple times!")((void)0);
10320 Signed = true;
10321 break;
10322 case 'U':
10323 assert(!Signed && "Can't use both 'S' and 'U' modifiers!")((void)0);
10324 assert(!Unsigned && "Can't use 'U' modifier multiple times!")((void)0);
10325 Unsigned = true;
10326 break;
10327 case 'L':
10328 assert(!IsSpecial && "Can't use 'L' with 'W', 'N', 'Z' or 'O' modifiers")((void)0);
10329 assert(HowLong <= 2 && "Can't have LLLL modifier")((void)0);
10330 ++HowLong;
10331 break;
10332 case 'N':
10333 // 'N' behaves like 'L' for all non-LP64 targets and 'int' otherwise.
10334 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!")((void)0);
10335 assert(HowLong == 0 && "Can't use both 'L' and 'N' modifiers!")((void)0);
10336 #ifndef NDEBUG
10337 IsSpecial = true;
10338 #endif
10339 if (Context.getTargetInfo().getLongWidth() == 32)
10340 ++HowLong;
10341 break;
10342 case 'W':
10343 // This modifier represents int64 type.
10344 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!")((void)0);
10345 assert(HowLong == 0 && "Can't use both 'L' and 'W' modifiers!")((void)0);
10346 #ifndef NDEBUG
10347 IsSpecial = true;
10348 #endif
10349 switch (Context.getTargetInfo().getInt64Type()) {
10350 default:
10351 llvm_unreachable("Unexpected integer type")__builtin_unreachable();
10352 case TargetInfo::SignedLong:
10353 HowLong = 1;
10354 break;
10355 case TargetInfo::SignedLongLong:
10356 HowLong = 2;
10357 break;
10358 }
10359 break;
10360 case 'Z':
10361 // This modifier represents int32 type.
10362 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!")((void)0);
10363 assert(HowLong == 0 && "Can't use both 'L' and 'Z' modifiers!")((void)0);
10364 #ifndef NDEBUG
10365 IsSpecial = true;
10366 #endif
10367 switch (Context.getTargetInfo().getIntTypeByWidth(32, true)) {
10368 default:
10369 llvm_unreachable("Unexpected integer type")__builtin_unreachable();
10370 case TargetInfo::SignedInt:
10371 HowLong = 0;
10372 break;
10373 case TargetInfo::SignedLong:
10374 HowLong = 1;
10375 break;
10376 case TargetInfo::SignedLongLong:
10377 HowLong = 2;
10378 break;
10379 }
10380 break;
10381 case 'O':
10382 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!")((void)0);
10383 assert(HowLong == 0 && "Can't use both 'L' and 'O' modifiers!")((void)0);
10384 #ifndef NDEBUG
10385 IsSpecial = true;
10386 #endif
10387 if (Context.getLangOpts().OpenCL)
10388 HowLong = 1;
10389 else
10390 HowLong = 2;
10391 break;
10392 }
10393 }
10394
10395 QualType Type;
10396
10397 // Read the base type.
10398 switch (*Str++) {
10399 default: llvm_unreachable("Unknown builtin type letter!")__builtin_unreachable();
10400 case 'x':
10401 assert(HowLong == 0 && !Signed && !Unsigned &&
10402        "Bad modifiers used with 'x'!");
10403 Type = Context.Float16Ty;
10404 break;
10405 case 'y':
10406 assert(HowLong == 0 && !Signed && !Unsigned &&
10407        "Bad modifiers used with 'y'!");
10408 Type = Context.BFloat16Ty;
10409 break;
10410 case 'v':
10411 assert(HowLong == 0 && !Signed && !Unsigned &&
10412        "Bad modifiers used with 'v'!");
10413 Type = Context.VoidTy;
10414 break;
10415 case 'h':
10416 assert(HowLong == 0 && !Signed && !Unsigned &&
10417        "Bad modifiers used with 'h'!");
10418 Type = Context.HalfTy;
10419 break;
10420 case 'f':
10421 assert(HowLong == 0 && !Signed && !Unsigned &&
10422        "Bad modifiers used with 'f'!");
10423 Type = Context.FloatTy;
10424 break;
10425 case 'd':
10426 assert(HowLong < 3 && !Signed && !Unsigned &&
10427        "Bad modifiers used with 'd'!");
10428 if (HowLong == 1)
10429 Type = Context.LongDoubleTy;
10430 else if (HowLong == 2)
10431 Type = Context.Float128Ty;
10432 else
10433 Type = Context.DoubleTy;
10434 break;
10435 case 's':
10436 assert(HowLong == 0 && "Bad modifiers used with 's'!")((void)0);
10437 if (Unsigned)
10438 Type = Context.UnsignedShortTy;
10439 else
10440 Type = Context.ShortTy;
10441 break;
10442 case 'i':
10443 if (HowLong == 3)
10444 Type = Unsigned ? Context.UnsignedInt128Ty : Context.Int128Ty;
10445 else if (HowLong == 2)
10446 Type = Unsigned ? Context.UnsignedLongLongTy : Context.LongLongTy;
10447 else if (HowLong == 1)
10448 Type = Unsigned ? Context.UnsignedLongTy : Context.LongTy;
10449 else
10450 Type = Unsigned ? Context.UnsignedIntTy : Context.IntTy;
10451 break;
10452 case 'c':
10453 assert(HowLong == 0 && "Bad modifiers used with 'c'!")((void)0);
10454 if (Signed)
10455 Type = Context.SignedCharTy;
10456 else if (Unsigned)
10457 Type = Context.UnsignedCharTy;
10458 else
10459 Type = Context.CharTy;
10460 break;
10461 case 'b': // boolean
10462 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'b'!")((void)0);
10463 Type = Context.BoolTy;
10464 break;
10465 case 'z': // size_t.
10466 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'z'!")((void)0);
10467 Type = Context.getSizeType();
10468 break;
10469 case 'w': // wchar_t.
10470 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'w'!")((void)0);
10471 Type = Context.getWideCharType();
10472 break;
10473 case 'F':
10474 Type = Context.getCFConstantStringType();
10475 break;
10476 case 'G':
10477 Type = Context.getObjCIdType();
10478 break;
10479 case 'H':
10480 Type = Context.getObjCSelType();
10481 break;
10482 case 'M':
10483 Type = Context.getObjCSuperType();
10484 break;
10485 case 'a':
10486 Type = Context.getBuiltinVaListType();
10487 assert(!Type.isNull() && "builtin va list type not initialized!");
10488 break;
10489 case 'A':
10490 // This is a "reference" to a va_list; however, what exactly
10491 // this means depends on how va_list is defined. There are two
10492 // different kinds of va_list: ones passed by value, and ones
10493 // passed by reference. An example of a by-value va_list is
10494 // x86, where va_list is a char*. An example of by-ref va_list
10495 // is x86-64, where va_list is a __va_list_tag[1]. For x86,
10496 // we want this argument to be a char*&; for x86-64, we want
10497 // it to be a __va_list_tag*.
10498 Type = Context.getBuiltinVaListType();
10499 assert(!Type.isNull() && "builtin va list type not initialized!");
10500 if (Type->isArrayType())
10501 Type = Context.getArrayDecayedType(Type);
10502 else
10503 Type = Context.getLValueReferenceType(Type);
10504 break;
10505 case 'q': {
10506 char *End;
10507 unsigned NumElements = strtoul(Str, &End, 10);
10508 assert(End != Str && "Missing vector size");
10509 Str = End;
10510
10511 QualType ElementType = DecodeTypeFromStr(Str, Context, Error,
10512 RequiresICE, false);
10513 assert(!RequiresICE && "Can't require vector ICE");
10514
10515 Type = Context.getScalableVectorType(ElementType, NumElements);
10516 break;
10517 }
10518 case 'V': {
10519 char *End;
10520 unsigned NumElements = strtoul(Str, &End, 10);
10521 assert(End != Str && "Missing vector size");
10522 Str = End;
10523
10524 QualType ElementType = DecodeTypeFromStr(Str, Context, Error,
10525 RequiresICE, false);
10526 assert(!RequiresICE && "Can't require vector ICE");
10527
10528 // TODO: No way to make AltiVec vectors in builtins yet.
10529 Type = Context.getVectorType(ElementType, NumElements,
10530 VectorType::GenericVector);
10531 break;
10532 }
10533 case 'E': {
10534 char *End;
10535
10536 unsigned NumElements = strtoul(Str, &End, 10);
10537 assert(End != Str && "Missing vector size");
10538
10539 Str = End;
10540
10541 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE,
10542 false);
10543 Type = Context.getExtVectorType(ElementType, NumElements);
10544 break;
10545 }
10546 case 'X': {
10547 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE,
10548 false);
10549 assert(!RequiresICE && "Can't require complex ICE");
10550 Type = Context.getComplexType(ElementType);
10551 break;
10552 }
10553 case 'Y':
10554 Type = Context.getPointerDiffType();
10555 break;
10556 case 'P':
10557 Type = Context.getFILEType();
10558 if (Type.isNull()) {
10559 Error = ASTContext::GE_Missing_stdio;
10560 return {};
10561 }
10562 break;
10563 case 'J':
10564 if (Signed)
10565 Type = Context.getsigjmp_bufType();
10566 else
10567 Type = Context.getjmp_bufType();
10568
10569 if (Type.isNull()) {
10570 Error = ASTContext::GE_Missing_setjmp;
10571 return {};
10572 }
10573 break;
10574 case 'K':
10575 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'K'!");
10576 Type = Context.getucontext_tType();
10577
10578 if (Type.isNull()) {
10579 Error = ASTContext::GE_Missing_ucontext;
10580 return {};
10581 }
10582 break;
10583 case 'p':
10584 Type = Context.getProcessIDType();
10585 break;
10586 }
10587
10588 // If there are modifiers and if we're allowed to parse them, go for it.
10589 Done = !AllowTypeModifiers;
10590 while (!Done) {
10591 switch (char c = *Str++) {
10592 default: Done = true; --Str; break;
10593 case '*':
10594 case '&': {
10595 // Both pointers and references can have their pointee types
10596 // qualified with an address space.
10597 char *End;
10598 unsigned AddrSpace = strtoul(Str, &End, 10);
10599 if (End != Str) {
10600 // Note AddrSpace == 0 is not the same as an unspecified address space.
10601 Type = Context.getAddrSpaceQualType(
10602 Type,
10603 Context.getLangASForBuiltinAddressSpace(AddrSpace));
10604 Str = End;
10605 }
10606 if (c == '*')
10607 Type = Context.getPointerType(Type);
10608 else
10609 Type = Context.getLValueReferenceType(Type);
10610 break;
10611 }
10612 // FIXME: There's no way to have a built-in with an rvalue ref arg.
10613 case 'C':
10614 Type = Type.withConst();
10615 break;
10616 case 'D':
10617 Type = Context.getVolatileType(Type);
10618 break;
10619 case 'R':
10620 Type = Type.withRestrict();
10621 break;
10622 }
10623 }
10624
10625 assert((!RequiresICE || Type->isIntegralOrEnumerationType()) &&
10626 "Integer constant 'I' type must be an integer");
10627
10628 return Type;
10629}
10630
10631// On some targets such as PowerPC, some of the builtins are defined with custom
10632// type descriptors for target-dependent types. These descriptors are decoded in
10633// other functions, but it may be useful to be able to fall back to default
10634// descriptor decoding to define builtins mixing target-dependent and target-
10635// independent types. This function allows decoding one type descriptor with
10636// default decoding.
10637QualType ASTContext::DecodeTypeStr(const char *&Str, const ASTContext &Context,
10638 GetBuiltinTypeError &Error, bool &RequireICE,
10639 bool AllowTypeModifiers) const {
10640 return DecodeTypeFromStr(Str, Context, Error, RequireICE, AllowTypeModifiers);
10641}
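
// Editorial note, not part of the analyzed file: a worked example of the
// default decoding implemented above. The hypothetical descriptor "vC*"
// decodes as
//   'v'  -> base type 'void'
//   'C'  -> add 'const' to the type built so far
//   '*'  -> wrap the result in a pointer
// yielding 'const void *'. A trailing '.' in a builtin's full signature
// string marks the function as variadic (see GetBuiltinType below).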
10642
10643/// GetBuiltinType - Return the type for the specified builtin.
10644QualType ASTContext::GetBuiltinType(unsigned Id,
10645 GetBuiltinTypeError &Error,
10646 unsigned *IntegerConstantArgs) const {
10647 const char *TypeStr = BuiltinInfo.getTypeString(Id);
10648 if (TypeStr[0] == '\0') {
10649 Error = GE_Missing_type;
10650 return {};
10651 }
10652
10653 SmallVector<QualType, 8> ArgTypes;
10654
10655 bool RequiresICE = false;
10656 Error = GE_None;
10657 QualType ResType = DecodeTypeFromStr(TypeStr, *this, Error,
10658 RequiresICE, true);
10659 if (Error != GE_None)
10660 return {};
10661
10662 assert(!RequiresICE && "Result of intrinsic cannot be required to be an ICE");
10663
10664 while (TypeStr[0] && TypeStr[0] != '.') {
10665 QualType Ty = DecodeTypeFromStr(TypeStr, *this, Error, RequiresICE, true);
10666 if (Error != GE_None)
10667 return {};
10668
10669 // If this argument is required to be an IntegerConstantExpression and the
10670 // caller cares, fill in the bitmask we return.
10671 if (RequiresICE && IntegerConstantArgs)
10672 *IntegerConstantArgs |= 1 << ArgTypes.size();
10673
10674 // Do array -> pointer decay. The builtin should use the decayed type.
10675 if (Ty->isArrayType())
10676 Ty = getArrayDecayedType(Ty);
10677
10678 ArgTypes.push_back(Ty);
10679 }
10680
10681 if (Id == Builtin::BI__GetExceptionInfo)
10682 return {};
10683
10684 assert((TypeStr[0] != '.' || TypeStr[1] == 0) &&
10685 "'.' should only occur at end of builtin type list!");
10686
10687 bool Variadic = (TypeStr[0] == '.');
10688
10689 FunctionType::ExtInfo EI(getDefaultCallingConvention(
10690 Variadic, /*IsCXXMethod=*/false, /*IsBuiltin=*/true));
10691 if (BuiltinInfo.isNoReturn(Id)) EI = EI.withNoReturn(true);
10692
10693
10694 // We really shouldn't be making a no-proto type here.
10695 if (ArgTypes.empty() && Variadic && !getLangOpts().CPlusPlus)
10696 return getFunctionNoProtoType(ResType, EI);
10697
10698 FunctionProtoType::ExtProtoInfo EPI;
10699 EPI.ExtInfo = EI;
10700 EPI.Variadic = Variadic;
10701 if (getLangOpts().CPlusPlus && BuiltinInfo.isNoThrow(Id))
10702 EPI.ExceptionSpec.Type =
10703 getLangOpts().CPlusPlus11 ? EST_BasicNoexcept : EST_DynamicNone;
10704
10705 return getFunctionType(ResType, ArgTypes, EPI);
10706}
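
// Editorial sketch, not part of the analyzed file: querying the type of a
// builtin by ID together with the bitmask of arguments that must be integer
// constant expressions. A non-GE_None error means a required system header
// type (FILE, jmp_buf, ucontext_t, ...) was not available. Assumes this
// file's include set; BuiltinID would come from clang::Builtin.
static clang::QualType builtinTypeOrNull(const clang::ASTContext &Ctx,
                                         unsigned BuiltinID,
                                         unsigned &ICEArguments) {
  clang::ASTContext::GetBuiltinTypeError Error = clang::ASTContext::GE_None;
  ICEArguments = 0;
  clang::QualType T = Ctx.GetBuiltinType(BuiltinID, Error, &ICEArguments);
  return Error == clang::ASTContext::GE_None ? T : clang::QualType();
}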
10707
10708static GVALinkage basicGVALinkageForFunction(const ASTContext &Context,
10709 const FunctionDecl *FD) {
10710 if (!FD->isExternallyVisible())
10711 return GVA_Internal;
10712
10713 // Non-user-provided functions get emitted as weak definitions with every
10714 // use, no matter whether they've been explicitly instantiated etc.
10715 if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
10716 if (!MD->isUserProvided())
10717 return GVA_DiscardableODR;
10718
10719 GVALinkage External;
10720 switch (FD->getTemplateSpecializationKind()) {
10721 case TSK_Undeclared:
10722 case TSK_ExplicitSpecialization:
10723 External = GVA_StrongExternal;
10724 break;
10725
10726 case TSK_ExplicitInstantiationDefinition:
10727 return GVA_StrongODR;
10728
10729 // C++11 [temp.explicit]p10:
10730 // [ Note: The intent is that an inline function that is the subject of
10731 // an explicit instantiation declaration will still be implicitly
10732 // instantiated when used so that the body can be considered for
10733 // inlining, but that no out-of-line copy of the inline function would be
10734 // generated in the translation unit. -- end note ]
10735 case TSK_ExplicitInstantiationDeclaration:
10736 return GVA_AvailableExternally;
10737
10738 case TSK_ImplicitInstantiation:
10739 External = GVA_DiscardableODR;
10740 break;
10741 }
10742
10743 if (!FD->isInlined())
10744 return External;
10745
10746 if ((!Context.getLangOpts().CPlusPlus &&
10747 !Context.getTargetInfo().getCXXABI().isMicrosoft() &&
10748 !FD->hasAttr<DLLExportAttr>()) ||
10749 FD->hasAttr<GNUInlineAttr>()) {
10750 // FIXME: This doesn't match gcc's behavior for dllexport inline functions.
10751
10752 // GNU or C99 inline semantics. Determine whether this symbol should be
10753 // externally visible.
10754 if (FD->isInlineDefinitionExternallyVisible())
10755 return External;
10756
10757 // C99 inline semantics, where the symbol is not externally visible.
10758 return GVA_AvailableExternally;
10759 }
10760
10761 // Functions specified with extern and inline in -fms-compatibility mode
10762 // forcibly get emitted. While the body of the function cannot be later
10763 // replaced, the function definition cannot be discarded.
10764 if (FD->isMSExternInline())
10765 return GVA_StrongODR;
10766
10767 return GVA_DiscardableODR;
10768}
10769
10770static GVALinkage adjustGVALinkageForAttributes(const ASTContext &Context,
10771 const Decl *D, GVALinkage L) {
10772 // See http://msdn.microsoft.com/en-us/library/xa0d9ste.aspx
10773 // dllexport/dllimport on inline functions.
10774 if (D->hasAttr<DLLImportAttr>()) {
10775 if (L == GVA_DiscardableODR || L == GVA_StrongODR)
10776 return GVA_AvailableExternally;
10777 } else if (D->hasAttr<DLLExportAttr>()) {
10778 if (L == GVA_DiscardableODR)
10779 return GVA_StrongODR;
10780 } else if (Context.getLangOpts().CUDA && Context.getLangOpts().CUDAIsDevice) {
10781 // Device-side functions with __global__ attribute must always be
10782 // visible externally so they can be launched from host.
10783 if (D->hasAttr<CUDAGlobalAttr>() &&
10784 (L == GVA_DiscardableODR || L == GVA_Internal))
10785 return GVA_StrongODR;
10786 // Single source offloading languages like CUDA/HIP need to be able to
10787 // access static device variables from host code of the same compilation
10788 // unit. This is done by externalizing the static variable with a name
10789 // shared between the host and device compilations; the name is the same
10790 // within a single compilation unit but differs across different
10791 // compilation units.
10792 if (Context.shouldExternalizeStaticVar(D))
10793 return GVA_StrongExternal;
10794 }
10795 return L;
10796}
10797
10798/// Adjust the GVALinkage for a declaration based on what an external AST source
10799/// knows about whether there can be other definitions of this declaration.
10800static GVALinkage
10801adjustGVALinkageForExternalDefinitionKind(const ASTContext &Ctx, const Decl *D,
10802 GVALinkage L) {
10803 ExternalASTSource *Source = Ctx.getExternalSource();
10804 if (!Source)
10805 return L;
10806
10807 switch (Source->hasExternalDefinitions(D)) {
10808 case ExternalASTSource::EK_Never:
10809 // Other translation units rely on us to provide the definition.
10810 if (L == GVA_DiscardableODR)
10811 return GVA_StrongODR;
10812 break;
10813
10814 case ExternalASTSource::EK_Always:
10815 return GVA_AvailableExternally;
10816
10817 case ExternalASTSource::EK_ReplyHazy:
10818 break;
10819 }
10820 return L;
10821}
10822
10823GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) const {
10824 return adjustGVALinkageForExternalDefinitionKind(*this, FD,
10825 adjustGVALinkageForAttributes(*this, FD,
10826 basicGVALinkageForFunction(*this, FD)));
10827}
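
// Editorial sketch, not part of the analyzed file: how a CodeGen-style client
// typically consumes the result of the three steps above. The two ODR
// linkages are, roughly, the definitions that may be emitted in several
// translation units and merged by the linker.
static bool mayBeEmittedInMultipleTUs(const clang::ASTContext &Ctx,
                                      const clang::FunctionDecl *FD) {
  clang::GVALinkage L = Ctx.GetGVALinkageForFunction(FD);
  return L == clang::GVA_DiscardableODR || L == clang::GVA_StrongODR;
}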
10828
10829static GVALinkage basicGVALinkageForVariable(const ASTContext &Context,
10830 const VarDecl *VD) {
10831 if (!VD->isExternallyVisible())
10832 return GVA_Internal;
10833
10834 if (VD->isStaticLocal()) {
10835 const DeclContext *LexicalContext = VD->getParentFunctionOrMethod();
10836 while (LexicalContext && !isa<FunctionDecl>(LexicalContext))
10837 LexicalContext = LexicalContext->getLexicalParent();
10838
10839 // ObjC Blocks can create local variables that don't have a FunctionDecl
10840 // LexicalContext.
10841 if (!LexicalContext)
10842 return GVA_DiscardableODR;
10843
10844 // Otherwise, let the static local variable inherit its linkage from the
10845 // nearest enclosing function.
10846 auto StaticLocalLinkage =
10847 Context.GetGVALinkageForFunction(cast<FunctionDecl>(LexicalContext));
10848
10849 // Itanium ABI 5.2.2: "Each COMDAT group [for a static local variable] must
10850 // be emitted in any object with references to the symbol for the object it
10851 // contains, whether inline or out-of-line."
10852 // Similar behavior is observed with MSVC. An alternative ABI could use
10853 // StrongODR/AvailableExternally to match the function, but none are
10854 // known/supported currently.
10855 if (StaticLocalLinkage == GVA_StrongODR ||
10856 StaticLocalLinkage == GVA_AvailableExternally)
10857 return GVA_DiscardableODR;
10858 return StaticLocalLinkage;
10859 }
10860
10861 // MSVC treats in-class initialized static data members as definitions.
10862 // By giving them non-strong linkage, out-of-line definitions won't
10863 // cause link errors.
10864 if (Context.isMSStaticDataMemberInlineDefinition(VD))
10865 return GVA_DiscardableODR;
10866
10867 // Most non-template variables have strong linkage; inline variables are
10868 // linkonce_odr or (occasionally, for compatibility) weak_odr.
10869 GVALinkage StrongLinkage;
10870 switch (Context.getInlineVariableDefinitionKind(VD)) {
10871 case ASTContext::InlineVariableDefinitionKind::None:
10872 StrongLinkage = GVA_StrongExternal;
10873 break;
10874 case ASTContext::InlineVariableDefinitionKind::Weak:
10875 case ASTContext::InlineVariableDefinitionKind::WeakUnknown:
10876 StrongLinkage = GVA_DiscardableODR;
10877 break;
10878 case ASTContext::InlineVariableDefinitionKind::Strong:
10879 StrongLinkage = GVA_StrongODR;
10880 break;
10881 }
10882
10883 switch (VD->getTemplateSpecializationKind()) {
10884 case TSK_Undeclared:
10885 return StrongLinkage;
10886
10887 case TSK_ExplicitSpecialization:
10888 return Context.getTargetInfo().getCXXABI().isMicrosoft() &&
10889 VD->isStaticDataMember()
10890 ? GVA_StrongODR
10891 : StrongLinkage;
10892
10893 case TSK_ExplicitInstantiationDefinition:
10894 return GVA_StrongODR;
10895
10896 case TSK_ExplicitInstantiationDeclaration:
10897 return GVA_AvailableExternally;
10898
10899 case TSK_ImplicitInstantiation:
10900 return GVA_DiscardableODR;
10901 }
10902
10903 llvm_unreachable("Invalid Linkage!");
10904}
10905
10906GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) {
10907 return adjustGVALinkageForExternalDefinitionKind(*this, VD,
10908 adjustGVALinkageForAttributes(*this, VD,
10909 basicGVALinkageForVariable(*this, VD)));
10910}
10911
10912bool ASTContext::DeclMustBeEmitted(const Decl *D) {
10913 if (const auto *VD = dyn_cast<VarDecl>(D)) {
10914 if (!VD->isFileVarDecl())
10915 return false;
10916 // Global named register variables (GNU extension) are never emitted.
10917 if (VD->getStorageClass() == SC_Register)
10918 return false;
10919 if (VD->getDescribedVarTemplate() ||
10920 isa<VarTemplatePartialSpecializationDecl>(VD))
10921 return false;
10922 } else if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
10923 // We never need to emit an uninstantiated function template.
10924 if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
10925 return false;
10926 } else if (isa<PragmaCommentDecl>(D))
10927 return true;
10928 else if (isa<PragmaDetectMismatchDecl>(D))
10929 return true;
10930 else if (isa<OMPRequiresDecl>(D))
10931 return true;
10932 else if (isa<OMPThreadPrivateDecl>(D))
10933 return !D->getDeclContext()->isDependentContext();
10934 else if (isa<OMPAllocateDecl>(D))
10935 return !D->getDeclContext()->isDependentContext();
10936 else if (isa<OMPDeclareReductionDecl>(D) || isa<OMPDeclareMapperDecl>(D))
10937 return !D->getDeclContext()->isDependentContext();
10938 else if (isa<ImportDecl>(D))
10939 return true;
10940 else
10941 return false;
10942
10943 // If this is a member of a class template, we do not need to emit it.
10944 if (D->getDeclContext()->isDependentContext())
10945 return false;
10946
10947 // Weak references don't produce any output by themselves.
10948 if (D->hasAttr<WeakRefAttr>())
10949 return false;
10950
10951 // Aliases and used decls are required.
10952 if (D->hasAttr<AliasAttr>() || D->hasAttr<UsedAttr>())
10953 return true;
10954
10955 if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
10956 // Forward declarations aren't required.
10957 if (!FD->doesThisDeclarationHaveABody())
10958 return FD->doesDeclarationForceExternallyVisibleDefinition();
10959
10960 // Constructors and destructors are required.
10961 if (FD->hasAttr<ConstructorAttr>() || FD->hasAttr<DestructorAttr>())
10962 return true;
10963
10964 // The key function for a class is required. This rule only comes
10965 // into play when inline functions can be key functions, though.
10966 if (getTargetInfo().getCXXABI().canKeyFunctionBeInline()) {
10967 if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
10968 const CXXRecordDecl *RD = MD->getParent();
10969 if (MD->isOutOfLine() && RD->isDynamicClass()) {
10970 const CXXMethodDecl *KeyFunc = getCurrentKeyFunction(RD);
10971 if (KeyFunc && KeyFunc->getCanonicalDecl() == MD->getCanonicalDecl())
10972 return true;
10973 }
10974 }
10975 }
10976
10977 GVALinkage Linkage = GetGVALinkageForFunction(FD);
10978
10979 // static, static inline, always_inline, and extern inline functions can
10980 // always be deferred. Normal inline functions can be deferred in C99/C++.
10981 // Implicit template instantiations can also be deferred in C++.
10982 return !isDiscardableGVALinkage(Linkage);
10983 }
10984
10985 const auto *VD = cast<VarDecl>(D);
10986 assert(VD->isFileVarDecl() && "Expected file scoped var");
10987
10988 // If the decl is marked as `declare target to`, it should be emitted for the
10989 // host and for the device.
10990 if (LangOpts.OpenMP &&
10991 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
10992 return true;
10993
10994 if (VD->isThisDeclarationADefinition() == VarDecl::DeclarationOnly &&
10995 !isMSStaticDataMemberInlineDefinition(VD))
10996 return false;
10997
10998 // Variables that can be needed in other TUs are required.
10999 auto Linkage = GetGVALinkageForVariable(VD);
11000 if (!isDiscardableGVALinkage(Linkage))
11001 return true;
11002
11003 // We never need to emit a variable that is available in another TU.
11004 if (Linkage == GVA_AvailableExternally)
11005 return false;
11006
11007 // Variables that have destruction with side-effects are required.
11008 if (VD->needsDestruction(*this))
11009 return true;
11010
11011 // Variables that have initialization with side-effects are required.
11012 if (VD->getInit() && VD->getInit()->HasSideEffects(*this) &&
11013 // We can get a value-dependent initializer during error recovery.
11014 (VD->getInit()->isValueDependent() || !VD->evaluateValue()))
11015 return true;
11016
11017 // Likewise, variables with tuple-like bindings are required if their
11018 // bindings have side-effects.
11019 if (const auto *DD = dyn_cast<DecompositionDecl>(VD))
11020 for (const auto *BD : DD->bindings())
11021 if (const auto *BindingVD = BD->getHoldingVar())
11022 if (DeclMustBeEmitted(BindingVD))
11023 return true;
11024
11025 return false;
11026}
11027
11028void ASTContext::forEachMultiversionedFunctionVersion(
11029 const FunctionDecl *FD,
11030 llvm::function_ref<void(FunctionDecl *)> Pred) const {
11031 assert(FD->isMultiVersion() && "Only valid for multiversioned functions");
11032 llvm::SmallDenseSet<const FunctionDecl*, 4> SeenDecls;
11033 FD = FD->getMostRecentDecl();
11034 // FIXME: The order of traversal here matters and depends on the order of
11035 // lookup results, which happens to be (mostly) oldest-to-newest, but we
11036 // shouldn't rely on that.
11037 for (auto *CurDecl :
11038 FD->getDeclContext()->getRedeclContext()->lookup(FD->getDeclName())) {
11039 FunctionDecl *CurFD = CurDecl->getAsFunction()->getMostRecentDecl();
11040 if (CurFD && hasSameType(CurFD->getType(), FD->getType()) &&
11041 std::end(SeenDecls) == llvm::find(SeenDecls, CurFD)) {
11042 SeenDecls.insert(CurFD);
11043 Pred(CurFD);
11044 }
11045 }
11046}
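
// Editorial sketch, not part of the analyzed file: collecting every version
// of a multiversioned function via the callback above. FD must satisfy
// isMultiVersion(), per the assertion in the function. Assumes this file's
// include set (llvm::SmallVectorImpl).
static void collectVersions(const clang::ASTContext &Ctx,
                            const clang::FunctionDecl *FD,
                            llvm::SmallVectorImpl<clang::FunctionDecl *> &Out) {
  Ctx.forEachMultiversionedFunctionVersion(
      FD, [&Out](clang::FunctionDecl *Version) { Out.push_back(Version); });
}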
11047
11048CallingConv ASTContext::getDefaultCallingConvention(bool IsVariadic,
11049 bool IsCXXMethod,
11050 bool IsBuiltin) const {
11051 // Pass through to the C++ ABI object
11052 if (IsCXXMethod)
11053 return ABI->getDefaultMethodCallConv(IsVariadic);
11054
11055 // Builtins ignore the user-specified default calling convention and keep the
11056 // target's default calling convention.
11057 if (!IsBuiltin) {
11058 switch (LangOpts.getDefaultCallingConv()) {
11059 case LangOptions::DCC_None:
11060 break;
11061 case LangOptions::DCC_CDecl:
11062 return CC_C;
11063 case LangOptions::DCC_FastCall:
11064 if (getTargetInfo().hasFeature("sse2") && !IsVariadic)
11065 return CC_X86FastCall;
11066 break;
11067 case LangOptions::DCC_StdCall:
11068 if (!IsVariadic)
11069 return CC_X86StdCall;
11070 break;
11071 case LangOptions::DCC_VectorCall:
11072 // __vectorcall cannot be applied to variadic functions.
11073 if (!IsVariadic)
11074 return CC_X86VectorCall;
11075 break;
11076 case LangOptions::DCC_RegCall:
11077 // __regcall cannot be applied to variadic functions.
11078 if (!IsVariadic)
11079 return CC_X86RegCall;
11080 break;
11081 }
11082 }
11083 return Target->getDefaultCallingConv();
11084}
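
// Editorial sketch, not part of the analyzed file: querying the convention
// for an ordinary free function. Variadic functions fall back to the target
// default for the register-based conventions handled above.
static clang::CallingConv ccForFreeFunction(const clang::ASTContext &Ctx,
                                            bool IsVariadic) {
  return Ctx.getDefaultCallingConvention(IsVariadic,
                                         /*IsCXXMethod=*/false,
                                         /*IsBuiltin=*/false);
}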
11085
11086bool ASTContext::isNearlyEmpty(const CXXRecordDecl *RD) const {
11087 // Pass through to the C++ ABI object
11088 return ABI->isNearlyEmpty(RD);
11089}
11090
11091VTableContextBase *ASTContext::getVTableContext() {
11092 if (!VTContext.get()) {
11093 auto ABI = Target->getCXXABI();
11094 if (ABI.isMicrosoft())
11095 VTContext.reset(new MicrosoftVTableContext(*this));
11096 else {
11097 auto ComponentLayout = getLangOpts().RelativeCXXABIVTables
11098 ? ItaniumVTableContext::Relative
11099 : ItaniumVTableContext::Pointer;
11100 VTContext.reset(new ItaniumVTableContext(*this, ComponentLayout));
11101 }
11102 }
11103 return VTContext.get();
11104}
11105
11106MangleContext *ASTContext::createMangleContext(const TargetInfo *T) {
11107 if (!T)
11108 T = Target;
11109 switch (T->getCXXABI().getKind()) {
11110 case TargetCXXABI::AppleARM64:
11111 case TargetCXXABI::Fuchsia:
11112 case TargetCXXABI::GenericAArch64:
11113 case TargetCXXABI::GenericItanium:
11114 case TargetCXXABI::GenericARM:
11115 case TargetCXXABI::GenericMIPS:
11116 case TargetCXXABI::iOS:
11117 case TargetCXXABI::WebAssembly:
11118 case TargetCXXABI::WatchOS:
11119 case TargetCXXABI::XL:
11120 return ItaniumMangleContext::create(*this, getDiagnostics());
11121 case TargetCXXABI::Microsoft:
11122 return MicrosoftMangleContext::create(*this, getDiagnostics());
11123 }
11124 llvm_unreachable("Unsupported ABI");
11125}
11126
11127MangleContext *ASTContext::createDeviceMangleContext(const TargetInfo &T) {
11128 assert(T.getCXXABI().getKind() != TargetCXXABI::Microsoft &&
11129 "Device mangle context does not support Microsoft mangling.");
11130 switch (T.getCXXABI().getKind()) {
11131 case TargetCXXABI::AppleARM64:
11132 case TargetCXXABI::Fuchsia:
11133 case TargetCXXABI::GenericAArch64:
11134 case TargetCXXABI::GenericItanium:
11135 case TargetCXXABI::GenericARM:
11136 case TargetCXXABI::GenericMIPS:
11137 case TargetCXXABI::iOS:
11138 case TargetCXXABI::WebAssembly:
11139 case TargetCXXABI::WatchOS:
11140 case TargetCXXABI::XL:
11141 return ItaniumMangleContext::create(
11142 *this, getDiagnostics(),
11143 [](ASTContext &, const NamedDecl *ND) -> llvm::Optional<unsigned> {
11144 if (const auto *RD = dyn_cast<CXXRecordDecl>(ND))
11145 return RD->getDeviceLambdaManglingNumber();
11146 return llvm::None;
11147 });
11148 case TargetCXXABI::Microsoft:
11149 return MicrosoftMangleContext::create(*this, getDiagnostics());
11150 }
11151 llvm_unreachable("Unsupported ABI");
11152}
11153
11154CXXABI::~CXXABI() = default;
11155
11156size_t ASTContext::getSideTableAllocatedMemory() const {
11157 return ASTRecordLayouts.getMemorySize() +
11158 llvm::capacity_in_bytes(ObjCLayouts) +
11159 llvm::capacity_in_bytes(KeyFunctions) +
11160 llvm::capacity_in_bytes(ObjCImpls) +
11161 llvm::capacity_in_bytes(BlockVarCopyInits) +
11162 llvm::capacity_in_bytes(DeclAttrs) +
11163 llvm::capacity_in_bytes(TemplateOrInstantiation) +
11164 llvm::capacity_in_bytes(InstantiatedFromUsingDecl) +
11165 llvm::capacity_in_bytes(InstantiatedFromUsingShadowDecl) +
11166 llvm::capacity_in_bytes(InstantiatedFromUnnamedFieldDecl) +
11167 llvm::capacity_in_bytes(OverriddenMethods) +
11168 llvm::capacity_in_bytes(Types) +
11169 llvm::capacity_in_bytes(VariableArrayTypes);
11170}
11171
11172/// getIntTypeForBitwidth -
11173/// sets integer QualTy according to specified details:
11174/// bitwidth, signed/unsigned.
11175/// Returns empty type if there is no appropriate target types.
11176QualType ASTContext::getIntTypeForBitwidth(unsigned DestWidth,
11177 unsigned Signed) const {
11178 TargetInfo::IntType Ty = getTargetInfo().getIntTypeByWidth(DestWidth, Signed);
11179 CanQualType QualTy = getFromTargetType(Ty);
11180 if (!QualTy && DestWidth == 128)
11181 return Signed ? Int128Ty : UnsignedInt128Ty;
11182 return QualTy;
11183}
11184
11185/// getRealTypeForBitwidth -
11186/// sets floating point QualTy according to specified bitwidth.
11187/// Returns empty type if there is no appropriate target types.
11188QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth,
11189 bool ExplicitIEEE) const {
11190 TargetInfo::RealType Ty =
11191 getTargetInfo().getRealTypeByWidth(DestWidth, ExplicitIEEE);
11192 switch (Ty) {
11193 case TargetInfo::Float:
11194 return FloatTy;
11195 case TargetInfo::Double:
11196 return DoubleTy;
11197 case TargetInfo::LongDouble:
11198 return LongDoubleTy;
11199 case TargetInfo::Float128:
11200 return Float128Ty;
11201 case TargetInfo::NoFloat:
11202 return {};
11203 }
11204
11205 llvm_unreachable("Unhandled TargetInfo::RealType value");
11206}
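
// Editorial sketch, not part of the analyzed file: both helpers above return
// a null QualType when the target has no type of the requested width, so
// callers check isNull() before using the result. The fallback to 'double'
// here is an assumption made for the sketch only.
static clang::QualType realTypeOrDouble(const clang::ASTContext &Ctx,
                                        unsigned Bits) {
  clang::QualType T = Ctx.getRealTypeForBitwidth(Bits, /*ExplicitIEEE=*/false);
  if (T.isNull())
    return Ctx.DoubleTy; // no matching target type; fall back (sketch only)
  return T;
}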
11207
11208void ASTContext::setManglingNumber(const NamedDecl *ND, unsigned Number) {
11209 if (Number > 1)
11210 MangleNumbers[ND] = Number;
11211}
11212
11213unsigned ASTContext::getManglingNumber(const NamedDecl *ND) const {
11214 auto I = MangleNumbers.find(ND);
11215 return I != MangleNumbers.end() ? I->second : 1;
11216}
11217
11218void ASTContext::setStaticLocalNumber(const VarDecl *VD, unsigned Number) {
11219 if (Number > 1)
11220 StaticLocalNumbers[VD] = Number;
11221}
11222
11223unsigned ASTContext::getStaticLocalNumber(const VarDecl *VD) const {
11224 auto I = StaticLocalNumbers.find(VD);
11225 return I != StaticLocalNumbers.end() ? I->second : 1;
11226}
11227
11228MangleNumberingContext &
11229ASTContext::getManglingNumberContext(const DeclContext *DC) {
11230 assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
11231 std::unique_ptr<MangleNumberingContext> &MCtx = MangleNumberingContexts[DC];
11232 if (!MCtx)
11233 MCtx = createMangleNumberingContext();
11234 return *MCtx;
11235}
11236
11237MangleNumberingContext &
11238ASTContext::getManglingNumberContext(NeedExtraManglingDecl_t, const Decl *D) {
11239 assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
11240 std::unique_ptr<MangleNumberingContext> &MCtx =
11241 ExtraMangleNumberingContexts[D];
11242 if (!MCtx)
11243 MCtx = createMangleNumberingContext();
11244 return *MCtx;
11245}
11246
11247std::unique_ptr<MangleNumberingContext>
11248ASTContext::createMangleNumberingContext() const {
11249 return ABI->createMangleNumberingContext();
11250}
11251
11252const CXXConstructorDecl *
11253ASTContext::getCopyConstructorForExceptionObject(CXXRecordDecl *RD) {
11254 return ABI->getCopyConstructorForExceptionObject(
11255 cast<CXXRecordDecl>(RD->getFirstDecl()));
11256}
11257
11258void ASTContext::addCopyConstructorForExceptionObject(CXXRecordDecl *RD,
11259 CXXConstructorDecl *CD) {
11260 return ABI->addCopyConstructorForExceptionObject(
11261 cast<CXXRecordDecl>(RD->getFirstDecl()),
11262 cast<CXXConstructorDecl>(CD->getFirstDecl()));
11263}
11264
11265void ASTContext::addTypedefNameForUnnamedTagDecl(TagDecl *TD,
11266 TypedefNameDecl *DD) {
11267 return ABI->addTypedefNameForUnnamedTagDecl(TD, DD);
11268}
11269
11270TypedefNameDecl *
11271ASTContext::getTypedefNameForUnnamedTagDecl(const TagDecl *TD) {
11272 return ABI->getTypedefNameForUnnamedTagDecl(TD);
11273}
11274
11275void ASTContext::addDeclaratorForUnnamedTagDecl(TagDecl *TD,
11276 DeclaratorDecl *DD) {
11277 return ABI->addDeclaratorForUnnamedTagDecl(TD, DD);
11278}
11279
11280DeclaratorDecl *ASTContext::getDeclaratorForUnnamedTagDecl(const TagDecl *TD) {
11281 return ABI->getDeclaratorForUnnamedTagDecl(TD);
11282}
11283
11284void ASTContext::setParameterIndex(const ParmVarDecl *D, unsigned int index) {
11285 ParamIndices[D] = index;
11286}
11287
11288unsigned ASTContext::getParameterIndex(const ParmVarDecl *D) const {
11289 ParameterIndexTable::const_iterator I = ParamIndices.find(D);
11290 assert(I != ParamIndices.end() &&
11291 "ParmIndices lacks entry set by ParmVarDecl");
11292 return I->second;
11293}
11294
11295QualType ASTContext::getStringLiteralArrayType(QualType EltTy,
11296 unsigned Length) const {
11297 // A C++ string literal has a const-qualified element type (C++ 2.13.4p1).
11298 if (getLangOpts().CPlusPlus || getLangOpts().ConstStrings)
11299 EltTy = EltTy.withConst();
11300
11301 EltTy = adjustStringLiteralBaseType(EltTy);
11302
11303 // Get an array type for the string, according to C99 6.4.5. This includes
11304 // the null terminator character.
11305 return getConstantArrayType(EltTy, llvm::APInt(32, Length + 1), nullptr,
11306 ArrayType::Normal, /*IndexTypeQuals*/ 0);
11307}
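
// Editorial sketch, not part of the analyzed file: the array type Clang gives
// a narrow string literal of this length. In C++ (or under -fconst-strings)
// the element type comes back const-qualified, and the array size includes
// the NUL terminator.
static clang::QualType narrowLiteralType(const clang::ASTContext &Ctx,
                                         llvm::StringRef Text) {
  return Ctx.getStringLiteralArrayType(Ctx.CharTy,
                                       static_cast<unsigned>(Text.size()));
}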
11308
11309StringLiteral *
11310ASTContext::getPredefinedStringLiteralFromCache(StringRef Key) const {
11311 StringLiteral *&Result = StringLiteralCache[Key];
11312 if (!Result)
11313 Result = StringLiteral::Create(
11314 *this, Key, StringLiteral::Ascii,
11315 /*Pascal*/ false, getStringLiteralArrayType(CharTy, Key.size()),
11316 SourceLocation());
11317 return Result;
11318}
11319
11320MSGuidDecl *
11321ASTContext::getMSGuidDecl(MSGuidDecl::Parts Parts) const {
11322 assert(MSGuidTagDecl && "building MS GUID without MS extensions?");
11323
11324 llvm::FoldingSetNodeID ID;
11325 MSGuidDecl::Profile(ID, Parts);
11326
11327 void *InsertPos;
11328 if (MSGuidDecl *Existing = MSGuidDecls.FindNodeOrInsertPos(ID, InsertPos))
11329 return Existing;
11330
11331 QualType GUIDType = getMSGuidType().withConst();
11332 MSGuidDecl *New = MSGuidDecl::Create(*this, GUIDType, Parts);
11333 MSGuidDecls.InsertNode(New, InsertPos);
11334 return New;
11335}
11336
11337TemplateParamObjectDecl *
11338ASTContext::getTemplateParamObjectDecl(QualType T, const APValue &V) const {
11339 assert(T->isRecordType() && "template param object of unexpected type");
11340
11341 // C++ [temp.param]p8:
11342 // [...] a static storage duration object of type 'const T' [...]
11343 T.addConst();
11344
11345 llvm::FoldingSetNodeID ID;
11346 TemplateParamObjectDecl::Profile(ID, T, V);
11347
11348 void *InsertPos;
11349 if (TemplateParamObjectDecl *Existing =
11350 TemplateParamObjectDecls.FindNodeOrInsertPos(ID, InsertPos))
11351 return Existing;
11352
11353 TemplateParamObjectDecl *New = TemplateParamObjectDecl::Create(*this, T, V);
11354 TemplateParamObjectDecls.InsertNode(New, InsertPos);
11355 return New;
11356}
11357
11358bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const {
11359 const llvm::Triple &T = getTargetInfo().getTriple();
11360 if (!T.isOSDarwin())
11361 return false;
11362
11363 if (!(T.isiOS() && T.isOSVersionLT(7)) &&
11364 !(T.isMacOSX() && T.isOSVersionLT(10, 9)))
11365 return false;
11366
11367 QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
11368 CharUnits sizeChars = getTypeSizeInChars(AtomicTy);
11369 uint64_t Size = sizeChars.getQuantity();
11370 CharUnits alignChars = getTypeAlignInChars(AtomicTy);
11371 unsigned Align = alignChars.getQuantity();
11372 unsigned MaxInlineWidthInBits = getTargetInfo().getMaxAtomicInlineWidth();
11373 return (Size != Align || toBits(sizeChars) > MaxInlineWidthInBits);
11374}
11375
11376bool
11377ASTContext::ObjCMethodsAreEqual(const ObjCMethodDecl *MethodDecl,
11378 const ObjCMethodDecl *MethodImpl) {
11379 // No point trying to match an unavailable/deprecated method.
11380 if (MethodDecl->hasAttr<UnavailableAttr>()
11381 || MethodDecl->hasAttr<DeprecatedAttr>())
11382 return false;
11383 if (MethodDecl->getObjCDeclQualifier() !=
11384 MethodImpl->getObjCDeclQualifier())
11385 return false;
11386 if (!hasSameType(MethodDecl->getReturnType(), MethodImpl->getReturnType()))
11387 return false;
11388
11389 if (MethodDecl->param_size() != MethodImpl->param_size())
11390 return false;
11391
11392 for (ObjCMethodDecl::param_const_iterator IM = MethodImpl->param_begin(),
11393 IF = MethodDecl->param_begin(), EM = MethodImpl->param_end(),
11394 EF = MethodDecl->param_end();
11395 IM != EM && IF != EF; ++IM, ++IF) {
11396 const ParmVarDecl *DeclVar = (*IF);
11397 const ParmVarDecl *ImplVar = (*IM);
11398 if (ImplVar->getObjCDeclQualifier() != DeclVar->getObjCDeclQualifier())
11399 return false;
11400 if (!hasSameType(DeclVar->getType(), ImplVar->getType()))
11401 return false;
11402 }
11403
11404 return (MethodDecl->isVariadic() == MethodImpl->isVariadic());
11405}
11406
11407uint64_t ASTContext::getTargetNullPointerValue(QualType QT) const {
11408 LangAS AS;
11409 if (QT->getUnqualifiedDesugaredType()->isNullPtrType())
11410 AS = LangAS::Default;
11411 else
11412 AS = QT->getPointeeType().getAddressSpace();
11413
11414 return getTargetInfo().getNullPointerValue(AS);
11415}
11416
11417unsigned ASTContext::getTargetAddressSpace(LangAS AS) const {
11418 if (isTargetAddressSpace(AS))
11419 return toTargetAddressSpace(AS);
11420 else
11421 return (*AddrSpaceMap)[(unsigned)AS];
11422}
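
// Editorial sketch, not part of the analyzed file: mapping the language-level
// address space of a pointee type to the numeric address space used in LLVM
// IR, via the function above.
static unsigned targetASForPointee(const clang::ASTContext &Ctx,
                                   clang::QualType Pointee) {
  return Ctx.getTargetAddressSpace(Pointee.getAddressSpace());
}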
11423
11424QualType ASTContext::getCorrespondingSaturatedType(QualType Ty) const {
11425 assert(Ty->isFixedPointType());
11426
11427 if (Ty->isSaturatedFixedPointType()) return Ty;
11428
11429 switch (Ty->castAs<BuiltinType>()->getKind()) {
11430 default:
11431 llvm_unreachable("Not a fixed point type!");
11432 case BuiltinType::ShortAccum:
11433 return SatShortAccumTy;
11434 case BuiltinType::Accum:
11435 return SatAccumTy;
11436 case BuiltinType::LongAccum:
11437 return SatLongAccumTy;
11438 case BuiltinType::UShortAccum:
11439 return SatUnsignedShortAccumTy;
11440 case BuiltinType::UAccum:
11441 return SatUnsignedAccumTy;
11442 case BuiltinType::ULongAccum:
11443 return SatUnsignedLongAccumTy;
11444 case BuiltinType::ShortFract:
11445 return SatShortFractTy;
11446 case BuiltinType::Fract:
11447 return SatFractTy;
11448 case BuiltinType::LongFract:
11449 return SatLongFractTy;
11450 case BuiltinType::UShortFract:
11451 return SatUnsignedShortFractTy;
11452 case BuiltinType::UFract:
11453 return SatUnsignedFractTy;
11454 case BuiltinType::ULongFract:
11455 return SatUnsignedLongFractTy;
11456 }
11457}
11458
11459LangAS ASTContext::getLangASForBuiltinAddressSpace(unsigned AS) const {
11460 if (LangOpts.OpenCL)
11461 return getTargetInfo().getOpenCLBuiltinAddressSpace(AS);
11462
11463 if (LangOpts.CUDA)
11464 return getTargetInfo().getCUDABuiltinAddressSpace(AS);
11465
11466 return getLangASFromTargetAS(AS);
11467}
11468
11469// Explicitly instantiate this in case a Redeclarable<T> is used from a TU that
11470// doesn't include ASTContext.h
11471template
11472clang::LazyGenerationalUpdatePtr<
11473 const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::ValueType
11474clang::LazyGenerationalUpdatePtr<
11475 const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::makeValue(
11476 const clang::ASTContext &Ctx, Decl *Value);
11477
11478unsigned char ASTContext::getFixedPointScale(QualType Ty) const {
11479 assert(Ty->isFixedPointType());
11480
11481 const TargetInfo &Target = getTargetInfo();
11482 switch (Ty->castAs<BuiltinType>()->getKind()) {
11483 default:
11484 llvm_unreachable("Not a fixed point type!");
11485 case BuiltinType::ShortAccum:
11486 case BuiltinType::SatShortAccum:
11487 return Target.getShortAccumScale();
11488 case BuiltinType::Accum:
11489 case BuiltinType::SatAccum:
11490 return Target.getAccumScale();
11491 case BuiltinType::LongAccum:
11492 case BuiltinType::SatLongAccum:
11493 return Target.getLongAccumScale();
11494 case BuiltinType::UShortAccum:
11495 case BuiltinType::SatUShortAccum:
11496 return Target.getUnsignedShortAccumScale();
11497 case BuiltinType::UAccum:
11498 case BuiltinType::SatUAccum:
11499 return Target.getUnsignedAccumScale();
11500 case BuiltinType::ULongAccum:
11501 case BuiltinType::SatULongAccum:
11502 return Target.getUnsignedLongAccumScale();
11503 case BuiltinType::ShortFract:
11504 case BuiltinType::SatShortFract:
11505 return Target.getShortFractScale();
11506 case BuiltinType::Fract:
11507 case BuiltinType::SatFract:
11508 return Target.getFractScale();
11509 case BuiltinType::LongFract:
11510 case BuiltinType::SatLongFract:
11511 return Target.getLongFractScale();
11512 case BuiltinType::UShortFract:
11513 case BuiltinType::SatUShortFract:
11514 return Target.getUnsignedShortFractScale();
11515 case BuiltinType::UFract:
11516 case BuiltinType::SatUFract:
11517 return Target.getUnsignedFractScale();
11518 case BuiltinType::ULongFract:
11519 case BuiltinType::SatULongFract:
11520 return Target.getUnsignedLongFractScale();
11521 }
11522}
11523
11524unsigned char ASTContext::getFixedPointIBits(QualType Ty) const {
11525 assert(Ty->isFixedPointType());
11526
11527 const TargetInfo &Target = getTargetInfo();
11528 switch (Ty->castAs<BuiltinType>()->getKind()) {
11529 default:
11530 llvm_unreachable("Not a fixed point type!");
11531 case BuiltinType::ShortAccum:
11532 case BuiltinType::SatShortAccum:
11533 return Target.getShortAccumIBits();
11534 case BuiltinType::Accum:
11535 case BuiltinType::SatAccum:
11536 return Target.getAccumIBits();
11537 case BuiltinType::LongAccum:
11538 case BuiltinType::SatLongAccum:
11539 return Target.getLongAccumIBits();
11540 case BuiltinType::UShortAccum:
11541 case BuiltinType::SatUShortAccum:
11542 return Target.getUnsignedShortAccumIBits();
11543 case BuiltinType::UAccum:
11544 case BuiltinType::SatUAccum:
11545 return Target.getUnsignedAccumIBits();
11546 case BuiltinType::ULongAccum:
11547 case BuiltinType::SatULongAccum:
11548 return Target.getUnsignedLongAccumIBits();
11549 case BuiltinType::ShortFract:
11550 case BuiltinType::SatShortFract:
11551 case BuiltinType::Fract:
11552 case BuiltinType::SatFract:
11553 case BuiltinType::LongFract:
11554 case BuiltinType::SatLongFract:
11555 case BuiltinType::UShortFract:
11556 case BuiltinType::SatUShortFract:
11557 case BuiltinType::UFract:
11558 case BuiltinType::SatUFract:
11559 case BuiltinType::ULongFract:
11560 case BuiltinType::SatULongFract:
11561 return 0;
11562 }
11563}
11564
11565llvm::FixedPointSemantics
11566ASTContext::getFixedPointSemantics(QualType Ty) const {
11567 assert((Ty->isFixedPointType() || Ty->isIntegerType()) &&
11568 "Can only get the fixed point semantics for a "
11569 "fixed point or integer type.");
11570 if (Ty->isIntegerType())
11571 return llvm::FixedPointSemantics::GetIntegerSemantics(
11572 getIntWidth(Ty), Ty->isSignedIntegerType());
11573
11574 bool isSigned = Ty->isSignedFixedPointType();
11575 return llvm::FixedPointSemantics(
11576 static_cast<unsigned>(getTypeSize(Ty)), getFixedPointScale(Ty), isSigned,
11577 Ty->isSaturatedFixedPointType(),
11578 !isSigned && getTargetInfo().doUnsignedFixedPointTypesHavePadding());
11579}
11580
11581llvm::APFixedPoint ASTContext::getFixedPointMax(QualType Ty) const {
11582 assert(Ty->isFixedPointType());
11583 return llvm::APFixedPoint::getMax(getFixedPointSemantics(Ty));
11584}
11585
11586llvm::APFixedPoint ASTContext::getFixedPointMin(QualType Ty) const {
11587 assert(Ty->isFixedPointType());
11588 return llvm::APFixedPoint::getMin(getFixedPointSemantics(Ty));
11589}
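
// Editorial sketch, not part of the analyzed file: inspecting the semantics
// object computed by getFixedPointSemantics above; getFixedPointMax and
// getFixedPointMin derive the value range of a fixed point type from the same
// semantics. Assumes this file's include set (llvm::errs).
static void describeAccum(const clang::ASTContext &Ctx) {
  llvm::FixedPointSemantics FXSema = Ctx.getFixedPointSemantics(Ctx.AccumTy);
  llvm::errs() << "_Accum: width=" << FXSema.getWidth()
               << " scale=" << FXSema.getScale()
               << " signed=" << (FXSema.isSigned() ? "yes" : "no") << "\n";
}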
11590
11591QualType ASTContext::getCorrespondingSignedFixedPointType(QualType Ty) const {
11592 assert(Ty->isUnsignedFixedPointType() &&
11593 "Expected unsigned fixed point type");
11594
11595 switch (Ty->castAs<BuiltinType>()->getKind()) {
11596 case BuiltinType::UShortAccum:
11597 return ShortAccumTy;
11598 case BuiltinType::UAccum:
11599 return AccumTy;
11600 case BuiltinType::ULongAccum:
11601 return LongAccumTy;
11602 case BuiltinType::SatUShortAccum:
11603 return SatShortAccumTy;
11604 case BuiltinType::SatUAccum:
11605 return SatAccumTy;
11606 case BuiltinType::SatULongAccum:
11607 return SatLongAccumTy;
11608 case BuiltinType::UShortFract:
11609 return ShortFractTy;
11610 case BuiltinType::UFract:
11611 return FractTy;
11612 case BuiltinType::ULongFract:
11613 return LongFractTy;
11614 case BuiltinType::SatUShortFract:
11615 return SatShortFractTy;
11616 case BuiltinType::SatUFract:
11617 return SatFractTy;
11618 case BuiltinType::SatULongFract:
11619 return SatLongFractTy;
11620 default:
11621 llvm_unreachable("Unexpected unsigned fixed point type");
11622 }
11623}
11624
11625ParsedTargetAttr
11626ASTContext::filterFunctionTargetAttrs(const TargetAttr *TD) const {
11627 assert(TD != nullptr);
11628 ParsedTargetAttr ParsedAttr = TD->parse();
11629
11630 ParsedAttr.Features.erase(
11631 llvm::remove_if(ParsedAttr.Features,
11632 [&](const std::string &Feat) {
11633 return !Target->isValidFeatureName(
11634 StringRef{Feat}.substr(1));
11635 }),
11636 ParsedAttr.Features.end());
11637 return ParsedAttr;
11638}
11639
11640void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
11641 const FunctionDecl *FD) const {
11642 if (FD)
11643 getFunctionFeatureMap(FeatureMap, GlobalDecl().getWithDecl(FD));
11644 else
11645 Target->initFeatureMap(FeatureMap, getDiagnostics(),
11646 Target->getTargetOpts().CPU,
11647 Target->getTargetOpts().Features);
11648}
11649
11650// Fills in the supplied string map with the set of target features for the
11651// passed in function.
11652void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
11653 GlobalDecl GD) const {
11654 StringRef TargetCPU = Target->getTargetOpts().CPU;
11655 const FunctionDecl *FD = GD.getDecl()->getAsFunction();
11656 if (const auto *TD = FD->getAttr<TargetAttr>()) {
11657 ParsedTargetAttr ParsedAttr = filterFunctionTargetAttrs(TD);
11658
11659 // Make a copy of the features as passed on the command line into the
11660 // beginning of the additional features from the function to override.
11661 ParsedAttr.Features.insert(
11662 ParsedAttr.Features.begin(),
11663 Target->getTargetOpts().FeaturesAsWritten.begin(),
11664 Target->getTargetOpts().FeaturesAsWritten.end());
11665
11666 if (ParsedAttr.Architecture != "" &&
11667 Target->isValidCPUName(ParsedAttr.Architecture))
11668 TargetCPU = ParsedAttr.Architecture;
11669
11670 // Now populate the feature map, first with the TargetCPU which is either
11671 // the default or a new one from the target attribute string. Then we'll use
11672 // the passed in features (FeaturesAsWritten) along with the new ones from
11673 // the attribute.
11674 Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU,
11675 ParsedAttr.Features);
11676 } else if (const auto *SD = FD->getAttr<CPUSpecificAttr>()) {
11677 llvm::SmallVector<StringRef, 32> FeaturesTmp;
11678 Target->getCPUSpecificCPUDispatchFeatures(
11679 SD->getCPUName(GD.getMultiVersionIndex())->getName(), FeaturesTmp);
11680 std::vector<std::string> Features(FeaturesTmp.begin(), FeaturesTmp.end());
11681 Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features);
11682 } else {
11683 FeatureMap = Target->getTargetOpts().FeatureMap;
11684 }
11685}
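
// Editorial sketch, not part of the analyzed file: dumping the effective
// feature set for a function, with any target or cpu_specific attribute
// folded in by the logic above. Assumes this file's include set
// (llvm::StringMap, llvm::errs).
static void dumpFunctionFeatures(const clang::ASTContext &Ctx,
                                 const clang::FunctionDecl *FD) {
  llvm::StringMap<bool> Features;
  Ctx.getFunctionFeatureMap(Features, FD);
  for (const auto &Entry : Features)
    llvm::errs() << (Entry.getValue() ? "+" : "-") << Entry.getKey() << "\n";
}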
11686
11687OMPTraitInfo &ASTContext::getNewOMPTraitInfo() {
11688 OMPTraitInfoVector.emplace_back(new OMPTraitInfo());
11689 return *OMPTraitInfoVector.back();
11690}
11691
11692const StreamingDiagnostic &clang::
11693operator<<(const StreamingDiagnostic &DB,
11694 const ASTContext::SectionInfo &Section) {
11695 if (Section.Decl)
11696 return DB << Section.Decl;
11697 return DB << "a prior #pragma section";
11698}
11699
11700bool ASTContext::mayExternalizeStaticVar(const Decl *D) const {
11701 bool IsStaticVar =
11702 isa<VarDecl>(D) && cast<VarDecl>(D)->getStorageClass() == SC_Static;
11703 bool IsExplicitDeviceVar = (D->hasAttr<CUDADeviceAttr>() &&
11704 !D->getAttr<CUDADeviceAttr>()->isImplicit()) ||
11705 (D->hasAttr<CUDAConstantAttr>() &&
11706 !D->getAttr<CUDAConstantAttr>()->isImplicit());
11707 // CUDA/HIP: static managed variables need to be externalized since they are
11708 // declarations in IR and therefore cannot have internal linkage.
11709 return IsStaticVar &&
11710 (D->hasAttr<HIPManagedAttr>() || IsExplicitDeviceVar);
11711}
11712
11713bool ASTContext::shouldExternalizeStaticVar(const Decl *D) const {
11714 return mayExternalizeStaticVar(D) &&
11715 (D->hasAttr<HIPManagedAttr>() ||
11716 CUDADeviceVarODRUsedByHost.count(cast<VarDecl>(D)));
11717}
11718
11719StringRef ASTContext::getCUIDHash() const {
11720 if (!CUIDHash.empty())
11721 return CUIDHash;
11722 if (LangOpts.CUID.empty())
11723 return StringRef();
11724 CUIDHash = llvm::utohexstr(llvm::MD5Hash(LangOpts.CUID), /*LowerCase=*/true);
11725 return CUIDHash;
11726}
11727
11728// Get the closest named parent, so we can order the SYCL naming decls in a
11729// context where the mangling is meaningful.
11730static const DeclContext *GetNamedParent(const CXXRecordDecl *RD) {
11731 const DeclContext *DC = RD->getDeclContext();
11732
11733 while (!isa<NamedDecl, TranslationUnitDecl>(DC))
11734 DC = DC->getParent();
11735 return DC;
11736}
11737
11738void ASTContext::AddSYCLKernelNamingDecl(const CXXRecordDecl *RD) {
11739 assert(getLangOpts().isSYCL() && "Only valid for SYCL programs");
11740 RD = RD->getCanonicalDecl();
11741 const DeclContext *DC = GetNamedParent(RD);
11742
11743 assert(RD->getLocation().isValid() &&
11744 "Invalid location on kernel naming decl");
11745
11746 (void)SYCLKernelNamingTypes[DC].insert(RD);
11747}
11748
11749bool ASTContext::IsSYCLKernelNamingDecl(const NamedDecl *ND) const {
11750 assert(getLangOpts().isSYCL() && "Only valid for SYCL programs");
11751 const auto *RD = dyn_cast<CXXRecordDecl>(ND);
11752 if (!RD)
11753 return false;
11754 RD = RD->getCanonicalDecl();
11755 const DeclContext *DC = GetNamedParent(RD);
11756
11757 auto Itr = SYCLKernelNamingTypes.find(DC);
11758
11759 if (Itr == SYCLKernelNamingTypes.end())
11760 return false;
11761
11762 return Itr->getSecond().count(RD);
11763}
11764
11765// Filters the Decls list to those that share the lambda mangling with the
11766// passed RD.
11767void ASTContext::FilterSYCLKernelNamingDecls(
11768 const CXXRecordDecl *RD,
11769 llvm::SmallVectorImpl<const CXXRecordDecl *> &Decls) {
11770
11771 if (!SYCLKernelFilterContext)
11772 SYCLKernelFilterContext.reset(
11773 ItaniumMangleContext::create(*this, getDiagnostics()));
11774
11775 llvm::SmallString<128> LambdaSig;
11776 llvm::raw_svector_ostream Out(LambdaSig);
11777 SYCLKernelFilterContext->mangleLambdaSig(RD, Out);
11778
11779 llvm::erase_if(Decls, [this, &LambdaSig](const CXXRecordDecl *LocalRD) {
11780 llvm::SmallString<128> LocalLambdaSig;
11781 llvm::raw_svector_ostream LocalOut(LocalLambdaSig);
11782 SYCLKernelFilterContext->mangleLambdaSig(LocalRD, LocalOut);
11783 return LambdaSig != LocalLambdaSig;
11784 });
11785}
11786
11787unsigned ASTContext::GetSYCLKernelNamingIndex(const NamedDecl *ND) {
11788 assert(getLangOpts().isSYCL() && "Only valid for SYCL programs");
11789 assert(IsSYCLKernelNamingDecl(ND) &&
11790 "Lambda not involved in mangling asked for a naming index?");
11791
11792 const CXXRecordDecl *RD = cast<CXXRecordDecl>(ND)->getCanonicalDecl();
11793 const DeclContext *DC = GetNamedParent(RD);
11794
11795 auto Itr = SYCLKernelNamingTypes.find(DC);
11796 assert(Itr != SYCLKernelNamingTypes.end() && "Not a valid DeclContext?");
11797
11798 const llvm::SmallPtrSet<const CXXRecordDecl *, 4> &Set = Itr->getSecond();
11799
11800 llvm::SmallVector<const CXXRecordDecl *> Decls{Set.begin(), Set.end()};
11801
11802 FilterSYCLKernelNamingDecls(RD, Decls);
11803
11804 llvm::sort(Decls, [](const CXXRecordDecl *LHS, const CXXRecordDecl *RHS) {
11805 return LHS->getLambdaManglingNumber() < RHS->getLambdaManglingNumber();
11806 });
11807
11808 return llvm::find(Decls, RD) - Decls.begin();
11809}