Bug Summary

File: src/gnu/usr.bin/clang/libclangSema/../../../llvm/clang/lib/Sema/SemaOpenMP.cpp
Warning: line 8938, column 22
Called C++ object pointer is null
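
For readers unfamiliar with this checker, "Called C++ object pointer is null" means the analyzer found an execution path on which the implicit object pointer of a member-function call is provably null. The following is a minimal, hypothetical sketch of the pattern (the names Node and firstValue are invented; this is not the code at SemaOpenMP.cpp:8938, which lies outside the excerpt below):

    #include <cstdio>

    struct Node {
      int value() const { return V; }
      int V = 0;
    };

    // Illustration only: if N is null, the branch below does not return,
    // so the member call still executes with a null object pointer.
    int firstValue(const Node *N) {
      if (N == nullptr)
        std::puts("missing node");   // note: no early return here
      return N->value();             // analyzer: Called C++ object pointer is null
    }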

Annotated Source Code

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name SemaOpenMP.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/gnu/usr.bin/clang/libclangSema/obj -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/gnu/usr.bin/clang/libclangSema/obj/../include/clang/Sema -I /usr/src/gnu/usr.bin/clang/libclangSema/../../../llvm/clang/include -I /usr/src/gnu/usr.bin/clang/libclangSema/../../../llvm/llvm/include -I /usr/src/gnu/usr.bin/clang/libclangSema/../include -I /usr/src/gnu/usr.bin/clang/libclangSema/obj -I /usr/src/gnu/usr.bin/clang/libclangSema/obj/../include -D NDEBUG -D __STDC_LIMIT_MACROS -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D LLVM_PREFIX="/usr" -internal-isystem /usr/include/c++/v1 -internal-isystem /usr/local/lib/clang/13.0.0/include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/usr/src/gnu/usr.bin/clang/libclangSema/obj -ferror-limit 19 -fvisibility-inlines-hidden -fwrapv -stack-protector 2 -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/vmm/scan-build/2022-01-12-194120-40624-1 -x c++ /usr/src/gnu/usr.bin/clang/libclangSema/../../../llvm/clang/lib/Sema/SemaOpenMP.cpp
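
Note that the invocation passes -D NDEBUG, so every assert() in the listing below compiles away to ((void)0) and gives the analyzer no non-null guarantee. A small, hypothetical sketch of the effect (Item, sizeOf, and broken are invented names, not code from this file):

    #include <cassert>

    struct Item {
      int size() const { return N; }
      int N = 0;
    };

    int sizeOf(const Item *I) {
      assert(I && "expected an item");  // under -D NDEBUG this is ((void)0)
      return I->size();                 // nothing constrains I to be non-null here
    }

    int broken() {
      return sizeOf(nullptr);           // a path on which the object pointer is null
    }
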
1//===--- SemaOpenMP.cpp - Semantic Analysis for OpenMP constructs ---------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements semantic analysis for OpenMP directives and
10/// clauses.
11///
12//===----------------------------------------------------------------------===//
13
14#include "TreeTransform.h"
15#include "clang/AST/ASTContext.h"
16#include "clang/AST/ASTMutationListener.h"
17#include "clang/AST/CXXInheritance.h"
18#include "clang/AST/Decl.h"
19#include "clang/AST/DeclCXX.h"
20#include "clang/AST/DeclOpenMP.h"
21#include "clang/AST/OpenMPClause.h"
22#include "clang/AST/StmtCXX.h"
23#include "clang/AST/StmtOpenMP.h"
24#include "clang/AST/StmtVisitor.h"
25#include "clang/AST/TypeOrdering.h"
26#include "clang/Basic/DiagnosticSema.h"
27#include "clang/Basic/OpenMPKinds.h"
28#include "clang/Basic/PartialDiagnostic.h"
29#include "clang/Basic/TargetInfo.h"
30#include "clang/Sema/Initialization.h"
31#include "clang/Sema/Lookup.h"
32#include "clang/Sema/Scope.h"
33#include "clang/Sema/ScopeInfo.h"
34#include "clang/Sema/SemaInternal.h"
35#include "llvm/ADT/IndexedMap.h"
36#include "llvm/ADT/PointerEmbeddedInt.h"
37#include "llvm/ADT/STLExtras.h"
38#include "llvm/ADT/StringExtras.h"
39#include "llvm/Frontend/OpenMP/OMPConstants.h"
40#include <set>
41
42using namespace clang;
43using namespace llvm::omp;
44
45//===----------------------------------------------------------------------===//
46// Stack of data-sharing attributes for variables
47//===----------------------------------------------------------------------===//
48
49static const Expr *checkMapClauseExpressionBase(
50 Sema &SemaRef, Expr *E,
51 OMPClauseMappableExprCommon::MappableExprComponentList &CurComponents,
52 OpenMPClauseKind CKind, OpenMPDirectiveKind DKind, bool NoDiagnose);
53
54namespace {
55/// Default data sharing attributes, which can be applied to directive.
56enum DefaultDataSharingAttributes {
57 DSA_unspecified = 0, /// Data sharing attribute not specified.
58 DSA_none = 1 << 0, /// Default data sharing attribute 'none'.
59 DSA_shared = 1 << 1, /// Default data sharing attribute 'shared'.
60 DSA_firstprivate = 1 << 2, /// Default data sharing attribute 'firstprivate'.
61};
62
63/// Stack for tracking declarations used in OpenMP directives and
64/// clauses and their data-sharing attributes.
65class DSAStackTy {
66public:
67 struct DSAVarData {
68 OpenMPDirectiveKind DKind = OMPD_unknown;
69 OpenMPClauseKind CKind = OMPC_unknown;
70 unsigned Modifier = 0;
71 const Expr *RefExpr = nullptr;
72 DeclRefExpr *PrivateCopy = nullptr;
73 SourceLocation ImplicitDSALoc;
74 bool AppliedToPointee = false;
75 DSAVarData() = default;
76 DSAVarData(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind,
77 const Expr *RefExpr, DeclRefExpr *PrivateCopy,
78 SourceLocation ImplicitDSALoc, unsigned Modifier,
79 bool AppliedToPointee)
80 : DKind(DKind), CKind(CKind), Modifier(Modifier), RefExpr(RefExpr),
81 PrivateCopy(PrivateCopy), ImplicitDSALoc(ImplicitDSALoc),
82 AppliedToPointee(AppliedToPointee) {}
83 };
84 using OperatorOffsetTy =
85 llvm::SmallVector<std::pair<Expr *, OverloadedOperatorKind>, 4>;
86 using DoacrossDependMapTy =
87 llvm::DenseMap<OMPDependClause *, OperatorOffsetTy>;
88 /// Kind of the declaration used in the uses_allocators clauses.
89 enum class UsesAllocatorsDeclKind {
90 /// Predefined allocator
91 PredefinedAllocator,
92 /// User-defined allocator
93 UserDefinedAllocator,
94 /// The declaration that represents the allocator trait
95 AllocatorTrait,
96 };
97
98private:
99 struct DSAInfo {
100 OpenMPClauseKind Attributes = OMPC_unknown;
101 unsigned Modifier = 0;
102 /// Pointer to a reference expression and a flag which shows that the
103 /// variable is marked as lastprivate(true) or not (false).
104 llvm::PointerIntPair<const Expr *, 1, bool> RefExpr;
105 DeclRefExpr *PrivateCopy = nullptr;
106 /// true if the attribute is applied to the pointee, not the variable
107 /// itself.
108 bool AppliedToPointee = false;
109 };
110 using DeclSAMapTy = llvm::SmallDenseMap<const ValueDecl *, DSAInfo, 8>;
111 using UsedRefMapTy = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 8>;
112 using LCDeclInfo = std::pair<unsigned, VarDecl *>;
113 using LoopControlVariablesMapTy =
114 llvm::SmallDenseMap<const ValueDecl *, LCDeclInfo, 8>;
115 /// Struct that associates a component with the clause kind where they are
116 /// found.
117 struct MappedExprComponentTy {
118 OMPClauseMappableExprCommon::MappableExprComponentLists Components;
119 OpenMPClauseKind Kind = OMPC_unknown;
120 };
121 using MappedExprComponentsTy =
122 llvm::DenseMap<const ValueDecl *, MappedExprComponentTy>;
123 using CriticalsWithHintsTy =
124 llvm::StringMap<std::pair<const OMPCriticalDirective *, llvm::APSInt>>;
125 struct ReductionData {
126 using BOKPtrType = llvm::PointerEmbeddedInt<BinaryOperatorKind, 16>;
127 SourceRange ReductionRange;
128 llvm::PointerUnion<const Expr *, BOKPtrType> ReductionOp;
129 ReductionData() = default;
130 void set(BinaryOperatorKind BO, SourceRange RR) {
131 ReductionRange = RR;
132 ReductionOp = BO;
133 }
134 void set(const Expr *RefExpr, SourceRange RR) {
135 ReductionRange = RR;
136 ReductionOp = RefExpr;
137 }
138 };
139 using DeclReductionMapTy =
140 llvm::SmallDenseMap<const ValueDecl *, ReductionData, 4>;
141 struct DefaultmapInfo {
142 OpenMPDefaultmapClauseModifier ImplicitBehavior =
143 OMPC_DEFAULTMAP_MODIFIER_unknown;
144 SourceLocation SLoc;
145 DefaultmapInfo() = default;
146 DefaultmapInfo(OpenMPDefaultmapClauseModifier M, SourceLocation Loc)
147 : ImplicitBehavior(M), SLoc(Loc) {}
148 };
149
150 struct SharingMapTy {
151 DeclSAMapTy SharingMap;
152 DeclReductionMapTy ReductionMap;
153 UsedRefMapTy AlignedMap;
154 UsedRefMapTy NontemporalMap;
155 MappedExprComponentsTy MappedExprComponents;
156 LoopControlVariablesMapTy LCVMap;
157 DefaultDataSharingAttributes DefaultAttr = DSA_unspecified;
158 SourceLocation DefaultAttrLoc;
159 DefaultmapInfo DefaultmapMap[OMPC_DEFAULTMAP_unknown];
160 OpenMPDirectiveKind Directive = OMPD_unknown;
161 DeclarationNameInfo DirectiveName;
162 Scope *CurScope = nullptr;
163 DeclContext *Context = nullptr;
164 SourceLocation ConstructLoc;
165 /// Set of 'depend' clauses with 'sink|source' dependence kind. Required to
166 /// get the data (loop counters etc.) about enclosing loop-based construct.
167 /// This data is required during codegen.
168 DoacrossDependMapTy DoacrossDepends;
169 /// First argument (Expr *) contains optional argument of the
170 /// 'ordered' clause, the second one is true if the region has an 'ordered'
171 /// clause, false otherwise.
172 llvm::Optional<std::pair<const Expr *, OMPOrderedClause *>> OrderedRegion;
173 unsigned AssociatedLoops = 1;
174 bool HasMutipleLoops = false;
175 const Decl *PossiblyLoopCounter = nullptr;
176 bool NowaitRegion = false;
177 bool CancelRegion = false;
178 bool LoopStart = false;
179 bool BodyComplete = false;
180 SourceLocation PrevScanLocation;
181 SourceLocation PrevOrderedLocation;
182 SourceLocation InnerTeamsRegionLoc;
183 /// Reference to the taskgroup task_reduction reference expression.
184 Expr *TaskgroupReductionRef = nullptr;
185 llvm::DenseSet<QualType> MappedClassesQualTypes;
186 SmallVector<Expr *, 4> InnerUsedAllocators;
187 llvm::DenseSet<CanonicalDeclPtr<Decl>> ImplicitTaskFirstprivates;
188 /// List of globals marked as declare target link in this target region
189 /// (isOpenMPTargetExecutionDirective(Directive) == true).
190 llvm::SmallVector<DeclRefExpr *, 4> DeclareTargetLinkVarDecls;
191 /// List of decls used in inclusive/exclusive clauses of the scan directive.
192 llvm::DenseSet<CanonicalDeclPtr<Decl>> UsedInScanDirective;
193 llvm::DenseMap<CanonicalDeclPtr<const Decl>, UsesAllocatorsDeclKind>
194 UsesAllocatorsDecls;
195 Expr *DeclareMapperVar = nullptr;
196 SharingMapTy(OpenMPDirectiveKind DKind, DeclarationNameInfo Name,
197 Scope *CurScope, SourceLocation Loc)
198 : Directive(DKind), DirectiveName(Name), CurScope(CurScope),
199 ConstructLoc(Loc) {}
200 SharingMapTy() = default;
201 };
202
203 using StackTy = SmallVector<SharingMapTy, 4>;
204
205 /// Stack of used declaration and their data-sharing attributes.
206 DeclSAMapTy Threadprivates;
207 const FunctionScopeInfo *CurrentNonCapturingFunctionScope = nullptr;
208 SmallVector<std::pair<StackTy, const FunctionScopeInfo *>, 4> Stack;
209 /// true, if check for DSA must be from parent directive, false, if
210 /// from current directive.
211 OpenMPClauseKind ClauseKindMode = OMPC_unknown;
212 Sema &SemaRef;
213 bool ForceCapturing = false;
214 /// true if all the variables in the target executable directives must be
215 /// captured by reference.
216 bool ForceCaptureByReferenceInTargetExecutable = false;
217 CriticalsWithHintsTy Criticals;
218 unsigned IgnoredStackElements = 0;
219
220 /// Iterators over the stack iterate in order from innermost to outermost
221 /// directive.
222 using const_iterator = StackTy::const_reverse_iterator;
223 const_iterator begin() const {
224 return Stack.empty() ? const_iterator()
225 : Stack.back().first.rbegin() + IgnoredStackElements;
226 }
227 const_iterator end() const {
228 return Stack.empty() ? const_iterator() : Stack.back().first.rend();
229 }
230 using iterator = StackTy::reverse_iterator;
231 iterator begin() {
232 return Stack.empty() ? iterator()
233 : Stack.back().first.rbegin() + IgnoredStackElements;
234 }
235 iterator end() {
236 return Stack.empty() ? iterator() : Stack.back().first.rend();
237 }
238
239 // Convenience operations to get at the elements of the stack.
240
241 bool isStackEmpty() const {
242 return Stack.empty() ||
243 Stack.back().second != CurrentNonCapturingFunctionScope ||
244 Stack.back().first.size() <= IgnoredStackElements;
245 }
246 size_t getStackSize() const {
247 return isStackEmpty() ? 0
248 : Stack.back().first.size() - IgnoredStackElements;
249 }
250
251 SharingMapTy *getTopOfStackOrNull() {
252 size_t Size = getStackSize();
253 if (Size == 0)
254 return nullptr;
255 return &Stack.back().first[Size - 1];
256 }
257 const SharingMapTy *getTopOfStackOrNull() const {
258 return const_cast<DSAStackTy&>(*this).getTopOfStackOrNull();
259 }
260 SharingMapTy &getTopOfStack() {
261 assert(!isStackEmpty() && "no current directive");
262 return *getTopOfStackOrNull();
263 }
264 const SharingMapTy &getTopOfStack() const {
265 return const_cast<DSAStackTy&>(*this).getTopOfStack();
266 }
267
268 SharingMapTy *getSecondOnStackOrNull() {
269 size_t Size = getStackSize();
270 if (Size <= 1)
271 return nullptr;
272 return &Stack.back().first[Size - 2];
273 }
274 const SharingMapTy *getSecondOnStackOrNull() const {
275 return const_cast<DSAStackTy&>(*this).getSecondOnStackOrNull();
276 }
277
278 /// Get the stack element at a certain level (previously returned by
279 /// \c getNestingLevel).
280 ///
281 /// Note that nesting levels count from outermost to innermost, and this is
282 /// the reverse of our iteration order where new inner levels are pushed at
283 /// the front of the stack.
284 SharingMapTy &getStackElemAtLevel(unsigned Level) {
285 assert(Level < getStackSize() && "no such stack element");
286 return Stack.back().first[Level];
287 }
288 const SharingMapTy &getStackElemAtLevel(unsigned Level) const {
289 return const_cast<DSAStackTy&>(*this).getStackElemAtLevel(Level);
290 }
291
292 DSAVarData getDSA(const_iterator &Iter, ValueDecl *D) const;
293
294 /// Checks if the variable is a local for OpenMP region.
295 bool isOpenMPLocal(VarDecl *D, const_iterator Iter) const;
296
297 /// Vector of previously declared requires directives
298 SmallVector<const OMPRequiresDecl *, 2> RequiresDecls;
299 /// omp_allocator_handle_t type.
300 QualType OMPAllocatorHandleT;
301 /// omp_depend_t type.
302 QualType OMPDependT;
303 /// omp_event_handle_t type.
304 QualType OMPEventHandleT;
305 /// omp_alloctrait_t type.
306 QualType OMPAlloctraitT;
307 /// Expression for the predefined allocators.
308 Expr *OMPPredefinedAllocators[OMPAllocateDeclAttr::OMPUserDefinedMemAlloc] = {
309 nullptr};
310 /// Vector of previously encountered target directives
311 SmallVector<SourceLocation, 2> TargetLocations;
312 SourceLocation AtomicLocation;
313
314public:
315 explicit DSAStackTy(Sema &S) : SemaRef(S) {}
316
317 /// Sets omp_allocator_handle_t type.
318 void setOMPAllocatorHandleT(QualType Ty) { OMPAllocatorHandleT = Ty; }
319 /// Gets omp_allocator_handle_t type.
320 QualType getOMPAllocatorHandleT() const { return OMPAllocatorHandleT; }
321 /// Sets omp_alloctrait_t type.
322 void setOMPAlloctraitT(QualType Ty) { OMPAlloctraitT = Ty; }
323 /// Gets omp_alloctrait_t type.
324 QualType getOMPAlloctraitT() const { return OMPAlloctraitT; }
325 /// Sets the given default allocator.
326 void setAllocator(OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind,
327 Expr *Allocator) {
328 OMPPredefinedAllocators[AllocatorKind] = Allocator;
329 }
330 /// Returns the specified default allocator.
331 Expr *getAllocator(OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind) const {
332 return OMPPredefinedAllocators[AllocatorKind];
333 }
334 /// Sets omp_depend_t type.
335 void setOMPDependT(QualType Ty) { OMPDependT = Ty; }
336 /// Gets omp_depend_t type.
337 QualType getOMPDependT() const { return OMPDependT; }
338
339 /// Sets omp_event_handle_t type.
340 void setOMPEventHandleT(QualType Ty) { OMPEventHandleT = Ty; }
341 /// Gets omp_event_handle_t type.
342 QualType getOMPEventHandleT() const { return OMPEventHandleT; }
343
344 bool isClauseParsingMode() const { return ClauseKindMode != OMPC_unknown; }
345 OpenMPClauseKind getClauseParsingMode() const {
346 assert(isClauseParsingMode() && "Must be in clause parsing mode.");
347 return ClauseKindMode;
348 }
349 void setClauseParsingMode(OpenMPClauseKind K) { ClauseKindMode = K; }
350
351 bool isBodyComplete() const {
352 const SharingMapTy *Top = getTopOfStackOrNull();
353 return Top && Top->BodyComplete;
354 }
355 void setBodyComplete() {
356 getTopOfStack().BodyComplete = true;
357 }
358
359 bool isForceVarCapturing() const { return ForceCapturing; }
360 void setForceVarCapturing(bool V) { ForceCapturing = V; }
361
362 void setForceCaptureByReferenceInTargetExecutable(bool V) {
363 ForceCaptureByReferenceInTargetExecutable = V;
364 }
365 bool isForceCaptureByReferenceInTargetExecutable() const {
366 return ForceCaptureByReferenceInTargetExecutable;
367 }
368
369 void push(OpenMPDirectiveKind DKind, const DeclarationNameInfo &DirName,
370 Scope *CurScope, SourceLocation Loc) {
371 assert(!IgnoredStackElements &&
372 "cannot change stack while ignoring elements");
373 if (Stack.empty() ||
374 Stack.back().second != CurrentNonCapturingFunctionScope)
375 Stack.emplace_back(StackTy(), CurrentNonCapturingFunctionScope);
376 Stack.back().first.emplace_back(DKind, DirName, CurScope, Loc);
377 Stack.back().first.back().DefaultAttrLoc = Loc;
378 }
379
380 void pop() {
381 assert(!IgnoredStackElements &&
382 "cannot change stack while ignoring elements");
383 assert(!Stack.back().first.empty() &&
384 "Data-sharing attributes stack is empty!");
385 Stack.back().first.pop_back();
386 }
387
388 /// RAII object to temporarily leave the scope of a directive when we want to
389 /// logically operate in its parent.
390 class ParentDirectiveScope {
391 DSAStackTy &Self;
392 bool Active;
393 public:
394 ParentDirectiveScope(DSAStackTy &Self, bool Activate)
395 : Self(Self), Active(false) {
396 if (Activate)
397 enable();
398 }
399 ~ParentDirectiveScope() { disable(); }
400 void disable() {
401 if (Active) {
402 --Self.IgnoredStackElements;
403 Active = false;
404 }
405 }
406 void enable() {
407 if (!Active) {
408 ++Self.IgnoredStackElements;
409 Active = true;
410 }
411 }
412 };
413
414 /// Marks that we've started loop parsing.
415 void loopInit() {
416 assert(isOpenMPLoopDirective(getCurrentDirective()) &&
417 "Expected loop-based directive.");
418 getTopOfStack().LoopStart = true;
419 }
420 /// Start capturing of the variables in the loop context.
421 void loopStart() {
422 assert(isOpenMPLoopDirective(getCurrentDirective()) &&
423 "Expected loop-based directive.");
424 getTopOfStack().LoopStart = false;
425 }
426 /// true, if variables are captured, false otherwise.
427 bool isLoopStarted() const {
428 assert(isOpenMPLoopDirective(getCurrentDirective()) &&
429 "Expected loop-based directive.");
430 return !getTopOfStack().LoopStart;
431 }
432 /// Marks (or clears) declaration as possibly loop counter.
433 void resetPossibleLoopCounter(const Decl *D = nullptr) {
434 getTopOfStack().PossiblyLoopCounter =
435 D ? D->getCanonicalDecl() : D;
436 }
437 /// Gets the possible loop counter decl.
438 const Decl *getPossiblyLoopCunter() const {
439 return getTopOfStack().PossiblyLoopCounter;
440 }
441 /// Start new OpenMP region stack in new non-capturing function.
442 void pushFunction() {
443 assert(!IgnoredStackElements &&
444 "cannot change stack while ignoring elements");
445 const FunctionScopeInfo *CurFnScope = SemaRef.getCurFunction();
446 assert(!isa<CapturingScopeInfo>(CurFnScope));
447 CurrentNonCapturingFunctionScope = CurFnScope;
448 }
449 /// Pop region stack for non-capturing function.
450 void popFunction(const FunctionScopeInfo *OldFSI) {
451 assert(!IgnoredStackElements &&
452 "cannot change stack while ignoring elements");
453 if (!Stack.empty() && Stack.back().second == OldFSI) {
454 assert(Stack.back().first.empty());
455 Stack.pop_back();
456 }
457 CurrentNonCapturingFunctionScope = nullptr;
458 for (const FunctionScopeInfo *FSI : llvm::reverse(SemaRef.FunctionScopes)) {
459 if (!isa<CapturingScopeInfo>(FSI)) {
460 CurrentNonCapturingFunctionScope = FSI;
461 break;
462 }
463 }
464 }
465
466 void addCriticalWithHint(const OMPCriticalDirective *D, llvm::APSInt Hint) {
467 Criticals.try_emplace(D->getDirectiveName().getAsString(), D, Hint);
468 }
469 const std::pair<const OMPCriticalDirective *, llvm::APSInt>
470 getCriticalWithHint(const DeclarationNameInfo &Name) const {
471 auto I = Criticals.find(Name.getAsString());
472 if (I != Criticals.end())
473 return I->second;
474 return std::make_pair(nullptr, llvm::APSInt());
475 }
476 /// If 'aligned' declaration for given variable \a D was not seen yet,
477 /// add it and return NULL; otherwise return previous occurrence's expression
478 /// for diagnostics.
479 const Expr *addUniqueAligned(const ValueDecl *D, const Expr *NewDE);
480 /// If 'nontemporal' declaration for given variable \a D was not seen yet,
481 /// add it and return NULL; otherwise return previous occurrence's expression
482 /// for diagnostics.
483 const Expr *addUniqueNontemporal(const ValueDecl *D, const Expr *NewDE);
484
485 /// Register specified variable as loop control variable.
486 void addLoopControlVariable(const ValueDecl *D, VarDecl *Capture);
487 /// Check if the specified variable is a loop control variable for
488 /// current region.
489 /// \return The index of the loop control variable in the list of associated
490 /// for-loops (from outer to inner).
491 const LCDeclInfo isLoopControlVariable(const ValueDecl *D) const;
492 /// Check if the specified variable is a loop control variable for
493 /// parent region.
494 /// \return The index of the loop control variable in the list of associated
495 /// for-loops (from outer to inner).
496 const LCDeclInfo isParentLoopControlVariable(const ValueDecl *D) const;
497 /// Check if the specified variable is a loop control variable for
498 /// current region.
499 /// \return The index of the loop control variable in the list of associated
500 /// for-loops (from outer to inner).
501 const LCDeclInfo isLoopControlVariable(const ValueDecl *D,
502 unsigned Level) const;
503 /// Get the loop control variable for the I-th loop (or nullptr) in
504 /// parent directive.
505 const ValueDecl *getParentLoopControlVariable(unsigned I) const;
506
507 /// Marks the specified decl \p D as used in scan directive.
508 void markDeclAsUsedInScanDirective(ValueDecl *D) {
509 if (SharingMapTy *Stack = getSecondOnStackOrNull())
510 Stack->UsedInScanDirective.insert(D);
511 }
512
513 /// Checks if the specified declaration was used in the inner scan directive.
514 bool isUsedInScanDirective(ValueDecl *D) const {
515 if (const SharingMapTy *Stack = getTopOfStackOrNull())
516 return Stack->UsedInScanDirective.count(D) > 0;
517 return false;
518 }
519
520 /// Adds explicit data sharing attribute to the specified declaration.
521 void addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
522 DeclRefExpr *PrivateCopy = nullptr, unsigned Modifier = 0,
523 bool AppliedToPointee = false);
524
525 /// Adds additional information for the reduction items with the reduction id
526 /// represented as an operator.
527 void addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
528 BinaryOperatorKind BOK);
529 /// Adds additional information for the reduction items with the reduction id
530 /// represented as reduction identifier.
531 void addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
532 const Expr *ReductionRef);
533 /// Returns the location and reduction operation from the innermost parent
534 /// region for the given \p D.
535 const DSAVarData
536 getTopMostTaskgroupReductionData(const ValueDecl *D, SourceRange &SR,
537 BinaryOperatorKind &BOK,
538 Expr *&TaskgroupDescriptor) const;
539 /// Returns the location and reduction operation from the innermost parent
540 /// region for the given \p D.
541 const DSAVarData
542 getTopMostTaskgroupReductionData(const ValueDecl *D, SourceRange &SR,
543 const Expr *&ReductionRef,
544 Expr *&TaskgroupDescriptor) const;
545 /// Return reduction reference expression for the current taskgroup or
546 /// parallel/worksharing directives with task reductions.
547 Expr *getTaskgroupReductionRef() const {
548 assert((getTopOfStack().Directive == OMPD_taskgroup ||
549 ((isOpenMPParallelDirective(getTopOfStack().Directive) ||
550 isOpenMPWorksharingDirective(getTopOfStack().Directive)) &&
551 !isOpenMPSimdDirective(getTopOfStack().Directive))) &&
552 "taskgroup reference expression requested for non taskgroup or "
553 "parallel/worksharing directive.");
554 return getTopOfStack().TaskgroupReductionRef;
555 }
556 /// Checks if the given \p VD declaration is actually a taskgroup reduction
557 /// descriptor variable at the \p Level of OpenMP regions.
558 bool isTaskgroupReductionRef(const ValueDecl *VD, unsigned Level) const {
559 return getStackElemAtLevel(Level).TaskgroupReductionRef &&
560 cast<DeclRefExpr>(getStackElemAtLevel(Level).TaskgroupReductionRef)
561 ->getDecl() == VD;
562 }
563
564 /// Returns data sharing attributes from top of the stack for the
565 /// specified declaration.
566 const DSAVarData getTopDSA(ValueDecl *D, bool FromParent);
567 /// Returns data-sharing attributes for the specified declaration.
568 const DSAVarData getImplicitDSA(ValueDecl *D, bool FromParent) const;
569 /// Returns data-sharing attributes for the specified declaration.
570 const DSAVarData getImplicitDSA(ValueDecl *D, unsigned Level) const;
571 /// Checks if the specified variable has data-sharing attributes which
572 /// match specified \a CPred predicate in any directive which matches \a DPred
573 /// predicate.
574 const DSAVarData
575 hasDSA(ValueDecl *D,
576 const llvm::function_ref<bool(OpenMPClauseKind, bool)> CPred,
577 const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
578 bool FromParent) const;
579 /// Checks if the specified variable has data-sharing attributes which
580 /// match specified \a CPred predicate in any innermost directive which
581 /// matches \a DPred predicate.
582 const DSAVarData
583 hasInnermostDSA(ValueDecl *D,
584 const llvm::function_ref<bool(OpenMPClauseKind, bool)> CPred,
585 const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
586 bool FromParent) const;
587 /// Checks if the specified variable has explicit data-sharing
588 /// attributes which match specified \a CPred predicate at the specified
589 /// OpenMP region.
590 bool
591 hasExplicitDSA(const ValueDecl *D,
592 const llvm::function_ref<bool(OpenMPClauseKind, bool)> CPred,
593 unsigned Level, bool NotLastprivate = false) const;
594
595 /// Returns true if the directive at level \a Level matches the
596 /// specified \a DPred predicate.
597 bool hasExplicitDirective(
598 const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
599 unsigned Level) const;
600
601 /// Finds a directive which matches specified \a DPred predicate.
602 bool hasDirective(
603 const llvm::function_ref<bool(
604 OpenMPDirectiveKind, const DeclarationNameInfo &, SourceLocation)>
605 DPred,
606 bool FromParent) const;
607
608 /// Returns currently analyzed directive.
609 OpenMPDirectiveKind getCurrentDirective() const {
610 const SharingMapTy *Top = getTopOfStackOrNull();
611 return Top ? Top->Directive : OMPD_unknown;
612 }
613 /// Returns directive kind at specified level.
614 OpenMPDirectiveKind getDirective(unsigned Level) const {
615 assert(!isStackEmpty() && "No directive at specified level.");
616 return getStackElemAtLevel(Level).Directive;
617 }
618 /// Returns the capture region at the specified level.
619 OpenMPDirectiveKind getCaptureRegion(unsigned Level,
620 unsigned OpenMPCaptureLevel) const {
621 SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
622 getOpenMPCaptureRegions(CaptureRegions, getDirective(Level));
623 return CaptureRegions[OpenMPCaptureLevel];
624 }
625 /// Returns parent directive.
626 OpenMPDirectiveKind getParentDirective() const {
627 const SharingMapTy *Parent = getSecondOnStackOrNull();
628 return Parent ? Parent->Directive : OMPD_unknown;
629 }
630
631 /// Add requires decl to internal vector
632 void addRequiresDecl(OMPRequiresDecl *RD) {
633 RequiresDecls.push_back(RD);
634 }
635
636 /// Checks if the defined 'requires' directive has specified type of clause.
637 template <typename ClauseType>
638 bool hasRequiresDeclWithClause() const {
639 return llvm::any_of(RequiresDecls, [](const OMPRequiresDecl *D) {
640 return llvm::any_of(D->clauselists(), [](const OMPClause *C) {
641 return isa<ClauseType>(C);
642 });
643 });
644 }
645
646 /// Checks for a duplicate clause amongst previously declared requires
647 /// directives
648 bool hasDuplicateRequiresClause(ArrayRef<OMPClause *> ClauseList) const {
649 bool IsDuplicate = false;
650 for (OMPClause *CNew : ClauseList) {
651 for (const OMPRequiresDecl *D : RequiresDecls) {
652 for (const OMPClause *CPrev : D->clauselists()) {
653 if (CNew->getClauseKind() == CPrev->getClauseKind()) {
654 SemaRef.Diag(CNew->getBeginLoc(),
655 diag::err_omp_requires_clause_redeclaration)
656 << getOpenMPClauseName(CNew->getClauseKind());
657 SemaRef.Diag(CPrev->getBeginLoc(),
658 diag::note_omp_requires_previous_clause)
659 << getOpenMPClauseName(CPrev->getClauseKind());
660 IsDuplicate = true;
661 }
662 }
663 }
664 }
665 return IsDuplicate;
666 }
667
668 /// Add location of previously encountered target to internal vector
669 void addTargetDirLocation(SourceLocation LocStart) {
670 TargetLocations.push_back(LocStart);
671 }
672
673 /// Add location for the first encountered atomic directive.
674 void addAtomicDirectiveLoc(SourceLocation Loc) {
675 if (AtomicLocation.isInvalid())
676 AtomicLocation = Loc;
677 }
678
679 /// Returns the location of the first encountered atomic directive in the
680 /// module.
681 SourceLocation getAtomicDirectiveLoc() const {
682 return AtomicLocation;
683 }
684
685 // Return previously encountered target region locations.
686 ArrayRef<SourceLocation> getEncounteredTargetLocs() const {
687 return TargetLocations;
688 }
689
690 /// Set default data sharing attribute to none.
691 void setDefaultDSANone(SourceLocation Loc) {
692 getTopOfStack().DefaultAttr = DSA_none;
693 getTopOfStack().DefaultAttrLoc = Loc;
694 }
695 /// Set default data sharing attribute to shared.
696 void setDefaultDSAShared(SourceLocation Loc) {
697 getTopOfStack().DefaultAttr = DSA_shared;
698 getTopOfStack().DefaultAttrLoc = Loc;
699 }
700 /// Set default data sharing attribute to firstprivate.
701 void setDefaultDSAFirstPrivate(SourceLocation Loc) {
702 getTopOfStack().DefaultAttr = DSA_firstprivate;
703 getTopOfStack().DefaultAttrLoc = Loc;
704 }
705 /// Set default data mapping attribute to Modifier:Kind
706 void setDefaultDMAAttr(OpenMPDefaultmapClauseModifier M,
707 OpenMPDefaultmapClauseKind Kind,
708 SourceLocation Loc) {
709 DefaultmapInfo &DMI = getTopOfStack().DefaultmapMap[Kind];
710 DMI.ImplicitBehavior = M;
711 DMI.SLoc = Loc;
712 }
713 /// Check whether the implicit-behavior has been set in defaultmap
714 bool checkDefaultmapCategory(OpenMPDefaultmapClauseKind VariableCategory) {
715 if (VariableCategory == OMPC_DEFAULTMAP_unknown)
716 return getTopOfStack()
717 .DefaultmapMap[OMPC_DEFAULTMAP_aggregate]
718 .ImplicitBehavior != OMPC_DEFAULTMAP_MODIFIER_unknown ||
719 getTopOfStack()
720 .DefaultmapMap[OMPC_DEFAULTMAP_scalar]
721 .ImplicitBehavior != OMPC_DEFAULTMAP_MODIFIER_unknown ||
722 getTopOfStack()
723 .DefaultmapMap[OMPC_DEFAULTMAP_pointer]
724 .ImplicitBehavior != OMPC_DEFAULTMAP_MODIFIER_unknown;
725 return getTopOfStack().DefaultmapMap[VariableCategory].ImplicitBehavior !=
726 OMPC_DEFAULTMAP_MODIFIER_unknown;
727 }
728
729 DefaultDataSharingAttributes getDefaultDSA(unsigned Level) const {
730 return getStackSize() <= Level ? DSA_unspecified
731 : getStackElemAtLevel(Level).DefaultAttr;
732 }
733 DefaultDataSharingAttributes getDefaultDSA() const {
734 return isStackEmpty() ? DSA_unspecified
735 : getTopOfStack().DefaultAttr;
736 }
737 SourceLocation getDefaultDSALocation() const {
738 return isStackEmpty() ? SourceLocation()
739 : getTopOfStack().DefaultAttrLoc;
740 }
741 OpenMPDefaultmapClauseModifier
742 getDefaultmapModifier(OpenMPDefaultmapClauseKind Kind) const {
743 return isStackEmpty()
744 ? OMPC_DEFAULTMAP_MODIFIER_unknown
745 : getTopOfStack().DefaultmapMap[Kind].ImplicitBehavior;
746 }
747 OpenMPDefaultmapClauseModifier
748 getDefaultmapModifierAtLevel(unsigned Level,
749 OpenMPDefaultmapClauseKind Kind) const {
750 return getStackElemAtLevel(Level).DefaultmapMap[Kind].ImplicitBehavior;
751 }
752 bool isDefaultmapCapturedByRef(unsigned Level,
753 OpenMPDefaultmapClauseKind Kind) const {
754 OpenMPDefaultmapClauseModifier M =
755 getDefaultmapModifierAtLevel(Level, Kind);
756 if (Kind == OMPC_DEFAULTMAP_scalar || Kind == OMPC_DEFAULTMAP_pointer) {
757 return (M == OMPC_DEFAULTMAP_MODIFIER_alloc) ||
758 (M == OMPC_DEFAULTMAP_MODIFIER_to) ||
759 (M == OMPC_DEFAULTMAP_MODIFIER_from) ||
760 (M == OMPC_DEFAULTMAP_MODIFIER_tofrom);
761 }
762 return true;
763 }
764 static bool mustBeFirstprivateBase(OpenMPDefaultmapClauseModifier M,
765 OpenMPDefaultmapClauseKind Kind) {
766 switch (Kind) {
767 case OMPC_DEFAULTMAP_scalar:
768 case OMPC_DEFAULTMAP_pointer:
769 return (M == OMPC_DEFAULTMAP_MODIFIER_unknown) ||
770 (M == OMPC_DEFAULTMAP_MODIFIER_firstprivate) ||
771 (M == OMPC_DEFAULTMAP_MODIFIER_default);
772 case OMPC_DEFAULTMAP_aggregate:
773 return M == OMPC_DEFAULTMAP_MODIFIER_firstprivate;
774 default:
775 break;
776 }
777 llvm_unreachable("Unexpected OpenMPDefaultmapClauseKind enum");
778 }
779 bool mustBeFirstprivateAtLevel(unsigned Level,
780 OpenMPDefaultmapClauseKind Kind) const {
781 OpenMPDefaultmapClauseModifier M =
782 getDefaultmapModifierAtLevel(Level, Kind);
783 return mustBeFirstprivateBase(M, Kind);
784 }
785 bool mustBeFirstprivate(OpenMPDefaultmapClauseKind Kind) const {
786 OpenMPDefaultmapClauseModifier M = getDefaultmapModifier(Kind);
787 return mustBeFirstprivateBase(M, Kind);
788 }
789
790 /// Checks if the specified variable is a threadprivate.
791 bool isThreadPrivate(VarDecl *D) {
792 const DSAVarData DVar = getTopDSA(D, false);
793 return isOpenMPThreadPrivate(DVar.CKind);
794 }
795
796 /// Marks current region as ordered (it has an 'ordered' clause).
797 void setOrderedRegion(bool IsOrdered, const Expr *Param,
798 OMPOrderedClause *Clause) {
799 if (IsOrdered)
800 getTopOfStack().OrderedRegion.emplace(Param, Clause);
801 else
802 getTopOfStack().OrderedRegion.reset();
803 }
804 /// Returns true, if region is ordered (has associated 'ordered' clause),
805 /// false - otherwise.
806 bool isOrderedRegion() const {
807 if (const SharingMapTy *Top = getTopOfStackOrNull())
808 return Top->OrderedRegion.hasValue();
809 return false;
810 }
811 /// Returns optional parameter for the ordered region.
812 std::pair<const Expr *, OMPOrderedClause *> getOrderedRegionParam() const {
813 if (const SharingMapTy *Top = getTopOfStackOrNull())
814 if (Top->OrderedRegion.hasValue())
815 return Top->OrderedRegion.getValue();
816 return std::make_pair(nullptr, nullptr);
817 }
818 /// Returns true, if parent region is ordered (has associated
819 /// 'ordered' clause), false - otherwise.
820 bool isParentOrderedRegion() const {
821 if (const SharingMapTy *Parent = getSecondOnStackOrNull())
822 return Parent->OrderedRegion.hasValue();
823 return false;
824 }
825 /// Returns optional parameter for the ordered region.
826 std::pair<const Expr *, OMPOrderedClause *>
827 getParentOrderedRegionParam() const {
828 if (const SharingMapTy *Parent = getSecondOnStackOrNull())
829 if (Parent->OrderedRegion.hasValue())
830 return Parent->OrderedRegion.getValue();
831 return std::make_pair(nullptr, nullptr);
832 }
833 /// Marks current region as nowait (it has a 'nowait' clause).
834 void setNowaitRegion(bool IsNowait = true) {
835 getTopOfStack().NowaitRegion = IsNowait;
836 }
837 /// Returns true, if parent region is nowait (has associated
838 /// 'nowait' clause), false - otherwise.
839 bool isParentNowaitRegion() const {
840 if (const SharingMapTy *Parent = getSecondOnStackOrNull())
841 return Parent->NowaitRegion;
842 return false;
843 }
844 /// Marks parent region as cancel region.
845 void setParentCancelRegion(bool Cancel = true) {
846 if (SharingMapTy *Parent = getSecondOnStackOrNull())
847 Parent->CancelRegion |= Cancel;
848 }
849 /// Return true if current region has inner cancel construct.
850 bool isCancelRegion() const {
851 const SharingMapTy *Top = getTopOfStackOrNull();
852 return Top ? Top->CancelRegion : false;
853 }
854
855 /// Mark that parent region already has scan directive.
856 void setParentHasScanDirective(SourceLocation Loc) {
857 if (SharingMapTy *Parent = getSecondOnStackOrNull())
858 Parent->PrevScanLocation = Loc;
859 }
860 /// Return true if parent region has a scan directive.
861 bool doesParentHasScanDirective() const {
862 const SharingMapTy *Top = getSecondOnStackOrNull();
863 return Top ? Top->PrevScanLocation.isValid() : false;
864 }
865 /// Returns the location of the previously specified scan directive.
866 SourceLocation getParentScanDirectiveLoc() const {
867 const SharingMapTy *Top = getSecondOnStackOrNull();
868 return Top ? Top->PrevScanLocation : SourceLocation();
869 }
870 /// Mark that parent region already has ordered directive.
871 void setParentHasOrderedDirective(SourceLocation Loc) {
872 if (SharingMapTy *Parent = getSecondOnStackOrNull())
873 Parent->PrevOrderedLocation = Loc;
874 }
875 /// Return true if parent region has an ordered directive.
876 bool doesParentHasOrderedDirective() const {
877 const SharingMapTy *Top = getSecondOnStackOrNull();
878 return Top ? Top->PrevOrderedLocation.isValid() : false;
879 }
880 /// Returns the location of the previously specified ordered directive.
881 SourceLocation getParentOrderedDirectiveLoc() const {
882 const SharingMapTy *Top = getSecondOnStackOrNull();
883 return Top ? Top->PrevOrderedLocation : SourceLocation();
884 }
885
886 /// Set collapse value for the region.
887 void setAssociatedLoops(unsigned Val) {
888 getTopOfStack().AssociatedLoops = Val;
889 if (Val > 1)
890 getTopOfStack().HasMutipleLoops = true;
891 }
892 /// Return collapse value for region.
893 unsigned getAssociatedLoops() const {
894 const SharingMapTy *Top = getTopOfStackOrNull();
895 return Top ? Top->AssociatedLoops : 0;
896 }
897 /// Returns true if the construct is associated with multiple loops.
898 bool hasMutipleLoops() const {
899 const SharingMapTy *Top = getTopOfStackOrNull();
900 return Top ? Top->HasMutipleLoops : false;
901 }
902
903 /// Marks current target region as one with closely nested teams
904 /// region.
905 void setParentTeamsRegionLoc(SourceLocation TeamsRegionLoc) {
906 if (SharingMapTy *Parent = getSecondOnStackOrNull())
907 Parent->InnerTeamsRegionLoc = TeamsRegionLoc;
908 }
909 /// Returns true, if current region has closely nested teams region.
910 bool hasInnerTeamsRegion() const {
911 return getInnerTeamsRegionLoc().isValid();
912 }
913 /// Returns location of the nested teams region (if any).
914 SourceLocation getInnerTeamsRegionLoc() const {
915 const SharingMapTy *Top = getTopOfStackOrNull();
916 return Top ? Top->InnerTeamsRegionLoc : SourceLocation();
917 }
918
919 Scope *getCurScope() const {
920 const SharingMapTy *Top = getTopOfStackOrNull();
921 return Top ? Top->CurScope : nullptr;
922 }
923 void setContext(DeclContext *DC) { getTopOfStack().Context = DC; }
924 SourceLocation getConstructLoc() const {
925 const SharingMapTy *Top = getTopOfStackOrNull();
926 return Top ? Top->ConstructLoc : SourceLocation();
927 }
928
929 /// Do the check specified in \a Check to all component lists and return true
930 /// if any issue is found.
931 bool checkMappableExprComponentListsForDecl(
932 const ValueDecl *VD, bool CurrentRegionOnly,
933 const llvm::function_ref<
934 bool(OMPClauseMappableExprCommon::MappableExprComponentListRef,
935 OpenMPClauseKind)>
936 Check) const {
937 if (isStackEmpty())
938 return false;
939 auto SI = begin();
940 auto SE = end();
941
942 if (SI == SE)
943 return false;
944
945 if (CurrentRegionOnly)
946 SE = std::next(SI);
947 else
948 std::advance(SI, 1);
949
950 for (; SI != SE; ++SI) {
951 auto MI = SI->MappedExprComponents.find(VD);
952 if (MI != SI->MappedExprComponents.end())
953 for (OMPClauseMappableExprCommon::MappableExprComponentListRef L :
954 MI->second.Components)
955 if (Check(L, MI->second.Kind))
956 return true;
957 }
958 return false;
959 }
960
961 /// Do the check specified in \a Check to all component lists at a given level
962 /// and return true if any issue is found.
963 bool checkMappableExprComponentListsForDeclAtLevel(
964 const ValueDecl *VD, unsigned Level,
965 const llvm::function_ref<
966 bool(OMPClauseMappableExprCommon::MappableExprComponentListRef,
967 OpenMPClauseKind)>
968 Check) const {
969 if (getStackSize() <= Level)
970 return false;
971
972 const SharingMapTy &StackElem = getStackElemAtLevel(Level);
973 auto MI = StackElem.MappedExprComponents.find(VD);
974 if (MI != StackElem.MappedExprComponents.end())
975 for (OMPClauseMappableExprCommon::MappableExprComponentListRef L :
976 MI->second.Components)
977 if (Check(L, MI->second.Kind))
978 return true;
979 return false;
980 }
981
982 /// Create a new mappable expression component list associated with a given
983 /// declaration and initialize it with the provided list of components.
984 void addMappableExpressionComponents(
985 const ValueDecl *VD,
986 OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
987 OpenMPClauseKind WhereFoundClauseKind) {
988 MappedExprComponentTy &MEC = getTopOfStack().MappedExprComponents[VD];
989 // Create new entry and append the new components there.
990 MEC.Components.resize(MEC.Components.size() + 1);
991 MEC.Components.back().append(Components.begin(), Components.end());
992 MEC.Kind = WhereFoundClauseKind;
993 }
994
995 unsigned getNestingLevel() const {
996 assert(!isStackEmpty());
997 return getStackSize() - 1;
998 }
999 void addDoacrossDependClause(OMPDependClause *C,
1000 const OperatorOffsetTy &OpsOffs) {
1001 SharingMapTy *Parent = getSecondOnStackOrNull();
1002 assert(Parent && isOpenMPWorksharingDirective(Parent->Directive));
1003 Parent->DoacrossDepends.try_emplace(C, OpsOffs);
1004 }
1005 llvm::iterator_range<DoacrossDependMapTy::const_iterator>
1006 getDoacrossDependClauses() const {
1007 const SharingMapTy &StackElem = getTopOfStack();
1008 if (isOpenMPWorksharingDirective(StackElem.Directive)) {
1009 const DoacrossDependMapTy &Ref = StackElem.DoacrossDepends;
1010 return llvm::make_range(Ref.begin(), Ref.end());
1011 }
1012 return llvm::make_range(StackElem.DoacrossDepends.end(),
1013 StackElem.DoacrossDepends.end());
1014 }
1015
1016 // Store types of classes which have been explicitly mapped
1017 void addMappedClassesQualTypes(QualType QT) {
1018 SharingMapTy &StackElem = getTopOfStack();
1019 StackElem.MappedClassesQualTypes.insert(QT);
1020 }
1021
1022 // Return set of mapped classes types
1023 bool isClassPreviouslyMapped(QualType QT) const {
1024 const SharingMapTy &StackElem = getTopOfStack();
1025 return StackElem.MappedClassesQualTypes.count(QT) != 0;
1026 }
1027
1028 /// Adds global declare target to the parent target region.
1029 void addToParentTargetRegionLinkGlobals(DeclRefExpr *E) {
1030 assert(*OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(
1031 E->getDecl()) == OMPDeclareTargetDeclAttr::MT_Link &&
1032 "Expected declare target link global.");
1033 for (auto &Elem : *this) {
1034 if (isOpenMPTargetExecutionDirective(Elem.Directive)) {
1035 Elem.DeclareTargetLinkVarDecls.push_back(E);
1036 return;
1037 }
1038 }
1039 }
1040
1041 /// Returns the list of globals with declare target link if current directive
1042 /// is target.
1043 ArrayRef<DeclRefExpr *> getLinkGlobals() const {
1044 assert(isOpenMPTargetExecutionDirective(getCurrentDirective()) &&
1045 "Expected target executable directive.");
1046 return getTopOfStack().DeclareTargetLinkVarDecls;
1047 }
1048
1049 /// Adds list of allocators expressions.
1050 void addInnerAllocatorExpr(Expr *E) {
1051 getTopOfStack().InnerUsedAllocators.push_back(E);
1052 }
1053 /// Return list of used allocators.
1054 ArrayRef<Expr *> getInnerAllocators() const {
1055 return getTopOfStack().InnerUsedAllocators;
1056 }
1057 /// Marks the declaration as implicitly firstprivate in the task-based
1058 /// regions.
1059 void addImplicitTaskFirstprivate(unsigned Level, Decl *D) {
1060 getStackElemAtLevel(Level).ImplicitTaskFirstprivates.insert(D);
1061 }
1062 /// Checks if the decl is implicitly firstprivate in the task-based region.
1063 bool isImplicitTaskFirstprivate(Decl *D) const {
1064 return getTopOfStack().ImplicitTaskFirstprivates.count(D) > 0;
1065 }
1066
1067 /// Marks decl as used in uses_allocators clause as the allocator.
1068 void addUsesAllocatorsDecl(const Decl *D, UsesAllocatorsDeclKind Kind) {
1069 getTopOfStack().UsesAllocatorsDecls.try_emplace(D, Kind);
1070 }
1071 /// Checks if specified decl is used in uses allocator clause as the
1072 /// allocator.
1073 Optional<UsesAllocatorsDeclKind> isUsesAllocatorsDecl(unsigned Level,
1074 const Decl *D) const {
1075 const SharingMapTy &StackElem = getTopOfStack();
1076 auto I = StackElem.UsesAllocatorsDecls.find(D);
1077 if (I == StackElem.UsesAllocatorsDecls.end())
1078 return None;
1079 return I->getSecond();
1080 }
1081 Optional<UsesAllocatorsDeclKind> isUsesAllocatorsDecl(const Decl *D) const {
1082 const SharingMapTy &StackElem = getTopOfStack();
1083 auto I = StackElem.UsesAllocatorsDecls.find(D);
1084 if (I == StackElem.UsesAllocatorsDecls.end())
1085 return None;
1086 return I->getSecond();
1087 }
1088
1089 void addDeclareMapperVarRef(Expr *Ref) {
1090 SharingMapTy &StackElem = getTopOfStack();
1091 StackElem.DeclareMapperVar = Ref;
1092 }
1093 const Expr *getDeclareMapperVarRef() const {
1094 const SharingMapTy *Top = getTopOfStackOrNull();
1095 return Top ? Top->DeclareMapperVar : nullptr;
1096 }
1097};
1098
1099bool isImplicitTaskingRegion(OpenMPDirectiveKind DKind) {
1100 return isOpenMPParallelDirective(DKind) || isOpenMPTeamsDirective(DKind);
1101}
1102
1103bool isImplicitOrExplicitTaskingRegion(OpenMPDirectiveKind DKind) {
1104 return isImplicitTaskingRegion(DKind) || isOpenMPTaskingDirective(DKind) ||
1105 DKind == OMPD_unknown;
1106}
1107
1108} // namespace
1109
1110static const Expr *getExprAsWritten(const Expr *E) {
1111 if (const auto *FE = dyn_cast<FullExpr>(E))
1112 E = FE->getSubExpr();
1113
1114 if (const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
1115 E = MTE->getSubExpr();
1116
1117 while (const auto *Binder = dyn_cast<CXXBindTemporaryExpr>(E))
1118 E = Binder->getSubExpr();
1119
1120 if (const auto *ICE = dyn_cast<ImplicitCastExpr>(E))
1121 E = ICE->getSubExprAsWritten();
1122 return E->IgnoreParens();
1123}
1124
1125static Expr *getExprAsWritten(Expr *E) {
1126 return const_cast<Expr *>(getExprAsWritten(const_cast<const Expr *>(E)));
1127}
1128
1129static const ValueDecl *getCanonicalDecl(const ValueDecl *D) {
1130 if (const auto *CED = dyn_cast<OMPCapturedExprDecl>(D))
1131 if (const auto *ME = dyn_cast<MemberExpr>(getExprAsWritten(CED->getInit())))
1132 D = ME->getMemberDecl();
1133 const auto *VD = dyn_cast<VarDecl>(D);
1134 const auto *FD = dyn_cast<FieldDecl>(D);
1135 if (VD != nullptr) {
1136 VD = VD->getCanonicalDecl();
1137 D = VD;
1138 } else {
1139 assert(FD);
1140 FD = FD->getCanonicalDecl();
1141 D = FD;
1142 }
1143 return D;
1144}
1145
1146static ValueDecl *getCanonicalDecl(ValueDecl *D) {
1147 return const_cast<ValueDecl *>(
1148 getCanonicalDecl(const_cast<const ValueDecl *>(D)));
1149}
1150
1151DSAStackTy::DSAVarData DSAStackTy::getDSA(const_iterator &Iter,
1152 ValueDecl *D) const {
1153 D = getCanonicalDecl(D);
1154 auto *VD = dyn_cast<VarDecl>(D);
1155 const auto *FD = dyn_cast<FieldDecl>(D);
1156 DSAVarData DVar;
1157 if (Iter == end()) {
1158 // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1159 // in a region but not in construct]
1160 // File-scope or namespace-scope variables referenced in called routines
1161 // in the region are shared unless they appear in a threadprivate
1162 // directive.
1163 if (VD && !VD->isFunctionOrMethodVarDecl() && !isa<ParmVarDecl>(VD))
1164 DVar.CKind = OMPC_shared;
1165
1166 // OpenMP [2.9.1.2, Data-sharing Attribute Rules for Variables Referenced
1167 // in a region but not in construct]
1168 // Variables with static storage duration that are declared in called
1169 // routines in the region are shared.
1170 if (VD && VD->hasGlobalStorage())
1171 DVar.CKind = OMPC_shared;
1172
1173 // Non-static data members are shared by default.
1174 if (FD)
1175 DVar.CKind = OMPC_shared;
1176
1177 return DVar;
1178 }
1179
1180 // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1181 // in a Construct, C/C++, predetermined, p.1]
1182 // Variables with automatic storage duration that are declared in a scope
1183 // inside the construct are private.
1184 if (VD && isOpenMPLocal(VD, Iter) && VD->isLocalVarDecl() &&
1185 (VD->getStorageClass() == SC_Auto || VD->getStorageClass() == SC_None)) {
1186 DVar.CKind = OMPC_private;
1187 return DVar;
1188 }
1189
1190 DVar.DKind = Iter->Directive;
1191 // Explicitly specified attributes and local variables with predetermined
1192 // attributes.
1193 if (Iter->SharingMap.count(D)) {
1194 const DSAInfo &Data = Iter->SharingMap.lookup(D);
1195 DVar.RefExpr = Data.RefExpr.getPointer();
1196 DVar.PrivateCopy = Data.PrivateCopy;
1197 DVar.CKind = Data.Attributes;
1198 DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
1199 DVar.Modifier = Data.Modifier;
1200 DVar.AppliedToPointee = Data.AppliedToPointee;
1201 return DVar;
1202 }
1203
1204 // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1205 // in a Construct, C/C++, implicitly determined, p.1]
1206 // In a parallel or task construct, the data-sharing attributes of these
1207 // variables are determined by the default clause, if present.
1208 switch (Iter->DefaultAttr) {
1209 case DSA_shared:
1210 DVar.CKind = OMPC_shared;
1211 DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
1212 return DVar;
1213 case DSA_none:
1214 return DVar;
1215 case DSA_firstprivate:
1216 if (VD->getStorageDuration() == SD_Static &&
1217 VD->getDeclContext()->isFileContext()) {
1218 DVar.CKind = OMPC_unknown;
1219 } else {
1220 DVar.CKind = OMPC_firstprivate;
1221 }
1222 DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
1223 return DVar;
1224 case DSA_unspecified:
1225 // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1226 // in a Construct, implicitly determined, p.2]
1227 // In a parallel construct, if no default clause is present, these
1228 // variables are shared.
1229 DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
1230 if ((isOpenMPParallelDirective(DVar.DKind) &&
1231 !isOpenMPTaskLoopDirective(DVar.DKind)) ||
1232 isOpenMPTeamsDirective(DVar.DKind)) {
1233 DVar.CKind = OMPC_shared;
1234 return DVar;
1235 }
1236
1237 // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1238 // in a Construct, implicitly determined, p.4]
1239 // In a task construct, if no default clause is present, a variable that in
1240 // the enclosing context is determined to be shared by all implicit tasks
1241 // bound to the current team is shared.
1242 if (isOpenMPTaskingDirective(DVar.DKind)) {
1243 DSAVarData DVarTemp;
1244 const_iterator I = Iter, E = end();
1245 do {
1246 ++I;
1247 // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables
1248 // Referenced in a Construct, implicitly determined, p.6]
1249 // In a task construct, if no default clause is present, a variable
1250 // whose data-sharing attribute is not determined by the rules above is
1251 // firstprivate.
1252 DVarTemp = getDSA(I, D);
1253 if (DVarTemp.CKind != OMPC_shared) {
1254 DVar.RefExpr = nullptr;
1255 DVar.CKind = OMPC_firstprivate;
1256 return DVar;
1257 }
1258 } while (I != E && !isImplicitTaskingRegion(I->Directive));
1259 DVar.CKind =
1260 (DVarTemp.CKind == OMPC_unknown) ? OMPC_firstprivate : OMPC_shared;
1261 return DVar;
1262 }
1263 }
1264 // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1265 // in a Construct, implicitly determined, p.3]
1266 // For constructs other than task, if no default clause is present, these
1267 // variables inherit their data-sharing attributes from the enclosing
1268 // context.
1269 return getDSA(++Iter, D);
1270}
1271
1272const Expr *DSAStackTy::addUniqueAligned(const ValueDecl *D,
1273 const Expr *NewDE) {
1274 assert(!isStackEmpty() && "Data sharing attributes stack is empty");
1275 D = getCanonicalDecl(D);
1276 SharingMapTy &StackElem = getTopOfStack();
1277 auto It = StackElem.AlignedMap.find(D);
1278 if (It == StackElem.AlignedMap.end()) {
1279 assert(NewDE && "Unexpected nullptr expr to be added into aligned map");
1280 StackElem.AlignedMap[D] = NewDE;
1281 return nullptr;
1282 }
1283 assert(It->second && "Unexpected nullptr expr in the aligned map");
1284 return It->second;
1285}
1286
1287const Expr *DSAStackTy::addUniqueNontemporal(const ValueDecl *D,
1288 const Expr *NewDE) {
1289 assert(!isStackEmpty() && "Data sharing attributes stack is empty");
1290 D = getCanonicalDecl(D);
1291 SharingMapTy &StackElem = getTopOfStack();
1292 auto It = StackElem.NontemporalMap.find(D);
1293 if (It == StackElem.NontemporalMap.end()) {
1294 assert(NewDE && "Unexpected nullptr expr to be added into aligned map");
1295 StackElem.NontemporalMap[D] = NewDE;
1296 return nullptr;
1297 }
1298 assert(It->second && "Unexpected nullptr expr in the aligned map");
1299 return It->second;
1300}
1301
1302void DSAStackTy::addLoopControlVariable(const ValueDecl *D, VarDecl *Capture) {
1303 assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
1304 D = getCanonicalDecl(D);
1305 SharingMapTy &StackElem = getTopOfStack();
1306 StackElem.LCVMap.try_emplace(
1307 D, LCDeclInfo(StackElem.LCVMap.size() + 1, Capture));
1308}
1309
1310const DSAStackTy::LCDeclInfo
1311DSAStackTy::isLoopControlVariable(const ValueDecl *D) const {
1312 assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
1313 D = getCanonicalDecl(D);
1314 const SharingMapTy &StackElem = getTopOfStack();
1315 auto It = StackElem.LCVMap.find(D);
1316 if (It != StackElem.LCVMap.end())
1317 return It->second;
1318 return {0, nullptr};
1319}
1320
1321const DSAStackTy::LCDeclInfo
1322DSAStackTy::isLoopControlVariable(const ValueDecl *D, unsigned Level) const {
1323 assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
1324 D = getCanonicalDecl(D);
1325 for (unsigned I = Level + 1; I > 0; --I) {
1326 const SharingMapTy &StackElem = getStackElemAtLevel(I - 1);
1327 auto It = StackElem.LCVMap.find(D);
1328 if (It != StackElem.LCVMap.end())
1329 return It->second;
1330 }
1331 return {0, nullptr};
1332}
1333
1334const DSAStackTy::LCDeclInfo
1335DSAStackTy::isParentLoopControlVariable(const ValueDecl *D) const {
1336 const SharingMapTy *Parent = getSecondOnStackOrNull();
1337 assert(Parent && "Data-sharing attributes stack is empty");
1338 D = getCanonicalDecl(D);
1339 auto It = Parent->LCVMap.find(D);
1340 if (It != Parent->LCVMap.end())
1341 return It->second;
1342 return {0, nullptr};
1343}
1344
1345const ValueDecl *DSAStackTy::getParentLoopControlVariable(unsigned I) const {
1346 const SharingMapTy *Parent = getSecondOnStackOrNull();
1347 assert(Parent && "Data-sharing attributes stack is empty");
1348 if (Parent->LCVMap.size() < I)
1349 return nullptr;
1350 for (const auto &Pair : Parent->LCVMap)
1351 if (Pair.second.first == I)
1352 return Pair.first;
1353 return nullptr;
1354}
1355
1356void DSAStackTy::addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
1357 DeclRefExpr *PrivateCopy, unsigned Modifier,
1358 bool AppliedToPointee) {
1359 D = getCanonicalDecl(D);
1360 if (A == OMPC_threadprivate) {
1361 DSAInfo &Data = Threadprivates[D];
1362 Data.Attributes = A;
1363 Data.RefExpr.setPointer(E);
1364 Data.PrivateCopy = nullptr;
1365 Data.Modifier = Modifier;
1366 } else {
1367 DSAInfo &Data = getTopOfStack().SharingMap[D];
1368 assert(Data.Attributes == OMPC_unknown || (A == Data.Attributes) ||
1369 (A == OMPC_firstprivate && Data.Attributes == OMPC_lastprivate) ||
1370 (A == OMPC_lastprivate && Data.Attributes == OMPC_firstprivate) ||
1371 (isLoopControlVariable(D).first && A == OMPC_private));
1372 Data.Modifier = Modifier;
1373 if (A == OMPC_lastprivate && Data.Attributes == OMPC_firstprivate) {
1374 Data.RefExpr.setInt(/*IntVal=*/true);
1375 return;
1376 }
1377 const bool IsLastprivate =
1378 A == OMPC_lastprivate || Data.Attributes == OMPC_lastprivate;
1379 Data.Attributes = A;
1380 Data.RefExpr.setPointerAndInt(E, IsLastprivate);
1381 Data.PrivateCopy = PrivateCopy;
1382 Data.AppliedToPointee = AppliedToPointee;
1383 if (PrivateCopy) {
1384 DSAInfo &Data = getTopOfStack().SharingMap[PrivateCopy->getDecl()];
1385 Data.Modifier = Modifier;
1386 Data.Attributes = A;
1387 Data.RefExpr.setPointerAndInt(PrivateCopy, IsLastprivate);
1388 Data.PrivateCopy = nullptr;
1389 Data.AppliedToPointee = AppliedToPointee;
1390 }
1391 }
1392}
1393
1394/// Build a variable declaration for OpenMP loop iteration variable.
1395static VarDecl *buildVarDecl(Sema &SemaRef, SourceLocation Loc, QualType Type,
1396 StringRef Name, const AttrVec *Attrs = nullptr,
1397 DeclRefExpr *OrigRef = nullptr) {
1398 DeclContext *DC = SemaRef.CurContext;
1399 IdentifierInfo *II = &SemaRef.PP.getIdentifierTable().get(Name);
1400 TypeSourceInfo *TInfo = SemaRef.Context.getTrivialTypeSourceInfo(Type, Loc);
1401 auto *Decl =
1402 VarDecl::Create(SemaRef.Context, DC, Loc, Loc, II, Type, TInfo, SC_None);
1403 if (Attrs) {
1404 for (specific_attr_iterator<AlignedAttr> I(Attrs->begin()), E(Attrs->end());
1405 I != E; ++I)
1406 Decl->addAttr(*I);
1407 }
1408 Decl->setImplicit();
1409 if (OrigRef) {
1410 Decl->addAttr(
1411 OMPReferencedVarAttr::CreateImplicit(SemaRef.Context, OrigRef));
1412 }
1413 return Decl;
1414}
1415
1416static DeclRefExpr *buildDeclRefExpr(Sema &S, VarDecl *D, QualType Ty,
1417 SourceLocation Loc,
1418 bool RefersToCapture = false) {
1419 D->setReferenced();
1420 D->markUsed(S.Context);
1421 return DeclRefExpr::Create(S.getASTContext(), NestedNameSpecifierLoc(),
1422 SourceLocation(), D, RefersToCapture, Loc, Ty,
1423 VK_LValue);
1424}
1425
1426void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
1427 BinaryOperatorKind BOK) {
1428 D = getCanonicalDecl(D);
1429 assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
1430 assert(
1431 getTopOfStack().SharingMap[D].Attributes == OMPC_reduction &&
1432 "Additional reduction info may be specified only for reduction items.");
1433 ReductionData &ReductionData = getTopOfStack().ReductionMap[D];
1434 assert(ReductionData.ReductionRange.isInvalid() &&
1435 (getTopOfStack().Directive == OMPD_taskgroup ||
1436 ((isOpenMPParallelDirective(getTopOfStack().Directive) ||
1437 isOpenMPWorksharingDirective(getTopOfStack().Directive)) &&
1438 !isOpenMPSimdDirective(getTopOfStack().Directive))) &&
1439 "Additional reduction info may be specified only once for reduction "
1440 "items.");
1441 ReductionData.set(BOK, SR);
1442 Expr *&TaskgroupReductionRef =
1443 getTopOfStack().TaskgroupReductionRef;
1444 if (!TaskgroupReductionRef) {
1445 VarDecl *VD = buildVarDecl(SemaRef, SR.getBegin(),
1446 SemaRef.Context.VoidPtrTy, ".task_red.");
1447 TaskgroupReductionRef =
1448 buildDeclRefExpr(SemaRef, VD, SemaRef.Context.VoidPtrTy, SR.getBegin());
1449 }
1450}
1451
1452void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
1453 const Expr *ReductionRef) {
1454 D = getCanonicalDecl(D);
1455 assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
1456 assert(
1457 getTopOfStack().SharingMap[D].Attributes == OMPC_reduction &&
1458 "Additional reduction info may be specified only for reduction items.");
1459 ReductionData &ReductionData = getTopOfStack().ReductionMap[D];
1460 assert(ReductionData.ReductionRange.isInvalid() &&
1461 (getTopOfStack().Directive == OMPD_taskgroup ||
1462 ((isOpenMPParallelDirective(getTopOfStack().Directive) ||
1463 isOpenMPWorksharingDirective(getTopOfStack().Directive)) &&
1464 !isOpenMPSimdDirective(getTopOfStack().Directive))) &&
1465 "Additional reduction info may be specified only once for reduction "
1466 "items.");
1467 ReductionData.set(ReductionRef, SR);
1468 Expr *&TaskgroupReductionRef =
1469 getTopOfStack().TaskgroupReductionRef;
1470 if (!TaskgroupReductionRef) {
1471 VarDecl *VD = buildVarDecl(SemaRef, SR.getBegin(),
1472 SemaRef.Context.VoidPtrTy, ".task_red.");
1473 TaskgroupReductionRef =
1474 buildDeclRefExpr(SemaRef, VD, SemaRef.Context.VoidPtrTy, SR.getBegin());
1475 }
1476}
1477
1478const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
1479 const ValueDecl *D, SourceRange &SR, BinaryOperatorKind &BOK,
1480 Expr *&TaskgroupDescriptor) const {
1481 D = getCanonicalDecl(D);
1482 assert(!isStackEmpty() && "Data-sharing attributes stack is empty.");
1483 for (const_iterator I = begin() + 1, E = end(); I != E; ++I) {
1484 const DSAInfo &Data = I->SharingMap.lookup(D);
1485 if (Data.Attributes != OMPC_reduction ||
1486 Data.Modifier != OMPC_REDUCTION_task)
1487 continue;
1488 const ReductionData &ReductionData = I->ReductionMap.lookup(D);
1489 if (!ReductionData.ReductionOp ||
1490 ReductionData.ReductionOp.is<const Expr *>())
1491 return DSAVarData();
1492 SR = ReductionData.ReductionRange;
1493 BOK = ReductionData.ReductionOp.get<ReductionData::BOKPtrType>();
1494 assert(I->TaskgroupReductionRef && "taskgroup reduction reference "
1495 "expression for the descriptor is not "
1496 "set.");
1497 TaskgroupDescriptor = I->TaskgroupReductionRef;
1498 return DSAVarData(I->Directive, OMPC_reduction, Data.RefExpr.getPointer(),
1499 Data.PrivateCopy, I->DefaultAttrLoc, OMPC_REDUCTION_task,
1500 /*AppliedToPointee=*/false);
1501 }
1502 return DSAVarData();
1503}
1504
1505const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
1506 const ValueDecl *D, SourceRange &SR, const Expr *&ReductionRef,
1507 Expr *&TaskgroupDescriptor) const {
1508 D = getCanonicalDecl(D);
1509 assert(!isStackEmpty() && "Data-sharing attributes stack is empty.");
1510 for (const_iterator I = begin() + 1, E = end(); I != E; ++I) {
1511 const DSAInfo &Data = I->SharingMap.lookup(D);
1512 if (Data.Attributes != OMPC_reduction ||
1513 Data.Modifier != OMPC_REDUCTION_task)
1514 continue;
1515 const ReductionData &ReductionData = I->ReductionMap.lookup(D);
1516 if (!ReductionData.ReductionOp ||
1517 !ReductionData.ReductionOp.is<const Expr *>())
1518 return DSAVarData();
1519 SR = ReductionData.ReductionRange;
1520 ReductionRef = ReductionData.ReductionOp.get<const Expr *>();
1521 assert(I->TaskgroupReductionRef && "taskgroup reduction reference "
1522 "expression for the descriptor is not "
1523 "set.");
1524 TaskgroupDescriptor = I->TaskgroupReductionRef;
1525 return DSAVarData(I->Directive, OMPC_reduction, Data.RefExpr.getPointer(),
1526 Data.PrivateCopy, I->DefaultAttrLoc, OMPC_REDUCTION_task,
1527 /*AppliedToPointee=*/false);
1528 }
1529 return DSAVarData();
1530}
1531
1532bool DSAStackTy::isOpenMPLocal(VarDecl *D, const_iterator I) const {
1533 D = D->getCanonicalDecl();
1534 for (const_iterator E = end(); I != E; ++I) {
1535 if (isImplicitOrExplicitTaskingRegion(I->Directive) ||
1536 isOpenMPTargetExecutionDirective(I->Directive)) {
1537 if (I->CurScope) {
1538 Scope *TopScope = I->CurScope->getParent();
1539 Scope *CurScope = getCurScope();
1540 while (CurScope && CurScope != TopScope && !CurScope->isDeclScope(D))
1541 CurScope = CurScope->getParent();
1542 return CurScope != TopScope;
1543 }
1544 for (DeclContext *DC = D->getDeclContext(); DC; DC = DC->getParent())
1545 if (I->Context == DC)
1546 return true;
1547 return false;
1548 }
1549 }
1550 return false;
1551}
1552
1553static bool isConstNotMutableType(Sema &SemaRef, QualType Type,
1554 bool AcceptIfMutable = true,
1555 bool *IsClassType = nullptr) {
1556 ASTContext &Context = SemaRef.getASTContext();
1557 Type = Type.getNonReferenceType().getCanonicalType();
1558 bool IsConstant = Type.isConstant(Context);
1559 Type = Context.getBaseElementType(Type);
1560 const CXXRecordDecl *RD = AcceptIfMutable && SemaRef.getLangOpts().CPlusPlus
1561 ? Type->getAsCXXRecordDecl()
1562 : nullptr;
1563 if (const auto *CTSD = dyn_cast_or_null<ClassTemplateSpecializationDecl>(RD))
1564 if (const ClassTemplateDecl *CTD = CTSD->getSpecializedTemplate())
1565 RD = CTD->getTemplatedDecl();
1566 if (IsClassType)
1567 *IsClassType = RD;
1568 return IsConstant && !(SemaRef.getLangOpts().CPlusPlus && RD &&
1569 RD->hasDefinition() && RD->hasMutableFields());
1570}
1571
1572static bool rejectConstNotMutableType(Sema &SemaRef, const ValueDecl *D,
1573 QualType Type, OpenMPClauseKind CKind,
1574 SourceLocation ELoc,
1575 bool AcceptIfMutable = true,
1576 bool ListItemNotVar = false) {
1577 ASTContext &Context = SemaRef.getASTContext();
1578 bool IsClassType;
1579 if (isConstNotMutableType(SemaRef, Type, AcceptIfMutable, &IsClassType)) {
1580 unsigned Diag = ListItemNotVar
1581 ? diag::err_omp_const_list_item
1582 : IsClassType ? diag::err_omp_const_not_mutable_variable
1583 : diag::err_omp_const_variable;
1584 SemaRef.Diag(ELoc, Diag) << getOpenMPClauseName(CKind);
1585 if (!ListItemNotVar && D) {
1586 const VarDecl *VD = dyn_cast<VarDecl>(D);
1587 bool IsDecl = !VD || VD->isThisDeclarationADefinition(Context) ==
1588 VarDecl::DeclarationOnly;
1589 SemaRef.Diag(D->getLocation(),
1590 IsDecl ? diag::note_previous_decl : diag::note_defined_here)
1591 << D;
1592 }
1593 return true;
1594 }
1595 return false;
1596}
1597
1598const DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D,
1599 bool FromParent) {
1600 D = getCanonicalDecl(D);
1601 DSAVarData DVar;
1602
1603 auto *VD = dyn_cast<VarDecl>(D);
1604 auto TI = Threadprivates.find(D);
1605 if (TI != Threadprivates.end()) {
1606 DVar.RefExpr = TI->getSecond().RefExpr.getPointer();
1607 DVar.CKind = OMPC_threadprivate;
1608 DVar.Modifier = TI->getSecond().Modifier;
1609 return DVar;
1610 }
1611 if (VD && VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
1612 DVar.RefExpr = buildDeclRefExpr(
1613 SemaRef, VD, D->getType().getNonReferenceType(),
1614 VD->getAttr<OMPThreadPrivateDeclAttr>()->getLocation());
1615 DVar.CKind = OMPC_threadprivate;
1616 addDSA(D, DVar.RefExpr, OMPC_threadprivate);
1617 return DVar;
1618 }
1619 // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1620 // in a Construct, C/C++, predetermined, p.1]
1621 // Variables appearing in threadprivate directives are threadprivate.
1622 if ((VD && VD->getTLSKind() != VarDecl::TLS_None &&
1623 !(VD->hasAttr<OMPThreadPrivateDeclAttr>() &&
1624 SemaRef.getLangOpts().OpenMPUseTLS &&
1625 SemaRef.getASTContext().getTargetInfo().isTLSSupported())) ||
1626 (VD && VD->getStorageClass() == SC_Register &&
1627 VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())) {
1628 DVar.RefExpr = buildDeclRefExpr(
1629 SemaRef, VD, D->getType().getNonReferenceType(), D->getLocation());
1630 DVar.CKind = OMPC_threadprivate;
1631 addDSA(D, DVar.RefExpr, OMPC_threadprivate);
1632 return DVar;
1633 }
1634 if (SemaRef.getLangOpts().OpenMPCUDAMode && VD &&
1635 VD->isLocalVarDeclOrParm() && !isStackEmpty() &&
1636 !isLoopControlVariable(D).first) {
1637 const_iterator IterTarget =
1638 std::find_if(begin(), end(), [](const SharingMapTy &Data) {
1639 return isOpenMPTargetExecutionDirective(Data.Directive);
1640 });
1641 if (IterTarget != end()) {
1642 const_iterator ParentIterTarget = IterTarget + 1;
1643 for (const_iterator Iter = begin();
1644 Iter != ParentIterTarget; ++Iter) {
1645 if (isOpenMPLocal(VD, Iter)) {
1646 DVar.RefExpr =
1647 buildDeclRefExpr(SemaRef, VD, D->getType().getNonReferenceType(),
1648 D->getLocation());
1649 DVar.CKind = OMPC_threadprivate;
1650 return DVar;
1651 }
1652 }
1653 if (!isClauseParsingMode() || IterTarget != begin()) {
1654 auto DSAIter = IterTarget->SharingMap.find(D);
1655 if (DSAIter != IterTarget->SharingMap.end() &&
1656 isOpenMPPrivate(DSAIter->getSecond().Attributes)) {
1657 DVar.RefExpr = DSAIter->getSecond().RefExpr.getPointer();
1658 DVar.CKind = OMPC_threadprivate;
1659 return DVar;
1660 }
1661 const_iterator End = end();
1662 if (!SemaRef.isOpenMPCapturedByRef(
1663 D, std::distance(ParentIterTarget, End),
1664 /*OpenMPCaptureLevel=*/0)) {
1665 DVar.RefExpr =
1666 buildDeclRefExpr(SemaRef, VD, D->getType().getNonReferenceType(),
1667 IterTarget->ConstructLoc);
1668 DVar.CKind = OMPC_threadprivate;
1669 return DVar;
1670 }
1671 }
1672 }
1673 }
1674
1675 if (isStackEmpty())
1676 // Not in OpenMP execution region and top scope was already checked.
1677 return DVar;
1678
1679 // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1680 // in a Construct, C/C++, predetermined, p.4]
1681 // Static data members are shared.
1682 // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1683 // in a Construct, C/C++, predetermined, p.7]
1684 // Variables with static storage duration that are declared in a scope
1685 // inside the construct are shared.
1686 if (VD && VD->isStaticDataMember()) {
1687 // Check for explicitly specified attributes.
1688 const_iterator I = begin();
1689 const_iterator EndI = end();
1690 if (FromParent && I != EndI)
1691 ++I;
1692 if (I != EndI) {
1693 auto It = I->SharingMap.find(D);
1694 if (It != I->SharingMap.end()) {
1695 const DSAInfo &Data = It->getSecond();
1696 DVar.RefExpr = Data.RefExpr.getPointer();
1697 DVar.PrivateCopy = Data.PrivateCopy;
1698 DVar.CKind = Data.Attributes;
1699 DVar.ImplicitDSALoc = I->DefaultAttrLoc;
1700 DVar.DKind = I->Directive;
1701 DVar.Modifier = Data.Modifier;
1702 DVar.AppliedToPointee = Data.AppliedToPointee;
1703 return DVar;
1704 }
1705 }
1706
1707 DVar.CKind = OMPC_shared;
1708 return DVar;
1709 }
1710
1711 auto &&MatchesAlways = [](OpenMPDirectiveKind) { return true; };
1712 // The predetermined shared attribute for const-qualified types having no
1713 // mutable members was removed after OpenMP 3.1.
1714 if (SemaRef.LangOpts.OpenMP <= 31) {
1715 // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1716 // in a Construct, C/C++, predetermined, p.6]
1717 // Variables with const qualified type having no mutable member are
1718 // shared.
1719 if (isConstNotMutableType(SemaRef, D->getType())) {
1720 // Variables with const-qualified type having no mutable member may be
1721 // listed in a firstprivate clause, even if they are static data members.
1722 DSAVarData DVarTemp = hasInnermostDSA(
1723 D,
1724 [](OpenMPClauseKind C, bool) {
1725 return C == OMPC_firstprivate || C == OMPC_shared;
1726 },
1727 MatchesAlways, FromParent);
1728 if (DVarTemp.CKind != OMPC_unknown && DVarTemp.RefExpr)
1729 return DVarTemp;
1730
1731 DVar.CKind = OMPC_shared;
1732 return DVar;
1733 }
1734 }
1735
1736 // Explicitly specified attributes and local variables with predetermined
1737 // attributes.
1738 const_iterator I = begin();
1739 const_iterator EndI = end();
1740 if (FromParent && I != EndI)
1741 ++I;
1742 if (I == EndI)
1743 return DVar;
1744 auto It = I->SharingMap.find(D);
1745 if (It != I->SharingMap.end()) {
1746 const DSAInfo &Data = It->getSecond();
1747 DVar.RefExpr = Data.RefExpr.getPointer();
1748 DVar.PrivateCopy = Data.PrivateCopy;
1749 DVar.CKind = Data.Attributes;
1750 DVar.ImplicitDSALoc = I->DefaultAttrLoc;
1751 DVar.DKind = I->Directive;
1752 DVar.Modifier = Data.Modifier;
1753 DVar.AppliedToPointee = Data.AppliedToPointee;
1754 }
1755
1756 return DVar;
1757}
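For illustration only, a minimal OpenMP sketch of the predetermined rules getTopDSA() encodes; it is not part of SemaOpenMP.cpp, the names Counter and N are hypothetical, and -fopenmp-version=31 is assumed for the const rule:

    static int Counter;
    #pragma omp threadprivate(Counter)   // predetermined: threadprivate (OMPC_threadprivate)

    void f() {
      const int N = 100;                 // const type with no mutable members:
                                         // predetermined shared under OpenMP <= 3.1
      #pragma omp parallel
      {
        Counter += N;                    // Counter keeps its threadprivate DSA; N is shared
      }
    }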
1758
1759const DSAStackTy::DSAVarData DSAStackTy::getImplicitDSA(ValueDecl *D,
1760 bool FromParent) const {
1761 if (isStackEmpty()) {
1762 const_iterator I;
1763 return getDSA(I, D);
1764 }
1765 D = getCanonicalDecl(D);
1766 const_iterator StartI = begin();
1767 const_iterator EndI = end();
1768 if (FromParent && StartI != EndI)
1769 ++StartI;
1770 return getDSA(StartI, D);
1771}
1772
1773const DSAStackTy::DSAVarData DSAStackTy::getImplicitDSA(ValueDecl *D,
1774 unsigned Level) const {
1775 if (getStackSize() <= Level)
1776 return DSAVarData();
1777 D = getCanonicalDecl(D);
1778 const_iterator StartI = std::next(begin(), getStackSize() - 1 - Level);
1779 return getDSA(StartI, D);
1780}
1781
1782const DSAStackTy::DSAVarData
1783DSAStackTy::hasDSA(ValueDecl *D,
1784 const llvm::function_ref<bool(OpenMPClauseKind, bool)> CPred,
1785 const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
1786 bool FromParent) const {
1787 if (isStackEmpty())
1788 return {};
1789 D = getCanonicalDecl(D);
1790 const_iterator I = begin();
1791 const_iterator EndI = end();
1792 if (FromParent && I != EndI)
1793 ++I;
1794 for (; I != EndI; ++I) {
1795 if (!DPred(I->Directive) &&
1796 !isImplicitOrExplicitTaskingRegion(I->Directive))
1797 continue;
1798 const_iterator NewI = I;
1799 DSAVarData DVar = getDSA(NewI, D);
1800 if (I == NewI && CPred(DVar.CKind, DVar.AppliedToPointee))
1801 return DVar;
1802 }
1803 return {};
1804}
1805
1806const DSAStackTy::DSAVarData DSAStackTy::hasInnermostDSA(
1807 ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind, bool)> CPred,
1808 const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
1809 bool FromParent) const {
1810 if (isStackEmpty())
1811 return {};
1812 D = getCanonicalDecl(D);
1813 const_iterator StartI = begin();
1814 const_iterator EndI = end();
1815 if (FromParent && StartI != EndI)
1816 ++StartI;
1817 if (StartI == EndI || !DPred(StartI->Directive))
1818 return {};
1819 const_iterator NewI = StartI;
1820 DSAVarData DVar = getDSA(NewI, D);
1821 return (NewI == StartI && CPred(DVar.CKind, DVar.AppliedToPointee))
1822 ? DVar
1823 : DSAVarData();
1824}
1825
1826bool DSAStackTy::hasExplicitDSA(
1827 const ValueDecl *D,
1828 const llvm::function_ref<bool(OpenMPClauseKind, bool)> CPred,
1829 unsigned Level, bool NotLastprivate) const {
1830 if (getStackSize() <= Level)
1831 return false;
1832 D = getCanonicalDecl(D);
1833 const SharingMapTy &StackElem = getStackElemAtLevel(Level);
1834 auto I = StackElem.SharingMap.find(D);
1835 if (I != StackElem.SharingMap.end() && I->getSecond().RefExpr.getPointer() &&
1836 CPred(I->getSecond().Attributes, I->getSecond().AppliedToPointee) &&
1837 (!NotLastprivate || !I->getSecond().RefExpr.getInt()))
1838 return true;
1839 // Check predetermined rules for the loop control variables.
1840 auto LI = StackElem.LCVMap.find(D);
1841 if (LI != StackElem.LCVMap.end())
1842 return CPred(OMPC_private, /*AppliedToPointee=*/false);
1843 return false;
1844}
1845
1846bool DSAStackTy::hasExplicitDirective(
1847 const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
1848 unsigned Level) const {
1849 if (getStackSize() <= Level)
1850 return false;
1851 const SharingMapTy &StackElem = getStackElemAtLevel(Level);
1852 return DPred(StackElem.Directive);
1853}
1854
1855bool DSAStackTy::hasDirective(
1856 const llvm::function_ref<bool(OpenMPDirectiveKind,
1857 const DeclarationNameInfo &, SourceLocation)>
1858 DPred,
1859 bool FromParent) const {
1860 // We look only in the enclosing region.
1861 size_t Skip = FromParent ? 2 : 1;
1862 for (const_iterator I = begin() + std::min(Skip, getStackSize()), E = end();
1863 I != E; ++I) {
1864 if (DPred(I->Directive, I->DirectiveName, I->ConstructLoc))
1865 return true;
1866 }
1867 return false;
1868}
1869
1870void Sema::InitDataSharingAttributesStack() {
1871 VarDataSharingAttributesStack = new DSAStackTy(*this);
1872}
1873
1874#define DSAStack static_cast<DSAStackTy *>(VarDataSharingAttributesStack)
1875
1876void Sema::pushOpenMPFunctionRegion() {
1877 DSAStack->pushFunction();
1878}
1879
1880void Sema::popOpenMPFunctionRegion(const FunctionScopeInfo *OldFSI) {
1881 DSAStack->popFunction(OldFSI);
1882}
1883
1884static bool isOpenMPDeviceDelayedContext(Sema &S) {
1885 assert(S.LangOpts.OpenMP && S.LangOpts.OpenMPIsDevice &&
1886 "Expected OpenMP device compilation.");
1887 return !S.isInOpenMPTargetExecutionDirective();
1888}
1889
1890namespace {
1891/// Status of the function emission on the host/device.
1892enum class FunctionEmissionStatus {
1893 Emitted,
1894 Discarded,
1895 Unknown,
1896};
1897} // anonymous namespace
1898
1899Sema::SemaDiagnosticBuilder Sema::diagIfOpenMPDeviceCode(SourceLocation Loc,
1900 unsigned DiagID,
1901 FunctionDecl *FD) {
1902 assert(LangOpts.OpenMP && LangOpts.OpenMPIsDevice &&
1903 "Expected OpenMP device compilation.");
1904
1905 SemaDiagnosticBuilder::Kind Kind = SemaDiagnosticBuilder::K_Nop;
1906 if (FD) {
1907 FunctionEmissionStatus FES = getEmissionStatus(FD);
1908 switch (FES) {
1909 case FunctionEmissionStatus::Emitted:
1910 Kind = SemaDiagnosticBuilder::K_Immediate;
1911 break;
1912 case FunctionEmissionStatus::Unknown:
1913 // TODO: We should always delay diagnostics here in case a target
1914 // region is in a function we do not emit. However, as the
1915 // current diagnostics are associated with the function containing
1916 // the target region and we do not emit that one, we would miss out
1917 // on diagnostics for the target region itself. We need to anchor
1918 // the diagnostics with the new generated function *or* ensure we
1919 // emit diagnostics associated with the surrounding function.
1920 Kind = isOpenMPDeviceDelayedContext(*this)
1921 ? SemaDiagnosticBuilder::K_Deferred
1922 : SemaDiagnosticBuilder::K_Immediate;
1923 break;
1924 case FunctionEmissionStatus::TemplateDiscarded:
1925 case FunctionEmissionStatus::OMPDiscarded:
1926 Kind = SemaDiagnosticBuilder::K_Nop;
1927 break;
1928 case FunctionEmissionStatus::CUDADiscarded:
1929 llvm_unreachable("CUDADiscarded unexpected in OpenMP device compilation");
1930 break;
1931 }
1932 }
1933
1934 return SemaDiagnosticBuilder(Kind, Loc, DiagID, FD, *this);
1935}
1936
1937Sema::SemaDiagnosticBuilder Sema::diagIfOpenMPHostCode(SourceLocation Loc,
1938 unsigned DiagID,
1939 FunctionDecl *FD) {
1940 assert(LangOpts.OpenMP && !LangOpts.OpenMPIsDevice &&
1941 "Expected OpenMP host compilation.");
1942
1943 SemaDiagnosticBuilder::Kind Kind = SemaDiagnosticBuilder::K_Nop;
1944 if (FD) {
1945 FunctionEmissionStatus FES = getEmissionStatus(FD);
1946 switch (FES) {
1947 case FunctionEmissionStatus::Emitted:
1948 Kind = SemaDiagnosticBuilder::K_Immediate;
1949 break;
1950 case FunctionEmissionStatus::Unknown:
1951 Kind = SemaDiagnosticBuilder::K_Deferred;
1952 break;
1953 case FunctionEmissionStatus::TemplateDiscarded:
1954 case FunctionEmissionStatus::OMPDiscarded:
1955 case FunctionEmissionStatus::CUDADiscarded:
1956 Kind = SemaDiagnosticBuilder::K_Nop;
1957 break;
1958 }
1959 }
1960
1961 return SemaDiagnosticBuilder(Kind, Loc, DiagID, FD, *this);
1962}
1963
1964static OpenMPDefaultmapClauseKind
1965getVariableCategoryFromDecl(const LangOptions &LO, const ValueDecl *VD) {
1966 if (LO.OpenMP <= 45) {
1967 if (VD->getType().getNonReferenceType()->isScalarType())
1968 return OMPC_DEFAULTMAP_scalar;
1969 return OMPC_DEFAULTMAP_aggregate;
1970 }
1971 if (VD->getType().getNonReferenceType()->isAnyPointerType())
1972 return OMPC_DEFAULTMAP_pointer;
1973 if (VD->getType().getNonReferenceType()->isScalarType())
1974 return OMPC_DEFAULTMAP_scalar;
1975 return OMPC_DEFAULTMAP_aggregate;
1976}
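For illustration only, a hedged sketch of how the three categories returned above line up with defaultmap clauses; it is not part of SemaOpenMP.cpp, the variables s, p, and a are hypothetical, and OpenMP >= 5.0 is assumed so that the pointer category is distinct:

    int s;       // scalar    -> OMPC_DEFAULTMAP_scalar
    int *p;      // pointer   -> OMPC_DEFAULTMAP_pointer (OpenMP > 4.5 only)
    int a[8];    // aggregate -> OMPC_DEFAULTMAP_aggregate
    #pragma omp target defaultmap(firstprivate: scalar) defaultmap(tofrom: aggregate)
    {
      s++; a[0] = s;   // s follows the scalar category, a the aggregate one
    }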
1977
1978bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
1979 unsigned OpenMPCaptureLevel) const {
1980 assert(LangOpts.OpenMP && "OpenMP is not allowed");
1981
1982 ASTContext &Ctx = getASTContext();
1983 bool IsByRef = true;
1984
1985 // Find the directive that is associated with the provided scope.
1986 D = cast<ValueDecl>(D->getCanonicalDecl());
1987 QualType Ty = D->getType();
1988
1989 bool IsVariableUsedInMapClause = false;
1990 if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective, Level)) {
1991 // This table summarizes how a given variable should be passed to the device
1992 // given its type and the clauses where it appears. This table is based on
1993 // the description in OpenMP 4.5 [2.10.4, target Construct] and
1994 // OpenMP 4.5 [2.15.5, Data-mapping Attribute Rules and Clauses].
1995 //
1996 // =========================================================================
1997 // | type | defaultmap | pvt | first | is_device_ptr | map | res. |
1998 // | |(tofrom:scalar)| | pvt | | | |
1999 // =========================================================================
2000 // | scl | | | | - | | bycopy|
2001 // | scl | | - | x | - | - | bycopy|
2002 // | scl | | x | - | - | - | null |
2003 // | scl | x | | | - | | byref |
2004 // | scl | x | - | x | - | - | bycopy|
2005 // | scl | x | x | - | - | - | null |
2006 // | scl | | - | - | - | x | byref |
2007 // | scl | x | - | - | - | x | byref |
2008 //
2009 // | agg | n.a. | | | - | | byref |
2010 // | agg | n.a. | - | x | - | - | byref |
2011 // | agg | n.a. | x | - | - | - | null |
2012 // | agg | n.a. | - | - | - | x | byref |
2013 // | agg | n.a. | - | - | - | x[] | byref |
2014 //
2015 // | ptr | n.a. | | | - | | bycopy|
2016 // | ptr | n.a. | - | x | - | - | bycopy|
2017 // | ptr | n.a. | x | - | - | - | null |
2018 // | ptr | n.a. | - | - | - | x | byref |
2019 // | ptr | n.a. | - | - | - | x[] | bycopy|
2020 // | ptr | n.a. | - | - | x | | bycopy|
2021 // | ptr | n.a. | - | - | x | x | bycopy|
2022 // | ptr | n.a. | - | - | x | x[] | bycopy|
2023 // =========================================================================
2024 // Legend:
2025 // scl - scalar
2026 // ptr - pointer
2027 // agg - aggregate
2028 // x - applies
2029 // - - invalid in this combination
2030 // [] - mapped with an array section
2031 // byref - should be mapped by reference
2032 // bycopy - should be mapped by value (by copy)
2033 // null - initialize a local variable to null on the device
2034 //
2035 // Observations:
2036 // - All scalar declarations that show up in a map clause have to be passed
2037 // by reference, because they may have been mapped in the enclosing data
2038 // environment.
2039 // - If the scalar value does not fit the size of uintptr, it has to be
2040 // passed by reference, regardless the result in the table above.
2041 // - For pointers mapped by value that have either an implicit map or an
2042 // array section, the runtime library may pass the NULL value to the
2043 // device instead of the value passed to it by the compiler.
2044
2045 if (Ty->isReferenceType())
2046 Ty = Ty->castAs<ReferenceType>()->getPointeeType();
2047
2048 // Locate map clauses and see if the variable being captured is referred to
2049 // in any of those clauses. Here we only care about variables, not fields,
2050 // because fields are part of aggregates.
2051 bool IsVariableAssociatedWithSection = false;
2052
2053 DSAStack->checkMappableExprComponentListsForDeclAtLevel(
2054 D, Level,
2055 [&IsVariableUsedInMapClause, &IsVariableAssociatedWithSection, D](
2056 OMPClauseMappableExprCommon::MappableExprComponentListRef
2057 MapExprComponents,
2058 OpenMPClauseKind WhereFoundClauseKind) {
2059 // Only the map clause information influences how a variable is
2060 // captured. E.g. is_device_ptr does not require changing the default
2061 // behavior.
2062 if (WhereFoundClauseKind != OMPC_map)
2063 return false;
2064
2065 auto EI = MapExprComponents.rbegin();
2066 auto EE = MapExprComponents.rend();
2067
2068 assert(EI != EE && "Invalid map expression!");
2069
2070 if (isa<DeclRefExpr>(EI->getAssociatedExpression()))
2071 IsVariableUsedInMapClause |= EI->getAssociatedDeclaration() == D;
2072
2073 ++EI;
2074 if (EI == EE)
2075 return false;
2076
2077 if (isa<ArraySubscriptExpr>(EI->getAssociatedExpression()) ||
2078 isa<OMPArraySectionExpr>(EI->getAssociatedExpression()) ||
2079 isa<MemberExpr>(EI->getAssociatedExpression()) ||
2080 isa<OMPArrayShapingExpr>(EI->getAssociatedExpression())) {
2081 IsVariableAssociatedWithSection = true;
2082 // There is nothing more we need to know about this variable.
2083 return true;
2084 }
2085
2086 // Keep looking for more map info.
2087 return false;
2088 });
2089
2090 if (IsVariableUsedInMapClause) {
2091 // If variable is identified in a map clause it is always captured by
2092 // reference except if it is a pointer that is dereferenced somehow.
2093 IsByRef = !(Ty->isPointerType() && IsVariableAssociatedWithSection);
2094 } else {
2095 // By default, all the data that has a scalar type is mapped by copy
2096 // (except for reduction variables).
2097 // Defaultmap scalar is mutual exclusive to defaultmap pointer
2098 IsByRef = (DSAStack->isForceCaptureByReferenceInTargetExecutable() &&
2099 !Ty->isAnyPointerType()) ||
2100 !Ty->isScalarType() ||
2101 DSAStack->isDefaultmapCapturedByRef(
2102 Level, getVariableCategoryFromDecl(LangOpts, D)) ||
2103 DSAStack->hasExplicitDSA(
2104 D,
2105 [](OpenMPClauseKind K, bool AppliedToPointee) {
2106 return K == OMPC_reduction && !AppliedToPointee;
2107 },
2108 Level);
2109 }
2110 }
2111
2112 if (IsByRef && Ty.getNonReferenceType()->isScalarType()) {
2113 IsByRef =
2114 ((IsVariableUsedInMapClause &&
2115 DSAStack->getCaptureRegion(Level, OpenMPCaptureLevel) ==
2116 OMPD_target) ||
2117 !(DSAStack->hasExplicitDSA(
2118 D,
2119 [](OpenMPClauseKind K, bool AppliedToPointee) -> bool {
2120 return K == OMPC_firstprivate ||
2121 (K == OMPC_reduction && AppliedToPointee);
2122 },
2123 Level, /*NotLastprivate=*/true) ||
2124 DSAStack->isUsesAllocatorsDecl(Level, D))) &&
2125 // If the variable is artificial and must be captured by value - try to
2126 // capture by value.
2127 !(isa<OMPCapturedExprDecl>(D) && !D->hasAttr<OMPCaptureNoInitAttr>() &&
2128 !cast<OMPCapturedExprDecl>(D)->getInit()->isGLValue()) &&
2129 // If the variable is implicitly firstprivate and scalar - capture by
2130 // copy
2131 !(DSAStack->getDefaultDSA() == DSA_firstprivate &&
2132 !DSAStack->hasExplicitDSA(
2133 D, [](OpenMPClauseKind K, bool) { return K != OMPC_unknown; },
2134 Level) &&
2135 !DSAStack->isLoopControlVariable(D, Level).first);
2136 }
2137
2138 // When passing data by copy, we need to make sure it fits the uintptr size
2139 // and alignment, because the runtime library only deals with uintptr types.
2140 // If it does not fit the uintptr size, we need to pass the data by reference
2141 // instead.
2142 if (!IsByRef &&
2143 (Ctx.getTypeSizeInChars(Ty) >
2144 Ctx.getTypeSizeInChars(Ctx.getUIntPtrType()) ||
2145 Ctx.getDeclAlign(D) > Ctx.getTypeAlignInChars(Ctx.getUIntPtrType()))) {
2146 IsByRef = true;
2147 }
2148
2149 return IsByRef;
2150}
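For illustration only, a hedged sketch of the capture table documented inside isOpenMPCapturedByRef(); it is not part of SemaOpenMP.cpp, and the names s, p, buf, n, and use() are hypothetical:

    int s = 0;
    int *p = buf;
    #pragma omp target map(tofrom: s)    // scalar in a map clause        -> captured byref
    { s += 1; }
    #pragma omp target firstprivate(s)   // scalar firstprivate           -> captured bycopy
    { use(s); }
    #pragma omp target map(p[0:n])       // pointer with an array section -> p itself bycopy
    { p[0] = 1; }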
2151
2152unsigned Sema::getOpenMPNestingLevel() const {
2153 assert(getLangOpts().OpenMP);
2154 return DSAStack->getNestingLevel();
2155}
2156
2157bool Sema::isInOpenMPTargetExecutionDirective() const {
2158 return (isOpenMPTargetExecutionDirective(DSAStack->getCurrentDirective()) &&
2159 !DSAStack->isClauseParsingMode()) ||
2160 DSAStack->hasDirective(
2161 [](OpenMPDirectiveKind K, const DeclarationNameInfo &,
2162 SourceLocation) -> bool {
2163 return isOpenMPTargetExecutionDirective(K);
2164 },
2165 false);
2166}
2167
2168VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo,
2169 unsigned StopAt) {
2170 assert(LangOpts.OpenMP && "OpenMP is not allowed");
2171 D = getCanonicalDecl(D);
2172
2173 auto *VD = dyn_cast<VarDecl>(D);
2174 // Do not capture constexpr variables.
2175 if (VD && VD->isConstexpr())
2176 return nullptr;
2177
2178 // If we want to determine whether the variable should be captured from the
2179 // perspective of the current capturing scope, and we've already left all the
2180 // capturing scopes of the top directive on the stack, check from the
2181 // perspective of its parent directive (if any) instead.
2182 DSAStackTy::ParentDirectiveScope InParentDirectiveRAII(
2183 *DSAStack, CheckScopeInfo && DSAStack->isBodyComplete());
2184
2185 // If we are attempting to capture a global variable in a directive with
2186 // 'target' we return true so that this global is also mapped to the device.
2187 //
2188 if (VD && !VD->hasLocalStorage() &&
2189 (getCurCapturedRegion() || getCurBlock() || getCurLambda())) {
2190 if (isInOpenMPTargetExecutionDirective()) {
2191 DSAStackTy::DSAVarData DVarTop =
2192 DSAStack->getTopDSA(D, DSAStack->isClauseParsingMode());
2193 if (DVarTop.CKind != OMPC_unknown && DVarTop.RefExpr)
2194 return VD;
2195 // If the declaration is enclosed in a 'declare target' directive,
2196 // then it should not be captured.
2197 //
2198 if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
2199 return nullptr;
2200 CapturedRegionScopeInfo *CSI = nullptr;
2201 for (FunctionScopeInfo *FSI : llvm::drop_begin(
2202 llvm::reverse(FunctionScopes),
2203 CheckScopeInfo ? (FunctionScopes.size() - (StopAt + 1)) : 0)) {
2204 if (!isa<CapturingScopeInfo>(FSI))
2205 return nullptr;
2206 if (auto *RSI = dyn_cast<CapturedRegionScopeInfo>(FSI))
2207 if (RSI->CapRegionKind == CR_OpenMP) {
2208 CSI = RSI;
2209 break;
2210 }
2211 }
2212 assert(CSI && "Failed to find CapturedRegionScopeInfo");
2213 SmallVector<OpenMPDirectiveKind, 4> Regions;
2214 getOpenMPCaptureRegions(Regions,
2215 DSAStack->getDirective(CSI->OpenMPLevel));
2216 if (Regions[CSI->OpenMPCaptureLevel] != OMPD_task)
2217 return VD;
2218 }
2219 if (isInOpenMPDeclareTargetContext()) {
2220 // Try to mark variable as declare target if it is used in capturing
2221 // regions.
2222 if (LangOpts.OpenMP <= 45 &&
2223 !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
2224 checkDeclIsAllowedInOpenMPTarget(nullptr, VD);
2225 return nullptr;
2226 }
2227 }
2228
2229 if (CheckScopeInfo) {
2230 bool OpenMPFound = false;
2231 for (unsigned I = StopAt + 1; I > 0; --I) {
2232 FunctionScopeInfo *FSI = FunctionScopes[I - 1];
2233 if (!isa<CapturingScopeInfo>(FSI))
2234 return nullptr;
2235 if (auto *RSI = dyn_cast<CapturedRegionScopeInfo>(FSI))
2236 if (RSI->CapRegionKind == CR_OpenMP) {
2237 OpenMPFound = true;
2238 break;
2239 }
2240 }
2241 if (!OpenMPFound)
2242 return nullptr;
2243 }
2244
2245 if (DSAStack->getCurrentDirective() != OMPD_unknown &&
2246 (!DSAStack->isClauseParsingMode() ||
2247 DSAStack->getParentDirective() != OMPD_unknown)) {
2248 auto &&Info = DSAStack->isLoopControlVariable(D);
2249 if (Info.first ||
2250 (VD && VD->hasLocalStorage() &&
2251 isImplicitOrExplicitTaskingRegion(DSAStack->getCurrentDirective())) ||
2252 (VD && DSAStack->isForceVarCapturing()))
2253 return VD ? VD : Info.second;
2254 DSAStackTy::DSAVarData DVarTop =
2255 DSAStack->getTopDSA(D, DSAStack->isClauseParsingMode());
2256 if (DVarTop.CKind != OMPC_unknown && isOpenMPPrivate(DVarTop.CKind) &&
2257 (!VD || VD->hasLocalStorage() || !DVarTop.AppliedToPointee))
2258 return VD ? VD : cast<VarDecl>(DVarTop.PrivateCopy->getDecl());
2259 // Threadprivate variables must not be captured.
2260 if (isOpenMPThreadPrivate(DVarTop.CKind))
2261 return nullptr;
2262 // The variable is not private or it is the variable in the directive with
2263 // default(none) clause and not used in any clause.
2264 DSAStackTy::DSAVarData DVarPrivate = DSAStack->hasDSA(
2265 D,
2266 [](OpenMPClauseKind C, bool AppliedToPointee) {
2267 return isOpenMPPrivate(C) && !AppliedToPointee;
2268 },
2269 [](OpenMPDirectiveKind) { return true; },
2270 DSAStack->isClauseParsingMode());
2271 // Global shared must not be captured.
2272 if (VD && !VD->hasLocalStorage() && DVarPrivate.CKind == OMPC_unknown &&
2273 ((DSAStack->getDefaultDSA() != DSA_none &&
2274 DSAStack->getDefaultDSA() != DSA_firstprivate) ||
2275 DVarTop.CKind == OMPC_shared))
2276 return nullptr;
2277 if (DVarPrivate.CKind != OMPC_unknown ||
2278 (VD && (DSAStack->getDefaultDSA() == DSA_none ||
2279 DSAStack->getDefaultDSA() == DSA_firstprivate)))
2280 return VD ? VD : cast<VarDecl>(DVarPrivate.PrivateCopy->getDecl());
2281 }
2282 return nullptr;
2283}
2284
2285void Sema::adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
2286 unsigned Level) const {
2287 FunctionScopesIndex -= getOpenMPCaptureLevels(DSAStack->getDirective(Level));
2288}
2289
2290void Sema::startOpenMPLoop() {
2291 assert(LangOpts.OpenMP && "OpenMP must be enabled.");
2292 if (isOpenMPLoopDirective(DSAStack->getCurrentDirective()))
2293 DSAStack->loopInit();
2294}
2295
2296void Sema::startOpenMPCXXRangeFor() {
2297 assert(LangOpts.OpenMP && "OpenMP must be enabled.");
2298 if (isOpenMPLoopDirective(DSAStack->getCurrentDirective())) {
2299 DSAStack->resetPossibleLoopCounter();
2300 DSAStack->loopStart();
2301 }
2302}
2303
2304OpenMPClauseKind Sema::isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
2305 unsigned CapLevel) const {
2306 assert(LangOpts.OpenMP && "OpenMP is not allowed");
2307 if (DSAStack->hasExplicitDirective(
2308 [](OpenMPDirectiveKind K) { return isOpenMPTaskingDirective(K); },
2309 Level)) {
2310 bool IsTriviallyCopyable =
2311 D->getType().getNonReferenceType().isTriviallyCopyableType(Context) &&
2312 !D->getType()
2313 .getNonReferenceType()
2314 .getCanonicalType()
2315 ->getAsCXXRecordDecl();
2316 OpenMPDirectiveKind DKind = DSAStack->getDirective(Level);
2317 SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
2318 getOpenMPCaptureRegions(CaptureRegions, DKind);
2319 if (isOpenMPTaskingDirective(CaptureRegions[CapLevel]) &&
2320 (IsTriviallyCopyable ||
2321 !isOpenMPTaskLoopDirective(CaptureRegions[CapLevel]))) {
2322 if (DSAStack->hasExplicitDSA(
2323 D,
2324 [](OpenMPClauseKind K, bool) { return K == OMPC_firstprivate; },
2325 Level, /*NotLastprivate=*/true))
2326 return OMPC_firstprivate;
2327 DSAStackTy::DSAVarData DVar = DSAStack->getImplicitDSA(D, Level);
2328 if (DVar.CKind != OMPC_shared &&
2329 !DSAStack->isLoopControlVariable(D, Level).first && !DVar.RefExpr) {
2330 DSAStack->addImplicitTaskFirstprivate(Level, D);
2331 return OMPC_firstprivate;
2332 }
2333 }
2334 }
2335 if (isOpenMPLoopDirective(DSAStack->getCurrentDirective())) {
2336 if (DSAStack->getAssociatedLoops() > 0 &&
2337 !DSAStack->isLoopStarted()) {
2338 DSAStack->resetPossibleLoopCounter(D);
2339 DSAStack->loopStart();
2340 return OMPC_private;
2341 }
2342 if ((DSAStack->getPossiblyLoopCunter() == D->getCanonicalDecl() ||
2343 DSAStack->isLoopControlVariable(D).first) &&
2344 !DSAStack->hasExplicitDSA(
2345 D, [](OpenMPClauseKind K, bool) { return K != OMPC_private; },
2346 Level) &&
2347 !isOpenMPSimdDirective(DSAStack->getCurrentDirective()))
2348 return OMPC_private;
2349 }
2350 if (const auto *VD = dyn_cast<VarDecl>(D)) {
2351 if (DSAStack->isThreadPrivate(const_cast<VarDecl *>(VD)) &&
2352 DSAStack->isForceVarCapturing() &&
2353 !DSAStack->hasExplicitDSA(
2354 D, [](OpenMPClauseKind K, bool) { return K == OMPC_copyin; },
2355 Level))
2356 return OMPC_private;
2357 }
2358 // User-defined allocators are private since they must be defined in the
2359 // context of target region.
2360 if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective, Level) &&
2361 DSAStack->isUsesAllocatorsDecl(Level, D).getValueOr(
2362 DSAStackTy::UsesAllocatorsDeclKind::AllocatorTrait) ==
2363 DSAStackTy::UsesAllocatorsDeclKind::UserDefinedAllocator)
2364 return OMPC_private;
2365 return (DSAStack->hasExplicitDSA(
2366 D, [](OpenMPClauseKind K, bool) { return K == OMPC_private; },
2367 Level) ||
2368 (DSAStack->isClauseParsingMode() &&
2369 DSAStack->getClauseParsingMode() == OMPC_private) ||
2370 // Consider taskgroup reduction descriptor variable a private
2371 // to avoid possible capture in the region.
2372 (DSAStack->hasExplicitDirective(
2373 [](OpenMPDirectiveKind K) {
2374 return K == OMPD_taskgroup ||
2375 ((isOpenMPParallelDirective(K) ||
2376 isOpenMPWorksharingDirective(K)) &&
2377 !isOpenMPSimdDirective(K));
2378 },
2379 Level) &&
2380 DSAStack->isTaskgroupReductionRef(D, Level)))
2381 ? OMPC_private
2382 : OMPC_unknown;
2383}
2384
2385void Sema::setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D,
2386 unsigned Level) {
2387 assert(LangOpts.OpenMP && "OpenMP is not allowed");
2388 D = getCanonicalDecl(D);
2389 OpenMPClauseKind OMPC = OMPC_unknown;
2390 for (unsigned I = DSAStack->getNestingLevel() + 1; I > Level; --I) {
2391 const unsigned NewLevel = I - 1;
2392 if (DSAStack->hasExplicitDSA(
2393 D,
2394 [&OMPC](const OpenMPClauseKind K, bool AppliedToPointee) {
2395 if (isOpenMPPrivate(K) && !AppliedToPointee) {
2396 OMPC = K;
2397 return true;
2398 }
2399 return false;
2400 },
2401 NewLevel))
2402 break;
2403 if (DSAStack->checkMappableExprComponentListsForDeclAtLevel(
2404 D, NewLevel,
2405 [](OMPClauseMappableExprCommon::MappableExprComponentListRef,
2406 OpenMPClauseKind) { return true; })) {
2407 OMPC = OMPC_map;
2408 break;
2409 }
2410 if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective,
2411 NewLevel)) {
2412 OMPC = OMPC_map;
2413 if (DSAStack->mustBeFirstprivateAtLevel(
2414 NewLevel, getVariableCategoryFromDecl(LangOpts, D)))
2415 OMPC = OMPC_firstprivate;
2416 break;
2417 }
2418 }
2419 if (OMPC != OMPC_unknown)
2420 FD->addAttr(OMPCaptureKindAttr::CreateImplicit(Context, unsigned(OMPC)));
2421}
2422
2423bool Sema::isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level,
2424 unsigned CaptureLevel) const {
2425 assert(LangOpts.OpenMP && "OpenMP is not allowed");
2426 // Return true if the current level is no longer enclosed in a target region.
2427
2428 SmallVector<OpenMPDirectiveKind, 4> Regions;
2429 getOpenMPCaptureRegions(Regions, DSAStack->getDirective(Level));
2430 const auto *VD = dyn_cast<VarDecl>(D);
2431 return VD && !VD->hasLocalStorage() &&
2432 DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective,
2433 Level) &&
2434 Regions[CaptureLevel] != OMPD_task;
2435}
2436
2437bool Sema::isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level,
2438 unsigned CaptureLevel) const {
2439 assert(LangOpts.OpenMP && "OpenMP is not allowed");
2440 // Return true if the current level is no longer enclosed in a target region.
2441
2442 if (const auto *VD = dyn_cast<VarDecl>(D)) {
2443 if (!VD->hasLocalStorage()) {
2444 if (isInOpenMPTargetExecutionDirective())
2445 return true;
2446 DSAStackTy::DSAVarData TopDVar =
2447 DSAStack->getTopDSA(D, /*FromParent=*/false);
2448 unsigned NumLevels =
2449 getOpenMPCaptureLevels(DSAStack->getDirective(Level));
2450 if (Level == 0)
2451 return (NumLevels == CaptureLevel + 1) && TopDVar.CKind != OMPC_shared;
2452 do {
2453 --Level;
2454 DSAStackTy::DSAVarData DVar = DSAStack->getImplicitDSA(D, Level);
2455 if (DVar.CKind != OMPC_shared)
2456 return true;
2457 } while (Level > 0);
2458 }
2459 }
2460 return true;
2461}
2462
2463void Sema::DestroyDataSharingAttributesStack() { delete DSAStack; }
2464
2465void Sema::ActOnOpenMPBeginDeclareVariant(SourceLocation Loc,
2466 OMPTraitInfo &TI) {
2467 OMPDeclareVariantScopes.push_back(OMPDeclareVariantScope(TI));
2468}
2469
2470void Sema::ActOnOpenMPEndDeclareVariant() {
2471 assert(isInOpenMPDeclareVariantScope() &&
2472 "Not in OpenMP declare variant scope!");
2473
2474 OMPDeclareVariantScopes.pop_back();
2475}
2476
2477void Sema::finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
2478 const FunctionDecl *Callee,
2479 SourceLocation Loc) {
2480 assert(LangOpts.OpenMP && "Expected OpenMP compilation mode.");
2481 Optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
2482 OMPDeclareTargetDeclAttr::getDeviceType(Caller->getMostRecentDecl());
2483 // Ignore host functions during device analysis.
2484 if (LangOpts.OpenMPIsDevice &&
2485 (!DevTy || *DevTy == OMPDeclareTargetDeclAttr::DT_Host))
2486 return;
2487 // Ignore nohost functions during host analysis.
2488 if (!LangOpts.OpenMPIsDevice && DevTy &&
2489 *DevTy == OMPDeclareTargetDeclAttr::DT_NoHost)
2490 return;
2491 const FunctionDecl *FD = Callee->getMostRecentDecl();
2492 DevTy = OMPDeclareTargetDeclAttr::getDeviceType(FD);
2493 if (LangOpts.OpenMPIsDevice && DevTy &&
2494 *DevTy == OMPDeclareTargetDeclAttr::DT_Host) {
2495 // Diagnose host function called during device codegen.
2496 StringRef HostDevTy =
2497 getOpenMPSimpleClauseTypeName(OMPC_device_type, OMPC_DEVICE_TYPE_host);
2498 Diag(Loc, diag::err_omp_wrong_device_function_call) << HostDevTy << 0;
2499 Diag(*OMPDeclareTargetDeclAttr::getLocation(FD),
2500 diag::note_omp_marked_device_type_here)
2501 << HostDevTy;
2502 return;
2503 }
2504 if (!LangOpts.OpenMPIsDevice && DevTy &&
2505 *DevTy == OMPDeclareTargetDeclAttr::DT_NoHost) {
2506 // Diagnose nohost function called during host codegen.
2507 StringRef NoHostDevTy = getOpenMPSimpleClauseTypeName(
2508 OMPC_device_type, OMPC_DEVICE_TYPE_nohost);
2509 Diag(Loc, diag::err_omp_wrong_device_function_call) << NoHostDevTy << 1;
2510 Diag(*OMPDeclareTargetDeclAttr::getLocation(FD),
2511 diag::note_omp_marked_device_type_here)
2512 << NoHostDevTy;
2513 }
2514}
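For illustration only, a hedged sketch of the device_type check performed above; it is not part of SemaOpenMP.cpp and the function names host_only() and caller() are hypothetical:

    #pragma omp declare target device_type(host)
    void host_only();
    #pragma omp end declare target

    void caller() {
    #pragma omp target
      host_only();   // during device compilation: err_omp_wrong_device_function_call,
                     // with note_omp_marked_device_type_here pointing at the attribute
    }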
2515
2516void Sema::StartOpenMPDSABlock(OpenMPDirectiveKind DKind,
2517 const DeclarationNameInfo &DirName,
2518 Scope *CurScope, SourceLocation Loc) {
2519 DSAStack->push(DKind, DirName, CurScope, Loc);
2520 PushExpressionEvaluationContext(
2521 ExpressionEvaluationContext::PotentiallyEvaluated);
2522}
2523
2524void Sema::StartOpenMPClause(OpenMPClauseKind K) {
2525 DSAStack->setClauseParsingMode(K);
2526}
2527
2528void Sema::EndOpenMPClause() {
2529 DSAStack->setClauseParsingMode(/*K=*/OMPC_unknown);
2530 CleanupVarDeclMarking();
2531}
2532
2533static std::pair<ValueDecl *, bool>
2534getPrivateItem(Sema &S, Expr *&RefExpr, SourceLocation &ELoc,
2535 SourceRange &ERange, bool AllowArraySection = false);
2536
2537/// Check consistency of the reduction clauses.
2538static void checkReductionClauses(Sema &S, DSAStackTy *Stack,
2539 ArrayRef<OMPClause *> Clauses) {
2540 bool InscanFound = false;
2541 SourceLocation InscanLoc;
2542 // OpenMP 5.0, 2.19.5.4 reduction Clause, Restrictions.
2543 // A reduction clause without the inscan reduction-modifier may not appear on
2544 // a construct on which a reduction clause with the inscan reduction-modifier
2545 // appears.
2546 for (OMPClause *C : Clauses) {
2547 if (C->getClauseKind() != OMPC_reduction)
2548 continue;
2549 auto *RC = cast<OMPReductionClause>(C);
2550 if (RC->getModifier() == OMPC_REDUCTION_inscan) {
2551 InscanFound = true;
2552 InscanLoc = RC->getModifierLoc();
2553 continue;
2554 }
2555 if (RC->getModifier() == OMPC_REDUCTION_task) {
2556 // OpenMP 5.0, 2.19.5.4 reduction Clause.
2557 // A reduction clause with the task reduction-modifier may only appear on
2558 // a parallel construct, a worksharing construct or a combined or
2559 // composite construct for which any of the aforementioned constructs is a
2560 // constituent construct and simd or loop are not constituent constructs.
2561 OpenMPDirectiveKind CurDir = Stack->getCurrentDirective();
2562 if (!(isOpenMPParallelDirective(CurDir) ||
2563 isOpenMPWorksharingDirective(CurDir)) ||
2564 isOpenMPSimdDirective(CurDir))
2565 S.Diag(RC->getModifierLoc(),
2566 diag::err_omp_reduction_task_not_parallel_or_worksharing);
2567 continue;
2568 }
2569 }
2570 if (InscanFound) {
2571 for (OMPClause *C : Clauses) {
2572 if (C->getClauseKind() != OMPC_reduction)
2573 continue;
2574 auto *RC = cast<OMPReductionClause>(C);
2575 if (RC->getModifier() != OMPC_REDUCTION_inscan) {
2576 S.Diag(RC->getModifier() == OMPC_REDUCTION_unknown
2577 ? RC->getBeginLoc()
2578 : RC->getModifierLoc(),
2579 diag::err_omp_inscan_reduction_expected);
2580 S.Diag(InscanLoc, diag::note_omp_previous_inscan_reduction);
2581 continue;
2582 }
2583 for (Expr *Ref : RC->varlists()) {
2584 assert(Ref && "NULL expr in OpenMP nontemporal clause.");
2585 SourceLocation ELoc;
2586 SourceRange ERange;
2587 Expr *SimpleRefExpr = Ref;
2588 auto Res = getPrivateItem(S, SimpleRefExpr, ELoc, ERange,
2589 /*AllowArraySection=*/true);
2590 ValueDecl *D = Res.first;
2591 if (!D)
2592 continue;
2593 if (!Stack->isUsedInScanDirective(getCanonicalDecl(D))) {
2594 S.Diag(Ref->getExprLoc(),
2595 diag::err_omp_reduction_not_inclusive_exclusive)
2596 << Ref->getSourceRange();
2597 }
2598 }
2599 }
2600 }
2601}
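For illustration only, a hedged sketch of the two restrictions checkReductionClauses() enforces; it is not part of SemaOpenMP.cpp and the names a, b, and n are hypothetical:

    #pragma omp simd reduction(task, +: a)        // task modifier on a simd construct:
    for (int i = 0; i < n; ++i)                   //   err_omp_reduction_task_not_parallel_or_worksharing
      a += i;

    #pragma omp for reduction(inscan, +: a) reduction(+: b)   // non-inscan mixed with inscan:
    for (int i = 0; i < n; ++i) {                             //   err_omp_inscan_reduction_expected
      a += i; b += i;   // (an inscan list item also needs a scan directive in the loop, omitted here)
    }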
2602
2603static void checkAllocateClauses(Sema &S, DSAStackTy *Stack,
2604 ArrayRef<OMPClause *> Clauses);
2605static DeclRefExpr *buildCapture(Sema &S, ValueDecl *D, Expr *CaptureExpr,
2606 bool WithInit);
2607
2608static void reportOriginalDsa(Sema &SemaRef, const DSAStackTy *Stack,
2609 const ValueDecl *D,
2610 const DSAStackTy::DSAVarData &DVar,
2611 bool IsLoopIterVar = false);
2612
2613void Sema::EndOpenMPDSABlock(Stmt *CurDirective) {
2614 // OpenMP [2.14.3.5, Restrictions, C/C++, p.1]
2615 // A variable of class type (or array thereof) that appears in a lastprivate
2616 // clause requires an accessible, unambiguous default constructor for the
2617 // class type, unless the list item is also specified in a firstprivate
2618 // clause.
2619 if (const auto *D = dyn_cast_or_null<OMPExecutableDirective>(CurDirective)) {
2620 for (OMPClause *C : D->clauses()) {
2621 if (auto *Clause = dyn_cast<OMPLastprivateClause>(C)) {
2622 SmallVector<Expr *, 8> PrivateCopies;
2623 for (Expr *DE : Clause->varlists()) {
2624 if (DE->isValueDependent() || DE->isTypeDependent()) {
2625 PrivateCopies.push_back(nullptr);
2626 continue;
2627 }
2628 auto *DRE = cast<DeclRefExpr>(DE->IgnoreParens());
2629 auto *VD = cast<VarDecl>(DRE->getDecl());
2630 QualType Type = VD->getType().getNonReferenceType();
2631 const DSAStackTy::DSAVarData DVar =
2632 DSAStack->getTopDSA(VD, /*FromParent=*/false);
2633 if (DVar.CKind == OMPC_lastprivate) {
2634 // Generate helper private variable and initialize it with the
2635 // default value. The address of the original variable is replaced
2636 // by the address of the new private variable in CodeGen. This new
2637 // variable is not added to IdResolver, so the code in the OpenMP
2638 // region uses original variable for proper diagnostics.
2639 VarDecl *VDPrivate = buildVarDecl(
2640 *this, DE->getExprLoc(), Type.getUnqualifiedType(),
2641 VD->getName(), VD->hasAttrs() ? &VD->getAttrs() : nullptr, DRE);
2642 ActOnUninitializedDecl(VDPrivate);
2643 if (VDPrivate->isInvalidDecl()) {
2644 PrivateCopies.push_back(nullptr);
2645 continue;
2646 }
2647 PrivateCopies.push_back(buildDeclRefExpr(
2648 *this, VDPrivate, DE->getType(), DE->getExprLoc()));
2649 } else {
2650 // The variable is also a firstprivate, so initialization sequence
2651 // for private copy is generated already.
2652 PrivateCopies.push_back(nullptr);
2653 }
2654 }
2655 Clause->setPrivateCopies(PrivateCopies);
2656 continue;
2657 }
2658 // Finalize nontemporal clause by handling private copies, if any.
2659 if (auto *Clause = dyn_cast<OMPNontemporalClause>(C)) {
2660 SmallVector<Expr *, 8> PrivateRefs;
2661 for (Expr *RefExpr : Clause->varlists()) {
2662 assert(RefExpr && "NULL expr in OpenMP nontemporal clause.");
2663 SourceLocation ELoc;
2664 SourceRange ERange;
2665 Expr *SimpleRefExpr = RefExpr;
2666 auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange);
2667 if (Res.second)
2668 // It will be analyzed later.
2669 PrivateRefs.push_back(RefExpr);
2670 ValueDecl *D = Res.first;
2671 if (!D)
2672 continue;
2673
2674 const DSAStackTy::DSAVarData DVar =
2675 DSAStack->getTopDSA(D, /*FromParent=*/false);
2676 PrivateRefs.push_back(DVar.PrivateCopy ? DVar.PrivateCopy
2677 : SimpleRefExpr);
2678 }
2679 Clause->setPrivateRefs(PrivateRefs);
2680 continue;
2681 }
2682 if (auto *Clause = dyn_cast<OMPUsesAllocatorsClause>(C)) {
2683 for (unsigned I = 0, E = Clause->getNumberOfAllocators(); I < E; ++I) {
2684 OMPUsesAllocatorsClause::Data D = Clause->getAllocatorData(I);
2685 auto *DRE = dyn_cast<DeclRefExpr>(D.Allocator->IgnoreParenImpCasts());
2686 if (!DRE)
2687 continue;
2688 ValueDecl *VD = DRE->getDecl();
2689 if (!VD || !isa<VarDecl>(VD))
2690 continue;
2691 DSAStackTy::DSAVarData DVar =
2692 DSAStack->getTopDSA(VD, /*FromParent=*/false);
2693 // OpenMP [2.12.5, target Construct]
2694 // Memory allocators that appear in a uses_allocators clause cannot
2695 // appear in other data-sharing attribute clauses or data-mapping
2696 // attribute clauses in the same construct.
2697 Expr *MapExpr = nullptr;
2698 if (DVar.RefExpr ||
2699 DSAStack->checkMappableExprComponentListsForDecl(
2700 VD, /*CurrentRegionOnly=*/true,
2701 [VD, &MapExpr](
2702 OMPClauseMappableExprCommon::MappableExprComponentListRef
2703 MapExprComponents,
2704 OpenMPClauseKind C) {
2705 auto MI = MapExprComponents.rbegin();
2706 auto ME = MapExprComponents.rend();
2707 if (MI != ME &&
2708 MI->getAssociatedDeclaration()->getCanonicalDecl() ==
2709 VD->getCanonicalDecl()) {
2710 MapExpr = MI->getAssociatedExpression();
2711 return true;
2712 }
2713 return false;
2714 })) {
2715 Diag(D.Allocator->getExprLoc(),
2716 diag::err_omp_allocator_used_in_clauses)
2717 << D.Allocator->getSourceRange();
2718 if (DVar.RefExpr)
2719 reportOriginalDsa(*this, DSAStack, VD, DVar);
2720 else
2721 Diag(MapExpr->getExprLoc(), diag::note_used_here)
2722 << MapExpr->getSourceRange();
2723 }
2724 }
2725 continue;
2726 }
2727 }
2728 // Check allocate clauses.
2729 if (!CurContext->isDependentContext())
2730 checkAllocateClauses(*this, DSAStack, D->clauses());
2731 checkReductionClauses(*this, DSAStack, D->clauses());
2732 }
2733
2734 DSAStack->pop();
2735 DiscardCleanupsInEvaluationContext();
2736 PopExpressionEvaluationContext();
2737}
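
As context for the lastprivate handling above, a minimal hypothetical OpenMP snippet (not part of the analyzed file) showing why EndOpenMPDSABlock needs to default-construct a helper private copy unless the list item is also firstprivate:

    // Illustrative sketch only; assumes a C++ compiler with -fopenmp.
    struct NoDefault {
      NoDefault(int) {}                      // no accessible default constructor
    };

    void use() {
      NoDefault x(0);
    #pragma omp parallel for lastprivate(x)  // rejected: the helper private copy
      for (int i = 0; i < 8; ++i) {}         // cannot be default-constructed
    #pragma omp parallel for firstprivate(x) lastprivate(x)
      for (int i = 0; i < 8; ++i) {}         // accepted: the copy is copy-initialized
    }
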
2738
2739static bool FinishOpenMPLinearClause(OMPLinearClause &Clause, DeclRefExpr *IV,
2740 Expr *NumIterations, Sema &SemaRef,
2741 Scope *S, DSAStackTy *Stack);
2742
2743namespace {
2744
2745class VarDeclFilterCCC final : public CorrectionCandidateCallback {
2746private:
2747 Sema &SemaRef;
2748
2749public:
2750 explicit VarDeclFilterCCC(Sema &S) : SemaRef(S) {}
2751 bool ValidateCandidate(const TypoCorrection &Candidate) override {
2752 NamedDecl *ND = Candidate.getCorrectionDecl();
2753 if (const auto *VD = dyn_cast_or_null<VarDecl>(ND)) {
2754 return VD->hasGlobalStorage() &&
2755 SemaRef.isDeclInScope(ND, SemaRef.getCurLexicalContext(),
2756 SemaRef.getCurScope());
2757 }
2758 return false;
2759 }
2760
2761 std::unique_ptr<CorrectionCandidateCallback> clone() override {
2762 return std::make_unique<VarDeclFilterCCC>(*this);
2763 }
2764
2765};
2766
2767class VarOrFuncDeclFilterCCC final : public CorrectionCandidateCallback {
2768private:
2769 Sema &SemaRef;
2770
2771public:
2772 explicit VarOrFuncDeclFilterCCC(Sema &S) : SemaRef(S) {}
2773 bool ValidateCandidate(const TypoCorrection &Candidate) override {
2774 NamedDecl *ND = Candidate.getCorrectionDecl();
2775 if (ND && ((isa<VarDecl>(ND) && ND->getKind() == Decl::Var) ||
2776 isa<FunctionDecl>(ND))) {
2777 return SemaRef.isDeclInScope(ND, SemaRef.getCurLexicalContext(),
2778 SemaRef.getCurScope());
2779 }
2780 return false;
2781 }
2782
2783 std::unique_ptr<CorrectionCandidateCallback> clone() override {
2784 return std::make_unique<VarOrFuncDeclFilterCCC>(*this);
2785 }
2786};
2787
2788} // namespace
2789
2790ExprResult Sema::ActOnOpenMPIdExpression(Scope *CurScope,
2791 CXXScopeSpec &ScopeSpec,
2792 const DeclarationNameInfo &Id,
2793 OpenMPDirectiveKind Kind) {
2794 LookupResult Lookup(*this, Id, LookupOrdinaryName);
2795 LookupParsedName(Lookup, CurScope, &ScopeSpec, true);
2796
2797 if (Lookup.isAmbiguous())
2798 return ExprError();
2799
2800 VarDecl *VD;
2801 if (!Lookup.isSingleResult()) {
2802 VarDeclFilterCCC CCC(*this);
2803 if (TypoCorrection Corrected =
2804 CorrectTypo(Id, LookupOrdinaryName, CurScope, nullptr, CCC,
2805 CTK_ErrorRecovery)) {
2806 diagnoseTypo(Corrected,
2807 PDiag(Lookup.empty()
2808 ? diag::err_undeclared_var_use_suggest
2809 : diag::err_omp_expected_var_arg_suggest)
2810 << Id.getName());
2811 VD = Corrected.getCorrectionDeclAs<VarDecl>();
2812 } else {
2813 Diag(Id.getLoc(), Lookup.empty() ? diag::err_undeclared_var_use
2814 : diag::err_omp_expected_var_arg)
2815 << Id.getName();
2816 return ExprError();
2817 }
2818 } else if (!(VD = Lookup.getAsSingle<VarDecl>())) {
2819 Diag(Id.getLoc(), diag::err_omp_expected_var_arg) << Id.getName();
2820 Diag(Lookup.getFoundDecl()->getLocation(), diag::note_declared_at);
2821 return ExprError();
2822 }
2823 Lookup.suppressDiagnostics();
2824
2825 // OpenMP [2.9.2, Syntax, C/C++]
2826 // Variables must be file-scope, namespace-scope, or static block-scope.
2827 if (Kind == OMPD_threadprivate && !VD->hasGlobalStorage()) {
2828 Diag(Id.getLoc(), diag::err_omp_global_var_arg)
2829 << getOpenMPDirectiveName(Kind) << !VD->isStaticLocal();
2830 bool IsDecl =
2831 VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
2832 Diag(VD->getLocation(),
2833 IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2834 << VD;
2835 return ExprError();
2836 }
2837
2838 VarDecl *CanonicalVD = VD->getCanonicalDecl();
2839 NamedDecl *ND = CanonicalVD;
2840 // OpenMP [2.9.2, Restrictions, C/C++, p.2]
2841 // A threadprivate directive for file-scope variables must appear outside
2842 // any definition or declaration.
2843 if (CanonicalVD->getDeclContext()->isTranslationUnit() &&
2844 !getCurLexicalContext()->isTranslationUnit()) {
2845 Diag(Id.getLoc(), diag::err_omp_var_scope)
2846 << getOpenMPDirectiveName(Kind) << VD;
2847 bool IsDecl =
2848 VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
2849 Diag(VD->getLocation(),
2850 IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2851 << VD;
2852 return ExprError();
2853 }
2854 // OpenMP [2.9.2, Restrictions, C/C++, p.3]
2855 // A threadprivate directive for static class member variables must appear
2856 // in the class definition, in the same scope in which the member
2857 // variables are declared.
2858 if (CanonicalVD->isStaticDataMember() &&
2859 !CanonicalVD->getDeclContext()->Equals(getCurLexicalContext())) {
2860 Diag(Id.getLoc(), diag::err_omp_var_scope)
2861 << getOpenMPDirectiveName(Kind) << VD;
2862 bool IsDecl =
2863 VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
2864 Diag(VD->getLocation(),
2865 IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2866 << VD;
2867 return ExprError();
2868 }
2869 // OpenMP [2.9.2, Restrictions, C/C++, p.4]
2870 // A threadprivate directive for namespace-scope variables must appear
2871 // outside any definition or declaration other than the namespace
2872 // definition itself.
2873 if (CanonicalVD->getDeclContext()->isNamespace() &&
2874 (!getCurLexicalContext()->isFileContext() ||
2875 !getCurLexicalContext()->Encloses(CanonicalVD->getDeclContext()))) {
2876 Diag(Id.getLoc(), diag::err_omp_var_scope)
2877 << getOpenMPDirectiveName(Kind) << VD;
2878 bool IsDecl =
2879 VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
2880 Diag(VD->getLocation(),
2881 IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2882 << VD;
2883 return ExprError();
2884 }
2885 // OpenMP [2.9.2, Restrictions, C/C++, p.6]
2886 // A threadprivate directive for static block-scope variables must appear
2887 // in the scope of the variable and not in a nested scope.
2888 if (CanonicalVD->isLocalVarDecl() && CurScope &&
2889 !isDeclInScope(ND, getCurLexicalContext(), CurScope)) {
2890 Diag(Id.getLoc(), diag::err_omp_var_scope)
2891 << getOpenMPDirectiveName(Kind) << VD;
2892 bool IsDecl =
2893 VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
2894 Diag(VD->getLocation(),
2895 IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2896 << VD;
2897 return ExprError();
2898 }
2899
2900 // OpenMP [2.9.2, Restrictions, C/C++, p.2-6]
2901 // A threadprivate directive must lexically precede all references to any
2902 // of the variables in its list.
2903 if (Kind == OMPD_threadprivate && VD->isUsed() &&
2904 !DSAStack->isThreadPrivate(VD)) {
2905 Diag(Id.getLoc(), diag::err_omp_var_used)
2906 << getOpenMPDirectiveName(Kind) << VD;
2907 return ExprError();
2908 }
2909
2910 QualType ExprType = VD->getType().getNonReferenceType();
2911 return DeclRefExpr::Create(Context, NestedNameSpecifierLoc(),
2912 SourceLocation(), VD,
2913 /*RefersToEnclosingVariableOrCapture=*/false,
2914 Id.getLoc(), ExprType, VK_LValue);
2915}
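
For reference, a hypothetical sketch (not from the analyzed file) of the threadprivate scoping and use-before-directive diagnostics that this routine enforces:

    // Illustrative sketch only.
    int gv;
    void touch() { gv = 1; }        // gv is referenced here ...
    #pragma omp threadprivate(gv)   // ... so this is rejected (err_omp_var_used):
                                    // the directive must lexically precede all uses.

    int fv;
    void wrong_scope() {
    #pragma omp threadprivate(fv)   // rejected (err_omp_var_scope): for a file-scope
    }                               // variable the directive must appear at file scope.
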
2916
2917Sema::DeclGroupPtrTy
2918Sema::ActOnOpenMPThreadprivateDirective(SourceLocation Loc,
2919 ArrayRef<Expr *> VarList) {
2920 if (OMPThreadPrivateDecl *D = CheckOMPThreadPrivateDecl(Loc, VarList)) {
2921 CurContext->addDecl(D);
2922 return DeclGroupPtrTy::make(DeclGroupRef(D));
2923 }
2924 return nullptr;
2925}
2926
2927namespace {
2928class LocalVarRefChecker final
2929 : public ConstStmtVisitor<LocalVarRefChecker, bool> {
2930 Sema &SemaRef;
2931
2932public:
2933 bool VisitDeclRefExpr(const DeclRefExpr *E) {
2934 if (const auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
2935 if (VD->hasLocalStorage()) {
2936 SemaRef.Diag(E->getBeginLoc(),
2937 diag::err_omp_local_var_in_threadprivate_init)
2938 << E->getSourceRange();
2939 SemaRef.Diag(VD->getLocation(), diag::note_defined_here)
2940 << VD << VD->getSourceRange();
2941 return true;
2942 }
2943 }
2944 return false;
2945 }
2946 bool VisitStmt(const Stmt *S) {
2947 for (const Stmt *Child : S->children()) {
2948 if (Child && Visit(Child))
2949 return true;
2950 }
2951 return false;
2952 }
2953 explicit LocalVarRefChecker(Sema &SemaRef) : SemaRef(SemaRef) {}
2954};
2955} // namespace
2956
2957OMPThreadPrivateDecl *
2958Sema::CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList) {
2959 SmallVector<Expr *, 8> Vars;
2960 for (Expr *RefExpr : VarList) {
2961 auto *DE = cast<DeclRefExpr>(RefExpr);
2962 auto *VD = cast<VarDecl>(DE->getDecl());
2963 SourceLocation ILoc = DE->getExprLoc();
2964
2965 // Mark variable as used.
2966 VD->setReferenced();
2967 VD->markUsed(Context);
2968
2969 QualType QType = VD->getType();
2970 if (QType->isDependentType() || QType->isInstantiationDependentType()) {
2971 // It will be analyzed later.
2972 Vars.push_back(DE);
2973 continue;
2974 }
2975
2976 // OpenMP [2.9.2, Restrictions, C/C++, p.10]
2977 // A threadprivate variable must not have an incomplete type.
2978 if (RequireCompleteType(ILoc, VD->getType(),
2979 diag::err_omp_threadprivate_incomplete_type)) {
2980 continue;
2981 }
2982
2983 // OpenMP [2.9.2, Restrictions, C/C++, p.10]
2984 // A threadprivate variable must not have a reference type.
2985 if (VD->getType()->isReferenceType()) {
2986 Diag(ILoc, diag::err_omp_ref_type_arg)
2987 << getOpenMPDirectiveName(OMPD_threadprivate) << VD->getType();
2988 bool IsDecl =
2989 VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
2990 Diag(VD->getLocation(),
2991 IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2992 << VD;
2993 continue;
2994 }
2995
2996 // Check if this is a TLS variable. If TLS is not being supported, produce
2997 // the corresponding diagnostic.
2998 if ((VD->getTLSKind() != VarDecl::TLS_None &&
2999 !(VD->hasAttr<OMPThreadPrivateDeclAttr>() &&
3000 getLangOpts().OpenMPUseTLS &&
3001 getASTContext().getTargetInfo().isTLSSupported())) ||
3002 (VD->getStorageClass() == SC_Register && VD->hasAttr<AsmLabelAttr>() &&
3003 !VD->isLocalVarDecl())) {
3004 Diag(ILoc, diag::err_omp_var_thread_local)
3005 << VD << ((VD->getTLSKind() != VarDecl::TLS_None) ? 0 : 1);
3006 bool IsDecl =
3007 VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
3008 Diag(VD->getLocation(),
3009 IsDecl ? diag::note_previous_decl : diag::note_defined_here)
3010 << VD;
3011 continue;
3012 }
3013
3014 // Check if the initial value of the threadprivate variable references a
3015 // variable with local storage (not supported by the runtime).
3016 if (const Expr *Init = VD->getAnyInitializer()) {
3017 LocalVarRefChecker Checker(*this);
3018 if (Checker.Visit(Init))
3019 continue;
3020 }
3021
3022 Vars.push_back(RefExpr);
3023 DSAStack->addDSA(VD, DE, OMPC_threadprivate);
3024 VD->addAttr(OMPThreadPrivateDeclAttr::CreateImplicit(
3025 Context, SourceRange(Loc, Loc)));
3026 if (ASTMutationListener *ML = Context.getASTMutationListener())
3027 ML->DeclarationMarkedOpenMPThreadPrivate(VD);
3028 }
3029 OMPThreadPrivateDecl *D = nullptr;
3030 if (!Vars.empty()) {
3031 D = OMPThreadPrivateDecl::Create(Context, getCurLexicalContext(), Loc,
3032 Vars);
3033 D->setAccess(AS_public);
3034 }
3035 return D;
3036}
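
A hypothetical snippet (not from the analyzed file) hitting the type restrictions checked above:

    // Illustrative sketch only.
    struct Opaque;                    // incomplete type
    extern Opaque op;
    #pragma omp threadprivate(op)     // rejected: err_omp_threadprivate_incomplete_type

    int base;
    int &ref = base;
    #pragma omp threadprivate(ref)    // rejected: err_omp_ref_type_arg (reference type)
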
3037
3038static OMPAllocateDeclAttr::AllocatorTypeTy
3039getAllocatorKind(Sema &S, DSAStackTy *Stack, Expr *Allocator) {
3040 if (!Allocator)
3041 return OMPAllocateDeclAttr::OMPNullMemAlloc;
3042 if (Allocator->isTypeDependent() || Allocator->isValueDependent() ||
3043 Allocator->isInstantiationDependent() ||
3044 Allocator->containsUnexpandedParameterPack())
3045 return OMPAllocateDeclAttr::OMPUserDefinedMemAlloc;
3046 auto AllocatorKindRes = OMPAllocateDeclAttr::OMPUserDefinedMemAlloc;
3047 const Expr *AE = Allocator->IgnoreParenImpCasts();
3048 for (int I = 0; I < OMPAllocateDeclAttr::OMPUserDefinedMemAlloc; ++I) {
3049 auto AllocatorKind = static_cast<OMPAllocateDeclAttr::AllocatorTypeTy>(I);
3050 const Expr *DefAllocator = Stack->getAllocator(AllocatorKind);
3051 llvm::FoldingSetNodeID AEId, DAEId;
3052 AE->Profile(AEId, S.getASTContext(), /*Canonical=*/true);
3053 DefAllocator->Profile(DAEId, S.getASTContext(), /*Canonical=*/true);
3054 if (AEId == DAEId) {
3055 AllocatorKindRes = AllocatorKind;
3056 break;
3057 }
3058 }
3059 return AllocatorKindRes;
3060}
3061
3062static bool checkPreviousOMPAllocateAttribute(
3063 Sema &S, DSAStackTy *Stack, Expr *RefExpr, VarDecl *VD,
3064 OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind, Expr *Allocator) {
3065 if (!VD->hasAttr<OMPAllocateDeclAttr>())
3066 return false;
3067 const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
3068 Expr *PrevAllocator = A->getAllocator();
3069 OMPAllocateDeclAttr::AllocatorTypeTy PrevAllocatorKind =
3070 getAllocatorKind(S, Stack, PrevAllocator);
3071 bool AllocatorsMatch = AllocatorKind == PrevAllocatorKind;
3072 if (AllocatorsMatch &&
3073 AllocatorKind == OMPAllocateDeclAttr::OMPUserDefinedMemAlloc &&
3074 Allocator && PrevAllocator) {
3075 const Expr *AE = Allocator->IgnoreParenImpCasts();
3076 const Expr *PAE = PrevAllocator->IgnoreParenImpCasts();
3077 llvm::FoldingSetNodeID AEId, PAEId;
3078 AE->Profile(AEId, S.Context, /*Canonical=*/true);
3079 PAE->Profile(PAEId, S.Context, /*Canonical=*/true);
3080 AllocatorsMatch = AEId == PAEId;
3081 }
3082 if (!AllocatorsMatch) {
3083 SmallString<256> AllocatorBuffer;
3084 llvm::raw_svector_ostream AllocatorStream(AllocatorBuffer);
3085 if (Allocator)
3086 Allocator->printPretty(AllocatorStream, nullptr, S.getPrintingPolicy());
3087 SmallString<256> PrevAllocatorBuffer;
3088 llvm::raw_svector_ostream PrevAllocatorStream(PrevAllocatorBuffer);
3089 if (PrevAllocator)
3090 PrevAllocator->printPretty(PrevAllocatorStream, nullptr,
3091 S.getPrintingPolicy());
3092
3093 SourceLocation AllocatorLoc =
3094 Allocator ? Allocator->getExprLoc() : RefExpr->getExprLoc();
3095 SourceRange AllocatorRange =
3096 Allocator ? Allocator->getSourceRange() : RefExpr->getSourceRange();
3097 SourceLocation PrevAllocatorLoc =
3098 PrevAllocator ? PrevAllocator->getExprLoc() : A->getLocation();
3099 SourceRange PrevAllocatorRange =
3100 PrevAllocator ? PrevAllocator->getSourceRange() : A->getRange();
3101 S.Diag(AllocatorLoc, diag::warn_omp_used_different_allocator)
3102 << (Allocator ? 1 : 0) << AllocatorStream.str()
3103 << (PrevAllocator ? 1 : 0) << PrevAllocatorStream.str()
3104 << AllocatorRange;
3105 S.Diag(PrevAllocatorLoc, diag::note_omp_previous_allocator)
3106 << PrevAllocatorRange;
3107 return true;
3108 }
3109 return false;
3110}
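
A hypothetical example (not from the analyzed file) of the mismatch this helper warns about, when the same variable appears in two allocate directives with different allocators:

    // Illustrative sketch only; the omp_*_mem_alloc names are the predefined allocators.
    #include <omp.h>
    int buf[1024];
    #pragma omp allocate(buf) allocator(omp_default_mem_alloc)
    #pragma omp allocate(buf) allocator(omp_large_cap_mem_alloc)
    // warn_omp_used_different_allocator: the earlier allocator is kept and the
    // note points at the previous directive.
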
3111
3112static void
3113applyOMPAllocateAttribute(Sema &S, VarDecl *VD,
3114 OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind,
3115 Expr *Allocator, SourceRange SR) {
3116 if (VD->hasAttr<OMPAllocateDeclAttr>())
3117 return;
3118 if (Allocator &&
3119 (Allocator->isTypeDependent() || Allocator->isValueDependent() ||
3120 Allocator->isInstantiationDependent() ||
3121 Allocator->containsUnexpandedParameterPack()))
3122 return;
3123 auto *A = OMPAllocateDeclAttr::CreateImplicit(S.Context, AllocatorKind,
3124 Allocator, SR);
3125 VD->addAttr(A);
3126 if (ASTMutationListener *ML = S.Context.getASTMutationListener())
3127 ML->DeclarationMarkedOpenMPAllocate(VD, A);
3128}
3129
3130Sema::DeclGroupPtrTy Sema::ActOnOpenMPAllocateDirective(
3131 SourceLocation Loc, ArrayRef<Expr *> VarList,
3132 ArrayRef<OMPClause *> Clauses, DeclContext *Owner) {
3133 assert(Clauses.size() <= 1 && "Expected at most one clause.");
3134 Expr *Allocator = nullptr;
3135 if (Clauses.empty()) {
3136 // OpenMP 5.0, 2.11.3 allocate Directive, Restrictions.
3137 // allocate directives that appear in a target region must specify an
3138 // allocator clause unless a requires directive with the dynamic_allocators
3139 // clause is present in the same compilation unit.
3140 if (LangOpts.OpenMPIsDevice &&
3141 !DSAStack->hasRequiresDeclWithClause<OMPDynamicAllocatorsClause>())
3142 targetDiag(Loc, diag::err_expected_allocator_clause);
3143 } else {
3144 Allocator = cast<OMPAllocatorClause>(Clauses.back())->getAllocator();
3145 }
3146 OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind =
3147 getAllocatorKind(*this, DSAStack, Allocator);
3148 SmallVector<Expr *, 8> Vars;
3149 for (Expr *RefExpr : VarList) {
3150 auto *DE = cast<DeclRefExpr>(RefExpr);
3151 auto *VD = cast<VarDecl>(DE->getDecl());
3152
3153 // Check if this is a TLS variable or global register.
3154 if (VD->getTLSKind() != VarDecl::TLS_None ||
3155 VD->hasAttr<OMPThreadPrivateDeclAttr>() ||
3156 (VD->getStorageClass() == SC_Register && VD->hasAttr<AsmLabelAttr>() &&
3157 !VD->isLocalVarDecl()))
3158 continue;
3159
3160 // If the variable is used several times in the allocate directive, the
3161 // same allocator must be used.
3162 if (checkPreviousOMPAllocateAttribute(*this, DSAStack, RefExpr, VD,
3163 AllocatorKind, Allocator))
3164 continue;
3165
3166 // OpenMP, 2.11.3 allocate Directive, Restrictions, C / C++
3167 // If a list item has a static storage type, the allocator expression in the
3168 // allocator clause must be a constant expression that evaluates to one of
3169 // the predefined memory allocator values.
3170 if (Allocator && VD->hasGlobalStorage()) {
3171 if (AllocatorKind == OMPAllocateDeclAttr::OMPUserDefinedMemAlloc) {
3172 Diag(Allocator->getExprLoc(),
3173 diag::err_omp_expected_predefined_allocator)
3174 << Allocator->getSourceRange();
3175 bool IsDecl = VD->isThisDeclarationADefinition(Context) ==
3176 VarDecl::DeclarationOnly;
3177 Diag(VD->getLocation(),
3178 IsDecl ? diag::note_previous_decl : diag::note_defined_here)
3179 << VD;
3180 continue;
3181 }
3182 }
3183
3184 Vars.push_back(RefExpr);
3185 applyOMPAllocateAttribute(*this, VD, AllocatorKind, Allocator,
3186 DE->getSourceRange());
3187 }
3188 if (Vars.empty())
3189 return nullptr;
3190 if (!Owner)
3191 Owner = getCurLexicalContext();
3192 auto *D = OMPAllocateDecl::Create(Context, Owner, Loc, Vars, Clauses);
3193 D->setAccess(AS_public);
3194 Owner->addDecl(D);
3195 return DeclGroupPtrTy::make(DeclGroupRef(D));
3196}
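
A hypothetical snippet (not from the analyzed file) for the static-storage restriction enforced above:

    // Illustrative sketch only.
    #include <omp.h>
    omp_allocator_handle_t my_alloc;        // user-defined allocator handle
    static int counter;
    #pragma omp allocate(counter) allocator(my_alloc)
    // rejected: err_omp_expected_predefined_allocator -- a list item with static
    // storage must use one of the predefined allocators (e.g. omp_default_mem_alloc).
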
3197
3198Sema::DeclGroupPtrTy
3199Sema::ActOnOpenMPRequiresDirective(SourceLocation Loc,
3200 ArrayRef<OMPClause *> ClauseList) {
3201 OMPRequiresDecl *D = nullptr;
3202 if (!CurContext->isFileContext()) {
3203 Diag(Loc, diag::err_omp_invalid_scope) << "requires";
3204 } else {
3205 D = CheckOMPRequiresDecl(Loc, ClauseList);
3206 if (D) {
3207 CurContext->addDecl(D);
3208 DSAStack->addRequiresDecl(D);
3209 }
3210 }
3211 return DeclGroupPtrTy::make(DeclGroupRef(D));
3212}
3213
3214void Sema::ActOnOpenMPAssumesDirective(SourceLocation Loc,
3215 OpenMPDirectiveKind DKind,
3216 ArrayRef<StringRef> Assumptions,
3217 bool SkippedClauses) {
3218 if (!SkippedClauses && Assumptions.empty())
3219 Diag(Loc, diag::err_omp_no_clause_for_directive)
3220 << llvm::omp::getAllAssumeClauseOptions()
3221 << llvm::omp::getOpenMPDirectiveName(DKind);
3222
3223 auto *AA = AssumptionAttr::Create(Context, llvm::join(Assumptions, ","), Loc);
3224 if (DKind == llvm::omp::Directive::OMPD_begin_assumes) {
3225 OMPAssumeScoped.push_back(AA);
3226 return;
3227 }
3228
3229 // Global assumes without assumption clauses are ignored.
3230 if (Assumptions.empty())
3231 return;
3232
3233 assert(DKind == llvm::omp::Directive::OMPD_assumes &&
3234        "Unexpected omp assumption directive!");
3235 OMPAssumeGlobal.push_back(AA);
3236
3237 // The OMPAssumeGlobal scope above will take care of new declarations but
3238 // we also want to apply the assumption to existing ones, e.g., to
3239 // declarations in included headers. To this end, we traverse all existing
3240 // declaration contexts and annotate function declarations here.
3241 SmallVector<DeclContext *, 8> DeclContexts;
3242 auto *Ctx = CurContext;
3243 while (Ctx->getLexicalParent())
3244 Ctx = Ctx->getLexicalParent();
3245 DeclContexts.push_back(Ctx);
3246 while (!DeclContexts.empty()) {
3247 DeclContext *DC = DeclContexts.pop_back_val();
3248 for (auto *SubDC : DC->decls()) {
3249 if (SubDC->isInvalidDecl())
3250 continue;
3251 if (auto *CTD = dyn_cast<ClassTemplateDecl>(SubDC)) {
3252 DeclContexts.push_back(CTD->getTemplatedDecl());
3253 for (auto *S : CTD->specializations())
3254 DeclContexts.push_back(S);
3255 continue;
3256 }
3257 if (auto *DC = dyn_cast<DeclContext>(SubDC))
3258 DeclContexts.push_back(DC);
3259 if (auto *F = dyn_cast<FunctionDecl>(SubDC)) {
3260 F->addAttr(AA);
3261 continue;
3262 }
3263 }
3264 }
3265}
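
A hypothetical usage sketch (not from the analyzed file) of the two assumption forms handled here: a global 'assumes' directive, which is also back-applied to existing function declarations, and a scoped 'begin/end assumes' pair:

    // Illustrative sketch only.
    #pragma omp assumes no_openmp_routines     // global assumption

    #pragma omp begin assumes no_parallelism   // scoped assumption
    void leaf(void);                           // annotated while the scope is open
    #pragma omp end assumes
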
3266
3267void Sema::ActOnOpenMPEndAssumesDirective() {
3268 assert(isInOpenMPAssumeScope() && "Not in OpenMP assumes scope!");
3269 OMPAssumeScoped.pop_back();
3270}
3271
3272OMPRequiresDecl *Sema::CheckOMPRequiresDecl(SourceLocation Loc,
3273 ArrayRef<OMPClause *> ClauseList) {
3274 /// For target specific clauses, the requires directive cannot be
3275 /// specified after the handling of any of the target regions in the
3276 /// current compilation unit.
3277 ArrayRef<SourceLocation> TargetLocations =
3278 DSAStack->getEncounteredTargetLocs();
3279 SourceLocation AtomicLoc = DSAStack->getAtomicDirectiveLoc();
3280 if (!TargetLocations.empty() || !AtomicLoc.isInvalid()) {
3281 for (const OMPClause *CNew : ClauseList) {
3282 // Check if any of the requires clauses affect target regions.
3283 if (isa<OMPUnifiedSharedMemoryClause>(CNew) ||
3284 isa<OMPUnifiedAddressClause>(CNew) ||
3285 isa<OMPReverseOffloadClause>(CNew) ||
3286 isa<OMPDynamicAllocatorsClause>(CNew)) {
3287 Diag(Loc, diag::err_omp_directive_before_requires)
3288 << "target" << getOpenMPClauseName(CNew->getClauseKind());
3289 for (SourceLocation TargetLoc : TargetLocations) {
3290 Diag(TargetLoc, diag::note_omp_requires_encountered_directive)
3291 << "target";
3292 }
3293 } else if (!AtomicLoc.isInvalid() &&
3294 isa<OMPAtomicDefaultMemOrderClause>(CNew)) {
3295 Diag(Loc, diag::err_omp_directive_before_requires)
3296 << "atomic" << getOpenMPClauseName(CNew->getClauseKind());
3297 Diag(AtomicLoc, diag::note_omp_requires_encountered_directive)
3298 << "atomic";
3299 }
3300 }
3301 }
3302
3303 if (!DSAStack->hasDuplicateRequiresClause(ClauseList))
3304 return OMPRequiresDecl::Create(Context, getCurLexicalContext(), Loc,
3305 ClauseList);
3306 return nullptr;
3307}
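
A hypothetical ordering error (not from the analyzed file) of the kind diagnosed above:

    // Illustrative sketch only.
    void kernel(void) {
    #pragma omp target
      { }
    }
    #pragma omp requires unified_shared_memory
    // rejected: err_omp_directive_before_requires -- a requires directive with a
    // target-affecting clause must precede every target region in the unit.
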
3308
3309static void reportOriginalDsa(Sema &SemaRef, const DSAStackTy *Stack,
3310 const ValueDecl *D,
3311 const DSAStackTy::DSAVarData &DVar,
3312 bool IsLoopIterVar) {
3313 if (DVar.RefExpr) {
3314 SemaRef.Diag(DVar.RefExpr->getExprLoc(), diag::note_omp_explicit_dsa)
3315 << getOpenMPClauseName(DVar.CKind);
3316 return;
3317 }
3318 enum {
3319 PDSA_StaticMemberShared,
3320 PDSA_StaticLocalVarShared,
3321 PDSA_LoopIterVarPrivate,
3322 PDSA_LoopIterVarLinear,
3323 PDSA_LoopIterVarLastprivate,
3324 PDSA_ConstVarShared,
3325 PDSA_GlobalVarShared,
3326 PDSA_TaskVarFirstprivate,
3327 PDSA_LocalVarPrivate,
3328 PDSA_Implicit
3329 } Reason = PDSA_Implicit;
3330 bool ReportHint = false;
3331 auto ReportLoc = D->getLocation();
3332 auto *VD = dyn_cast<VarDecl>(D);
3333 if (IsLoopIterVar) {
3334 if (DVar.CKind == OMPC_private)
3335 Reason = PDSA_LoopIterVarPrivate;
3336 else if (DVar.CKind == OMPC_lastprivate)
3337 Reason = PDSA_LoopIterVarLastprivate;
3338 else
3339 Reason = PDSA_LoopIterVarLinear;
3340 } else if (isOpenMPTaskingDirective(DVar.DKind) &&
3341 DVar.CKind == OMPC_firstprivate) {
3342 Reason = PDSA_TaskVarFirstprivate;
3343 ReportLoc = DVar.ImplicitDSALoc;
3344 } else if (VD && VD->isStaticLocal())
3345 Reason = PDSA_StaticLocalVarShared;
3346 else if (VD && VD->isStaticDataMember())
3347 Reason = PDSA_StaticMemberShared;
3348 else if (VD && VD->isFileVarDecl())
3349 Reason = PDSA_GlobalVarShared;
3350 else if (D->getType().isConstant(SemaRef.getASTContext()))
3351 Reason = PDSA_ConstVarShared;
3352 else if (VD && VD->isLocalVarDecl() && DVar.CKind == OMPC_private) {
3353 ReportHint = true;
3354 Reason = PDSA_LocalVarPrivate;
3355 }
3356 if (Reason != PDSA_Implicit) {
3357 SemaRef.Diag(ReportLoc, diag::note_omp_predetermined_dsa)
3358 << Reason << ReportHint
3359 << getOpenMPDirectiveName(Stack->getCurrentDirective());
3360 } else if (DVar.ImplicitDSALoc.isValid()) {
3361 SemaRef.Diag(DVar.ImplicitDSALoc, diag::note_omp_implicit_dsa)
3362 << getOpenMPClauseName(DVar.CKind);
3363 }
3364}
3365
3366static OpenMPMapClauseKind
3367getMapClauseKindFromModifier(OpenMPDefaultmapClauseModifier M,
3368 bool IsAggregateOrDeclareTarget) {
3369 OpenMPMapClauseKind Kind = OMPC_MAP_unknown;
3370 switch (M) {
3371 case OMPC_DEFAULTMAP_MODIFIER_alloc:
3372 Kind = OMPC_MAP_alloc;
3373 break;
3374 case OMPC_DEFAULTMAP_MODIFIER_to:
3375 Kind = OMPC_MAP_to;
3376 break;
3377 case OMPC_DEFAULTMAP_MODIFIER_from:
3378 Kind = OMPC_MAP_from;
3379 break;
3380 case OMPC_DEFAULTMAP_MODIFIER_tofrom:
3381 Kind = OMPC_MAP_tofrom;
3382 break;
3383 case OMPC_DEFAULTMAP_MODIFIER_present:
3384 // OpenMP 5.1 [2.21.7.3, defaultmap clause, Description]
3385 // If implicit-behavior is present, each variable referenced in the
3386 // construct in the category specified by variable-category is treated as if
3387 // it had been listed in a map clause with the map-type of alloc and
3388 // map-type-modifier of present.
3389 Kind = OMPC_MAP_alloc;
3390 break;
3391 case OMPC_DEFAULTMAP_MODIFIER_firstprivate:
3392 case OMPC_DEFAULTMAP_MODIFIER_last:
3393 llvm_unreachable("Unexpected defaultmap implicit behavior");
3394 case OMPC_DEFAULTMAP_MODIFIER_none:
3395 case OMPC_DEFAULTMAP_MODIFIER_default:
3396 case OMPC_DEFAULTMAP_MODIFIER_unknown:
3397 // IsAggregateOrDeclareTarget could be true if:
3398 // 1. the implicit behavior for aggregate is tofrom
3399 // 2. it's a declare target link
3400 if (IsAggregateOrDeclareTarget) {
3401 Kind = OMPC_MAP_tofrom;
3402 break;
3403 }
3404 llvm_unreachable("Unexpected defaultmap implicit behavior");
3405 }
3406 assert(Kind != OMPC_MAP_unknown && "Expect map kind to be known");
3407 return Kind;
3408}
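
A hypothetical illustration (not from the analyzed file) of how a defaultmap implicit-behavior modifier turns into an implicit map kind for an aggregate:

    // Illustrative sketch only.
    struct Payload { int data[64]; } p;
    void run(void) {
    #pragma omp target defaultmap(from: aggregate)
      { p.data[0] = 1; }     // p is implicitly mapped as map(from: p)
    #pragma omp target defaultmap(present: aggregate)
      { p.data[0] = 1; }     // treated as map(present, alloc: p), per the comment above
    }
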
3409
3410namespace {
3411class DSAAttrChecker final : public StmtVisitor<DSAAttrChecker, void> {
3412 DSAStackTy *Stack;
3413 Sema &SemaRef;
3414 bool ErrorFound = false;
3415 bool TryCaptureCXXThisMembers = false;
3416 CapturedStmt *CS = nullptr;
3417 const static unsigned DefaultmapKindNum = OMPC_DEFAULTMAP_pointer + 1;
3418 llvm::SmallVector<Expr *, 4> ImplicitFirstprivate;
3419 llvm::SmallVector<Expr *, 4> ImplicitMap[DefaultmapKindNum][OMPC_MAP_delete];
3420 llvm::SmallVector<OpenMPMapModifierKind, NumberOfOMPMapClauseModifiers>
3421 ImplicitMapModifier[DefaultmapKindNum];
3422 Sema::VarsWithInheritedDSAType VarsWithInheritedDSA;
3423 llvm::SmallDenseSet<const ValueDecl *, 4> ImplicitDeclarations;
3424
3425 void VisitSubCaptures(OMPExecutableDirective *S) {
3426 // Check implicitly captured variables.
3427 if (!S->hasAssociatedStmt() || !S->getAssociatedStmt())
3428 return;
3429 if (S->getDirectiveKind() == OMPD_atomic ||
3430 S->getDirectiveKind() == OMPD_critical ||
3431 S->getDirectiveKind() == OMPD_section ||
3432 S->getDirectiveKind() == OMPD_master ||
3433 S->getDirectiveKind() == OMPD_masked ||
3434 isOpenMPLoopTransformationDirective(S->getDirectiveKind())) {
3435 Visit(S->getAssociatedStmt());
3436 return;
3437 }
3438 visitSubCaptures(S->getInnermostCapturedStmt());
3439 // Try to capture inner this->member references to generate correct mappings
3440 // and diagnostics.
3441 if (TryCaptureCXXThisMembers ||
3442 (isOpenMPTargetExecutionDirective(Stack->getCurrentDirective()) &&
3443 llvm::any_of(S->getInnermostCapturedStmt()->captures(),
3444 [](const CapturedStmt::Capture &C) {
3445 return C.capturesThis();
3446 }))) {
3447 bool SavedTryCaptureCXXThisMembers = TryCaptureCXXThisMembers;
3448 TryCaptureCXXThisMembers = true;
3449 Visit(S->getInnermostCapturedStmt()->getCapturedStmt());
3450 TryCaptureCXXThisMembers = SavedTryCaptureCXXThisMembers;
3451 }
3452 // In tasks firstprivates are not captured anymore, need to analyze them
3453 // explicitly.
3454 if (isOpenMPTaskingDirective(S->getDirectiveKind()) &&
3455 !isOpenMPTaskLoopDirective(S->getDirectiveKind())) {
3456 for (OMPClause *C : S->clauses())
3457 if (auto *FC = dyn_cast<OMPFirstprivateClause>(C)) {
3458 for (Expr *Ref : FC->varlists())
3459 Visit(Ref);
3460 }
3461 }
3462 }
3463
3464public:
3465 void VisitDeclRefExpr(DeclRefExpr *E) {
3466 if (TryCaptureCXXThisMembers || E->isTypeDependent() ||
3467 E->isValueDependent() || E->containsUnexpandedParameterPack() ||
3468 E->isInstantiationDependent())
3469 return;
3470 if (auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
3471 // Check the datasharing rules for the expressions in the clauses.
3472 if (!CS) {
3473 if (auto *CED = dyn_cast<OMPCapturedExprDecl>(VD))
3474 if (!CED->hasAttr<OMPCaptureNoInitAttr>()) {
3475 Visit(CED->getInit());
3476 return;
3477 }
3478 } else if (VD->isImplicit() || isa<OMPCapturedExprDecl>(VD))
3479 // Do not analyze internal variables and do not enclose them into
3480 // implicit clauses.
3481 return;
3482 VD = VD->getCanonicalDecl();
3483 // Skip internally declared variables.
3484 if (VD->hasLocalStorage() && CS && !CS->capturesVariable(VD) &&
3485 !Stack->isImplicitTaskFirstprivate(VD))
3486 return;
3487 // Skip allocators in uses_allocators clauses.
3488 if (Stack->isUsesAllocatorsDecl(VD).hasValue())
3489 return;
3490
3491 DSAStackTy::DSAVarData DVar = Stack->getTopDSA(VD, /*FromParent=*/false);
3492 // Check if the variable has an explicit DSA set and stop the analysis if so.
3493 if (DVar.RefExpr || !ImplicitDeclarations.insert(VD).second)
3494 return;
3495
3496 // Skip internally declared static variables.
3497 llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
3498 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
3499 if (VD->hasGlobalStorage() && CS && !CS->capturesVariable(VD) &&
3500 (Stack->hasRequiresDeclWithClause<OMPUnifiedSharedMemoryClause>() ||
3501 !Res || *Res != OMPDeclareTargetDeclAttr::MT_Link) &&
3502 !Stack->isImplicitTaskFirstprivate(VD))
3503 return;
3504
3505 SourceLocation ELoc = E->getExprLoc();
3506 OpenMPDirectiveKind DKind = Stack->getCurrentDirective();
3507 // The default(none) clause requires that each variable that is referenced
3508 // in the construct, and does not have a predetermined data-sharing
3509 // attribute, must have its data-sharing attribute explicitly determined
3510 // by being listed in a data-sharing attribute clause.
3511 if (DVar.CKind == OMPC_unknown &&
3512 (Stack->getDefaultDSA() == DSA_none ||
3513 Stack->getDefaultDSA() == DSA_firstprivate) &&
3514 isImplicitOrExplicitTaskingRegion(DKind) &&
3515 VarsWithInheritedDSA.count(VD) == 0) {
3516 bool InheritedDSA = Stack->getDefaultDSA() == DSA_none;
3517 if (!InheritedDSA && Stack->getDefaultDSA() == DSA_firstprivate) {
3518 DSAStackTy::DSAVarData DVar =
3519 Stack->getImplicitDSA(VD, /*FromParent=*/false);
3520 InheritedDSA = DVar.CKind == OMPC_unknown;
3521 }
3522 if (InheritedDSA)
3523 VarsWithInheritedDSA[VD] = E;
3524 return;
3525 }
3526
3527 // OpenMP 5.0 [2.19.7.2, defaultmap clause, Description]
3528 // If implicit-behavior is none, each variable referenced in the
3529 // construct that does not have a predetermined data-sharing attribute
3530 // and does not appear in a to or link clause on a declare target
3531 // directive must be listed in a data-mapping attribute clause, a
3532 // data-sharing attribute clause (including a data-sharing attribute
3533 // clause on a combined construct where target is one of the
3534 // constituent constructs), or an is_device_ptr clause.
3535 OpenMPDefaultmapClauseKind ClauseKind =
3536 getVariableCategoryFromDecl(SemaRef.getLangOpts(), VD);
3537 if (SemaRef.getLangOpts().OpenMP >= 50) {
3538 bool IsModifierNone = Stack->getDefaultmapModifier(ClauseKind) ==
3539 OMPC_DEFAULTMAP_MODIFIER_none;
3540 if (DVar.CKind == OMPC_unknown && IsModifierNone &&
3541 VarsWithInheritedDSA.count(VD) == 0 && !Res) {
3542 // Only check for data-mapping attribute and is_device_ptr here
3543 // since we have already made sure that the declaration does not
3544 // have a data-sharing attribute above.
3545 if (!Stack->checkMappableExprComponentListsForDecl(
3546 VD, /*CurrentRegionOnly=*/true,
3547 [VD](OMPClauseMappableExprCommon::MappableExprComponentListRef
3548 MapExprComponents,
3549 OpenMPClauseKind) {
3550 auto MI = MapExprComponents.rbegin();
3551 auto ME = MapExprComponents.rend();
3552 return MI != ME && MI->getAssociatedDeclaration() == VD;
3553 })) {
3554 VarsWithInheritedDSA[VD] = E;
3555 return;
3556 }
3557 }
3558 }
3559 if (SemaRef.getLangOpts().OpenMP > 50) {
3560 bool IsModifierPresent = Stack->getDefaultmapModifier(ClauseKind) ==
3561 OMPC_DEFAULTMAP_MODIFIER_present;
3562 if (IsModifierPresent) {
3563 if (llvm::find(ImplicitMapModifier[ClauseKind],
3564 OMPC_MAP_MODIFIER_present) ==
3565 std::end(ImplicitMapModifier[ClauseKind])) {
3566 ImplicitMapModifier[ClauseKind].push_back(
3567 OMPC_MAP_MODIFIER_present);
3568 }
3569 }
3570 }
3571
3572 if (isOpenMPTargetExecutionDirective(DKind) &&
3573 !Stack->isLoopControlVariable(VD).first) {
3574 if (!Stack->checkMappableExprComponentListsForDecl(
3575 VD, /*CurrentRegionOnly=*/true,
3576 [this](OMPClauseMappableExprCommon::MappableExprComponentListRef
3577 StackComponents,
3578 OpenMPClauseKind) {
3579 if (SemaRef.LangOpts.OpenMP >= 50)
3580 return !StackComponents.empty();
3581 // The variable is used if it has been marked as an array, array
3582 // section, array shaping expression, or the variable itself.
3583 return StackComponents.size() == 1 ||
3584 std::all_of(
3585 std::next(StackComponents.rbegin()),
3586 StackComponents.rend(),
3587 [](const OMPClauseMappableExprCommon::
3588 MappableComponent &MC) {
3589 return MC.getAssociatedDeclaration() ==
3590 nullptr &&
3591 (isa<OMPArraySectionExpr>(
3592 MC.getAssociatedExpression()) ||
3593 isa<OMPArrayShapingExpr>(
3594 MC.getAssociatedExpression()) ||
3595 isa<ArraySubscriptExpr>(
3596 MC.getAssociatedExpression()));
3597 });
3598 })) {
3599 bool IsFirstprivate = false;
3600 // By default lambdas are captured as firstprivates.
3601 if (const auto *RD =
3602 VD->getType().getNonReferenceType()->getAsCXXRecordDecl())
3603 IsFirstprivate = RD->isLambda();
3604 IsFirstprivate =
3605 IsFirstprivate || (Stack->mustBeFirstprivate(ClauseKind) && !Res);
3606 if (IsFirstprivate) {
3607 ImplicitFirstprivate.emplace_back(E);
3608 } else {
3609 OpenMPDefaultmapClauseModifier M =
3610 Stack->getDefaultmapModifier(ClauseKind);
3611 OpenMPMapClauseKind Kind = getMapClauseKindFromModifier(
3612 M, ClauseKind == OMPC_DEFAULTMAP_aggregate || Res);
3613 ImplicitMap[ClauseKind][Kind].emplace_back(E);
3614 }
3615 return;
3616 }
3617 }
3618
3619 // OpenMP [2.9.3.6, Restrictions, p.2]
3620 // A list item that appears in a reduction clause of the innermost
3621 // enclosing worksharing or parallel construct may not be accessed in an
3622 // explicit task.
3623 DVar = Stack->hasInnermostDSA(
3624 VD,
3625 [](OpenMPClauseKind C, bool AppliedToPointee) {
3626 return C == OMPC_reduction && !AppliedToPointee;
3627 },
3628 [](OpenMPDirectiveKind K) {
3629 return isOpenMPParallelDirective(K) ||
3630 isOpenMPWorksharingDirective(K) || isOpenMPTeamsDirective(K);
3631 },
3632 /*FromParent=*/true);
3633 if (isOpenMPTaskingDirective(DKind) && DVar.CKind == OMPC_reduction) {
3634 ErrorFound = true;
3635 SemaRef.Diag(ELoc, diag::err_omp_reduction_in_task);
3636 reportOriginalDsa(SemaRef, Stack, VD, DVar);
3637 return;
3638 }
3639
3640 // Define implicit data-sharing attributes for task.
3641 DVar = Stack->getImplicitDSA(VD, /*FromParent=*/false);
3642 if (((isOpenMPTaskingDirective(DKind) && DVar.CKind != OMPC_shared) ||
3643 (Stack->getDefaultDSA() == DSA_firstprivate &&
3644 DVar.CKind == OMPC_firstprivate && !DVar.RefExpr)) &&
3645 !Stack->isLoopControlVariable(VD).first) {
3646 ImplicitFirstprivate.push_back(E);
3647 return;
3648 }
3649
3650 // Store implicitly used globals with declare target link for parent
3651 // target.
3652 if (!isOpenMPTargetExecutionDirective(DKind) && Res &&
3653 *Res == OMPDeclareTargetDeclAttr::MT_Link) {
3654 Stack->addToParentTargetRegionLinkGlobals(E);
3655 return;
3656 }
3657 }
3658 }
3659 void VisitMemberExpr(MemberExpr *E) {
3660 if (E->isTypeDependent() || E->isValueDependent() ||
3661 E->containsUnexpandedParameterPack() || E->isInstantiationDependent())
3662 return;
3663 auto *FD = dyn_cast<FieldDecl>(E->getMemberDecl());
3664 OpenMPDirectiveKind DKind = Stack->getCurrentDirective();
3665 if (auto *TE = dyn_cast<CXXThisExpr>(E->getBase()->IgnoreParenCasts())) {
3666 if (!FD)
3667 return;
3668 DSAStackTy::DSAVarData DVar = Stack->getTopDSA(FD, /*FromParent=*/false);
3669 // Check if the variable has an explicit DSA set and stop the analysis
3670 // if so.
3671 if (DVar.RefExpr || !ImplicitDeclarations.insert(FD).second)
3672 return;
3673
3674 if (isOpenMPTargetExecutionDirective(DKind) &&
3675 !Stack->isLoopControlVariable(FD).first &&
3676 !Stack->checkMappableExprComponentListsForDecl(
3677 FD, /*CurrentRegionOnly=*/true,
3678 [](OMPClauseMappableExprCommon::MappableExprComponentListRef
3679 StackComponents,
3680 OpenMPClauseKind) {
3681 return isa<CXXThisExpr>(
3682 cast<MemberExpr>(
3683 StackComponents.back().getAssociatedExpression())
3684 ->getBase()
3685 ->IgnoreParens());
3686 })) {
3687 // OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, C/C++, p.3]
3688 // A bit-field cannot appear in a map clause.
3689 //
3690 if (FD->isBitField())
3691 return;
3692
3693 // Check to see if the member expression is referencing a class that
3694 // has already been explicitly mapped
3695 if (Stack->isClassPreviouslyMapped(TE->getType()))
3696 return;
3697
3698 OpenMPDefaultmapClauseModifier Modifier =
3699 Stack->getDefaultmapModifier(OMPC_DEFAULTMAP_aggregate);
3700 OpenMPDefaultmapClauseKind ClauseKind =
3701 getVariableCategoryFromDecl(SemaRef.getLangOpts(), FD);
3702 OpenMPMapClauseKind Kind = getMapClauseKindFromModifier(
3703 Modifier, /*IsAggregateOrDeclareTarget*/ true);
3704 ImplicitMap[ClauseKind][Kind].emplace_back(E);
3705 return;
3706 }
3707
3708 SourceLocation ELoc = E->getExprLoc();
3709 // OpenMP [2.9.3.6, Restrictions, p.2]
3710 // A list item that appears in a reduction clause of the innermost
3711 // enclosing worksharing or parallel construct may not be accessed in
3712 // an explicit task.
3713 DVar = Stack->hasInnermostDSA(
3714 FD,
3715 [](OpenMPClauseKind C, bool AppliedToPointee) {
3716 return C == OMPC_reduction && !AppliedToPointee;
3717 },
3718 [](OpenMPDirectiveKind K) {
3719 return isOpenMPParallelDirective(K) ||
3720 isOpenMPWorksharingDirective(K) || isOpenMPTeamsDirective(K);
3721 },
3722 /*FromParent=*/true);
3723 if (isOpenMPTaskingDirective(DKind) && DVar.CKind == OMPC_reduction) {
3724 ErrorFound = true;
3725 SemaRef.Diag(ELoc, diag::err_omp_reduction_in_task);
3726 reportOriginalDsa(SemaRef, Stack, FD, DVar);
3727 return;
3728 }
3729
3730 // Define implicit data-sharing attributes for task.
3731 DVar = Stack->getImplicitDSA(FD, /*FromParent=*/false);
3732 if (isOpenMPTaskingDirective(DKind) && DVar.CKind != OMPC_shared &&
3733 !Stack->isLoopControlVariable(FD).first) {
3734 // Check if there is a captured expression for the current field in the
3735 // region. Do not mark it as firstprivate unless there is no captured
3736 // expression.
3737 // TODO: try to make it firstprivate.
3738 if (DVar.CKind != OMPC_unknown)
3739 ImplicitFirstprivate.push_back(E);
3740 }
3741 return;
3742 }
3743 if (isOpenMPTargetExecutionDirective(DKind)) {
3744 OMPClauseMappableExprCommon::MappableExprComponentList CurComponents;
3745 if (!checkMapClauseExpressionBase(SemaRef, E, CurComponents, OMPC_map,
3746 Stack->getCurrentDirective(),
3747 /*NoDiagnose=*/true))
3748 return;
3749 const auto *VD = cast<ValueDecl>(
3750 CurComponents.back().getAssociatedDeclaration()->getCanonicalDecl());
3751 if (!Stack->checkMappableExprComponentListsForDecl(
3752 VD, /*CurrentRegionOnly=*/true,
3753 [&CurComponents](
3754 OMPClauseMappableExprCommon::MappableExprComponentListRef
3755 StackComponents,
3756 OpenMPClauseKind) {
3757 auto CCI = CurComponents.rbegin();
3758 auto CCE = CurComponents.rend();
3759 for (const auto &SC : llvm::reverse(StackComponents)) {
3760 // Do both expressions have the same kind?
3761 if (CCI->getAssociatedExpression()->getStmtClass() !=
3762 SC.getAssociatedExpression()->getStmtClass())
3763 if (!((isa<OMPArraySectionExpr>(
3764 SC.getAssociatedExpression()) ||
3765 isa<OMPArrayShapingExpr>(
3766 SC.getAssociatedExpression())) &&
3767 isa<ArraySubscriptExpr>(
3768 CCI->getAssociatedExpression())))
3769 return false;
3770
3771 const Decl *CCD = CCI->getAssociatedDeclaration();
3772 const Decl *SCD = SC.getAssociatedDeclaration();
3773 CCD = CCD ? CCD->getCanonicalDecl() : nullptr;
3774 SCD = SCD ? SCD->getCanonicalDecl() : nullptr;
3775 if (SCD != CCD)
3776 return false;
3777 std::advance(CCI, 1);
3778 if (CCI == CCE)
3779 break;
3780 }
3781 return true;
3782 })) {
3783 Visit(E->getBase());
3784 }
3785 } else if (!TryCaptureCXXThisMembers) {
3786 Visit(E->getBase());
3787 }
3788 }
3789 void VisitOMPExecutableDirective(OMPExecutableDirective *S) {
3790 for (OMPClause *C : S->clauses()) {
3791 // Skip analysis of arguments of implicitly defined firstprivate clause
3792 // for task|target directives.
3793 // Skip analysis of arguments of implicitly defined map clause for target
3794 // directives.
3795 if (C && !((isa<OMPFirstprivateClause>(C) || isa<OMPMapClause>(C)) &&
3796 C->isImplicit() &&
3797 !isOpenMPTaskingDirective(Stack->getCurrentDirective()))) {
3798 for (Stmt *CC : C->children()) {
3799 if (CC)
3800 Visit(CC);
3801 }
3802 }
3803 }
3804 // Check implicitly captured variables.
3805 VisitSubCaptures(S);
3806 }
3807
3808 void VisitOMPTileDirective(OMPTileDirective *S) {
3809 // #pragma omp tile does not introduce data sharing.
3810 VisitStmt(S);
3811 }
3812
3813 void VisitOMPUnrollDirective(OMPUnrollDirective *S) {
3814 // #pragma omp unroll does not introduce data sharing.
3815 VisitStmt(S);
3816 }
3817
3818 void VisitStmt(Stmt *S) {
3819 for (Stmt *C : S->children()) {
3820 if (C) {
3821 // Check implicitly captured variables in the task-based directives to
3822 // check if they must be firstprivatized.
3823 Visit(C);
3824 }
3825 }
3826 }
3827
3828 void visitSubCaptures(CapturedStmt *S) {
3829 for (const CapturedStmt::Capture &Cap : S->captures()) {
3830 if (!Cap.capturesVariable() && !Cap.capturesVariableByCopy())
3831 continue;
3832 VarDecl *VD = Cap.getCapturedVar();
3833 // Do not try to map the variable if it or its sub-component was mapped
3834 // already.
3835 if (isOpenMPTargetExecutionDirective(Stack->getCurrentDirective()) &&
3836 Stack->checkMappableExprComponentListsForDecl(
3837 VD, /*CurrentRegionOnly=*/true,
3838 [](OMPClauseMappableExprCommon::MappableExprComponentListRef,
3839 OpenMPClauseKind) { return true; }))
3840 continue;
3841 DeclRefExpr *DRE = buildDeclRefExpr(
3842 SemaRef, VD, VD->getType().getNonLValueExprType(SemaRef.Context),
3843 Cap.getLocation(), /*RefersToCapture=*/true);
3844 Visit(DRE);
3845 }
3846 }
3847 bool isErrorFound() const { return ErrorFound; }
3848 ArrayRef<Expr *> getImplicitFirstprivate() const {
3849 return ImplicitFirstprivate;
3850 }
3851 ArrayRef<Expr *> getImplicitMap(OpenMPDefaultmapClauseKind DK,
3852 OpenMPMapClauseKind MK) const {
3853 return ImplicitMap[DK][MK];
3854 }
3855 ArrayRef<OpenMPMapModifierKind>
3856 getImplicitMapModifier(OpenMPDefaultmapClauseKind Kind) const {
3857 return ImplicitMapModifier[Kind];
3858 }
3859 const Sema::VarsWithInheritedDSAType &getVarsWithInheritedDSA() const {
3860 return VarsWithInheritedDSA;
3861 }
3862
3863 DSAAttrChecker(DSAStackTy *S, Sema &SemaRef, CapturedStmt *CS)
3864 : Stack(S), SemaRef(SemaRef), ErrorFound(false), CS(CS) {
3865 // Process declare target link variables for the target directives.
3866 if (isOpenMPTargetExecutionDirective(S->getCurrentDirective())) {
3867 for (DeclRefExpr *E : Stack->getLinkGlobals())
3868 Visit(E);
3869 }
3870 }
3871};
3872} // namespace
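
A hypothetical snippet (not from the analyzed file) of the default(none) situation that DSAAttrChecker records in VarsWithInheritedDSA for later diagnosis:

    // Illustrative sketch only.
    void work(void) {
      int n = 0;
    #pragma omp parallel default(none)            // n is referenced without an
      { n++; }                                    // explicit DSA -> diagnosed later
    #pragma omp parallel default(none) shared(n)  // accepted: DSA is explicit
      { n++; }
    }
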
3873
3874void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
3875 switch (DKind) {
3876 case OMPD_parallel:
3877 case OMPD_parallel_for:
3878 case OMPD_parallel_for_simd:
3879 case OMPD_parallel_sections:
3880 case OMPD_parallel_master:
3881 case OMPD_teams:
3882 case OMPD_teams_distribute:
3883 case OMPD_teams_distribute_simd: {
3884 QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
3885 QualType KmpInt32PtrTy =
3886 Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
3887 Sema::CapturedParamNameType Params[] = {
3888 std::make_pair(".global_tid.", KmpInt32PtrTy),
3889 std::make_pair(".bound_tid.", KmpInt32PtrTy),
3890 std::make_pair(StringRef(), QualType()) // __context with shared vars
3891 };
3892 ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3893 Params);
3894 break;
3895 }
3896 case OMPD_target_teams:
3897 case OMPD_target_parallel:
3898 case OMPD_target_parallel_for:
3899 case OMPD_target_parallel_for_simd:
3900 case OMPD_target_teams_distribute:
3901 case OMPD_target_teams_distribute_simd: {
3902 QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
3903 QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
3904 QualType KmpInt32PtrTy =
3905 Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
3906 QualType Args[] = {VoidPtrTy};
3907 FunctionProtoType::ExtProtoInfo EPI;
3908 EPI.Variadic = true;
3909 QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
3910 Sema::CapturedParamNameType Params[] = {
3911 std::make_pair(".global_tid.", KmpInt32Ty),
3912 std::make_pair(".part_id.", KmpInt32PtrTy),
3913 std::make_pair(".privates.", VoidPtrTy),
3914 std::make_pair(
3915 ".copy_fn.",
3916 Context.getPointerType(CopyFnType).withConst().withRestrict()),
3917 std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
3918 std::make_pair(StringRef(), QualType()) // __context with shared vars
3919 };
3920 ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3921 Params, /*OpenMPCaptureLevel=*/0);
3922 // Mark this captured region as inlined, because we don't use outlined
3923 // function directly.
3924 getCurCapturedRegion()->TheCapturedDecl->addAttr(
3925 AlwaysInlineAttr::CreateImplicit(
3926 Context, {}, AttributeCommonInfo::AS_Keyword,
3927 AlwaysInlineAttr::Keyword_forceinline));
3928 Sema::CapturedParamNameType ParamsTarget[] = {
3929 std::make_pair(StringRef(), QualType()) // __context with shared vars
3930 };
3931 // Start a captured region for 'target' with no implicit parameters.
3932 ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3933 ParamsTarget, /*OpenMPCaptureLevel=*/1);
3934 Sema::CapturedParamNameType ParamsTeamsOrParallel[] = {
3935 std::make_pair(".global_tid.", KmpInt32PtrTy),
3936 std::make_pair(".bound_tid.", KmpInt32PtrTy),
3937 std::make_pair(StringRef(), QualType()) // __context with shared vars
3938 };
3939 // Start a captured region for 'teams' or 'parallel'. Both regions have
3940 // the same implicit parameters.
3941 ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3942 ParamsTeamsOrParallel, /*OpenMPCaptureLevel=*/2);
3943 break;
3944 }
3945 case OMPD_target:
3946 case OMPD_target_simd: {
3947 QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
3948 QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
3949 QualType KmpInt32PtrTy =
3950 Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
3951 QualType Args[] = {VoidPtrTy};
3952 FunctionProtoType::ExtProtoInfo EPI;
3953 EPI.Variadic = true;
3954 QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
3955 Sema::CapturedParamNameType Params[] = {
3956 std::make_pair(".global_tid.", KmpInt32Ty),
3957 std::make_pair(".part_id.", KmpInt32PtrTy),
3958 std::make_pair(".privates.", VoidPtrTy),
3959 std::make_pair(
3960 ".copy_fn.",
3961 Context.getPointerType(CopyFnType).withConst().withRestrict()),
3962 std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
3963 std::make_pair(StringRef(), QualType()) // __context with shared vars
3964 };
3965 ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3966 Params, /*OpenMPCaptureLevel=*/0);
3967 // Mark this captured region as inlined, because we don't use outlined
3968 // function directly.
3969 getCurCapturedRegion()->TheCapturedDecl->addAttr(
3970 AlwaysInlineAttr::CreateImplicit(
3971 Context, {}, AttributeCommonInfo::AS_Keyword,
3972 AlwaysInlineAttr::Keyword_forceinline));
3973 ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3974 std::make_pair(StringRef(), QualType()),
3975 /*OpenMPCaptureLevel=*/1);
3976 break;
3977 }
3978 case OMPD_atomic:
3979 case OMPD_critical:
3980 case OMPD_section:
3981 case OMPD_master:
3982 case OMPD_masked:
3983 case OMPD_tile:
3984 case OMPD_unroll:
3985 break;
3986 case OMPD_simd:
3987 case OMPD_for:
3988 case OMPD_for_simd:
3989 case OMPD_sections:
3990 case OMPD_single:
3991 case OMPD_taskgroup:
3992 case OMPD_distribute:
3993 case OMPD_distribute_simd:
3994 case OMPD_ordered:
3995 case OMPD_target_data:
3996 case OMPD_dispatch: {
3997 Sema::CapturedParamNameType Params[] = {
3998 std::make_pair(StringRef(), QualType()) // __context with shared vars
3999 };
4000 ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4001 Params);
4002 break;
4003 }
4004 case OMPD_task: {
4005 QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
4006 QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
4007 QualType KmpInt32PtrTy =
4008 Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
4009 QualType Args[] = {VoidPtrTy};
4010 FunctionProtoType::ExtProtoInfo EPI;
4011 EPI.Variadic = true;
4012 QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
4013 Sema::CapturedParamNameType Params[] = {
4014 std::make_pair(".global_tid.", KmpInt32Ty),
4015 std::make_pair(".part_id.", KmpInt32PtrTy),
4016 std::make_pair(".privates.", VoidPtrTy),
4017 std::make_pair(
4018 ".copy_fn.",
4019 Context.getPointerType(CopyFnType).withConst().withRestrict()),
4020 std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
4021 std::make_pair(StringRef(), QualType()) // __context with shared vars
4022 };
4023 ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4024 Params);
4025 // Mark this captured region as inlined, because we don't use outlined
4026 // function directly.
4027 getCurCapturedRegion()->TheCapturedDecl->addAttr(
4028 AlwaysInlineAttr::CreateImplicit(
4029 Context, {}, AttributeCommonInfo::AS_Keyword,
4030 AlwaysInlineAttr::Keyword_forceinline));
4031 break;
4032 }
4033 case OMPD_taskloop:
4034 case OMPD_taskloop_simd:
4035 case OMPD_master_taskloop:
4036 case OMPD_master_taskloop_simd: {
4037 QualType KmpInt32Ty =
4038 Context.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1)
4039 .withConst();
4040 QualType KmpUInt64Ty =
4041 Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0)
4042 .withConst();
4043 QualType KmpInt64Ty =
4044 Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1)
4045 .withConst();
4046 QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
4047 QualType KmpInt32PtrTy =
4048 Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
4049 QualType Args[] = {VoidPtrTy};
4050 FunctionProtoType::ExtProtoInfo EPI;
4051 EPI.Variadic = true;
4052 QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
4053 Sema::CapturedParamNameType Params[] = {
4054 std::make_pair(".global_tid.", KmpInt32Ty),
4055 std::make_pair(".part_id.", KmpInt32PtrTy),
4056 std::make_pair(".privates.", VoidPtrTy),
4057 std::make_pair(
4058 ".copy_fn.",
4059 Context.getPointerType(CopyFnType).withConst().withRestrict()),
4060 std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
4061 std::make_pair(".lb.", KmpUInt64Ty),
4062 std::make_pair(".ub.", KmpUInt64Ty),
4063 std::make_pair(".st.", KmpInt64Ty),
4064 std::make_pair(".liter.", KmpInt32Ty),
4065 std::make_pair(".reductions.", VoidPtrTy),
4066 std::make_pair(StringRef(), QualType()) // __context with shared vars
4067 };
4068 ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4069 Params);
4070 // Mark this captured region as inlined, because we don't use outlined
4071 // function directly.
4072 getCurCapturedRegion()->TheCapturedDecl->addAttr(
4073 AlwaysInlineAttr::CreateImplicit(
4074 Context, {}, AttributeCommonInfo::AS_Keyword,
4075 AlwaysInlineAttr::Keyword_forceinline));
4076 break;
4077 }
4078 case OMPD_parallel_master_taskloop:
4079 case OMPD_parallel_master_taskloop_simd: {
4080 QualType KmpInt32Ty =
4081 Context.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1)
4082 .withConst();
4083 QualType KmpUInt64Ty =
4084 Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0)
4085 .withConst();
4086 QualType KmpInt64Ty =
4087 Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1)
4088 .withConst();
4089 QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
4090 QualType KmpInt32PtrTy =
4091 Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
4092 Sema::CapturedParamNameType ParamsParallel[] = {
4093 std::make_pair(".global_tid.", KmpInt32PtrTy),
4094 std::make_pair(".bound_tid.", KmpInt32PtrTy),
4095 std::make_pair(StringRef(), QualType()) // __context with shared vars
4096 };
4097 // Start a captured region for 'parallel'.
4098 ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4099 ParamsParallel, /*OpenMPCaptureLevel=*/0);
4100 QualType Args[] = {VoidPtrTy};
4101 FunctionProtoType::ExtProtoInfo EPI;
4102 EPI.Variadic = true;
4103 QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
4104 Sema::CapturedParamNameType Params[] = {
4105 std::make_pair(".global_tid.", KmpInt32Ty),
4106 std::make_pair(".part_id.", KmpInt32PtrTy),
4107 std::make_pair(".privates.", VoidPtrTy),
4108 std::make_pair(
4109 ".copy_fn.",
4110 Context.getPointerType(CopyFnType).withConst().withRestrict()),
4111 std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
4112 std::make_pair(".lb.", KmpUInt64Ty),
4113 std::make_pair(".ub.", KmpUInt64Ty),
4114 std::make_pair(".st.", KmpInt64Ty),
4115 std::make_pair(".liter.", KmpInt32Ty),
4116 std::make_pair(".reductions.", VoidPtrTy),
4117 std::make_pair(StringRef(), QualType()) // __context with shared vars
4118 };
4119 ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4120 Params, /*OpenMPCaptureLevel=*/1);
4121 // Mark this captured region as inlined, because we don't use outlined
4122 // function directly.
4123 getCurCapturedRegion()->TheCapturedDecl->addAttr(
4124 AlwaysInlineAttr::CreateImplicit(
4125 Context, {}, AttributeCommonInfo::AS_Keyword,
4126 AlwaysInlineAttr::Keyword_forceinline));
4127 break;
4128 }
4129 case OMPD_distribute_parallel_for_simd:
4130 case OMPD_distribute_parallel_for: {
4131 QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
4132 QualType KmpInt32PtrTy =
4133 Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
4134 Sema::CapturedParamNameType Params[] = {
4135 std::make_pair(".global_tid.", KmpInt32PtrTy),
4136 std::make_pair(".bound_tid.", KmpInt32PtrTy),
4137 std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
4138 std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
4139 std::make_pair(StringRef(), QualType()) // __context with shared vars
4140 };
4141 ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4142 Params);
4143 break;
4144 }
4145 case OMPD_target_teams_distribute_parallel_for:
4146 case OMPD_target_teams_distribute_parallel_for_simd: {
4147 QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
4148 QualType KmpInt32PtrTy =
4149 Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
4150 QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
4151
4152 QualType Args[] = {VoidPtrTy};
4153 FunctionProtoType::ExtProtoInfo EPI;
4154 EPI.Variadic = true;
4155 QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
4156 Sema::CapturedParamNameType Params[] = {
4157 std::make_pair(".global_tid.", KmpInt32Ty),
4158 std::make_pair(".part_id.", KmpInt32PtrTy),
4159 std::make_pair(".privates.", VoidPtrTy),
4160 std::make_pair(
4161 ".copy_fn.",
4162 Context.getPointerType(CopyFnType).withConst().withRestrict()),
4163 std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
4164 std::make_pair(StringRef(), QualType()) // __context with shared vars
4165 };
4166 ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4167 Params, /*OpenMPCaptureLevel=*/0);
4168 // Mark this captured region as inlined, because we don't use outlined
4169 // function directly.
4170 getCurCapturedRegion()->TheCapturedDecl->addAttr(
4171 AlwaysInlineAttr::CreateImplicit(
4172 Context, {}, AttributeCommonInfo::AS_Keyword,
4173 AlwaysInlineAttr::Keyword_forceinline));
4174 Sema::CapturedParamNameType ParamsTarget[] = {
4175 std::make_pair(StringRef(), QualType()) // __context with shared vars
4176 };
4177 // Start a captured region for 'target' with no implicit parameters.
4178 ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4179 ParamsTarget, /*OpenMPCaptureLevel=*/1);
4180
4181 Sema::CapturedParamNameType ParamsTeams[] = {
4182 std::make_pair(".global_tid.", KmpInt32PtrTy),
4183 std::make_pair(".bound_tid.", KmpInt32PtrTy),
4184 std::make_pair(StringRef(), QualType()) // __context with shared vars
4185 };
4186 // Start a captured region for 'teams'.
4187 ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4188 ParamsTeams, /*OpenMPCaptureLevel=*/2);
4189
4190 Sema::CapturedParamNameType ParamsParallel[] = {
4191 std::make_pair(".global_tid.", KmpInt32PtrTy),
4192 std::make_pair(".bound_tid.", KmpInt32PtrTy),
4193 std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
4194 std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
4195 std::make_pair(StringRef(), QualType()) // __context with shared vars
4196 };
4197 // Start a captured region for 'teams' or 'parallel'. Both regions have
4198 // the same implicit parameters.
4199 ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4200 ParamsParallel, /*OpenMPCaptureLevel=*/3);
4201 break;
4202 }
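// Illustrative example (assumed user code, not from this file): a combined
// directive such as
//   #pragma omp target teams distribute parallel for
//   for (int i = 0; i < n; ++i) a[i] = b[i];
// produces the four nested capture regions built above, which appear to
// correspond to the implicit target task (level 0), 'target' (level 1),
// 'teams' (level 2) and 'parallel' (level 3).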
4203
4204 case OMPD_teams_distribute_parallel_for:
4205 case OMPD_teams_distribute_parallel_for_simd: {
4206 QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
4207 QualType KmpInt32PtrTy =
4208 Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
4209
4210 Sema::CapturedParamNameType ParamsTeams[] = {
4211 std::make_pair(".global_tid.", KmpInt32PtrTy),
4212 std::make_pair(".bound_tid.", KmpInt32PtrTy),
4213 std::make_pair(StringRef(), QualType()) // __context with shared vars
4214 };
4215 // Start a captured region for 'teams'.
4216 ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4217 ParamsTeams, /*OpenMPCaptureLevel=*/0);
4218
4219 Sema::CapturedParamNameType ParamsParallel[] = {
4220 std::make_pair(".global_tid.", KmpInt32PtrTy),
4221 std::make_pair(".bound_tid.", KmpInt32PtrTy),
4222 std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
4223 std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
4224 std::make_pair(StringRef(), QualType()) // __context with shared vars
4225 };
4226 // Start a captured region for 'teams' or 'parallel'. Both regions have
4227 // the same implicit parameters.
4228 ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4229 ParamsParallel, /*OpenMPCaptureLevel=*/1);
4230 break;
4231 }
4232 case OMPD_target_update:
4233 case OMPD_target_enter_data:
4234 case OMPD_target_exit_data: {
4235 QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
4236 QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
4237 QualType KmpInt32PtrTy =
4238 Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
4239 QualType Args[] = {VoidPtrTy};
4240 FunctionProtoType::ExtProtoInfo EPI;
4241 EPI.Variadic = true;
4242 QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
4243 Sema::CapturedParamNameType Params[] = {
4244 std::make_pair(".global_tid.", KmpInt32Ty),
4245 std::make_pair(".part_id.", KmpInt32PtrTy),
4246 std::make_pair(".privates.", VoidPtrTy),
4247 std::make_pair(
4248 ".copy_fn.",
4249 Context.getPointerType(CopyFnType).withConst().withRestrict()),
4250 std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
4251 std::make_pair(StringRef(), QualType()) // __context with shared vars
4252 };
4253 ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4254 Params);
4255 // Mark this captured region as inlined, because we don't use outlined
4256 // function directly.
4257 getCurCapturedRegion()->TheCapturedDecl->addAttr(
4258 AlwaysInlineAttr::CreateImplicit(
4259 Context, {}, AttributeCommonInfo::AS_Keyword,
4260 AlwaysInlineAttr::Keyword_forceinline));
4261 break;
4262 }
4263 case OMPD_threadprivate:
4264 case OMPD_allocate:
4265 case OMPD_taskyield:
4266 case OMPD_barrier:
4267 case OMPD_taskwait:
4268 case OMPD_cancellation_point:
4269 case OMPD_cancel:
4270 case OMPD_flush:
4271 case OMPD_depobj:
4272 case OMPD_scan:
4273 case OMPD_declare_reduction:
4274 case OMPD_declare_mapper:
4275 case OMPD_declare_simd:
4276 case OMPD_declare_target:
4277 case OMPD_end_declare_target:
4278 case OMPD_requires:
4279 case OMPD_declare_variant:
4280 case OMPD_begin_declare_variant:
4281 case OMPD_end_declare_variant:
4282 llvm_unreachable("OpenMP Directive is not allowed");
4283 case OMPD_unknown:
4284 default:
4285 llvm_unreachable("Unknown OpenMP directive");
4286 }
4287 DSAStack->setContext(CurContext);
4288}
4289
4290int Sema::getNumberOfConstructScopes(unsigned Level) const {
4291 return getOpenMPCaptureLevels(DSAStack->getDirective(Level));
4292}
4293
4294int Sema::getOpenMPCaptureLevels(OpenMPDirectiveKind DKind) {
4295 SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
4296 getOpenMPCaptureRegions(CaptureRegions, DKind);
4297 return CaptureRegions.size();
4298}
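// For example, getOpenMPCaptureLevels(OMPD_target_teams_distribute_parallel_for)
// presumably returns 4, matching the four ActOnCapturedRegionStart calls
// (OpenMPCaptureLevel 0..3) issued for that directive above.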
4299
4300static OMPCapturedExprDecl *buildCaptureDecl(Sema &S, IdentifierInfo *Id,
4301 Expr *CaptureExpr, bool WithInit,
4302 bool AsExpression) {
4303 assert(CaptureExpr);
4304 ASTContext &C = S.getASTContext();
4305 Expr *Init = AsExpression ? CaptureExpr : CaptureExpr->IgnoreImpCasts();
4306 QualType Ty = Init->getType();
4307 if (CaptureExpr->getObjectKind() == OK_Ordinary && CaptureExpr->isGLValue()) {
4308 if (S.getLangOpts().CPlusPlus) {
4309 Ty = C.getLValueReferenceType(Ty);
4310 } else {
4311 Ty = C.getPointerType(Ty);
4312 ExprResult Res =
4313 S.CreateBuiltinUnaryOp(CaptureExpr->getExprLoc(), UO_AddrOf, Init);
4314 if (!Res.isUsable())
4315 return nullptr;
4316 Init = Res.get();
4317 }
4318 WithInit = true;
4319 }
4320 auto *CED = OMPCapturedExprDecl::Create(C, S.CurContext, Id, Ty,
4321 CaptureExpr->getBeginLoc());
4322 if (!WithInit)
4323 CED->addAttr(OMPCaptureNoInitAttr::CreateImplicit(C));
4324 S.CurContext->addHiddenDecl(CED);
4325 Sema::TentativeAnalysisScope Trap(S);
4326 S.AddInitializerToDecl(CED, Init, /*DirectInit=*/false);
4327 return CED;
4328}
4329
4330static DeclRefExpr *buildCapture(Sema &S, ValueDecl *D, Expr *CaptureExpr,
4331 bool WithInit) {
4332 OMPCapturedExprDecl *CD;
4333 if (VarDecl *VD = S.isOpenMPCapturedDecl(D))
4334 CD = cast<OMPCapturedExprDecl>(VD);
4335 else
4336 CD = buildCaptureDecl(S, D->getIdentifier(), CaptureExpr, WithInit,
4337 /*AsExpression=*/false);
4338 return buildDeclRefExpr(S, CD, CD->getType().getNonReferenceType(),
4339 CaptureExpr->getExprLoc());
4340}
4341
4342static ExprResult buildCapture(Sema &S, Expr *CaptureExpr, DeclRefExpr *&Ref) {
4343 CaptureExpr = S.DefaultLvalueConversion(CaptureExpr).get();
4344 if (!Ref) {
4345 OMPCapturedExprDecl *CD = buildCaptureDecl(
4346 S, &S.getASTContext().Idents.get(".capture_expr."), CaptureExpr,
4347 /*WithInit=*/true, /*AsExpression=*/true);
4348 Ref = buildDeclRefExpr(S, CD, CD->getType().getNonReferenceType(),
4349 CaptureExpr->getExprLoc());
4350 }
4351 ExprResult Res = Ref;
4352 if (!S.getLangOpts().CPlusPlus &&
4353 CaptureExpr->getObjectKind() == OK_Ordinary && CaptureExpr->isGLValue() &&
4354 Ref->getType()->isPointerType()) {
4355 Res = S.CreateBuiltinUnaryOp(CaptureExpr->getExprLoc(), UO_Deref, Ref);
4356 if (!Res.isUsable())
4357 return ExprError();
4358 }
4359 return S.DefaultLvalueConversion(Res.get());
4360}
4361
4362namespace {
4363// OpenMP directives parsed in this section are represented as a
4364// CapturedStatement with an associated statement. If a syntax error
4365// is detected during the parsing of the associated statement, the
4366// compiler must abort processing and close the CapturedStatement.
4367//
4368// Combined directives such as 'target parallel' have more than one
4369 // nested CapturedStatement. This RAII ensures that we unwind out
4370// of all the nested CapturedStatements when an error is found.
4371class CaptureRegionUnwinderRAII {
4372private:
4373 Sema &S;
4374 bool &ErrorFound;
4375 OpenMPDirectiveKind DKind = OMPD_unknown;
4376
4377public:
4378 CaptureRegionUnwinderRAII(Sema &S, bool &ErrorFound,
4379 OpenMPDirectiveKind DKind)
4380 : S(S), ErrorFound(ErrorFound), DKind(DKind) {}
4381 ~CaptureRegionUnwinderRAII() {
4382 if (ErrorFound) {
4383 int ThisCaptureLevel = S.getOpenMPCaptureLevels(DKind);
4384 while (--ThisCaptureLevel >= 0)
4385 S.ActOnCapturedRegionError();
4386 }
4387 }
4388};
4389} // namespace
4390
4391void Sema::tryCaptureOpenMPLambdas(ValueDecl *V) {
4392 // Capture variables captured by reference in lambdas for target-based
4393 // directives.
4394 if (!CurContext->isDependentContext() &&
4395 (isOpenMPTargetExecutionDirective(DSAStack->getCurrentDirective()) ||
4396 isOpenMPTargetDataManagementDirective(
4397 DSAStack->getCurrentDirective()))) {
4398 QualType Type = V->getType();
4399 if (const auto *RD = Type.getCanonicalType()
4400 .getNonReferenceType()
4401 ->getAsCXXRecordDecl()) {
4402 bool SavedForceCaptureByReferenceInTargetExecutable =
4403 DSAStack->isForceCaptureByReferenceInTargetExecutable();
4404 DSAStack->setForceCaptureByReferenceInTargetExecutable(
4405 /*V=*/true);
4406 if (RD->isLambda()) {
4407 llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
4408 FieldDecl *ThisCapture;
4409 RD->getCaptureFields(Captures, ThisCapture);
4410 for (const LambdaCapture &LC : RD->captures()) {
4411 if (LC.getCaptureKind() == LCK_ByRef) {
4412 VarDecl *VD = LC.getCapturedVar();
4413 DeclContext *VDC = VD->getDeclContext();
4414 if (!VDC->Encloses(CurContext))
4415 continue;
4416 MarkVariableReferenced(LC.getLocation(), VD);
4417 } else if (LC.getCaptureKind() == LCK_This) {
4418 QualType ThisTy = getCurrentThisType();
4419 if (!ThisTy.isNull() &&
4420 Context.typesAreCompatible(ThisTy, ThisCapture->getType()))
4421 CheckCXXThisCapture(LC.getLocation());
4422 }
4423 }
4424 }
4425 DSAStack->setForceCaptureByReferenceInTargetExecutable(
4426 SavedForceCaptureByReferenceInTargetExecutable);
4427 }
4428 }
4429}
4430
4431static bool checkOrderedOrderSpecified(Sema &S,
4432 const ArrayRef<OMPClause *> Clauses) {
4433 const OMPOrderedClause *Ordered = nullptr;
4434 const OMPOrderClause *Order = nullptr;
4435
4436 for (const OMPClause *Clause : Clauses) {
4437 if (Clause->getClauseKind() == OMPC_ordered)
4438 Ordered = cast<OMPOrderedClause>(Clause);
4439 else if (Clause->getClauseKind() == OMPC_order) {
4440 Order = cast<OMPOrderClause>(Clause);
4441 if (Order->getKind() != OMPC_ORDER_concurrent)
4442 Order = nullptr;
4443 }
4444 if (Ordered && Order)
4445 break;
4446 }
4447
4448 if (Ordered && Order) {
4449 S.Diag(Order->getKindKwLoc(),
4450 diag::err_omp_simple_clause_incompatible_with_ordered)
4451 << getOpenMPClauseName(OMPC_order)
4452 << getOpenMPSimpleClauseTypeName(OMPC_order, OMPC_ORDER_concurrent)
4453 << SourceRange(Order->getBeginLoc(), Order->getEndLoc());
4454 S.Diag(Ordered->getBeginLoc(), diag::note_omp_ordered_param)
4455 << 0 << SourceRange(Ordered->getBeginLoc(), Ordered->getEndLoc());
4456 return true;
4457 }
4458 return false;
4459}
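// Illustrative example (assumed user code) of the clash diagnosed above:
//   #pragma omp for ordered order(concurrent)
// carries both clauses, so order(concurrent) is reported as incompatible
// with the ordered clause and the function returns true.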
4460
4461StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
4462 ArrayRef<OMPClause *> Clauses) {
4463 if (DSAStack->getCurrentDirective() == OMPD_atomic ||
4464 DSAStack->getCurrentDirective() == OMPD_critical ||
4465 DSAStack->getCurrentDirective() == OMPD_section ||
4466 DSAStack->getCurrentDirective() == OMPD_master ||
4467 DSAStack->getCurrentDirective() == OMPD_masked)
4468 return S;
4469
4470 bool ErrorFound = false;
4471 CaptureRegionUnwinderRAII CaptureRegionUnwinder(
4472 *this, ErrorFound, DSAStack->getCurrentDirective());
4473 if (!S.isUsable()) {
4474 ErrorFound = true;
4475 return StmtError();
4476 }
4477
4478 SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
4479 getOpenMPCaptureRegions(CaptureRegions, DSAStack->getCurrentDirective());
4480 OMPOrderedClause *OC = nullptr;
4481 OMPScheduleClause *SC = nullptr;
4482 SmallVector<const OMPLinearClause *, 4> LCs;
4483 SmallVector<const OMPClauseWithPreInit *, 4> PICs;
4484 // This is required for proper codegen.
4485 for (OMPClause *Clause : Clauses) {
4486 if (!LangOpts.OpenMPSimd &&
4487 isOpenMPTaskingDirective(DSAStack->getCurrentDirective()) &&
4488 Clause->getClauseKind() == OMPC_in_reduction) {
4489 // Capture taskgroup task_reduction descriptors inside the tasking regions
4490 // with the corresponding in_reduction items.
4491 auto *IRC = cast<OMPInReductionClause>(Clause);
4492 for (Expr *E : IRC->taskgroup_descriptors())
4493 if (E)
4494 MarkDeclarationsReferencedInExpr(E);
4495 }
4496 if (isOpenMPPrivate(Clause->getClauseKind()) ||
4497 Clause->getClauseKind() == OMPC_copyprivate ||
4498 (getLangOpts().OpenMPUseTLS &&
4499 getASTContext().getTargetInfo().isTLSSupported() &&
4500 Clause->getClauseKind() == OMPC_copyin)) {
4501 DSAStack->setForceVarCapturing(Clause->getClauseKind() == OMPC_copyin);
4502 // Mark all variables in private list clauses as used in inner region.
4503 for (Stmt *VarRef : Clause->children()) {
4504 if (auto *E = cast_or_null<Expr>(VarRef)) {
4505 MarkDeclarationsReferencedInExpr(E);
4506 }
4507 }
4508 DSAStack->setForceVarCapturing(/*V=*/false);
4509 } else if (isOpenMPLoopTransformationDirective(
4510 DSAStack->getCurrentDirective())) {
4511 assert(CaptureRegions.empty() &&
4512 "No captured regions in loop transformation directives.");
4513 } else if (CaptureRegions.size() > 1 ||
4514 CaptureRegions.back() != OMPD_unknown) {
4515 if (auto *C = OMPClauseWithPreInit::get(Clause))
4516 PICs.push_back(C);
4517 if (auto *C = OMPClauseWithPostUpdate::get(Clause)) {
4518 if (Expr *E = C->getPostUpdateExpr())
4519 MarkDeclarationsReferencedInExpr(E);
4520 }
4521 }
4522 if (Clause->getClauseKind() == OMPC_schedule)
4523 SC = cast<OMPScheduleClause>(Clause);
4524 else if (Clause->getClauseKind() == OMPC_ordered)
4525 OC = cast<OMPOrderedClause>(Clause);
4526 else if (Clause->getClauseKind() == OMPC_linear)
4527 LCs.push_back(cast<OMPLinearClause>(Clause));
4528 }
4529 // Capture allocator expressions if used.
4530 for (Expr *E : DSAStack->getInnerAllocators())
4531 MarkDeclarationsReferencedInExpr(E);
4532 // OpenMP, 2.7.1 Loop Construct, Restrictions
4533 // The nonmonotonic modifier cannot be specified if an ordered clause is
4534 // specified.
4535 if (SC &&
4536 (SC->getFirstScheduleModifier() == OMPC_SCHEDULE_MODIFIER_nonmonotonic ||
4537 SC->getSecondScheduleModifier() ==
4538 OMPC_SCHEDULE_MODIFIER_nonmonotonic) &&
4539 OC) {
4540 Diag(SC->getFirstScheduleModifier() == OMPC_SCHEDULE_MODIFIER_nonmonotonic
4541 ? SC->getFirstScheduleModifierLoc()
4542 : SC->getSecondScheduleModifierLoc(),
4543 diag::err_omp_simple_clause_incompatible_with_ordered)
4544 << getOpenMPClauseName(OMPC_schedule)
4545 << getOpenMPSimpleClauseTypeName(OMPC_schedule,
4546 OMPC_SCHEDULE_MODIFIER_nonmonotonic)
4547 << SourceRange(OC->getBeginLoc(), OC->getEndLoc());
4548 ErrorFound = true;
4549 }
4550 // OpenMP 5.0, 2.9.2 Worksharing-Loop Construct, Restrictions.
4551 // If an order(concurrent) clause is present, an ordered clause may not appear
4552 // on the same directive.
4553 if (checkOrderedOrderSpecified(*this, Clauses))
4554 ErrorFound = true;
4555 if (!LCs.empty() && OC && OC->getNumForLoops()) {
4556 for (const OMPLinearClause *C : LCs) {
4557 Diag(C->getBeginLoc(), diag::err_omp_linear_ordered)
4558 << SourceRange(OC->getBeginLoc(), OC->getEndLoc());
4559 }
4560 ErrorFound = true;
4561 }
4562 if (isOpenMPWorksharingDirective(DSAStack->getCurrentDirective()) &&
4563 isOpenMPSimdDirective(DSAStack->getCurrentDirective()) && OC &&
4564 OC->getNumForLoops()) {
4565 Diag(OC->getBeginLoc(), diag::err_omp_ordered_simd)
4566 << getOpenMPDirectiveName(DSAStack->getCurrentDirective());
4567 ErrorFound = true;
4568 }
4569 if (ErrorFound) {
4570 return StmtError();
4571 }
4572 StmtResult SR = S;
4573 unsigned CompletedRegions = 0;
4574 for (OpenMPDirectiveKind ThisCaptureRegion : llvm::reverse(CaptureRegions)) {
4575 // Mark all variables in private list clauses as used in inner region.
4576 // Required for proper codegen of combined directives.
4577 // TODO: add processing for other clauses.
4578 if (ThisCaptureRegion != OMPD_unknown) {
4579 for (const clang::OMPClauseWithPreInit *C : PICs) {
4580 OpenMPDirectiveKind CaptureRegion = C->getCaptureRegion();
4581 // Find the particular capture region for the clause if the
4582 // directive is a combined one with multiple capture regions.
4583 // If the directive is not a combined one, the capture region
4584 // associated with the clause is OMPD_unknown and is generated
4585 // only once.
4586 if (CaptureRegion == ThisCaptureRegion ||
4587 CaptureRegion == OMPD_unknown) {
4588 if (auto *DS = cast_or_null<DeclStmt>(C->getPreInitStmt())) {
4589 for (Decl *D : DS->decls())
4590 MarkVariableReferenced(D->getLocation(), cast<VarDecl>(D));
4591 }
4592 }
4593 }
4594 }
4595 if (ThisCaptureRegion == OMPD_target) {
4596 // Capture allocator traits in the target region. They are used implicitly
4597 // and, thus, are not captured by default.
4598 for (OMPClause *C : Clauses) {
4599 if (const auto *UAC = dyn_cast<OMPUsesAllocatorsClause>(C)) {
4600 for (unsigned I = 0, End = UAC->getNumberOfAllocators(); I < End;
4601 ++I) {
4602 OMPUsesAllocatorsClause::Data D = UAC->getAllocatorData(I);
4603 if (Expr *E = D.AllocatorTraits)
4604 MarkDeclarationsReferencedInExpr(E);
4605 }
4606 continue;
4607 }
4608 }
4609 }
4610 if (ThisCaptureRegion == OMPD_parallel) {
4611 // Capture temp arrays for inscan reductions and locals in aligned
4612 // clauses.
4613 for (OMPClause *C : Clauses) {
4614 if (auto *RC = dyn_cast<OMPReductionClause>(C)) {
4615 if (RC->getModifier() != OMPC_REDUCTION_inscan)
4616 continue;
4617 for (Expr *E : RC->copy_array_temps())
4618 MarkDeclarationsReferencedInExpr(E);
4619 }
4620 if (auto *AC = dyn_cast<OMPAlignedClause>(C)) {
4621 for (Expr *E : AC->varlists())
4622 MarkDeclarationsReferencedInExpr(E);
4623 }
4624 }
4625 }
4626 if (++CompletedRegions == CaptureRegions.size())
4627 DSAStack->setBodyComplete();
4628 SR = ActOnCapturedRegionEnd(SR.get());
4629 }
4630 return SR;
4631}
4632
4633static bool checkCancelRegion(Sema &SemaRef, OpenMPDirectiveKind CurrentRegion,
4634 OpenMPDirectiveKind CancelRegion,
4635 SourceLocation StartLoc) {
4636 // CancelRegion is only needed for cancel and cancellation_point.
4637 if (CurrentRegion != OMPD_cancel && CurrentRegion != OMPD_cancellation_point)
4638 return false;
4639
4640 if (CancelRegion == OMPD_parallel || CancelRegion == OMPD_for ||
4641 CancelRegion == OMPD_sections || CancelRegion == OMPD_taskgroup)
4642 return false;
4643
4644 SemaRef.Diag(StartLoc, diag::err_omp_wrong_cancel_region)
4645 << getOpenMPDirectiveName(CancelRegion);
4646 return true;
4647}
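// Illustrative example (assumed user code): '#pragma omp cancel parallel'
// names one of the accepted construct-type-clauses (parallel, for,
// sections, taskgroup) and passes this check; any other construct-type
// would be diagnosed with err_omp_wrong_cancel_region.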
4648
4649static bool checkNestingOfRegions(Sema &SemaRef, const DSAStackTy *Stack,
4650 OpenMPDirectiveKind CurrentRegion,
4651 const DeclarationNameInfo &CurrentName,
4652 OpenMPDirectiveKind CancelRegion,
4653 SourceLocation StartLoc) {
4654 if (Stack->getCurScope()) {
4655 OpenMPDirectiveKind ParentRegion = Stack->getParentDirective();
4656 OpenMPDirectiveKind OffendingRegion = ParentRegion;
4657 bool NestingProhibited = false;
4658 bool CloseNesting = true;
4659 bool OrphanSeen = false;
4660 enum {
4661 NoRecommend,
4662 ShouldBeInParallelRegion,
4663 ShouldBeInOrderedRegion,
4664 ShouldBeInTargetRegion,
4665 ShouldBeInTeamsRegion,
4666 ShouldBeInLoopSimdRegion,
4667 } Recommend = NoRecommend;
4668 if (isOpenMPSimdDirective(ParentRegion) &&
4669 ((SemaRef.LangOpts.OpenMP <= 45 && CurrentRegion != OMPD_ordered) ||
4670 (SemaRef.LangOpts.OpenMP >= 50 && CurrentRegion != OMPD_ordered &&
4671 CurrentRegion != OMPD_simd && CurrentRegion != OMPD_atomic &&
4672 CurrentRegion != OMPD_scan))) {
4673 // OpenMP [2.16, Nesting of Regions]
4674 // OpenMP constructs may not be nested inside a simd region.
4675 // OpenMP [2.8.1,simd Construct, Restrictions]
4676 // An ordered construct with the simd clause is the only OpenMP
4677 // construct that can appear in the simd region.
4678 // Allowing a SIMD construct nested in another SIMD construct is an
4679 // extension. The OpenMP 4.5 spec does not allow it. Issue a warning
4680 // message.
4681 // OpenMP 5.0 [2.9.3.1, simd Construct, Restrictions]
4682 // The only OpenMP constructs that can be encountered during execution of
4683 // a simd region are the atomic construct, the loop construct, the simd
4684 // construct and the ordered construct with the simd clause.
4685 SemaRef.Diag(StartLoc, (CurrentRegion != OMPD_simd)
4686 ? diag::err_omp_prohibited_region_simd
4687 : diag::warn_omp_nesting_simd)
4688 << (SemaRef.LangOpts.OpenMP >= 50 ? 1 : 0);
4689 return CurrentRegion != OMPD_simd;
4690 }
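// Illustrative example (assumed user code): a '#pragma omp for' nested
// inside a '#pragma omp simd' region is rejected here with
// err_omp_prohibited_region_simd, while nesting another 'simd' is treated
// as an extension (a warning under OpenMP 4.5) rather than an error.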
4691 if (ParentRegion == OMPD_atomic) {
4692 // OpenMP [2.16, Nesting of Regions]
4693 // OpenMP constructs may not be nested inside an atomic region.
4694 SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region_atomic);
4695 return true;
4696 }
4697 if (CurrentRegion == OMPD_section) {
4698 // OpenMP [2.7.2, sections Construct, Restrictions]
4699 // Orphaned section directives are prohibited. That is, the section
4700 // directives must appear within the sections construct and must not be
4701 // encountered elsewhere in the sections region.
4702 if (ParentRegion != OMPD_sections &&
4703 ParentRegion != OMPD_parallel_sections) {
4704 SemaRef.Diag(StartLoc, diag::err_omp_orphaned_section_directive)
4705 << (ParentRegion != OMPD_unknown)
4706 << getOpenMPDirectiveName(ParentRegion);
4707 return true;
4708 }
4709 return false;
4710 }
4711 // Allow some constructs (except teams and cancellation constructs) to be
4712 // orphaned (they could be used in functions, called from OpenMP regions
4713 // with the required preconditions).
4714 if (ParentRegion == OMPD_unknown &&
4715 !isOpenMPNestingTeamsDirective(CurrentRegion) &&
4716 CurrentRegion != OMPD_cancellation_point &&
4717 CurrentRegion != OMPD_cancel && CurrentRegion != OMPD_scan)
4718 return false;
4719 if (CurrentRegion == OMPD_cancellation_point ||
4720 CurrentRegion == OMPD_cancel) {
4721 // OpenMP [2.16, Nesting of Regions]
4722 // A cancellation point construct for which construct-type-clause is
4723 // taskgroup must be nested inside a task construct. A cancellation
4724 // point construct for which construct-type-clause is not taskgroup must
4725 // be closely nested inside an OpenMP construct that matches the type
4726 // specified in construct-type-clause.
4727 // A cancel construct for which construct-type-clause is taskgroup must be
4728 // nested inside a task construct. A cancel construct for which
4729 // construct-type-clause is not taskgroup must be closely nested inside an
4730 // OpenMP construct that matches the type specified in
4731 // construct-type-clause.
4732 NestingProhibited =
4733 !((CancelRegion == OMPD_parallel &&
4734 (ParentRegion == OMPD_parallel ||
4735 ParentRegion == OMPD_target_parallel)) ||
4736 (CancelRegion == OMPD_for &&
4737 (ParentRegion == OMPD_for || ParentRegion == OMPD_parallel_for ||
4738 ParentRegion == OMPD_target_parallel_for ||
4739 ParentRegion == OMPD_distribute_parallel_for ||
4740 ParentRegion == OMPD_teams_distribute_parallel_for ||
4741 ParentRegion == OMPD_target_teams_distribute_parallel_for)) ||
4742 (CancelRegion == OMPD_taskgroup &&
4743 (ParentRegion == OMPD_task ||
4744 (SemaRef.getLangOpts().OpenMP >= 50 &&
4745 (ParentRegion == OMPD_taskloop ||
4746 ParentRegion == OMPD_master_taskloop ||
4747 ParentRegion == OMPD_parallel_master_taskloop)))) ||
4748 (CancelRegion == OMPD_sections &&
4749 (ParentRegion == OMPD_section || ParentRegion == OMPD_sections ||
4750 ParentRegion == OMPD_parallel_sections)));
4751 OrphanSeen = ParentRegion == OMPD_unknown;
4752 } else if (CurrentRegion == OMPD_master || CurrentRegion == OMPD_masked) {
4753 // OpenMP 5.1 [2.22, Nesting of Regions]
4754 // A masked region may not be closely nested inside a worksharing, loop,
4755 // atomic, task, or taskloop region.
4756 NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
4757 isOpenMPTaskingDirective(ParentRegion);
4758 } else if (CurrentRegion == OMPD_critical && CurrentName.getName()) {
4759 // OpenMP [2.16, Nesting of Regions]
4760 // A critical region may not be nested (closely or otherwise) inside a
4761 // critical region with the same name. Note that this restriction is not
4762 // sufficient to prevent deadlock.
4763 SourceLocation PreviousCriticalLoc;
4764 bool DeadLock = Stack->hasDirective(
4765 [CurrentName, &PreviousCriticalLoc](OpenMPDirectiveKind K,
4766 const DeclarationNameInfo &DNI,
4767 SourceLocation Loc) {
4768 if (K == OMPD_critical && DNI.getName() == CurrentName.getName()) {
4769 PreviousCriticalLoc = Loc;
4770 return true;
4771 }
4772 return false;
4773 },
4774 false /* skip top directive */);
4775 if (DeadLock) {
4776 SemaRef.Diag(StartLoc,
4777 diag::err_omp_prohibited_region_critical_same_name)
4778 << CurrentName.getName();
4779 if (PreviousCriticalLoc.isValid())
4780 SemaRef.Diag(PreviousCriticalLoc,
4781 diag::note_omp_previous_critical_region);
4782 return true;
4783 }
4784 } else if (CurrentRegion == OMPD_barrier) {
4785 // OpenMP 5.1 [2.22, Nesting of Regions]
4786 // A barrier region may not be closely nested inside a worksharing, loop,
4787 // task, taskloop, critical, ordered, atomic, or masked region.
4788 NestingProhibited =
4789 isOpenMPWorksharingDirective(ParentRegion) ||
4790 isOpenMPTaskingDirective(ParentRegion) ||
4791 ParentRegion == OMPD_master || ParentRegion == OMPD_masked ||
4792 ParentRegion == OMPD_parallel_master ||
4793 ParentRegion == OMPD_critical || ParentRegion == OMPD_ordered;
4794 } else if (isOpenMPWorksharingDirective(CurrentRegion) &&
4795 !isOpenMPParallelDirective(CurrentRegion) &&
4796 !isOpenMPTeamsDirective(CurrentRegion)) {
4797 // OpenMP 5.1 [2.22, Nesting of Regions]
4798 // A loop region that binds to a parallel region or a worksharing region
4799 // may not be closely nested inside a worksharing, loop, task, taskloop,
4800 // critical, ordered, atomic, or masked region.
4801 NestingProhibited =
4802 isOpenMPWorksharingDirective(ParentRegion) ||
4803 isOpenMPTaskingDirective(ParentRegion) ||
4804 ParentRegion == OMPD_master || ParentRegion == OMPD_masked ||
4805 ParentRegion == OMPD_parallel_master ||
4806 ParentRegion == OMPD_critical || ParentRegion == OMPD_ordered;
4807 Recommend = ShouldBeInParallelRegion;
4808 } else if (CurrentRegion == OMPD_ordered) {
4809 // OpenMP [2.16, Nesting of Regions]
4810 // An ordered region may not be closely nested inside a critical,
4811 // atomic, or explicit task region.
4812 // An ordered region must be closely nested inside a loop region (or
4813 // parallel loop region) with an ordered clause.
4814 // OpenMP [2.8.1,simd Construct, Restrictions]
4815 // An ordered construct with the simd clause is the only OpenMP construct
4816 // that can appear in the simd region.
4817 NestingProhibited = ParentRegion == OMPD_critical ||
4818 isOpenMPTaskingDirective(ParentRegion) ||
4819 !(isOpenMPSimdDirective(ParentRegion) ||
4820 Stack->isParentOrderedRegion());
4821 Recommend = ShouldBeInOrderedRegion;
4822 } else if (isOpenMPNestingTeamsDirective(CurrentRegion)) {
4823 // OpenMP [2.16, Nesting of Regions]
4824 // If specified, a teams construct must be contained within a target
4825 // construct.
4826 NestingProhibited =
4827 (SemaRef.LangOpts.OpenMP <= 45 && ParentRegion != OMPD_target) ||
4828 (SemaRef.LangOpts.OpenMP >= 50 && ParentRegion != OMPD_unknown &&
4829 ParentRegion != OMPD_target);
4830 OrphanSeen = ParentRegion == OMPD_unknown;
4831 Recommend = ShouldBeInTargetRegion;
4832 } else if (CurrentRegion == OMPD_scan) {
4833 // OpenMP 5.0 [2.16, Nesting of Regions]
4834 // A scan region may only be closely nested inside a simd, for, for simd,
4835 // parallel for, or parallel for simd region.
4836 NestingProhibited =
4837 SemaRef.LangOpts.OpenMP < 50 ||
4838 (ParentRegion != OMPD_simd && ParentRegion != OMPD_for &&
4839 ParentRegion != OMPD_for_simd && ParentRegion != OMPD_parallel_for &&
4840 ParentRegion != OMPD_parallel_for_simd);
4841 OrphanSeen = ParentRegion == OMPD_unknown;
4842 Recommend = ShouldBeInLoopSimdRegion;
4843 }
4844 if (!NestingProhibited &&
4845 !isOpenMPTargetExecutionDirective(CurrentRegion) &&
4846 !isOpenMPTargetDataManagementDirective(CurrentRegion) &&
4847 (ParentRegion == OMPD_teams || ParentRegion == OMPD_target_teams)) {
4848 // OpenMP [2.16, Nesting of Regions]
4849 // distribute, parallel, parallel sections, parallel workshare, and the
4850 // parallel loop and parallel loop SIMD constructs are the only OpenMP
4851 // constructs that can be closely nested in the teams region.
4852 NestingProhibited = !isOpenMPParallelDirective(CurrentRegion) &&
4853 !isOpenMPDistributeDirective(CurrentRegion);
4854 Recommend = ShouldBeInParallelRegion;
4855 }
4856 if (!NestingProhibited &&
4857 isOpenMPNestingDistributeDirective(CurrentRegion)) {
4858 // OpenMP 4.5 [2.17 Nesting of Regions]
4859 // The region associated with the distribute construct must be strictly
4860 // nested inside a teams region
4861 NestingProhibited =
4862 (ParentRegion != OMPD_teams && ParentRegion != OMPD_target_teams);
4863 Recommend = ShouldBeInTeamsRegion;
4864 }
4865 if (!NestingProhibited &&
4866 (isOpenMPTargetExecutionDirective(CurrentRegion) ||
4867 isOpenMPTargetDataManagementDirective(CurrentRegion))) {
4868 // OpenMP 4.5 [2.17 Nesting of Regions]
4869 // If a target, target update, target data, target enter data, or
4870 // target exit data construct is encountered during execution of a
4871 // target region, the behavior is unspecified.
4872 NestingProhibited = Stack->hasDirective(
4873 [&OffendingRegion](OpenMPDirectiveKind K, const DeclarationNameInfo &,
4874 SourceLocation) {
4875 if (isOpenMPTargetExecutionDirective(K)) {
4876 OffendingRegion = K;
4877 return true;
4878 }
4879 return false;
4880 },
4881 false /* don't skip top directive */);
4882 CloseNesting = false;
4883 }
4884 if (NestingProhibited) {
4885 if (OrphanSeen) {
4886 SemaRef.Diag(StartLoc, diag::err_omp_orphaned_device_directive)
4887 << getOpenMPDirectiveName(CurrentRegion) << Recommend;
4888 } else {
4889 SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region)
4890 << CloseNesting << getOpenMPDirectiveName(OffendingRegion)
4891 << Recommend << getOpenMPDirectiveName(CurrentRegion);
4892 }
4893 return true;
4894 }
4895 }
4896 return false;
4897}
4898
4899struct Kind2Unsigned {
4900 using argument_type = OpenMPDirectiveKind;
4901 unsigned operator()(argument_type DK) { return unsigned(DK); }
4902};
4903static bool checkIfClauses(Sema &S, OpenMPDirectiveKind Kind,
4904 ArrayRef<OMPClause *> Clauses,
4905 ArrayRef<OpenMPDirectiveKind> AllowedNameModifiers) {
4906 bool ErrorFound = false;
4907 unsigned NamedModifiersNumber = 0;
4908 llvm::IndexedMap<const OMPIfClause *, Kind2Unsigned> FoundNameModifiers;
4909 FoundNameModifiers.resize(llvm::omp::Directive_enumSize + 1);
4910 SmallVector<SourceLocation, 4> NameModifierLoc;
4911 for (const OMPClause *C : Clauses) {
4912 if (const auto *IC = dyn_cast_or_null<OMPIfClause>(C)) {
4913 // At most one if clause without a directive-name-modifier can appear on
4914 // the directive.
4915 OpenMPDirectiveKind CurNM = IC->getNameModifier();
4916 if (FoundNameModifiers[CurNM]) {
4917 S.Diag(C->getBeginLoc(), diag::err_omp_more_one_clause)
4918 << getOpenMPDirectiveName(Kind) << getOpenMPClauseName(OMPC_if)
4919 << (CurNM != OMPD_unknown) << getOpenMPDirectiveName(CurNM);
4920 ErrorFound = true;
4921 } else if (CurNM != OMPD_unknown) {
4922 NameModifierLoc.push_back(IC->getNameModifierLoc());
4923 ++NamedModifiersNumber;
4924 }
4925 FoundNameModifiers[CurNM] = IC;
4926 if (CurNM == OMPD_unknown)
4927 continue;
4928 // Check if the specified name modifier is allowed for the current
4929 // directive.
4930 // At most one if clause with the particular directive-name-modifier can
4931 // appear on the directive.
4932 bool MatchFound = false;
4933 for (auto NM : AllowedNameModifiers) {
4934 if (CurNM == NM) {
4935 MatchFound = true;
4936 break;
4937 }
4938 }
4939 if (!MatchFound) {
4940 S.Diag(IC->getNameModifierLoc(),
4941 diag::err_omp_wrong_if_directive_name_modifier)
4942 << getOpenMPDirectiveName(CurNM) << getOpenMPDirectiveName(Kind);
4943 ErrorFound = true;
4944 }
4945 }
4946 }
4947 // If any if clause on the directive includes a directive-name-modifier then
4948 // all if clauses on the directive must include a directive-name-modifier.
4949 if (FoundNameModifiers[OMPD_unknown] && NamedModifiersNumber > 0) {
4950 if (NamedModifiersNumber == AllowedNameModifiers.size()) {
4951 S.Diag(FoundNameModifiers[OMPD_unknown]->getBeginLoc(),
4952 diag::err_omp_no_more_if_clause);
4953 } else {
4954 std::string Values;
4955 std::string Sep(", ");
4956 unsigned AllowedCnt = 0;
4957 unsigned TotalAllowedNum =
4958 AllowedNameModifiers.size() - NamedModifiersNumber;
4959 for (unsigned Cnt = 0, End = AllowedNameModifiers.size(); Cnt < End;
4960 ++Cnt) {
4961 OpenMPDirectiveKind NM = AllowedNameModifiers[Cnt];
4962 if (!FoundNameModifiers[NM]) {
4963 Values += "'";
4964 Values += getOpenMPDirectiveName(NM);
4965 Values += "'";
4966 if (AllowedCnt + 2 == TotalAllowedNum)
4967 Values += " or ";
4968 else if (AllowedCnt + 1 != TotalAllowedNum)
4969 Values += Sep;
4970 ++AllowedCnt;
4971 }
4972 }
4973 S.Diag(FoundNameModifiers[OMPD_unknown]->getCondition()->getBeginLoc(),
4974 diag::err_omp_unnamed_if_clause)
4975 << (TotalAllowedNum > 1) << Values;
4976 }
4977 for (SourceLocation Loc : NameModifierLoc) {
4978 S.Diag(Loc, diag::note_omp_previous_named_if_clause);
4979 }
4980 ErrorFound = true;
4981 }
4982 return ErrorFound;
4983}
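// Illustrative example (assumed user code, assuming both modifiers are
// allowed for the directive): on
//   #pragma omp target parallel if(target: c1) if(parallel: c2)
// each if clause names a distinct directive-name-modifier and no diagnostic
// is emitted, whereas mixing an unmodified if clause with a modified one
// triggers the err_omp_no_more_if_clause / err_omp_unnamed_if_clause
// diagnostics above.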
4984
4985static std::pair<ValueDecl *, bool> getPrivateItem(Sema &S, Expr *&RefExpr,
4986 SourceLocation &ELoc,
4987 SourceRange &ERange,
4988 bool AllowArraySection) {
4989 if (RefExpr->isTypeDependent() || RefExpr->isValueDependent() ||
4990 RefExpr->containsUnexpandedParameterPack())
4991 return std::make_pair(nullptr, true);
4992
4993 // OpenMP [3.1, C/C++]
4994 // A list item is a variable name.
4995 // OpenMP [2.9.3.3, Restrictions, p.1]
4996 // A variable that is part of another variable (as an array or
4997 // structure element) cannot appear in a private clause.
4998 RefExpr = RefExpr->IgnoreParens();
4999 enum {
5000 NoArrayExpr = -1,
5001 ArraySubscript = 0,
5002 OMPArraySection = 1
5003 } IsArrayExpr = NoArrayExpr;
5004 if (AllowArraySection) {
5005 if (auto *ASE = dyn_cast_or_null<ArraySubscriptExpr>(RefExpr)) {
5006 Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
5007 while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
5008 Base = TempASE->getBase()->IgnoreParenImpCasts();
5009 RefExpr = Base;
5010 IsArrayExpr = ArraySubscript;
5011 } else if (auto *OASE = dyn_cast_or_null<OMPArraySectionExpr>(RefExpr)) {
5012 Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
5013 while (auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
5014 Base = TempOASE->getBase()->IgnoreParenImpCasts();
5015 while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
5016 Base = TempASE->getBase()->IgnoreParenImpCasts();
5017 RefExpr = Base;
5018 IsArrayExpr = OMPArraySection;
5019 }
5020 }
5021 ELoc = RefExpr->getExprLoc();
5022 ERange = RefExpr->getSourceRange();
5023 RefExpr = RefExpr->IgnoreParenImpCasts();
5024 auto *DE = dyn_cast_or_null<DeclRefExpr>(RefExpr);
5025 auto *ME = dyn_cast_or_null<MemberExpr>(RefExpr);
5026 if ((!DE || !isa<VarDecl>(DE->getDecl())) &&
5027 (S.getCurrentThisType().isNull() || !ME ||
5028 !isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts()) ||
5029 !isa<FieldDecl>(ME->getMemberDecl()))) {
5030 if (IsArrayExpr != NoArrayExpr) {
5031 S.Diag(ELoc, diag::err_omp_expected_base_var_name) << IsArrayExpr
5032 << ERange;
5033 } else {
5034 S.Diag(ELoc,
5035 AllowArraySection
5036 ? diag::err_omp_expected_var_name_member_expr_or_array_item
5037 : diag::err_omp_expected_var_name_member_expr)
5038 << (S.getCurrentThisType().isNull() ? 0 : 1) << ERange;
5039 }
5040 return std::make_pair(nullptr, false);
5041 }
5042 return std::make_pair(
5043 getCanonicalDecl(DE ? DE->getDecl() : ME->getMemberDecl()), false);
5044}
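// Illustrative example (assumed user code): 'private(x)' yields the
// canonical VarDecl of 'x', while an item such as 'private(s.field)' is
// rejected here because a variable that is part of another variable cannot
// appear in a private clause.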
5045
5046namespace {
5047/// Checks if the allocator is used in uses_allocators clause to be allowed in
5048/// target regions.
5049class AllocatorChecker final : public ConstStmtVisitor<AllocatorChecker, bool> {
5050 DSAStackTy *S = nullptr;
5051
5052public:
5053 bool VisitDeclRefExpr(const DeclRefExpr *E) {
5054 return S->isUsesAllocatorsDecl(E->getDecl())
5055 .getValueOr(
5056 DSAStackTy::UsesAllocatorsDeclKind::AllocatorTrait) ==
5057 DSAStackTy::UsesAllocatorsDeclKind::AllocatorTrait;
5058 }
5059 bool VisitStmt(const Stmt *S) {
5060 for (const Stmt *Child : S->children()) {
5061 if (Child && Visit(Child))
5062 return true;
5063 }
5064 return false;
5065 }
5066 explicit AllocatorChecker(DSAStackTy *S) : S(S) {}
5067};
5068} // namespace
5069
5070static void checkAllocateClauses(Sema &S, DSAStackTy *Stack,
5071 ArrayRef<OMPClause *> Clauses) {
5072 assert(!S.CurContext->isDependentContext() &&
5073 "Expected non-dependent context.");
5074 auto AllocateRange =
5075 llvm::make_filter_range(Clauses, OMPAllocateClause::classof);
5076 llvm::DenseMap<CanonicalDeclPtr<Decl>, CanonicalDeclPtr<VarDecl>>
5077 DeclToCopy;
5078 auto PrivateRange = llvm::make_filter_range(Clauses, [](const OMPClause *C) {
5079 return isOpenMPPrivate(C->getClauseKind());
5080 });
5081 for (OMPClause *Cl : PrivateRange) {
5082 MutableArrayRef<Expr *>::iterator I, It, Et;
5083 if (Cl->getClauseKind() == OMPC_private) {
5084 auto *PC = cast<OMPPrivateClause>(Cl);
5085 I = PC->private_copies().begin();
5086 It = PC->varlist_begin();
5087 Et = PC->varlist_end();
5088 } else if (Cl->getClauseKind() == OMPC_firstprivate) {
5089 auto *PC = cast<OMPFirstprivateClause>(Cl);
5090 I = PC->private_copies().begin();
5091 It = PC->varlist_begin();
5092 Et = PC->varlist_end();
5093 } else if (Cl->getClauseKind() == OMPC_lastprivate) {
5094 auto *PC = cast<OMPLastprivateClause>(Cl);
5095 I = PC->private_copies().begin();
5096 It = PC->varlist_begin();
5097 Et = PC->varlist_end();
5098 } else if (Cl->getClauseKind() == OMPC_linear) {
5099 auto *PC = cast<OMPLinearClause>(Cl);
5100 I = PC->privates().begin();
5101 It = PC->varlist_begin();
5102 Et = PC->varlist_end();
5103 } else if (Cl->getClauseKind() == OMPC_reduction) {
5104 auto *PC = cast<OMPReductionClause>(Cl);
5105 I = PC->privates().begin();
5106 It = PC->varlist_begin();
5107 Et = PC->varlist_end();
5108 } else if (Cl->getClauseKind() == OMPC_task_reduction) {
5109 auto *PC = cast<OMPTaskReductionClause>(Cl);
5110 I = PC->privates().begin();
5111 It = PC->varlist_begin();
5112 Et = PC->varlist_end();
5113 } else if (Cl->getClauseKind() == OMPC_in_reduction) {
5114 auto *PC = cast<OMPInReductionClause>(Cl);
5115 I = PC->privates().begin();
5116 It = PC->varlist_begin();
5117 Et = PC->varlist_end();
5118 } else {
5119 llvm_unreachable("Expected private clause.");
5120 }
5121 for (Expr *E : llvm::make_range(It, Et)) {
5122 if (!*I) {
5123 ++I;
5124 continue;
5125 }
5126 SourceLocation ELoc;
5127 SourceRange ERange;
5128 Expr *SimpleRefExpr = E;
5129 auto Res = getPrivateItem(S, SimpleRefExpr, ELoc, ERange,
5130 /*AllowArraySection=*/true);
5131 DeclToCopy.try_emplace(Res.first,
5132 cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()));
5133 ++I;
5134 }
5135 }
5136 for (OMPClause *C : AllocateRange) {
5137 auto *AC = cast<OMPAllocateClause>(C);
5138 if (S.getLangOpts().OpenMP >= 50 &&
5139 !Stack->hasRequiresDeclWithClause<OMPDynamicAllocatorsClause>() &&
5140 isOpenMPTargetExecutionDirective(Stack->getCurrentDirective()) &&
5141 AC->getAllocator()) {
5142 Expr *Allocator = AC->getAllocator();
5143 // OpenMP, 2.12.5 target Construct
5144 // Memory allocators that do not appear in a uses_allocators clause cannot
5145 // appear as an allocator in an allocate clause or be used in the target
5146 // region unless a requires directive with the dynamic_allocators clause
5147 // is present in the same compilation unit.
5148 AllocatorChecker Checker(Stack);
5149 if (Checker.Visit(Allocator))
5150 S.Diag(Allocator->getExprLoc(),
5151 diag::err_omp_allocator_not_in_uses_allocators)
5152 << Allocator->getSourceRange();
5153 }
5154 OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind =
5155 getAllocatorKind(S, Stack, AC->getAllocator());
5156 // OpenMP, 2.11.4 allocate Clause, Restrictions.
5157 // For task, taskloop or target directives, allocation requests to memory
5158 // allocators with the trait access set to thread result in unspecified
5159 // behavior.
5160 if (AllocatorKind == OMPAllocateDeclAttr::OMPThreadMemAlloc &&
5161 (isOpenMPTaskingDirective(Stack->getCurrentDirective()) ||
5162 isOpenMPTargetExecutionDirective(Stack->getCurrentDirective()))) {
5163 S.Diag(AC->getAllocator()->getExprLoc(),
5164 diag::warn_omp_allocate_thread_on_task_target_directive)
5165 << getOpenMPDirectiveName(Stack->getCurrentDirective());
5166 }
5167 for (Expr *E : AC->varlists()) {
5168 SourceLocation ELoc;
5169 SourceRange ERange;
5170 Expr *SimpleRefExpr = E;
5171 auto Res = getPrivateItem(S, SimpleRefExpr, ELoc, ERange);
5172 ValueDecl *VD = Res.first;
5173 DSAStackTy::DSAVarData Data = Stack->getTopDSA(VD, /*FromParent=*/false);
5174 if (!isOpenMPPrivate(Data.CKind)) {
5175 S.Diag(E->getExprLoc(),
5176 diag::err_omp_expected_private_copy_for_allocate);
5177 continue;
5178 }
5179 VarDecl *PrivateVD = DeclToCopy[VD];
5180 if (checkPreviousOMPAllocateAttribute(S, Stack, E, PrivateVD,
5181 AllocatorKind, AC->getAllocator()))
5182 continue;
5183 applyOMPAllocateAttribute(S, PrivateVD, AllocatorKind, AC->getAllocator(),
5184 E->getSourceRange());
5185 }
5186 }
5187}
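// Illustrative example (assumed user code, OpenMP 5.0): inside
//   #pragma omp target private(x) allocate(my_alloc: x)
// the allocator 'my_alloc' must also appear in a uses_allocators clause,
// or a 'requires dynamic_allocators' directive must be visible in the
// compilation unit; otherwise err_omp_allocator_not_in_uses_allocators is
// emitted above.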
5188
5189namespace {
5190/// Rewrite statements and expressions for Sema \p Actions CurContext.
5191///
5192/// Used to wrap already parsed statements/expressions into a new CapturedStmt
5193 /// context. DeclRefExprs used inside the new context are changed to refer to the
5194/// captured variable instead.
5195class CaptureVars : public TreeTransform<CaptureVars> {
5196 using BaseTransform = TreeTransform<CaptureVars>;
5197
5198public:
5199 CaptureVars(Sema &Actions) : BaseTransform(Actions) {}
5200
5201 bool AlwaysRebuild() { return true; }
5202};
5203} // namespace
5204
5205static VarDecl *precomputeExpr(Sema &Actions,
5206 SmallVectorImpl<Stmt *> &BodyStmts, Expr *E,
5207 StringRef Name) {
5208 Expr *NewE = AssertSuccess(CaptureVars(Actions).TransformExpr(E));
5209 VarDecl *NewVar = buildVarDecl(Actions, {}, NewE->getType(), Name, nullptr,
5210 dyn_cast<DeclRefExpr>(E->IgnoreImplicit()));
5211 auto *NewDeclStmt = cast<DeclStmt>(AssertSuccess(
5212 Actions.ActOnDeclStmt(Actions.ConvertDeclToDeclGroup(NewVar), {}, {})));
5213 Actions.AddInitializerToDecl(NewDeclStmt->getSingleDecl(), NewE, false);
5214 BodyStmts.push_back(NewDeclStmt);
5215 return NewVar;
5216}
5217
5218/// Create a closure that computes the number of iterations of a loop.
5219///
5220/// \param Actions The Sema object.
5221/// \param LogicalTy Type for the logical iteration number.
5222/// \param Rel Comparison operator of the loop condition.
5223/// \param StartExpr Value of the loop counter at the first iteration.
5224/// \param StopExpr Expression the loop counter is compared against in the loop
5225/// condition. \param StepExpr Amount of increment after each iteration.
5226///
5227/// \return Closure (CapturedStmt) of the distance calculation.
5228static CapturedStmt *buildDistanceFunc(Sema &Actions, QualType LogicalTy,
5229 BinaryOperator::Opcode Rel,
5230 Expr *StartExpr, Expr *StopExpr,
5231 Expr *StepExpr) {
5232 ASTContext &Ctx = Actions.getASTContext();
5233 TypeSourceInfo *LogicalTSI = Ctx.getTrivialTypeSourceInfo(LogicalTy);
5234
5235 // Captured regions currently don't support return values, so we use an
5236 // out-parameter instead. All inputs are implicit captures.
5237 // TODO: Instead of capturing each DeclRefExpr occurring in
5238 // StartExpr/StopExpr/Step, these could also be passed as a value capture.
5239 QualType ResultTy = Ctx.getLValueReferenceType(LogicalTy);
5240 Sema::CapturedParamNameType Params[] = {{"Distance", ResultTy},
5241 {StringRef(), QualType()}};
5242 Actions.ActOnCapturedRegionStart({}, nullptr, CR_Default, Params);
5243
5244 Stmt *Body;
5245 {
5246 Sema::CompoundScopeRAII CompoundScope(Actions);
5247 CapturedDecl *CS = cast<CapturedDecl>(Actions.CurContext);
5248
5249 // Get the LValue expression for the result.
5250 ImplicitParamDecl *DistParam = CS->getParam(0);
5251 DeclRefExpr *DistRef = Actions.BuildDeclRefExpr(
5252 DistParam, LogicalTy, VK_LValue, {}, nullptr, nullptr, {}, nullptr);
5253
5254 SmallVector<Stmt *, 4> BodyStmts;
5255
5256 // Capture all referenced variable references.
5257 // TODO: Instead of computing NewStart/NewStop/NewStep inside the
5258 // CapturedStmt, we could compute them before and capture the result, to be
5259 // used jointly with the LoopVar function.
5260 VarDecl *NewStart = precomputeExpr(Actions, BodyStmts, StartExpr, ".start");
5261 VarDecl *NewStop = precomputeExpr(Actions, BodyStmts, StopExpr, ".stop");
5262 VarDecl *NewStep = precomputeExpr(Actions, BodyStmts, StepExpr, ".step");
5263 auto BuildVarRef = [&](VarDecl *VD) {
5264 return buildDeclRefExpr(Actions, VD, VD->getType(), {});
5265 };
5266
5267 IntegerLiteral *Zero = IntegerLiteral::Create(
5268 Ctx, llvm::APInt(Ctx.getIntWidth(LogicalTy), 0), LogicalTy, {});
5269 Expr *Dist;
5270 if (Rel == BO_NE) {
5271 // When using a != comparison, the increment can be +1 or -1. This can be
5272 // dynamic at runtime, so we need to check for the direction.
5273 Expr *IsNegStep = AssertSuccess(
5274 Actions.BuildBinOp(nullptr, {}, BO_LT, BuildVarRef(NewStep), Zero));
5275
5276 // Positive increment.
5277 Expr *ForwardRange = AssertSuccess(Actions.BuildBinOp(
5278 nullptr, {}, BO_Sub, BuildVarRef(NewStop), BuildVarRef(NewStart)));
5279 ForwardRange = AssertSuccess(
5280 Actions.BuildCStyleCastExpr({}, LogicalTSI, {}, ForwardRange));
5281 Expr *ForwardDist = AssertSuccess(Actions.BuildBinOp(
5282 nullptr, {}, BO_Div, ForwardRange, BuildVarRef(NewStep)));
5283
5284 // Negative increment.
5285 Expr *BackwardRange = AssertSuccess(Actions.BuildBinOp(
5286 nullptr, {}, BO_Sub, BuildVarRef(NewStart), BuildVarRef(NewStop)));
5287 BackwardRange = AssertSuccess(
5288 Actions.BuildCStyleCastExpr({}, LogicalTSI, {}, BackwardRange));
5289 Expr *NegIncAmount = AssertSuccess(
5290 Actions.BuildUnaryOp(nullptr, {}, UO_Minus, BuildVarRef(NewStep)));
5291 Expr *BackwardDist = AssertSuccess(
5292 Actions.BuildBinOp(nullptr, {}, BO_Div, BackwardRange, NegIncAmount));
5293
5294 // Use the appropriate case.
5295 Dist = AssertSuccess(Actions.ActOnConditionalOp(
5296 {}, {}, IsNegStep, BackwardDist, ForwardDist));
5297 } else {
5298 assert((Rel == BO_LT || Rel == BO_LE || Rel == BO_GE || Rel == BO_GT) &&
5299 "Expected one of these relational operators");
5300
5301 // We can derive the direction from any other comparison operator. It is
5302 // not well-formed OpenMP if Step increments/decrements in the other
5303 // direction. Determine whether at least the first iteration passes the
5304 // loop condition.
5305 Expr *HasAnyIteration = AssertSuccess(Actions.BuildBinOp(
5306 nullptr, {}, Rel, BuildVarRef(NewStart), BuildVarRef(NewStop)));
5307
5308 // Compute the range between first and last counter value.
5309 Expr *Range;
5310 if (Rel == BO_GE || Rel == BO_GT)
5311 Range = AssertSuccess(Actions.BuildBinOp(
5312 nullptr, {}, BO_Sub, BuildVarRef(NewStart), BuildVarRef(NewStop)));
5313 else
5314 Range = AssertSuccess(Actions.BuildBinOp(
5315 nullptr, {}, BO_Sub, BuildVarRef(NewStop), BuildVarRef(NewStart)));
5316
5317 // Ensure unsigned range space.
5318 Range =
5319 AssertSuccess(Actions.BuildCStyleCastExpr({}, LogicalTSI, {}, Range));
5320
5321 if (Rel == BO_LE || Rel == BO_GE) {
5322 // Add one to the range if the relational operator is inclusive.
5323 Range =
5324 AssertSuccess(Actions.BuildUnaryOp(nullptr, {}, UO_PreInc, Range));
5325 }
5326
5327 // Divide by the absolute step amount.
5328 Expr *Divisor = BuildVarRef(NewStep);
5329 if (Rel == BO_GE || Rel == BO_GT)
5330 Divisor =
5331 AssertSuccess(Actions.BuildUnaryOp(nullptr, {}, UO_Minus, Divisor));
5332 Dist = AssertSuccess(
5333 Actions.BuildBinOp(nullptr, {}, BO_Div, Range, Divisor));
5334
5335 // If there is not at least one iteration, the range contains garbage. Fix
5336 // to zero in this case.
5337 Dist = AssertSuccess(
5338 Actions.ActOnConditionalOp({}, {}, HasAnyIteration, Dist, Zero));
5339 }
5340
5341 // Assign the result to the out-parameter.
5342 Stmt *ResultAssign = AssertSuccess(Actions.BuildBinOp(
5343 Actions.getCurScope(), {}, BO_Assign, DistRef, Dist));
5344 BodyStmts.push_back(ResultAssign);
5345
5346 Body = AssertSuccess(Actions.ActOnCompoundStmt({}, {}, BodyStmts, false));
5347 }
5348
5349 return cast<CapturedStmt>(
5350 AssertSuccess(Actions.ActOnCapturedRegionEnd(Body)));
5351}
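// Worked example (assumed loop, not from this file): for
//   for (i = 1; i <= 10; ++i)
// the non-BO_NE path computes Range = 10 - 1 = 9, adds one because '<=' is
// inclusive, and divides by Step = 1, so the closure stores Distance = 10.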
5352
5353/// Create a closure that computes the loop variable from the logical iteration
5354/// number.
5355///
5356/// \param Actions The Sema object.
5357/// \param LoopVarTy Type for the loop variable used for result value.
5358/// \param LogicalTy Type for the logical iteration number.
5359/// \param StartExpr Value of the loop counter at the first iteration.
5360/// \param Step Amount of increment after each iteration.
5361/// \param Deref Whether the loop variable is a dereference of the loop
5362/// counter variable.
5363///
5364/// \return Closure (CapturedStmt) of the loop value calculation.
5365static CapturedStmt *buildLoopVarFunc(Sema &Actions, QualType LoopVarTy,
5366 QualType LogicalTy,
5367 DeclRefExpr *StartExpr, Expr *Step,
5368 bool Deref) {
5369 ASTContext &Ctx = Actions.getASTContext();
5370
5371 // Pass the result as an out-parameter. Passing as return value would require
5372 // the OpenMPIRBuilder to know additional C/C++ semantics, such as how to
5373 // invoke a copy constructor.
5374 QualType TargetParamTy = Ctx.getLValueReferenceType(LoopVarTy);
5375 Sema::CapturedParamNameType Params[] = {{"LoopVar", TargetParamTy},
5376 {"Logical", LogicalTy},
5377 {StringRef(), QualType()}};
5378 Actions.ActOnCapturedRegionStart({}, nullptr, CR_Default, Params);
5379
5380 // Capture the initial iterator which represents the LoopVar value at the
5381 // zeroth logical iteration. Since the original ForStmt/CXXForRangeStmt updates
5382 // it in every iteration, capture it by value before it is modified.
5383 VarDecl *StartVar = cast<VarDecl>(StartExpr->getDecl());
5384 bool Invalid = Actions.tryCaptureVariable(StartVar, {},
5385 Sema::TryCapture_ExplicitByVal, {});
5386 (void)Invalid;
5387 assert(!Invalid && "Expecting capture-by-value to work.");
5388
5389 Expr *Body;
5390 {
5391 Sema::CompoundScopeRAII CompoundScope(Actions);
5392 auto *CS = cast<CapturedDecl>(Actions.CurContext);
5393
5394 ImplicitParamDecl *TargetParam = CS->getParam(0);
5395 DeclRefExpr *TargetRef = Actions.BuildDeclRefExpr(
5396 TargetParam, LoopVarTy, VK_LValue, {}, nullptr, nullptr, {}, nullptr);
5397 ImplicitParamDecl *IndvarParam = CS->getParam(1);
5398 DeclRefExpr *LogicalRef = Actions.BuildDeclRefExpr(
5399 IndvarParam, LogicalTy, VK_LValue, {}, nullptr, nullptr, {}, nullptr);
5400
5401 // Capture the Start expression.
5402 CaptureVars Recap(Actions);
5403 Expr *NewStart = AssertSuccess(Recap.TransformExpr(StartExpr));
5404 Expr *NewStep = AssertSuccess(Recap.TransformExpr(Step));
5405
5406 Expr *Skip = AssertSuccess(
5407 Actions.BuildBinOp(nullptr, {}, BO_Mul, NewStep, LogicalRef));
5408 // TODO: Explicitly cast to the iterator's difference_type instead of
5409 // relying on implicit conversion.
5410 Expr *Advanced =
5411 AssertSuccess(Actions.BuildBinOp(nullptr, {}, BO_Add, NewStart, Skip));
5412
5413 if (Deref) {
5414 // For range-based for-loops convert the loop counter value to a concrete
5415 // loop variable value by dereferencing the iterator.
5416 Advanced =
5417 AssertSuccess(Actions.BuildUnaryOp(nullptr, {}, UO_Deref, Advanced));
5418 }
5419
5420 // Assign the result to the output parameter.
5421 Body = AssertSuccess(Actions.BuildBinOp(Actions.getCurScope(), {},
5422 BO_Assign, TargetRef, Advanced));
5423 }
5424 return cast<CapturedStmt>(
5425 AssertSuccess(Actions.ActOnCapturedRegionEnd(Body)));
5426}
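// A minimal sketch (hypothetical, for illustration only; not part of this
// file) of what the closure built by buildLoopVarFunc is equivalent to for a
// plain integer loop 'for (int i = Start; i < End; i += Step)', where
// Deref == false. For range-based for-loops the advanced iterator is
// additionally dereferenced (Deref == true).
static void loopVarClosureSketch(int &LoopVar, unsigned Logical, int Start,
                                 int Step) {
  // The logical iteration number is mapped back to the loop-variable value
  // and written through the out-parameter, mirroring the BO_Assign above.
  LoopVar = Start + static_cast<int>(Logical) * Step;
}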
5427
5428StmtResult Sema::ActOnOpenMPCanonicalLoop(Stmt *AStmt) {
5429 ASTContext &Ctx = getASTContext();
5430
5431 // Extract the common elements of ForStmt and CXXForRangeStmt:
5432 // Loop variable, repeat condition, increment
5433 Expr *Cond, *Inc;
5434 VarDecl *LIVDecl, *LUVDecl;
5435 if (auto *For = dyn_cast<ForStmt>(AStmt)) {
5436 Stmt *Init = For->getInit();
5437 if (auto *LCVarDeclStmt = dyn_cast<DeclStmt>(Init)) {
5438 // For statement declares loop variable.
5439 LIVDecl = cast<VarDecl>(LCVarDeclStmt->getSingleDecl());
5440 } else if (auto *LCAssign = dyn_cast<BinaryOperator>(Init)) {
5441 // For statement reuses variable.
5442 assert(LCAssign->getOpcode() == BO_Assign &&
5443 "init part must be a loop variable assignment");
5444 auto *CounterRef = cast<DeclRefExpr>(LCAssign->getLHS());
5445 LIVDecl = cast<VarDecl>(CounterRef->getDecl());
5446 } else
5447 llvm_unreachable("Cannot determine loop variable");
5448 LUVDecl = LIVDecl;
5449
5450 Cond = For->getCond();
5451 Inc = For->getInc();
5452 } else if (auto *RangeFor = dyn_cast<CXXForRangeStmt>(AStmt)) {
5453 DeclStmt *BeginStmt = RangeFor->getBeginStmt();
5454 LIVDecl = cast<VarDecl>(BeginStmt->getSingleDecl());
5455 LUVDecl = RangeFor->getLoopVariable();
5456
5457 Cond = RangeFor->getCond();
5458 Inc = RangeFor->getInc();
5459 } else
5460 llvm_unreachable("unhandled kind of loop");
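// Illustrative examples (hypothetical) of the two accepted loop forms:
//   for (int i = 0; i < N; ++i) ...   // ForStmt: LIVDecl == LUVDecl == i
//   for (auto &X : Vec) ...           // CXXForRangeStmt: LIVDecl is the
//                                     // implicit begin iterator, LUVDecl is X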
5461
5462 QualType CounterTy = LIVDecl->getType();
5463 QualType LVTy = LUVDecl->getType();
5464
5465 // Analyze the loop condition.
5466 Expr *LHS, *RHS;
5467 BinaryOperator::Opcode CondRel;
5468 Cond = Cond->IgnoreImplicit();
5469 if (auto *CondBinExpr = dyn_cast<BinaryOperator>(Cond)) {
5470 LHS = CondBinExpr->getLHS();
5471 RHS = CondBinExpr->getRHS();
5472 CondRel = CondBinExpr->getOpcode();
5473 } else if (auto *CondCXXOp = dyn_cast<CXXOperatorCallExpr>(Cond)) {
5474 assert(CondCXXOp->getNumArgs() == 2 && "Comparison should have 2 operands");
5475 LHS = CondCXXOp->getArg(0);
5476 RHS = CondCXXOp->getArg(1);
5477 switch (CondCXXOp->getOperator()) {
5478 case OO_ExclaimEqual:
5479 CondRel = BO_NE;
5480 break;
5481 case OO_Less:
5482 CondRel = BO_LT;
5483 break;
5484 case OO_LessEqual:
5485 CondRel = BO_LE;
5486 break;
5487 case OO_Greater:
5488 CondRel = BO_GT;
5489 break;
5490 case OO_GreaterEqual:
5491 CondRel = BO_GE;
5492 break;
5493 default:
5494 llvm_unreachable("unexpected iterator operator");
5495 }
5496 } else
5497 llvm_unreachable("unexpected loop condition");
5498
5499 // Normalize such that the loop counter is on the LHS.
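// For example (illustrative), a condition written as 'N > i' is handled as
// 'i < N': the operands are swapped and the comparison operator reversed.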
5500 if (!isa<DeclRefExpr>(LHS->IgnoreImplicit()) ||
5501 cast<DeclRefExpr>(LHS->IgnoreImplicit())->getDecl() != LIVDecl) {
5502 std::swap(LHS, RHS);
5503 CondRel = BinaryOperator::reverseComparisonOp(CondRel);
5504 }
5505 auto *CounterRef = cast<DeclRefExpr>(LHS->IgnoreImplicit());
5506
5507 // Decide the bit width for the logical iteration counter. By default use the
5508 // unsigned ptrdiff_t integer size (for iterators and pointers).
5509 // TODO: For iterators, use iterator::difference_type,
5510 // std::iterator_traits<>::difference_type or decltype(it - end).
5511 QualType LogicalTy = Ctx.getUnsignedPointerDiffType();
5512 if (CounterTy->isIntegerType()) {
5513 unsigned BitWidth = Ctx.getIntWidth(CounterTy);
5514 LogicalTy = Ctx.getIntTypeForBitwidth(BitWidth, false);
5515 }
5516
5517 // Analyze the loop increment.
5518 Expr *Step;
5519 if (auto *IncUn = dyn_cast<UnaryOperator>(Inc)) {
5520 int Direction;
5521 switch (IncUn->getOpcode()) {
5522 case UO_PreInc:
5523 case UO_PostInc:
5524 Direction = 1;
5525 break;
5526 case UO_PreDec:
5527 case UO_PostDec:
5528 Direction = -1;
5529 break;
5530 default:
5531 llvm_unreachable("unhandled unary increment operator");
5532 }
5533 Step = IntegerLiteral::Create(
5534 Ctx, llvm::APInt(Ctx.getIntWidth(LogicalTy), Direction), LogicalTy, {});
5535 } else if (auto *IncBin = dyn_cast<BinaryOperator>(Inc)) {
5536 if (IncBin->getOpcode() == BO_AddAssign) {
5537 Step = IncBin->getRHS();
5538 } else if (IncBin->getOpcode() == BO_SubAssign) {
5539 Step =
5540 AssertSuccess(BuildUnaryOp(nullptr, {}, UO_Minus, IncBin->getRHS()));
5541 } else
5542 llvm_unreachable("unhandled binary increment operator");
5543 } else if (auto *CondCXXOp = dyn_cast<CXXOperatorCallExpr>(Inc)) {
5544 switch (CondCXXOp->getOperator()) {
5545 case OO_PlusPlus:
5546 Step = IntegerLiteral::Create(
5547 Ctx, llvm::APInt(Ctx.getIntWidth(LogicalTy), 1), LogicalTy, {});
5548 break;
5549 case OO_MinusMinus:
5550 Step = IntegerLiteral::Create(
5551 Ctx, llvm::APInt(Ctx.getIntWidth(LogicalTy), -1), LogicalTy, {});
5552 break;
5553 case OO_PlusEqual:
5554 Step = CondCXXOp->getArg(1);
5555 break;
5556 case OO_MinusEqual:
5557 Step = AssertSuccess(
5558 BuildUnaryOp(nullptr, {}, UO_Minus, CondCXXOp->getArg(1)));
5559 break;
5560 default:
5561 llvm_unreachable("unhandled overloaded increment operator");
5562 }
5563 } else
5564 llvm_unreachable("unknown increment expression");
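// Illustrative examples (hypothetical) of the increment forms handled above:
//   ++i / i++      -> Step == +1
//   i -= 2         -> Step == -2 (the right-hand side negated via UO_Minus)
//   It += Chunk    -> Step == Chunk (overloaded operator+=)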
5565
5566 CapturedStmt *DistanceFunc =
5567 buildDistanceFunc(*this, LogicalTy, CondRel, LHS, RHS, Step);
5568 CapturedStmt *LoopVarFunc = buildLoopVarFunc(
5569 *this, LVTy, LogicalTy, CounterRef, Step, isa<CXXForRangeStmt>(AStmt));
5570 DeclRefExpr *LVRef = BuildDeclRefExpr(LUVDecl, LUVDecl->getType(), VK_LValue,
5571 {}, nullptr, nullptr, {}, nullptr);
5572 return OMPCanonicalLoop::create(getASTContext(), AStmt, DistanceFunc,
5573 LoopVarFunc, LVRef);
5574}
5575
5576static ExprResult buildUserDefinedMapperRef(Sema &SemaRef, Scope *S,
5577 CXXScopeSpec &MapperIdScopeSpec,
5578 const DeclarationNameInfo &MapperId,
5579 QualType Type,
5580 Expr *UnresolvedMapper);
5581
5582/// Perform DFS through the structure/class data members trying to find
5583/// member(s) with user-defined 'default' mapper and generate implicit map
5584/// clauses for such members with the found 'default' mapper.
5585static void
5586processImplicitMapsWithDefaultMappers(Sema &S, DSAStackTy *Stack,
5587 SmallVectorImpl<OMPClause *> &Clauses) {
5588 // Check for the default mapper for data members.
5589 if (S.getLangOpts().OpenMP < 50)
5590 return;
5591 SmallVector<OMPClause *, 4> ImplicitMaps;
5592 for (int Cnt = 0, EndCnt = Clauses.size(); Cnt < EndCnt; ++Cnt) {
5593 auto *C = dyn_cast<OMPMapClause>(Clauses[Cnt]);
5594 if (!C)
5595 continue;
5596 SmallVector<Expr *, 4> SubExprs;
5597 auto *MI = C->mapperlist_begin();
5598 for (auto I = C->varlist_begin(), End = C->varlist_end(); I != End;
5599 ++I, ++MI) {
5600 // Expression is mapped using mapper - skip it.
5601 if (*MI)
5602 continue;
5603 Expr *E = *I;
5604 // Expression is dependent - skip it, build the mapper when it gets
5605 // instantiated.
5606 if (E->isTypeDependent() || E->isValueDependent() ||
5607 E->containsUnexpandedParameterPack())
5608 continue;
5609 // Array section - need to check for the mapping of the array section
5610 // element.
5611 QualType CanonType = E->getType().getCanonicalType();
5612 if (CanonType->isSpecificBuiltinType(BuiltinType::OMPArraySection)) {
5613 const auto *OASE = cast<OMPArraySectionExpr>(E->IgnoreParenImpCasts());
5614 QualType BaseType =
5615 OMPArraySectionExpr::getBaseOriginalType(OASE->getBase());
5616 QualType ElemType;
5617 if (const auto *ATy = BaseType->getAsArrayTypeUnsafe())
5618 ElemType = ATy->getElementType();
5619 else
5620 ElemType = BaseType->getPointeeType();
5621 CanonType = ElemType;
5622 }
5623
5624 // DFS over data members in structures/classes.
5625 SmallVector<std::pair<QualType, FieldDecl *>, 4> Types(
5626 1, {CanonType, nullptr});
5627 llvm::DenseMap<const Type *, Expr *> Visited;
5628 SmallVector<std::pair<FieldDecl *, unsigned>, 4> ParentChain(
5629 1, {nullptr, 1});
5630 while (!Types.empty()) {
5631 QualType BaseType;
5632 FieldDecl *CurFD;
5633 std::tie(BaseType, CurFD) = Types.pop_back_val();
5634 while (ParentChain.back().second == 0)
5635 ParentChain.pop_back();
5636 --ParentChain.back().second;
5637 if (BaseType.isNull())
5638 continue;
5639 // Only structs/classes are allowed to have mappers.
5640 const RecordDecl *RD = BaseType.getCanonicalType()->getAsRecordDecl();
5641 if (!RD)
5642 continue;
5643 auto It = Visited.find(BaseType.getTypePtr());
5644 if (It == Visited.end()) {
5645 // Try to find the associated user-defined mapper.
5646 CXXScopeSpec MapperIdScopeSpec;
5647 DeclarationNameInfo DefaultMapperId;
5648 DefaultMapperId.setName(S.Context.DeclarationNames.getIdentifier(
5649 &S.Context.Idents.get("default")));
5650 DefaultMapperId.setLoc(E->getExprLoc());
5651 ExprResult ER = buildUserDefinedMapperRef(
5652 S, Stack->getCurScope(), MapperIdScopeSpec, DefaultMapperId,
5653 BaseType, /*UnresolvedMapper=*/nullptr);
5654 if (ER.isInvalid())
5655 continue;
5656 It = Visited.try_emplace(BaseType.getTypePtr(), ER.get()).first;
5657 }
5658 // Found default mapper.
5659 if (It->second) {
5660 auto *OE = new (S.Context) OpaqueValueExpr(E->getExprLoc(), CanonType,
5661 VK_LValue, OK_Ordinary, E);
5662 OE->setIsUnique(/*V=*/true);
5663 Expr *BaseExpr = OE;
5664 for (const auto &P : ParentChain) {
5665 if (P.first) {
5666 BaseExpr = S.BuildMemberExpr(
5667 BaseExpr, /*IsArrow=*/false, E->getExprLoc(),
5668 NestedNameSpecifierLoc(), SourceLocation(), P.first,
5669 DeclAccessPair::make(P.first, P.first->getAccess()),
5670 /*HadMultipleCandidates=*/false, DeclarationNameInfo(),
5671 P.first->getType(), VK_LValue, OK_Ordinary);
5672 BaseExpr = S.DefaultLvalueConversion(BaseExpr).get();
5673 }
5674 }
5675 if (CurFD)
5676 BaseExpr = S.BuildMemberExpr(
5677 BaseExpr, /*IsArrow=*/false, E->getExprLoc(),
5678 NestedNameSpecifierLoc(), SourceLocation(), CurFD,
5679 DeclAccessPair::make(CurFD, CurFD->getAccess()),
5680 /*HadMultipleCandidates=*/false, DeclarationNameInfo(),
5681 CurFD->getType(), VK_LValue, OK_Ordinary);
5682 SubExprs.push_back(BaseExpr);
5683 continue;
5684 }
5685 // Check for the "default" mapper for data members.
5686 bool FirstIter = true;
5687 for (FieldDecl *FD : RD->fields()) {
5688 if (!FD)
5689 continue;
5690 QualType FieldTy = FD->getType();
5691 if (FieldTy.isNull() ||
5692 !(FieldTy->isStructureOrClassType() || FieldTy->isUnionType()))
5693 continue;
5694 if (FirstIter) {
5695 FirstIter = false;
5696 ParentChain.emplace_back(CurFD, 1);
5697 } else {
5698 ++ParentChain.back().second;
5699 }
5700 Types.emplace_back(FieldTy, FD);
5701 }
5702 }
5703 }
5704 if (SubExprs.empty())
5705 continue;
5706 CXXScopeSpec MapperIdScopeSpec;
5707 DeclarationNameInfo MapperId;
5708 if (OMPClause *NewClause = S.ActOnOpenMPMapClause(
5709 C->getMapTypeModifiers(), C->getMapTypeModifiersLoc(),
5710 MapperIdScopeSpec, MapperId, C->getMapType(),
5711 /*IsMapTypeImplicit=*/true, SourceLocation(), SourceLocation(),
5712 SubExprs, OMPVarListLocTy()))
5713 Clauses.push_back(NewClause);
5714 }
5715}
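// A minimal sketch (hypothetical types and names, OpenMP 5.0) of the pattern
// handled by processImplicitMapsWithDefaultMappers: 'InnerSketch' has a
// user-defined 'default' mapper, so mapping an 'OuterSketch' object implicitly
// adds a map clause for its 'In' member that uses that mapper.
struct InnerSketch { int *Data; int Len; };
#pragma omp declare mapper(default : InnerSketch I) map(I.Data[0 : I.Len])
struct OuterSketch { InnerSketch In; int X; };
void useOuterSketch(OuterSketch &O) {
  // 'map(tofrom : O)' below gets an implicit companion map for 'O.In' built
  // by the routine above.
#pragma omp target map(tofrom : O)
  O.X += O.In.Len;
}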
5716
5717StmtResult Sema::ActOnOpenMPExecutableDirective(
5718 OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
5719 OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
5720 Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc) {
5721 StmtResult Res = StmtError();
5722 // First check CancelRegion which is then used in checkNestingOfRegions.
5723 if (checkCancelRegion(*this, Kind, CancelRegion, StartLoc) ||
5724 checkNestingOfRegions(*this, DSAStack, Kind, DirName, CancelRegion,
5725 StartLoc))
5726 return StmtError();
5727
5728 llvm::SmallVector<OMPClause *, 8> ClausesWithImplicit;
5729 VarsWithInheritedDSAType VarsWithInheritedDSA;
5730 bool ErrorFound = false;
5731 ClausesWithImplicit.append(Clauses.begin(), Clauses.end());
5732 if (AStmt && !CurContext->isDependentContext() && Kind != OMPD_atomic &&
5733 Kind != OMPD_critical && Kind != OMPD_section && Kind != OMPD_master &&
5734 Kind != OMPD_masked && !isOpenMPLoopTransformationDirective(Kind)) {
5735 assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
5736
5737 // Check default data sharing attributes for referenced variables.
5738 DSAAttrChecker DSAChecker(DSAStack, *this, cast<CapturedStmt>(AStmt));
5739 int ThisCaptureLevel = getOpenMPCaptureLevels(Kind);
5740 Stmt *S = AStmt;
5741 while (--ThisCaptureLevel >= 0)
5742 S = cast<CapturedStmt>(S)->getCapturedStmt();
5743 DSAChecker.Visit(S);
5744 if (!isOpenMPTargetDataManagementDirective(Kind) &&
5745 !isOpenMPTaskingDirective(Kind)) {
5746 // Visit subcaptures to generate implicit clauses for captured vars.
5747 auto *CS = cast<CapturedStmt>(AStmt);
5748 SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
5749 getOpenMPCaptureRegions(CaptureRegions, Kind);
5750 // Ignore outer tasking regions for target directives.
5751 if (CaptureRegions.size() > 1 && CaptureRegions.front() == OMPD_task)
5752 CS = cast<CapturedStmt>(CS->getCapturedStmt());
5753 DSAChecker.visitSubCaptures(CS);
5754 }
5755 if (DSAChecker.isErrorFound())
5756 return StmtError();
5757 // Generate list of implicitly defined firstprivate variables.
5758 VarsWithInheritedDSA = DSAChecker.getVarsWithInheritedDSA();
5759
5760 SmallVector<Expr *, 4> ImplicitFirstprivates(
5761 DSAChecker.getImplicitFirstprivate().begin(),
5762 DSAChecker.getImplicitFirstprivate().end());
5763 const unsigned DefaultmapKindNum = OMPC_DEFAULTMAP_pointer + 1;
5764 SmallVector<Expr *, 4> ImplicitMaps[DefaultmapKindNum][OMPC_MAP_delete];
5765 SmallVector<OpenMPMapModifierKind, NumberOfOMPMapClauseModifiers>
5766 ImplicitMapModifiers[DefaultmapKindNum];
5767 SmallVector<SourceLocation, NumberOfOMPMapClauseModifiers>
5768 ImplicitMapModifiersLoc[DefaultmapKindNum];
5769 // Get the original location of present modifier from Defaultmap clause.
5770 SourceLocation PresentModifierLocs[DefaultmapKindNum];
5771 for (OMPClause *C : Clauses) {
5772 if (auto *DMC = dyn_cast<OMPDefaultmapClause>(C))
5773 if (DMC->getDefaultmapModifier() == OMPC_DEFAULTMAP_MODIFIER_present)
5774 PresentModifierLocs[DMC->getDefaultmapKind()] =
5775 DMC->getDefaultmapModifierLoc();
5776 }
5777 for (unsigned VC = 0; VC < DefaultmapKindNum; ++VC) {
5778 auto Kind = static_cast<OpenMPDefaultmapClauseKind>(VC);
5779 for (unsigned I = 0; I < OMPC_MAP_delete; ++I) {
5780 ArrayRef<Expr *> ImplicitMap = DSAChecker.getImplicitMap(
5781 Kind, static_cast<OpenMPMapClauseKind>(I));
5782 ImplicitMaps[VC][I].append(ImplicitMap.begin(), ImplicitMap.end());
5783 }
5784 ArrayRef<OpenMPMapModifierKind> ImplicitModifier =
5785 DSAChecker.getImplicitMapModifier(Kind);
5786 ImplicitMapModifiers[VC].append(ImplicitModifier.begin(),
5787 ImplicitModifier.end());
5788 std::fill_n(std::back_inserter(ImplicitMapModifiersLoc[VC]),
5789 ImplicitModifier.size(), PresentModifierLocs[VC]);
5790 }
5791 // Mark taskgroup task_reduction descriptors as implicitly firstprivate.
5792 for (OMPClause *C : Clauses) {
5793 if (auto *IRC = dyn_cast<OMPInReductionClause>(C)) {
5794 for (Expr *E : IRC->taskgroup_descriptors())
5795 if (E)
5796 ImplicitFirstprivates.emplace_back(E);
5797 }
5798 // OpenMP 5.0, 2.10.1 task Construct
5799 // [detach clause]... The event-handle will be considered as if it was
5800 // specified on a firstprivate clause.
5801 if (auto *DC = dyn_cast<OMPDetachClause>(C))
5802 ImplicitFirstprivates.push_back(DC->getEventHandler());
5803 }
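// Illustrative example (hypothetical) of the detach rule quoted above:
//   omp_event_handle_t Evt;
//   #pragma omp task detach(Evt)   // Evt is collected into
//   { ... }                        // ImplicitFirstprivates here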
5804 if (!ImplicitFirstprivates.empty()) {
5805 if (OMPClause *Implicit = ActOnOpenMPFirstprivateClause(
5806 ImplicitFirstprivates, SourceLocation(), SourceLocation(),
5807 SourceLocation())) {
5808 ClausesWithImplicit.push_back(Implicit);
5809 ErrorFound = cast<OMPFirstprivateClause>(Implicit)->varlist_size() !=
5810 ImplicitFirstprivates.size();
5811 } else {
5812 ErrorFound = true;
5813 }
5814 }
5815 for (unsigned I = 0, E = DefaultmapKindNum; I < E; ++I) {
5816 int ClauseKindCnt = -1;
5817 for (ArrayRef<Expr *> ImplicitMap : ImplicitMaps[I]) {
5818 ++ClauseKindCnt;
5819 if (ImplicitMap.empty())
5820 continue;
5821 CXXScopeSpec MapperIdScopeSpec;
5822 DeclarationNameInfo MapperId;
5823 auto Kind = static_cast<OpenMPMapClauseKind>(ClauseKindCnt);
5824 if (OMPClause *Implicit = ActOnOpenMPMapClause(
5825 ImplicitMapModifiers[I], ImplicitMapModifiersLoc[I],
5826 MapperIdScopeSpec, MapperId, Kind, /*IsMapTypeImplicit=*/true,
5827 SourceLocation(), SourceLocation(), ImplicitMap,
5828 OMPVarListLocTy())) {
5829 ClausesWithImplicit.emplace_back(Implicit);
5830 ErrorFound |= cast<OMPMapClause>(Implicit)->varlist_size() !=
5831 ImplicitMap.size();
5832 } else {
5833 ErrorFound = true;
5834 }
5835 }
5836 }
5837 // Build expressions for implicit maps of data members with 'default'
5838 // mappers.
5839 if (LangOpts.OpenMP >= 50)
5840 processImplicitMapsWithDefaultMappers(*this, DSAStack,
5841 ClausesWithImplicit);
5842 }
5843
5844 llvm::SmallVector<OpenMPDirectiveKind, 4> AllowedNameModifiers;
5845 switch (Kind) {
5846 case OMPD_parallel:
5847 Res = ActOnOpenMPParallelDirective(ClausesWithImplicit, AStmt, StartLoc,
5848 EndLoc);
5849 AllowedNameModifiers.push_back(OMPD_parallel);
5850 break;
5851 case OMPD_simd:
5852 Res = ActOnOpenMPSimdDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc,
5853 VarsWithInheritedDSA);
5854 if (LangOpts.OpenMP >= 50)
5855 AllowedNameModifiers.push_back(OMPD_simd);
5856 break;
5857 case OMPD_tile:
5858 Res =
5859 ActOnOpenMPTileDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc);
5860 break;
5861 case OMPD_unroll:
5862 Res = ActOnOpenMPUnrollDirective(ClausesWithImplicit, AStmt, StartLoc,
5863 EndLoc);
5864 break;
5865 case OMPD_for:
5866 Res = ActOnOpenMPForDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc,
5867 VarsWithInheritedDSA);
5868 break;
5869 case OMPD_for_simd:
5870 Res = ActOnOpenMPForSimdDirective(ClausesWithImplicit, AStmt, StartLoc,
5871 EndLoc, VarsWithInheritedDSA);
5872 if (LangOpts.OpenMP >= 50)
5873 AllowedNameModifiers.push_back(OMPD_simd);
5874 break;
5875 case OMPD_sections:
5876 Res = ActOnOpenMPSectionsDirective(ClausesWithImplicit, AStmt, StartLoc,
5877 EndLoc);
5878 break;
5879 case OMPD_section:
5880 assert(ClausesWithImplicit.empty() &&
5881 "No clauses are allowed for 'omp section' directive");
5882 Res = ActOnOpenMPSectionDirective(AStmt, StartLoc, EndLoc);
5883 break;
5884 case OMPD_single:
5885 Res = ActOnOpenMPSingleDirective(ClausesWithImplicit, AStmt, StartLoc,
5886 EndLoc);
5887 break;
5888 case OMPD_master:
5889 assert(ClausesWithImplicit.empty() &&
5890 "No clauses are allowed for 'omp master' directive");
5891 Res = ActOnOpenMPMasterDirective(AStmt, StartLoc, EndLoc);
5892 break;
5893 case OMPD_masked:
5894 Res = ActOnOpenMPMaskedDirective(ClausesWithImplicit, AStmt, StartLoc,
5895 EndLoc);
5896 break;
5897 case OMPD_critical:
5898 Res = ActOnOpenMPCriticalDirective(DirName, ClausesWithImplicit, AStmt,
5899 StartLoc, EndLoc);
5900 break;
5901 case OMPD_parallel_for:
5902 Res = ActOnOpenMPParallelForDirective(ClausesWithImplicit, AStmt, StartLoc,
5903 EndLoc, VarsWithInheritedDSA);
5904 AllowedNameModifiers.push_back(OMPD_parallel);
5905 break;
5906 case OMPD_parallel_for_simd:
5907 Res = ActOnOpenMPParallelForSimdDirective(
5908 ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
5909 AllowedNameModifiers.push_back(OMPD_parallel);
5910 if (LangOpts.OpenMP >= 50)
5911 AllowedNameModifiers.push_back(OMPD_simd);
5912 break;
5913 case OMPD_parallel_master:
5914 Res = ActOnOpenMPParallelMasterDirective(ClausesWithImplicit, AStmt,
5915 StartLoc, EndLoc);
5916 AllowedNameModifiers.push_back(OMPD_parallel);
5917 break;
5918 case OMPD_parallel_sections:
5919 Res = ActOnOpenMPParallelSectionsDirective(ClausesWithImplicit, AStmt,
5920 StartLoc, EndLoc);
5921 AllowedNameModifiers.push_back(OMPD_parallel);
5922 break;
5923 case OMPD_task:
5924 Res =
5925 ActOnOpenMPTaskDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc);
5926 AllowedNameModifiers.push_back(OMPD_task);
5927 break;
5928 case OMPD_taskyield:
5929 assert(ClausesWithImplicit.empty() &&
5930 "No clauses are allowed for 'omp taskyield' directive");
5931 assert(AStmt == nullptr &&
5932 "No associated statement allowed for 'omp taskyield' directive");
5933 Res = ActOnOpenMPTaskyieldDirective(StartLoc, EndLoc);
5934 break;
5935 case OMPD_barrier:
5936 assert(ClausesWithImplicit.empty() &&
5937 "No clauses are allowed for 'omp barrier' directive");
5938 assert(AStmt == nullptr &&
5939 "No associated statement allowed for 'omp barrier' directive");
5940 Res = ActOnOpenMPBarrierDirective(StartLoc, EndLoc);
5941 break;
5942 case OMPD_taskwait:
5943 assert(ClausesWithImplicit.empty() &&
5944 "No clauses are allowed for 'omp taskwait' directive");
5945 assert(AStmt == nullptr &&
5946 "No associated statement allowed for 'omp taskwait' directive");
5947 Res = ActOnOpenMPTaskwaitDirective(StartLoc, EndLoc);
5948 break;
5949 case OMPD_taskgroup:
5950 Res = ActOnOpenMPTaskgroupDirective(ClausesWithImplicit, AStmt, StartLoc,
5951 EndLoc);
5952 break;
5953 case OMPD_flush:
5954 assert(AStmt == nullptr &&
5955 "No associated statement allowed for 'omp flush' directive");
5956 Res = ActOnOpenMPFlushDirective(ClausesWithImplicit, StartLoc, EndLoc);
5957