1//===--- SemaOpenMP.cpp - Semantic Analysis for OpenMP constructs ---------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements semantic analysis for OpenMP directives and
10/// clauses.
11///
12//===----------------------------------------------------------------------===//
13
15
16#include "TreeTransform.h"
20#include "clang/AST/Decl.h"
21#include "clang/AST/DeclCXX.h"
25#include "clang/AST/StmtCXX.h"
35#include "clang/Sema/Lookup.h"
37#include "clang/Sema/Scope.h"
39#include "clang/Sema/Sema.h"
41#include "llvm/ADT/IndexedMap.h"
42#include "llvm/ADT/PointerEmbeddedInt.h"
43#include "llvm/ADT/STLExtras.h"
44#include "llvm/ADT/Sequence.h"
45#include "llvm/ADT/SmallSet.h"
46#include "llvm/ADT/StringExtras.h"
47#include "llvm/Frontend/OpenMP/OMPAssume.h"
48#include "llvm/Frontend/OpenMP/OMPConstants.h"
49#include "llvm/IR/Assumptions.h"
50#include <optional>
51#include <set>
52
53using namespace clang;
54using namespace llvm::omp;
55
56//===----------------------------------------------------------------------===//
57// Stack of data-sharing attributes for variables
58//===----------------------------------------------------------------------===//
59
60static const Expr *checkMapClauseExpressionBase(
61 Sema &SemaRef, Expr *E,
62 OMPClauseMappableExprCommon::MappableExprComponentList &CurComponents,
63 OpenMPClauseKind CKind, OpenMPDirectiveKind DKind, bool NoDiagnose);
64
65namespace {
66/// Default data sharing attributes, which can be applied to a directive.
67enum DefaultDataSharingAttributes {
68 DSA_unspecified = 0, /// Data sharing attribute not specified.
69 DSA_none = 1 << 0, /// Default data sharing attribute 'none'.
70 DSA_shared = 1 << 1, /// Default data sharing attribute 'shared'.
71 DSA_private = 1 << 2, /// Default data sharing attribute 'private'.
72 DSA_firstprivate = 1 << 3, /// Default data sharing attribute 'firstprivate'.
73};
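// For illustration, these values model the 'default' clause on a directive:
//   #pragma omp parallel default(none)         -> DSA_none
//   #pragma omp parallel default(shared)       -> DSA_shared
//   #pragma omp parallel default(private)      -> DSA_private (OpenMP 5.1)
//   #pragma omp parallel default(firstprivate) -> DSA_firstprivate (OpenMP 5.1)
// DSA_unspecified means no 'default' clause was specified on the directive.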
74
75/// Stack for tracking declarations used in OpenMP directives and
76/// clauses and their data-sharing attributes.
77class DSAStackTy {
78public:
79 struct DSAVarData {
80 OpenMPDirectiveKind DKind = OMPD_unknown;
81 OpenMPClauseKind CKind = OMPC_unknown;
82 unsigned Modifier = 0;
83 const Expr *RefExpr = nullptr;
84 DeclRefExpr *PrivateCopy = nullptr;
85 SourceLocation ImplicitDSALoc;
86 bool AppliedToPointee = false;
87 DSAVarData() = default;
88 DSAVarData(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind,
89 const Expr *RefExpr, DeclRefExpr *PrivateCopy,
90 SourceLocation ImplicitDSALoc, unsigned Modifier,
91 bool AppliedToPointee)
92 : DKind(DKind), CKind(CKind), Modifier(Modifier), RefExpr(RefExpr),
93 PrivateCopy(PrivateCopy), ImplicitDSALoc(ImplicitDSALoc),
94 AppliedToPointee(AppliedToPointee) {}
95 };
96 using OperatorOffsetTy =
97 llvm::SmallVector<std::pair<Expr *, OverloadedOperatorKind>, 4>;
98 using DoacrossClauseMapTy = llvm::DenseMap<OMPClause *, OperatorOffsetTy>;
99 /// Kind of the declaration used in the uses_allocators clauses.
100 enum class UsesAllocatorsDeclKind {
101 /// Predefined allocator
102 PredefinedAllocator,
103 /// User-defined allocator
104 UserDefinedAllocator,
106 /// The declaration that represents an allocator trait.
106 AllocatorTrait,
107 };
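 // For illustration, in a clause such as
 //   uses_allocators(omp_default_mem_alloc, my_alloc(my_traits))
 // 'omp_default_mem_alloc' is a PredefinedAllocator, 'my_alloc' is a
 // UserDefinedAllocator, and 'my_traits' is a declaration with the
 // AllocatorTrait kind.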
108
109private:
110 struct DSAInfo {
111 OpenMPClauseKind Attributes = OMPC_unknown;
112 unsigned Modifier = 0;
113 /// Pointer to a reference expression and a flag which shows that the
114 /// variable is marked as lastprivate(true) or not (false).
115 llvm::PointerIntPair<const Expr *, 1, bool> RefExpr;
116 DeclRefExpr *PrivateCopy = nullptr;
117 /// true if the attribute is applied to the pointee, not the variable
118 /// itself.
119 bool AppliedToPointee = false;
120 };
121 using DeclSAMapTy = llvm::SmallDenseMap<const ValueDecl *, DSAInfo, 8>;
122 using UsedRefMapTy = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 8>;
123 using LCDeclInfo = std::pair<unsigned, VarDecl *>;
124 using LoopControlVariablesMapTy =
125 llvm::SmallDenseMap<const ValueDecl *, LCDeclInfo, 8>;
126 /// Struct that associates a component with the clause kind where they are
127 /// found.
128 struct MappedExprComponentTy {
129 OMPClauseMappableExprCommon::MappableExprComponentLists Components;
130 OpenMPClauseKind Kind = OMPC_unknown;
131 };
132 using MappedExprComponentsTy =
133 llvm::DenseMap<const ValueDecl *, MappedExprComponentTy>;
134 using CriticalsWithHintsTy =
135 llvm::StringMap<std::pair<const OMPCriticalDirective *, llvm::APSInt>>;
136 struct ReductionData {
137 using BOKPtrType = llvm::PointerEmbeddedInt<BinaryOperatorKind, 16>;
138 SourceRange ReductionRange;
139 llvm::PointerUnion<const Expr *, BOKPtrType> ReductionOp;
140 ReductionData() = default;
141 void set(BinaryOperatorKind BO, SourceRange RR) {
142 ReductionRange = RR;
143 ReductionOp = BO;
144 }
145 void set(const Expr *RefExpr, SourceRange RR) {
146 ReductionRange = RR;
147 ReductionOp = RefExpr;
148 }
149 };
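 // For illustration, ReductionOp holds either the operator of a builtin
 // reduction identifier, e.g. '+' in
 //   #pragma omp taskgroup task_reduction(+ : x)
 // or the expression referencing a user-defined reduction identifier
 // introduced with '#pragma omp declare reduction'.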
150 using DeclReductionMapTy =
151 llvm::SmallDenseMap<const ValueDecl *, ReductionData, 4>;
152 struct DefaultmapInfo {
153 OpenMPDefaultmapClauseModifier ImplicitBehavior =
154 OMPC_DEFAULTMAP_MODIFIER_unknown;
155 SourceLocation SLoc;
156 DefaultmapInfo() = default;
157 DefaultmapInfo(OpenMPDefaultmapClauseModifier M, SourceLocation Loc)
158 : ImplicitBehavior(M), SLoc(Loc) {}
159 };
160
161 struct SharingMapTy {
162 DeclSAMapTy SharingMap;
163 DeclReductionMapTy ReductionMap;
164 UsedRefMapTy AlignedMap;
165 UsedRefMapTy NontemporalMap;
166 MappedExprComponentsTy MappedExprComponents;
167 LoopControlVariablesMapTy LCVMap;
168 DefaultDataSharingAttributes DefaultAttr = DSA_unspecified;
169 SourceLocation DefaultAttrLoc;
170 DefaultmapInfo DefaultmapMap[OMPC_DEFAULTMAP_unknown + 1];
171 OpenMPDirectiveKind Directive = OMPD_unknown;
172 DeclarationNameInfo DirectiveName;
173 Scope *CurScope = nullptr;
174 DeclContext *Context = nullptr;
175 SourceLocation ConstructLoc;
176 /// Set of 'depend' clauses with 'sink|source' dependence kind. Required to
177 /// get the data (loop counters etc.) about enclosing loop-based construct.
178 /// This data is required during codegen.
179 DoacrossClauseMapTy DoacrossDepends;
180 /// The optional parameter of the 'ordered' clause (first element) and the
181 /// 'ordered' clause itself (second element); unset if the region has no
182 /// 'ordered' clause.
183 std::optional<std::pair<const Expr *, OMPOrderedClause *>> OrderedRegion;
184 bool RegionHasOrderConcurrent = false;
185 unsigned AssociatedLoops = 1;
186 bool HasMutipleLoops = false;
187 const Decl *PossiblyLoopCounter = nullptr;
188 bool NowaitRegion = false;
189 bool UntiedRegion = false;
190 bool CancelRegion = false;
191 bool LoopStart = false;
192 bool BodyComplete = false;
193 SourceLocation PrevScanLocation;
194 SourceLocation PrevOrderedLocation;
195 SourceLocation InnerTeamsRegionLoc;
196 /// Reference to the taskgroup task_reduction reference expression.
197 Expr *TaskgroupReductionRef = nullptr;
198 llvm::DenseSet<QualType> MappedClassesQualTypes;
199 SmallVector<Expr *, 4> InnerUsedAllocators;
200 llvm::DenseSet<CanonicalDeclPtr<Decl>> ImplicitTaskFirstprivates;
201 /// List of globals marked as declare target link in this target region
202 /// (isOpenMPTargetExecutionDirective(Directive) == true).
203 llvm::SmallVector<DeclRefExpr *, 4> DeclareTargetLinkVarDecls;
204 /// List of decls used in inclusive/exclusive clauses of the scan directive.
205 llvm::DenseSet<CanonicalDeclPtr<Decl>> UsedInScanDirective;
206 llvm::DenseMap<CanonicalDeclPtr<const Decl>, UsesAllocatorsDeclKind>
207 UsesAllocatorsDecls;
208 /// Data is required on creating capture fields for implicit
209 /// default first|private clause.
210 struct ImplicitDefaultFDInfoTy {
211 /// Field decl.
212 const FieldDecl *FD = nullptr;
213 /// Nesting stack level
214 size_t StackLevel = 0;
215 /// Capture variable decl.
216 VarDecl *VD = nullptr;
217 ImplicitDefaultFDInfoTy(const FieldDecl *FD, size_t StackLevel,
218 VarDecl *VD)
219 : FD(FD), StackLevel(StackLevel), VD(VD) {}
220 };
221 /// List of captured fields
222 llvm::SmallVector<ImplicitDefaultFDInfoTy, 8>
223 ImplicitDefaultFirstprivateFDs;
224 Expr *DeclareMapperVar = nullptr;
225 SmallVector<VarDecl *, 16> IteratorVarDecls;
226 SharingMapTy(OpenMPDirectiveKind DKind, DeclarationNameInfo Name,
227 Scope *CurScope, SourceLocation Loc)
228 : Directive(DKind), DirectiveName(Name), CurScope(CurScope),
229 ConstructLoc(Loc) {}
230 SharingMapTy() = default;
231 };
232
233 using StackTy = SmallVector<SharingMapTy, 4>;
234
235 /// Stack of used declaration and their data-sharing attributes.
236 DeclSAMapTy Threadprivates;
237 const FunctionScopeInfo *CurrentNonCapturingFunctionScope = nullptr;
238 SmallVector<std::pair<StackTy, const FunctionScopeInfo *>, 4> Stack;
239 /// true, if check for DSA must be from parent directive, false, if
240 /// from current directive.
241 OpenMPClauseKind ClauseKindMode = OMPC_unknown;
242 Sema &SemaRef;
243 bool ForceCapturing = false;
244 /// true if all the variables in the target executable directives must be
245 /// captured by reference.
246 bool ForceCaptureByReferenceInTargetExecutable = false;
247 CriticalsWithHintsTy Criticals;
248 unsigned IgnoredStackElements = 0;
249
250 /// Iterators over the stack iterate in order from innermost to outermost
251 /// directive.
252 using const_iterator = StackTy::const_reverse_iterator;
253 const_iterator begin() const {
254 return Stack.empty() ? const_iterator()
255 : Stack.back().first.rbegin() + IgnoredStackElements;
256 }
257 const_iterator end() const {
258 return Stack.empty() ? const_iterator() : Stack.back().first.rend();
259 }
260 using iterator = StackTy::reverse_iterator;
261 iterator begin() {
262 return Stack.empty() ? iterator()
263 : Stack.back().first.rbegin() + IgnoredStackElements;
264 }
265 iterator end() {
266 return Stack.empty() ? iterator() : Stack.back().first.rend();
267 }
268
269 // Convenience operations to get at the elements of the stack.
270
271 bool isStackEmpty() const {
272 return Stack.empty() ||
273 Stack.back().second != CurrentNonCapturingFunctionScope ||
274 Stack.back().first.size() <= IgnoredStackElements;
275 }
276 size_t getStackSize() const {
277 return isStackEmpty() ? 0
278 : Stack.back().first.size() - IgnoredStackElements;
279 }
280
281 SharingMapTy *getTopOfStackOrNull() {
282 size_t Size = getStackSize();
283 if (Size == 0)
284 return nullptr;
285 return &Stack.back().first[Size - 1];
286 }
287 const SharingMapTy *getTopOfStackOrNull() const {
288 return const_cast<DSAStackTy &>(*this).getTopOfStackOrNull();
289 }
290 SharingMapTy &getTopOfStack() {
291 assert(!isStackEmpty() && "no current directive");
292 return *getTopOfStackOrNull();
293 }
294 const SharingMapTy &getTopOfStack() const {
295 return const_cast<DSAStackTy &>(*this).getTopOfStack();
296 }
297
298 SharingMapTy *getSecondOnStackOrNull() {
299 size_t Size = getStackSize();
300 if (Size <= 1)
301 return nullptr;
302 return &Stack.back().first[Size - 2];
303 }
304 const SharingMapTy *getSecondOnStackOrNull() const {
305 return const_cast<DSAStackTy &>(*this).getSecondOnStackOrNull();
306 }
307
308 /// Get the stack element at a certain level (previously returned by
309 /// \c getNestingLevel).
310 ///
311 /// Note that nesting levels count from outermost to innermost, and this is
312 /// the reverse of our iteration order where new inner levels are pushed at
313 /// the front of the stack.
314 SharingMapTy &getStackElemAtLevel(unsigned Level) {
315 assert(Level < getStackSize() && "no such stack element");
316 return Stack.back().first[Level];
317 }
318 const SharingMapTy &getStackElemAtLevel(unsigned Level) const {
319 return const_cast<DSAStackTy &>(*this).getStackElemAtLevel(Level);
320 }
321
322 DSAVarData getDSA(const_iterator &Iter, ValueDecl *D) const;
323
324 /// Checks if the variable is a local for OpenMP region.
325 bool isOpenMPLocal(VarDecl *D, const_iterator Iter) const;
326
327 /// Vector of previously declared requires directives
328 SmallVector<const OMPRequiresDecl *, 2> RequiresDecls;
329 /// omp_allocator_handle_t type.
330 QualType OMPAllocatorHandleT;
331 /// omp_depend_t type.
332 QualType OMPDependT;
333 /// omp_event_handle_t type.
334 QualType OMPEventHandleT;
335 /// omp_alloctrait_t type.
336 QualType OMPAlloctraitT;
337 /// Expression for the predefined allocators.
338 Expr *OMPPredefinedAllocators[OMPAllocateDeclAttr::OMPUserDefinedMemAlloc] = {
339 nullptr};
340 /// Vector of previously encountered target directives
341 SmallVector<SourceLocation, 2> TargetLocations;
342 SourceLocation AtomicLocation;
343 /// Vector of declare variant construct traits.
344 SmallVector<llvm::omp::TraitProperty, 8> ConstructTraits;
345
346public:
347 explicit DSAStackTy(Sema &S) : SemaRef(S) {}
348
349 /// Sets omp_allocator_handle_t type.
350 void setOMPAllocatorHandleT(QualType Ty) { OMPAllocatorHandleT = Ty; }
351 /// Gets omp_allocator_handle_t type.
352 QualType getOMPAllocatorHandleT() const { return OMPAllocatorHandleT; }
353 /// Sets omp_alloctrait_t type.
354 void setOMPAlloctraitT(QualType Ty) { OMPAlloctraitT = Ty; }
355 /// Gets omp_alloctrait_t type.
356 QualType getOMPAlloctraitT() const { return OMPAlloctraitT; }
357 /// Sets the given default allocator.
358 void setAllocator(OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind,
359 Expr *Allocator) {
360 OMPPredefinedAllocators[AllocatorKind] = Allocator;
361 }
362 /// Returns the specified default allocator.
363 Expr *getAllocator(OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind) const {
364 return OMPPredefinedAllocators[AllocatorKind];
365 }
366 /// Sets omp_depend_t type.
367 void setOMPDependT(QualType Ty) { OMPDependT = Ty; }
368 /// Gets omp_depend_t type.
369 QualType getOMPDependT() const { return OMPDependT; }
370
371 /// Sets omp_event_handle_t type.
372 void setOMPEventHandleT(QualType Ty) { OMPEventHandleT = Ty; }
373 /// Gets omp_event_handle_t type.
374 QualType getOMPEventHandleT() const { return OMPEventHandleT; }
375
376 bool isClauseParsingMode() const { return ClauseKindMode != OMPC_unknown; }
377 OpenMPClauseKind getClauseParsingMode() const {
378 assert(isClauseParsingMode() && "Must be in clause parsing mode.");
379 return ClauseKindMode;
380 }
381 void setClauseParsingMode(OpenMPClauseKind K) { ClauseKindMode = K; }
382
383 bool isBodyComplete() const {
384 const SharingMapTy *Top = getTopOfStackOrNull();
385 return Top && Top->BodyComplete;
386 }
387 void setBodyComplete() { getTopOfStack().BodyComplete = true; }
388
389 bool isForceVarCapturing() const { return ForceCapturing; }
390 void setForceVarCapturing(bool V) { ForceCapturing = V; }
391
392 void setForceCaptureByReferenceInTargetExecutable(bool V) {
393 ForceCaptureByReferenceInTargetExecutable = V;
394 }
395 bool isForceCaptureByReferenceInTargetExecutable() const {
396 return ForceCaptureByReferenceInTargetExecutable;
397 }
398
399 void push(OpenMPDirectiveKind DKind, const DeclarationNameInfo &DirName,
400 Scope *CurScope, SourceLocation Loc) {
401 assert(!IgnoredStackElements &&
402 "cannot change stack while ignoring elements");
403 if (Stack.empty() ||
404 Stack.back().second != CurrentNonCapturingFunctionScope)
405 Stack.emplace_back(StackTy(), CurrentNonCapturingFunctionScope);
406 Stack.back().first.emplace_back(DKind, DirName, CurScope, Loc);
407 Stack.back().first.back().DefaultAttrLoc = Loc;
408 }
409
410 void pop() {
411 assert(!IgnoredStackElements &&
412 "cannot change stack while ignoring elements");
413 assert(!Stack.back().first.empty() &&
414 "Data-sharing attributes stack is empty!");
415 Stack.back().first.pop_back();
416 }
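 // For illustration, each directive pushes one SharingMapTy element, so in
 //   #pragma omp parallel
 //   #pragma omp for
 //   for (int i = 0; i < N; ++i) { ... }
 // the 'for' element sits on top of the 'parallel' element while the loop is
 // analyzed, and pop() removes it when the region ends.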
417
418 /// RAII object to temporarily leave the scope of a directive when we want to
419 /// logically operate in its parent.
420 class ParentDirectiveScope {
421 DSAStackTy &Self;
422 bool Active;
423
424 public:
425 ParentDirectiveScope(DSAStackTy &Self, bool Activate)
426 : Self(Self), Active(false) {
427 if (Activate)
428 enable();
429 }
430 ~ParentDirectiveScope() { disable(); }
431 void disable() {
432 if (Active) {
433 --Self.IgnoredStackElements;
434 Active = false;
435 }
436 }
437 void enable() {
438 if (!Active) {
439 ++Self.IgnoredStackElements;
440 Active = true;
441 }
442 }
443 };
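 // A usage sketch (assuming 'Stack' names the active DSAStackTy instance):
 //   {
 //     ParentDirectiveScope ParentScope(Stack, /*Activate=*/true);
 //     // queries such as Stack.getTopOfStack() now see the parent of the
 //     // current directive
 //   } // destructor restores the original view of the stack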
444
445 /// Marks that we have started loop parsing.
446 void loopInit() {
447 assert(isOpenMPLoopDirective(getCurrentDirective()) &&
448 "Expected loop-based directive.");
449 getTopOfStack().LoopStart = true;
450 }
451 /// Start capturing of the variables in the loop context.
452 void loopStart() {
453 assert(isOpenMPLoopDirective(getCurrentDirective()) &&
454 "Expected loop-based directive.");
455 getTopOfStack().LoopStart = false;
456 }
457 /// true, if variables are captured, false otherwise.
458 bool isLoopStarted() const {
459 assert(isOpenMPLoopDirective(getCurrentDirective()) &&
460 "Expected loop-based directive.");
461 return !getTopOfStack().LoopStart;
462 }
463 /// Marks (or clears) declaration as possibly loop counter.
464 void resetPossibleLoopCounter(const Decl *D = nullptr) {
465 getTopOfStack().PossiblyLoopCounter = D ? D->getCanonicalDecl() : D;
466 }
467 /// Gets the possible loop counter decl.
468 const Decl *getPossiblyLoopCounter() const {
469 return getTopOfStack().PossiblyLoopCounter;
470 }
471 /// Start new OpenMP region stack in new non-capturing function.
472 void pushFunction() {
473 assert(!IgnoredStackElements &&
474 "cannot change stack while ignoring elements");
475 const FunctionScopeInfo *CurFnScope = SemaRef.getCurFunction();
476 assert(!isa<CapturingScopeInfo>(CurFnScope));
477 CurrentNonCapturingFunctionScope = CurFnScope;
478 }
479 /// Pop region stack for non-capturing function.
480 void popFunction(const FunctionScopeInfo *OldFSI) {
481 assert(!IgnoredStackElements &&
482 "cannot change stack while ignoring elements");
483 if (!Stack.empty() && Stack.back().second == OldFSI) {
484 assert(Stack.back().first.empty());
485 Stack.pop_back();
486 }
487 CurrentNonCapturingFunctionScope = nullptr;
488 for (const FunctionScopeInfo *FSI : llvm::reverse(SemaRef.FunctionScopes)) {
489 if (!isa<CapturingScopeInfo>(FSI)) {
490 CurrentNonCapturingFunctionScope = FSI;
491 break;
492 }
493 }
494 }
495
496 void addCriticalWithHint(const OMPCriticalDirective *D, llvm::APSInt Hint) {
497 Criticals.try_emplace(D->getDirectiveName().getAsString(), D, Hint);
498 }
499 const std::pair<const OMPCriticalDirective *, llvm::APSInt>
500 getCriticalWithHint(const DeclarationNameInfo &Name) const {
501 auto I = Criticals.find(Name.getAsString());
502 if (I != Criticals.end())
503 return I->second;
504 return std::make_pair(nullptr, llvm::APSInt());
505 }
506 /// If 'aligned' declaration for given variable \a D was not seen yet,
507 /// add it and return NULL; otherwise return previous occurrence's expression
508 /// for diagnostics.
509 const Expr *addUniqueAligned(const ValueDecl *D, const Expr *NewDE);
510 /// If 'nontemporal' declaration for given variable \a D was not seen yet,
511 /// add it and return NULL; otherwise return previous occurrence's expression
512 /// for diagnostics.
513 const Expr *addUniqueNontemporal(const ValueDecl *D, const Expr *NewDE);
514
515 /// Register specified variable as loop control variable.
516 void addLoopControlVariable(const ValueDecl *D, VarDecl *Capture);
517 /// Check if the specified variable is a loop control variable for
518 /// current region.
519 /// \return The index of the loop control variable in the list of associated
520 /// for-loops (from outer to inner).
521 const LCDeclInfo isLoopControlVariable(const ValueDecl *D) const;
522 /// Check if the specified variable is a loop control variable for
523 /// parent region.
524 /// \return The index of the loop control variable in the list of associated
525 /// for-loops (from outer to inner).
526 const LCDeclInfo isParentLoopControlVariable(const ValueDecl *D) const;
527 /// Check if the specified variable is a loop control variable for
528 /// current region.
529 /// \return The index of the loop control variable in the list of associated
530 /// for-loops (from outer to inner).
531 const LCDeclInfo isLoopControlVariable(const ValueDecl *D,
532 unsigned Level) const;
533 /// Get the loop control variable for the I-th loop (or nullptr) in
534 /// parent directive.
535 const ValueDecl *getParentLoopControlVariable(unsigned I) const;
536
537 /// Marks the specified decl \p D as used in scan directive.
538 void markDeclAsUsedInScanDirective(ValueDecl *D) {
539 if (SharingMapTy *Stack = getSecondOnStackOrNull())
540 Stack->UsedInScanDirective.insert(D);
541 }
542
543 /// Checks if the specified declaration was used in the inner scan directive.
544 bool isUsedInScanDirective(ValueDecl *D) const {
545 if (const SharingMapTy *Stack = getTopOfStackOrNull())
546 return Stack->UsedInScanDirective.contains(D);
547 return false;
548 }
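 // For illustration, these helpers track list items of 'inclusive' and
 // 'exclusive' clauses, e.g. 'x' in
 //   #pragma omp for reduction(inscan, + : x)
 // with a nested '#pragma omp scan inclusive(x)', so later checks can tell
 // whether a reduction item was referenced in the inner scan directive.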
549
550 /// Adds explicit data sharing attribute to the specified declaration.
551 void addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
552 DeclRefExpr *PrivateCopy = nullptr, unsigned Modifier = 0,
553 bool AppliedToPointee = false);
554
555 /// Adds additional information for the reduction items with the reduction id
556 /// represented as an operator.
557 void addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
558 BinaryOperatorKind BOK);
559 /// Adds additional information for the reduction items with the reduction id
560 /// represented as reduction identifier.
561 void addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
562 const Expr *ReductionRef);
563 /// Returns the location and reduction operation from the innermost parent
564 /// region for the given \p D.
565 const DSAVarData
566 getTopMostTaskgroupReductionData(const ValueDecl *D, SourceRange &SR,
567 BinaryOperatorKind &BOK,
568 Expr *&TaskgroupDescriptor) const;
569 /// Returns the location and reduction operation from the innermost parent
570 /// region for the given \p D.
571 const DSAVarData
572 getTopMostTaskgroupReductionData(const ValueDecl *D, SourceRange &SR,
573 const Expr *&ReductionRef,
574 Expr *&TaskgroupDescriptor) const;
575 /// Return reduction reference expression for the current taskgroup or
576 /// parallel/worksharing directives with task reductions.
577 Expr *getTaskgroupReductionRef() const {
578 assert((getTopOfStack().Directive == OMPD_taskgroup ||
579 ((isOpenMPParallelDirective(getTopOfStack().Directive) ||
580 isOpenMPWorksharingDirective(getTopOfStack().Directive)) &&
581 !isOpenMPSimdDirective(getTopOfStack().Directive))) &&
582 "taskgroup reference expression requested for non taskgroup or "
583 "parallel/worksharing directive.");
584 return getTopOfStack().TaskgroupReductionRef;
585 }
586 /// Checks if the given \p VD declaration is actually a taskgroup reduction
587 /// descriptor variable at the \p Level of OpenMP regions.
588 bool isTaskgroupReductionRef(const ValueDecl *VD, unsigned Level) const {
589 return getStackElemAtLevel(Level).TaskgroupReductionRef &&
590 cast<DeclRefExpr>(getStackElemAtLevel(Level).TaskgroupReductionRef)
591 ->getDecl() == VD;
592 }
593
594 /// Returns data sharing attributes from top of the stack for the
595 /// specified declaration.
596 const DSAVarData getTopDSA(ValueDecl *D, bool FromParent);
597 /// Returns data-sharing attributes for the specified declaration.
598 const DSAVarData getImplicitDSA(ValueDecl *D, bool FromParent) const;
599 /// Returns data-sharing attributes for the specified declaration.
600 const DSAVarData getImplicitDSA(ValueDecl *D, unsigned Level) const;
601 /// Checks if the specified variable has data-sharing attributes which
602 /// match the specified \a CPred predicate in any directive which matches the
603 /// \a DPred predicate.
604 const DSAVarData
605 hasDSA(ValueDecl *D,
606 const llvm::function_ref<bool(OpenMPClauseKind, bool,
607 DefaultDataSharingAttributes)>
608 CPred,
609 const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
610 bool FromParent) const;
611 /// Checks if the specified variable has data-sharing attributes which
612 /// match the specified \a CPred predicate in any innermost directive which
613 /// matches the \a DPred predicate.
614 const DSAVarData
615 hasInnermostDSA(ValueDecl *D,
616 const llvm::function_ref<bool(OpenMPClauseKind, bool)> CPred,
617 const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
618 bool FromParent) const;
619 /// Checks if the specified variable has explicit data-sharing
620 /// attributes which match the specified \a CPred predicate at the specified
621 /// OpenMP region.
622 bool
623 hasExplicitDSA(const ValueDecl *D,
624 const llvm::function_ref<bool(OpenMPClauseKind, bool)> CPred,
625 unsigned Level, bool NotLastprivate = false) const;
626
627 /// Returns true if the directive at level \p Level matches the
628 /// specified \a DPred predicate.
629 bool hasExplicitDirective(
630 const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
631 unsigned Level) const;
632
633 /// Finds a directive which matches specified \a DPred predicate.
634 bool hasDirective(
635 const llvm::function_ref<bool(
636 OpenMPDirectiveKind, const DeclarationNameInfo &, SourceLocation)>
637 DPred,
638 bool FromParent) const;
639
640 /// Returns currently analyzed directive.
641 OpenMPDirectiveKind getCurrentDirective() const {
642 const SharingMapTy *Top = getTopOfStackOrNull();
643 return Top ? Top->Directive : OMPD_unknown;
644 }
645 /// Returns directive kind at specified level.
646 OpenMPDirectiveKind getDirective(unsigned Level) const {
647 assert(!isStackEmpty() && "No directive at specified level.");
648 return getStackElemAtLevel(Level).Directive;
649 }
650 /// Returns the capture region at the specified level.
651 OpenMPDirectiveKind getCaptureRegion(unsigned Level,
652 unsigned OpenMPCaptureLevel) const {
653 SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
654 getOpenMPCaptureRegions(CaptureRegions, getDirective(Level));
655 return CaptureRegions[OpenMPCaptureLevel];
656 }
657 /// Returns parent directive.
658 OpenMPDirectiveKind getParentDirective() const {
659 const SharingMapTy *Parent = getSecondOnStackOrNull();
660 return Parent ? Parent->Directive : OMPD_unknown;
661 }
662
663 /// Add requires decl to internal vector
664 void addRequiresDecl(OMPRequiresDecl *RD) { RequiresDecls.push_back(RD); }
665
666 /// Checks if the defined 'requires' directive has specified type of clause.
667 template <typename ClauseType> bool hasRequiresDeclWithClause() const {
668 return llvm::any_of(RequiresDecls, [](const OMPRequiresDecl *D) {
669 return llvm::any_of(D->clauselists(), [](const OMPClause *C) {
670 return isa<ClauseType>(C);
671 });
672 });
673 }
674
675 /// Checks for a duplicate clause amongst previously declared requires
676 /// directives
677 bool hasDuplicateRequiresClause(ArrayRef<OMPClause *> ClauseList) const {
678 bool IsDuplicate = false;
679 for (OMPClause *CNew : ClauseList) {
680 for (const OMPRequiresDecl *D : RequiresDecls) {
681 for (const OMPClause *CPrev : D->clauselists()) {
682 if (CNew->getClauseKind() == CPrev->getClauseKind()) {
683 SemaRef.Diag(CNew->getBeginLoc(),
684 diag::err_omp_requires_clause_redeclaration)
685 << getOpenMPClauseName(CNew->getClauseKind());
686 SemaRef.Diag(CPrev->getBeginLoc(),
687 diag::note_omp_requires_previous_clause)
688 << getOpenMPClauseName(CPrev->getClauseKind());
689 IsDuplicate = true;
690 }
691 }
692 }
693 }
694 return IsDuplicate;
695 }
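 // For illustration, this diagnoses a repeated clause such as
 //   #pragma omp requires unified_shared_memory
 //   #pragma omp requires unified_shared_memory  // clause redeclaration
 // by comparing each new clause against the clauses of every previously
 // recorded 'requires' directive.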
696
697 /// Add location of previously encountered target to internal vector
698 void addTargetDirLocation(SourceLocation LocStart) {
699 TargetLocations.push_back(LocStart);
700 }
701
702 /// Add location for the first encountered atomic directive.
703 void addAtomicDirectiveLoc(SourceLocation Loc) {
704 if (AtomicLocation.isInvalid())
705 AtomicLocation = Loc;
706 }
707
708 /// Returns the location of the first encountered atomic directive in the
709 /// module.
710 SourceLocation getAtomicDirectiveLoc() const { return AtomicLocation; }
711
712 // Return previously encountered target region locations.
713 ArrayRef<SourceLocation> getEncounteredTargetLocs() const {
714 return TargetLocations;
715 }
716
717 /// Set default data sharing attribute to none.
718 void setDefaultDSANone(SourceLocation Loc) {
719 getTopOfStack().DefaultAttr = DSA_none;
720 getTopOfStack().DefaultAttrLoc = Loc;
721 }
722 /// Set default data sharing attribute to shared.
723 void setDefaultDSAShared(SourceLocation Loc) {
724 getTopOfStack().DefaultAttr = DSA_shared;
725 getTopOfStack().DefaultAttrLoc = Loc;
726 }
727 /// Set default data sharing attribute to private.
728 void setDefaultDSAPrivate(SourceLocation Loc) {
729 getTopOfStack().DefaultAttr = DSA_private;
730 getTopOfStack().DefaultAttrLoc = Loc;
731 }
732 /// Set default data sharing attribute to firstprivate.
733 void setDefaultDSAFirstPrivate(SourceLocation Loc) {
734 getTopOfStack().DefaultAttr = DSA_firstprivate;
735 getTopOfStack().DefaultAttrLoc = Loc;
736 }
737 /// Set default data mapping attribute to Modifier:Kind
738 void setDefaultDMAAttr(OpenMPDefaultmapClauseModifier M,
739 OpenMPDefaultmapClauseKind Kind, SourceLocation Loc) {
740 DefaultmapInfo &DMI = getTopOfStack().DefaultmapMap[Kind];
741 DMI.ImplicitBehavior = M;
742 DMI.SLoc = Loc;
743 }
744 /// Check whether the implicit-behavior has been set in defaultmap
745 bool checkDefaultmapCategory(OpenMPDefaultmapClauseKind VariableCategory) {
746 if (VariableCategory == OMPC_DEFAULTMAP_unknown)
747 return getTopOfStack()
748 .DefaultmapMap[OMPC_DEFAULTMAP_aggregate]
749 .ImplicitBehavior != OMPC_DEFAULTMAP_MODIFIER_unknown ||
750 getTopOfStack()
751 .DefaultmapMap[OMPC_DEFAULTMAP_scalar]
752 .ImplicitBehavior != OMPC_DEFAULTMAP_MODIFIER_unknown ||
753 getTopOfStack()
754 .DefaultmapMap[OMPC_DEFAULTMAP_pointer]
755 .ImplicitBehavior != OMPC_DEFAULTMAP_MODIFIER_unknown;
756 return getTopOfStack().DefaultmapMap[VariableCategory].ImplicitBehavior !=
757 OMPC_DEFAULTMAP_MODIFIER_unknown;
758 }
759
760 ArrayRef<llvm::omp::TraitProperty> getConstructTraits() {
761 return ConstructTraits;
762 }
763 void handleConstructTrait(ArrayRef<llvm::omp::TraitProperty> Traits,
764 bool ScopeEntry) {
765 if (ScopeEntry)
766 ConstructTraits.append(Traits.begin(), Traits.end());
767 else
768 for (llvm::omp::TraitProperty Trait : llvm::reverse(Traits)) {
769 llvm::omp::TraitProperty Top = ConstructTraits.pop_back_val();
770 assert(Top == Trait && "Something left a trait on the stack!");
771 (void)Trait;
772 (void)Top;
773 }
774 }
775
776 DefaultDataSharingAttributes getDefaultDSA(unsigned Level) const {
777 return getStackSize() <= Level ? DSA_unspecified
778 : getStackElemAtLevel(Level).DefaultAttr;
779 }
780 DefaultDataSharingAttributes getDefaultDSA() const {
781 return isStackEmpty() ? DSA_unspecified : getTopOfStack().DefaultAttr;
782 }
783 SourceLocation getDefaultDSALocation() const {
784 return isStackEmpty() ? SourceLocation() : getTopOfStack().DefaultAttrLoc;
785 }
786 OpenMPDefaultmapClauseModifier
787 getDefaultmapModifier(OpenMPDefaultmapClauseKind Kind) const {
788 return isStackEmpty()
789 ? OMPC_DEFAULTMAP_MODIFIER_unknown
790 : getTopOfStack().DefaultmapMap[Kind].ImplicitBehavior;
791 }
792 OpenMPDefaultmapClauseModifier
793 getDefaultmapModifierAtLevel(unsigned Level,
794 OpenMPDefaultmapClauseKind Kind) const {
795 return getStackElemAtLevel(Level).DefaultmapMap[Kind].ImplicitBehavior;
796 }
797 bool isDefaultmapCapturedByRef(unsigned Level,
798 OpenMPDefaultmapClauseKind Kind) const {
799 OpenMPDefaultmapClauseModifier M =
800 getDefaultmapModifierAtLevel(Level, Kind);
801 if (Kind == OMPC_DEFAULTMAP_scalar || Kind == OMPC_DEFAULTMAP_pointer) {
802 return (M == OMPC_DEFAULTMAP_MODIFIER_alloc) ||
803 (M == OMPC_DEFAULTMAP_MODIFIER_to) ||
804 (M == OMPC_DEFAULTMAP_MODIFIER_from) ||
805 (M == OMPC_DEFAULTMAP_MODIFIER_tofrom) ||
806 (M == OMPC_DEFAULTMAP_MODIFIER_present);
807 }
808 return true;
809 }
810 static bool mustBeFirstprivateBase(OpenMPDefaultmapClauseModifier M,
811 OpenMPDefaultmapClauseKind Kind) {
812 switch (Kind) {
813 case OMPC_DEFAULTMAP_scalar:
814 case OMPC_DEFAULTMAP_pointer:
815 return (M == OMPC_DEFAULTMAP_MODIFIER_unknown) ||
816 (M == OMPC_DEFAULTMAP_MODIFIER_firstprivate) ||
817 (M == OMPC_DEFAULTMAP_MODIFIER_default);
818 case OMPC_DEFAULTMAP_aggregate:
819 return M == OMPC_DEFAULTMAP_MODIFIER_firstprivate;
820 default:
821 break;
822 }
823 llvm_unreachable("Unexpected OpenMPDefaultmapClauseKind enum");
824 }
825 bool mustBeFirstprivateAtLevel(unsigned Level,
826 OpenMPDefaultmapClauseKind Kind) const {
827 OpenMPDefaultmapClauseModifier M =
828 getDefaultmapModifierAtLevel(Level, Kind);
829 return mustBeFirstprivateBase(M, Kind);
830 }
831 bool mustBeFirstprivate(OpenMPDefaultmapClauseKind Kind) const {
832 OpenMPDefaultmapClauseModifier M = getDefaultmapModifier(Kind);
833 return mustBeFirstprivateBase(M, Kind);
834 }
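 // For illustration, these helpers decide how a variable is captured in a
 // target region: under
 //   #pragma omp target defaultmap(firstprivate : scalar)
 // scalars default to firstprivate (captured by value), whereas
 //   #pragma omp target defaultmap(tofrom : scalar)
 // maps scalars and therefore captures them by reference.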
835
836 /// Checks if the specified variable is a threadprivate.
837 bool isThreadPrivate(VarDecl *D) {
838 const DSAVarData DVar = getTopDSA(D, false);
839 return isOpenMPThreadPrivate(DVar.CKind);
840 }
841
842 /// Marks current region as ordered (it has an 'ordered' clause).
843 void setOrderedRegion(bool IsOrdered, const Expr *Param,
844 OMPOrderedClause *Clause) {
845 if (IsOrdered)
846 getTopOfStack().OrderedRegion.emplace(Param, Clause);
847 else
848 getTopOfStack().OrderedRegion.reset();
849 }
850 /// Returns true, if region is ordered (has associated 'ordered' clause),
851 /// false - otherwise.
852 bool isOrderedRegion() const {
853 if (const SharingMapTy *Top = getTopOfStackOrNull())
854 return Top->OrderedRegion.has_value();
855 return false;
856 }
857 /// Returns optional parameter for the ordered region.
858 std::pair<const Expr *, OMPOrderedClause *> getOrderedRegionParam() const {
859 if (const SharingMapTy *Top = getTopOfStackOrNull())
860 if (Top->OrderedRegion)
861 return *Top->OrderedRegion;
862 return std::make_pair(nullptr, nullptr);
863 }
864 /// Returns true, if parent region is ordered (has associated
865 /// 'ordered' clause), false - otherwise.
866 bool isParentOrderedRegion() const {
867 if (const SharingMapTy *Parent = getSecondOnStackOrNull())
868 return Parent->OrderedRegion.has_value();
869 return false;
870 }
871 /// Returns optional parameter for the ordered region.
872 std::pair<const Expr *, OMPOrderedClause *>
873 getParentOrderedRegionParam() const {
874 if (const SharingMapTy *Parent = getSecondOnStackOrNull())
875 if (Parent->OrderedRegion)
876 return *Parent->OrderedRegion;
877 return std::make_pair(nullptr, nullptr);
878 }
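 // For illustration, the ordered-region state corresponds to constructs like
 //   #pragma omp for ordered(2)
 // where '2' is the optional parameter recorded here; it is later used to
 // validate nested '#pragma omp ordered depend(sink : ...)' directives
 // against the enclosing loop nest.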
879 /// Marks current region as having an 'order' clause.
880 void setRegionHasOrderConcurrent(bool HasOrderConcurrent) {
881 getTopOfStack().RegionHasOrderConcurrent = HasOrderConcurrent;
882 }
883 /// Returns true, if parent region has an associated
884 /// 'order' clause, false - otherwise.
885 bool isParentOrderConcurrent() const {
886 if (const SharingMapTy *Parent = getSecondOnStackOrNull())
887 return Parent->RegionHasOrderConcurrent;
888 return false;
889 }
890 /// Marks current region as nowait (it has a 'nowait' clause).
891 void setNowaitRegion(bool IsNowait = true) {
892 getTopOfStack().NowaitRegion = IsNowait;
893 }
894 /// Returns true, if parent region is nowait (has associated
895 /// 'nowait' clause), false - otherwise.
896 bool isParentNowaitRegion() const {
897 if (const SharingMapTy *Parent = getSecondOnStackOrNull())
898 return Parent->NowaitRegion;
899 return false;
900 }
901 /// Marks current region as untied (it has an 'untied' clause).
902 void setUntiedRegion(bool IsUntied = true) {
903 getTopOfStack().UntiedRegion = IsUntied;
904 }
905 /// Return true if current region is untied.
906 bool isUntiedRegion() const {
907 const SharingMapTy *Top = getTopOfStackOrNull();
908 return Top ? Top->UntiedRegion : false;
909 }
910 /// Marks parent region as cancel region.
911 void setParentCancelRegion(bool Cancel = true) {
912 if (SharingMapTy *Parent = getSecondOnStackOrNull())
913 Parent->CancelRegion |= Cancel;
914 }
915 /// Return true if current region has inner cancel construct.
916 bool isCancelRegion() const {
917 const SharingMapTy *Top = getTopOfStackOrNull();
918 return Top ? Top->CancelRegion : false;
919 }
920
921 /// Mark that parent region already has scan directive.
922 void setParentHasScanDirective(SourceLocation Loc) {
923 if (SharingMapTy *Parent = getSecondOnStackOrNull())
924 Parent->PrevScanLocation = Loc;
925 }
926 /// Return true if the parent region already has a scan directive.
927 bool doesParentHasScanDirective() const {
928 const SharingMapTy *Top = getSecondOnStackOrNull();
929 return Top ? Top->PrevScanLocation.isValid() : false;
930 }
931 /// Return the location of the scan directive in the parent region (if any).
932 SourceLocation getParentScanDirectiveLoc() const {
933 const SharingMapTy *Top = getSecondOnStackOrNull();
934 return Top ? Top->PrevScanLocation : SourceLocation();
935 }
936 /// Mark that parent region already has ordered directive.
937 void setParentHasOrderedDirective(SourceLocation Loc) {
938 if (SharingMapTy *Parent = getSecondOnStackOrNull())
939 Parent->PrevOrderedLocation = Loc;
940 }
941 /// Return true if the parent region already has an ordered directive.
942 bool doesParentHasOrderedDirective() const {
943 const SharingMapTy *Top = getSecondOnStackOrNull();
944 return Top ? Top->PrevOrderedLocation.isValid() : false;
945 }
946 /// Returns the location of the previously specified ordered directive.
947 SourceLocation getParentOrderedDirectiveLoc() const {
948 const SharingMapTy *Top = getSecondOnStackOrNull();
949 return Top ? Top->PrevOrderedLocation : SourceLocation();
950 }
951
952 /// Set collapse value for the region.
953 void setAssociatedLoops(unsigned Val) {
954 getTopOfStack().AssociatedLoops = Val;
955 if (Val > 1)
956 getTopOfStack().HasMutipleLoops = true;
957 }
958 /// Return collapse value for region.
959 unsigned getAssociatedLoops() const {
960 const SharingMapTy *Top = getTopOfStackOrNull();
961 return Top ? Top->AssociatedLoops : 0;
962 }
963 /// Returns true if the construct is associated with multiple loops.
964 bool hasMutipleLoops() const {
965 const SharingMapTy *Top = getTopOfStackOrNull();
966 return Top ? Top->HasMutipleLoops : false;
967 }
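 // For illustration, the associated-loops count mirrors the 'collapse'
 // clause, e.g.
 //   #pragma omp for collapse(2)
 // associates the two outermost nested loops with the construct, so
 // getAssociatedLoops() returns 2 and hasMutipleLoops() returns true.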
968
969 /// Marks current target region as one with closely nested teams
970 /// region.
971 void setParentTeamsRegionLoc(SourceLocation TeamsRegionLoc) {
972 if (SharingMapTy *Parent = getSecondOnStackOrNull())
973 Parent->InnerTeamsRegionLoc = TeamsRegionLoc;
974 }
975 /// Returns true, if current region has closely nested teams region.
976 bool hasInnerTeamsRegion() const {
977 return getInnerTeamsRegionLoc().isValid();
978 }
979 /// Returns location of the nested teams region (if any).
980 SourceLocation getInnerTeamsRegionLoc() const {
981 const SharingMapTy *Top = getTopOfStackOrNull();
982 return Top ? Top->InnerTeamsRegionLoc : SourceLocation();
983 }
984
985 Scope *getCurScope() const {
986 const SharingMapTy *Top = getTopOfStackOrNull();
987 return Top ? Top->CurScope : nullptr;
988 }
989 void setContext(DeclContext *DC) { getTopOfStack().Context = DC; }
990 SourceLocation getConstructLoc() const {
991 const SharingMapTy *Top = getTopOfStackOrNull();
992 return Top ? Top->ConstructLoc : SourceLocation();
993 }
994
995 /// Do the check specified in \a Check to all component lists and return true
996 /// if any issue is found.
997 bool checkMappableExprComponentListsForDecl(
998 const ValueDecl *VD, bool CurrentRegionOnly,
999 const llvm::function_ref<
1000 bool(OMPClauseMappableExprCommon::MappableExprComponentListRef,
1001 OpenMPClauseKind)>
1002 Check) const {
1003 if (isStackEmpty())
1004 return false;
1005 auto SI = begin();
1006 auto SE = end();
1007
1008 if (SI == SE)
1009 return false;
1010
1011 if (CurrentRegionOnly)
1012 SE = std::next(SI);
1013 else
1014 std::advance(SI, 1);
1015
1016 for (; SI != SE; ++SI) {
1017 auto MI = SI->MappedExprComponents.find(VD);
1018 if (MI != SI->MappedExprComponents.end())
1019 for (OMPClauseMappableExprCommon::MappableExprComponentListRef L :
1020 MI->second.Components)
1021 if (Check(L, MI->second.Kind))
1022 return true;
1023 }
1024 return false;
1025 }
1026
1027 /// Do the check specified in \a Check to all component lists at a given level
1028 /// and return true if any issue is found.
1029 bool checkMappableExprComponentListsForDeclAtLevel(
1030 const ValueDecl *VD, unsigned Level,
1031 const llvm::function_ref<
1032 bool(OMPClauseMappableExprCommon::MappableExprComponentListRef,
1033 OpenMPClauseKind)>
1034 Check) const {
1035 if (getStackSize() <= Level)
1036 return false;
1037
1038 const SharingMapTy &StackElem = getStackElemAtLevel(Level);
1039 auto MI = StackElem.MappedExprComponents.find(VD);
1040 if (MI != StackElem.MappedExprComponents.end())
1041 for (OMPClauseMappableExprCommon::MappableExprComponentListRef L :
1042 MI->second.Components)
1043 if (Check(L, MI->second.Kind))
1044 return true;
1045 return false;
1046 }
1047
1048 /// Create a new mappable expression component list associated with a given
1049 /// declaration and initialize it with the provided list of components.
1050 void addMappableExpressionComponents(
1051 const ValueDecl *VD,
1052 OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
1053 OpenMPClauseKind WhereFoundClauseKind) {
1054 MappedExprComponentTy &MEC = getTopOfStack().MappedExprComponents[VD];
1055 // Create new entry and append the new components there.
1056 MEC.Components.resize(MEC.Components.size() + 1);
1057 MEC.Components.back().append(Components.begin(), Components.end());
1058 MEC.Kind = WhereFoundClauseKind;
1059 }
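 // For illustration, a clause such as
 //   map(tofrom : s.p[0:n])
 // is decomposed into a list of components (roughly 's', 's.p', 's.p[0:n]')
 // that is stored here, keyed by the base declaration, together with the
 // kind of the clause where the expression was found.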
1060
1061 unsigned getNestingLevel() const {
1062 assert(!isStackEmpty());
1063 return getStackSize() - 1;
1064 }
1065 void addDoacrossDependClause(OMPClause *C, const OperatorOffsetTy &OpsOffs) {
1066 SharingMapTy *Parent = getSecondOnStackOrNull();
1067 assert(Parent && isOpenMPWorksharingDirective(Parent->Directive));
1068 Parent->DoacrossDepends.try_emplace(C, OpsOffs);
1069 }
1070 llvm::iterator_range<DoacrossClauseMapTy::const_iterator>
1071 getDoacrossDependClauses() const {
1072 const SharingMapTy &StackElem = getTopOfStack();
1073 if (isOpenMPWorksharingDirective(StackElem.Directive)) {
1074 const DoacrossClauseMapTy &Ref = StackElem.DoacrossDepends;
1075 return llvm::make_range(Ref.begin(), Ref.end());
1076 }
1077 return llvm::make_range(StackElem.DoacrossDepends.end(),
1078 StackElem.DoacrossDepends.end());
1079 }
1080
1081 // Store types of classes which have been explicitly mapped
1082 void addMappedClassesQualTypes(QualType QT) {
1083 SharingMapTy &StackElem = getTopOfStack();
1084 StackElem.MappedClassesQualTypes.insert(QT);
1085 }
1086
1087 // Check if the class type has been explicitly mapped before
1088 bool isClassPreviouslyMapped(QualType QT) const {
1089 const SharingMapTy &StackElem = getTopOfStack();
1090 return StackElem.MappedClassesQualTypes.contains(QT);
1091 }
1092
1093 /// Adds global declare target to the parent target region.
1094 void addToParentTargetRegionLinkGlobals(DeclRefExpr *E) {
1095 assert(*OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(
1096 E->getDecl()) == OMPDeclareTargetDeclAttr::MT_Link &&
1097 "Expected declare target link global.");
1098 for (auto &Elem : *this) {
1099 if (isOpenMPTargetExecutionDirective(Elem.Directive)) {
1100 Elem.DeclareTargetLinkVarDecls.push_back(E);
1101 return;
1102 }
1103 }
1104 }
1105
1106 /// Returns the list of globals with declare target link if current directive
1107 /// is target.
1108 ArrayRef<DeclRefExpr *> getLinkGlobals() const {
1109 assert(isOpenMPTargetExecutionDirective(getCurrentDirective()) &&
1110 "Expected target executable directive.");
1111 return getTopOfStack().DeclareTargetLinkVarDecls;
1112 }
1113
1114 /// Adds list of allocators expressions.
1115 void addInnerAllocatorExpr(Expr *E) {
1116 getTopOfStack().InnerUsedAllocators.push_back(E);
1117 }
1118 /// Return list of used allocators.
1119 ArrayRef<Expr *> getInnerAllocators() const {
1120 return getTopOfStack().InnerUsedAllocators;
1121 }
1122 /// Marks the declaration as implicitly firstprivate in the task-based
1123 /// regions.
1124 void addImplicitTaskFirstprivate(unsigned Level, Decl *D) {
1125 getStackElemAtLevel(Level).ImplicitTaskFirstprivates.insert(D);
1126 }
1127 /// Checks if the decl is implicitly firstprivate in the task-based region.
1128 bool isImplicitTaskFirstprivate(Decl *D) const {
1129 return getTopOfStack().ImplicitTaskFirstprivates.contains(D);
1130 }
1131
1132 /// Marks decl as used in uses_allocators clause as the allocator.
1133 void addUsesAllocatorsDecl(const Decl *D, UsesAllocatorsDeclKind Kind) {
1134 getTopOfStack().UsesAllocatorsDecls.try_emplace(D, Kind);
1135 }
1136 /// Checks if specified decl is used in uses allocator clause as the
1137 /// allocator.
1138 std::optional<UsesAllocatorsDeclKind>
1139 isUsesAllocatorsDecl(unsigned Level, const Decl *D) const {
1140 const SharingMapTy &StackElem = getTopOfStack();
1141 auto I = StackElem.UsesAllocatorsDecls.find(D);
1142 if (I == StackElem.UsesAllocatorsDecls.end())
1143 return std::nullopt;
1144 return I->getSecond();
1145 }
1146 std::optional<UsesAllocatorsDeclKind>
1147 isUsesAllocatorsDecl(const Decl *D) const {
1148 const SharingMapTy &StackElem = getTopOfStack();
1149 auto I = StackElem.UsesAllocatorsDecls.find(D);
1150 if (I == StackElem.UsesAllocatorsDecls.end())
1151 return std::nullopt;
1152 return I->getSecond();
1153 }
1154
1155 void addDeclareMapperVarRef(Expr *Ref) {
1156 SharingMapTy &StackElem = getTopOfStack();
1157 StackElem.DeclareMapperVar = Ref;
1158 }
1159 const Expr *getDeclareMapperVarRef() const {
1160 const SharingMapTy *Top = getTopOfStackOrNull();
1161 return Top ? Top->DeclareMapperVar : nullptr;
1162 }
1163
1164 /// Add a new iterator variable.
1165 void addIteratorVarDecl(VarDecl *VD) {
1166 SharingMapTy &StackElem = getTopOfStack();
1167 StackElem.IteratorVarDecls.push_back(VD->getCanonicalDecl());
1168 }
1169 /// Check if variable declaration is an iterator VarDecl.
1170 bool isIteratorVarDecl(const VarDecl *VD) const {
1171 const SharingMapTy *Top = getTopOfStackOrNull();
1172 if (!Top)
1173 return false;
1174
1175 return llvm::is_contained(Top->IteratorVarDecls, VD->getCanonicalDecl());
1176 }
1177 /// get captured field from ImplicitDefaultFirstprivateFDs
1178 VarDecl *getImplicitFDCapExprDecl(const FieldDecl *FD) const {
1179 const_iterator I = begin();
1180 const_iterator EndI = end();
1181 size_t StackLevel = getStackSize();
1182 for (; I != EndI; ++I) {
1183 if (I->DefaultAttr == DSA_firstprivate || I->DefaultAttr == DSA_private)
1184 break;
1185 StackLevel--;
1186 }
1187 assert((StackLevel > 0 && I != EndI) || (StackLevel == 0 && I == EndI));
1188 if (I == EndI)
1189 return nullptr;
1190 for (const auto &IFD : I->ImplicitDefaultFirstprivateFDs)
1191 if (IFD.FD == FD && IFD.StackLevel == StackLevel)
1192 return IFD.VD;
1193 return nullptr;
1194 }
1195 /// Check if capture decl is field captured in ImplicitDefaultFirstprivateFDs
1196 bool isImplicitDefaultFirstprivateFD(VarDecl *VD) const {
1197 const_iterator I = begin();
1198 const_iterator EndI = end();
1199 for (; I != EndI; ++I)
1200 if (I->DefaultAttr == DSA_firstprivate || I->DefaultAttr == DSA_private)
1201 break;
1202 if (I == EndI)
1203 return false;
1204 for (const auto &IFD : I->ImplicitDefaultFirstprivateFDs)
1205 if (IFD.VD == VD)
1206 return true;
1207 return false;
1208 }
1209 /// Store capture FD info in ImplicitDefaultFirstprivateFDs
1210 void addImplicitDefaultFirstprivateFD(const FieldDecl *FD, VarDecl *VD) {
1211 iterator I = begin();
1212 const_iterator EndI = end();
1213 size_t StackLevel = getStackSize();
1214 for (; I != EndI; ++I) {
1215 if (I->DefaultAttr == DSA_private || I->DefaultAttr == DSA_firstprivate) {
1216 I->ImplicitDefaultFirstprivateFDs.emplace_back(FD, StackLevel, VD);
1217 break;
1218 }
1219 StackLevel--;
1220 }
1221 assert((StackLevel > 0 && I != EndI) || (StackLevel == 0 && I == EndI));
1222 }
1223};
1224
1225bool isImplicitTaskingRegion(OpenMPDirectiveKind DKind) {
1226 return isOpenMPParallelDirective(DKind) || isOpenMPTeamsDirective(DKind);
1227}
1228
1229bool isImplicitOrExplicitTaskingRegion(OpenMPDirectiveKind DKind) {
1230 return isImplicitTaskingRegion(DKind) || isOpenMPTaskingDirective(DKind) ||
1231 DKind == OMPD_unknown;
1232}
1233
1234} // namespace
1235
1236static const Expr *getExprAsWritten(const Expr *E) {
1237 if (const auto *FE = dyn_cast<FullExpr>(E))
1238 E = FE->getSubExpr();
1239
1240 if (const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
1241 E = MTE->getSubExpr();
1242
1243 while (const auto *Binder = dyn_cast<CXXBindTemporaryExpr>(E))
1244 E = Binder->getSubExpr();
1245
1246 if (const auto *ICE = dyn_cast<ImplicitCastExpr>(E))
1247 E = ICE->getSubExprAsWritten();
1248 return E->IgnoreParens();
1249}
1250
1251static Expr *getExprAsWritten(Expr *E) {
1252 return const_cast<Expr *>(getExprAsWritten(const_cast<const Expr *>(E)));
1253}
1254
1255static const ValueDecl *getCanonicalDecl(const ValueDecl *D) {
1256 if (const auto *CED = dyn_cast<OMPCapturedExprDecl>(D))
1257 if (const auto *ME = dyn_cast<MemberExpr>(getExprAsWritten(CED->getInit())))
1258 D = ME->getMemberDecl();
1259
1260 D = cast<ValueDecl>(D->getCanonicalDecl());
1261 return D;
1262}
1263
1264static ValueDecl *getCanonicalDecl(ValueDecl *D) {
1265 return const_cast<ValueDecl *>(
1266 getCanonicalDecl(const_cast<const ValueDecl *>(D)));
1267}
1268
1269DSAStackTy::DSAVarData DSAStackTy::getDSA(const_iterator &Iter,
1270 ValueDecl *D) const {
1271 D = getCanonicalDecl(D);
1272 auto *VD = dyn_cast<VarDecl>(D);
1273 const auto *FD = dyn_cast<FieldDecl>(D);
1274 DSAVarData DVar;
1275 if (Iter == end()) {
1276 // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1277 // in a region but not in construct]
1278 // File-scope or namespace-scope variables referenced in called routines
1279 // in the region are shared unless they appear in a threadprivate
1280 // directive.
1281 if (VD && !VD->isFunctionOrMethodVarDecl() && !isa<ParmVarDecl>(VD))
1282 DVar.CKind = OMPC_shared;
1283
1284 // OpenMP [2.9.1.2, Data-sharing Attribute Rules for Variables Referenced
1285 // in a region but not in construct]
1286 // Variables with static storage duration that are declared in called
1287 // routines in the region are shared.
1288 if (VD && VD->hasGlobalStorage())
1289 DVar.CKind = OMPC_shared;
1290
1291 // Non-static data members are shared by default.
1292 if (FD)
1293 DVar.CKind = OMPC_shared;
1294
1295 return DVar;
1296 }
1297
1298 // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1299 // in a Construct, C/C++, predetermined, p.1]
1300 // Variables with automatic storage duration that are declared in a scope
1301 // inside the construct are private.
1302 if (VD && isOpenMPLocal(VD, Iter) && VD->isLocalVarDecl() &&
1303 (VD->getStorageClass() == SC_Auto || VD->getStorageClass() == SC_None)) {
1304 DVar.CKind = OMPC_private;
1305 return DVar;
1306 }
1307
1308 DVar.DKind = Iter->Directive;
1309 // Explicitly specified attributes and local variables with predetermined
1310 // attributes.
1311 if (Iter->SharingMap.count(D)) {
1312 const DSAInfo &Data = Iter->SharingMap.lookup(D);
1313 DVar.RefExpr = Data.RefExpr.getPointer();
1314 DVar.PrivateCopy = Data.PrivateCopy;
1315 DVar.CKind = Data.Attributes;
1316 DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
1317 DVar.Modifier = Data.Modifier;
1318 DVar.AppliedToPointee = Data.AppliedToPointee;
1319 return DVar;
1320 }
1321
1322 // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1323 // in a Construct, C/C++, implicitly determined, p.1]
1324 // In a parallel or task construct, the data-sharing attributes of these
1325 // variables are determined by the default clause, if present.
1326 switch (Iter->DefaultAttr) {
1327 case DSA_shared:
1328 DVar.CKind = OMPC_shared;
1329 DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
1330 return DVar;
1331 case DSA_none:
1332 return DVar;
1333 case DSA_firstprivate:
1334 if (VD && VD->getStorageDuration() == SD_Static &&
1335 VD->getDeclContext()->isFileContext()) {
1336 DVar.CKind = OMPC_unknown;
1337 } else {
1338 DVar.CKind = OMPC_firstprivate;
1339 }
1340 DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
1341 return DVar;
1342 case DSA_private:
1343 // each variable with static storage duration that is declared
1344 // in a namespace or global scope and referenced in the construct,
1345 // and that does not have a predetermined data-sharing attribute
1346 if (VD && VD->getStorageDuration() == SD_Static &&
1347 VD->getDeclContext()->isFileContext()) {
1348 DVar.CKind = OMPC_unknown;
1349 } else {
1350 DVar.CKind = OMPC_private;
1351 }
1352 DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
1353 return DVar;
1354 case DSA_unspecified:
1355 // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1356 // in a Construct, implicitly determined, p.2]
1357 // In a parallel construct, if no default clause is present, these
1358 // variables are shared.
1359 DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
1360 if ((isOpenMPParallelDirective(DVar.DKind) &&
1361 !isOpenMPTaskLoopDirective(DVar.DKind)) ||
1362 isOpenMPTeamsDirective(DVar.DKind)) {
1363 DVar.CKind = OMPC_shared;
1364 return DVar;
1365 }
1366
1367 // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1368 // in a Construct, implicitly determined, p.4]
1369 // In a task construct, if no default clause is present, a variable that in
1370 // the enclosing context is determined to be shared by all implicit tasks
1371 // bound to the current team is shared.
1372 if (isOpenMPTaskingDirective(DVar.DKind)) {
1373 DSAVarData DVarTemp;
1374 const_iterator I = Iter, E = end();
1375 do {
1376 ++I;
1377 // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables
1378 // Referenced in a Construct, implicitly determined, p.6]
1379 // In a task construct, if no default clause is present, a variable
1380 // whose data-sharing attribute is not determined by the rules above is
1381 // firstprivate.
1382 DVarTemp = getDSA(I, D);
1383 if (DVarTemp.CKind != OMPC_shared) {
1384 DVar.RefExpr = nullptr;
1385 DVar.CKind = OMPC_firstprivate;
1386 return DVar;
1387 }
1388 } while (I != E && !isImplicitTaskingRegion(I->Directive));
1389 DVar.CKind =
1390 (DVarTemp.CKind == OMPC_unknown) ? OMPC_firstprivate : OMPC_shared;
1391 return DVar;
1392 }
1393 }
1394 // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1395 // in a Construct, implicitly determined, p.3]
1396 // For constructs other than task, if no default clause is present, these
1397 // variables inherit their data-sharing attributes from the enclosing
1398 // context.
1399 return getDSA(++Iter, D);
1400}
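// For illustration, the implicit rules implemented above mean that in
//   int g;              // namespace-scope variable
//   void foo(int x) {
//   #pragma omp task    // no 'default' clause
//     g += x;
//   }
// 'g' is shared in the enclosing context and stays shared inside the task,
// while the parameter 'x' is not predetermined shared and is therefore
// implicitly firstprivate in the task.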
1401
1402const Expr *DSAStackTy::addUniqueAligned(const ValueDecl *D,
1403 const Expr *NewDE) {
1404 assert(!isStackEmpty() && "Data sharing attributes stack is empty");
1405 D = getCanonicalDecl(D);
1406 SharingMapTy &StackElem = getTopOfStack();
1407 auto It = StackElem.AlignedMap.find(D);
1408 if (It == StackElem.AlignedMap.end()) {
1409 assert(NewDE && "Unexpected nullptr expr to be added into aligned map");
1410 StackElem.AlignedMap[D] = NewDE;
1411 return nullptr;
1412 }
1413 assert(It->second && "Unexpected nullptr expr in the aligned map");
1414 return It->second;
1415}
1416
1417const Expr *DSAStackTy::addUniqueNontemporal(const ValueDecl *D,
1418 const Expr *NewDE) {
1419 assert(!isStackEmpty() && "Data sharing attributes stack is empty");
1420 D = getCanonicalDecl(D);
1421 SharingMapTy &StackElem = getTopOfStack();
1422 auto It = StackElem.NontemporalMap.find(D);
1423 if (It == StackElem.NontemporalMap.end()) {
1424 assert(NewDE && "Unexpected nullptr expr to be added into aligned map");
1425 StackElem.NontemporalMap[D] = NewDE;
1426 return nullptr;
1427 }
1428 assert(It->second && "Unexpected nullptr expr in the aligned map");
1429 return It->second;
1430}
1431
1432void DSAStackTy::addLoopControlVariable(const ValueDecl *D, VarDecl *Capture) {
1433 assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
1434 D = getCanonicalDecl(D);
1435 SharingMapTy &StackElem = getTopOfStack();
1436 StackElem.LCVMap.try_emplace(
1437 D, LCDeclInfo(StackElem.LCVMap.size() + 1, Capture));
1438}
1439
1440const DSAStackTy::LCDeclInfo
1441DSAStackTy::isLoopControlVariable(const ValueDecl *D) const {
1442 assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
1443 D = getCanonicalDecl(D);
1444 const SharingMapTy &StackElem = getTopOfStack();
1445 auto It = StackElem.LCVMap.find(D);
1446 if (It != StackElem.LCVMap.end())
1447 return It->second;
1448 return {0, nullptr};
1449}
1450
1451const DSAStackTy::LCDeclInfo
1452DSAStackTy::isLoopControlVariable(const ValueDecl *D, unsigned Level) const {
1453 assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
1454 D = getCanonicalDecl(D);
1455 for (unsigned I = Level + 1; I > 0; --I) {
1456 const SharingMapTy &StackElem = getStackElemAtLevel(I - 1);
1457 auto It = StackElem.LCVMap.find(D);
1458 if (It != StackElem.LCVMap.end())
1459 return It->second;
1460 }
1461 return {0, nullptr};
1462}
1463
1464const DSAStackTy::LCDeclInfo
1465DSAStackTy::isParentLoopControlVariable(const ValueDecl *D) const {
1466 const SharingMapTy *Parent = getSecondOnStackOrNull();
1467 assert(Parent && "Data-sharing attributes stack is empty");
1468 D = getCanonicalDecl(D);
1469 auto It = Parent->LCVMap.find(D);
1470 if (It != Parent->LCVMap.end())
1471 return It->second;
1472 return {0, nullptr};
1473}
1474
1475const ValueDecl *DSAStackTy::getParentLoopControlVariable(unsigned I) const {
1476 const SharingMapTy *Parent = getSecondOnStackOrNull();
1477 assert(Parent && "Data-sharing attributes stack is empty");
1478 if (Parent->LCVMap.size() < I)
1479 return nullptr;
1480 for (const auto &Pair : Parent->LCVMap)
1481 if (Pair.second.first == I)
1482 return Pair.first;
1483 return nullptr;
1484}
1485
1486void DSAStackTy::addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
1487 DeclRefExpr *PrivateCopy, unsigned Modifier,
1488 bool AppliedToPointee) {
1489 D = getCanonicalDecl(D);
1490 if (A == OMPC_threadprivate) {
1491 DSAInfo &Data = Threadprivates[D];
1492 Data.Attributes = A;
1493 Data.RefExpr.setPointer(E);
1494 Data.PrivateCopy = nullptr;
1495 Data.Modifier = Modifier;
1496 } else {
1497 DSAInfo &Data = getTopOfStack().SharingMap[D];
1498 assert(Data.Attributes == OMPC_unknown || (A == Data.Attributes) ||
1499 (A == OMPC_firstprivate && Data.Attributes == OMPC_lastprivate) ||
1500 (A == OMPC_lastprivate && Data.Attributes == OMPC_firstprivate) ||
1501 (isLoopControlVariable(D).first && A == OMPC_private));
1502 Data.Modifier = Modifier;
1503 if (A == OMPC_lastprivate && Data.Attributes == OMPC_firstprivate) {
1504 Data.RefExpr.setInt(/*IntVal=*/true);
1505 return;
1506 }
1507 const bool IsLastprivate =
1508 A == OMPC_lastprivate || Data.Attributes == OMPC_lastprivate;
1509 Data.Attributes = A;
1510 Data.RefExpr.setPointerAndInt(E, IsLastprivate);
1511 Data.PrivateCopy = PrivateCopy;
1512 Data.AppliedToPointee = AppliedToPointee;
1513 if (PrivateCopy) {
1514 DSAInfo &Data = getTopOfStack().SharingMap[PrivateCopy->getDecl()];
1515 Data.Modifier = Modifier;
1516 Data.Attributes = A;
1517 Data.RefExpr.setPointerAndInt(PrivateCopy, IsLastprivate);
1518 Data.PrivateCopy = nullptr;
1519 Data.AppliedToPointee = AppliedToPointee;
1520 }
1521 }
1522}
1523
1524/// Build a variable declaration for an OpenMP loop iteration variable.
1525static VarDecl *buildVarDecl(Sema &SemaRef, SourceLocation Loc, QualType Type,
1526                             StringRef Name, const AttrVec *Attrs = nullptr,
1527                             DeclRefExpr *OrigRef = nullptr) {
1528 DeclContext *DC = SemaRef.CurContext;
1529 IdentifierInfo *II = &SemaRef.PP.getIdentifierTable().get(Name);
1530 TypeSourceInfo *TInfo = SemaRef.Context.getTrivialTypeSourceInfo(Type, Loc);
1531 auto *Decl =
1532 VarDecl::Create(SemaRef.Context, DC, Loc, Loc, II, Type, TInfo, SC_None);
1533 if (Attrs) {
1534 for (specific_attr_iterator<AlignedAttr> I(Attrs->begin()), E(Attrs->end());
1535 I != E; ++I)
1536 Decl->addAttr(*I);
1537 }
1538 Decl->setImplicit();
1539 if (OrigRef) {
1540 Decl->addAttr(
1541 OMPReferencedVarAttr::CreateImplicit(SemaRef.Context, OrigRef));
1542 }
1543 return Decl;
1544}
1545
1546static DeclRefExpr *buildDeclRefExpr(Sema &S, VarDecl *D, QualType Ty,
1547                                     SourceLocation Loc,
1548 bool RefersToCapture = false) {
1549 D->setReferenced();
1550 D->markUsed(S.Context);
1551 return DeclRefExpr::Create(S.getASTContext(), NestedNameSpecifierLoc(),
1552 SourceLocation(), D, RefersToCapture, Loc, Ty,
1553 VK_LValue);
1554}
1555
1556void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
1557 BinaryOperatorKind BOK) {
1558 D = getCanonicalDecl(D);
1559 assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
1560 assert(
1561 getTopOfStack().SharingMap[D].Attributes == OMPC_reduction &&
1562 "Additional reduction info may be specified only for reduction items.");
1563 ReductionData &ReductionData = getTopOfStack().ReductionMap[D];
1564 assert(ReductionData.ReductionRange.isInvalid() &&
1565 (getTopOfStack().Directive == OMPD_taskgroup ||
1566 ((isOpenMPParallelDirective(getTopOfStack().Directive) ||
1567 isOpenMPWorksharingDirective(getTopOfStack().Directive)) &&
1568 !isOpenMPSimdDirective(getTopOfStack().Directive))) &&
1569 "Additional reduction info may be specified only once for reduction "
1570 "items.");
1571 ReductionData.set(BOK, SR);
1572 Expr *&TaskgroupReductionRef = getTopOfStack().TaskgroupReductionRef;
1573 if (!TaskgroupReductionRef) {
1574 VarDecl *VD = buildVarDecl(SemaRef, SR.getBegin(),
1575 SemaRef.Context.VoidPtrTy, ".task_red.");
1576 TaskgroupReductionRef =
1577 buildDeclRefExpr(SemaRef, VD, SemaRef.Context.VoidPtrTy, SR.getBegin());
1578 }
1579}
1580
1581void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
1582 const Expr *ReductionRef) {
1583 D = getCanonicalDecl(D);
1584 assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
1585 assert(
1586 getTopOfStack().SharingMap[D].Attributes == OMPC_reduction &&
1587 "Additional reduction info may be specified only for reduction items.");
1588 ReductionData &ReductionData = getTopOfStack().ReductionMap[D];
1589 assert(ReductionData.ReductionRange.isInvalid() &&
1590 (getTopOfStack().Directive == OMPD_taskgroup ||
1591 ((isOpenMPParallelDirective(getTopOfStack().Directive) ||
1592 isOpenMPWorksharingDirective(getTopOfStack().Directive)) &&
1593 !isOpenMPSimdDirective(getTopOfStack().Directive))) &&
1594 "Additional reduction info may be specified only once for reduction "
1595 "items.");
1596 ReductionData.set(ReductionRef, SR);
1597 Expr *&TaskgroupReductionRef = getTopOfStack().TaskgroupReductionRef;
1598 if (!TaskgroupReductionRef) {
1599 VarDecl *VD = buildVarDecl(SemaRef, SR.getBegin(),
1600 SemaRef.Context.VoidPtrTy, ".task_red.");
1601 TaskgroupReductionRef =
1602 buildDeclRefExpr(SemaRef, VD, SemaRef.Context.VoidPtrTy, SR.getBegin());
1603 }
1604}
1605
1606const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
1607 const ValueDecl *D, SourceRange &SR, BinaryOperatorKind &BOK,
1608 Expr *&TaskgroupDescriptor) const {
1609 D = getCanonicalDecl(D);
1610 assert(!isStackEmpty() && "Data-sharing attributes stack is empty.");
1611 for (const_iterator I = begin() + 1, E = end(); I != E; ++I) {
1612 const DSAInfo &Data = I->SharingMap.lookup(D);
1613 if (Data.Attributes != OMPC_reduction ||
1614 Data.Modifier != OMPC_REDUCTION_task)
1615 continue;
1616 const ReductionData &ReductionData = I->ReductionMap.lookup(D);
1617 if (!ReductionData.ReductionOp ||
1618 ReductionData.ReductionOp.is<const Expr *>())
1619 return DSAVarData();
1620 SR = ReductionData.ReductionRange;
1621 BOK = ReductionData.ReductionOp.get<ReductionData::BOKPtrType>();
1622 assert(I->TaskgroupReductionRef && "taskgroup reduction reference "
1623 "expression for the descriptor is not "
1624 "set.");
1625 TaskgroupDescriptor = I->TaskgroupReductionRef;
1626 return DSAVarData(I->Directive, OMPC_reduction, Data.RefExpr.getPointer(),
1627 Data.PrivateCopy, I->DefaultAttrLoc, OMPC_REDUCTION_task,
1628 /*AppliedToPointee=*/false);
1629 }
1630 return DSAVarData();
1631}
1632
1633const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
1634 const ValueDecl *D, SourceRange &SR, const Expr *&ReductionRef,
1635 Expr *&TaskgroupDescriptor) const {
1636 D = getCanonicalDecl(D);
1637 assert(!isStackEmpty() && "Data-sharing attributes stack is empty.");
1638 for (const_iterator I = begin() + 1, E = end(); I != E; ++I) {
1639 const DSAInfo &Data = I->SharingMap.lookup(D);
1640 if (Data.Attributes != OMPC_reduction ||
1641 Data.Modifier != OMPC_REDUCTION_task)
1642 continue;
1643 const ReductionData &ReductionData = I->ReductionMap.lookup(D);
1644 if (!ReductionData.ReductionOp ||
1645 !ReductionData.ReductionOp.is<const Expr *>())
1646 return DSAVarData();
1647 SR = ReductionData.ReductionRange;
1648 ReductionRef = ReductionData.ReductionOp.get<const Expr *>();
1649 assert(I->TaskgroupReductionRef && "taskgroup reduction reference "
1650 "expression for the descriptor is not "
1651 "set.");
1652 TaskgroupDescriptor = I->TaskgroupReductionRef;
1653 return DSAVarData(I->Directive, OMPC_reduction, Data.RefExpr.getPointer(),
1654 Data.PrivateCopy, I->DefaultAttrLoc, OMPC_REDUCTION_task,
1655 /*AppliedToPointee=*/false);
1656 }
1657 return DSAVarData();
1658}
1659
1660bool DSAStackTy::isOpenMPLocal(VarDecl *D, const_iterator I) const {
1661 D = D->getCanonicalDecl();
1662 for (const_iterator E = end(); I != E; ++I) {
1663 if (isImplicitOrExplicitTaskingRegion(I->Directive) ||
1664 isOpenMPTargetExecutionDirective(I->Directive)) {
1665 if (I->CurScope) {
1666 Scope *TopScope = I->CurScope->getParent();
1667 Scope *CurScope = getCurScope();
1668 while (CurScope && CurScope != TopScope && !CurScope->isDeclScope(D))
1669 CurScope = CurScope->getParent();
1670 return CurScope != TopScope;
1671 }
1672 for (DeclContext *DC = D->getDeclContext(); DC; DC = DC->getParent())
1673 if (I->Context == DC)
1674 return true;
1675 return false;
1676 }
1677 }
1678 return false;
1679}
1680
1681static bool isConstNotMutableType(Sema &SemaRef, QualType Type,
1682 bool AcceptIfMutable = true,
1683 bool *IsClassType = nullptr) {
1684 ASTContext &Context = SemaRef.getASTContext();
1685 Type = Type.getNonReferenceType().getCanonicalType();
1686 bool IsConstant = Type.isConstant(Context);
1687 Type = Context.getBaseElementType(Type);
1688 const CXXRecordDecl *RD = AcceptIfMutable && SemaRef.getLangOpts().CPlusPlus
1689                               ? Type->getAsCXXRecordDecl()
1690 : nullptr;
1691 if (const auto *CTSD = dyn_cast_or_null<ClassTemplateSpecializationDecl>(RD))
1692 if (const ClassTemplateDecl *CTD = CTSD->getSpecializedTemplate())
1693 RD = CTD->getTemplatedDecl();
1694 if (IsClassType)
1695 *IsClassType = RD;
1696 return IsConstant && !(SemaRef.getLangOpts().CPlusPlus && RD &&
1697 RD->hasDefinition() && RD->hasMutableFields());
1698}
1699
1700static bool rejectConstNotMutableType(Sema &SemaRef, const ValueDecl *D,
1701                                      QualType Type, OpenMPClauseKind CKind,
1702 SourceLocation ELoc,
1703 bool AcceptIfMutable = true,
1704 bool ListItemNotVar = false) {
1705 ASTContext &Context = SemaRef.getASTContext();
1706 bool IsClassType;
1707 if (isConstNotMutableType(SemaRef, Type, AcceptIfMutable, &IsClassType)) {
1708 unsigned Diag = ListItemNotVar ? diag::err_omp_const_list_item
1709 : IsClassType ? diag::err_omp_const_not_mutable_variable
1710 : diag::err_omp_const_variable;
1711 SemaRef.Diag(ELoc, Diag) << getOpenMPClauseName(CKind);
1712 if (!ListItemNotVar && D) {
1713 const VarDecl *VD = dyn_cast<VarDecl>(D);
1714 bool IsDecl = !VD || VD->isThisDeclarationADefinition(Context) ==
1715                              VarDecl::DeclarationOnly;
1716 SemaRef.Diag(D->getLocation(),
1717 IsDecl ? diag::note_previous_decl : diag::note_defined_here)
1718 << D;
1719 }
1720 return true;
1721 }
1722 return false;
1723}
1724
1725const DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D,
1726 bool FromParent) {
1727 D = getCanonicalDecl(D);
1728 DSAVarData DVar;
1729
1730 auto *VD = dyn_cast<VarDecl>(D);
1731 auto TI = Threadprivates.find(D);
1732 if (TI != Threadprivates.end()) {
1733 DVar.RefExpr = TI->getSecond().RefExpr.getPointer();
1734 DVar.CKind = OMPC_threadprivate;
1735 DVar.Modifier = TI->getSecond().Modifier;
1736 return DVar;
1737 }
1738 if (VD && VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
1739 DVar.RefExpr = buildDeclRefExpr(
1740 SemaRef, VD, D->getType().getNonReferenceType(),
1741 VD->getAttr<OMPThreadPrivateDeclAttr>()->getLocation());
1742 DVar.CKind = OMPC_threadprivate;
1743 addDSA(D, DVar.RefExpr, OMPC_threadprivate);
1744 return DVar;
1745 }
1746 // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1747 // in a Construct, C/C++, predetermined, p.1]
1748 // Variables appearing in threadprivate directives are threadprivate.
1749 if ((VD && VD->getTLSKind() != VarDecl::TLS_None &&
1750 !(VD->hasAttr<OMPThreadPrivateDeclAttr>() &&
1751 SemaRef.getLangOpts().OpenMPUseTLS &&
1752 SemaRef.getASTContext().getTargetInfo().isTLSSupported())) ||
1753 (VD && VD->getStorageClass() == SC_Register &&
1754 VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())) {
1755 DVar.RefExpr = buildDeclRefExpr(
1756 SemaRef, VD, D->getType().getNonReferenceType(), D->getLocation());
1757 DVar.CKind = OMPC_threadprivate;
1758 addDSA(D, DVar.RefExpr, OMPC_threadprivate);
1759 return DVar;
1760 }
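  // Editor's note (illustrative sketch, not part of the original source): the
  // predetermined-threadprivate rule above covers TLS variables, e.g.
  //   static thread_local int Counter;
  //   #pragma omp parallel
  //   { ++Counter; }   // each thread updates its own copy of Counter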
1761 if (SemaRef.getLangOpts().OpenMPCUDAMode && VD &&
1762 VD->isLocalVarDeclOrParm() && !isStackEmpty() &&
1763 !isLoopControlVariable(D).first) {
1764 const_iterator IterTarget =
1765 std::find_if(begin(), end(), [](const SharingMapTy &Data) {
1766 return isOpenMPTargetExecutionDirective(Data.Directive);
1767 });
1768 if (IterTarget != end()) {
1769 const_iterator ParentIterTarget = IterTarget + 1;
1770 for (const_iterator Iter = begin(); Iter != ParentIterTarget; ++Iter) {
1771 if (isOpenMPLocal(VD, Iter)) {
1772 DVar.RefExpr =
1773 buildDeclRefExpr(SemaRef, VD, D->getType().getNonReferenceType(),
1774 D->getLocation());
1775 DVar.CKind = OMPC_threadprivate;
1776 return DVar;
1777 }
1778 }
1779 if (!isClauseParsingMode() || IterTarget != begin()) {
1780 auto DSAIter = IterTarget->SharingMap.find(D);
1781 if (DSAIter != IterTarget->SharingMap.end() &&
1782 isOpenMPPrivate(DSAIter->getSecond().Attributes)) {
1783 DVar.RefExpr = DSAIter->getSecond().RefExpr.getPointer();
1784 DVar.CKind = OMPC_threadprivate;
1785 return DVar;
1786 }
1787 const_iterator End = end();
1788 if (!SemaRef.OpenMP().isOpenMPCapturedByRef(
1789 D, std::distance(ParentIterTarget, End),
1790 /*OpenMPCaptureLevel=*/0)) {
1791 DVar.RefExpr =
1792 buildDeclRefExpr(SemaRef, VD, D->getType().getNonReferenceType(),
1793 IterTarget->ConstructLoc);
1794 DVar.CKind = OMPC_threadprivate;
1795 return DVar;
1796 }
1797 }
1798 }
1799 }
1800
1801 if (isStackEmpty())
1802 // Not in OpenMP execution region and top scope was already checked.
1803 return DVar;
1804
1805 // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1806 // in a Construct, C/C++, predetermined, p.4]
1807 // Static data members are shared.
1808 // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1809 // in a Construct, C/C++, predetermined, p.7]
1810 // Variables with static storage duration that are declared in a scope
1811 // inside the construct are shared.
1812 if (VD && VD->isStaticDataMember()) {
1813 // Check for explicitly specified attributes.
1814 const_iterator I = begin();
1815 const_iterator EndI = end();
1816 if (FromParent && I != EndI)
1817 ++I;
1818 if (I != EndI) {
1819 auto It = I->SharingMap.find(D);
1820 if (It != I->SharingMap.end()) {
1821 const DSAInfo &Data = It->getSecond();
1822 DVar.RefExpr = Data.RefExpr.getPointer();
1823 DVar.PrivateCopy = Data.PrivateCopy;
1824 DVar.CKind = Data.Attributes;
1825 DVar.ImplicitDSALoc = I->DefaultAttrLoc;
1826 DVar.DKind = I->Directive;
1827 DVar.Modifier = Data.Modifier;
1828 DVar.AppliedToPointee = Data.AppliedToPointee;
1829 return DVar;
1830 }
1831 }
1832
1833 DVar.CKind = OMPC_shared;
1834 return DVar;
1835 }
1836
1837 auto &&MatchesAlways = [](OpenMPDirectiveKind) { return true; };
1838 // The predetermined shared attribute for const-qualified types having no
1839 // mutable members was removed after OpenMP 3.1.
1840 if (SemaRef.LangOpts.OpenMP <= 31) {
1841 // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1842 // in a Construct, C/C++, predetermined, p.6]
1843 // Variables with const qualified type having no mutable member are
1844 // shared.
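    // Editor's illustration (assumed example, not from the original source):
    // with -fopenmp-version=31 the const variable N below is predetermined
    // shared and needs no clause even under default(none); from OpenMP 4.0 on
    // it must be listed explicitly, e.g. shared(N) or firstprivate(N).
    //   const int N = 42;
    //   #pragma omp parallel default(none) shared(N)
    //   { int Copy = N; }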
1845 if (isConstNotMutableType(SemaRef, D->getType())) {
1846 // Variables with const-qualified type having no mutable member may be
1847 // listed in a firstprivate clause, even if they are static data members.
1848 DSAVarData DVarTemp = hasInnermostDSA(
1849 D,
1850 [](OpenMPClauseKind C, bool) {
1851 return C == OMPC_firstprivate || C == OMPC_shared;
1852 },
1853 MatchesAlways, FromParent);
1854 if (DVarTemp.CKind != OMPC_unknown && DVarTemp.RefExpr)
1855 return DVarTemp;
1856
1857 DVar.CKind = OMPC_shared;
1858 return DVar;
1859 }
1860 }
1861
1862 // Explicitly specified attributes and local variables with predetermined
1863 // attributes.
1864 const_iterator I = begin();
1865 const_iterator EndI = end();
1866 if (FromParent && I != EndI)
1867 ++I;
1868 if (I == EndI)
1869 return DVar;
1870 auto It = I->SharingMap.find(D);
1871 if (It != I->SharingMap.end()) {
1872 const DSAInfo &Data = It->getSecond();
1873 DVar.RefExpr = Data.RefExpr.getPointer();
1874 DVar.PrivateCopy = Data.PrivateCopy;
1875 DVar.CKind = Data.Attributes;
1876 DVar.ImplicitDSALoc = I->DefaultAttrLoc;
1877 DVar.DKind = I->Directive;
1878 DVar.Modifier = Data.Modifier;
1879 DVar.AppliedToPointee = Data.AppliedToPointee;
1880 }
1881
1882 return DVar;
1883}
1884
1885const DSAStackTy::DSAVarData DSAStackTy::getImplicitDSA(ValueDecl *D,
1886 bool FromParent) const {
1887 if (isStackEmpty()) {
1888 const_iterator I;
1889 return getDSA(I, D);
1890 }
1891 D = getCanonicalDecl(D);
1892 const_iterator StartI = begin();
1893 const_iterator EndI = end();
1894 if (FromParent && StartI != EndI)
1895 ++StartI;
1896 return getDSA(StartI, D);
1897}
1898
1899const DSAStackTy::DSAVarData DSAStackTy::getImplicitDSA(ValueDecl *D,
1900 unsigned Level) const {
1901 if (getStackSize() <= Level)
1902 return DSAVarData();
1903 D = getCanonicalDecl(D);
1904 const_iterator StartI = std::next(begin(), getStackSize() - 1 - Level);
1905 return getDSA(StartI, D);
1906}
1907
1908const DSAStackTy::DSAVarData
1909DSAStackTy::hasDSA(ValueDecl *D,
1910 const llvm::function_ref<bool(OpenMPClauseKind, bool,
1911 DefaultDataSharingAttributes)>
1912 CPred,
1913 const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
1914 bool FromParent) const {
1915 if (isStackEmpty())
1916 return {};
1917 D = getCanonicalDecl(D);
1918 const_iterator I = begin();
1919 const_iterator EndI = end();
1920 if (FromParent && I != EndI)
1921 ++I;
1922 for (; I != EndI; ++I) {
1923 if (!DPred(I->Directive) &&
1924 !isImplicitOrExplicitTaskingRegion(I->Directive))
1925 continue;
1926 const_iterator NewI = I;
1927 DSAVarData DVar = getDSA(NewI, D);
1928 if (I == NewI && CPred(DVar.CKind, DVar.AppliedToPointee, I->DefaultAttr))
1929 return DVar;
1930 }
1931 return {};
1932}
1933
1934const DSAStackTy::DSAVarData DSAStackTy::hasInnermostDSA(
1935 ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind, bool)> CPred,
1936 const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
1937 bool FromParent) const {
1938 if (isStackEmpty())
1939 return {};
1940 D = getCanonicalDecl(D);
1941 const_iterator StartI = begin();
1942 const_iterator EndI = end();
1943 if (FromParent && StartI != EndI)
1944 ++StartI;
1945 if (StartI == EndI || !DPred(StartI->Directive))
1946 return {};
1947 const_iterator NewI = StartI;
1948 DSAVarData DVar = getDSA(NewI, D);
1949 return (NewI == StartI && CPred(DVar.CKind, DVar.AppliedToPointee))
1950 ? DVar
1951 : DSAVarData();
1952}
1953
1954bool DSAStackTy::hasExplicitDSA(
1955 const ValueDecl *D,
1956 const llvm::function_ref<bool(OpenMPClauseKind, bool)> CPred,
1957 unsigned Level, bool NotLastprivate) const {
1958 if (getStackSize() <= Level)
1959 return false;
1960 D = getCanonicalDecl(D);
1961 const SharingMapTy &StackElem = getStackElemAtLevel(Level);
1962 auto I = StackElem.SharingMap.find(D);
1963 if (I != StackElem.SharingMap.end() && I->getSecond().RefExpr.getPointer() &&
1964 CPred(I->getSecond().Attributes, I->getSecond().AppliedToPointee) &&
1965 (!NotLastprivate || !I->getSecond().RefExpr.getInt()))
1966 return true;
1967 // Check predetermined rules for the loop control variables.
1968 auto LI = StackElem.LCVMap.find(D);
1969 if (LI != StackElem.LCVMap.end())
1970 return CPred(OMPC_private, /*AppliedToPointee=*/false);
1971 return false;
1972}
1973
1974bool DSAStackTy::hasExplicitDirective(
1975 const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
1976 unsigned Level) const {
1977 if (getStackSize() <= Level)
1978 return false;
1979 const SharingMapTy &StackElem = getStackElemAtLevel(Level);
1980 return DPred(StackElem.Directive);
1981}
1982
1983bool DSAStackTy::hasDirective(
1984 const llvm::function_ref<bool(OpenMPDirectiveKind,
1985                                  const DeclarationNameInfo &, SourceLocation)>
1986 DPred,
1987 bool FromParent) const {
1988 // We look only in the enclosing region.
1989 size_t Skip = FromParent ? 2 : 1;
1990 for (const_iterator I = begin() + std::min(Skip, getStackSize()), E = end();
1991 I != E; ++I) {
1992 if (DPred(I->Directive, I->DirectiveName, I->ConstructLoc))
1993 return true;
1994 }
1995 return false;
1996}
1997
1998void SemaOpenMP::InitDataSharingAttributesStack() {
1999 VarDataSharingAttributesStack = new DSAStackTy(SemaRef);
2000}
2001
2002#define DSAStack static_cast<DSAStackTy *>(VarDataSharingAttributesStack)
2003
2004void SemaOpenMP::pushOpenMPFunctionRegion() { DSAStack->pushFunction(); }
2005
2006void SemaOpenMP::popOpenMPFunctionRegion(const FunctionScopeInfo *OldFSI) {
2007 DSAStack->popFunction(OldFSI);
2008}
2009
2010static bool isOpenMPDeviceDelayedContext(Sema &S) {
2011 assert(S.LangOpts.OpenMP && S.LangOpts.OpenMPIsTargetDevice &&
2012 "Expected OpenMP device compilation.");
2013  return !S.OpenMP().isInOpenMPTargetExecutionDirective();
2014}
2015
2016namespace {
2017/// Status of the function emission on the host/device.
2018enum class FunctionEmissionStatus {
2019 Emitted,
2020 Discarded,
2021 Unknown,
2022};
2023} // anonymous namespace
2024
2025SemaBase::SemaDiagnosticBuilder
2026SemaOpenMP::diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID,
2027 const FunctionDecl *FD) {
2028 assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
2029 "Expected OpenMP device compilation.");
2030
2031 SemaDiagnosticBuilder::Kind Kind = SemaDiagnosticBuilder::K_Nop;
2032 if (FD) {
2033    Sema::FunctionEmissionStatus FES = SemaRef.getEmissionStatus(FD);
2034 switch (FES) {
2035    case Sema::FunctionEmissionStatus::Emitted:
2036 Kind = SemaDiagnosticBuilder::K_Immediate;
2037 break;
2038    case Sema::FunctionEmissionStatus::Unknown:
2039 // TODO: We should always delay diagnostics here in case a target
2040 // region is in a function we do not emit. However, as the
2041 // current diagnostics are associated with the function containing
2042 // the target region and we do not emit that one, we would miss out
2043 // on diagnostics for the target region itself. We need to anchor
2044 // the diagnostics with the new generated function *or* ensure we
2045 // emit diagnostics associated with the surrounding function.
2046      Kind = isOpenMPDeviceDelayedContext(SemaRef)
2047 ? SemaDiagnosticBuilder::K_Deferred
2048 : SemaDiagnosticBuilder::K_Immediate;
2049 break;
2050    case Sema::FunctionEmissionStatus::TemplateDiscarded:
2051    case Sema::FunctionEmissionStatus::OMPDiscarded:
2052 Kind = SemaDiagnosticBuilder::K_Nop;
2053 break;
2054    case Sema::FunctionEmissionStatus::CUDADiscarded:
2055 llvm_unreachable("CUDADiscarded unexpected in OpenMP device compilation");
2056 break;
2057 }
2058 }
2059
2060 return SemaDiagnosticBuilder(Kind, Loc, DiagID, FD, SemaRef);
2061}
2062
2063SemaBase::SemaDiagnosticBuilder
2064SemaOpenMP::diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID,
2065 const FunctionDecl *FD) {
2066 assert(getLangOpts().OpenMP && !getLangOpts().OpenMPIsTargetDevice &&
2067 "Expected OpenMP host compilation.");
2068
2069 SemaDiagnosticBuilder::Kind Kind = SemaDiagnosticBuilder::K_Nop;
2070 if (FD) {
2071    Sema::FunctionEmissionStatus FES = SemaRef.getEmissionStatus(FD);
2072 switch (FES) {
2073    case Sema::FunctionEmissionStatus::Emitted:
2074 Kind = SemaDiagnosticBuilder::K_Immediate;
2075 break;
2076    case Sema::FunctionEmissionStatus::Unknown:
2077 Kind = SemaDiagnosticBuilder::K_Deferred;
2078 break;
2079    case Sema::FunctionEmissionStatus::TemplateDiscarded:
2080    case Sema::FunctionEmissionStatus::OMPDiscarded:
2081    case Sema::FunctionEmissionStatus::CUDADiscarded:
2082 Kind = SemaDiagnosticBuilder::K_Nop;
2083 break;
2084 }
2085 }
2086
2087 return SemaDiagnosticBuilder(Kind, Loc, DiagID, FD, SemaRef);
2088}
2089
2090static OpenMPDefaultmapClauseKind
2091getVariableCategoryFromDecl(const LangOptions &LO, const ValueDecl *VD) {
2092 if (LO.OpenMP <= 45) {
2093    if (VD->getType().getNonReferenceType()->isScalarType())
2094 return OMPC_DEFAULTMAP_scalar;
2095 return OMPC_DEFAULTMAP_aggregate;
2096 }
2097  if (VD->getType().getNonReferenceType()->isAnyPointerType())
2098 return OMPC_DEFAULTMAP_pointer;
2099  if (VD->getType().getNonReferenceType()->isScalarType())
2100 return OMPC_DEFAULTMAP_scalar;
2101 return OMPC_DEFAULTMAP_aggregate;
2102}
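// Editor's illustration (assumed example, not from the original source):
// under OpenMP >= 5.0 an 'int *P' falls into the 'pointer' category while a
// plain 'int X' is 'scalar', so a clause such as
//   #pragma omp target defaultmap(tofrom : scalar)
// affects X but not P; with -fopenmp-version=45 only the scalar/aggregate
// split computed above is available.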
2103
2104bool SemaOpenMP::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
2105 unsigned OpenMPCaptureLevel) const {
2106 assert(getLangOpts().OpenMP && "OpenMP is not allowed");
2107
2108 ASTContext &Ctx = getASTContext();
2109 bool IsByRef = true;
2110
2111 // Find the directive that is associated with the provided scope.
2112 D = cast<ValueDecl>(D->getCanonicalDecl());
2113 QualType Ty = D->getType();
2114
2115 bool IsVariableUsedInMapClause = false;
2116 if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective, Level)) {
2117 // This table summarizes how a given variable should be passed to the device
2118 // given its type and the clauses where it appears. This table is based on
2119 // the description in OpenMP 4.5 [2.10.4, target Construct] and
2120 // OpenMP 4.5 [2.15.5, Data-mapping Attribute Rules and Clauses].
2121 //
2122 // =========================================================================
2123 // | type | defaultmap | pvt | first | is_device_ptr | map | res. |
2124 // | |(tofrom:scalar)| | pvt | |has_dv_adr| |
2125 // =========================================================================
2126 // | scl | | | | - | | bycopy|
2127 // | scl | | - | x | - | - | bycopy|
2128 // | scl | | x | - | - | - | null |
2129 // | scl | x | | | - | | byref |
2130 // | scl | x | - | x | - | - | bycopy|
2131 // | scl | x | x | - | - | - | null |
2132 // | scl | | - | - | - | x | byref |
2133 // | scl | x | - | - | - | x | byref |
2134 //
2135 // | agg | n.a. | | | - | | byref |
2136 // | agg | n.a. | - | x | - | - | byref |
2137 // | agg | n.a. | x | - | - | - | null |
2138 // | agg | n.a. | - | - | - | x | byref |
2139 // | agg | n.a. | - | - | - | x[] | byref |
2140 //
2141 // | ptr | n.a. | | | - | | bycopy|
2142 // | ptr | n.a. | - | x | - | - | bycopy|
2143 // | ptr | n.a. | x | - | - | - | null |
2144 // | ptr | n.a. | - | - | - | x | byref |
2145 // | ptr | n.a. | - | - | - | x[] | bycopy|
2146 // | ptr | n.a. | - | - | x | | bycopy|
2147 // | ptr | n.a. | - | - | x | x | bycopy|
2148 // | ptr | n.a. | - | - | x | x[] | bycopy|
2149 // =========================================================================
2150 // Legend:
2151 // scl - scalar
2152 // ptr - pointer
2153 // agg - aggregate
2154 // x - applies
2155 // - - invalid in this combination
2156 // [] - mapped with an array section
2157 // byref - should be mapped by reference
2158 // bycopy - should be mapped by value (passed by copy)
2159 // null - initialize a local variable to null on the device
2160 //
2161 // Observations:
2162 // - All scalar declarations that show up in a map clause have to be passed
2163 // by reference, because they may have been mapped in the enclosing data
2164 // environment.
2165 // - If the scalar value does not fit the size of uintptr, it has to be
2166 // passed by reference, regardless the result in the table above.
2167 // - For pointers mapped by value that have either an implicit map or an
2168 // array section, the runtime library may pass the NULL value to the
2169 // device instead of the value passed to it by the compiler.
2170
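  // Editor's illustration (assumed example, not part of the original source),
  // matching the scalar rows of the table above:
  //   int X = 0;
  //   #pragma omp target map(tofrom : X)   // scalar in a map clause -> byref
  //   { X++; }
  //   #pragma omp target firstprivate(X)   // explicit firstprivate  -> bycopy
  //   { X++; }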
2171 if (Ty->isReferenceType())
2172 Ty = Ty->castAs<ReferenceType>()->getPointeeType();
2173
2174 // Locate map clauses and see if the variable being captured is referred to
2175 // in any of those clauses. Here we only care about variables, not fields,
2176 // because fields are part of aggregates.
2177 bool IsVariableAssociatedWithSection = false;
2178
2179 DSAStack->checkMappableExprComponentListsForDeclAtLevel(
2180 D, Level,
2181 [&IsVariableUsedInMapClause, &IsVariableAssociatedWithSection,
2182         D](OMPClauseMappableExprCommon::MappableExprComponentListRef
2183 MapExprComponents,
2184 OpenMPClauseKind WhereFoundClauseKind) {
2185 // Information from both the map and has_device_addr clauses influences
2186 // how a variable is captured. E.g., is_device_ptr does not require
2187 // changing the default behavior.
2188 if (WhereFoundClauseKind != OMPC_map &&
2189 WhereFoundClauseKind != OMPC_has_device_addr)
2190 return false;
2191
2192 auto EI = MapExprComponents.rbegin();
2193 auto EE = MapExprComponents.rend();
2194
2195 assert(EI != EE && "Invalid map expression!");
2196
2197 if (isa<DeclRefExpr>(EI->getAssociatedExpression()))
2198 IsVariableUsedInMapClause |= EI->getAssociatedDeclaration() == D;
2199
2200 ++EI;
2201 if (EI == EE)
2202 return false;
2203 auto Last = std::prev(EE);
2204 const auto *UO =
2205 dyn_cast<UnaryOperator>(Last->getAssociatedExpression());
2206 if ((UO && UO->getOpcode() == UO_Deref) ||
2207 isa<ArraySubscriptExpr>(Last->getAssociatedExpression()) ||
2208 isa<ArraySectionExpr>(Last->getAssociatedExpression()) ||
2209 isa<MemberExpr>(EI->getAssociatedExpression()) ||
2210 isa<OMPArrayShapingExpr>(Last->getAssociatedExpression())) {
2211 IsVariableAssociatedWithSection = true;
2212 // There is nothing more we need to know about this variable.
2213 return true;
2214 }
2215
2216 // Keep looking for more map info.
2217 return false;
2218 });
2219
2220 if (IsVariableUsedInMapClause) {
2221 // If variable is identified in a map clause it is always captured by
2222 // reference except if it is a pointer that is dereferenced somehow.
2223 IsByRef = !(Ty->isPointerType() && IsVariableAssociatedWithSection);
2224 } else {
2225 // By default, all the data that has a scalar type is mapped by copy
2226 // (except for reduction variables).
2227 // defaultmap(scalar) is mutually exclusive with defaultmap(pointer).
2228 IsByRef = (DSAStack->isForceCaptureByReferenceInTargetExecutable() &&
2229 !Ty->isAnyPointerType()) ||
2230 !Ty->isScalarType() ||
2231 DSAStack->isDefaultmapCapturedByRef(
2232                    Level, getVariableCategoryFromDecl(getLangOpts(), D)) ||
2233 DSAStack->hasExplicitDSA(
2234 D,
2235 [](OpenMPClauseKind K, bool AppliedToPointee) {
2236 return K == OMPC_reduction && !AppliedToPointee;
2237 },
2238 Level);
2239 }
2240 }
2241
2242 if (IsByRef && Ty.getNonReferenceType()->isScalarType()) {
2243 IsByRef =
2244 ((IsVariableUsedInMapClause &&
2245 DSAStack->getCaptureRegion(Level, OpenMPCaptureLevel) ==
2246 OMPD_target) ||
2247 !(DSAStack->hasExplicitDSA(
2248 D,
2249 [](OpenMPClauseKind K, bool AppliedToPointee) -> bool {
2250 return K == OMPC_firstprivate ||
2251 (K == OMPC_reduction && AppliedToPointee);
2252 },
2253 Level, /*NotLastprivate=*/true) ||
2254 DSAStack->isUsesAllocatorsDecl(Level, D))) &&
2255 // If the variable is artificial and must be captured by value - try to
2256 // capture by value.
2257 !(isa<OMPCapturedExprDecl>(D) && !D->hasAttr<OMPCaptureNoInitAttr>() &&
2258 !cast<OMPCapturedExprDecl>(D)->getInit()->isGLValue()) &&
2259 // If the variable is implicitly firstprivate and scalar - capture by
2260 // copy
2261 !((DSAStack->getDefaultDSA() == DSA_firstprivate ||
2262 DSAStack->getDefaultDSA() == DSA_private) &&
2263 !DSAStack->hasExplicitDSA(
2264 D, [](OpenMPClauseKind K, bool) { return K != OMPC_unknown; },
2265 Level) &&
2266 !DSAStack->isLoopControlVariable(D, Level).first);
2267 }
2268
2269 // When passing data by copy, we need to make sure it fits the uintptr size
2270 // and alignment, because the runtime library only deals with uintptr types.
2271 // If it does not fit the uintptr size, we need to pass the data by reference
2272 // instead.
2273 if (!IsByRef && (Ctx.getTypeSizeInChars(Ty) >
2274                      Ctx.getTypeSizeInChars(Ctx.getUIntPtrType()) ||
2275 Ctx.getAlignOfGlobalVarInChars(Ty, dyn_cast<VarDecl>(D)) >
2276 Ctx.getTypeAlignInChars(Ctx.getUIntPtrType()))) {
2277 IsByRef = true;
2278 }
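  // Editor's note (assumed example, not part of the original source): a scalar
  // wider than uintptr_t, e.g. a 16-byte 'long double' on x86-64, fails the
  // size check above and is therefore passed by reference even when the table
  // would otherwise select bycopy.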
2279
2280 return IsByRef;
2281}
2282
2283unsigned SemaOpenMP::getOpenMPNestingLevel() const {
2284 assert(getLangOpts().OpenMP);
2285 return DSAStack->getNestingLevel();
2286}
2287
2288bool SemaOpenMP::isInOpenMPTaskUntiedContext() const {
2289 return isOpenMPTaskingDirective(DSAStack->getCurrentDirective()) &&
2290 DSAStack->isUntiedRegion();
2291}
2292
2293bool SemaOpenMP::isInOpenMPTargetExecutionDirective() const {
2294 return (isOpenMPTargetExecutionDirective(DSAStack->getCurrentDirective()) &&
2295 !DSAStack->isClauseParsingMode()) ||
2296 DSAStack->hasDirective(
2297             [](OpenMPDirectiveKind K, const DeclarationNameInfo &,
2298 SourceLocation) -> bool {
2299               return isOpenMPTargetExecutionDirective(K);
2300 },
2301 false);
2302}
2303
2304bool SemaOpenMP::isOpenMPRebuildMemberExpr(ValueDecl *D) {
2305 // Only rebuild for Field.
2306 if (!dyn_cast<FieldDecl>(D))
2307 return false;
2308 DSAStackTy::DSAVarData DVarPrivate = DSAStack->hasDSA(
2309 D,
2310 [](OpenMPClauseKind C, bool AppliedToPointee,
2311 DefaultDataSharingAttributes DefaultAttr) {
2312 return isOpenMPPrivate(C) && !AppliedToPointee &&
2313 (DefaultAttr == DSA_firstprivate || DefaultAttr == DSA_private);
2314 },
2315 [](OpenMPDirectiveKind) { return true; },
2316 DSAStack->isClauseParsingMode());
2317 if (DVarPrivate.CKind != OMPC_unknown)
2318 return true;
2319 return false;
2320}
2321
2322static OMPCapturedExprDecl *buildCaptureDecl(Sema &S, IdentifierInfo *Id,
2323 Expr *CaptureExpr, bool WithInit,
2324 DeclContext *CurContext,
2325 bool AsExpression);
2326
2327VarDecl *SemaOpenMP::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo,
2328 unsigned StopAt) {
2329 assert(getLangOpts().OpenMP && "OpenMP is not allowed");
2330 D = getCanonicalDecl(D);
2331
2332 auto *VD = dyn_cast<VarDecl>(D);
2333 // Do not capture constexpr variables.
2334 if (VD && VD->isConstexpr())
2335 return nullptr;
2336
2337 // If we want to determine whether the variable should be captured from the
2338 // perspective of the current capturing scope, and we've already left all the
2339 // capturing scopes of the top directive on the stack, check from the
2340 // perspective of its parent directive (if any) instead.
2341 DSAStackTy::ParentDirectiveScope InParentDirectiveRAII(
2342 *DSAStack, CheckScopeInfo && DSAStack->isBodyComplete());
2343
2344 // If we are attempting to capture a global variable in a directive with
2345 // 'target' we return true so that this global is also mapped to the device.
2346 //
2347 if (VD && !VD->hasLocalStorage() &&
2348      (SemaRef.getCurCapturedRegion() || SemaRef.getCurBlock() ||
2349 SemaRef.getCurLambda())) {
2350    if (isInOpenMPTargetExecutionDirective()) {
2351 DSAStackTy::DSAVarData DVarTop =
2352 DSAStack->getTopDSA(D, DSAStack->isClauseParsingMode());
2353 if (DVarTop.CKind != OMPC_unknown && DVarTop.RefExpr)
2354 return VD;
2355 // If the declaration is enclosed in a 'declare target' directive,
2356 // then it should not be captured.
2357 //
2358 if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
2359 return nullptr;
2360 CapturedRegionScopeInfo *CSI = nullptr;
2361 for (FunctionScopeInfo *FSI : llvm::drop_begin(
2362 llvm::reverse(SemaRef.FunctionScopes),
2363 CheckScopeInfo ? (SemaRef.FunctionScopes.size() - (StopAt + 1))
2364 : 0)) {
2365 if (!isa<CapturingScopeInfo>(FSI))
2366 return nullptr;
2367 if (auto *RSI = dyn_cast<CapturedRegionScopeInfo>(FSI))
2368 if (RSI->CapRegionKind == CR_OpenMP) {
2369 CSI = RSI;
2370 break;
2371 }
2372 }
2373 assert(CSI && "Failed to find CapturedRegionScopeInfo");
2374      SmallVector<OpenMPDirectiveKind, 4> Regions;
2375      getOpenMPCaptureRegions(Regions,
2376 DSAStack->getDirective(CSI->OpenMPLevel));
2377 if (Regions[CSI->OpenMPCaptureLevel] != OMPD_task)
2378 return VD;
2379 }
2380    if (isInOpenMPDeclareTargetContext()) {
2381 // Try to mark variable as declare target if it is used in capturing
2382 // regions.
2383 if (getLangOpts().OpenMP <= 45 &&
2384 !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
2385        checkDeclIsAllowedInOpenMPTarget(nullptr, VD);
2386 return nullptr;
2387 }
2388 }
2389
2390 if (CheckScopeInfo) {
2391 bool OpenMPFound = false;
2392 for (unsigned I = StopAt + 1; I > 0; --I) {
2393      FunctionScopeInfo *FSI = SemaRef.FunctionScopes[I - 1];
2394 if (!isa<CapturingScopeInfo>(FSI))
2395 return nullptr;
2396 if (auto *RSI = dyn_cast<CapturedRegionScopeInfo>(FSI))
2397 if (RSI->CapRegionKind == CR_OpenMP) {
2398 OpenMPFound = true;
2399 break;
2400 }
2401 }
2402 if (!OpenMPFound)
2403 return nullptr;
2404 }
2405
2406 if (DSAStack->getCurrentDirective() != OMPD_unknown &&
2407 (!DSAStack->isClauseParsingMode() ||
2408 DSAStack->getParentDirective() != OMPD_unknown)) {
2409 auto &&Info = DSAStack->isLoopControlVariable(D);
2410 if (Info.first ||
2411 (VD && VD->hasLocalStorage() &&
2412 isImplicitOrExplicitTaskingRegion(DSAStack->getCurrentDirective())) ||
2413 (VD && DSAStack->isForceVarCapturing()))
2414 return VD ? VD : Info.second;
2415 DSAStackTy::DSAVarData DVarTop =
2416 DSAStack->getTopDSA(D, DSAStack->isClauseParsingMode());
2417 if (DVarTop.CKind != OMPC_unknown && isOpenMPPrivate(DVarTop.CKind) &&
2418 (!VD || VD->hasLocalStorage() || !DVarTop.AppliedToPointee))
2419 return VD ? VD : cast<VarDecl>(DVarTop.PrivateCopy->getDecl());
2420 // Threadprivate variables must not be captured.
2421 if (isOpenMPThreadPrivate(DVarTop.CKind))
2422 return nullptr;
2423 // The variable is not private or it is the variable in the directive with
2424 // default(none) clause and not used in any clause.
2425 DSAStackTy::DSAVarData DVarPrivate = DSAStack->hasDSA(
2426 D,
2427 [](OpenMPClauseKind C, bool AppliedToPointee, bool) {
2428 return isOpenMPPrivate(C) && !AppliedToPointee;
2429 },
2430 [](OpenMPDirectiveKind) { return true; },
2431 DSAStack->isClauseParsingMode());
2432 // Global shared must not be captured.
2433 if (VD && !VD->hasLocalStorage() && DVarPrivate.CKind == OMPC_unknown &&
2434 ((DSAStack->getDefaultDSA() != DSA_none &&
2435 DSAStack->getDefaultDSA() != DSA_private &&
2436 DSAStack->getDefaultDSA() != DSA_firstprivate) ||
2437 DVarTop.CKind == OMPC_shared))
2438 return nullptr;
2439 auto *FD = dyn_cast<FieldDecl>(D);
2440 if (DVarPrivate.CKind != OMPC_unknown && !VD && FD &&
2441 !DVarPrivate.PrivateCopy) {
2442 DSAStackTy::DSAVarData DVarPrivate = DSAStack->hasDSA(
2443 D,
2444 [](OpenMPClauseKind C, bool AppliedToPointee,
2445 DefaultDataSharingAttributes DefaultAttr) {
2446 return isOpenMPPrivate(C) && !AppliedToPointee &&
2447 (DefaultAttr == DSA_firstprivate ||
2448 DefaultAttr == DSA_private);
2449 },
2450 [](OpenMPDirectiveKind) { return true; },
2451 DSAStack->isClauseParsingMode());
2452 if (DVarPrivate.CKind == OMPC_unknown)
2453 return nullptr;
2454
2455 VarDecl *VD = DSAStack->getImplicitFDCapExprDecl(FD);
2456 if (VD)
2457 return VD;
2459 return nullptr;
2462 /*IsImplicit=*/true);
2463 const CXXScopeSpec CS = CXXScopeSpec();
2465 ThisExpr, /*IsArrow=*/true, SourceLocation(),
2468 /*HadMultipleCandidates=*/false, DeclarationNameInfo(), FD->getType(),
2471 SemaRef, FD->getIdentifier(), ME, DVarPrivate.CKind != OMPC_private,
2472 SemaRef.CurContext->getParent(), /*AsExpression=*/false);
2473 DeclRefExpr *VDPrivateRefExpr = buildDeclRefExpr(
2475 VD = cast<VarDecl>(VDPrivateRefExpr->getDecl());
2476 DSAStack->addImplicitDefaultFirstprivateFD(FD, VD);
2477 return VD;
2478 }
2479 if (DVarPrivate.CKind != OMPC_unknown ||
2480 (VD && (DSAStack->getDefaultDSA() == DSA_none ||
2481 DSAStack->getDefaultDSA() == DSA_private ||
2482 DSAStack->getDefaultDSA() == DSA_firstprivate)))
2483 return VD ? VD : cast<VarDecl>(DVarPrivate.PrivateCopy->getDecl());
2484 }
2485 return nullptr;
2486}
2487
2488void SemaOpenMP::adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
2489 unsigned Level) const {
2490 FunctionScopesIndex -= getOpenMPCaptureLevels(DSAStack->getDirective(Level));
2491}
2492
2493void SemaOpenMP::startOpenMPLoop() {
2494 assert(getLangOpts().OpenMP && "OpenMP must be enabled.");
2495 if (isOpenMPLoopDirective(DSAStack->getCurrentDirective()))
2496 DSAStack->loopInit();
2497}
2498
2499void SemaOpenMP::startOpenMPCXXRangeFor() {
2500 assert(getLangOpts().OpenMP && "OpenMP must be enabled.");
2501 if (isOpenMPLoopDirective(DSAStack->getCurrentDirective())) {
2502 DSAStack->resetPossibleLoopCounter();
2503 DSAStack->loopStart();
2504 }
2505}
2506
2507OpenMPClauseKind SemaOpenMP::isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
2508 unsigned CapLevel) const {
2509 assert(getLangOpts().OpenMP && "OpenMP is not allowed");
2510 if (DSAStack->getCurrentDirective() != OMPD_unknown &&
2511 (!DSAStack->isClauseParsingMode() ||
2512 DSAStack->getParentDirective() != OMPD_unknown)) {
2513 DSAStackTy::DSAVarData DVarPrivate = DSAStack->hasDSA(
2514 D,
2515 [](OpenMPClauseKind C, bool AppliedToPointee,
2516 DefaultDataSharingAttributes DefaultAttr) {
2517 return isOpenMPPrivate(C) && !AppliedToPointee &&
2518 DefaultAttr == DSA_private;
2519 },
2520 [](OpenMPDirectiveKind) { return true; },
2521 DSAStack->isClauseParsingMode());
2522 if (DVarPrivate.CKind == OMPC_private && isa<OMPCapturedExprDecl>(D) &&
2523 DSAStack->isImplicitDefaultFirstprivateFD(cast<VarDecl>(D)) &&
2524 !DSAStack->isLoopControlVariable(D).first)
2525 return OMPC_private;
2526 }
2527 if (DSAStack->hasExplicitDirective(isOpenMPTaskingDirective, Level)) {
2528 bool IsTriviallyCopyable =
2529 D->getType().getNonReferenceType().isTriviallyCopyableType(
2530 getASTContext()) &&
2531 !D->getType()
2532 .getNonReferenceType()
2533 .getCanonicalType()
2534 ->getAsCXXRecordDecl();
2535 OpenMPDirectiveKind DKind = DSAStack->getDirective(Level);
2536    SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
2537 getOpenMPCaptureRegions(CaptureRegions, DKind);
2538 if (isOpenMPTaskingDirective(CaptureRegions[CapLevel]) &&
2539 (IsTriviallyCopyable ||
2540 !isOpenMPTaskLoopDirective(CaptureRegions[CapLevel]))) {
2541 if (DSAStack->hasExplicitDSA(
2542 D,
2543 [](OpenMPClauseKind K, bool) { return K == OMPC_firstprivate; },
2544 Level, /*NotLastprivate=*/true))
2545 return OMPC_firstprivate;
2546 DSAStackTy::DSAVarData DVar = DSAStack->getImplicitDSA(D, Level);
2547 if (DVar.CKind != OMPC_shared &&
2548 !DSAStack->isLoopControlVariable(D, Level).first && !DVar.RefExpr) {
2549 DSAStack->addImplicitTaskFirstprivate(Level, D);
2550 return OMPC_firstprivate;
2551 }
2552 }
2553 }
2554 if (isOpenMPLoopDirective(DSAStack->getCurrentDirective()) &&
2555 !isOpenMPLoopTransformationDirective(DSAStack->getCurrentDirective())) {
2556 if (DSAStack->getAssociatedLoops() > 0 && !DSAStack->isLoopStarted()) {
2557 DSAStack->resetPossibleLoopCounter(D);
2558 DSAStack->loopStart();
2559 return OMPC_private;
2560 }
2561 if ((DSAStack->getPossiblyLoopCounter() == D->getCanonicalDecl() ||
2562 DSAStack->isLoopControlVariable(D).first) &&
2563 !DSAStack->hasExplicitDSA(
2564 D, [](OpenMPClauseKind K, bool) { return K != OMPC_private; },
2565 Level) &&
2566 !isOpenMPSimdDirective(DSAStack->getCurrentDirective()))
2567 return OMPC_private;
2568 }
2569 if (const auto *VD = dyn_cast<VarDecl>(D)) {
2570 if (DSAStack->isThreadPrivate(const_cast<VarDecl *>(VD)) &&
2571 DSAStack->isForceVarCapturing() &&
2572 !DSAStack->hasExplicitDSA(
2573 D, [](OpenMPClauseKind K, bool) { return K == OMPC_copyin; },
2574 Level))
2575 return OMPC_private;
2576 }
2577 // User-defined allocators are private since they must be defined in the
2578 // context of the target region.
2579 if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective, Level) &&
2580 DSAStack->isUsesAllocatorsDecl(Level, D).value_or(
2581 DSAStackTy::UsesAllocatorsDeclKind::AllocatorTrait) ==
2582 DSAStackTy::UsesAllocatorsDeclKind::UserDefinedAllocator)
2583 return OMPC_private;
2584 return (DSAStack->hasExplicitDSA(
2585 D, [](OpenMPClauseKind K, bool) { return K == OMPC_private; },
2586 Level) ||
2587 (DSAStack->isClauseParsingMode() &&
2588 DSAStack->getClauseParsingMode() == OMPC_private) ||
2589 // Consider taskgroup reduction descriptor variable a private
2590 // to avoid possible capture in the region.
2591 (DSAStack->hasExplicitDirective(
2592 [](OpenMPDirectiveKind K) {
2593 return K == OMPD_taskgroup ||
2594 ((isOpenMPParallelDirective(K) ||
2595 isOpenMPWorksharingDirective(K)) &&
2596 !isOpenMPSimdDirective(K));
2597 },
2598 Level) &&
2599 DSAStack->isTaskgroupReductionRef(D, Level)))
2600 ? OMPC_private
2601 : OMPC_unknown;
2602}
2603
2604void SemaOpenMP::setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D,
2605 unsigned Level) {
2606 assert(getLangOpts().OpenMP && "OpenMP is not allowed");
2607 D = getCanonicalDecl(D);
2608 OpenMPClauseKind OMPC = OMPC_unknown;
2609 for (unsigned I = DSAStack->getNestingLevel() + 1; I > Level; --I) {
2610 const unsigned NewLevel = I - 1;
2611 if (DSAStack->hasExplicitDSA(
2612 D,
2613 [&OMPC](const OpenMPClauseKind K, bool AppliedToPointee) {
2614 if (isOpenMPPrivate(K) && !AppliedToPointee) {
2615 OMPC = K;
2616 return true;
2617 }
2618 return false;
2619 },
2620 NewLevel))
2621 break;
2622 if (DSAStack->checkMappableExprComponentListsForDeclAtLevel(
2623 D, NewLevel,
2624            [](OMPClauseMappableExprCommon::MappableExprComponentListRef,
2625 OpenMPClauseKind) { return true; })) {
2626 OMPC = OMPC_map;
2627 break;
2628 }
2629 if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective,
2630 NewLevel)) {
2631 OMPC = OMPC_map;
2632 if (DSAStack->mustBeFirstprivateAtLevel(
2633            NewLevel, getVariableCategoryFromDecl(getLangOpts(), D)))
2634 OMPC = OMPC_firstprivate;
2635 break;
2636 }
2637 }
2638 if (OMPC != OMPC_unknown)
2639 FD->addAttr(
2640 OMPCaptureKindAttr::CreateImplicit(getASTContext(), unsigned(OMPC)));
2641}
2642
2643bool SemaOpenMP::isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level,
2644 unsigned CaptureLevel) const {
2645 assert(getLangOpts().OpenMP && "OpenMP is not allowed");
2646 // Return true if the current level is no longer enclosed in a target region.
2647
2648  SmallVector<OpenMPDirectiveKind, 4> Regions;
2649 getOpenMPCaptureRegions(Regions, DSAStack->getDirective(Level));
2650 const auto *VD = dyn_cast<VarDecl>(D);
2651 return VD && !VD->hasLocalStorage() &&
2652 DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective,
2653 Level) &&
2654 Regions[CaptureLevel] != OMPD_task;
2655}
2656
2657bool SemaOpenMP::isOpenMPGlobalCapturedDecl(const ValueDecl *D, unsigned Level,
2658 unsigned CaptureLevel) const {
2659 assert(getLangOpts().OpenMP && "OpenMP is not allowed");
2660 // Return true if the current level is no longer enclosed in a target region.
2661
2662 if (const auto *VD = dyn_cast<VarDecl>(D)) {
2663 if (!VD->hasLocalStorage()) {
2664      if (isInOpenMPTargetExecutionDirective())
2665 return true;
2666 DSAStackTy::DSAVarData TopDVar =
2667 DSAStack->getTopDSA(D, /*FromParent=*/false);
2668 unsigned NumLevels =
2669 getOpenMPCaptureLevels(DSAStack->getDirective(Level));
2670 if (Level == 0)
2671 // A non-file-scope static variable with default(firstprivate)
2672 // should be captured globally.
2673 return (NumLevels == CaptureLevel + 1 &&
2674 (TopDVar.CKind != OMPC_shared ||
2675 DSAStack->getDefaultDSA() == DSA_firstprivate));
2676 do {
2677 --Level;
2678 DSAStackTy::DSAVarData DVar = DSAStack->getImplicitDSA(D, Level);
2679 if (DVar.CKind != OMPC_shared)
2680 return true;
2681 } while (Level > 0);
2682 }
2683 }
2684 return true;
2685}
2686
2687void SemaOpenMP::DestroyDataSharingAttributesStack() { delete DSAStack; }
2688
2689void SemaOpenMP::ActOnOpenMPBeginDeclareVariant(SourceLocation Loc,
2690 OMPTraitInfo &TI) {
2691 OMPDeclareVariantScopes.push_back(OMPDeclareVariantScope(TI));
2692}
2693
2694void SemaOpenMP::ActOnOpenMPEndDeclareVariant() {
2695  assert(isInOpenMPDeclareVariantScope() &&
2696 "Not in OpenMP declare variant scope!");
2697
2698 OMPDeclareVariantScopes.pop_back();
2699}
2700
2701void SemaOpenMP::finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
2702 const FunctionDecl *Callee,
2703                                               SourceLocation Loc) {
2704 assert(getLangOpts().OpenMP && "Expected OpenMP compilation mode.");
2705 std::optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
2706 OMPDeclareTargetDeclAttr::getDeviceType(Caller->getMostRecentDecl());
2707 // Ignore host functions during device analysis.
2708 if (getLangOpts().OpenMPIsTargetDevice &&
2709 (!DevTy || *DevTy == OMPDeclareTargetDeclAttr::DT_Host))
2710 return;
2711 // Ignore nohost functions during host analysis.
2712 if (!getLangOpts().OpenMPIsTargetDevice && DevTy &&
2713 *DevTy == OMPDeclareTargetDeclAttr::DT_NoHost)
2714 return;
2715 const FunctionDecl *FD = Callee->getMostRecentDecl();
2716 DevTy = OMPDeclareTargetDeclAttr::getDeviceType(FD);
2717 if (getLangOpts().OpenMPIsTargetDevice && DevTy &&
2718 *DevTy == OMPDeclareTargetDeclAttr::DT_Host) {
2719 // Diagnose host function called during device codegen.
2720 StringRef HostDevTy =
2721 getOpenMPSimpleClauseTypeName(OMPC_device_type, OMPC_DEVICE_TYPE_host);
2722 Diag(Loc, diag::err_omp_wrong_device_function_call) << HostDevTy << 0;
2723 Diag(*OMPDeclareTargetDeclAttr::getLocation(FD),
2724 diag::note_omp_marked_device_type_here)
2725 << HostDevTy;
2726 return;
2727 }
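  // Editor's illustration (assumed example, not part of the original source):
  //   void HostOnly();
  //   #pragma omp declare target to(HostOnly) device_type(host)
  //   #pragma omp declare target
  //   void DeviceFn() { HostOnly(); }   // diagnosed during device compilation
  //   #pragma omp end declare target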
2728 if (!getLangOpts().OpenMPIsTargetDevice &&
2729 !getLangOpts().OpenMPOffloadMandatory && DevTy &&
2730 *DevTy == OMPDeclareTargetDeclAttr::DT_NoHost) {
2731 // In OpenMP 5.2 or later, if the function has a host variant then allow
2732 // that to be called instead
2733 auto &&HasHostAttr = [](const FunctionDecl *Callee) {
2734 for (OMPDeclareVariantAttr *A :
2735 Callee->specific_attrs<OMPDeclareVariantAttr>()) {
2736 auto *DeclRefVariant = cast<DeclRefExpr>(A->getVariantFuncRef());
2737 auto *VariantFD = cast<FunctionDecl>(DeclRefVariant->getDecl());
2738 std::optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
2739 OMPDeclareTargetDeclAttr::getDeviceType(
2740 VariantFD->getMostRecentDecl());
2741 if (!DevTy || *DevTy == OMPDeclareTargetDeclAttr::DT_Host)
2742 return true;
2743 }
2744 return false;
2745 };
2746 if (getLangOpts().OpenMP >= 52 &&
2747 Callee->hasAttr<OMPDeclareVariantAttr>() && HasHostAttr(Callee))
2748 return;
2749 // Diagnose nohost function called during host codegen.
2750 StringRef NoHostDevTy = getOpenMPSimpleClauseTypeName(
2751 OMPC_device_type, OMPC_DEVICE_TYPE_nohost);
2752 Diag(Loc, diag::err_omp_wrong_device_function_call) << NoHostDevTy << 1;
2753 Diag(*OMPDeclareTargetDeclAttr::getLocation(FD),
2754 diag::note_omp_marked_device_type_here)
2755 << NoHostDevTy;
2756 }
2757}
2758
2759void SemaOpenMP::StartOpenMPDSABlock(OpenMPDirectiveKind DKind,
2760 const DeclarationNameInfo &DirName,
2761 Scope *CurScope, SourceLocation Loc) {
2762 DSAStack->push(DKind, DirName, CurScope, Loc);
2763  SemaRef.PushExpressionEvaluationContext(
2764      Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
2765}
2766
2767void SemaOpenMP::StartOpenMPClause(OpenMPClauseKind K) {
2768 DSAStack->setClauseParsingMode(K);
2769}
2770
2771void SemaOpenMP::EndOpenMPClause() {
2772 DSAStack->setClauseParsingMode(/*K=*/OMPC_unknown);
2773  SemaRef.CleanupVarDeclMarking();
2774}
2775
2776static std::pair<ValueDecl *, bool>
2777getPrivateItem(Sema &S, Expr *&RefExpr, SourceLocation &ELoc,
2778 SourceRange &ERange, bool AllowArraySection = false,
2779 StringRef DiagType = "");
2780
2781/// Check consistency of the reduction clauses.
2782static void checkReductionClauses(Sema &S, DSAStackTy *Stack,
2783 ArrayRef<OMPClause *> Clauses) {
2784 bool InscanFound = false;
2785 SourceLocation InscanLoc;
2786 // OpenMP 5.0, 2.19.5.4 reduction Clause, Restrictions.
2787 // A reduction clause without the inscan reduction-modifier may not appear on
2788 // a construct on which a reduction clause with the inscan reduction-modifier
2789 // appears.
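  // Editor's illustration (assumed example, not from the original source):
  //   #pragma omp for reduction(inscan, + : A) reduction(+ : B)
  // is rejected because B's reduction clause lacks the inscan modifier while
  // A's clause uses it.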
2790 for (OMPClause *C : Clauses) {
2791 if (C->getClauseKind() != OMPC_reduction)
2792 continue;
2793 auto *RC = cast<OMPReductionClause>(C);
2794 if (RC->getModifier() == OMPC_REDUCTION_inscan) {
2795 InscanFound = true;
2796 InscanLoc = RC->getModifierLoc();
2797 continue;
2798 }
2799 if (RC->getModifier() == OMPC_REDUCTION_task) {
2800 // OpenMP 5.0, 2.19.5.4 reduction Clause.
2801 // A reduction clause with the task reduction-modifier may only appear on
2802 // a parallel construct, a worksharing construct or a combined or
2803 // composite construct for which any of the aforementioned constructs is a
2804 // constituent construct and simd or loop are not constituent constructs.
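      // Editor's illustration (assumed example, not from the original source):
      //   #pragma omp parallel reduction(task, + : Sum)   // accepted
      //   #pragma omp simd reduction(task, + : Sum)       // diagnosed below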
2805 OpenMPDirectiveKind CurDir = Stack->getCurrentDirective();
2806 if (!(isOpenMPParallelDirective(CurDir) ||
2807             isOpenMPWorksharingDirective(CurDir)) ||
2808 isOpenMPSimdDirective(CurDir))
2809 S.Diag(RC->getModifierLoc(),
2810 diag::err_omp_reduction_task_not_parallel_or_worksharing);
2811 continue;
2812 }
2813 }
2814 if (InscanFound) {
2815 for (OMPClause *C : Clauses) {
2816 if (C->getClauseKind() != OMPC_reduction)
2817 continue;
2818 auto *RC = cast<OMPReductionClause>(C);
2819 if (RC->getModifier() != OMPC_REDUCTION_inscan) {
2820 S.Diag(RC->getModifier() == OMPC_REDUCTION_unknown
2821 ? RC->getBeginLoc()
2822 : RC->getModifierLoc(),
2823 diag::err_omp_inscan_reduction_expected);
2824 S.Diag(InscanLoc, diag::note_omp_previous_inscan_reduction);
2825 continue;
2826 }
2827 for (Expr *Ref : RC->varlist()) {
2828 assert(Ref && "NULL expr in OpenMP reduction clause.");
2829 SourceLocation ELoc;
2830 SourceRange ERange;
2831 Expr *SimpleRefExpr = Ref;
2832 auto Res = getPrivateItem(S, SimpleRefExpr, ELoc, ERange,
2833 /*AllowArraySection=*/true);
2834 ValueDecl *D = Res.first;
2835 if (!D)
2836 continue;
2837 if (!Stack->isUsedInScanDirective(getCanonicalDecl(D))) {
2838 S.Diag(Ref->getExprLoc(),
2839 diag::err_omp_reduction_not_inclusive_exclusive)
2840 << Ref->getSourceRange();
2841 }
2842 }
2843 }
2844 }
2845}
2846
2847static void checkAllocateClauses(Sema &S, DSAStackTy *Stack,
2848 ArrayRef<OMPClause *> Clauses);
2849static DeclRefExpr *buildCapture(Sema &S, ValueDecl *D, Expr *CaptureExpr,
2850 bool WithInit);
2851
2852static void reportOriginalDsa(Sema &SemaRef, const DSAStackTy *Stack,
2853 const ValueDecl *D,
2854 const DSAStackTy::DSAVarData &DVar,
2855 bool IsLoopIterVar = false);
2856
2857void SemaOpenMP::EndOpenMPDSABlock(Stmt *CurDirective) {
2858 // OpenMP [2.14.3.5, Restrictions, C/C++, p.1]
2859 // A variable of class type (or array thereof) that appears in a lastprivate
2860 // clause requires an accessible, unambiguous default constructor for the
2861 // class type, unless the list item is also specified in a firstprivate
2862 // clause.
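  // Editor's sketch (assumed example, not part of the original source): for
  //   struct S { S(int); };   // no accessible default constructor
  //   S Obj(0);
  // 'lastprivate(Obj)' by itself requires a default constructor for S, while
  // 'firstprivate(Obj) lastprivate(Obj)' does not, because the private copy is
  // then copy-constructed from the original; the loop below builds the helper
  // private copies only for the pure-lastprivate case.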
2863 if (const auto *D = dyn_cast_or_null<OMPExecutableDirective>(CurDirective)) {
2864 for (OMPClause *C : D->clauses()) {
2865 if (auto *Clause = dyn_cast<OMPLastprivateClause>(C)) {
2866 SmallVector<Expr *, 8> PrivateCopies;
2867 for (Expr *DE : Clause->varlist()) {
2868 if (DE->isValueDependent() || DE->isTypeDependent()) {
2869 PrivateCopies.push_back(nullptr);
2870 continue;
2871 }
2872 auto *DRE = cast<DeclRefExpr>(DE->IgnoreParens());
2873 auto *VD = cast<VarDecl>(DRE->getDecl());
2874          QualType Type = VD->getType().getNonReferenceType();
2875 const DSAStackTy::DSAVarData DVar =
2876 DSAStack->getTopDSA(VD, /*FromParent=*/false);
2877 if (DVar.CKind == OMPC_lastprivate) {
2878 // Generate helper private variable and initialize it with the
2879 // default value. The address of the original variable is replaced
2880 // by the address of the new private variable in CodeGen. This new
2881 // variable is not added to IdResolver, so the code in the OpenMP
2882 // region uses original variable for proper diagnostics.
2883 VarDecl *VDPrivate = buildVarDecl(
2884 SemaRef, DE->getExprLoc(), Type.getUnqualifiedType(),
2885 VD->getName(), VD->hasAttrs() ? &VD->getAttrs() : nullptr, DRE);
2886          SemaRef.ActOnUninitializedDecl(VDPrivate);
2887 if (VDPrivate->isInvalidDecl()) {
2888 PrivateCopies.push_back(nullptr);
2889 continue;
2890 }
2891 PrivateCopies.push_back(buildDeclRefExpr(
2892 SemaRef, VDPrivate, DE->getType(), DE->getExprLoc()));
2893 } else {
2894 // The variable is also a firstprivate, so initialization sequence
2895 // for private copy is generated already.
2896 PrivateCopies.push_back(nullptr);
2897 }
2898 }
2899 Clause->setPrivateCopies(PrivateCopies);
2900 continue;
2901 }
2902 // Finalize nontemporal clause by handling private copies, if any.
2903 if (auto *Clause = dyn_cast<OMPNontemporalClause>(C)) {
2904 SmallVector<Expr *, 8> PrivateRefs;
2905 for (Expr *RefExpr : Clause->varlist()) {
2906 assert(RefExpr && "NULL expr in OpenMP nontemporal clause.");
2907 SourceLocation ELoc;
2908 SourceRange ERange;
2909 Expr *SimpleRefExpr = RefExpr;
2910 auto Res = getPrivateItem(SemaRef, SimpleRefExpr, ELoc, ERange);
2911 if (Res.second)
2912 // It will be analyzed later.
2913 PrivateRefs.push_back(RefExpr);
2914 ValueDecl *D = Res.first;
2915 if (!D)
2916 continue;
2917
2918 const DSAStackTy::DSAVarData DVar =
2919 DSAStack->getTopDSA(D, /*FromParent=*/false);
2920 PrivateRefs.push_back(DVar.PrivateCopy ? DVar.PrivateCopy
2921 : SimpleRefExpr);
2922 }
2923 Clause->setPrivateRefs(PrivateRefs);
2924 continue;
2925 }
2926 if (auto *Clause = dyn_cast<OMPUsesAllocatorsClause>(C)) {
2927 for (unsigned I = 0, E = Clause->getNumberOfAllocators(); I < E; ++I) {
2928 OMPUsesAllocatorsClause::Data D = Clause->getAllocatorData(I);
2929 auto *DRE = dyn_cast<DeclRefExpr>(D.Allocator->IgnoreParenImpCasts());
2930 if (!DRE)
2931 continue;
2932 ValueDecl *VD = DRE->getDecl();
2933 if (!VD || !isa<VarDecl>(VD))
2934 continue;
2935 DSAStackTy::DSAVarData DVar =
2936 DSAStack->getTopDSA(VD, /*FromParent=*/false);
2937 // OpenMP [2.12.5, target Construct]
2938 // Memory allocators that appear in a uses_allocators clause cannot
2939 // appear in other data-sharing attribute clauses or data-mapping
2940 // attribute clauses in the same construct.
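          // Editor's illustration (assumed example, not from the original
          // source): given a user-defined allocator handle 'MyAlloc',
          //   #pragma omp target uses_allocators(MyAlloc(MyTraits)) firstprivate(MyAlloc)
          // is diagnosed because MyAlloc appears both in uses_allocators and
          // in a data-sharing clause on the same construct.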
2941 Expr *MapExpr = nullptr;
2942 if (DVar.RefExpr ||
2943 DSAStack->checkMappableExprComponentListsForDecl(
2944 VD, /*CurrentRegionOnly=*/true,
2945 [VD, &MapExpr](
2946                    OMPClauseMappableExprCommon::MappableExprComponentListRef
2947 MapExprComponents,
2948                    OpenMPClauseKind) {
2949 auto MI = MapExprComponents.rbegin();
2950 auto ME = MapExprComponents.rend();
2951 if (MI != ME &&
2952 MI->getAssociatedDeclaration()->getCanonicalDecl() ==
2953 VD->getCanonicalDecl()) {
2954 MapExpr = MI->getAssociatedExpression();
2955 return true;
2956 }
2957 return false;
2958 })) {
2959 Diag(D.Allocator->getExprLoc(),
2960 diag::err_omp_allocator_used_in_clauses)
2961 << D.Allocator->getSourceRange();
2962 if (DVar.RefExpr)
2963            reportOriginalDsa(SemaRef, DSAStack, VD, DVar);
2964 else
2965 Diag(MapExpr->getExprLoc(), diag::note_used_here)
2966 << MapExpr->getSourceRange();
2967 }
2968 }
2969 continue;
2970 }
2971 }
2972 // Check allocate clauses.
2973    if (!SemaRef.CurContext->isDependentContext())
2974 checkAllocateClauses(SemaRef, DSAStack, D->clauses());
2975    checkReductionClauses(SemaRef, DSAStack, D->clauses());
2976 }
2977
2978 DSAStack->pop();
2979  SemaRef.DiscardCleanupsInEvaluationContext();
2980  SemaRef.PopExpressionEvaluationContext();
2981}
2982
2983static bool FinishOpenMPLinearClause(OMPLinearClause &Clause, DeclRefExpr *IV,
2984 Expr *NumIterations, Sema &SemaRef,
2985 Scope *S, DSAStackTy *Stack);
2986
2987static bool finishLinearClauses(Sema &SemaRef, ArrayRef<OMPClause *> Clauses,
2988                                OMPLoopBasedDirective::HelperExprs &B,
2989 DSAStackTy *Stack) {
2990 assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) &&
2991 "loop exprs were not built");
2992
2993 if (SemaRef.CurContext->isDependentContext())
2994 return false;
2995
2996 // Finalize the clauses that need pre-built expressions for CodeGen.
2997 for (OMPClause *C : Clauses) {
2998 auto *LC = dyn_cast<OMPLinearClause>(C);
2999 if (!LC)
3000 continue;
3001 if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
3002 B.NumIterations, SemaRef,
3003 SemaRef.getCurScope(), Stack))
3004 return true;
3005 }
3006
3007 return false;
3008}
3009
3010namespace {
3011
3012class VarDeclFilterCCC final : public CorrectionCandidateCallback {
3013private:
3014 Sema &SemaRef;
3015
3016public:
3017 explicit VarDeclFilterCCC(Sema &S) : SemaRef(S) {}
3018 bool ValidateCandidate(const TypoCorrection &Candidate) override {
3019 NamedDecl *ND = Candidate.getCorrectionDecl();
3020 if (const auto *VD = dyn_cast_or_null<VarDecl>(ND)) {
3021 return VD->hasGlobalStorage() &&
3022 SemaRef.isDeclInScope(ND, SemaRef.getCurLexicalContext(),
3023 SemaRef.getCurScope());
3024 }
3025 return false;
3026 }
3027
3028 std::unique_ptr<CorrectionCandidateCallback> clone() override {
3029 return std::make_unique<VarDeclFilterCCC>(*this);
3030 }
3031};
3032
3033class VarOrFuncDeclFilterCCC final : public CorrectionCandidateCallback {
3034private:
3035 Sema &SemaRef;
3036
3037public:
3038 explicit VarOrFuncDeclFilterCCC(Sema &S) : SemaRef(S) {}
3039 bool ValidateCandidate(const TypoCorrection &Candidate) override {
3040 NamedDecl *ND = Candidate.getCorrectionDecl();
3041 if (ND && ((isa<VarDecl>(ND) && ND->getKind() == Decl::Var) ||
3042 isa<FunctionDecl>(ND))) {
3043 return SemaRef.isDeclInScope(ND, SemaRef.getCurLexicalContext(),
3044 SemaRef.getCurScope());
3045 }
3046 return false;
3047 }
3048
3049 std::unique_ptr<CorrectionCandidateCallback> clone() override {
3050 return std::make_unique<VarOrFuncDeclFilterCCC>(*this);
3051 }
3052};
3053
3054} // namespace
3055
3056ExprResult SemaOpenMP::ActOnOpenMPIdExpression(Scope *CurScope,
3057 CXXScopeSpec &ScopeSpec,
3058 const DeclarationNameInfo &Id,
3059 OpenMPDirectiveKind Kind) {
3060 ASTContext &Context = getASTContext();
3061  LookupResult Lookup(SemaRef, Id, Sema::LookupOrdinaryName);
3062 SemaRef.LookupParsedName(Lookup, CurScope, &ScopeSpec,
3063 /*ObjectType=*/QualType(),
3064 /*AllowBuiltinCreation=*/true);
3065
3066 if (Lookup.isAmbiguous())
3067 return ExprError();
3068
3069 VarDecl *VD;
3070 if (!Lookup.isSingleResult()) {
3071 VarDeclFilterCCC CCC(SemaRef);
3072 if (TypoCorrection Corrected =
3073 SemaRef.CorrectTypo(Id, Sema::LookupOrdinaryName, CurScope, nullptr,
3074                              CCC, Sema::CTK_ErrorRecovery)) {
3075      SemaRef.diagnoseTypo(
3076 Corrected,
3077 SemaRef.PDiag(Lookup.empty() ? diag::err_undeclared_var_use_suggest
3078 : diag::err_omp_expected_var_arg_suggest)
3079 << Id.getName());
3080 VD = Corrected.getCorrectionDeclAs<VarDecl>();
3081 } else {
3082 Diag(Id.getLoc(), Lookup.empty() ? diag::err_undeclared_var_use
3083 : diag::err_omp_expected_var_arg)
3084 << Id.getName();
3085 return ExprError();
3086 }
3087 } else if (!(VD = Lookup.getAsSingle<VarDecl>())) {
3088 Diag(Id.getLoc(), diag::err_omp_expected_var_arg) << Id.getName();
3089 Diag(Lookup.getFoundDecl()->getLocation(), diag::note_declared_at);
3090 return ExprError();
3091 }
3092 Lookup.suppressDiagnostics();
3093
3094 // OpenMP [2.9.2, Syntax, C/C++]
3095 // Variables must be file-scope, namespace-scope, or static block-scope.
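  // Editor's sketch (assumed example, not part of the original source):
  //   int GlobalCounter;
  //   #pragma omp threadprivate(GlobalCounter)   // accepted: global storage
  //   void F() {
  //     int Local;
  //   #pragma omp threadprivate(Local)           // diagnosed: automatic storage
  //   }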
3096 if (Kind == OMPD_threadprivate && !VD->hasGlobalStorage()) {
3097 Diag(Id.getLoc(), diag::err_omp_global_var_arg)
3098 << getOpenMPDirectiveName(Kind) << !VD->isStaticLocal();
3099 bool IsDecl =
3100 VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
3101 Diag(VD->getLocation(),
3102 IsDecl ? diag::note_previous_decl : diag::note_defined_here)
3103 << VD;
3104 return ExprError();
3105 }
3106
3107 VarDecl *CanonicalVD = VD->getCanonicalDecl();
3108 NamedDecl *ND = CanonicalVD;
3109 // OpenMP [2.9.2, Restrictions, C/C++, p.2]
3110 // A threadprivate directive for file-scope variables must appear outside
3111 // any definition or declaration.
3112 if (CanonicalVD->getDeclContext()->isTranslationUnit() &&
3113 !SemaRef.getCurLexicalContext()->isTranslationUnit()) {
3114 Diag(Id.getLoc(), diag::err_omp_var_scope)
3115 << getOpenMPDirectiveName(Kind) << VD;
3116 bool IsDecl =
3117 VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
3118 Diag(VD->getLocation(),
3119 IsDecl ? diag::note_previous_decl : diag::note_defined_here)
3120 << VD;
3121 return ExprError();
3122 }
3123 // OpenMP [2.9.2, Restrictions, C/C++, p.3]
3124 // A threadprivate directive for static class member variables must appear
3125 // in the class definition, in the same scope in which the member
3126 // variables are declared.
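 // Illustrative example (assumed user code): the directive placed outside
 // the class definition is rejected by the check below:
 //   struct S { static int X; };
 //   #pragma omp threadprivate(S::X) // error: must appear inside 'S'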
3127 if (CanonicalVD->isStaticDataMember() &&
3128 !CanonicalVD->getDeclContext()->Equals(SemaRef.getCurLexicalContext())) {
3129 Diag(Id.getLoc(), diag::err_omp_var_scope)
3130 << getOpenMPDirectiveName(Kind) << VD;
3131 bool IsDecl =
3132 VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
3133 Diag(VD->getLocation(),
3134 IsDecl ? diag::note_previous_decl : diag::note_defined_here)
3135 << VD;
3136 return ExprError();
3137 }
3138 // OpenMP [2.9.2, Restrictions, C/C++, p.4]
3139 // A threadprivate directive for namespace-scope variables must appear
3140 // outside any definition or declaration other than the namespace
3141 // definition itself.
3142 if (CanonicalVD->getDeclContext()->isNamespace() &&
3143 (!SemaRef.getCurLexicalContext()->isFileContext() ||
3144 !SemaRef.getCurLexicalContext()->Encloses(
3145 CanonicalVD->getDeclContext()))) {
3146 Diag(Id.getLoc(), diag::err_omp_var_scope)
3147 << getOpenMPDirectiveName(Kind) << VD;
3148 bool IsDecl =
3150 Diag(VD->getLocation(),
3151 IsDecl ? diag::note_previous_decl : diag::note_defined_here)
3152 << VD;
3153 return ExprError();
3154 }
3155 // OpenMP [2.9.2, Restrictions, C/C++, p.6]
3156 // A threadprivate directive for static block-scope variables must appear
3157 // in the scope of the variable and not in a nested scope.
3158 if (CanonicalVD->isLocalVarDecl() && CurScope &&
3159 !SemaRef.isDeclInScope(ND, SemaRef.getCurLexicalContext(), CurScope)) {
3160 Diag(Id.getLoc(), diag::err_omp_var_scope)
3161 << getOpenMPDirectiveName(Kind) << VD;
3162 bool IsDecl =
3163 VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
3164 Diag(VD->getLocation(),
3165 IsDecl ? diag::note_previous_decl : diag::note_defined_here)
3166 << VD;
3167 return ExprError();
3168 }
3169
3170 // OpenMP [2.9.2, Restrictions, C/C++, p.2-6]
3171 // A threadprivate directive must lexically precede all references to any
3172 // of the variables in its list.
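 // Illustrative example (assumed user code): the directive follows a use of
 // the variable and is rejected by the check below:
 //   int X;
 //   int read() { return X; }
 //   #pragma omp threadprivate(X) // error: 'X' is already referenced above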
3173 if (Kind == OMPD_threadprivate && VD->isUsed() &&
3174 !DSAStack->isThreadPrivate(VD)) {
3175 Diag(Id.getLoc(), diag::err_omp_var_used)
3176 << getOpenMPDirectiveName(Kind) << VD;
3177 return ExprError();
3178 }
3179
3180 QualType ExprType = VD->getType().getNonReferenceType();
3181 return DeclRefExpr::Create(Context, NestedNameSpecifierLoc(),
3182 SourceLocation(), VD,
3183 /*RefersToEnclosingVariableOrCapture=*/false,
3184 Id.getLoc(), ExprType, VK_LValue);
3185}
3186
3187SemaOpenMP::DeclGroupPtrTy
3188SemaOpenMP::ActOnOpenMPThreadprivateDirective(SourceLocation Loc,
3189 ArrayRef<Expr *> VarList) {
3190 if (OMPThreadPrivateDecl *D = CheckOMPThreadPrivateDecl(Loc, VarList)) {
3191 SemaRef.CurContext->addDecl(D);
3192 return DeclGroupPtrTy::make(DeclGroupRef(D));
3193 }
3194 return nullptr;
3195}
3196
3197namespace {
3198class LocalVarRefChecker final
3199 : public ConstStmtVisitor<LocalVarRefChecker, bool> {
3200 Sema &SemaRef;
3201
3202public:
3203 bool VisitDeclRefExpr(const DeclRefExpr *E) {
3204 if (const auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
3205 if (VD->hasLocalStorage()) {
3206 SemaRef.Diag(E->getBeginLoc(),
3207 diag::err_omp_local_var_in_threadprivate_init)
3208 << E->getSourceRange();
3209 SemaRef.Diag(VD->getLocation(), diag::note_defined_here)
3210 << VD << VD->getSourceRange();
3211 return true;
3212 }
3213 }
3214 return false;
3215 }
3216 bool VisitStmt(const Stmt *S) {
3217 for (const Stmt *Child : S->children()) {
3218 if (Child && Visit(Child))
3219 return true;
3220 }
3221 return false;
3222 }
3223 explicit LocalVarRefChecker(Sema &SemaRef) : SemaRef(SemaRef) {}
3224};
3225} // namespace
3226
3229 ArrayRef<Expr *> VarList) {
3230 ASTContext &Context = getASTContext();
3231 SmallVector<Expr *, 8> Vars;
3232 for (Expr *RefExpr : VarList) {
3233 auto *DE = cast<DeclRefExpr>(RefExpr);
3234 auto *VD = cast<VarDecl>(DE->getDecl());
3235 SourceLocation ILoc = DE->getExprLoc();
3236
3237 // Mark variable as used.
3238 VD->setReferenced();
3239 VD->markUsed(Context);
3240
3241 QualType QType = VD->getType();
3242 if (QType->isDependentType() || QType->isInstantiationDependentType()) {
3243 // It will be analyzed later.
3244 Vars.push_back(DE);
3245 continue;
3246 }
3247
3248 // OpenMP [2.9.2, Restrictions, C/C++, p.10]
3249 // A threadprivate variable must not have an incomplete type.
3250 if (SemaRef.RequireCompleteType(
3251 ILoc, VD->getType(), diag::err_omp_threadprivate_incomplete_type)) {
3252 continue;
3253 }
3254
3255 // OpenMP [2.9.2, Restrictions, C/C++, p.10]
3256 // A threadprivate variable must not have a reference type.
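 // Illustrative example (assumed user code):
 //   int X;
 //   int &R = X;
 //   #pragma omp threadprivate(R) // error: 'R' has a reference type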
3257 if (VD->getType()->isReferenceType()) {
3258 Diag(ILoc, diag::err_omp_ref_type_arg)
3259 << getOpenMPDirectiveName(OMPD_threadprivate) << VD->getType();
3260 bool IsDecl =
3261 VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
3262 Diag(VD->getLocation(),
3263 IsDecl ? diag::note_previous_decl : diag::note_defined_here)
3264 << VD;
3265 continue;
3266 }
3267
3268 // Check if this is a TLS variable. If TLS is not supported, produce the
3269 // corresponding diagnostic.
3270 if ((VD->getTLSKind() != VarDecl::TLS_None &&
3271 !(VD->hasAttr<OMPThreadPrivateDeclAttr>() &&
3272 getLangOpts().OpenMPUseTLS &&
3273 getASTContext().getTargetInfo().isTLSSupported())) ||
3274 (VD->getStorageClass() == SC_Register && VD->hasAttr<AsmLabelAttr>() &&
3275 !VD->isLocalVarDecl())) {
3276 Diag(ILoc, diag::err_omp_var_thread_local)
3277 << VD << ((VD->getTLSKind() != VarDecl::TLS_None) ? 0 : 1);
3278 bool IsDecl =
3279 VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
3280 Diag(VD->getLocation(),
3281 IsDecl ? diag::note_previous_decl : diag::note_defined_here)
3282 << VD;
3283 continue;
3284 }
3285
3286 // Check if the initial value of the threadprivate variable references a
3287 // variable with local storage (this is not supported by the runtime).
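 // Illustrative example (assumed user code) of the rejected pattern:
 //   void foo(int Arg) {
 //     static int X = Arg;          // initializer uses a local
 //     #pragma omp threadprivate(X) // error: initializer references 'Arg'
 //   }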
3288 if (const Expr *Init = VD->getAnyInitializer()) {
3289 LocalVarRefChecker Checker(SemaRef);
3290 if (Checker.Visit(Init))
3291 continue;
3292 }
3293
3294 Vars.push_back(RefExpr);
3295 DSAStack->addDSA(VD, DE, OMPC_threadprivate);
3296 VD->addAttr(OMPThreadPrivateDeclAttr::CreateImplicit(
3297 Context, SourceRange(Loc, Loc)));
3298 if (ASTMutationListener *ML = Context.getASTMutationListener())
3299 ML->DeclarationMarkedOpenMPThreadPrivate(VD);
3300 }
3301 OMPThreadPrivateDecl *D = nullptr;
3302 if (!Vars.empty()) {
3303 D = OMPThreadPrivateDecl::Create(Context, SemaRef.getCurLexicalContext(),
3304 Loc, Vars);
3305 D->setAccess(AS_public);
3306 }
3307 return D;
3308}
3309
3310static OMPAllocateDeclAttr::AllocatorTypeTy
3311getAllocatorKind(Sema &S, DSAStackTy *Stack, Expr *Allocator) {
3312 if (!Allocator)
3313 return OMPAllocateDeclAttr::OMPNullMemAlloc;
3314 if (Allocator->isTypeDependent() || Allocator->isValueDependent() ||
3315 Allocator->isInstantiationDependent() ||
3316 Allocator->containsUnexpandedParameterPack())
3317 return OMPAllocateDeclAttr::OMPUserDefinedMemAlloc;
3318 auto AllocatorKindRes = OMPAllocateDeclAttr::OMPUserDefinedMemAlloc;
3319 llvm::FoldingSetNodeID AEId;
3320 const Expr *AE = Allocator->IgnoreParenImpCasts();
3321 AE->IgnoreImpCasts()->Profile(AEId, S.getASTContext(), /*Canonical=*/true);
3322 for (int I = 0; I < OMPAllocateDeclAttr::OMPUserDefinedMemAlloc; ++I) {
3323 auto AllocatorKind = static_cast<OMPAllocateDeclAttr::AllocatorTypeTy>(I);
3324 const Expr *DefAllocator = Stack->getAllocator(AllocatorKind);
3325 llvm::FoldingSetNodeID DAEId;
3326 DefAllocator->IgnoreImpCasts()->Profile(DAEId, S.getASTContext(),
3327 /*Canonical=*/true);
3328 if (AEId == DAEId) {
3329 AllocatorKindRes = AllocatorKind;
3330 break;
3331 }
3332 }
3333 return AllocatorKindRes;
3334}
3335
3336static bool checkPreviousOMPAllocateAttribute(
3337 Sema &S, DSAStackTy *Stack, Expr *RefExpr, VarDecl *VD,
3338 OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind, Expr *Allocator) {
3339 if (!VD->hasAttr<OMPAllocateDeclAttr>())
3340 return false;
3341 const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
3342 Expr *PrevAllocator = A->getAllocator();
3343 OMPAllocateDeclAttr::AllocatorTypeTy PrevAllocatorKind =
3344 getAllocatorKind(S, Stack, PrevAllocator);
3345 bool AllocatorsMatch = AllocatorKind == PrevAllocatorKind;
3346 if (AllocatorsMatch &&
3347 AllocatorKind == OMPAllocateDeclAttr::OMPUserDefinedMemAlloc &&
3348 Allocator && PrevAllocator) {
3349 const Expr *AE = Allocator->IgnoreParenImpCasts();
3350 const Expr *PAE = PrevAllocator->IgnoreParenImpCasts();
3351 llvm::FoldingSetNodeID AEId, PAEId;
3352 AE->Profile(AEId, S.Context, /*Canonical=*/true);
3353 PAE->Profile(PAEId, S.Context, /*Canonical=*/true);
3354 AllocatorsMatch = AEId == PAEId;
3355 }
3356 if (!AllocatorsMatch) {
3357 SmallString<256> AllocatorBuffer;
3358 llvm::raw_svector_ostream AllocatorStream(AllocatorBuffer);
3359 if (Allocator)
3360 Allocator->printPretty(AllocatorStream, nullptr, S.getPrintingPolicy());
3361 SmallString<256> PrevAllocatorBuffer;
3362 llvm::raw_svector_ostream PrevAllocatorStream(PrevAllocatorBuffer);
3363 if (PrevAllocator)
3364 PrevAllocator->printPretty(PrevAllocatorStream, nullptr,
3365 S.getPrintingPolicy());
3366
3367 SourceLocation AllocatorLoc =
3368 Allocator ? Allocator->getExprLoc() : RefExpr->getExprLoc();
3369 SourceRange AllocatorRange =
3370 Allocator ? Allocator->getSourceRange() : RefExpr->getSourceRange();
3371 SourceLocation PrevAllocatorLoc =
3372 PrevAllocator ? PrevAllocator->getExprLoc() : A->getLocation();
3373 SourceRange PrevAllocatorRange =
3374 PrevAllocator ? PrevAllocator->getSourceRange() : A->getRange();
3375 S.Diag(AllocatorLoc, diag::warn_omp_used_different_allocator)
3376 << (Allocator ? 1 : 0) << AllocatorStream.str()
3377 << (PrevAllocator ? 1 : 0) << PrevAllocatorStream.str()
3378 << AllocatorRange;
3379 S.Diag(PrevAllocatorLoc, diag::note_omp_previous_allocator)
3380 << PrevAllocatorRange;
3381 return true;
3382 }
3383 return false;
3384}
3385
3386static void
3387applyOMPAllocateAttribute(Sema &S, VarDecl *VD,
3388 OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind,
3389 Expr *Allocator, Expr *Alignment, SourceRange SR) {
3390 if (VD->hasAttr<OMPAllocateDeclAttr>())
3391 return;
3392 if (Alignment &&
3393 (Alignment->isTypeDependent() || Alignment->isValueDependent() ||
3394 Alignment->isInstantiationDependent() ||
3395 Alignment->containsUnexpandedParameterPack()))
3396 // Apply later when we have a usable value.
3397 return;
3398 if (Allocator &&
3399 (Allocator->isTypeDependent() || Allocator->isValueDependent() ||
3400 Allocator->isInstantiationDependent() ||
3401 Allocator->containsUnexpandedParameterPack()))
3402 return;
3403 auto *A = OMPAllocateDeclAttr::CreateImplicit(S.Context, AllocatorKind,
3404 Allocator, Alignment, SR);
3405 VD->addAttr(A);
3406 if (ASTMutationListener *ML = S.Context.getASTMutationListener())
3407 ML->DeclarationMarkedOpenMPAllocate(VD, A);
3408}
3409
3410SemaOpenMP::DeclGroupPtrTy SemaOpenMP::ActOnOpenMPAllocateDirective(
3411 SourceLocation Loc, ArrayRef<Expr *> VarList, ArrayRef<OMPClause *> Clauses,
3412 DeclContext *Owner) {
3413 assert(Clauses.size() <= 2 && "Expected at most two clauses.");
3414 Expr *Alignment = nullptr;
3415 Expr *Allocator = nullptr;
3416 if (Clauses.empty()) {
3417 // OpenMP 5.0, 2.11.3 allocate Directive, Restrictions.
3418 // allocate directives that appear in a target region must specify an
3419 // allocator clause unless a requires directive with the dynamic_allocators
3420 // clause is present in the same compilation unit.
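 // Illustrative example (assumed user code): when compiling for the device,
 //   #pragma omp target
 //   {
 //     int X;
 //     #pragma omp allocate(X)
 //   }
 // is diagnosed unless an allocator clause is added or a requires directive
 // with dynamic_allocators is visible in the compilation unit.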
3421 if (getLangOpts().OpenMPIsTargetDevice &&
3422 !DSAStack->hasRequiresDeclWithClause<OMPDynamicAllocatorsClause>())
3423 SemaRef.targetDiag(Loc, diag::err_expected_allocator_clause);
3424 } else {
3425 for (const OMPClause *C : Clauses)
3426 if (const auto *AC = dyn_cast<OMPAllocatorClause>(C))
3427 Allocator = AC->getAllocator();
3428 else if (const auto *AC = dyn_cast<OMPAlignClause>(C))
3429 Alignment = AC->getAlignment();
3430 else
3431 llvm_unreachable("Unexpected clause on allocate directive");
3432 }
3433 OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind =
3434 getAllocatorKind(SemaRef, DSAStack, Allocator);
3435 SmallVector<Expr *, 8> Vars;
3436 for (Expr *RefExpr : VarList) {
3437 auto *DE = cast<DeclRefExpr>(RefExpr);
3438 auto *VD = cast<VarDecl>(DE->getDecl());
3439
3440 // Check if this is a TLS variable or global register.
3441 if (VD->getTLSKind() != VarDecl::TLS_None ||
3442 VD->hasAttr<OMPThreadPrivateDeclAttr>() ||
3443 (VD->getStorageClass() == SC_Register && VD->hasAttr<AsmLabelAttr>() &&
3444 !VD->isLocalVarDecl()))
3445 continue;
3446
3447 // If the variable is used several times in allocate directives, the same
3448 // allocator must be used.
3449 if (checkPreviousOMPAllocateAttribute(SemaRef, DSAStack, RefExpr, VD,
3450 AllocatorKind, Allocator))
3451 continue;
3452
3453 // OpenMP, 2.11.3 allocate Directive, Restrictions, C / C++
3454 // If a list item has a static storage type, the allocator expression in the
3455 // allocator clause must be a constant expression that evaluates to one of
3456 // the predefined memory allocator values.
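 // Illustrative example (assumed user code; 'MyAlloc' is a made-up
 // user-defined allocator handle):
 //   extern omp_allocator_handle_t MyAlloc;
 //   static int X;
 //   #pragma omp allocate(X) allocator(MyAlloc)               // rejected below
 //   static int Y;
 //   #pragma omp allocate(Y) allocator(omp_default_mem_alloc) // OK: predefined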
3457 if (Allocator && VD->hasGlobalStorage()) {
3458 if (AllocatorKind == OMPAllocateDeclAttr::OMPUserDefinedMemAlloc) {
3459 Diag(Allocator->getExprLoc(),
3460 diag::err_omp_expected_predefined_allocator)
3461 << Allocator->getSourceRange();
3462 bool IsDecl = VD->isThisDeclarationADefinition(getASTContext()) ==
3463 VarDecl::DeclarationOnly;
3464 Diag(VD->getLocation(),
3465 IsDecl ? diag::note_previous_decl : diag::note_defined_here)
3466 << VD;
3467 continue;
3468 }
3469 }
3470
3471 Vars.push_back(RefExpr);
3472 applyOMPAllocateAttribute(SemaRef, VD, AllocatorKind, Allocator, Alignment,
3473 DE->getSourceRange());
3474 }
3475 if (Vars.empty())
3476 return nullptr;
3477 if (!Owner)
3478 Owner = SemaRef.getCurLexicalContext();
3479 auto *D = OMPAllocateDecl::Create(getASTContext(), Owner, Loc, Vars, Clauses);
3480 D->setAccess(AS_public);
3481 Owner->addDecl(D);
3482 return DeclGroupPtrTy::make(DeclGroupRef(D));
3483}
3484
3487 ArrayRef<OMPClause *> ClauseList) {
3488 OMPRequiresDecl *D = nullptr;
3489 if (!SemaRef.CurContext->isFileContext()) {
3490 Diag(Loc, diag::err_omp_invalid_scope) << "requires";
3491 } else {
3492 D = CheckOMPRequiresDecl(Loc, ClauseList);
3493 if (D) {
3494 SemaRef.CurContext->addDecl(D);
3495 DSAStack->addRequiresDecl(D);
3496 }
3497 }
3499}
3500
3502 OpenMPDirectiveKind DKind,
3503 ArrayRef<std::string> Assumptions,
3504 bool SkippedClauses) {
3505 if (!SkippedClauses && Assumptions.empty())
3506 Diag(Loc, diag::err_omp_no_clause_for_directive)
3507 << llvm::omp::getAllAssumeClauseOptions()
3508 << llvm::omp::getOpenMPDirectiveName(DKind);
3509
3510 auto *AA =
3511 OMPAssumeAttr::Create(getASTContext(), llvm::join(Assumptions, ","), Loc);
3512 if (DKind == llvm::omp::Directive::OMPD_begin_assumes) {
3513 OMPAssumeScoped.push_back(AA);
3514 return;
3515 }
3516
3517 // Global assumes without assumption clauses are ignored.
3518 if (Assumptions.empty())
3519 return;
3520
3521 assert(DKind == llvm::omp::Directive::OMPD_assumes &&
3522 "Unexpected omp assumption directive!");
3523 OMPAssumeGlobal.push_back(AA);
3524
3525 // The OMPAssumeGlobal scope above will take care of new declarations but
3526 // we also want to apply the assumption to existing ones, e.g., to
3527 // declarations in included headers. To this end, we traverse all existing
3528 // declaration contexts and annotate function declarations here.
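 // Illustrative example (assumed user code): after
 //   #pragma omp assumes no_openmp_routines
 // function declarations that were already parsed, e.g. from included
 // headers, are annotated with the assumption by the traversal below.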
3529 SmallVector<DeclContext *, 8> DeclContexts;
3530 auto *Ctx = SemaRef.CurContext;
3531 while (Ctx->getLexicalParent())
3532 Ctx = Ctx->getLexicalParent();
3533 DeclContexts.push_back(Ctx);
3534 while (!DeclContexts.empty()) {
3535 DeclContext *DC = DeclContexts.pop_back_val();
3536 for (auto *SubDC : DC->decls()) {
3537 if (SubDC->isInvalidDecl())
3538 continue;
3539 if (auto *CTD = dyn_cast<ClassTemplateDecl>(SubDC)) {
3540 DeclContexts.push_back(CTD->getTemplatedDecl());
3541 llvm::append_range(DeclContexts, CTD->specializations());
3542 continue;
3543 }
3544 if (auto *DC = dyn_cast<DeclContext>(SubDC))
3545 DeclContexts.push_back(DC);
3546 if (auto *F = dyn_cast<FunctionDecl>(SubDC)) {
3547 F->addAttr(AA);
3548 continue;
3549 }
3550 }
3551 }
3552}
3553
3555 assert(isInOpenMPAssumeScope() && "Not in OpenMP assumes scope!");
3556 OMPAssumeScoped.pop_back();
3557}
3558
3560 Stmt *AStmt,
3561 SourceLocation StartLoc,
3562 SourceLocation EndLoc) {
3563 if (!AStmt)
3564 return StmtError();
3565
3566 return OMPAssumeDirective::Create(getASTContext(), StartLoc, EndLoc, Clauses,
3567 AStmt);
3568}
3569
3572 ArrayRef<OMPClause *> ClauseList) {
3573 /// For target specific clauses, the requires directive cannot be
3574 /// specified after the handling of any of the target regions in the
3575 /// current compilation unit.
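 // Illustrative example (assumed user code) of the rejected ordering:
 //   void foo() {
 //     #pragma omp target
 //     {}
 //   }
 //   #pragma omp requires unified_shared_memory // error: a target region has
 //                                              // already been encountered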
3576 ArrayRef<SourceLocation> TargetLocations =
3577 DSAStack->getEncounteredTargetLocs();
3578 SourceLocation AtomicLoc = DSAStack->getAtomicDirectiveLoc();
3579 if (!TargetLocations.empty() || !AtomicLoc.isInvalid()) {
3580 for (const OMPClause *CNew : ClauseList) {
3581 // Check if any of the requires clauses affect target regions.
3582 if (isa<OMPUnifiedSharedMemoryClause>(CNew) ||
3583 isa<OMPUnifiedAddressClause>(CNew) ||
3584 isa<OMPReverseOffloadClause>(CNew) ||
3585 isa<OMPDynamicAllocatorsClause>(CNew)) {
3586 Diag(Loc, diag::err_omp_directive_before_requires)
3587 << "target" << getOpenMPClauseName(CNew->getClauseKind());
3588 for (SourceLocation TargetLoc : TargetLocations) {
3589 Diag(TargetLoc, diag::note_omp_requires_encountered_directive)
3590 << "target";
3591 }
3592 } else if (!AtomicLoc.isInvalid() &&
3593 isa<OMPAtomicDefaultMemOrderClause>(CNew)) {
3594 Diag(Loc, diag::err_omp_directive_before_requires)
3595 << "atomic" << getOpenMPClauseName(CNew->getClauseKind());
3596 Diag(AtomicLoc, diag::note_omp_requires_encountered_directive)
3597 << "atomic";
3598 }
3599 }
3600 }
3601
3602 if (!DSAStack->hasDuplicateRequiresClause(ClauseList))
3604 getASTContext(), SemaRef.getCurLexicalContext(), Loc, ClauseList);
3605 return nullptr;
3606}
3607
3608static void reportOriginalDsa(Sema &SemaRef, const DSAStackTy *Stack,
3609 const ValueDecl *D,
3610 const DSAStackTy::DSAVarData &DVar,
3611 bool IsLoopIterVar) {
3612 if (DVar.RefExpr) {
3613 SemaRef.Diag(DVar.RefExpr->getExprLoc(), diag::note_omp_explicit_dsa)
3614 << getOpenMPClauseName(DVar.CKind);
3615 return;
3616 }
3617 enum {
3618 PDSA_StaticMemberShared,
3619 PDSA_StaticLocalVarShared,
3620 PDSA_LoopIterVarPrivate,
3621 PDSA_LoopIterVarLinear,
3622 PDSA_LoopIterVarLastprivate,
3623 PDSA_ConstVarShared,
3624 PDSA_GlobalVarShared,
3625 PDSA_TaskVarFirstprivate,
3626 PDSA_LocalVarPrivate,
3627 PDSA_Implicit
3628 } Reason = PDSA_Implicit;
3629 bool ReportHint = false;
3630 auto ReportLoc = D->getLocation();
3631 auto *VD = dyn_cast<VarDecl>(D);
3632 if (IsLoopIterVar) {
3633 if (DVar.CKind == OMPC_private)
3634 Reason = PDSA_LoopIterVarPrivate;
3635 else if (DVar.CKind == OMPC_lastprivate)
3636 Reason = PDSA_LoopIterVarLastprivate;
3637 else
3638 Reason = PDSA_LoopIterVarLinear;
3639 } else if (isOpenMPTaskingDirective(DVar.DKind) &&
3640 DVar.CKind == OMPC_firstprivate) {
3641 Reason = PDSA_TaskVarFirstprivate;
3642 ReportLoc = DVar.ImplicitDSALoc;
3643 } else if (VD && VD->isStaticLocal())
3644 Reason = PDSA_StaticLocalVarShared;
3645 else if (VD && VD->isStaticDataMember())
3646 Reason = PDSA_StaticMemberShared;
3647 else if (VD && VD->isFileVarDecl())
3648 Reason = PDSA_GlobalVarShared;
3649 else if (D->getType().isConstant(SemaRef.getASTContext()))
3650 Reason = PDSA_ConstVarShared;
3651 else if (VD && VD->isLocalVarDecl() && DVar.CKind == OMPC_private) {
3652 ReportHint = true;
3653 Reason = PDSA_LocalVarPrivate;
3654 }
3655 if (Reason != PDSA_Implicit) {
3656 SemaRef.Diag(ReportLoc, diag::note_omp_predetermined_dsa)
3657 << Reason << ReportHint
3658 << getOpenMPDirectiveName(Stack->getCurrentDirective());
3659 } else if (DVar.ImplicitDSALoc.isValid()) {
3660 SemaRef.Diag(DVar.ImplicitDSALoc, diag::note_omp_implicit_dsa)
3661 << getOpenMPClauseName(DVar.CKind);
3662 }
3663}
3664
3667 bool IsAggregateOrDeclareTarget) {
3668 OpenMPMapClauseKind Kind = OMPC_MAP_unknown;
3669 switch (M) {
3670 case OMPC_DEFAULTMAP_MODIFIER_alloc:
3671 Kind = OMPC_MAP_alloc;
3672 break;
3673 case OMPC_DEFAULTMAP_MODIFIER_to:
3674 Kind = OMPC_MAP_to;
3675 break;
3676 case OMPC_DEFAULTMAP_MODIFIER_from:
3677 Kind = OMPC_MAP_from;
3678 break;
3679 case OMPC_DEFAULTMAP_MODIFIER_tofrom:
3680 Kind = OMPC_MAP_tofrom;
3681 break;
3682 case OMPC_DEFAULTMAP_MODIFIER_present:
3683 // OpenMP 5.1 [2.21.7.3, defaultmap clause, Description]
3684 // If implicit-behavior is present, each variable referenced in the
3685 // construct in the category specified by variable-category is treated as if
3686 // it had been listed in a map clause with the map-type of alloc and
3687 // map-type-modifier of present.
3688 Kind = OMPC_MAP_alloc;
3689 break;
3690 case OMPC_DEFAULTMAP_MODIFIER_firstprivate:
3692 llvm_unreachable("Unexpected defaultmap implicit behavior");
3693 case OMPC_DEFAULTMAP_MODIFIER_none:
3694 case OMPC_DEFAULTMAP_MODIFIER_default:
3696 // IsAggregateOrDeclareTarget could be true if:
3697 // 1. the implicit behavior for aggregate is tofrom
3698 // 2. it's a declare target link
3699 if (IsAggregateOrDeclareTarget) {
3700 Kind = OMPC_MAP_tofrom;
3701 break;
3702 }
3703 llvm_unreachable("Unexpected defaultmap implicit behavior");
3704 }
3705 assert(Kind != OMPC_MAP_unknown && "Expect map kind to be known");
3706 return Kind;
3707}
3708
3709namespace {
3710class DSAAttrChecker final : public StmtVisitor<DSAAttrChecker, void> {
3711 DSAStackTy *Stack;
3712 Sema &SemaRef;
3713 OpenMPDirectiveKind DKind = OMPD_unknown;
3714 bool ErrorFound = false;
3715 bool TryCaptureCXXThisMembers = false;
3716 CapturedStmt *CS = nullptr;
3717 const static unsigned DefaultmapKindNum = OMPC_DEFAULTMAP_unknown + 1;
3718 llvm::SmallVector<Expr *, 4> ImplicitFirstprivate;
3719 llvm::SmallVector<Expr *, 4> ImplicitPrivate;
3720 llvm::SmallVector<Expr *, 4> ImplicitMap[DefaultmapKindNum][OMPC_MAP_delete];
3722 ImplicitMapModifier[DefaultmapKindNum];
3723 SemaOpenMP::VarsWithInheritedDSAType VarsWithInheritedDSA;
3724 llvm::SmallDenseSet<const ValueDecl *, 4> ImplicitDeclarations;
3725
3726 void VisitSubCaptures(OMPExecutableDirective *S) {
3727 // Check implicitly captured variables.
3728 if (!S->hasAssociatedStmt() || !S->getAssociatedStmt())
3729 return;
3730 if (S->getDirectiveKind() == OMPD_atomic ||
3731 S->getDirectiveKind() == OMPD_critical ||
3732 S->getDirectiveKind() == OMPD_section ||
3733 S->getDirectiveKind() == OMPD_master ||
3734 S->getDirectiveKind() == OMPD_masked ||
3735 S->getDirectiveKind() == OMPD_scope ||
3736 S->getDirectiveKind() == OMPD_assume ||
3737 isOpenMPLoopTransformationDirective(S->getDirectiveKind())) {
3738 Visit(S->getAssociatedStmt());
3739 return;
3740 }
3741 visitSubCaptures(S->getInnermostCapturedStmt());
3742 // Try to capture inner this->member references to generate correct mappings
3743 // and diagnostics.
3744 if (TryCaptureCXXThisMembers ||
3745 (isOpenMPTargetExecutionDirective(DKind) &&
3746 llvm::any_of(S->getInnermostCapturedStmt()->captures(),
3747 [](const CapturedStmt::Capture &C) {
3748 return C.capturesThis();
3749 }))) {
3750 bool SavedTryCaptureCXXThisMembers = TryCaptureCXXThisMembers;
3751 TryCaptureCXXThisMembers = true;
3752 Visit(S->getInnermostCapturedStmt()->getCapturedStmt());
3753 TryCaptureCXXThisMembers = SavedTryCaptureCXXThisMembers;
3754 }
3755 // In tasks, firstprivates are not captured anymore, so they need to be
3756 // analyzed explicitly.
3757 if (isOpenMPTaskingDirective(S->getDirectiveKind()) &&
3758 !isOpenMPTaskLoopDirective(S->getDirectiveKind())) {
3759 for (OMPClause *C : S->clauses())
3760 if (auto *FC = dyn_cast<OMPFirstprivateClause>(C)) {
3761 for (Expr *Ref : FC->varlist())
3762 Visit(Ref);
3763 }
3764 }
3765 }
3766
3767public:
3768 void VisitDeclRefExpr(DeclRefExpr *E) {
3769 if (TryCaptureCXXThisMembers || E->isTypeDependent() ||
3772 E->isNonOdrUse() == clang::NOUR_Unevaluated)
3773 return;
3774 if (auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
3775 // Check the datasharing rules for the expressions in the clauses.
3776 if (!CS || (isa<OMPCapturedExprDecl>(VD) && !CS->capturesVariable(VD) &&
3777 !Stack->getTopDSA(VD, /*FromParent=*/false).RefExpr &&
3778 !Stack->isImplicitDefaultFirstprivateFD(VD))) {
3779 if (auto *CED = dyn_cast<OMPCapturedExprDecl>(VD))
3780 if (!CED->hasAttr<OMPCaptureNoInitAttr>()) {
3781 Visit(CED->getInit());
3782 return;
3783 }
3784 } else if (VD->isImplicit() || isa<OMPCapturedExprDecl>(VD))
3785 // Do not analyze internal variables and do not enclose them into
3786 // implicit clauses.
3787 if (!Stack->isImplicitDefaultFirstprivateFD(VD))
3788 return;
3789 VD = VD->getCanonicalDecl();
3790 // Skip internally declared variables.
3791 if (VD->hasLocalStorage() && CS && !CS->capturesVariable(VD) &&
3792 !Stack->isImplicitDefaultFirstprivateFD(VD) &&
3793 !Stack->isImplicitTaskFirstprivate(VD))
3794 return;
3795 // Skip allocators in uses_allocators clauses.
3796 if (Stack->isUsesAllocatorsDecl(VD))
3797 return;
3798
3799 DSAStackTy::DSAVarData DVar = Stack->getTopDSA(VD, /*FromParent=*/false);
3800 // Check if the variable has an explicit DSA set and stop the analysis if so.
3801 if (DVar.RefExpr || !ImplicitDeclarations.insert(VD).second)
3802 return;
3803
3804 // Skip internally declared static variables.
3805 std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
3806 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
3807 if (VD->hasGlobalStorage() && CS && !CS->capturesVariable(VD) &&
3808 (Stack->hasRequiresDeclWithClause<OMPUnifiedSharedMemoryClause>() ||
3809 !Res || *Res != OMPDeclareTargetDeclAttr::MT_Link) &&
3810 !Stack->isImplicitDefaultFirstprivateFD(VD) &&
3811 !Stack->isImplicitTaskFirstprivate(VD))
3812 return;
3813
3814 SourceLocation ELoc = E->getExprLoc();
3815 // The default(none) clause requires that each variable that is referenced
3816 // in the construct, and does not have a predetermined data-sharing
3817 // attribute, must have its data-sharing attribute explicitly determined
3818 // by being listed in a data-sharing attribute clause.
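 // Illustrative example (assumed user code):
 //   void foo() {
 //     int X = 0;
 //     #pragma omp parallel default(none)
 //     X++; // error: 'X' needs an explicit data-sharing attribute, e.g.
 //          // shared(X), private(X) or firstprivate(X)
 //   }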
3819 if (DVar.CKind == OMPC_unknown &&
3820 (Stack->getDefaultDSA() == DSA_none ||
3821 Stack->getDefaultDSA() == DSA_private ||
3822 Stack->getDefaultDSA() == DSA_firstprivate) &&
3823 isImplicitOrExplicitTaskingRegion(DKind) &&
3824 VarsWithInheritedDSA.count(VD) == 0) {
3825 bool InheritedDSA = Stack->getDefaultDSA() == DSA_none;
3826 if (!InheritedDSA && (Stack->getDefaultDSA() == DSA_firstprivate ||
3827 Stack->getDefaultDSA() == DSA_private)) {
3828 DSAStackTy::DSAVarData DVar =
3829 Stack->getImplicitDSA(VD, /*FromParent=*/false);
3830 InheritedDSA = DVar.CKind == OMPC_unknown;
3831 }
3832 if (InheritedDSA)
3833 VarsWithInheritedDSA[VD] = E;
3834 if (Stack->getDefaultDSA() == DSA_none)
3835 return;
3836 }
3837
3838 // OpenMP 5.0 [2.19.7.2, defaultmap clause, Description]
3839 // If implicit-behavior is none, each variable referenced in the
3840 // construct that does not have a predetermined data-sharing attribute
3841 // and does not appear in a to or link clause on a declare target
3842 // directive must be listed in a data-mapping attribute clause, a
3843 // data-sharing attribute clause (including a data-sharing attribute
3844 // clause on a combined construct where target is one of the
3845 // constituent constructs), or an is_device_ptr clause.
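 // Illustrative example (assumed user code):
 //   void foo() {
 //     int X = 0;
 //     #pragma omp target defaultmap(none)
 //     X++; // error: 'X' must appear in a map clause, a data-sharing
 //          // attribute clause, or an is_device_ptr clause
 //   }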
3846 OpenMPDefaultmapClauseKind ClauseKind =
3848 if (SemaRef.getLangOpts().OpenMP >= 50) {
3849 bool IsModifierNone = Stack->getDefaultmapModifier(ClauseKind) ==
3850 OMPC_DEFAULTMAP_MODIFIER_none;
3851 if (DVar.CKind == OMPC_unknown && IsModifierNone &&
3852 VarsWithInheritedDSA.count(VD) == 0 && !Res) {
3853 // Only check for data-mapping attribute and is_device_ptr here
3854 // since we have already made sure that the declaration does not
3855 // have a data-sharing attribute above
3856 if (!Stack->checkMappableExprComponentListsForDecl(
3857 VD, /*CurrentRegionOnly=*/true,
3859 MapExprComponents,
3861 auto MI = MapExprComponents.rbegin();
3862 auto ME = MapExprComponents.rend();
3863 return MI != ME && MI->getAssociatedDeclaration() == VD;
3864 })) {
3865 VarsWithInheritedDSA[VD] = E;
3866 return;
3867 }
3868 }
3869 }
3870 if (SemaRef.getLangOpts().OpenMP > 50) {
3871 bool IsModifierPresent = Stack->getDefaultmapModifier(ClauseKind) ==
3872 OMPC_DEFAULTMAP_MODIFIER_present;
3873 if (IsModifierPresent) {
3874 if (!llvm::is_contained(ImplicitMapModifier[ClauseKind],
3875 OMPC_MAP_MODIFIER_present)) {
3876 ImplicitMapModifier[ClauseKind].push_back(
3877 OMPC_MAP_MODIFIER_present);
3878 }
3879 }
3880 }
3881
3883 !Stack->isLoopControlVariable(VD).first) {
3884 if (!Stack->checkMappableExprComponentListsForDecl(
3885 VD, /*CurrentRegionOnly=*/true,
3887 StackComponents,
3889 if (SemaRef.LangOpts.OpenMP >= 50)
3890 return !StackComponents.empty();
3891 // The variable is considered used if it was mapped as an array, an array
3892 // section, an array-shaping expression, or as the variable itself.
3893 return StackComponents.size() == 1 ||
3894 llvm::all_of(
3895 llvm::drop_begin(llvm::reverse(StackComponents)),
3896 [](const OMPClauseMappableExprCommon::
3897 MappableComponent &MC) {
3898 return MC.getAssociatedDeclaration() ==
3899 nullptr &&
3900 (isa<ArraySectionExpr>(
3901 MC.getAssociatedExpression()) ||
3902 isa<OMPArrayShapingExpr>(
3903 MC.getAssociatedExpression()) ||
3904 isa<ArraySubscriptExpr>(
3905 MC.getAssociatedExpression()));
3906 });
3907 })) {
3908 bool IsFirstprivate = false;
3909 // By default lambdas are captured as firstprivates.
3910 if (const auto *RD =
3912 IsFirstprivate = RD->isLambda();
3913 IsFirstprivate =
3914 IsFirstprivate || (Stack->mustBeFirstprivate(ClauseKind) && !Res);
3915 if (IsFirstprivate) {
3916 ImplicitFirstprivate.emplace_back(E);
3917 } else {
3919 Stack->getDefaultmapModifier(ClauseKind);
3921 M, ClauseKind == OMPC_DEFAULTMAP_aggregate || Res);
3922 ImplicitMap[ClauseKind][Kind].emplace_back(E);
3923 }
3924 return;
3925 }
3926 }
3927
3928 // OpenMP [2.9.3.6, Restrictions, p.2]
3929 // A list item that appears in a reduction clause of the innermost
3930 // enclosing worksharing or parallel construct may not be accessed in an
3931 // explicit task.
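 // Illustrative example (assumed user code) of the rejected access:
 //   void foo() {
 //     int Sum = 0;
 //     #pragma omp parallel reduction(+ : Sum)
 //     {
 //       #pragma omp task
 //       Sum++; // error: reduction list item accessed in an explicit task
 //     }
 //   }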
3932 DVar = Stack->hasInnermostDSA(
3933 VD,
3934 [](OpenMPClauseKind C, bool AppliedToPointee) {
3935 return C == OMPC_reduction && !AppliedToPointee;
3936 },
3937 [](OpenMPDirectiveKind K) {
3938 return isOpenMPParallelDirective(K) ||
3940 },
3941 /*FromParent=*/true);
3942 if (isOpenMPTaskingDirective(DKind) && DVar.CKind == OMPC_reduction) {
3943 ErrorFound = true;
3944 SemaRef.Diag(ELoc, diag::err_omp_reduction_in_task);
3945 reportOriginalDsa(SemaRef, Stack, VD, DVar);
3946 return;
3947 }
3948
3949 // Define implicit data-sharing attributes for task.
3950 DVar = Stack->getImplicitDSA(VD, /*FromParent=*/false);
3951 if (((isOpenMPTaskingDirective(DKind) && DVar.CKind != OMPC_shared) ||
3952 (((Stack->getDefaultDSA() == DSA_firstprivate &&
3953 DVar.CKind == OMPC_firstprivate) ||
3954 (Stack->getDefaultDSA() == DSA_private &&
3955 DVar.CKind == OMPC_private)) &&
3956 !DVar.RefExpr)) &&
3957 !Stack->isLoopControlVariable(VD).first) {
3958 if (Stack->getDefaultDSA() == DSA_private)
3959 ImplicitPrivate.push_back(E);
3960 else
3961 ImplicitFirstprivate.push_back(E);
3962 return;
3963 }
3964
3965 // Store implicitly used globals with declare target link for parent
3966 // target.
3967 if (!isOpenMPTargetExecutionDirective(DKind) && Res &&
3968 *Res == OMPDeclareTargetDeclAttr::MT_Link) {
3969 Stack->addToParentTargetRegionLinkGlobals(E);
3970 return;
3971 }
3972 }
3973 }
3974 void VisitMemberExpr(MemberExpr *E) {
3975 if (E->isTypeDependent() || E->isValueDependent() ||
3977 return;
3978 auto *FD = dyn_cast<FieldDecl>(E->getMemberDecl());
3979 if (auto *TE = dyn_cast<CXXThisExpr>(E->getBase()->IgnoreParenCasts())) {
3980 if (!FD)
3981 return;
3982 DSAStackTy::DSAVarData DVar = Stack->getTopDSA(FD, /*FromParent=*/false);
3983 // Check if the variable has an explicit DSA set and stop the analysis
3984 // if so.
3985 if (DVar.RefExpr || !ImplicitDeclarations.insert(FD).second)
3986 return;
3987
3989 !Stack->isLoopControlVariable(FD).first &&
3990 !Stack->checkMappableExprComponentListsForDecl(
3991 FD, /*CurrentRegionOnly=*/true,
3993 StackComponents,
3995 return isa<CXXThisExpr>(
3996 cast<MemberExpr>(
3997 StackComponents.back().getAssociatedExpression())
3998 ->getBase()
3999 ->IgnoreParens());
4000 })) {
4001 // OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, C/C++, p.3]
4002 // A bit-field cannot appear in a map clause.
4003 //
4004 if (FD->isBitField())
4005 return;
4006
4007 // Check to see if the member expression is referencing a class that
4008 // has already been explicitly mapped
4009 if (Stack->isClassPreviouslyMapped(TE->getType()))
4010 return;
4011
4013 Stack->getDefaultmapModifier(OMPC_DEFAULTMAP_aggregate);
4014 OpenMPDefaultmapClauseKind ClauseKind =
4017 Modifier, /*IsAggregateOrDeclareTarget=*/true);
4018 ImplicitMap[ClauseKind][Kind].emplace_back(E);
4019 return;
4020 }
4021
4022 SourceLocation ELoc = E->getExprLoc();
4023 // OpenMP [2.9.3.6, Restrictions, p.2]
4024 // A list item that appears in a reduction clause of the innermost
4025 // enclosing worksharing or parallel construct may not be accessed in
4026 // an explicit task.
4027 DVar = Stack->hasInnermostDSA(
4028 FD,
4029 [](OpenMPClauseKind C, bool AppliedToPointee) {
4030 return C == OMPC_reduction && !AppliedToPointee;
4031 },
4032 [](OpenMPDirectiveKind K) {
4033 return isOpenMPParallelDirective(K) ||
4035 },
4036 /*FromParent=*/true);
4037 if (isOpenMPTaskingDirective(DKind) && DVar.CKind == OMPC_reduction) {
4038 ErrorFound = true;
4039 SemaRef.Diag(ELoc, diag::err_omp_reduction_in_task);
4040 reportOriginalDsa(SemaRef, Stack, FD, DVar);
4041 return;
4042 }
4043
4044 // Define implicit data-sharing attributes for task.
4045 DVar = Stack->getImplicitDSA(FD, /*FromParent=*/false);
4046 if (isOpenMPTaskingDirective(DKind) && DVar.CKind != OMPC_shared &&
4047 !Stack->isLoopControlVariable(FD).first) {
4048 // Check if there is a captured expression for the current field in the
4049 // region. Do not mark it as firstprivate unless there is no captured
4050 // expression.
4051 // TODO: try to make it firstprivate.
4052 if (DVar.CKind != OMPC_unknown)
4053 ImplicitFirstprivate.push_back(E);
4054 }
4055 return;
4056 }
4059 if (!checkMapClauseExpressionBase(SemaRef, E, CurComponents, OMPC_map,
4060 DKind, /*NoDiagnose=*/true))
4061 return;
4062 const auto *VD = cast<ValueDecl>(
4063 CurComponents.back().getAssociatedDeclaration()->getCanonicalDecl());
4064 if (!Stack->checkMappableExprComponentListsForDecl(
4065 VD, /*CurrentRegionOnly=*/true,
4066 [&CurComponents](
4068 StackComponents,
4070 auto CCI = CurComponents.rbegin();
4071 auto CCE = CurComponents.rend();
4072 for (const auto &SC : llvm::reverse(StackComponents)) {
4073 // Do both expressions have the same kind?
4074 if (CCI->getAssociatedExpression()->getStmtClass() !=
4075 SC.getAssociatedExpression()->getStmtClass())
4076 if (!((isa<ArraySectionExpr>(
4077 SC.getAssociatedExpression()) ||
4078 isa<OMPArrayShapingExpr>(
4079 SC.getAssociatedExpression())) &&
4080 isa<ArraySubscriptExpr>(
4081 CCI->getAssociatedExpression())))
4082 return false;
4083
4084 const Decl *CCD = CCI->getAssociatedDeclaration();
4085 const Decl *SCD = SC.getAssociatedDeclaration();
4086 CCD = CCD ? CCD->getCanonicalDecl() : nullptr;
4087 SCD = SCD ? SCD->getCanonicalDecl() : nullptr;
4088 if (SCD != CCD)
4089 return false;
4090 std::advance(CCI, 1);
4091 if (CCI == CCE)
4092 break;
4093 }
4094 return true;
4095 })) {
4096 Visit(E->getBase());
4097 }
4098 } else if (!TryCaptureCXXThisMembers) {
4099 Visit(E->getBase());
4100 }
4101 }
4102 void VisitOMPExecutableDirective(OMPExecutableDirective *S) {
4103 for (OMPClause *C : S->clauses()) {
4104 // Skip analysis of arguments of private clauses for task|target
4105 // directives.
4106 if (isa_and_nonnull<OMPPrivateClause>(C))
4107 continue;
4108 // Skip analysis of arguments of implicitly defined firstprivate clause
4109 // for task|target directives.
4110 // Skip analysis of arguments of implicitly defined map clause for target
4111 // directives.
4112 if (C && !((isa<OMPFirstprivateClause>(C) || isa<OMPMapClause>(C)) &&
4113 C->isImplicit() && !isOpenMPTaskingDirective(DKind))) {
4114 for (Stmt *CC : C->children()) {
4115 if (CC)
4116 Visit(CC);
4117 }
4118 }
4119 }
4120 // Check implicitly captured variables.
4121 VisitSubCaptures(S);
4122 }
4123
4124 void VisitOMPLoopTransformationDirective(OMPLoopTransformationDirective *S) {
4125 // Loop transformation directives do not introduce data sharing
4126 VisitStmt(S);
4127 }
4128
4129 void VisitCallExpr(CallExpr *S) {
4130 for (Stmt *C : S->arguments()) {
4131 if (C) {
4132 // Check implicitly captured variables in the task-based directives to
4133 // check if they must be firstprivatized.
4134 Visit(C);
4135 }
4136 }
4137 if (Expr *Callee = S->getCallee()) {
4138 auto *CI = Callee->IgnoreParenImpCasts();
4139 if (auto *CE = dyn_cast<MemberExpr>(CI))
4140 Visit(CE->getBase());
4141 else if (auto *CE = dyn_cast<DeclRefExpr>(CI))
4142 Visit(CE);
4143 }
4144 }
4145 void VisitStmt(Stmt *S) {
4146 for (Stmt *C : S->children()) {
4147 if (C) {
4148 // Check implicitly captured variables in the task-based directives to
4149 // check if they must be firstprivatized.
4150 Visit(C);
4151 }
4152 }
4153 }
4154
4155 void visitSubCaptures(CapturedStmt *S) {
4156 for (const CapturedStmt::Capture &Cap : S->captures()) {
4157 if (!Cap.capturesVariable() && !Cap.capturesVariableByCopy())
4158 continue;
4159 VarDecl *VD = Cap.getCapturedVar();
4160 // Do not try to map the variable if it or its sub-component was mapped
4161 // already.
4163 Stack->checkMappableExprComponentListsForDecl(
4164 VD, /*CurrentRegionOnly=*/true,
4166 OpenMPClauseKind) { return true; }))
4167 continue;
4169 SemaRef, VD, VD->getType().getNonLValueExprType(SemaRef.Context),
4170 Cap.getLocation(), /*RefersToCapture=*/true);
4171 Visit(DRE);
4172 }
4173 }
4174 bool isErrorFound() const { return ErrorFound; }
4175 ArrayRef<Expr *> getImplicitFirstprivate() const {
4176 return ImplicitFirstprivate;
4177 }
4178 ArrayRef<Expr *> getImplicitPrivate() const { return ImplicitPrivate; }
4180 OpenMPMapClauseKind MK) const {
4181 return ImplicitMap[DK][MK];
4182 }
4184 getImplicitMapModifier(OpenMPDefaultmapClauseKind Kind) const {
4185 return ImplicitMapModifier[Kind];
4186 }
4187 const SemaOpenMP::VarsWithInheritedDSAType &getVarsWithInheritedDSA() const {
4188 return VarsWithInheritedDSA;
4189 }
4190
4191 DSAAttrChecker(DSAStackTy *S, Sema &SemaRef, CapturedStmt *CS)
4192 : Stack(S), SemaRef(SemaRef), ErrorFound(false), CS(CS) {
4193 DKind = S->getCurrentDirective();
4194 // Process declare target link variables for the target directives.
4196 for (DeclRefExpr *E : Stack->getLinkGlobals())
4197 Visit(E);
4198 }
4199 }
4200};
4201} // namespace
4202
4203static void handleDeclareVariantConstructTrait(DSAStackTy *Stack,
4204 OpenMPDirectiveKind DKind,
4205 bool ScopeEntry) {
4208 Traits.emplace_back(llvm::omp::TraitProperty::construct_target_target);
4209 if (isOpenMPTeamsDirective(DKind))
4210 Traits.emplace_back(llvm::omp::TraitProperty::construct_teams_teams);
4211 if (isOpenMPParallelDirective(DKind))
4212 Traits.emplace_back(llvm::omp::TraitProperty::construct_parallel_parallel);
4214 Traits.emplace_back(llvm::omp::TraitProperty::construct_for_for);
4215 if (isOpenMPSimdDirective(DKind))
4216 Traits.emplace_back(llvm::omp::TraitProperty::construct_simd_simd);
4217 Stack->handleConstructTrait(Traits, ScopeEntry);
4218}
4219
4221getParallelRegionParams(Sema &SemaRef, bool LoopBoundSharing) {
4222 ASTContext &Context = SemaRef.getASTContext();
4223 QualType KmpInt32Ty =
4224 Context.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1).withConst();
4225 QualType KmpInt32PtrTy =
4226 Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
4228 std::make_pair(".global_tid.", KmpInt32PtrTy),
4229 std::make_pair(".bound_tid.", KmpInt32PtrTy),
4230 };
4231 if (LoopBoundSharing) {
4232 QualType KmpSizeTy = Context.getSizeType().withConst();
4233 Params.push_back(std::make_pair(".previous.lb.", KmpSizeTy));
4234 Params.push_back(std::make_pair(".previous.ub.", KmpSizeTy));
4235 }
4236
4237 // __context with shared vars
4238 Params.push_back(std::make_pair(StringRef(), QualType()));
4239 return Params;
4240}
4241
4244 return getParallelRegionParams(SemaRef, /*LoopBoundSharing=*/false);
4245}
4246
4249 ASTContext &Context = SemaRef.getASTContext();
4250 QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
4251 QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
4252 QualType KmpInt32PtrTy =
4253 Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
4254 QualType Args[] = {VoidPtrTy};
4256 EPI.Variadic = true;
4257 QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
4259 std::make_pair(".global_tid.", KmpInt32Ty),
4260 std::make_pair(".part_id.", KmpInt32PtrTy),
4261 std::make_pair(".privates.", VoidPtrTy),
4262 std::make_pair(
4263 ".copy_fn.",
4264 Context.getPointerType(CopyFnType).withConst().withRestrict()),
4265 std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
4266 std::make_pair(StringRef(), QualType()) // __context with shared vars
4267 };
4268 return Params;
4269}
4270
4273 ASTContext &Context = SemaRef.getASTContext();
4275 if (SemaRef.getLangOpts().OpenMPIsTargetDevice) {
4276 QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
4277 Params.push_back(std::make_pair(StringRef("dyn_ptr"), VoidPtrTy));
4278 }
4279 // __context with shared vars
4280 Params.push_back(std::make_pair(StringRef(), QualType()));
4281 return Params;
4282}
4283
4287 std::make_pair(StringRef(), QualType()) // __context with shared vars
4288 };
4289 return Params;
4290}
4291
4294 ASTContext &Context = SemaRef.getASTContext();
4295 QualType KmpInt32Ty =
4296 Context.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1).withConst();
4297 QualType KmpUInt64Ty =
4298 Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0).withConst();
4299 QualType KmpInt64Ty =
4300 Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1).withConst();
4301 QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
4302 QualType KmpInt32PtrTy =
4303 Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
4304 QualType Args[] = {VoidPtrTy};
4306 EPI.Variadic = true;
4307 QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
4309 std::make_pair(".global_tid.", KmpInt32Ty),
4310 std::make_pair(".part_id.", KmpInt32PtrTy),
4311 std::make_pair(".privates.", VoidPtrTy),
4312 std::make_pair(
4313 ".copy_fn.",
4314 Context.getPointerType(CopyFnType).withConst().withRestrict()),
4315 std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
4316 std::make_pair(".lb.", KmpUInt64Ty),
4317 std::make_pair(".ub.", KmpUInt64Ty),
4318 std::make_pair(".st.", KmpInt64Ty),
4319 std::make_pair(".liter.", KmpInt32Ty),
4320 std::make_pair(".reductions.", VoidPtrTy),
4321 std::make_pair(StringRef(), QualType()) // __context with shared vars
4322 };
4323 return Params;
4324}
4325
4327 Scope *CurScope, SourceLocation Loc) {
4329 getOpenMPCaptureRegions(Regions, DKind);
4330
4331 bool LoopBoundSharing = isOpenMPLoopBoundSharingDirective(DKind);
4332
4333 auto MarkAsInlined = [&](CapturedRegionScopeInfo *CSI) {
4334 CSI->TheCapturedDecl->addAttr(AlwaysInlineAttr::CreateImplicit(
4335 SemaRef.getASTContext(), {}, AlwaysInlineAttr::Keyword_forceinline));
4336 };
4337
4338 for (auto [Level, RKind] : llvm::enumerate(Regions)) {
4339 switch (RKind) {
4340 // All region kinds that can be returned from `getOpenMPCaptureRegions`
4341 // are listed here.
4342 case OMPD_parallel:
4344 Loc, CurScope, CR_OpenMP,
4345 getParallelRegionParams(SemaRef, LoopBoundSharing), Level);
4346 break;
4347 case OMPD_teams:
4348 SemaRef.ActOnCapturedRegionStart(Loc, CurScope, CR_OpenMP,
4349 getTeamsRegionParams(SemaRef), Level);
4350 break;
4351 case OMPD_task:
4352 SemaRef.ActOnCapturedRegionStart(Loc, CurScope, CR_OpenMP,
4353 getTaskRegionParams(SemaRef), Level);
4354 // Mark this captured region as inlined, because we don't use outlined
4355 // function directly.
4356 MarkAsInlined(SemaRef.getCurCapturedRegion());
4357 break;
4358 case OMPD_taskloop:
4359 SemaRef.ActOnCapturedRegionStart(Loc, CurScope, CR_OpenMP,
4360 getTaskloopRegionParams(SemaRef), Level);
4361 // Mark this captured region as inlined, because we don't use outlined
4362 // function directly.
4363 MarkAsInlined(SemaRef.getCurCapturedRegion());
4364 break;
4365 case OMPD_target:
4366 SemaRef.ActOnCapturedRegionStart(Loc, CurScope, CR_OpenMP,
4367 getTargetRegionParams(SemaRef), Level);
4368 break;
4369 case OMPD_unknown:
4370 SemaRef.ActOnCapturedRegionStart(Loc, CurScope, CR_OpenMP,
4371 getUnknownRegionParams(SemaRef));
4372 break;
4373 case OMPD_metadirective:
4374 case OMPD_nothing:
4375 default:
4376 llvm_unreachable("Unexpected capture region");
4377 }
4378 }
4379}
4380
4382 Scope *CurScope) {
4383 switch (DKind) {
4384 case OMPD_atomic:
4385 case OMPD_critical:
4386 case OMPD_masked:
4387 case OMPD_master:
4388 case OMPD_section:
4389 case OMPD_tile:
4390 case OMPD_unroll:
4391 case OMPD_reverse:
4392 case OMPD_interchange:
4393 case OMPD_assume:
4394 break;
4395 default:
4396 processCapturedRegions(SemaRef, DKind, CurScope,
4397 DSAStack->getConstructLoc());
4398 break;
4399 }
4400
4401 DSAStack->setContext(SemaRef.CurContext);
4402 handleDeclareVariantConstructTrait(DSAStack, DKind, /*ScopeEntry=*/true);
4403}
4404
4405int SemaOpenMP::getNumberOfConstructScopes(unsigned Level) const {
4406 return getOpenMPCaptureLevels(DSAStack->getDirective(Level));
4407}
4408
4411 getOpenMPCaptureRegions(CaptureRegions, DKind);
4412 return CaptureRegions.size();
4413}
4414
4416 Expr *CaptureExpr, bool WithInit,
4417 DeclContext *CurContext,
4418 bool AsExpression) {
4419 assert(CaptureExpr);
4420 ASTContext &C = S.getASTContext();
4421 Expr *Init = AsExpression ? CaptureExpr : CaptureExpr->IgnoreImpCasts();
4422 QualType Ty = Init->getType();
4423 if (CaptureExpr->getObjectKind() == OK_Ordinary && CaptureExpr->isGLValue()) {
4424 if (S.getLangOpts().CPlusPlus) {
4425 Ty = C.getLValueReferenceType(Ty);
4426 } else {
4427 Ty = C.getPointerType(Ty);
4428 ExprResult Res =
4429 S.CreateBuiltinUnaryOp(CaptureExpr->getExprLoc(), UO_AddrOf, Init);
4430 if (!Res.isUsable())
4431 return nullptr;
4432 Init = Res.get();
4433 }
4434 WithInit = true;
4435 }
4436 auto *CED = OMPCapturedExprDecl::Create(C, CurContext, Id, Ty,
4437 CaptureExpr->getBeginLoc());
4438 if (!WithInit)
4439 CED->addAttr(OMPCaptureNoInitAttr::CreateImplicit(C));
4440 CurContext->addHiddenDecl(CED);
4442 S.AddInitializerToDecl(CED, Init, /*DirectInit=*/false);
4443 return CED;
4444}
4445
4446static DeclRefExpr *buildCapture(Sema &S, ValueDecl *D, Expr *CaptureExpr,
4447 bool WithInit) {
4449 if (VarDecl *VD = S.OpenMP().isOpenMPCapturedDecl(D))
4450 CD = cast<OMPCapturedExprDecl>(VD);
4451 else
4452 CD = buildCaptureDecl(S, D->getIdentifier(), CaptureExpr, WithInit,
4453 S.CurContext,
4454 /*AsExpression=*/false);
4455 return buildDeclRefExpr(S, CD, CD->getType().getNonReferenceType(),
4456 CaptureExpr->getExprLoc());
4457}
4458
4459static ExprResult buildCapture(Sema &S, Expr *CaptureExpr, DeclRefExpr *&Ref,
4460 StringRef Name) {
4461 CaptureExpr = S.DefaultLvalueConversion(CaptureExpr).get();
4462 if (!Ref) {
4464 S, &S.getASTContext().Idents.get(Name), CaptureExpr,
4465 /*WithInit=*/true, S.CurContext, /*AsExpression=*/true);
4466 Ref = buildDeclRefExpr(S, CD, CD->getType().getNonReferenceType(),
4467 CaptureExpr->getExprLoc());
4468 }
4469 ExprResult Res = Ref;
4470 if (!S.getLangOpts().CPlusPlus &&
4471 CaptureExpr->getObjectKind() == OK_Ordinary && CaptureExpr->isGLValue() &&
4472 Ref->getType()->isPointerType()) {
4473 Res = S.CreateBuiltinUnaryOp(CaptureExpr->getExprLoc(), UO_Deref, Ref);
4474 if (!Res.isUsable())
4475 return ExprError();
4476 }
4477 return S.DefaultLvalueConversion(Res.get());
4478}
4479
4480namespace {
4481// OpenMP directives parsed in this section are represented as a
4482// CapturedStatement with an associated statement. If a syntax error
4483// is detected during the parsing of the associated statement, the
4484// compiler must abort processing and close the CapturedStatement.
4485//
4486// Combined directives such as 'target parallel' have more than one
4487// nested CapturedStatements. This RAII ensures that we unwind out
4488// of all the nested CapturedStatements when an error is found.
4489class CaptureRegionUnwinderRAII {
4490private:
4491 Sema &S;
4492 bool &ErrorFound;
4493 OpenMPDirectiveKind DKind = OMPD_unknown;
4494
4495public:
4496 CaptureRegionUnwinderRAII(Sema &S, bool &ErrorFound,
4497 OpenMPDirectiveKind DKind)
4498 : S(S), ErrorFound(ErrorFound), DKind(DKind) {}
4499 ~CaptureRegionUnwinderRAII() {
4500 if (ErrorFound) {
4501 int ThisCaptureLevel = S.OpenMP().getOpenMPCaptureLevels(DKind);
4502 while (--ThisCaptureLevel >= 0)
4504 }
4505 }
4506};
4507} // namespace
4508
4510 // Capture variables captured by reference in lambdas for target-based
4511 // directives.
4513 (isOpenMPTargetExecutionDirective(DSAStack->getCurrentDirective()) ||
4515 DSAStack->getCurrentDirective()))) {
4516 QualType Type = V->getType();
4517 if (const auto *RD = Type.getCanonicalType()
4518 .getNonReferenceType()
4519 ->getAsCXXRecordDecl()) {
4520 bool SavedForceCaptureByReferenceInTargetExecutable =
4521 DSAStack->isForceCaptureByReferenceInTargetExecutable();
4522 DSAStack->setForceCaptureByReferenceInTargetExecutable(
4523 /*V=*/true);
4524 if (RD->isLambda()) {
4525 llvm::DenseMap<const ValueDecl *, FieldDecl *> Captures;
4526 FieldDecl *ThisCapture;
4527 RD->getCaptureFields(Captures, ThisCapture);
4528 for (const LambdaCapture &LC : RD->captures()) {
4529 if (LC.getCaptureKind() == LCK_ByRef) {
4530 VarDecl *VD = cast<VarDecl>(LC.getCapturedVar());
4531 DeclContext *VDC = VD->getDeclContext();
4532 if (!VDC->Encloses(SemaRef.CurContext))
4533 continue;
4534 SemaRef.MarkVariableReferenced(LC.getLocation(), VD);
4535 } else if (LC.getCaptureKind() == LCK_This) {
4537 if (!ThisTy.isNull() && getASTContext().typesAreCompatible(
4538 ThisTy, ThisCapture->getType()))
4539 SemaRef.CheckCXXThisCapture(LC.getLocation());
4540 }
4541 }
4542 }
4543 DSAStack->setForceCaptureByReferenceInTargetExecutable(
4544 SavedForceCaptureByReferenceInTargetExecutable);
4545 }
4546 }
4547}
4548
4550 const ArrayRef<OMPClause *> Clauses) {
4551 const OMPOrderedClause *Ordered = nullptr;
4552 const OMPOrderClause *Order = nullptr;
4553
4554 for (const OMPClause *Clause : Clauses) {
4555 if (Clause->getClauseKind() == OMPC_ordered)
4556 Ordered = cast<OMPOrderedClause>(Clause);
4557 else if (Clause->getClauseKind() == OMPC_order) {
4558 Order = cast<OMPOrderClause>(Clause);
4559 if (Order->getKind() != OMPC_ORDER_concurrent)
4560 Order = nullptr;
4561 }
4562 if (Ordered && Order)
4563 break;
4564 }
4565
4566 if (Ordered && Order) {
4567 S.Diag(Order->getKindKwLoc(),
4568 diag::err_omp_simple_clause_incompatible_with_ordered)
4569 << getOpenMPClauseName(OMPC_order)
4570 << getOpenMPSimpleClauseTypeName(OMPC_order, OMPC_ORDER_concurrent)
4571 << SourceRange(Order->getBeginLoc(), Order->getEndLoc());
4572 S.Diag(Ordered->getBeginLoc(), diag::note_omp_ordered_param)
4573 << 0 << SourceRange(Ordered->getBeginLoc(), Ordered->getEndLoc());
4574 return true;
4575 }
4576 return false;
4577}
4578
4579StmtResult SemaOpenMP::ActOnOpenMPRegionEnd(StmtResult S,
4580 ArrayRef<OMPClause *> Clauses) {
4581 handleDeclareVariantConstructTrait(DSAStack, DSAStack->getCurrentDirective(),
4582 /*ScopeEntry=*/false);
4583 if (!isOpenMPCapturingDirective(DSAStack->getCurrentDirective()))
4584 return S;
4585
4586 bool ErrorFound = false;
4587 CaptureRegionUnwinderRAII CaptureRegionUnwinder(
4588 SemaRef, ErrorFound, DSAStack->getCurrentDirective());
4589 if (!S.isUsable()) {
4590 ErrorFound = true;
4591 return StmtError();
4592 }
4593
4595 getOpenMPCaptureRegions(CaptureRegions, DSAStack->getCurrentDirective());
4596 OMPOrderedClause *OC = nullptr;
4597 OMPScheduleClause *SC = nullptr;
4598 SmallVector<const OMPLinearClause *, 4> LCs;
4599 SmallVector<const OMPClauseWithPreInit *, 4> PICs;
4600 // This is required for proper codegen.
4601 for (OMPClause *Clause : Clauses) {
4602 if (!getLangOpts().OpenMPSimd &&
4603 (isOpenMPTaskingDirective(DSAStack->getCurrentDirective()) ||
4604 DSAStack->getCurrentDirective() == OMPD_target) &&
4605 Clause->getClauseKind() == OMPC_in_reduction) {
4606 // Capture taskgroup task_reduction descriptors inside the tasking regions
4607 // with the corresponding in_reduction items.
4608 auto *IRC = cast<OMPInReductionClause>(Clause);
4609 for (Expr *E : IRC->taskgroup_descriptors())
4610 if (E)
4612 }
4613 if (isOpenMPPrivate(Clause->getClauseKind()) ||
4614 Clause->getClauseKind() == OMPC_copyprivate ||
4615 (getLangOpts().OpenMPUseTLS &&
4616 getASTContext().getTargetInfo().isTLSSupported() &&
4617 Clause->getClauseKind() == OMPC_copyin)) {
4618 DSAStack->setForceVarCapturing(Clause->getClauseKind() == OMPC_copyin);
4619 // Mark all variables in private list clauses as used in inner region.
4620 for (Stmt *VarRef : Clause->children()) {
4621 if (auto *E = cast_or_null<Expr>(VarRef)) {
4623 }
4624 }
4625 DSAStack->setForceVarCapturing(/*V=*/false);
4626 } else if (CaptureRegions.size() > 1 ||
4627 CaptureRegions.back() != OMPD_unknown) {
4628 if (auto *C = OMPClauseWithPreInit::get(Clause))
4629 PICs.push_back(C);
4630 if (auto *C = OMPClauseWithPostUpdate::get(Clause)) {
4631 if (Expr *E = C->getPostUpdateExpr())
4633 }
4634 }
4635 if (Clause->getClauseKind() == OMPC_schedule)
4636 SC = cast<OMPScheduleClause>(Clause);
4637 else if (Clause->getClauseKind() == OMPC_ordered)
4638 OC = cast<OMPOrderedClause>(Clause);
4639 else if (Clause->getClauseKind() == OMPC_linear)
4640 LCs.push_back(cast<OMPLinearClause>(Clause));
4641 }
4642 // Capture allocator expressions if used.
4643 for (Expr *E : DSAStack->getInnerAllocators())
4645 // OpenMP, 2.7.1 Loop Construct, Restrictions
4646 // The nonmonotonic modifier cannot be specified if an ordered clause is
4647 // specified.
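 // Illustrative example (assumed user code) of the rejected combination:
 //   #pragma omp for schedule(nonmonotonic : dynamic) ordered
 //   for (int I = 0; I < 10; ++I) {
 //     #pragma omp ordered
 //     {}
 //   }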
4648 if (SC &&
4649 (SC->getFirstScheduleModifier() == OMPC_SCHEDULE_MODIFIER_nonmonotonic ||
4651 OMPC_SCHEDULE_MODIFIER_nonmonotonic) &&
4652 OC) {
4653 Diag(SC->getFirstScheduleModifier() == OMPC_SCHEDULE_MODIFIER_nonmonotonic
4656 diag::err_omp_simple_clause_incompatible_with_ordered)
4657 << getOpenMPClauseName(OMPC_schedule)
4658 << getOpenMPSimpleClauseTypeName(OMPC_schedule,
4659 OMPC_SCHEDULE_MODIFIER_nonmonotonic)
4660 << SourceRange(OC->getBeginLoc(), OC->getEndLoc());
4661 ErrorFound = true;
4662 }
4663 // OpenMP 5.0, 2.9.2 Worksharing-Loop Construct, Restrictions.
4664 // If an order(concurrent) clause is present, an ordered clause may not appear
4665 // on the same directive.
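 // Illustrative example (assumed user code) of the rejected combination:
 //   #pragma omp for order(concurrent) ordered
 //   for (int I = 0; I < 10; ++I)
 //     ;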
4666 if (checkOrderedOrderSpecified(SemaRef, Clauses))
4667 ErrorFound = true;
4668 if (!LCs.empty() && OC && OC->getNumForLoops()) {
4669 for (const OMPLinearClause *C : LCs) {
4670 Diag(C->getBeginLoc(), diag::err_omp_linear_ordered)
4671 << SourceRange(OC->getBeginLoc(), OC->getEndLoc());
4672 }
4673 ErrorFound = true;
4674 }
4675 if (isOpenMPWorksharingDirective(DSAStack->getCurrentDirective()) &&
4676 isOpenMPSimdDirective(DSAStack->getCurrentDirective()) && OC &&
4677 OC->getNumForLoops()) {
4678 Diag(OC->getBeginLoc(), diag::err_omp_ordered_simd)
4679 << getOpenMPDirectiveName(DSAStack->getCurrentDirective());
4680 ErrorFound = true;
4681 }
4682 if (ErrorFound) {
4683 return StmtError();
4684 }
4685 StmtResult SR = S;
4686 unsigned CompletedRegions = 0;
4687 for (OpenMPDirectiveKind ThisCaptureRegion : llvm::reverse(CaptureRegions)) {
4688 // Mark all variables in private list clauses as used in inner region.
4689 // Required for proper codegen of combined directives.
4690 // TODO: add processing for other clauses.
4691 if (ThisCaptureRegion != OMPD_unknown) {
4692 for (const clang::OMPClauseWithPreInit *C : PICs) {
4693 OpenMPDirectiveKind CaptureRegion = C->getCaptureRegion();
4694 // Find the particular capture region for the clause if the
4695 // directive is a combined one with multiple capture regions.
4696 // If the directive is not a combined one, the capture region
4697 // associated with the clause is OMPD_unknown and is generated
4698 // only once.
4699 if (CaptureRegion == ThisCaptureRegion ||
4700 CaptureRegion == OMPD_unknown) {
4701 if (auto *DS = cast_or_null<DeclStmt>(C->getPreInitStmt())) {
4702 for (Decl *D : DS->decls())
4704 cast<VarDecl>(D));
4705 }
4706 }
4707 }
4708 }
4709 if (ThisCaptureRegion == OMPD_target) {
4710 // Capture allocator traits in the target region. They are used implicitly
4711 // and, thus, are not captured by default.
4712 for (OMPClause *C : Clauses) {
4713 if (const auto *UAC = dyn_cast<OMPUsesAllocatorsClause>(C)) {
4714 for (unsigned I = 0, End = UAC->getNumberOfAllocators(); I < End;
4715 ++I) {
4716 OMPUsesAllocatorsClause::Data D = UAC->getAllocatorData(I);
4717 if (Expr *E = D.AllocatorTraits)
4719 }
4720 continue;
4721 }
4722 }
4723 }
4724 if (ThisCaptureRegion == OMPD_parallel) {
4725 // Capture temp arrays for inscan reductions and locals in aligned
4726 // clauses.
4727 for (OMPClause *C : Clauses) {
4728 if (auto *RC = dyn_cast<OMPReductionClause>(C)) {
4729 if (RC->getModifier() != OMPC_REDUCTION_inscan)
4730 continue;
4731 for (Expr *E : RC->copy_array_temps())
4732 if (E)
4734 }
4735 if (auto *AC = dyn_cast<OMPAlignedClause>(C)) {
4736 for (Expr *E : AC->varlist())
4738 }
4739 }
4740 }
4741 if (++CompletedRegions == CaptureRegions.size())
4742 DSAStack->setBodyComplete();
4743 SR = SemaRef.ActOnCapturedRegionEnd(SR.get());
4744 }
4745 return SR;
4746}
4747
4748static bool checkCancelRegion(Sema &SemaRef, OpenMPDirectiveKind CurrentRegion,
4749 OpenMPDirectiveKind CancelRegion,
4750 SourceLocation StartLoc) {
4751 // CancelRegion is only needed for cancel and cancellation_point.
4752 if (CurrentRegion != OMPD_cancel && CurrentRegion != OMPD_cancellation_point)
4753 return false;
4754
4755 if (CancelRegion == OMPD_parallel || CancelRegion == OMPD_for ||
4756 CancelRegion == OMPD_sections || CancelRegion == OMPD_taskgroup)
4757 return false;
4758
4759 SemaRef.Diag(StartLoc, diag::err_omp_wrong_cancel_region)
4760 << getOpenMPDirectiveName(CancelRegion);
4761 return true;
4762}
4763
4764static bool checkNestingOfRegions(Sema &SemaRef, const DSAStackTy *Stack,
4765 OpenMPDirectiveKind CurrentRegion,
4766 const DeclarationNameInfo &CurrentName,
4767 OpenMPDirectiveKind CancelRegion,
4768 OpenMPBindClauseKind BindKind,
4769 SourceLocation StartLoc) {
4770 if (!Stack->getCurScope())
4771 return false;
4772
4773 OpenMPDirectiveKind ParentRegion = Stack->getParentDirective();
4774 OpenMPDirectiveKind OffendingRegion = ParentRegion;
4775 bool NestingProhibited = false;
4776 bool CloseNesting = true;
4777 bool OrphanSeen = false;
4778 enum {
4779 NoRecommend,
4780 ShouldBeInParallelRegion,
4781 ShouldBeInOrderedRegion,
4782 ShouldBeInTargetRegion,
4783 ShouldBeInTeamsRegion,
4784 ShouldBeInLoopSimdRegion,
4785 } Recommend = NoRecommend;
4786
4789 getLeafOrCompositeConstructs(ParentRegion, LeafOrComposite);
4790 OpenMPDirectiveKind EnclosingConstruct = ParentLOC.back();
4791
4792 if (SemaRef.LangOpts.OpenMP >= 51 && Stack->isParentOrderConcurrent() &&
4793 CurrentRegion != OMPD_simd && CurrentRegion != OMPD_loop &&
4794 CurrentRegion != OMPD_parallel &&
4795 !isOpenMPCombinedParallelADirective(CurrentRegion)) {
4796 SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region_order)
4797 << getOpenMPDirectiveName(CurrentRegion);
4798 return true;
4799 }
4800 if (isOpenMPSimdDirective(ParentRegion) &&
4801 ((SemaRef.LangOpts.OpenMP <= 45 && CurrentRegion != OMPD_ordered) ||
4802 (SemaRef.LangOpts.OpenMP >= 50 && CurrentRegion != OMPD_ordered &&
4803 CurrentRegion != OMPD_simd && CurrentRegion != OMPD_atomic &&
4804 CurrentRegion != OMPD_scan))) {
4805 // OpenMP [2.16, Nesting of Regions]
4806 // OpenMP constructs may not be nested inside a simd region.
4807 // OpenMP [2.8.1,simd Construct, Restrictions]
4808 // An ordered construct with the simd clause is the only OpenMP
4809 // construct that can appear in the simd region.
4810 // Allowing a SIMD construct nested in another SIMD construct is an
4811 // extension. The OpenMP 4.5 spec does not allow it. Issue a warning
4812 // message.
4813 // OpenMP 5.0 [2.9.3.1, simd Construct, Restrictions]
4814 // The only OpenMP constructs that can be encountered during execution of
4815 // a simd region are the atomic construct, the loop construct, the simd
4816 // construct and the ordered construct with the simd clause.
4817 SemaRef.Diag(StartLoc, (CurrentRegion != OMPD_simd)
4818 ? diag::err_omp_prohibited_region_simd
4819 : diag::warn_omp_nesting_simd)
4820 << (SemaRef.LangOpts.OpenMP >= 50 ? 1 : 0);
4821 return CurrentRegion != OMPD_simd;
4822 }
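 // Illustrative example (hypothetical user code): most constructs may not be
 // nested in a simd region; only simd-in-simd is downgraded to a warning.
 //   #pragma omp simd
 //   for (int i = 0; i < n; ++i) {
 //   #pragma omp parallel for // error: prohibited inside a simd region
 //     for (int j = 0; j < n; ++j)
 //       work(i, j);
 //   }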
4823 if (EnclosingConstruct == OMPD_atomic) {
4824 // OpenMP [2.16, Nesting of Regions]
4825 // OpenMP constructs may not be nested inside an atomic region.
4826 SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region_atomic);
4827 return true;
4828 }
4829 if (CurrentRegion == OMPD_section) {
4830 // OpenMP [2.7.2, sections Construct, Restrictions]
4831 // Orphaned section directives are prohibited. That is, the section
4832 // directives must appear within the sections construct and must not be
4833 // encountered elsewhere in the sections region.
4834 if (EnclosingConstruct != OMPD_sections) {
4835 SemaRef.Diag(StartLoc, diag::err_omp_orphaned_section_directive)
4836 << (ParentRegion != OMPD_unknown)
4837 << getOpenMPDirectiveName(ParentRegion);
4838 return true;
4839 }
4840 return false;
4841 }
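 // Illustrative example (hypothetical user code): a 'section' directive must
 // appear lexically inside a 'sections' construct, so the following is
 // rejected:
 //   void f() {
 //   #pragma omp section // error: orphaned 'section' directive
 //     work();
 //   }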
4842 // Allow some constructs (except teams and cancellation constructs) to be
4843 // orphaned (they could be used in functions, called from OpenMP regions
4844 // with the required preconditions).
4845 if (ParentRegion == OMPD_unknown &&
4846 !isOpenMPNestingTeamsDirective(CurrentRegion) &&
4847 CurrentRegion != OMPD_cancellation_point &&
4848 CurrentRegion != OMPD_cancel && CurrentRegion != OMPD_scan)
4849 return false;
4850 // Checks needed for mapping "loop" construct. Please check mapLoopConstruct
4851 // for a detailed explanation
4852 if (SemaRef.LangOpts.OpenMP >= 50 && CurrentRegion == OMPD_loop &&
4853 (BindKind == OMPC_BIND_parallel || BindKind == OMPC_BIND_teams) &&
4854 (isOpenMPWorksharingDirective(ParentRegion) ||
4855 EnclosingConstruct == OMPD_loop)) {
4856 int ErrorMsgNumber = (BindKind == OMPC_BIND_parallel) ? 1 : 4;
4857 SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region)
4858 << true << getOpenMPDirectiveName(ParentRegion) << ErrorMsgNumber
4859 << getOpenMPDirectiveName(CurrentRegion);
4860 return true;
4861 }
4862 if (CurrentRegion == OMPD_cancellation_point ||
4863 CurrentRegion == OMPD_cancel) {
4864 // OpenMP [2.16, Nesting of Regions]
4865 // A cancellation point construct for which construct-type-clause is
4866 // taskgroup must be nested inside a task construct. A cancellation
4867 // point construct for which construct-type-clause is not taskgroup must
4868 // be closely nested inside an OpenMP construct that matches the type
4869 // specified in construct-type-clause.
4870 // A cancel construct for which construct-type-clause is taskgroup must be
4871 // nested inside a task construct. A cancel construct for which
4872 // construct-type-clause is not taskgroup must be closely nested inside an
4873 // OpenMP construct that matches the type specified in
4874 // construct-type-clause.
4875 ArrayRef<OpenMPDirectiveKind> Leafs = getLeafConstructsOrSelf(ParentRegion);
4876 if (CancelRegion == OMPD_taskgroup) {
4877 NestingProhibited = EnclosingConstruct != OMPD_task &&
4878 (SemaRef.getLangOpts().OpenMP < 50 ||
4879 EnclosingConstruct != OMPD_taskloop);
4880 } else if (CancelRegion == OMPD_sections) {
4881 NestingProhibited = EnclosingConstruct != OMPD_section &&
4882 EnclosingConstruct != OMPD_sections;
4883 } else {
4884 NestingProhibited = CancelRegion != Leafs.back();
4885 }
4886 OrphanSeen = ParentRegion == OMPD_unknown;
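 // Illustrative example (hypothetical user code): the construct-type-clause of
 // 'cancel' must match the innermost enclosing applicable construct, e.g.
 //   #pragma omp parallel
 //   {
 //   #pragma omp cancel for // error: does not match the enclosing 'parallel'
 //   }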
4887 } else if (CurrentRegion == OMPD_master || CurrentRegion == OMPD_masked) {
4888 // OpenMP 5.1 [2.22, Nesting of Regions]
4889 // A masked region may not be closely nested inside a worksharing, loop,
4890 // atomic, task, or taskloop region.
4891 NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
4892 isOpenMPGenericLoopDirective(ParentRegion) ||
4893 isOpenMPTaskingDirective(ParentRegion);
4894 } else if (CurrentRegion == OMPD_critical && CurrentName.getName()) {
4895 // OpenMP [2.16, Nesting of Regions]
4896 // A critical region may not be nested (closely or otherwise) inside a
4897 // critical region with the same name. Note that this restriction is not
4898 // sufficient to prevent deadlock.
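 // Illustrative example (hypothetical user code) of the self-deadlock pattern
 // diagnosed below:
 //   #pragma omp critical(update_lock)
 //   {
 //   #pragma omp critical(update_lock) // error: same name as enclosing region
 //     work();
 //   }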
4899 SourceLocation PreviousCriticalLoc;
4900 bool DeadLock = Stack->hasDirective(
4901 [CurrentName, &PreviousCriticalLoc](OpenMPDirectiveKind K,
4902 const DeclarationNameInfo &DNI,
4903 SourceLocation Loc) {
4904 if (K == OMPD_critical && DNI.getName() == CurrentName.getName()) {
4905 PreviousCriticalLoc = Loc;
4906 return true;
4907 }
4908 return false;
4909 },
4910 false /* skip top directive */);
4911 if (DeadLock) {
4912 SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region_critical_same_name)
4913 << CurrentName.getName();
4914 if (PreviousCriticalLoc.isValid())
4915 SemaRef.Diag(PreviousCriticalLoc,
4916 diag::note_omp_previous_critical_region);
4917 return true;
4918 }
4919 } else if (CurrentRegion == OMPD_barrier || CurrentRegion == OMPD_scope) {
4920 // OpenMP 5.1 [2.22, Nesting of Regions]
4921 // A scope region may not be closely nested inside a worksharing, loop,
4922 // task, taskloop, critical, ordered, atomic, or masked region.
4923 // OpenMP 5.1 [2.22, Nesting of Regions]
4924 // A barrier region may not be closely nested inside a worksharing, loop,
4925 // task, taskloop, critical, ordered, atomic, or masked region.
4926 NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
4927 isOpenMPGenericLoopDirective(ParentRegion) ||
4928 isOpenMPTaskingDirective(ParentRegion) ||
4929 llvm::is_contained({OMPD_masked, OMPD_master,
4930 OMPD_critical, OMPD_ordered},
4931 EnclosingConstruct);
4932 } else if (isOpenMPWorksharingDirective(CurrentRegion) &&
4933 !isOpenMPParallelDirective(CurrentRegion) &&
4934 !isOpenMPTeamsDirective(CurrentRegion)) {
4935 // OpenMP 5.1 [2.22, Nesting of Regions]
4936 // A loop region that binds to a parallel region or a worksharing region
4937 // may not be closely nested inside a worksharing, loop, task, taskloop,
4938 // critical, ordered, atomic, or masked region.
4939 NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
4940 isOpenMPGenericLoopDirective(ParentRegion) ||
4941 isOpenMPTaskingDirective(ParentRegion) ||
4942 llvm::is_contained({OMPD_masked, OMPD_master,
4943 OMPD_critical, OMPD_ordered},
4944 EnclosingConstruct);
4945 Recommend = ShouldBeInParallelRegion;
4946 } else if (CurrentRegion == OMPD_ordered) {
4947 // OpenMP [2.16, Nesting of Regions]
4948 // An ordered region may not be closely nested inside a critical,
4949 // atomic, or explicit task region.
4950 // An ordered region must be closely nested inside a loop region (or
4951 // parallel loop region) with an ordered clause.
4952 // OpenMP [2.8.1,simd Construct, Restrictions]
4953 // An ordered construct with the simd clause is the only OpenMP construct
4954 // that can appear in the simd region.
4955 NestingProhibited = EnclosingConstruct == OMPD_critical ||
4956 isOpenMPTaskingDirective(ParentRegion) ||
4957 !(isOpenMPSimdDirective(ParentRegion) ||
4958 Stack->isParentOrderedRegion());
4959 Recommend = ShouldBeInOrderedRegion;
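 // Illustrative example (hypothetical user code): an 'ordered' region needs an
 // enclosing worksharing loop (or loop SIMD) with an 'ordered' clause, e.g.
 //   #pragma omp for ordered
 //   for (int i = 0; i < n; ++i) {
 //   #pragma omp ordered
 //     work(i);
 //   }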
4960 } else if (isOpenMPNestingTeamsDirective(CurrentRegion)) {
4961 // OpenMP [2.16, Nesting of Regions]
4962 // If specified, a teams construct must be contained within a target
4963 // construct.
4964 NestingProhibited =
4965 (SemaRef.LangOpts.OpenMP <= 45 && EnclosingConstruct != OMPD_target) ||
4966 (SemaRef.LangOpts.OpenMP >= 50 && EnclosingConstruct != OMPD_unknown &&
4967 EnclosingConstruct != OMPD_target);
4968 OrphanSeen = ParentRegion == OMPD_unknown;
4969 Recommend = ShouldBeInTargetRegion;
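 // Illustrative example (hypothetical user code): 'teams' is accepted when it
 // is strictly nested in a 'target' region (or not nested at all), e.g.
 //   #pragma omp target
 //   #pragma omp teams
 //   work();
 // Nesting 'teams' inside, say, a 'parallel' region is rejected.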
4970 } else if (CurrentRegion == OMPD_scan) {
4971 if (SemaRef.LangOpts.OpenMP >= 50) {
4972 // OpenMP spec 5.0 and 5.1 require scan to be directly enclosed by for,
4973 // simd, or for simd. This has to take into account combined directives.
4974 // In 5.2 this seems to be implied by the fact that the specified
4975 // separated constructs are do, for, and simd.
4976 NestingProhibited = !llvm::is_contained(
4977 {OMPD_for, OMPD_simd, OMPD_for_simd}, EnclosingConstruct);
4978 } else {
4979 NestingProhibited = true;
4980 }
4981 OrphanSeen = ParentRegion == OMPD_unknown;
4982 Recommend = ShouldBeInLoopSimdRegion;
4983 }
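 // Illustrative example (hypothetical user code): 'scan' is accepted only when
 // closely nested in a for, simd, or for simd region, e.g.
 //   #pragma omp for reduction(inscan, + : sum)
 //   for (int i = 0; i < n; ++i) {
 //     sum += a[i];
 //   #pragma omp scan inclusive(sum)
 //     b[i] = sum;
 //   }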
4984 if (!NestingProhibited && !isOpenMPTargetExecutionDirective(CurrentRegion) &&
4985 !isOpenMPTargetDataManagementDirective(CurrentRegion) &&
4986 EnclosingConstruct == OMPD_teams) {
4987 // OpenMP [5.1, 2.22, Nesting of Regions]
4988 // distribute, distribute simd, distribute parallel worksharing-loop,
4989 // distribute parallel worksharing-loop SIMD, loop, parallel regions,
4990 // including any parallel regions arising from combined constructs,
4991 // omp_get_num_teams() regions, and omp_get_team_num() regions are the
4992 // only OpenMP regions that may be strictly nested inside the teams
4993 // region.
4994 //
4995 // As an extension, we permit atomic within teams as well.
4996 NestingProhibited = !isOpenMPParallelDirective(CurrentRegion) &&
4997 !isOpenMPDistributeDirective(CurrentRegion) &&
4998 CurrentRegion != OMPD_loop &&
4999 !(SemaRef.getLangOpts().OpenMPExtensions &&
5000 CurrentRegion == OMPD_atomic);
5001 Recommend = ShouldBeInParallelRegion;
5002 }
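 // Illustrative example (hypothetical user code): only parallel, distribute,
 // and loop regions (plus 'atomic' as an extension) may be strictly nested in
 // a 'teams' region, so the following is rejected:
 //   #pragma omp target teams
 //   #pragma omp single // error: may not be strictly nested inside 'teams'
 //   work();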
5003 if (!NestingProhibited && CurrentRegion == OMPD_loop) {
5004 // OpenMP [5.1, 2.11.7, loop Construct, Restrictions]
5005 // If the bind clause is present on the loop construct and binding is
5006 // teams then the corresponding loop region must be strictly nested inside
5007 // a teams region.
5008 NestingProhibited =
5009 BindKind == OMPC_BIND_teams && EnclosingConstruct != OMPD_teams;
5010 Recommend = ShouldBeInTeamsRegion;
5011 }
5012 if (!NestingProhibited && isOpenMPNestingDistributeDirective(CurrentRegion)) {
5013 // OpenMP 4.5 [2.17 Nesting of Regions]
5014 // The region associated with the distribute construct must be strictly
5015 // nested inside a teams region
5016 NestingProhibited = EnclosingConstruct != OMPD_teams;
5017 Recommend = ShouldBeInTeamsRegion;
5018 }
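 // Illustrative example (hypothetical user code): 'distribute' must be
 // strictly nested inside a 'teams' region, so the following is rejected:
 //   #pragma omp parallel
 //   {
 //   #pragma omp distribute // error: closest enclosing region is 'parallel'
 //     for (int i = 0; i < n; ++i)
 //       work(i);
 //   }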
5019 if (!NestingProhibited &&
5020 (isOpenMPTargetExecutionDirective(CurrentRegion) ||
5021 isOpenMPTargetDataManagementDirective(CurrentRegion))) {
5022 // OpenMP 4.5 [2.17 Nesting of Regions]
5023 // If a target, target update, target data, target enter data, or
5024 // target exit data construct is encountered during execution of a
5025 // target region, the behavior is unspecified.
5026 NestingProhibited = Stack->hasDirective(
5027 [&OffendingRegion](OpenMPDirectiveKind K, const DeclarationNameInfo &,
5028 SourceLocation) {
5029 if (isOpenMPTargetExecutionDirective(K)) {
5030 OffendingRegion = K;
5031 return true;
5032 }
5033 return false;
5034 },
5035 false /* don't skip top directive */);
5036 CloseNesting = false;
5037 }
5038 if (NestingProhibited) {
5039 if (OrphanSeen) {
5040 SemaRef.Diag(StartLoc, diag::err_omp_orphaned_device_directive)
5041 << getOpenMPDirectiveName(CurrentRegion) << Recommend;
5042 } else {
5043 SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region)
5044 << CloseNesting << getOpenMPDirectiveName(OffendingRegion)
5045 << Recommend << getOpenMPDirectiveName(CurrentRegion);
5046 }
5047 return true;
5048 }
5049 return false;
5050}
5051
5052struct Kind2Unsigned {
5053 using argument_type = OpenMPDirectiveKind;
5054 unsigned operator()(argument_type DK) { return unsigned(DK); }
5055};
5056static bool checkIfClauses(Sema &S, OpenMPDirectiveKind Kind,
5057 ArrayRef<OMPClause *> Clauses,
5058 ArrayRef<OpenMPDirectiveKind> AllowedNameModifiers) {
5059 bool ErrorFound = false;
5060 unsigned NamedModifiersNumber = 0;
5061 llvm::IndexedMap<const OMPIfClause *, Kind2Unsigned> FoundNameModifiers;
5062 FoundNameModifiers.resize(llvm::omp::Directive_enumSize + 1);
5063 SmallVector<SourceLocation, 4> NameModifierLoc;
5064 for (const OMPClause *C : Clauses) {
5065 if (const auto *IC = dyn_cast_or_null<OMPIfClause>(C)) {
5066 // At most one if clause without a directive-name-modifier can appear on
5067 // the directive.
5068 OpenMPDirectiveKind CurNM = IC->getNameModifier();
5069 if (FoundNameModifiers[CurNM]) {
5070 S.Diag(C->getBeginLoc(), diag::err_omp_more_one_clause)
5071 << getOpenMPDirectiveName(Kind) << getOpenMPClauseName(OMPC_if)
5072 << (CurNM != OMPD_unknown) << getOpenMPDirectiveName(CurNM);
5073 ErrorFound = true;
5074 } else if (CurNM != OMPD_unknown) {
5075 NameModifierLoc.push_back(IC->getNameModifierLoc());
5076 ++NamedModifiersNumber;
5077 }
5078 FoundNameModifiers[CurNM] = IC;
5079 if (CurNM == OMPD_unknown)
5080 continue;
5081 // Check if the specified name modifier is allowed for the current
5082 // directive.
5083 // At most one if clause with the particular directive-name-modifier can
5084 // appear on the directive.
5085 if (!llvm::is_contained(AllowedNameModifiers, CurNM)) {
5086 S.Diag(IC->getNameModifierLoc(),
5087 diag::err_omp_wrong_if_directive_name_modifier)
5088 << getOpenMPDirectiveName(CurNM) << getOpenMPDirectiveName(Kind);
5089 ErrorFound = true;
5090 }
5091 }
5092 }
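 // Illustrative example (hypothetical user code): on a combined directive,
 // once one 'if' clause carries a directive-name-modifier, a clause without
 // one is diagnosed below, e.g.
 //   // error: the plain if(c2) needs a directive-name-modifier here
 //   #pragma omp target parallel if(target : c1) if(c2)
 //   work();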
5093 // If any if clause on the directive includes a directive-name-modifier then
5094 // all if clauses on the directive must include a directive-name-modifier.
5095 if (FoundNameModifiers[OMPD_unknown] && NamedModifiersNumber > 0) {
5096 if (NamedModifiersNumber == AllowedNameModifiers.size()) {
5097 S.Diag(FoundNameModifiers[OMPD_unknown]->getBeginLoc(),
5098 diag::err_omp_no_more_if_clause);
5099 } else {
5100 std::string Values;
5101 std::string Sep(", ");
5102 unsigned AllowedCnt = 0;
5103 unsigned TotalAllowedNum =
5104 AllowedNameModifiers.size() - NamedModifiersNumber;
5105 for (unsigned Cnt = 0, End = AllowedNameModifiers.size(); Cnt < End;
5106 ++Cnt) {
5107 OpenMPDirectiveKind NM = AllowedNameModifiers[Cnt];
5108 if (!FoundNameModifiers[NM]) {
5109 Values += "'";
5110 Values += getOpenMPDirectiveName(NM);
5111 Values += "'";
5112 if (AllowedCnt + 2 == TotalAllowedNum)
5113 Values += " or ";
5114 else if (AllowedCnt + 1 != TotalAllowedNum)