SemaOpenMP.cpp
1//===--- SemaOpenMP.cpp - Semantic Analysis for OpenMP constructs ---------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements semantic analysis for OpenMP directives and
10/// clauses.
11///
12//===----------------------------------------------------------------------===//
13
15
16#include "TreeTransform.h"
20#include "clang/AST/Decl.h"
21#include "clang/AST/DeclCXX.h"
25#include "clang/AST/StmtCXX.h"
34#include "clang/Sema/Lookup.h"
36#include "clang/Sema/Scope.h"
38#include "clang/Sema/Sema.h"
39#include "llvm/ADT/IndexedMap.h"
40#include "llvm/ADT/PointerEmbeddedInt.h"
41#include "llvm/ADT/STLExtras.h"
42#include "llvm/ADT/Sequence.h"
43#include "llvm/ADT/SetVector.h"
44#include "llvm/ADT/SmallSet.h"
45#include "llvm/ADT/StringExtras.h"
46#include "llvm/Frontend/OpenMP/OMPAssume.h"
47#include "llvm/Frontend/OpenMP/OMPConstants.h"
48#include "llvm/IR/Assumptions.h"
49#include <optional>
50
51using namespace clang;
52using namespace llvm::omp;
53
54//===----------------------------------------------------------------------===//
55// Stack of data-sharing attributes for variables
56//===----------------------------------------------------------------------===//
57
58static const Expr *checkMapClauseExpressionBase(
59 Sema &SemaRef, Expr *E,
60 OMPClauseMappableExprCommon::MappableExprComponentList &CurComponents,
61 OpenMPClauseKind CKind, OpenMPDirectiveKind DKind, bool NoDiagnose);
62
63namespace {
64/// Default data sharing attributes, which can be applied to a directive.
65enum DefaultDataSharingAttributes {
66 DSA_unspecified = 0, /// Data sharing attribute not specified.
67 DSA_none = 1 << 0, /// Default data sharing attribute 'none'.
68 DSA_shared = 1 << 1, /// Default data sharing attribute 'shared'.
69 DSA_private = 1 << 2, /// Default data sharing attribute 'private'.
70 DSA_firstprivate = 1 << 3, /// Default data sharing attribute 'firstprivate'.
71};
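// These values mirror the argument of an explicit 'default' clause. For
// example, user code such as
//   #pragma omp parallel default(none) shared(a) firstprivate(b)
// records DSA_none for the region, so any variable referenced inside it
// without an explicit or predetermined data-sharing attribute is diagnosed.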
72
73/// Stack for tracking declarations used in OpenMP directives and
74/// clauses and their data-sharing attributes.
75class DSAStackTy {
76public:
77 struct DSAVarData {
78 OpenMPDirectiveKind DKind = OMPD_unknown;
79 OpenMPClauseKind CKind = OMPC_unknown;
80 unsigned Modifier = 0;
81 const Expr *RefExpr = nullptr;
82 DeclRefExpr *PrivateCopy = nullptr;
83 SourceLocation ImplicitDSALoc;
84 bool AppliedToPointee = false;
85 DSAVarData() = default;
86 DSAVarData(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind,
87 const Expr *RefExpr, DeclRefExpr *PrivateCopy,
88 SourceLocation ImplicitDSALoc, unsigned Modifier,
89 bool AppliedToPointee)
90 : DKind(DKind), CKind(CKind), Modifier(Modifier), RefExpr(RefExpr),
91 PrivateCopy(PrivateCopy), ImplicitDSALoc(ImplicitDSALoc),
92 AppliedToPointee(AppliedToPointee) {}
93 };
94 using OperatorOffsetTy =
95 llvm::SmallVector<std::pair<Expr *, OverloadedOperatorKind>, 4>;
96 using DoacrossClauseMapTy = llvm::DenseMap<OMPClause *, OperatorOffsetTy>;
97 /// Kind of the declaration used in the uses_allocators clauses.
98 enum class UsesAllocatorsDeclKind {
99 /// Predefined allocator
100 PredefinedAllocator,
101 /// User-defined allocator
102 UserDefinedAllocator,
103 /// The declaration that represents an allocator trait.
104 AllocatorTrait,
105 };
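  // For example, in
  //   #pragma omp target uses_allocators(omp_default_mem_alloc, my_alloc(my_traits))
  // 'omp_default_mem_alloc' is a PredefinedAllocator, 'my_alloc' is a
  // UserDefinedAllocator, and 'my_traits' is an AllocatorTrait declaration
  // ('my_alloc' and 'my_traits' are just illustrative user names).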
106
107private:
108 struct DSAInfo {
109 OpenMPClauseKind Attributes = OMPC_unknown;
110 unsigned Modifier = 0;
111 /// Pointer to a reference expression and a flag which shows that the
112 /// variable is marked as lastprivate(true) or not (false).
113 llvm::PointerIntPair<const Expr *, 1, bool> RefExpr;
114 DeclRefExpr *PrivateCopy = nullptr;
115 /// true if the attribute is applied to the pointee, not the variable
116 /// itself.
117 bool AppliedToPointee = false;
118 };
119 using DeclSAMapTy = llvm::SmallDenseMap<const ValueDecl *, DSAInfo, 8>;
120 using UsedRefMapTy = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 8>;
121 using LCDeclInfo = std::pair<unsigned, VarDecl *>;
122 using LoopControlVariablesMapTy =
123 llvm::SmallDenseMap<const ValueDecl *, LCDeclInfo, 8>;
124 /// Struct that associates a component with the clause kind where they are
125 /// found.
126 struct MappedExprComponentTy {
127 OMPClauseMappableExprCommon::MappableExprComponentLists Components;
128 OpenMPClauseKind Kind = OMPC_unknown;
129 };
130 using MappedExprComponentsTy =
131 llvm::DenseMap<const ValueDecl *, MappedExprComponentTy>;
132 using CriticalsWithHintsTy =
133 llvm::StringMap<std::pair<const OMPCriticalDirective *, llvm::APSInt>>;
134 struct ReductionData {
135 using BOKPtrType = llvm::PointerEmbeddedInt<BinaryOperatorKind, 16>;
136 SourceRange ReductionRange;
137 llvm::PointerUnion<const Expr *, BOKPtrType> ReductionOp;
138 ReductionData() = default;
139 void set(BinaryOperatorKind BO, SourceRange RR) {
140 ReductionRange = RR;
141 ReductionOp = BO;
142 }
143 void set(const Expr *RefExpr, SourceRange RR) {
144 ReductionRange = RR;
145 ReductionOp = RefExpr;
146 }
147 };
148 using DeclReductionMapTy =
149 llvm::SmallDenseMap<const ValueDecl *, ReductionData, 4>;
150 struct DefaultmapInfo {
151 OpenMPDefaultmapClauseModifier ImplicitBehavior =
152 OMPC_DEFAULTMAP_MODIFIER_unknown;
153 SourceLocation SLoc;
154 DefaultmapInfo() = default;
155 DefaultmapInfo(OpenMPDefaultmapClauseModifier M, SourceLocation Loc)
156 : ImplicitBehavior(M), SLoc(Loc) {}
157 };
158
159 struct SharingMapTy {
160 DeclSAMapTy SharingMap;
161 DeclReductionMapTy ReductionMap;
162 UsedRefMapTy AlignedMap;
163 UsedRefMapTy NontemporalMap;
164 MappedExprComponentsTy MappedExprComponents;
165 LoopControlVariablesMapTy LCVMap;
166 DefaultDataSharingAttributes DefaultAttr = DSA_unspecified;
167 SourceLocation DefaultAttrLoc;
168 DefaultmapInfo DefaultmapMap[OMPC_DEFAULTMAP_unknown + 1];
169 OpenMPDirectiveKind Directive = OMPD_unknown;
170 DeclarationNameInfo DirectiveName;
171 Scope *CurScope = nullptr;
172 DeclContext *Context = nullptr;
173 SourceLocation ConstructLoc;
174 /// Set of 'depend' clauses with 'sink|source' dependence kind. Required to
175 /// get the data (loop counters etc.) about enclosing loop-based construct.
176 /// This data is required during codegen.
177 DoacrossClauseMapTy DoacrossDepends;
178 /// The first element (Expr *) holds the optional argument of the
179 /// 'ordered' clause; the second holds the 'ordered' clause itself if the
180 /// region has one, nullptr otherwise.
181 std::optional<std::pair<const Expr *, OMPOrderedClause *>> OrderedRegion;
182 bool RegionHasOrderConcurrent = false;
183 unsigned AssociatedLoops = 1;
184 bool HasMutipleLoops = false;
185 const Decl *PossiblyLoopCounter = nullptr;
186 bool NowaitRegion = false;
187 bool UntiedRegion = false;
188 bool CancelRegion = false;
189 bool LoopStart = false;
190 bool BodyComplete = false;
191 SourceLocation PrevScanLocation;
192 SourceLocation PrevOrderedLocation;
193 SourceLocation InnerTeamsRegionLoc;
194 /// Reference to the taskgroup task_reduction reference expression.
195 Expr *TaskgroupReductionRef = nullptr;
196 llvm::DenseSet<QualType> MappedClassesQualTypes;
197 SmallVector<Expr *, 4> InnerUsedAllocators;
198 llvm::DenseSet<CanonicalDeclPtr<Decl>> ImplicitTaskFirstprivates;
199 /// List of globals marked as declare target link in this target region
200 /// (isOpenMPTargetExecutionDirective(Directive) == true).
201 llvm::SmallVector<DeclRefExpr *, 4> DeclareTargetLinkVarDecls;
202 /// List of decls used in inclusive/exclusive clauses of the scan directive.
203 llvm::DenseSet<CanonicalDeclPtr<Decl>> UsedInScanDirective;
204 llvm::DenseMap<CanonicalDeclPtr<const Decl>, UsesAllocatorsDeclKind>
205 UsesAllocatorsDecls;
206 /// Data is required on creating capture fields for implicit
207 /// default first|private clause.
208 struct ImplicitDefaultFDInfoTy {
209 /// Field decl.
210 const FieldDecl *FD = nullptr;
211 /// Nesting stack level
212 size_t StackLevel = 0;
213 /// Capture variable decl.
214 VarDecl *VD = nullptr;
215 ImplicitDefaultFDInfoTy(const FieldDecl *FD, size_t StackLevel,
216 VarDecl *VD)
217 : FD(FD), StackLevel(StackLevel), VD(VD) {}
218 };
219 /// List of captured fields
220 llvm::SmallVector<ImplicitDefaultFDInfoTy, 8>
221 ImplicitDefaultFirstprivateFDs;
222 Expr *DeclareMapperVar = nullptr;
223 SmallVector<VarDecl *, 16> IteratorVarDecls;
224 SharingMapTy(OpenMPDirectiveKind DKind, DeclarationNameInfo Name,
225 Scope *CurScope, SourceLocation Loc)
226 : Directive(DKind), DirectiveName(Name), CurScope(CurScope),
227 ConstructLoc(Loc) {}
228 SharingMapTy() = default;
229 };
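  // One SharingMapTy entry is pushed per lexically entered OpenMP region (see
  // push()/pop() below), so iterating the stack from the back visits the
  // enclosing constructs from innermost to outermost.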
230
231 using StackTy = SmallVector<SharingMapTy, 4>;
232
233 /// Stack of used declarations and their data-sharing attributes.
234 DeclSAMapTy Threadprivates;
235 const FunctionScopeInfo *CurrentNonCapturingFunctionScope = nullptr;
236 SmallVector<std::pair<StackTy, const FunctionScopeInfo *>, 4> Stack;
237 /// true, if the check for DSA must be performed on the parent directive;
238 /// false, if on the current directive.
239 OpenMPClauseKind ClauseKindMode = OMPC_unknown;
240 Sema &SemaRef;
241 bool ForceCapturing = false;
242 /// true if all the variables in the target executable directives must be
243 /// captured by reference.
244 bool ForceCaptureByReferenceInTargetExecutable = false;
245 CriticalsWithHintsTy Criticals;
246 unsigned IgnoredStackElements = 0;
247
248 /// Iterators over the stack iterate in order from innermost to outermost
249 /// directive.
250 using const_iterator = StackTy::const_reverse_iterator;
251 const_iterator begin() const {
252 return Stack.empty() ? const_iterator()
253 : Stack.back().first.rbegin() + IgnoredStackElements;
254 }
255 const_iterator end() const {
256 return Stack.empty() ? const_iterator() : Stack.back().first.rend();
257 }
258 using iterator = StackTy::reverse_iterator;
259 iterator begin() {
260 return Stack.empty() ? iterator()
261 : Stack.back().first.rbegin() + IgnoredStackElements;
262 }
263 iterator end() {
264 return Stack.empty() ? iterator() : Stack.back().first.rend();
265 }
266
267 // Convenience operations to get at the elements of the stack.
268
269 bool isStackEmpty() const {
270 return Stack.empty() ||
271 Stack.back().second != CurrentNonCapturingFunctionScope ||
272 Stack.back().first.size() <= IgnoredStackElements;
273 }
274 size_t getStackSize() const {
275 return isStackEmpty() ? 0
276 : Stack.back().first.size() - IgnoredStackElements;
277 }
278
279 SharingMapTy *getTopOfStackOrNull() {
280 size_t Size = getStackSize();
281 if (Size == 0)
282 return nullptr;
283 return &Stack.back().first[Size - 1];
284 }
285 const SharingMapTy *getTopOfStackOrNull() const {
286 return const_cast<DSAStackTy &>(*this).getTopOfStackOrNull();
287 }
288 SharingMapTy &getTopOfStack() {
289 assert(!isStackEmpty() && "no current directive");
290 return *getTopOfStackOrNull();
291 }
292 const SharingMapTy &getTopOfStack() const {
293 return const_cast<DSAStackTy &>(*this).getTopOfStack();
294 }
295
296 SharingMapTy *getSecondOnStackOrNull() {
297 size_t Size = getStackSize();
298 if (Size <= 1)
299 return nullptr;
300 return &Stack.back().first[Size - 2];
301 }
302 const SharingMapTy *getSecondOnStackOrNull() const {
303 return const_cast<DSAStackTy &>(*this).getSecondOnStackOrNull();
304 }
305
306 /// Get the stack element at a certain level (previously returned by
307 /// \c getNestingLevel).
308 ///
309 /// Note that nesting levels count from outermost to innermost, and this is
310 /// the reverse of our iteration order where new inner levels are pushed at
311 /// the front of the stack.
312 SharingMapTy &getStackElemAtLevel(unsigned Level) {
313 assert(Level < getStackSize() && "no such stack element");
314 return Stack.back().first[Level];
315 }
316 const SharingMapTy &getStackElemAtLevel(unsigned Level) const {
317 return const_cast<DSAStackTy &>(*this).getStackElemAtLevel(Level);
318 }
319
320 DSAVarData getDSA(const_iterator &Iter, ValueDecl *D) const;
321
322 /// Checks if the variable is local to the OpenMP region.
323 bool isOpenMPLocal(VarDecl *D, const_iterator Iter) const;
324
325 /// Vector of previously declared requires directives
326 SmallVector<const OMPRequiresDecl *, 2> RequiresDecls;
327 /// omp_allocator_handle_t type.
328 QualType OMPAllocatorHandleT;
329 /// omp_depend_t type.
330 QualType OMPDependT;
331 /// omp_event_handle_t type.
332 QualType OMPEventHandleT;
333 /// omp_alloctrait_t type.
334 QualType OMPAlloctraitT;
335 /// Expression for the predefined allocators.
336 Expr *OMPPredefinedAllocators[OMPAllocateDeclAttr::OMPUserDefinedMemAlloc] = {
337 nullptr};
338 /// Vector of previously encountered target directives
339 SmallVector<SourceLocation, 2> TargetLocations;
340 SourceLocation AtomicLocation;
341 /// Vector of declare variant construct traits.
342 SmallVector<llvm::omp::TraitProperty, 8> ConstructTraits;
343
344public:
345 explicit DSAStackTy(Sema &S) : SemaRef(S) {}
346
347 /// Sets omp_allocator_handle_t type.
348 void setOMPAllocatorHandleT(QualType Ty) { OMPAllocatorHandleT = Ty; }
349 /// Gets omp_allocator_handle_t type.
350 QualType getOMPAllocatorHandleT() const { return OMPAllocatorHandleT; }
351 /// Sets omp_alloctrait_t type.
352 void setOMPAlloctraitT(QualType Ty) { OMPAlloctraitT = Ty; }
353 /// Gets omp_alloctrait_t type.
354 QualType getOMPAlloctraitT() const { return OMPAlloctraitT; }
355 /// Sets the given default allocator.
356 void setAllocator(OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind,
357 Expr *Allocator) {
358 OMPPredefinedAllocators[AllocatorKind] = Allocator;
359 }
360 /// Returns the specified default allocator.
361 Expr *getAllocator(OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind) const {
362 return OMPPredefinedAllocators[AllocatorKind];
363 }
364 /// Sets omp_depend_t type.
365 void setOMPDependT(QualType Ty) { OMPDependT = Ty; }
366 /// Gets omp_depend_t type.
367 QualType getOMPDependT() const { return OMPDependT; }
368
369 /// Sets omp_event_handle_t type.
370 void setOMPEventHandleT(QualType Ty) { OMPEventHandleT = Ty; }
371 /// Gets omp_event_handle_t type.
372 QualType getOMPEventHandleT() const { return OMPEventHandleT; }
373
374 bool isClauseParsingMode() const { return ClauseKindMode != OMPC_unknown; }
375 OpenMPClauseKind getClauseParsingMode() const {
376 assert(isClauseParsingMode() && "Must be in clause parsing mode.");
377 return ClauseKindMode;
378 }
379 void setClauseParsingMode(OpenMPClauseKind K) { ClauseKindMode = K; }
380
381 bool isBodyComplete() const {
382 const SharingMapTy *Top = getTopOfStackOrNull();
383 return Top && Top->BodyComplete;
384 }
385 void setBodyComplete() { getTopOfStack().BodyComplete = true; }
386
387 bool isForceVarCapturing() const { return ForceCapturing; }
388 void setForceVarCapturing(bool V) { ForceCapturing = V; }
389
390 void setForceCaptureByReferenceInTargetExecutable(bool V) {
391 ForceCaptureByReferenceInTargetExecutable = V;
392 }
393 bool isForceCaptureByReferenceInTargetExecutable() const {
394 return ForceCaptureByReferenceInTargetExecutable;
395 }
396
397 void push(OpenMPDirectiveKind DKind, const DeclarationNameInfo &DirName,
398 Scope *CurScope, SourceLocation Loc) {
399 assert(!IgnoredStackElements &&
400 "cannot change stack while ignoring elements");
401 if (Stack.empty() ||
402 Stack.back().second != CurrentNonCapturingFunctionScope)
403 Stack.emplace_back(StackTy(), CurrentNonCapturingFunctionScope);
404 Stack.back().first.emplace_back(DKind, DirName, CurScope, Loc);
405 Stack.back().first.back().DefaultAttrLoc = Loc;
406 }
407
408 void pop() {
409 assert(!IgnoredStackElements &&
410 "cannot change stack while ignoring elements");
411 assert(!Stack.back().first.empty() &&
412 "Data-sharing attributes stack is empty!");
413 Stack.back().first.pop_back();
414 }
415
416 /// RAII object to temporarily leave the scope of a directive when we want to
417 /// logically operate in its parent.
418 class ParentDirectiveScope {
419 DSAStackTy &Self;
420 bool Active;
421
422 public:
423 ParentDirectiveScope(DSAStackTy &Self, bool Activate)
424 : Self(Self), Active(false) {
425 if (Activate)
426 enable();
427 }
428 ~ParentDirectiveScope() { disable(); }
429 void disable() {
430 if (Active) {
431 --Self.IgnoredStackElements;
432 Active = false;
433 }
434 }
435 void enable() {
436 if (!Active) {
437 ++Self.IgnoredStackElements;
438 Active = true;
439 }
440 }
441 };
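  // Typical use (sketch, with 'Stack' being a DSAStackTy instance): while
  // analyzing a clause that logically belongs to the enclosing directive,
  // temporarily hide the innermost entry:
  //   {
  //     DSAStackTy::ParentDirectiveScope PDS(Stack, /*Activate=*/true);
  //     // Stack.getCurrentDirective() etc. now report the parent region.
  //   }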
442
443 /// Marks that loop parsing has started.
444 void loopInit() {
445 assert(isOpenMPLoopDirective(getCurrentDirective()) &&
446 "Expected loop-based directive.");
447 getTopOfStack().LoopStart = true;
448 }
449 /// Start capturing of the variables in the loop context.
450 void loopStart() {
451 assert(isOpenMPLoopDirective(getCurrentDirective()) &&
452 "Expected loop-based directive.");
453 getTopOfStack().LoopStart = false;
454 }
455 /// true, if variables are captured, false otherwise.
456 bool isLoopStarted() const {
457 assert(isOpenMPLoopDirective(getCurrentDirective()) &&
458 "Expected loop-based directive.");
459 return !getTopOfStack().LoopStart;
460 }
461 /// Marks (or clears) declaration as possibly loop counter.
462 void resetPossibleLoopCounter(const Decl *D = nullptr) {
463 getTopOfStack().PossiblyLoopCounter = D ? D->getCanonicalDecl() : D;
464 }
465 /// Gets the possible loop counter decl.
466 const Decl *getPossiblyLoopCounter() const {
467 return getTopOfStack().PossiblyLoopCounter;
468 }
469 /// Start new OpenMP region stack in new non-capturing function.
470 void pushFunction() {
471 assert(!IgnoredStackElements &&
472 "cannot change stack while ignoring elements");
473 const FunctionScopeInfo *CurFnScope = SemaRef.getCurFunction();
474 assert(!isa<CapturingScopeInfo>(CurFnScope));
475 CurrentNonCapturingFunctionScope = CurFnScope;
476 }
477 /// Pop region stack for non-capturing function.
478 void popFunction(const FunctionScopeInfo *OldFSI) {
479 assert(!IgnoredStackElements &&
480 "cannot change stack while ignoring elements");
481 if (!Stack.empty() && Stack.back().second == OldFSI) {
482 assert(Stack.back().first.empty());
483 Stack.pop_back();
484 }
485 CurrentNonCapturingFunctionScope = nullptr;
486 for (const FunctionScopeInfo *FSI : llvm::reverse(SemaRef.FunctionScopes)) {
487 if (!isa<CapturingScopeInfo>(FSI)) {
488 CurrentNonCapturingFunctionScope = FSI;
489 break;
490 }
491 }
492 }
493
494 void addCriticalWithHint(const OMPCriticalDirective *D, llvm::APSInt Hint) {
495 Criticals.try_emplace(D->getDirectiveName().getAsString(), D, Hint);
496 }
497 const std::pair<const OMPCriticalDirective *, llvm::APSInt>
498 getCriticalWithHint(const DeclarationNameInfo &Name) const {
499 auto I = Criticals.find(Name.getAsString());
500 if (I != Criticals.end())
501 return I->second;
502 return std::make_pair(nullptr, llvm::APSInt());
503 }
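  // These two helpers enforce that all 'critical' constructs with the same
  // name use a consistent 'hint', e.g. a region introduced as
  //   #pragma omp critical (lock1) hint(omp_sync_hint_contended)
  // may not reappear elsewhere with a different hint value.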
504 /// If 'aligned' declaration for given variable \a D was not seen yet,
505 /// add it and return NULL; otherwise return previous occurrence's expression
506 /// for diagnostics.
507 const Expr *addUniqueAligned(const ValueDecl *D, const Expr *NewDE);
508 /// If 'nontemporal' declaration for given variable \a D was not seen yet,
509 /// add it and return NULL; otherwise return previous occurrence's expression
510 /// for diagnostics.
511 const Expr *addUniqueNontemporal(const ValueDecl *D, const Expr *NewDE);
512
513 /// Register specified variable as loop control variable.
514 void addLoopControlVariable(const ValueDecl *D, VarDecl *Capture);
515 /// Check if the specified variable is a loop control variable for
516 /// current region.
517 /// \return The index of the loop control variable in the list of associated
518 /// for-loops (from outer to inner).
519 const LCDeclInfo isLoopControlVariable(const ValueDecl *D) const;
520 /// Check if the specified variable is a loop control variable for
521 /// parent region.
522 /// \return The index of the loop control variable in the list of associated
523 /// for-loops (from outer to inner).
524 const LCDeclInfo isParentLoopControlVariable(const ValueDecl *D) const;
525 /// Check if the specified variable is a loop control variable for
526 /// current region.
527 /// \return The index of the loop control variable in the list of associated
528 /// for-loops (from outer to inner).
529 const LCDeclInfo isLoopControlVariable(const ValueDecl *D,
530 unsigned Level) const;
531 /// Get the loop control variable for the I-th loop (or nullptr) in
532 /// parent directive.
533 const ValueDecl *getParentLoopControlVariable(unsigned I) const;
534
535 /// Marks the specified decl \p D as used in scan directive.
536 void markDeclAsUsedInScanDirective(ValueDecl *D) {
537 if (SharingMapTy *Stack = getSecondOnStackOrNull())
538 Stack->UsedInScanDirective.insert(D);
539 }
540
541 /// Checks if the specified declaration was used in the inner scan directive.
542 bool isUsedInScanDirective(ValueDecl *D) const {
543 if (const SharingMapTy *Stack = getTopOfStackOrNull())
544 return Stack->UsedInScanDirective.contains(D);
545 return false;
546 }
547
548 /// Adds explicit data sharing attribute to the specified declaration.
549 void addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
550 DeclRefExpr *PrivateCopy = nullptr, unsigned Modifier = 0,
551 bool AppliedToPointee = false);
552
553 /// Adds additional information for the reduction items with the reduction id
554 /// represented as an operator.
555 void addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
556 BinaryOperatorKind BOK);
557 /// Adds additional information for the reduction items with the reduction id
558 /// represented as reduction identifier.
559 void addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
560 const Expr *ReductionRef);
561 /// Returns the location and reduction operation from the innermost parent
562 /// region for the given \p D.
563 const DSAVarData
564 getTopMostTaskgroupReductionData(const ValueDecl *D, SourceRange &SR,
565 BinaryOperatorKind &BOK,
566 Expr *&TaskgroupDescriptor) const;
567 /// Returns the location and reduction operation from the innermost parent
568 /// region for the given \p D.
569 const DSAVarData
570 getTopMostTaskgroupReductionData(const ValueDecl *D, SourceRange &SR,
571 const Expr *&ReductionRef,
572 Expr *&TaskgroupDescriptor) const;
573 /// Return reduction reference expression for the current taskgroup or
574 /// parallel/worksharing directives with task reductions.
575 Expr *getTaskgroupReductionRef() const {
576 assert((getTopOfStack().Directive == OMPD_taskgroup ||
577 ((isOpenMPParallelDirective(getTopOfStack().Directive) ||
578 isOpenMPWorksharingDirective(getTopOfStack().Directive)) &&
579 !isOpenMPSimdDirective(getTopOfStack().Directive))) &&
580 "taskgroup reference expression requested for non taskgroup or "
581 "parallel/worksharing directive.");
582 return getTopOfStack().TaskgroupReductionRef;
583 }
584 /// Checks if the given \p VD declaration is actually a taskgroup reduction
585 /// descriptor variable at the \p Level of OpenMP regions.
586 bool isTaskgroupReductionRef(const ValueDecl *VD, unsigned Level) const {
587 return getStackElemAtLevel(Level).TaskgroupReductionRef &&
588 cast<DeclRefExpr>(getStackElemAtLevel(Level).TaskgroupReductionRef)
589 ->getDecl() == VD;
590 }
591
592 /// Returns data sharing attributes from top of the stack for the
593 /// specified declaration.
594 const DSAVarData getTopDSA(ValueDecl *D, bool FromParent);
595 /// Returns data-sharing attributes for the specified declaration.
596 const DSAVarData getImplicitDSA(ValueDecl *D, bool FromParent) const;
597 /// Returns data-sharing attributes for the specified declaration.
598 const DSAVarData getImplicitDSA(ValueDecl *D, unsigned Level) const;
599 /// Checks if the specified variable has data-sharing attributes which
600 /// match the specified \a CPred predicate in any directive which matches \a DPred
601 /// predicate.
602 const DSAVarData
603 hasDSA(ValueDecl *D,
604 const llvm::function_ref<bool(OpenMPClauseKind, bool,
605 DefaultDataSharingAttributes)>
606 CPred,
607 const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
608 bool FromParent) const;
609 /// Checks if the specified variable has data-sharing attributes which
610 /// match the specified \a CPred predicate in any innermost directive which
611 /// matches \a DPred predicate.
612 const DSAVarData
613 hasInnermostDSA(ValueDecl *D,
614 const llvm::function_ref<bool(OpenMPClauseKind, bool)> CPred,
615 const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
616 bool FromParent) const;
617 /// Checks if the specified variable has explicit data-sharing
618 /// attributes which match the specified \a CPred predicate at the specified
619 /// OpenMP region.
620 bool
621 hasExplicitDSA(const ValueDecl *D,
622 const llvm::function_ref<bool(OpenMPClauseKind, bool)> CPred,
623 unsigned Level, bool NotLastprivate = false) const;
624
625 /// Returns true if the directive at level \p Level matches the
626 /// specified \a DPred predicate.
627 bool hasExplicitDirective(
628 const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
629 unsigned Level) const;
630
631 /// Finds a directive which matches specified \a DPred predicate.
632 bool hasDirective(
633 const llvm::function_ref<bool(
634 OpenMPDirectiveKind, const DeclarationNameInfo &, SourceLocation)>
635 DPred,
636 bool FromParent) const;
637
638 /// Returns currently analyzed directive.
639 OpenMPDirectiveKind getCurrentDirective() const {
640 const SharingMapTy *Top = getTopOfStackOrNull();
641 return Top ? Top->Directive : OMPD_unknown;
642 }
643 /// Returns directive kind at specified level.
644 OpenMPDirectiveKind getDirective(unsigned Level) const {
645 assert(!isStackEmpty() && "No directive at specified level.");
646 return getStackElemAtLevel(Level).Directive;
647 }
648 /// Returns the capture region at the specified level.
649 OpenMPDirectiveKind getCaptureRegion(unsigned Level,
650 unsigned OpenMPCaptureLevel) const {
651 SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
652 getOpenMPCaptureRegions(CaptureRegions, getDirective(Level));
653 return CaptureRegions[OpenMPCaptureLevel];
654 }
655 /// Returns parent directive.
656 OpenMPDirectiveKind getParentDirective() const {
657 const SharingMapTy *Parent = getSecondOnStackOrNull();
658 return Parent ? Parent->Directive : OMPD_unknown;
659 }
660
661 /// Add requires decl to internal vector
662 void addRequiresDecl(OMPRequiresDecl *RD) { RequiresDecls.push_back(RD); }
663
664 /// Checks if the defined 'requires' directive has specified type of clause.
665 template <typename ClauseType> bool hasRequiresDeclWithClause() const {
666 return llvm::any_of(RequiresDecls, [](const OMPRequiresDecl *D) {
667 return llvm::any_of(D->clauselists(), [](const OMPClause *C) {
668 return isa<ClauseType>(C);
669 });
670 });
671 }
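  // For instance, hasRequiresDeclWithClause<OMPUnifiedSharedMemoryClause>()
  // reports whether a '#pragma omp requires unified_shared_memory' has
  // already been seen in this translation unit.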
672
673 /// Checks for a duplicate clause amongst previously declared requires
674 /// directives
675 bool hasDuplicateRequiresClause(ArrayRef<OMPClause *> ClauseList) const {
676 bool IsDuplicate = false;
677 for (OMPClause *CNew : ClauseList) {
678 for (const OMPRequiresDecl *D : RequiresDecls) {
679 for (const OMPClause *CPrev : D->clauselists()) {
680 if (CNew->getClauseKind() == CPrev->getClauseKind()) {
681 SemaRef.Diag(CNew->getBeginLoc(),
682 diag::err_omp_requires_clause_redeclaration)
683 << getOpenMPClauseName(CNew->getClauseKind());
684 SemaRef.Diag(CPrev->getBeginLoc(),
685 diag::note_omp_requires_previous_clause)
686 << getOpenMPClauseName(CPrev->getClauseKind());
687 IsDuplicate = true;
688 }
689 }
690 }
691 }
692 return IsDuplicate;
693 }
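  // For example, a second '#pragma omp requires unified_shared_memory' after
  // an earlier one is reported via err_omp_requires_clause_redeclaration,
  // with a note pointing at the previous clause.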
694
695 /// Add location of previously encountered target to internal vector
696 void addTargetDirLocation(SourceLocation LocStart) {
697 TargetLocations.push_back(LocStart);
698 }
699
700 /// Add location for the first encountered atomic directive.
701 void addAtomicDirectiveLoc(SourceLocation Loc) {
702 if (AtomicLocation.isInvalid())
703 AtomicLocation = Loc;
704 }
705
706 /// Returns the location of the first encountered atomic directive in the
707 /// module.
708 SourceLocation getAtomicDirectiveLoc() const { return AtomicLocation; }
709
710 // Return previously encountered target region locations.
711 ArrayRef<SourceLocation> getEncounteredTargetLocs() const {
712 return TargetLocations;
713 }
714
715 /// Set default data sharing attribute to none.
716 void setDefaultDSANone(SourceLocation Loc) {
717 getTopOfStack().DefaultAttr = DSA_none;
718 getTopOfStack().DefaultAttrLoc = Loc;
719 }
720 /// Set default data sharing attribute to shared.
721 void setDefaultDSAShared(SourceLocation Loc) {
722 getTopOfStack().DefaultAttr = DSA_shared;
723 getTopOfStack().DefaultAttrLoc = Loc;
724 }
725 /// Set default data sharing attribute to private.
726 void setDefaultDSAPrivate(SourceLocation Loc) {
727 getTopOfStack().DefaultAttr = DSA_private;
728 getTopOfStack().DefaultAttrLoc = Loc;
729 }
730 /// Set default data sharing attribute to firstprivate.
731 void setDefaultDSAFirstPrivate(SourceLocation Loc) {
732 getTopOfStack().DefaultAttr = DSA_firstprivate;
733 getTopOfStack().DefaultAttrLoc = Loc;
734 }
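  // These setters record the argument of an explicit 'default' clause; e.g.
  // '#pragma omp parallel default(firstprivate)' results in a call to
  // setDefaultDSAFirstPrivate() with the clause location.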
735 /// Set default data mapping attribute to Modifier:Kind
736 void setDefaultDMAAttr(OpenMPDefaultmapClauseModifier M,
737 OpenMPDefaultmapClauseKind Kind, SourceLocation Loc) {
738 DefaultmapInfo &DMI = getTopOfStack().DefaultmapMap[Kind];
739 DMI.ImplicitBehavior = M;
740 DMI.SLoc = Loc;
741 }
742 /// Check whether the implicit-behavior has been set in defaultmap
743 bool checkDefaultmapCategory(OpenMPDefaultmapClauseKind VariableCategory) {
744 if (VariableCategory == OMPC_DEFAULTMAP_unknown)
745 return getTopOfStack()
746 .DefaultmapMap[OMPC_DEFAULTMAP_aggregate]
747 .ImplicitBehavior != OMPC_DEFAULTMAP_MODIFIER_unknown ||
748 getTopOfStack()
749 .DefaultmapMap[OMPC_DEFAULTMAP_scalar]
750 .ImplicitBehavior != OMPC_DEFAULTMAP_MODIFIER_unknown ||
751 getTopOfStack()
752 .DefaultmapMap[OMPC_DEFAULTMAP_pointer]
753 .ImplicitBehavior != OMPC_DEFAULTMAP_MODIFIER_unknown;
754 return getTopOfStack().DefaultmapMap[VariableCategory].ImplicitBehavior !=
755 OMPC_DEFAULTMAP_MODIFIER_unknown;
756 }
757
758 ArrayRef<llvm::omp::TraitProperty> getConstructTraits() {
759 return ConstructTraits;
760 }
761 void handleConstructTrait(ArrayRef<llvm::omp::TraitProperty> Traits,
762 bool ScopeEntry) {
763 if (ScopeEntry)
764 ConstructTraits.append(Traits.begin(), Traits.end());
765 else
766 for (llvm::omp::TraitProperty Trait : llvm::reverse(Traits)) {
767 llvm::omp::TraitProperty Top = ConstructTraits.pop_back_val();
768 assert(Top == Trait && "Something left a trait on the stack!");
769 (void)Trait;
770 (void)Top;
771 }
772 }
773
774 DefaultDataSharingAttributes getDefaultDSA(unsigned Level) const {
775 return getStackSize() <= Level ? DSA_unspecified
776 : getStackElemAtLevel(Level).DefaultAttr;
777 }
778 DefaultDataSharingAttributes getDefaultDSA() const {
779 return isStackEmpty() ? DSA_unspecified : getTopOfStack().DefaultAttr;
780 }
781 SourceLocation getDefaultDSALocation() const {
782 return isStackEmpty() ? SourceLocation() : getTopOfStack().DefaultAttrLoc;
783 }
784 OpenMPDefaultmapClauseModifier
785 getDefaultmapModifier(OpenMPDefaultmapClauseKind Kind) const {
786 return isStackEmpty()
787 ? OMPC_DEFAULTMAP_MODIFIER_unknown
788 : getTopOfStack().DefaultmapMap[Kind].ImplicitBehavior;
789 }
790 OpenMPDefaultmapClauseModifier
791 getDefaultmapModifierAtLevel(unsigned Level,
792 OpenMPDefaultmapClauseKind Kind) const {
793 return getStackElemAtLevel(Level).DefaultmapMap[Kind].ImplicitBehavior;
794 }
795 bool isDefaultmapCapturedByRef(unsigned Level,
796 OpenMPDefaultmapClauseKind Kind) const {
797 OpenMPDefaultmapClauseModifier M =
798 getDefaultmapModifierAtLevel(Level, Kind);
799 if (Kind == OMPC_DEFAULTMAP_scalar || Kind == OMPC_DEFAULTMAP_pointer) {
800 return (M == OMPC_DEFAULTMAP_MODIFIER_alloc) ||
801 (M == OMPC_DEFAULTMAP_MODIFIER_to) ||
802 (M == OMPC_DEFAULTMAP_MODIFIER_from) ||
803 (M == OMPC_DEFAULTMAP_MODIFIER_tofrom) ||
804 (M == OMPC_DEFAULTMAP_MODIFIER_present);
805 }
806 return true;
807 }
808 static bool mustBeFirstprivateBase(OpenMPDefaultmapClauseModifier M,
809 OpenMPDefaultmapClauseKind Kind) {
810 switch (Kind) {
811 case OMPC_DEFAULTMAP_scalar:
812 case OMPC_DEFAULTMAP_pointer:
813 return (M == OMPC_DEFAULTMAP_MODIFIER_unknown) ||
814 (M == OMPC_DEFAULTMAP_MODIFIER_firstprivate) ||
815 (M == OMPC_DEFAULTMAP_MODIFIER_default);
816 case OMPC_DEFAULTMAP_aggregate:
817 return M == OMPC_DEFAULTMAP_MODIFIER_firstprivate;
818 default:
819 break;
820 }
821 llvm_unreachable("Unexpected OpenMPDefaultmapClauseKind enum");
822 }
823 bool mustBeFirstprivateAtLevel(unsigned Level,
824 OpenMPDefaultmapClauseKind Kind) const {
825 OpenMPDefaultmapClauseModifier M =
826 getDefaultmapModifierAtLevel(Level, Kind);
827 return mustBeFirstprivateBase(M, Kind);
828 }
829 bool mustBeFirstprivate(OpenMPDefaultmapClauseKind Kind) const {
830 OpenMPDefaultmapClauseModifier M = getDefaultmapModifier(Kind);
831 return mustBeFirstprivateBase(M, Kind);
832 }
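  // Together these encode the defaultmap-driven decision of which captures on
  // 'target' default to firstprivate: with no modifier for the category (or
  // with 'default'/'firstprivate'), scalars and pointers are treated as
  // firstprivate, while aggregates are firstprivate only under an explicit
  // 'defaultmap(firstprivate: aggregate)'.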
833
834 /// Checks if the specified variable is threadprivate.
835 bool isThreadPrivate(VarDecl *D) {
836 const DSAVarData DVar = getTopDSA(D, false);
837 return isOpenMPThreadPrivate(DVar.CKind);
838 }
839
840 /// Marks current region as ordered (it has an 'ordered' clause).
841 void setOrderedRegion(bool IsOrdered, const Expr *Param,
842 OMPOrderedClause *Clause) {
843 if (IsOrdered)
844 getTopOfStack().OrderedRegion.emplace(Param, Clause);
845 else
846 getTopOfStack().OrderedRegion.reset();
847 }
848 /// Returns true, if region is ordered (has associated 'ordered' clause),
849 /// false - otherwise.
850 bool isOrderedRegion() const {
851 if (const SharingMapTy *Top = getTopOfStackOrNull())
852 return Top->OrderedRegion.has_value();
853 return false;
854 }
855 /// Returns optional parameter for the ordered region.
856 std::pair<const Expr *, OMPOrderedClause *> getOrderedRegionParam() const {
857 if (const SharingMapTy *Top = getTopOfStackOrNull())
858 if (Top->OrderedRegion)
859 return *Top->OrderedRegion;
860 return std::make_pair(nullptr, nullptr);
861 }
862 /// Returns true, if parent region is ordered (has associated
863 /// 'ordered' clause), false - otherwise.
864 bool isParentOrderedRegion() const {
865 if (const SharingMapTy *Parent = getSecondOnStackOrNull())
866 return Parent->OrderedRegion.has_value();
867 return false;
868 }
869 /// Returns optional parameter for the ordered region.
870 std::pair<const Expr *, OMPOrderedClause *>
871 getParentOrderedRegionParam() const {
872 if (const SharingMapTy *Parent = getSecondOnStackOrNull())
873 if (Parent->OrderedRegion)
874 return *Parent->OrderedRegion;
875 return std::make_pair(nullptr, nullptr);
876 }
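  // The stored parameter comes from a clause such as 'ordered(2)' on a
  // worksharing-loop; 'depend(sink: ...)'/'doacross(sink: ...)' clauses in a
  // nested 'ordered' construct are later checked against that loop count.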
877 /// Marks current region as having an 'order' clause.
878 void setRegionHasOrderConcurrent(bool HasOrderConcurrent) {
879 getTopOfStack().RegionHasOrderConcurrent = HasOrderConcurrent;
880 }
881 /// Returns true, if parent region has an associated 'order' clause,
882 /// false - otherwise.
883 bool isParentOrderConcurrent() const {
884 if (const SharingMapTy *Parent = getSecondOnStackOrNull())
885 return Parent->RegionHasOrderConcurrent;
886 return false;
887 }
888 /// Marks current region as nowait (it has a 'nowait' clause).
889 void setNowaitRegion(bool IsNowait = true) {
890 getTopOfStack().NowaitRegion = IsNowait;
891 }
892 /// Returns true, if parent region is nowait (has associated
893 /// 'nowait' clause), false - otherwise.
894 bool isParentNowaitRegion() const {
895 if (const SharingMapTy *Parent = getSecondOnStackOrNull())
896 return Parent->NowaitRegion;
897 return false;
898 }
899 /// Marks current region as untied (it has an 'untied' clause).
900 void setUntiedRegion(bool IsUntied = true) {
901 getTopOfStack().UntiedRegion = IsUntied;
902 }
903 /// Return true if current region is untied.
904 bool isUntiedRegion() const {
905 const SharingMapTy *Top = getTopOfStackOrNull();
906 return Top ? Top->UntiedRegion : false;
907 }
908 /// Marks parent region as cancel region.
909 void setParentCancelRegion(bool Cancel = true) {
910 if (SharingMapTy *Parent = getSecondOnStackOrNull())
911 Parent->CancelRegion |= Cancel;
912 }
913 /// Return true if current region has inner cancel construct.
914 bool isCancelRegion() const {
915 const SharingMapTy *Top = getTopOfStackOrNull();
916 return Top ? Top->CancelRegion : false;
917 }
918
919 /// Mark that parent region already has scan directive.
920 void setParentHasScanDirective(SourceLocation Loc) {
921 if (SharingMapTy *Parent = getSecondOnStackOrNull())
922 Parent->PrevScanLocation = Loc;
923 }
924 /// Return true if the parent region already has a scan directive.
925 bool doesParentHasScanDirective() const {
926 const SharingMapTy *Top = getSecondOnStackOrNull();
927 return Top ? Top->PrevScanLocation.isValid() : false;
928 }
929 /// Returns the location of the scan directive in the parent region, if any.
930 SourceLocation getParentScanDirectiveLoc() const {
931 const SharingMapTy *Top = getSecondOnStackOrNull();
932 return Top ? Top->PrevScanLocation : SourceLocation();
933 }
934 /// Mark that parent region already has ordered directive.
935 void setParentHasOrderedDirective(SourceLocation Loc) {
936 if (SharingMapTy *Parent = getSecondOnStackOrNull())
937 Parent->PrevOrderedLocation = Loc;
938 }
939 /// Return true if the parent region already has an ordered directive.
940 bool doesParentHasOrderedDirective() const {
941 const SharingMapTy *Top = getSecondOnStackOrNull();
942 return Top ? Top->PrevOrderedLocation.isValid() : false;
943 }
944 /// Returns the location of the previously specified ordered directive.
945 SourceLocation getParentOrderedDirectiveLoc() const {
946 const SharingMapTy *Top = getSecondOnStackOrNull();
947 return Top ? Top->PrevOrderedLocation : SourceLocation();
948 }
949
950 /// Set collapse value for the region.
951 void setAssociatedLoops(unsigned Val) {
952 getTopOfStack().AssociatedLoops = Val;
953 if (Val > 1)
954 getTopOfStack().HasMutipleLoops = true;
955 }
956 /// Return collapse value for region.
957 unsigned getAssociatedLoops() const {
958 const SharingMapTy *Top = getTopOfStackOrNull();
959 return Top ? Top->AssociatedLoops : 0;
960 }
961 /// Returns true if the construct is associated with multiple loops.
962 bool hasMutipleLoops() const {
963 const SharingMapTy *Top = getTopOfStackOrNull();
964 return Top ? Top->HasMutipleLoops : false;
965 }
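  // AssociatedLoops reflects the 'collapse' argument, e.g.
  //   #pragma omp for collapse(2)
  // sets the value to 2 and marks the region as having multiple loops.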
966
967 /// Marks current target region as one with closely nested teams
968 /// region.
969 void setParentTeamsRegionLoc(SourceLocation TeamsRegionLoc) {
970 if (SharingMapTy *Parent = getSecondOnStackOrNull())
971 Parent->InnerTeamsRegionLoc = TeamsRegionLoc;
972 }
973 /// Returns true, if current region has closely nested teams region.
974 bool hasInnerTeamsRegion() const {
975 return getInnerTeamsRegionLoc().isValid();
976 }
977 /// Returns location of the nested teams region (if any).
978 SourceLocation getInnerTeamsRegionLoc() const {
979 const SharingMapTy *Top = getTopOfStackOrNull();
980 return Top ? Top->InnerTeamsRegionLoc : SourceLocation();
981 }
982
983 Scope *getCurScope() const {
984 const SharingMapTy *Top = getTopOfStackOrNull();
985 return Top ? Top->CurScope : nullptr;
986 }
987 void setContext(DeclContext *DC) { getTopOfStack().Context = DC; }
988 SourceLocation getConstructLoc() const {
989 const SharingMapTy *Top = getTopOfStackOrNull();
990 return Top ? Top->ConstructLoc : SourceLocation();
991 }
992
993 /// Do the check specified in \a Check to all component lists and return true
994 /// if any issue is found.
995 bool checkMappableExprComponentListsForDecl(
996 const ValueDecl *VD, bool CurrentRegionOnly,
997 const llvm::function_ref<
998 bool(OMPClauseMappableExprCommon::MappableExprComponentListRef,
999 OpenMPClauseKind)>
1000 Check) const {
1001 if (isStackEmpty())
1002 return false;
1003 auto SI = begin();
1004 auto SE = end();
1005
1006 if (SI == SE)
1007 return false;
1008
1009 if (CurrentRegionOnly)
1010 SE = std::next(SI);
1011 else
1012 std::advance(SI, 1);
1013
1014 for (; SI != SE; ++SI) {
1015 auto MI = SI->MappedExprComponents.find(VD);
1016 if (MI != SI->MappedExprComponents.end())
1017 for (OMPClauseMappableExprCommon::MappableExprComponentListRef L :
1018 MI->second.Components)
1019 if (Check(L, MI->second.Kind))
1020 return true;
1021 }
1022 return false;
1023 }
1024
1025 /// Do the check specified in \a Check to all component lists at a given level
1026 /// and return true if any issue is found.
1027 bool checkMappableExprComponentListsForDeclAtLevel(
1028 const ValueDecl *VD, unsigned Level,
1029 const llvm::function_ref<
1030 bool(OMPClauseMappableExprCommon::MappableExprComponentListRef,
1031 OpenMPClauseKind)>
1032 Check) const {
1033 if (getStackSize() <= Level)
1034 return false;
1035
1036 const SharingMapTy &StackElem = getStackElemAtLevel(Level);
1037 auto MI = StackElem.MappedExprComponents.find(VD);
1038 if (MI != StackElem.MappedExprComponents.end())
1039 for (OMPClauseMappableExprCommon::MappableExprComponentListRef L :
1040 MI->second.Components)
1041 if (Check(L, MI->second.Kind))
1042 return true;
1043 return false;
1044 }
1045
1046 /// Create a new mappable expression component list associated with a given
1047 /// declaration and initialize it with the provided list of components.
1048 void addMappableExpressionComponents(
1049 const ValueDecl *VD,
1050 OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
1051 OpenMPClauseKind WhereFoundClauseKind) {
1052 MappedExprComponentTy &MEC = getTopOfStack().MappedExprComponents[VD];
1053 // Create new entry and append the new components there.
1054 MEC.Components.resize(MEC.Components.size() + 1);
1055 MEC.Components.back().append(Components.begin(), Components.end());
1056 MEC.Kind = WhereFoundClauseKind;
1057 }
1058
1059 unsigned getNestingLevel() const {
1060 assert(!isStackEmpty());
1061 return getStackSize() - 1;
1062 }
1063 void addDoacrossDependClause(OMPClause *C, const OperatorOffsetTy &OpsOffs) {
1064 SharingMapTy *Parent = getSecondOnStackOrNull();
1065 assert(Parent && isOpenMPWorksharingDirective(Parent->Directive));
1066 Parent->DoacrossDepends.try_emplace(C, OpsOffs);
1067 }
1068 llvm::iterator_range<DoacrossClauseMapTy::const_iterator>
1069 getDoacrossDependClauses() const {
1070 const SharingMapTy &StackElem = getTopOfStack();
1071 if (isOpenMPWorksharingDirective(StackElem.Directive)) {
1072 const DoacrossClauseMapTy &Ref = StackElem.DoacrossDepends;
1073 return llvm::make_range(Ref.begin(), Ref.end());
1074 }
1075 return llvm::make_range(StackElem.DoacrossDepends.end(),
1076 StackElem.DoacrossDepends.end());
1077 }
1078
1079 // Store types of classes which have been explicitly mapped
1080 void addMappedClassesQualTypes(QualType QT) {
1081 SharingMapTy &StackElem = getTopOfStack();
1082 StackElem.MappedClassesQualTypes.insert(QT);
1083 }
1084
1085 // Check whether the given class type was previously mapped
1086 bool isClassPreviouslyMapped(QualType QT) const {
1087 const SharingMapTy &StackElem = getTopOfStack();
1088 return StackElem.MappedClassesQualTypes.contains(QT);
1089 }
1090
1091 /// Adds global declare target to the parent target region.
1092 void addToParentTargetRegionLinkGlobals(DeclRefExpr *E) {
1093 assert(*OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(
1094 E->getDecl()) == OMPDeclareTargetDeclAttr::MT_Link &&
1095 "Expected declare target link global.");
1096 for (auto &Elem : *this) {
1097 if (isOpenMPTargetExecutionDirective(Elem.Directive)) {
1098 Elem.DeclareTargetLinkVarDecls.push_back(E);
1099 return;
1100 }
1101 }
1102 }
1103
1104 /// Returns the list of globals with declare target link if current directive
1105 /// is target.
1106 ArrayRef<DeclRefExpr *> getLinkGlobals() const {
1107 assert(isOpenMPTargetExecutionDirective(getCurrentDirective()) &&
1108 "Expected target executable directive.");
1109 return getTopOfStack().DeclareTargetLinkVarDecls;
1110 }
1111
1112 /// Adds list of allocators expressions.
1113 void addInnerAllocatorExpr(Expr *E) {
1114 getTopOfStack().InnerUsedAllocators.push_back(E);
1115 }
1116 /// Return list of used allocators.
1117 ArrayRef<Expr *> getInnerAllocators() const {
1118 return getTopOfStack().InnerUsedAllocators;
1119 }
1120 /// Marks the declaration as implicitly firstprivate in the task-based
1121 /// regions.
1122 void addImplicitTaskFirstprivate(unsigned Level, Decl *D) {
1123 getStackElemAtLevel(Level).ImplicitTaskFirstprivates.insert(D);
1124 }
1125 /// Checks if the decl is implicitly firstprivate in the task-based region.
1126 bool isImplicitTaskFirstprivate(Decl *D) const {
1127 return getTopOfStack().ImplicitTaskFirstprivates.contains(D);
1128 }
1129
1130 /// Marks decl as used in uses_allocators clause as the allocator.
1131 void addUsesAllocatorsDecl(const Decl *D, UsesAllocatorsDeclKind Kind) {
1132 getTopOfStack().UsesAllocatorsDecls.try_emplace(D, Kind);
1133 }
1134 /// Checks if the specified decl is used in a uses_allocators clause as
1135 /// the allocator.
1136 std::optional<UsesAllocatorsDeclKind>
1137 isUsesAllocatorsDecl(unsigned Level, const Decl *D) const {
1138 const SharingMapTy &StackElem = getTopOfStack();
1139 auto I = StackElem.UsesAllocatorsDecls.find(D);
1140 if (I == StackElem.UsesAllocatorsDecls.end())
1141 return std::nullopt;
1142 return I->getSecond();
1143 }
1144 std::optional<UsesAllocatorsDeclKind>
1145 isUsesAllocatorsDecl(const Decl *D) const {
1146 const SharingMapTy &StackElem = getTopOfStack();
1147 auto I = StackElem.UsesAllocatorsDecls.find(D);
1148 if (I == StackElem.UsesAllocatorsDecls.end())
1149 return std::nullopt;
1150 return I->getSecond();
1151 }
1152
1153 void addDeclareMapperVarRef(Expr *Ref) {
1154 SharingMapTy &StackElem = getTopOfStack();
1155 StackElem.DeclareMapperVar = Ref;
1156 }
1157 const Expr *getDeclareMapperVarRef() const {
1158 const SharingMapTy *Top = getTopOfStackOrNull();
1159 return Top ? Top->DeclareMapperVar : nullptr;
1160 }
1161
1162 /// Add a new iterator variable.
1163 void addIteratorVarDecl(VarDecl *VD) {
1164 SharingMapTy &StackElem = getTopOfStack();
1165 StackElem.IteratorVarDecls.push_back(VD->getCanonicalDecl());
1166 }
1167 /// Check if variable declaration is an iterator VarDecl.
1168 bool isIteratorVarDecl(const VarDecl *VD) const {
1169 const SharingMapTy *Top = getTopOfStackOrNull();
1170 if (!Top)
1171 return false;
1172
1173 return llvm::is_contained(Top->IteratorVarDecls, VD->getCanonicalDecl());
1174 }
1175 /// Get captured field from ImplicitDefaultFirstprivateFDs
1176 VarDecl *getImplicitFDCapExprDecl(const FieldDecl *FD) const {
1177 const_iterator I = begin();
1178 const_iterator EndI = end();
1179 size_t StackLevel = getStackSize();
1180 for (; I != EndI; ++I) {
1181 if (I->DefaultAttr == DSA_firstprivate || I->DefaultAttr == DSA_private)
1182 break;
1183 StackLevel--;
1184 }
1185 assert((StackLevel > 0 && I != EndI) || (StackLevel == 0 && I == EndI));
1186 if (I == EndI)
1187 return nullptr;
1188 for (const auto &IFD : I->ImplicitDefaultFirstprivateFDs)
1189 if (IFD.FD == FD && IFD.StackLevel == StackLevel)
1190 return IFD.VD;
1191 return nullptr;
1192 }
1193 /// Check if the capture decl is a field captured in ImplicitDefaultFirstprivateFDs
1194 bool isImplicitDefaultFirstprivateFD(VarDecl *VD) const {
1195 const_iterator I = begin();
1196 const_iterator EndI = end();
1197 for (; I != EndI; ++I)
1198 if (I->DefaultAttr == DSA_firstprivate || I->DefaultAttr == DSA_private)
1199 break;
1200 if (I == EndI)
1201 return false;
1202 for (const auto &IFD : I->ImplicitDefaultFirstprivateFDs)
1203 if (IFD.VD == VD)
1204 return true;
1205 return false;
1206 }
1207 /// Store capture FD info in ImplicitDefaultFirstprivateFDs
1208 void addImplicitDefaultFirstprivateFD(const FieldDecl *FD, VarDecl *VD) {
1209 iterator I = begin();
1210 const_iterator EndI = end();
1211 size_t StackLevel = getStackSize();
1212 for (; I != EndI; ++I) {
1213 if (I->DefaultAttr == DSA_private || I->DefaultAttr == DSA_firstprivate) {
1214 I->ImplicitDefaultFirstprivateFDs.emplace_back(FD, StackLevel, VD);
1215 break;
1216 }
1217 StackLevel--;
1218 }
1219 assert((StackLevel > 0 && I != EndI) || (StackLevel == 0 && I == EndI));
1220 }
1221};
1222
1223bool isImplicitTaskingRegion(OpenMPDirectiveKind DKind) {
1224 return isOpenMPParallelDirective(DKind) || isOpenMPTeamsDirective(DKind);
1225}
1226
1227bool isImplicitOrExplicitTaskingRegion(OpenMPDirectiveKind DKind) {
1228 return isImplicitTaskingRegion(DKind) || isOpenMPTaskingDirective(DKind) ||
1229 DKind == OMPD_unknown;
1230}
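// Thus 'parallel' and 'teams' regions, explicit tasking constructs ('task',
// 'taskloop', ...), and code outside any construct (OMPD_unknown) are all
// treated as regions that own implicit data-sharing decisions.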
1231
1232} // namespace
1233
1234static const Expr *getExprAsWritten(const Expr *E) {
1235 if (const auto *FE = dyn_cast<FullExpr>(E))
1236 E = FE->getSubExpr();
1237
1238 if (const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
1239 E = MTE->getSubExpr();
1240
1241 while (const auto *Binder = dyn_cast<CXXBindTemporaryExpr>(E))
1242 E = Binder->getSubExpr();
1243
1244 if (const auto *ICE = dyn_cast<ImplicitCastExpr>(E))
1245 E = ICE->getSubExprAsWritten();
1246 return E->IgnoreParens();
1247}
1248
1249static Expr *getExprAsWritten(Expr *E) {
1250 return const_cast<Expr *>(getExprAsWritten(const_cast<const Expr *>(E)));
1251}
1252
1253static const ValueDecl *getCanonicalDecl(const ValueDecl *D) {
1254 if (const auto *CED = dyn_cast<OMPCapturedExprDecl>(D))
1255 if (const auto *ME = dyn_cast<MemberExpr>(getExprAsWritten(CED->getInit())))
1256 D = ME->getMemberDecl();
1257
1258 D = cast<ValueDecl>(D->getCanonicalDecl());
1259 return D;
1260}
1261
1262static ValueDecl *getCanonicalDecl(ValueDecl *D) {
1263 return const_cast<ValueDecl *>(
1264 getCanonicalDecl(const_cast<const ValueDecl *>(D)));
1265}
1266
1267DSAStackTy::DSAVarData DSAStackTy::getDSA(const_iterator &Iter,
1268 ValueDecl *D) const {
1269 D = getCanonicalDecl(D);
1270 auto *VD = dyn_cast<VarDecl>(D);
1271 const auto *FD = dyn_cast<FieldDecl>(D);
1272 DSAVarData DVar;
1273 if (Iter == end()) {
1274 // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1275 // in a region but not in construct]
1276 // File-scope or namespace-scope variables referenced in called routines
1277 // in the region are shared unless they appear in a threadprivate
1278 // directive.
1279 if (VD && !VD->isFunctionOrMethodVarDecl() && !isa<ParmVarDecl>(VD))
1280 DVar.CKind = OMPC_shared;
1281
1282 // OpenMP [2.9.1.2, Data-sharing Attribute Rules for Variables Referenced
1283 // in a region but not in construct]
1284 // Variables with static storage duration that are declared in called
1285 // routines in the region are shared.
1286 if (VD && VD->hasGlobalStorage())
1287 DVar.CKind = OMPC_shared;
1288
1289 // Non-static data members are shared by default.
1290 if (FD)
1291 DVar.CKind = OMPC_shared;
1292
1293 return DVar;
1294 }
1295
1296 // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1297 // in a Construct, C/C++, predetermined, p.1]
1298 // Variables with automatic storage duration that are declared in a scope
1299 // inside the construct are private.
1300 if (VD && isOpenMPLocal(VD, Iter) && VD->isLocalVarDecl() &&
1301 (VD->getStorageClass() == SC_Auto || VD->getStorageClass() == SC_None)) {
1302 DVar.CKind = OMPC_private;
1303 return DVar;
1304 }
1305
1306 DVar.DKind = Iter->Directive;
1307 // Explicitly specified attributes and local variables with predetermined
1308 // attributes.
1309 if (Iter->SharingMap.count(D)) {
1310 const DSAInfo &Data = Iter->SharingMap.lookup(D);
1311 DVar.RefExpr = Data.RefExpr.getPointer();
1312 DVar.PrivateCopy = Data.PrivateCopy;
1313 DVar.CKind = Data.Attributes;
1314 DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
1315 DVar.Modifier = Data.Modifier;
1316 DVar.AppliedToPointee = Data.AppliedToPointee;
1317 return DVar;
1318 }
1319
1320 // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1321 // in a Construct, C/C++, implicitly determined, p.1]
1322 // In a parallel or task construct, the data-sharing attributes of these
1323 // variables are determined by the default clause, if present.
1324 switch (Iter->DefaultAttr) {
1325 case DSA_shared:
1326 DVar.CKind = OMPC_shared;
1327 DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
1328 return DVar;
1329 case DSA_none:
1330 return DVar;
1331 case DSA_firstprivate:
1332 if (VD && VD->getStorageDuration() == SD_Static &&
1333 VD->getDeclContext()->isFileContext()) {
1334 DVar.CKind = OMPC_unknown;
1335 } else {
1336 DVar.CKind = OMPC_firstprivate;
1337 }
1338 DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
1339 return DVar;
1340 case DSA_private:
1341 // each variable with static storage duration that is declared
1342 // in a namespace or global scope and referenced in the construct,
1343 // and that does not have a predetermined data-sharing attribute
1344 if (VD && VD->getStorageDuration() == SD_Static &&
1345 VD->getDeclContext()->isFileContext()) {
1346 DVar.CKind = OMPC_unknown;
1347 } else {
1348 DVar.CKind = OMPC_private;
1349 }
1350 DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
1351 return DVar;
1352 case DSA_unspecified:
1353 // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1354 // in a Construct, implicitly determined, p.2]
1355 // In a parallel construct, if no default clause is present, these
1356 // variables are shared.
1357 DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
1358 if ((isOpenMPParallelDirective(DVar.DKind) &&
1359 !isOpenMPTaskLoopDirective(DVar.DKind)) ||
1360 isOpenMPTeamsDirective(DVar.DKind)) {
1361 DVar.CKind = OMPC_shared;
1362 return DVar;
1363 }
1364
1365 // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1366 // in a Construct, implicitly determined, p.4]
1367 // In a task construct, if no default clause is present, a variable that in
1368 // the enclosing context is determined to be shared by all implicit tasks
1369 // bound to the current team is shared.
1370 if (isOpenMPTaskingDirective(DVar.DKind)) {
1371 DSAVarData DVarTemp;
1372 const_iterator I = Iter, E = end();
1373 do {
1374 ++I;
1375 // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables
1376 // Referenced in a Construct, implicitly determined, p.6]
1377 // In a task construct, if no default clause is present, a variable
1378 // whose data-sharing attribute is not determined by the rules above is
1379 // firstprivate.
1380 DVarTemp = getDSA(I, D);
1381 if (DVarTemp.CKind != OMPC_shared) {
1382 DVar.RefExpr = nullptr;
1383 DVar.CKind = OMPC_firstprivate;
1384 return DVar;
1385 }
1386 } while (I != E && !isImplicitTaskingRegion(I->Directive));
1387 DVar.CKind =
1388 (DVarTemp.CKind == OMPC_unknown) ? OMPC_firstprivate : OMPC_shared;
1389 return DVar;
1390 }
1391 }
1392 // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1393 // in a Construct, implicitly determined, p.3]
1394 // For constructs other than task, if no default clause is present, these
1395 // variables inherit their data-sharing attributes from the enclosing
1396 // context.
1397 return getDSA(++Iter, D);
1398}
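// A worked example of the implicitly-determined rules above:
//   int G;                      // namespace scope
//   void foo() {
//     int L = 0;
//   #pragma omp task
//     { G++; L++; }             // G -> OMPC_shared, L -> OMPC_firstprivate
//   }
// G is shared in the enclosing context, so it stays shared inside the task;
// L is not, so it becomes firstprivate.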
1399
1400const Expr *DSAStackTy::addUniqueAligned(const ValueDecl *D,
1401 const Expr *NewDE) {
1402 assert(!isStackEmpty() && "Data sharing attributes stack is empty");
1403 D = getCanonicalDecl(D);
1404 SharingMapTy &StackElem = getTopOfStack();
1405 auto [It, Inserted] = StackElem.AlignedMap.try_emplace(D, NewDE);
1406 if (Inserted) {
1407 assert(NewDE && "Unexpected nullptr expr to be added into aligned map");
1408 return nullptr;
1409 }
1410 assert(It->second && "Unexpected nullptr expr in the aligned map");
1411 return It->second;
1412}
1413
1414const Expr *DSAStackTy::addUniqueNontemporal(const ValueDecl *D,
1415 const Expr *NewDE) {
1416 assert(!isStackEmpty() && "Data sharing attributes stack is empty");
1417 D = getCanonicalDecl(D);
1418 SharingMapTy &StackElem = getTopOfStack();
1419 auto [It, Inserted] = StackElem.NontemporalMap.try_emplace(D, NewDE);
1420 if (Inserted) {
1421 assert(NewDE && "Unexpected nullptr expr to be added into aligned map");
1422 return nullptr;
1423 }
1424 assert(It->second && "Unexpected nullptr expr in the aligned map");
1425 return It->second;
1426}
1427
1428void DSAStackTy::addLoopControlVariable(const ValueDecl *D, VarDecl *Capture) {
1429 assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
1430 D = getCanonicalDecl(D);
1431 SharingMapTy &StackElem = getTopOfStack();
1432 StackElem.LCVMap.try_emplace(
1433 D, LCDeclInfo(StackElem.LCVMap.size() + 1, Capture));
1434}
1435
1436const DSAStackTy::LCDeclInfo
1437DSAStackTy::isLoopControlVariable(const ValueDecl *D) const {
1438 assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
1439 D = getCanonicalDecl(D);
1440 const SharingMapTy &StackElem = getTopOfStack();
1441 auto It = StackElem.LCVMap.find(D);
1442 if (It != StackElem.LCVMap.end())
1443 return It->second;
1444 return {0, nullptr};
1445}
1446
1447const DSAStackTy::LCDeclInfo
1448DSAStackTy::isLoopControlVariable(const ValueDecl *D, unsigned Level) const {
1449 assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
1450 D = getCanonicalDecl(D);
1451 for (unsigned I = Level + 1; I > 0; --I) {
1452 const SharingMapTy &StackElem = getStackElemAtLevel(I - 1);
1453 auto It = StackElem.LCVMap.find(D);
1454 if (It != StackElem.LCVMap.end())
1455 return It->second;
1456 }
1457 return {0, nullptr};
1458}
1459
1460const DSAStackTy::LCDeclInfo
1461DSAStackTy::isParentLoopControlVariable(const ValueDecl *D) const {
1462 const SharingMapTy *Parent = getSecondOnStackOrNull();
1463 assert(Parent && "Data-sharing attributes stack is empty");
1464 D = getCanonicalDecl(D);
1465 auto It = Parent->LCVMap.find(D);
1466 if (It != Parent->LCVMap.end())
1467 return It->second;
1468 return {0, nullptr};
1469}
1470
1471const ValueDecl *DSAStackTy::getParentLoopControlVariable(unsigned I) const {
1472 const SharingMapTy *Parent = getSecondOnStackOrNull();
1473 assert(Parent && "Data-sharing attributes stack is empty");
1474 if (Parent->LCVMap.size() < I)
1475 return nullptr;
1476 for (const auto &Pair : Parent->LCVMap)
1477 if (Pair.second.first == I)
1478 return Pair.first;
1479 return nullptr;
1480}
1481
1482void DSAStackTy::addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
1483 DeclRefExpr *PrivateCopy, unsigned Modifier,
1484 bool AppliedToPointee) {
1485 D = getCanonicalDecl(D);
1486 if (A == OMPC_threadprivate) {
1487 DSAInfo &Data = Threadprivates[D];
1488 Data.Attributes = A;
1489 Data.RefExpr.setPointer(E);
1490 Data.PrivateCopy = nullptr;
1491 Data.Modifier = Modifier;
1492 } else {
1493 DSAInfo &Data = getTopOfStack().SharingMap[D];
1494 assert(Data.Attributes == OMPC_unknown || (A == Data.Attributes) ||
1495 (A == OMPC_firstprivate && Data.Attributes == OMPC_lastprivate) ||
1496 (A == OMPC_lastprivate && Data.Attributes == OMPC_firstprivate) ||
1497 (isLoopControlVariable(D).first && A == OMPC_private));
1498 Data.Modifier = Modifier;
1499 if (A == OMPC_lastprivate && Data.Attributes == OMPC_firstprivate) {
1500 Data.RefExpr.setInt(/*IntVal=*/true);
1501 return;
1502 }
1503 const bool IsLastprivate =
1504 A == OMPC_lastprivate || Data.Attributes == OMPC_lastprivate;
1505 Data.Attributes = A;
1506 Data.RefExpr.setPointerAndInt(E, IsLastprivate);
1507 Data.PrivateCopy = PrivateCopy;
1508 Data.AppliedToPointee = AppliedToPointee;
1509 if (PrivateCopy) {
1510 DSAInfo &Data = getTopOfStack().SharingMap[PrivateCopy->getDecl()];
1511 Data.Modifier = Modifier;
1512 Data.Attributes = A;
1513 Data.RefExpr.setPointerAndInt(PrivateCopy, IsLastprivate);
1514 Data.PrivateCopy = nullptr;
1515 Data.AppliedToPointee = AppliedToPointee;
1516 }
1517 }
1518}
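
// The firstprivate/lastprivate merging above corresponds to user code such as
// the following (illustrative example, not part of this file), where a single
// list item carries both attributes on one construct:
//
//   #pragma omp parallel for firstprivate(x) lastprivate(x)
//   for (int i = 0; i < n; ++i)
//     x += i;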
1519
1520/// Build a variable declaration for an OpenMP loop iteration variable.
1521static VarDecl *buildVarDecl(Sema &SemaRef, SourceLocation Loc, QualType Type,
1522 StringRef Name, const AttrVec *Attrs = nullptr,
1523 DeclRefExpr *OrigRef = nullptr) {
1524 DeclContext *DC = SemaRef.CurContext;
1525 IdentifierInfo *II = &SemaRef.PP.getIdentifierTable().get(Name);
1526 TypeSourceInfo *TInfo = SemaRef.Context.getTrivialTypeSourceInfo(Type, Loc);
1527 auto *Decl =
1528 VarDecl::Create(SemaRef.Context, DC, Loc, Loc, II, Type, TInfo, SC_None);
1529 if (Attrs) {
1530 for (specific_attr_iterator<AlignedAttr> I(Attrs->begin()), E(Attrs->end());
1531 I != E; ++I)
1532 Decl->addAttr(*I);
1533 }
1534 Decl->setImplicit();
1535 if (OrigRef) {
1536 Decl->addAttr(
1537 OMPReferencedVarAttr::CreateImplicit(SemaRef.Context, OrigRef));
1538 }
1539 return Decl;
1540}
1541
1542static DeclRefExpr *buildDeclRefExpr(Sema &S, VarDecl *D, QualType Ty,
1543 SourceLocation Loc,
1544 bool RefersToCapture = false) {
1545 D->setReferenced();
1546 D->markUsed(S.Context);
1547 return DeclRefExpr::Create(S.getASTContext(), NestedNameSpecifierLoc(),
1548 SourceLocation(), D, RefersToCapture, Loc, Ty,
1549 VK_LValue);
1550}
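
// Taken together, buildVarDecl and buildDeclRefExpr are the pattern used below
// to materialize compiler-generated helper variables and references to them.
// A minimal sketch of that usage (names are illustrative only):
//
//   VarDecl *Tmp = buildVarDecl(SemaRef, Loc, SemaRef.Context.VoidPtrTy, ".tmp.");
//   DeclRefExpr *TmpRef =
//       buildDeclRefExpr(SemaRef, Tmp, SemaRef.Context.VoidPtrTy, Loc);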
1551
1552void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
1553 BinaryOperatorKind BOK) {
1554 D = getCanonicalDecl(D);
1555 assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
1556 assert(
1557 getTopOfStack().SharingMap[D].Attributes == OMPC_reduction &&
1558 "Additional reduction info may be specified only for reduction items.");
1559 ReductionData &ReductionData = getTopOfStack().ReductionMap[D];
1560 assert(ReductionData.ReductionRange.isInvalid() &&
1561 (getTopOfStack().Directive == OMPD_taskgroup ||
1562 ((isOpenMPParallelDirective(getTopOfStack().Directive) ||
1563 isOpenMPWorksharingDirective(getTopOfStack().Directive)) &&
1564 !isOpenMPSimdDirective(getTopOfStack().Directive))) &&
1565 "Additional reduction info may be specified only once for reduction "
1566 "items.");
1567 ReductionData.set(BOK, SR);
1568 Expr *&TaskgroupReductionRef = getTopOfStack().TaskgroupReductionRef;
1569 if (!TaskgroupReductionRef) {
1570 VarDecl *VD = buildVarDecl(SemaRef, SR.getBegin(),
1571 SemaRef.Context.VoidPtrTy, ".task_red.");
1572 TaskgroupReductionRef =
1573 buildDeclRefExpr(SemaRef, VD, SemaRef.Context.VoidPtrTy, SR.getBegin());
1574 }
1575}
1576
1577void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
1578 const Expr *ReductionRef) {
1579 D = getCanonicalDecl(D);
1580 assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
1581 assert(
1582 getTopOfStack().SharingMap[D].Attributes == OMPC_reduction &&
1583 "Additional reduction info may be specified only for reduction items.");
1584 ReductionData &ReductionData = getTopOfStack().ReductionMap[D];
1585 assert(ReductionData.ReductionRange.isInvalid() &&
1586 (getTopOfStack().Directive == OMPD_taskgroup ||
1587 ((isOpenMPParallelDirective(getTopOfStack().Directive) ||
1588 isOpenMPWorksharingDirective(getTopOfStack().Directive)) &&
1589 !isOpenMPSimdDirective(getTopOfStack().Directive))) &&
1590 "Additional reduction info may be specified only once for reduction "
1591 "items.");
1592 ReductionData.set(ReductionRef, SR);
1593 Expr *&TaskgroupReductionRef = getTopOfStack().TaskgroupReductionRef;
1594 if (!TaskgroupReductionRef) {
1595 VarDecl *VD = buildVarDecl(SemaRef, SR.getBegin(),
1596 SemaRef.Context.VoidPtrTy, ".task_red.");
1597 TaskgroupReductionRef =
1598 buildDeclRefExpr(SemaRef, VD, SemaRef.Context.VoidPtrTy, SR.getBegin());
1599 }
1600}
1601
1602const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
1603 const ValueDecl *D, SourceRange &SR, BinaryOperatorKind &BOK,
1604 Expr *&TaskgroupDescriptor) const {
1605 D = getCanonicalDecl(D);
1606 assert(!isStackEmpty() && "Data-sharing attributes stack is empty.");
1607 for (const_iterator I = begin() + 1, E = end(); I != E; ++I) {
1608 const DSAInfo &Data = I->SharingMap.lookup(D);
1609 if (Data.Attributes != OMPC_reduction ||
1610 Data.Modifier != OMPC_REDUCTION_task)
1611 continue;
1612 const ReductionData &ReductionData = I->ReductionMap.lookup(D);
1613 if (!ReductionData.ReductionOp ||
1614 isa<const Expr *>(ReductionData.ReductionOp))
1615 return DSAVarData();
1616 SR = ReductionData.ReductionRange;
1617 BOK = cast<ReductionData::BOKPtrType>(ReductionData.ReductionOp);
1618 assert(I->TaskgroupReductionRef && "taskgroup reduction reference "
1619 "expression for the descriptor is not "
1620 "set.");
1621 TaskgroupDescriptor = I->TaskgroupReductionRef;
1622 return DSAVarData(I->Directive, OMPC_reduction, Data.RefExpr.getPointer(),
1623 Data.PrivateCopy, I->DefaultAttrLoc, OMPC_REDUCTION_task,
1624 /*AppliedToPointee=*/false);
1625 }
1626 return DSAVarData();
1627}
1628
1629const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
1630 const ValueDecl *D, SourceRange &SR, const Expr *&ReductionRef,
1631 Expr *&TaskgroupDescriptor) const {
1632 D = getCanonicalDecl(D);
1633 assert(!isStackEmpty() && "Data-sharing attributes stack is empty.");
1634 for (const_iterator I = begin() + 1, E = end(); I != E; ++I) {
1635 const DSAInfo &Data = I->SharingMap.lookup(D);
1636 if (Data.Attributes != OMPC_reduction ||
1637 Data.Modifier != OMPC_REDUCTION_task)
1638 continue;
1639 const ReductionData &ReductionData = I->ReductionMap.lookup(D);
1640 if (!ReductionData.ReductionOp ||
1641 !isa<const Expr *>(ReductionData.ReductionOp))
1642 return DSAVarData();
1643 SR = ReductionData.ReductionRange;
1644 ReductionRef = cast<const Expr *>(ReductionData.ReductionOp);
1645 assert(I->TaskgroupReductionRef && "taskgroup reduction reference "
1646 "expression for the descriptor is not "
1647 "set.");
1648 TaskgroupDescriptor = I->TaskgroupReductionRef;
1649 return DSAVarData(I->Directive, OMPC_reduction, Data.RefExpr.getPointer(),
1650 Data.PrivateCopy, I->DefaultAttrLoc, OMPC_REDUCTION_task,
1651 /*AppliedToPointee=*/false);
1652 }
1653 return DSAVarData();
1654}
1655
1656bool DSAStackTy::isOpenMPLocal(VarDecl *D, const_iterator I) const {
1657 D = D->getCanonicalDecl();
1658 for (const_iterator E = end(); I != E; ++I) {
1659 if (isImplicitOrExplicitTaskingRegion(I->Directive) ||
1660 isOpenMPTargetExecutionDirective(I->Directive)) {
1661 if (I->CurScope) {
1662 Scope *TopScope = I->CurScope->getParent();
1663 Scope *CurScope = getCurScope();
1664 while (CurScope && CurScope != TopScope && !CurScope->isDeclScope(D))
1665 CurScope = CurScope->getParent();
1666 return CurScope != TopScope;
1667 }
1668 for (DeclContext *DC = D->getDeclContext(); DC; DC = DC->getParent())
1669 if (I->Context == DC)
1670 return true;
1671 return false;
1672 }
1673 }
1674 return false;
1675}
1676
1677static bool isConstNotMutableType(Sema &SemaRef, QualType Type,
1678 bool AcceptIfMutable = true,
1679 bool *IsClassType = nullptr) {
1680 ASTContext &Context = SemaRef.getASTContext();
1681 Type = Type.getNonReferenceType().getCanonicalType();
1682 bool IsConstant = Type.isConstant(Context);
1683 Type = Context.getBaseElementType(Type);
1684 const CXXRecordDecl *RD = AcceptIfMutable && SemaRef.getLangOpts().CPlusPlus
1685 ? Type->getAsCXXRecordDecl()
1686 : nullptr;
1687 if (const auto *CTSD = dyn_cast_or_null<ClassTemplateSpecializationDecl>(RD))
1688 if (const ClassTemplateDecl *CTD = CTSD->getSpecializedTemplate())
1689 RD = CTD->getTemplatedDecl();
1690 if (IsClassType)
1691 *IsClassType = RD;
1692 return IsConstant && !(SemaRef.getLangOpts().CPlusPlus && RD &&
1693 RD->hasDefinition() && RD->hasMutableFields());
1694}
1695
1696static bool rejectConstNotMutableType(Sema &SemaRef, const ValueDecl *D,
1697 QualType Type, OpenMPClauseKind CKind,
1698 SourceLocation ELoc,
1699 bool AcceptIfMutable = true,
1700 bool ListItemNotVar = false) {
1701 ASTContext &Context = SemaRef.getASTContext();
1702 bool IsClassType;
1703 if (isConstNotMutableType(SemaRef, Type, AcceptIfMutable, &IsClassType)) {
1704 unsigned Diag = ListItemNotVar ? diag::err_omp_const_list_item
1705 : IsClassType ? diag::err_omp_const_not_mutable_variable
1706 : diag::err_omp_const_variable;
1707 SemaRef.Diag(ELoc, Diag) << getOpenMPClauseName(CKind);
1708 if (!ListItemNotVar && D) {
1709 const VarDecl *VD = dyn_cast<VarDecl>(D);
1710 bool IsDecl = !VD || VD->isThisDeclarationADefinition(Context) ==
1711 VarDecl::DeclarationOnly;
1712 SemaRef.Diag(D->getLocation(),
1713 IsDecl ? diag::note_previous_decl : diag::note_defined_here)
1714 << D;
1715 }
1716 return true;
1717 }
1718 return false;
1719}
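
// For example, rejectConstNotMutableType is what fires on user code like the
// following (illustrative only): a const list item with no mutable fields
// cannot be privatized.
//
//   const int N = 10;
//   #pragma omp parallel for lastprivate(N) // error: const list item
//   for (int i = 0; i < N; ++i) {}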
1720
1721const DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D,
1722 bool FromParent) {
1723 D = getCanonicalDecl(D);
1724 DSAVarData DVar;
1725
1726 auto *VD = dyn_cast<VarDecl>(D);
1727 auto TI = Threadprivates.find(D);
1728 if (TI != Threadprivates.end()) {
1729 DVar.RefExpr = TI->getSecond().RefExpr.getPointer();
1730 DVar.CKind = OMPC_threadprivate;
1731 DVar.Modifier = TI->getSecond().Modifier;
1732 return DVar;
1733 }
1734 if (VD && VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
1735 DVar.RefExpr = buildDeclRefExpr(
1736 SemaRef, VD, D->getType().getNonReferenceType(),
1737 VD->getAttr<OMPThreadPrivateDeclAttr>()->getLocation());
1738 DVar.CKind = OMPC_threadprivate;
1739 addDSA(D, DVar.RefExpr, OMPC_threadprivate);
1740 return DVar;
1741 }
1742 // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1743 // in a Construct, C/C++, predetermined, p.1]
1744 // Variables appearing in threadprivate directives are threadprivate.
1745 if ((VD && VD->getTLSKind() != VarDecl::TLS_None &&
1746 !(VD->hasAttr<OMPThreadPrivateDeclAttr>() &&
1747 SemaRef.getLangOpts().OpenMPUseTLS &&
1748 SemaRef.getASTContext().getTargetInfo().isTLSSupported())) ||
1749 (VD && VD->getStorageClass() == SC_Register &&
1750 VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())) {
1751 DVar.RefExpr = buildDeclRefExpr(
1752 SemaRef, VD, D->getType().getNonReferenceType(), D->getLocation());
1753 DVar.CKind = OMPC_threadprivate;
1754 addDSA(D, DVar.RefExpr, OMPC_threadprivate);
1755 return DVar;
1756 }
1757 if (SemaRef.getLangOpts().OpenMPCUDAMode && VD &&
1758 VD->isLocalVarDeclOrParm() && !isStackEmpty() &&
1759 !isLoopControlVariable(D).first) {
1760 const_iterator IterTarget =
1761 std::find_if(begin(), end(), [](const SharingMapTy &Data) {
1762 return isOpenMPTargetExecutionDirective(Data.Directive);
1763 });
1764 if (IterTarget != end()) {
1765 const_iterator ParentIterTarget = IterTarget + 1;
1766 for (const_iterator Iter = begin(); Iter != ParentIterTarget; ++Iter) {
1767 if (isOpenMPLocal(VD, Iter)) {
1768 DVar.RefExpr =
1769 buildDeclRefExpr(SemaRef, VD, D->getType().getNonReferenceType(),
1770 D->getLocation());
1771 DVar.CKind = OMPC_threadprivate;
1772 return DVar;
1773 }
1774 }
1775 if (!isClauseParsingMode() || IterTarget != begin()) {
1776 auto DSAIter = IterTarget->SharingMap.find(D);
1777 if (DSAIter != IterTarget->SharingMap.end() &&
1778 isOpenMPPrivate(DSAIter->getSecond().Attributes)) {
1779 DVar.RefExpr = DSAIter->getSecond().RefExpr.getPointer();
1780 DVar.CKind = OMPC_threadprivate;
1781 return DVar;
1782 }
1783 const_iterator End = end();
1784 if (!SemaRef.OpenMP().isOpenMPCapturedByRef(
1785 D, std::distance(ParentIterTarget, End),
1786 /*OpenMPCaptureLevel=*/0)) {
1787 DVar.RefExpr =
1788 buildDeclRefExpr(SemaRef, VD, D->getType().getNonReferenceType(),
1789 IterTarget->ConstructLoc);
1790 DVar.CKind = OMPC_threadprivate;
1791 return DVar;
1792 }
1793 }
1794 }
1795 }
1796
1797 if (isStackEmpty())
1798 // Not in an OpenMP execution region, and the top scope was already checked.
1799 return DVar;
1800
1801 // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1802 // in a Construct, C/C++, predetermined, p.4]
1803 // Static data members are shared.
1804 // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1805 // in a Construct, C/C++, predetermined, p.7]
1806 // Variables with static storage duration that are declared in a scope
1807 // inside the construct are shared.
1808 if (VD && VD->isStaticDataMember()) {
1809 // Check for explicitly specified attributes.
1810 const_iterator I = begin();
1811 const_iterator EndI = end();
1812 if (FromParent && I != EndI)
1813 ++I;
1814 if (I != EndI) {
1815 auto It = I->SharingMap.find(D);
1816 if (It != I->SharingMap.end()) {
1817 const DSAInfo &Data = It->getSecond();
1818 DVar.RefExpr = Data.RefExpr.getPointer();
1819 DVar.PrivateCopy = Data.PrivateCopy;
1820 DVar.CKind = Data.Attributes;
1821 DVar.ImplicitDSALoc = I->DefaultAttrLoc;
1822 DVar.DKind = I->Directive;
1823 DVar.Modifier = Data.Modifier;
1824 DVar.AppliedToPointee = Data.AppliedToPointee;
1825 return DVar;
1826 }
1827 }
1828
1829 DVar.CKind = OMPC_shared;
1830 return DVar;
1831 }
1832
1833 auto &&MatchesAlways = [](OpenMPDirectiveKind) { return true; };
1834 // The predetermined shared attribute for const-qualified types having no
1835 // mutable members was removed after OpenMP 3.1.
1836 if (SemaRef.LangOpts.OpenMP <= 31) {
1837 // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1838 // in a Construct, C/C++, predetermined, p.6]
1839 // Variables with const qualified type having no mutable member are
1840 // shared.
1841 if (isConstNotMutableType(SemaRef, D->getType())) {
1842 // Variables with const-qualified type having no mutable member may be
1843 // listed in a firstprivate clause, even if they are static data members.
1844 DSAVarData DVarTemp = hasInnermostDSA(
1845 D,
1846 [](OpenMPClauseKind C, bool) {
1847 return C == OMPC_firstprivate || C == OMPC_shared;
1848 },
1849 MatchesAlways, FromParent);
1850 if (DVarTemp.CKind != OMPC_unknown && DVarTemp.RefExpr)
1851 return DVarTemp;
1852
1853 DVar.CKind = OMPC_shared;
1854 return DVar;
1855 }
1856 }
1857
1858 // Explicitly specified attributes and local variables with predetermined
1859 // attributes.
1860 const_iterator I = begin();
1861 const_iterator EndI = end();
1862 if (FromParent && I != EndI)
1863 ++I;
1864 if (I == EndI)
1865 return DVar;
1866 auto It = I->SharingMap.find(D);
1867 if (It != I->SharingMap.end()) {
1868 const DSAInfo &Data = It->getSecond();
1869 DVar.RefExpr = Data.RefExpr.getPointer();
1870 DVar.PrivateCopy = Data.PrivateCopy;
1871 DVar.CKind = Data.Attributes;
1872 DVar.ImplicitDSALoc = I->DefaultAttrLoc;
1873 DVar.DKind = I->Directive;
1874 DVar.Modifier = Data.Modifier;
1875 DVar.AppliedToPointee = Data.AppliedToPointee;
1876 }
1877
1878 return DVar;
1879}
1880
1881const DSAStackTy::DSAVarData DSAStackTy::getImplicitDSA(ValueDecl *D,
1882 bool FromParent) const {
1883 if (isStackEmpty()) {
1884 const_iterator I;
1885 return getDSA(I, D);
1886 }
1887 D = getCanonicalDecl(D);
1888 const_iterator StartI = begin();
1889 const_iterator EndI = end();
1890 if (FromParent && StartI != EndI)
1891 ++StartI;
1892 return getDSA(StartI, D);
1893}
1894
1895const DSAStackTy::DSAVarData DSAStackTy::getImplicitDSA(ValueDecl *D,
1896 unsigned Level) const {
1897 if (getStackSize() <= Level)
1898 return DSAVarData();
1899 D = getCanonicalDecl(D);
1900 const_iterator StartI = std::next(begin(), getStackSize() - 1 - Level);
1901 return getDSA(StartI, D);
1902}
1903
1904const DSAStackTy::DSAVarData
1905DSAStackTy::hasDSA(ValueDecl *D,
1906 const llvm::function_ref<bool(OpenMPClauseKind, bool,
1907 DefaultDataSharingAttributes)>
1908 CPred,
1909 const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
1910 bool FromParent) const {
1911 if (isStackEmpty())
1912 return {};
1913 D = getCanonicalDecl(D);
1914 const_iterator I = begin();
1915 const_iterator EndI = end();
1916 if (FromParent && I != EndI)
1917 ++I;
1918 for (; I != EndI; ++I) {
1919 if (!DPred(I->Directive) &&
1920 !isImplicitOrExplicitTaskingRegion(I->Directive))
1921 continue;
1922 const_iterator NewI = I;
1923 DSAVarData DVar = getDSA(NewI, D);
1924 if (I == NewI && CPred(DVar.CKind, DVar.AppliedToPointee, I->DefaultAttr))
1925 return DVar;
1926 }
1927 return {};
1928}
1929
1930const DSAStackTy::DSAVarData DSAStackTy::hasInnermostDSA(
1931 ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind, bool)> CPred,
1932 const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
1933 bool FromParent) const {
1934 if (isStackEmpty())
1935 return {};
1936 D = getCanonicalDecl(D);
1937 const_iterator StartI = begin();
1938 const_iterator EndI = end();
1939 if (FromParent && StartI != EndI)
1940 ++StartI;
1941 if (StartI == EndI || !DPred(StartI->Directive))
1942 return {};
1943 const_iterator NewI = StartI;
1944 DSAVarData DVar = getDSA(NewI, D);
1945 return (NewI == StartI && CPred(DVar.CKind, DVar.AppliedToPointee))
1946 ? DVar
1947 : DSAVarData();
1948}
1949
1950bool DSAStackTy::hasExplicitDSA(
1951 const ValueDecl *D,
1952 const llvm::function_ref<bool(OpenMPClauseKind, bool)> CPred,
1953 unsigned Level, bool NotLastprivate) const {
1954 if (getStackSize() <= Level)
1955 return false;
1956 D = getCanonicalDecl(D);
1957 const SharingMapTy &StackElem = getStackElemAtLevel(Level);
1958 auto I = StackElem.SharingMap.find(D);
1959 if (I != StackElem.SharingMap.end() && I->getSecond().RefExpr.getPointer() &&
1960 CPred(I->getSecond().Attributes, I->getSecond().AppliedToPointee) &&
1961 (!NotLastprivate || !I->getSecond().RefExpr.getInt()))
1962 return true;
1963 // Check predetermined rules for the loop control variables.
1964 auto LI = StackElem.LCVMap.find(D);
1965 if (LI != StackElem.LCVMap.end())
1966 return CPred(OMPC_private, /*AppliedToPointee=*/false);
1967 return false;
1968}
1969
1970bool DSAStackTy::hasExplicitDirective(
1971 const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
1972 unsigned Level) const {
1973 if (getStackSize() <= Level)
1974 return false;
1975 const SharingMapTy &StackElem = getStackElemAtLevel(Level);
1976 return DPred(StackElem.Directive);
1977}
1978
1979bool DSAStackTy::hasDirective(
1980 const llvm::function_ref<bool(OpenMPDirectiveKind,
1982 DPred,
1983 bool FromParent) const {
1984 // We look only in the enclosing region.
1985 size_t Skip = FromParent ? 2 : 1;
1986 for (const_iterator I = begin() + std::min(Skip, getStackSize()), E = end();
1987 I != E; ++I) {
1988 if (DPred(I->Directive, I->DirectiveName, I->ConstructLoc))
1989 return true;
1990 }
1991 return false;
1992}
1993
1994void SemaOpenMP::InitDataSharingAttributesStack() {
1995 VarDataSharingAttributesStack = new DSAStackTy(SemaRef);
1996}
1997
1998#define DSAStack static_cast<DSAStackTy *>(VarDataSharingAttributesStack)
1999
2000void SemaOpenMP::pushOpenMPFunctionRegion() { DSAStack->pushFunction(); }
2001
2002void SemaOpenMP::popOpenMPFunctionRegion(const FunctionScopeInfo *OldFSI) {
2003 DSAStack->popFunction(OldFSI);
2004}
2005
2007 assert(S.LangOpts.OpenMP && S.LangOpts.OpenMPIsTargetDevice &&
2008 "Expected OpenMP device compilation.");
2010}
2011
2012namespace {
2013/// Status of the function emission on the host/device.
2014enum class FunctionEmissionStatus {
2015 Emitted,
2016 Discarded,
2017 Unknown,
2018};
2019} // anonymous namespace
2020
2023 const FunctionDecl *FD) {
2024 assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
2025 "Expected OpenMP device compilation.");
2026
2027 SemaDiagnosticBuilder::Kind Kind = SemaDiagnosticBuilder::K_Nop;
2028 if (FD) {
2030 switch (FES) {
2032 Kind = SemaDiagnosticBuilder::K_Immediate;
2033 break;
2035 // TODO: We should always delay diagnostics here in case a target
2036 // region is in a function we do not emit. However, as the
2037 // current diagnostics are associated with the function containing
2038 // the target region and we do not emit that one, we would miss out
2039 // on diagnostics for the target region itself. We need to anchor
2040 // the diagnostics with the new generated function *or* ensure we
2041 // emit diagnostics associated with the surrounding function.
2043 ? SemaDiagnosticBuilder::K_Deferred
2044 : SemaDiagnosticBuilder::K_Immediate;
2045 break;
2048 Kind = SemaDiagnosticBuilder::K_Nop;
2049 break;
2051 llvm_unreachable("CUDADiscarded unexpected in OpenMP device compilation");
2052 break;
2053 }
2054 }
2055
2056 return SemaDiagnosticBuilder(Kind, Loc, DiagID, FD, SemaRef);
2057}
2058
2061 const FunctionDecl *FD) {
2062 assert(getLangOpts().OpenMP && !getLangOpts().OpenMPIsTargetDevice &&
2063 "Expected OpenMP host compilation.");
2064
2065 SemaDiagnosticBuilder::Kind Kind = SemaDiagnosticBuilder::K_Nop;
2066 if (FD) {
2068 switch (FES) {
2070 Kind = SemaDiagnosticBuilder::K_Immediate;
2071 break;
2073 Kind = SemaDiagnosticBuilder::K_Deferred;
2074 break;
2078 Kind = SemaDiagnosticBuilder::K_Nop;
2079 break;
2080 }
2081 }
2082
2083 return SemaDiagnosticBuilder(Kind, Loc, DiagID, FD, SemaRef);
2084}
2085
2088 if (LO.OpenMP <= 45) {
2090 return OMPC_DEFAULTMAP_scalar;
2091 return OMPC_DEFAULTMAP_aggregate;
2092 }
2094 return OMPC_DEFAULTMAP_pointer;
2096 return OMPC_DEFAULTMAP_scalar;
2097 return OMPC_DEFAULTMAP_aggregate;
2098}
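
// Roughly, the classification above behaves as follows for user declarations
// (illustrative sketch; the elided checks operate on the declared type):
//
//   int   S;    // scalar    -> OMPC_DEFAULTMAP_scalar
//   int  *P;    // pointer   -> OMPC_DEFAULTMAP_pointer (scalar for OpenMP <= 4.5)
//   int   A[4]; // aggregate -> OMPC_DEFAULTMAP_aggregate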
2099
2101 unsigned OpenMPCaptureLevel) const {
2102 assert(getLangOpts().OpenMP && "OpenMP is not allowed");
2103
2104 ASTContext &Ctx = getASTContext();
2105 bool IsByRef = true;
2106
2107 // Find the directive that is associated with the provided scope.
2108 D = cast<ValueDecl>(D->getCanonicalDecl());
2109 QualType Ty = D->getType();
2110
2111 bool IsVariableUsedInMapClause = false;
2112 if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective, Level)) {
2113 // This table summarizes how a given variable should be passed to the device
2114 // given its type and the clauses where it appears. This table is based on
2115 // the description in OpenMP 4.5 [2.10.4, target Construct] and
2116 // OpenMP 4.5 [2.15.5, Data-mapping Attribute Rules and Clauses].
2117 //
2118 // =========================================================================
2119 // | type | defaultmap | pvt | first | is_device_ptr | map | res. |
2120 // | |(tofrom:scalar)| | pvt | |has_dv_adr| |
2121 // =========================================================================
2122 // | scl | | | | - | | bycopy|
2123 // | scl | | - | x | - | - | bycopy|
2124 // | scl | | x | - | - | - | null |
2125 // | scl | x | | | - | | byref |
2126 // | scl | x | - | x | - | - | bycopy|
2127 // | scl | x | x | - | - | - | null |
2128 // | scl | | - | - | - | x | byref |
2129 // | scl | x | - | - | - | x | byref |
2130 //
2131 // | agg | n.a. | | | - | | byref |
2132 // | agg | n.a. | - | x | - | - | byref |
2133 // | agg | n.a. | x | - | - | - | null |
2134 // | agg | n.a. | - | - | - | x | byref |
2135 // | agg | n.a. | - | - | - | x[] | byref |
2136 //
2137 // | ptr | n.a. | | | - | | bycopy|
2138 // | ptr | n.a. | - | x | - | - | bycopy|
2139 // | ptr | n.a. | x | - | - | - | null |
2140 // | ptr | n.a. | - | - | - | x | byref |
2141 // | ptr | n.a. | - | - | - | x[] | bycopy|
2142 // | ptr | n.a. | - | - | x | | bycopy|
2143 // | ptr | n.a. | - | - | x | x | bycopy|
2144 // | ptr | n.a. | - | - | x | x[] | bycopy|
2145 // =========================================================================
2146 // Legend:
2147 // scl - scalar
2148 // ptr - pointer
2149 // agg - aggregate
2150 // x - applies
2151 // - - invalid in this combination
2152 // [] - mapped with an array section
2153 // byref - should be mapped by reference
2154 // bycopy - should be mapped by copy (passed by value)
2155 // null - initialize a local variable to null on the device
2156 //
2157 // Observations:
2158 // - All scalar declarations that show up in a map clause have to be passed
2159 // by reference, because they may have been mapped in the enclosing data
2160 // environment.
2161 // - If the scalar value does not fit the size of uintptr, it has to be
2162 // passed by reference, regardless of the result in the table above.
2163 // - For pointers mapped by value that have either an implicit map or an
2164 // array section, the runtime library may pass the NULL value to the
2165 // device instead of the value passed to it by the compiler.
2166
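 // As a concrete illustration of a few rows above (user code, illustrative
 // only; not part of this file):
 //
 //   int Scalar = 0;
 //   int Arr[8];
 //   #pragma omp target map(tofrom : Arr[0:8])
 //   {
 //     Arr[0] = 1;   // aggregate appearing in a map clause       -> byref
 //     Scalar += 1;  // scalar, no map and no defaultmap(tofrom)  -> bycopy
 //   }
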
2167 if (Ty->isReferenceType())
2168 Ty = Ty->castAs<ReferenceType>()->getPointeeType();
2169
2170 // Locate map clauses and see if the variable being captured is referred to
2171 // in any of those clauses. Here we only care about variables, not fields,
2172 // because fields are part of aggregates.
2173 bool IsVariableAssociatedWithSection = false;
2174
2175 DSAStack->checkMappableExprComponentListsForDeclAtLevel(
2176 D, Level,
2177 [&IsVariableUsedInMapClause, &IsVariableAssociatedWithSection,
2179 MapExprComponents,
2180 OpenMPClauseKind WhereFoundClauseKind) {
2181 // Information from both the map and has_device_addr clauses influences
2182 // how a variable is captured. E.g. is_device_ptr does not require
2183 // changing the default behavior.
2184 if (WhereFoundClauseKind != OMPC_map &&
2185 WhereFoundClauseKind != OMPC_has_device_addr)
2186 return false;
2187
2188 auto EI = MapExprComponents.rbegin();
2189 auto EE = MapExprComponents.rend();
2190
2191 assert(EI != EE && "Invalid map expression!");
2192
2193 if (isa<DeclRefExpr>(EI->getAssociatedExpression()))
2194 IsVariableUsedInMapClause |= EI->getAssociatedDeclaration() == D;
2195
2196 ++EI;
2197 if (EI == EE)
2198 return false;
2199 auto Last = std::prev(EE);
2200 const auto *UO =
2201 dyn_cast<UnaryOperator>(Last->getAssociatedExpression());
2202 if ((UO && UO->getOpcode() == UO_Deref) ||
2203 isa<ArraySubscriptExpr>(Last->getAssociatedExpression()) ||
2204 isa<ArraySectionExpr>(Last->getAssociatedExpression()) ||
2205 isa<MemberExpr>(EI->getAssociatedExpression()) ||
2206 isa<OMPArrayShapingExpr>(Last->getAssociatedExpression())) {
2207 IsVariableAssociatedWithSection = true;
2208 // There is nothing more we need to know about this variable.
2209 return true;
2210 }
2211
2212 // Keep looking for more map info.
2213 return false;
2214 });
2215
2216 if (IsVariableUsedInMapClause) {
2217 // If the variable is identified in a map clause, it is always captured
2218 // by reference unless it is a pointer that is dereferenced in some way.
2219 IsByRef = !(Ty->isPointerType() && IsVariableAssociatedWithSection);
2220 } else {
2221 // By default, all data of scalar type is mapped by copy (except for
2222 // reduction variables).
2223 // Defaultmap scalar is mutually exclusive with defaultmap pointer.
2224 IsByRef = (DSAStack->isForceCaptureByReferenceInTargetExecutable() &&
2225 !Ty->isAnyPointerType()) ||
2226 !Ty->isScalarType() ||
2227 DSAStack->isDefaultmapCapturedByRef(
2229 DSAStack->hasExplicitDSA(
2230 D,
2231 [](OpenMPClauseKind K, bool AppliedToPointee) {
2232 return K == OMPC_reduction && !AppliedToPointee;
2233 },
2234 Level);
2235 }
2236 }
2237
2238 if (IsByRef && Ty.getNonReferenceType()->isScalarType()) {
2239 IsByRef =
2240 ((IsVariableUsedInMapClause &&
2241 DSAStack->getCaptureRegion(Level, OpenMPCaptureLevel) ==
2242 OMPD_target) ||
2243 !(DSAStack->hasExplicitDSA(
2244 D,
2245 [](OpenMPClauseKind K, bool AppliedToPointee) -> bool {
2246 return K == OMPC_firstprivate ||
2247 (K == OMPC_reduction && AppliedToPointee);
2248 },
2249 Level, /*NotLastprivate=*/true) ||
2250 DSAStack->isUsesAllocatorsDecl(Level, D))) &&
2251 // If the variable is artificial and must be captured by value, try to
2252 // capture it by value.
2253 !(isa<OMPCapturedExprDecl>(D) && !D->hasAttr<OMPCaptureNoInitAttr>() &&
2254 !cast<OMPCapturedExprDecl>(D)->getInit()->isGLValue()) &&
2255 // If the variable is implicitly firstprivate and scalar, capture it
2256 // by copy.
2257 !((DSAStack->getDefaultDSA() == DSA_firstprivate ||
2258 DSAStack->getDefaultDSA() == DSA_private) &&
2259 !DSAStack->hasExplicitDSA(
2260 D, [](OpenMPClauseKind K, bool) { return K != OMPC_unknown; },
2261 Level) &&
2262 !DSAStack->isLoopControlVariable(D, Level).first);
2263 }
2264
2265 // When passing data by copy, we need to make sure it fits the uintptr size
2266 // and alignment, because the runtime library only deals with uintptr types.
2267 // If it does not fit the uintptr size, we need to pass the data by reference
2268 // instead.
2269 if (!IsByRef && (Ctx.getTypeSizeInChars(Ty) >
2271 Ctx.getAlignOfGlobalVarInChars(Ty, dyn_cast<VarDecl>(D)) >
2272 Ctx.getTypeAlignInChars(Ctx.getUIntPtrType()))) {
2273 IsByRef = true;
2274 }
2275
2276 return IsByRef;
2277}
2278
2279unsigned SemaOpenMP::getOpenMPNestingLevel() const {
2280 assert(getLangOpts().OpenMP);
2281 return DSAStack->getNestingLevel();
2282}
2283
2285 return isOpenMPTaskingDirective(DSAStack->getCurrentDirective()) &&
2286 DSAStack->isUntiedRegion();
2287}
2288
2290 return (isOpenMPTargetExecutionDirective(DSAStack->getCurrentDirective()) &&
2291 !DSAStack->isClauseParsingMode()) ||
2292 DSAStack->hasDirective(
2294 SourceLocation) -> bool {
2296 },
2297 false);
2298}
2299
2301 // Only rebuild for fields.
2302 if (!isa<FieldDecl>(D))
2303 return false;
2304 DSAStackTy::DSAVarData DVarPrivate = DSAStack->hasDSA(
2305 D,
2306 [](OpenMPClauseKind C, bool AppliedToPointee,
2307 DefaultDataSharingAttributes DefaultAttr) {
2308 return isOpenMPPrivate(C) && !AppliedToPointee &&
2309 (DefaultAttr == DSA_firstprivate || DefaultAttr == DSA_private);
2310 },
2311 [](OpenMPDirectiveKind) { return true; },
2312 DSAStack->isClauseParsingMode());
2313 if (DVarPrivate.CKind != OMPC_unknown)
2314 return true;
2315 return false;
2316}
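
// This rebuild check matters for class members referenced inside regions with
// implicit private/firstprivate defaults, e.g. (illustrative only):
//
//   struct S {
//     int X;
//     void f() {
//   #pragma omp parallel default(firstprivate)
//       { X += 1; } // 'this->X' is rebuilt to reference its implicit copy
//     }
//   };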
2317
2319 Expr *CaptureExpr, bool WithInit,
2320 DeclContext *CurContext,
2321 bool AsExpression);
2322
2324 unsigned StopAt) {
2325 assert(getLangOpts().OpenMP && "OpenMP is not allowed");
2326 D = getCanonicalDecl(D);
2327
2328 auto *VD = dyn_cast<VarDecl>(D);
2329 // Do not capture constexpr variables.
2330 if (VD && VD->isConstexpr())
2331 return nullptr;
2332
2333 // If we want to determine whether the variable should be captured from the
2334 // perspective of the current capturing scope, and we've already left all the
2335 // capturing scopes of the top directive on the stack, check from the
2336 // perspective of its parent directive (if any) instead.
2337 DSAStackTy::ParentDirectiveScope InParentDirectiveRAII(
2338 *DSAStack, CheckScopeInfo && DSAStack->isBodyComplete());
2339
2340 // If we are attempting to capture a global variable in a directive with
2341 // 'target', return the declaration so that this global is also mapped to
2342 // the device.
2343 if (VD && !VD->hasLocalStorage() &&
2345 SemaRef.getCurLambda())) {
2347 DSAStackTy::DSAVarData DVarTop =
2348 DSAStack->getTopDSA(D, DSAStack->isClauseParsingMode());
2349 if (DVarTop.CKind != OMPC_unknown && DVarTop.RefExpr)
2350 return VD;
2351 // If the declaration is enclosed in a 'declare target' directive,
2352 // then it should not be captured.
2353 //
2354 if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
2355 return nullptr;
2356 CapturedRegionScopeInfo *CSI = nullptr;
2357 for (FunctionScopeInfo *FSI : llvm::drop_begin(
2358 llvm::reverse(SemaRef.FunctionScopes),
2359 CheckScopeInfo ? (SemaRef.FunctionScopes.size() - (StopAt + 1))
2360 : 0)) {
2361 if (!isa<CapturingScopeInfo>(FSI))
2362 return nullptr;
2363 if (auto *RSI = dyn_cast<CapturedRegionScopeInfo>(FSI))
2364 if (RSI->CapRegionKind == CR_OpenMP) {
2365 CSI = RSI;
2366 break;
2367 }
2368 }
2369 assert(CSI && "Failed to find CapturedRegionScopeInfo");
2372 DSAStack->getDirective(CSI->OpenMPLevel));
2373 if (Regions[CSI->OpenMPCaptureLevel] != OMPD_task)
2374 return VD;
2375 }
2377 // Try to mark the variable as declare target if it is used in capturing
2378 // regions.
2379 if (getLangOpts().OpenMP <= 45 &&
2380 !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
2382 return nullptr;
2383 }
2384 }
2385
2386 if (CheckScopeInfo) {
2387 bool OpenMPFound = false;
2388 for (unsigned I = StopAt + 1; I > 0; --I) {
2390 if (!isa<CapturingScopeInfo>(FSI))
2391 return nullptr;
2392 if (auto *RSI = dyn_cast<CapturedRegionScopeInfo>(FSI))
2393 if (RSI->CapRegionKind == CR_OpenMP) {
2394 OpenMPFound = true;
2395 break;
2396 }
2397 }
2398 if (!OpenMPFound)
2399 return nullptr;
2400 }
2401
2402 if (DSAStack->getCurrentDirective() != OMPD_unknown &&
2403 (!DSAStack->isClauseParsingMode() ||
2404 DSAStack->getParentDirective() != OMPD_unknown)) {
2405 auto &&Info = DSAStack->isLoopControlVariable(D);
2406 if (Info.first ||
2407 (VD && VD->hasLocalStorage() &&
2408 isImplicitOrExplicitTaskingRegion(DSAStack->getCurrentDirective())) ||
2409 (VD && DSAStack->isForceVarCapturing()))
2410 return VD ? VD : Info.second;
2411 DSAStackTy::DSAVarData DVarTop =
2412 DSAStack->getTopDSA(D, DSAStack->isClauseParsingMode());
2413 if (DVarTop.CKind != OMPC_unknown && isOpenMPPrivate(DVarTop.CKind) &&
2414 (!VD || VD->hasLocalStorage() || !DVarTop.AppliedToPointee))
2415 return VD ? VD : cast<VarDecl>(DVarTop.PrivateCopy->getDecl());
2416 // Threadprivate variables must not be captured.
2417 if (isOpenMPThreadPrivate(DVarTop.CKind))
2418 return nullptr;
2419 // The variable is not private, or it is a variable in a directive with a
2420 // default(none) clause that is not used in any clause.
2421 DSAStackTy::DSAVarData DVarPrivate = DSAStack->hasDSA(
2422 D,
2423 [](OpenMPClauseKind C, bool AppliedToPointee, bool) {
2424 return isOpenMPPrivate(C) && !AppliedToPointee;
2425 },
2426 [](OpenMPDirectiveKind) { return true; },
2427 DSAStack->isClauseParsingMode());
2428 // Global variables with shared DSA must not be captured.
2429 if (VD && !VD->hasLocalStorage() && DVarPrivate.CKind == OMPC_unknown &&
2430 ((DSAStack->getDefaultDSA() != DSA_none &&
2431 DSAStack->getDefaultDSA() != DSA_private &&
2432 DSAStack->getDefaultDSA() != DSA_firstprivate) ||
2433 DVarTop.CKind == OMPC_shared))
2434 return nullptr;
2435 auto *FD = dyn_cast<FieldDecl>(D);
2436 if (DVarPrivate.CKind != OMPC_unknown && !VD && FD &&
2437 !DVarPrivate.PrivateCopy) {
2438 DSAStackTy::DSAVarData DVarPrivate = DSAStack->hasDSA(
2439 D,
2440 [](OpenMPClauseKind C, bool AppliedToPointee,
2441 DefaultDataSharingAttributes DefaultAttr) {
2442 return isOpenMPPrivate(C) && !AppliedToPointee &&
2443 (DefaultAttr == DSA_firstprivate ||
2444 DefaultAttr == DSA_private);
2445 },
2446 [](OpenMPDirectiveKind) { return true; },
2447 DSAStack->isClauseParsingMode());
2448 if (DVarPrivate.CKind == OMPC_unknown)
2449 return nullptr;
2450
2451 VarDecl *VD = DSAStack->getImplicitFDCapExprDecl(FD);
2452 if (VD)
2453 return VD;
2455 return nullptr;
2458 /*IsImplicit=*/true);
2459 const CXXScopeSpec CS = CXXScopeSpec();
2461 ThisExpr, /*IsArrow=*/true, SourceLocation(),
2464 /*HadMultipleCandidates=*/false, DeclarationNameInfo(), FD->getType(),
2467 SemaRef, FD->getIdentifier(), ME, DVarPrivate.CKind != OMPC_private,
2468 SemaRef.CurContext->getParent(), /*AsExpression=*/false);
2469 DeclRefExpr *VDPrivateRefExpr = buildDeclRefExpr(
2471 VD = cast<VarDecl>(VDPrivateRefExpr->getDecl());
2472 DSAStack->addImplicitDefaultFirstprivateFD(FD, VD);
2473 return VD;
2474 }
2475 if (DVarPrivate.CKind != OMPC_unknown ||
2476 (VD && (DSAStack->getDefaultDSA() == DSA_none ||
2477 DSAStack->getDefaultDSA() == DSA_private ||
2478 DSAStack->getDefaultDSA() == DSA_firstprivate)))
2479 return VD ? VD : cast<VarDecl>(DVarPrivate.PrivateCopy->getDecl());
2480 }
2481 return nullptr;
2482}
2483
2484void SemaOpenMP::adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
2485 unsigned Level) const {
2486 FunctionScopesIndex -= getOpenMPCaptureLevels(DSAStack->getDirective(Level));
2487}
2488
2490 assert(getLangOpts().OpenMP && "OpenMP must be enabled.");
2491 if (isOpenMPLoopDirective(DSAStack->getCurrentDirective()))
2492 DSAStack->loopInit();
2493}
2494
2496 assert(getLangOpts().OpenMP && "OpenMP must be enabled.");
2497 if (isOpenMPLoopDirective(DSAStack->getCurrentDirective())) {
2498 DSAStack->resetPossibleLoopCounter();
2499 DSAStack->loopStart();
2500 }
2501}
2502
2504 unsigned CapLevel) const {
2505 assert(getLangOpts().OpenMP && "OpenMP is not allowed");
2506 if (DSAStack->getCurrentDirective() != OMPD_unknown &&
2507 (!DSAStack->isClauseParsingMode() ||
2508 DSAStack->getParentDirective() != OMPD_unknown)) {
2509 DSAStackTy::DSAVarData DVarPrivate = DSAStack->hasDSA(
2510 D,
2511 [](OpenMPClauseKind C, bool AppliedToPointee,
2512 DefaultDataSharingAttributes DefaultAttr) {
2513 return isOpenMPPrivate(C) && !AppliedToPointee &&
2514 DefaultAttr == DSA_private;
2515 },
2516 [](OpenMPDirectiveKind) { return true; },
2517 DSAStack->isClauseParsingMode());
2518 if (DVarPrivate.CKind == OMPC_private && isa<OMPCapturedExprDecl>(D) &&
2519 DSAStack->isImplicitDefaultFirstprivateFD(cast<VarDecl>(D)) &&
2520 !DSAStack->isLoopControlVariable(D).first)
2521 return OMPC_private;
2522 }
2523 if (DSAStack->hasExplicitDirective(isOpenMPTaskingDirective, Level)) {
2524 bool IsTriviallyCopyable =
2525 D->getType().getNonReferenceType().isTriviallyCopyableType(
2526 getASTContext()) &&
2527 !D->getType()
2528 .getNonReferenceType()
2529 .getCanonicalType()
2530 ->getAsCXXRecordDecl();
2531 OpenMPDirectiveKind DKind = DSAStack->getDirective(Level);
2533 getOpenMPCaptureRegions(CaptureRegions, DKind);
2534 if (isOpenMPTaskingDirective(CaptureRegions[CapLevel]) &&
2535 (IsTriviallyCopyable ||
2536 !isOpenMPTaskLoopDirective(CaptureRegions[CapLevel]))) {
2537 if (DSAStack->hasExplicitDSA(
2538 D,
2539 [](OpenMPClauseKind K, bool) { return K == OMPC_firstprivate; },
2540 Level, /*NotLastprivate=*/true))
2541 return OMPC_firstprivate;
2542 DSAStackTy::DSAVarData DVar = DSAStack->getImplicitDSA(D, Level);
2543 if (DVar.CKind != OMPC_shared &&
2544 !DSAStack->isLoopControlVariable(D, Level).first && !DVar.RefExpr) {
2545 DSAStack->addImplicitTaskFirstprivate(Level, D);
2546 return OMPC_firstprivate;
2547 }
2548 }
2549 }
2550 if (isOpenMPLoopDirective(DSAStack->getCurrentDirective()) &&
2551 !isOpenMPLoopTransformationDirective(DSAStack->getCurrentDirective())) {
2552 if (DSAStack->getAssociatedLoops() > 0 && !DSAStack->isLoopStarted()) {
2553 DSAStack->resetPossibleLoopCounter(D);
2554 DSAStack->loopStart();
2555 return OMPC_private;
2556 }
2557 if ((DSAStack->getPossiblyLoopCounter() == D->getCanonicalDecl() ||
2558 DSAStack->isLoopControlVariable(D).first) &&
2559 !DSAStack->hasExplicitDSA(
2560 D, [](OpenMPClauseKind K, bool) { return K != OMPC_private; },
2561 Level) &&
2562 !isOpenMPSimdDirective(DSAStack->getCurrentDirective()))
2563 return OMPC_private;
2564 }
2565 if (const auto *VD = dyn_cast<VarDecl>(D)) {
2566 if (DSAStack->isThreadPrivate(const_cast<VarDecl *>(VD)) &&
2567 DSAStack->isForceVarCapturing() &&
2568 !DSAStack->hasExplicitDSA(
2569 D, [](OpenMPClauseKind K, bool) { return K == OMPC_copyin; },
2570 Level))
2571 return OMPC_private;
2572 }
2573 // User-defined allocators are private since they must be defined in the
2574 // context of the target region.
2575 if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective, Level) &&
2576 DSAStack->isUsesAllocatorsDecl(Level, D).value_or(
2577 DSAStackTy::UsesAllocatorsDeclKind::AllocatorTrait) ==
2578 DSAStackTy::UsesAllocatorsDeclKind::UserDefinedAllocator)
2579 return OMPC_private;
2580 return (DSAStack->hasExplicitDSA(
2581 D, [](OpenMPClauseKind K, bool) { return K == OMPC_private; },
2582 Level) ||
2583 (DSAStack->isClauseParsingMode() &&
2584 DSAStack->getClauseParsingMode() == OMPC_private) ||
2585 // Consider the taskgroup reduction descriptor variable private
2586 // to avoid possible capture in the region.
2587 (DSAStack->hasExplicitDirective(
2588 [](OpenMPDirectiveKind K) {
2589 return K == OMPD_taskgroup ||
2590 ((isOpenMPParallelDirective(K) ||
2591 isOpenMPWorksharingDirective(K)) &&
2592 !isOpenMPSimdDirective(K));
2593 },
2594 Level) &&
2595 DSAStack->isTaskgroupReductionRef(D, Level)))
2596 ? OMPC_private
2597 : OMPC_unknown;
2598}
2599
2601 unsigned Level) {
2602 assert(getLangOpts().OpenMP && "OpenMP is not allowed");
2603 D = getCanonicalDecl(D);
2604 OpenMPClauseKind OMPC = OMPC_unknown;
2605 for (unsigned I = DSAStack->getNestingLevel() + 1; I > Level; --I) {
2606 const unsigned NewLevel = I - 1;
2607 if (DSAStack->hasExplicitDSA(
2608 D,
2609 [&OMPC](const OpenMPClauseKind K, bool AppliedToPointee) {
2610 if (isOpenMPPrivate(K) && !AppliedToPointee) {
2611 OMPC = K;
2612 return true;
2613 }
2614 return false;
2615 },
2616 NewLevel))
2617 break;
2618 if (DSAStack->checkMappableExprComponentListsForDeclAtLevel(
2619 D, NewLevel,
2621 OpenMPClauseKind) { return true; })) {
2622 OMPC = OMPC_map;
2623 break;
2624 }
2625 if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective,
2626 NewLevel)) {
2627 OMPC = OMPC_map;
2628 if (DSAStack->mustBeFirstprivateAtLevel(
2630 OMPC = OMPC_firstprivate;
2631 break;
2632 }
2633 }
2634 if (OMPC != OMPC_unknown)
2635 FD->addAttr(
2636 OMPCaptureKindAttr::CreateImplicit(getASTContext(), unsigned(OMPC)));
2637}
2638
2640 unsigned CaptureLevel) const {
2641 assert(getLangOpts().OpenMP && "OpenMP is not allowed");
2642 // Return true if the current level is no longer enclosed in a target region.
2643
2645 getOpenMPCaptureRegions(Regions, DSAStack->getDirective(Level));
2646 const auto *VD = dyn_cast<VarDecl>(D);
2647 return VD && !VD->hasLocalStorage() &&
2648 DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective,
2649 Level) &&
2650 Regions[CaptureLevel] != OMPD_task;
2651}
2652
2654 unsigned CaptureLevel) const {
2655 assert(getLangOpts().OpenMP && "OpenMP is not allowed");
2656 // Return true if the current level is no longer enclosed in a target region.
2657
2658 if (const auto *VD = dyn_cast<VarDecl>(D)) {
2659 if (!VD->hasLocalStorage()) {
2661 return true;
2662 DSAStackTy::DSAVarData TopDVar =
2663 DSAStack->getTopDSA(D, /*FromParent=*/false);
2664 unsigned NumLevels =
2665 getOpenMPCaptureLevels(DSAStack->getDirective(Level));
2666 if (Level == 0)
2667 // A non-file-scope static variable with default(firstprivate)
2668 // should be captured globally.
2669 return (NumLevels == CaptureLevel + 1 &&
2670 (TopDVar.CKind != OMPC_shared ||
2671 DSAStack->getDefaultDSA() == DSA_firstprivate));
2672 do {
2673 --Level;
2674 DSAStackTy::DSAVarData DVar = DSAStack->getImplicitDSA(D, Level);
2675 if (DVar.CKind != OMPC_shared)
2676 return true;
2677 } while (Level > 0);
2678 }
2679 }
2680 return true;
2681}
2682
2683void SemaOpenMP::DestroyDataSharingAttributesStack() { delete DSAStack; }
2684
2686 OMPTraitInfo &TI) {
2687 OMPDeclareVariantScopes.push_back(OMPDeclareVariantScope(TI));
2688}
2689
2692 "Not in OpenMP declare variant scope!");
2693
2694 OMPDeclareVariantScopes.pop_back();
2695}
2696
2698 const FunctionDecl *Callee,
2700 assert(getLangOpts().OpenMP && "Expected OpenMP compilation mode.");
2701 std::optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
2702 OMPDeclareTargetDeclAttr::getDeviceType(Caller->getMostRecentDecl());
2703 // Ignore host functions during device analysis.
2704 if (getLangOpts().OpenMPIsTargetDevice &&
2705 (!DevTy || *DevTy == OMPDeclareTargetDeclAttr::DT_Host))
2706 return;
2707 // Ignore nohost functions during host analysis.
2708 if (!getLangOpts().OpenMPIsTargetDevice && DevTy &&
2709 *DevTy == OMPDeclareTargetDeclAttr::DT_NoHost)
2710 return;
2711 const FunctionDecl *FD = Callee->getMostRecentDecl();
2712 DevTy = OMPDeclareTargetDeclAttr::getDeviceType(FD);
2713 if (getLangOpts().OpenMPIsTargetDevice && DevTy &&
2714 *DevTy == OMPDeclareTargetDeclAttr::DT_Host) {
2715 // Diagnose host function called during device codegen.
2716 StringRef HostDevTy =
2717 getOpenMPSimpleClauseTypeName(OMPC_device_type, OMPC_DEVICE_TYPE_host);
2718 Diag(Loc, diag::err_omp_wrong_device_function_call) << HostDevTy << 0;
2719 Diag(*OMPDeclareTargetDeclAttr::getLocation(FD),
2720 diag::note_omp_marked_device_type_here)
2721 << HostDevTy;
2722 return;
2723 }
2724 if (!getLangOpts().OpenMPIsTargetDevice &&
2725 !getLangOpts().OpenMPOffloadMandatory && DevTy &&
2726 *DevTy == OMPDeclareTargetDeclAttr::DT_NoHost) {
2727 // In OpenMP 5.2 or later, if the function has a host variant, allow
2728 // that variant to be called instead.
2729 auto &&HasHostAttr = [](const FunctionDecl *Callee) {
2730 for (OMPDeclareVariantAttr *A :
2731 Callee->specific_attrs<OMPDeclareVariantAttr>()) {
2732 auto *DeclRefVariant = cast<DeclRefExpr>(A->getVariantFuncRef());
2733 auto *VariantFD = cast<FunctionDecl>(DeclRefVariant->getDecl());
2734 std::optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
2735 OMPDeclareTargetDeclAttr::getDeviceType(
2736 VariantFD->getMostRecentDecl());
2737 if (!DevTy || *DevTy == OMPDeclareTargetDeclAttr::DT_Host)
2738 return true;
2739 }
2740 return false;
2741 };
2742 if (getLangOpts().OpenMP >= 52 &&
2743 Callee->hasAttr<OMPDeclareVariantAttr>() && HasHostAttr(Callee))
2744 return;
2745 // Diagnose nohost function called during host codegen.
2746 StringRef NoHostDevTy = getOpenMPSimpleClauseTypeName(
2747 OMPC_device_type, OMPC_DEVICE_TYPE_nohost);
2748 Diag(Loc, diag::err_omp_wrong_device_function_call) << NoHostDevTy << 1;
2749 Diag(*OMPDeclareTargetDeclAttr::getLocation(FD),
2750 diag::note_omp_marked_device_type_here)
2751 << NoHostDevTy;
2752 }
2753}
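
// With device compilation enabled, the checks above diagnose calls such as the
// following (illustrative user code only):
//
//   void HostOnly();
//   #pragma omp declare target to(HostOnly) device_type(host)
//
//   void Caller() {
//   #pragma omp target
//     HostOnly(); // err_omp_wrong_device_function_call: host-only callee
//   }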
2754
2756 const DeclarationNameInfo &DirName,
2757 Scope *CurScope, SourceLocation Loc) {
2758 DSAStack->push(DKind, DirName, CurScope, Loc);
2761}
2762
2764 DSAStack->setClauseParsingMode(K);
2765}
2766
2768 DSAStack->setClauseParsingMode(/*K=*/OMPC_unknown);
2770}
2771
2772static std::pair<ValueDecl *, bool>
2773getPrivateItem(Sema &S, Expr *&RefExpr, SourceLocation &ELoc,
2774 SourceRange &ERange, bool AllowArraySection = false,
2775 StringRef DiagType = "");
2776
2777/// Check consistency of the reduction clauses.
2778static void checkReductionClauses(Sema &S, DSAStackTy *Stack,
2779 ArrayRef<OMPClause *> Clauses) {
2780 bool InscanFound = false;
2781 SourceLocation InscanLoc;
2782 // OpenMP 5.0, 2.19.5.4 reduction Clause, Restrictions.
2783 // A reduction clause without the inscan reduction-modifier may not appear on
2784 // a construct on which a reduction clause with the inscan reduction-modifier
2785 // appears.
2786 for (OMPClause *C : Clauses) {
2787 if (C->getClauseKind() != OMPC_reduction)
2788 continue;
2789 auto *RC = cast<OMPReductionClause>(C);
2790 if (RC->getModifier() == OMPC_REDUCTION_inscan) {
2791 InscanFound = true;
2792 InscanLoc = RC->getModifierLoc();
2793 continue;
2794 }
2795 if (RC->getModifier() == OMPC_REDUCTION_task) {
2796 // OpenMP 5.0, 2.19.5.4 reduction Clause.
2797 // A reduction clause with the task reduction-modifier may only appear on
2798 // a parallel construct, a worksharing construct or a combined or
2799 // composite construct for which any of the aforementioned constructs is a
2800 // constituent construct and simd or loop are not constituent constructs.
2801 OpenMPDirectiveKind CurDir = Stack->getCurrentDirective();
2802 if (!(isOpenMPParallelDirective(CurDir) ||
2803 isOpenMPWorksharingDirective(CurDir)) ||
2804 isOpenMPSimdDirective(CurDir))
2805 S.Diag(RC->getModifierLoc(),
2806 diag::err_omp_reduction_task_not_parallel_or_worksharing);
2807 continue;
2808 }
2809 }
2810 if (InscanFound) {
2811 for (OMPClause *C : Clauses) {
2812 if (C->getClauseKind() != OMPC_reduction)
2813 continue;
2814 auto *RC = cast<OMPReductionClause>(C);
2815 if (RC->getModifier() != OMPC_REDUCTION_inscan) {
2816 S.Diag(RC->getModifier() == OMPC_REDUCTION_unknown
2817 ? RC->getBeginLoc()
2818 : RC->getModifierLoc(),
2819 diag::err_omp_inscan_reduction_expected);
2820 S.Diag(InscanLoc, diag::note_omp_previous_inscan_reduction);
2821 continue;
2822 }
2823 for (Expr *Ref : RC->varlist()) {
2824 assert(Ref && "NULL expr in OpenMP reduction clause.");
2825 SourceLocation ELoc;
2826 SourceRange ERange;
2827 Expr *SimpleRefExpr = Ref;
2828 auto Res = getPrivateItem(S, SimpleRefExpr, ELoc, ERange,
2829 /*AllowArraySection=*/true);
2830 ValueDecl *D = Res.first;
2831 if (!D)
2832 continue;
2833 if (!Stack->isUsedInScanDirective(getCanonicalDecl(D))) {
2834 S.Diag(Ref->getExprLoc(),
2835 diag::err_omp_reduction_not_inclusive_exclusive)
2836 << Ref->getSourceRange();
2837 }
2838 }
2839 }
2840 }
2841}
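
// For instance, checkReductionClauses rejects mixing inscan and non-inscan
// reductions on one construct (illustrative user code only):
//
//   #pragma omp simd reduction(inscan, + : a) reduction(* : b) // error:
//   for (int i = 0; i < n; ++i) {}                             // inscan expected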
2842
2843static void checkAllocateClauses(Sema &S, DSAStackTy *Stack,
2844 ArrayRef<OMPClause *> Clauses);
2845static DeclRefExpr *buildCapture(Sema &S, ValueDecl *D, Expr *CaptureExpr,
2846 bool WithInit);
2847
2848static void reportOriginalDsa(Sema &SemaRef, const DSAStackTy *Stack,
2849 const ValueDecl *D,
2850 const DSAStackTy::DSAVarData &DVar,
2851 bool IsLoopIterVar = false);
2852
2854 // OpenMP [2.14.3.5, Restrictions, C/C++, p.1]
2855 // A variable of class type (or array thereof) that appears in a lastprivate
2856 // clause requires an accessible, unambiguous default constructor for the
2857 // class type, unless the list item is also specified in a firstprivate
2858 // clause.
2859
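 // For example (illustrative only), the "unless also firstprivate" escape
 // hatch accepts:
 //
 //   struct S { S(int); };              // no default constructor
 //   S Obj(0);
 //   #pragma omp parallel for firstprivate(Obj) lastprivate(Obj) // OK
 //   for (int i = 0; i < n; ++i) {}
 //
 // whereas lastprivate(Obj) alone would require a default constructor.
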
2860 auto FinalizeLastprivate = [&](OMPLastprivateClause *Clause) {
2861 SmallVector<Expr *, 8> PrivateCopies;
2862 for (Expr *DE : Clause->varlist()) {
2863 if (DE->isValueDependent() || DE->isTypeDependent()) {
2864 PrivateCopies.push_back(nullptr);
2865 continue;
2866 }
2867 auto *DRE = cast<DeclRefExpr>(DE->IgnoreParens());
2868 auto *VD = cast<VarDecl>(DRE->getDecl());
2870 const DSAStackTy::DSAVarData DVar =
2871 DSAStack->getTopDSA(VD, /*FromParent=*/false);
2872 if (DVar.CKind != OMPC_lastprivate) {
2873 // The variable is also firstprivate, so the initialization sequence
2874 // for the private copy has already been generated.
2875 PrivateCopies.push_back(nullptr);
2876 continue;
2877 }
2878 // Generate a helper private variable and initialize it with the
2879 // default value. The address of the original variable is replaced
2880 // by the address of the new private variable in CodeGen. This new
2881 // variable is not added to IdResolver, so the code in the OpenMP
2882 // region uses the original variable for proper diagnostics.
2883 VarDecl *VDPrivate = buildVarDecl(
2884 SemaRef, DE->getExprLoc(), Type.getUnqualifiedType(), VD->getName(),
2885 VD->hasAttrs() ? &VD->getAttrs() : nullptr, DRE);
2887 if (VDPrivate->isInvalidDecl()) {
2888 PrivateCopies.push_back(nullptr);
2889 continue;
2890 }
2891 PrivateCopies.push_back(buildDeclRefExpr(
2892 SemaRef, VDPrivate, DE->getType(), DE->getExprLoc()));
2893 }
2894 Clause->setPrivateCopies(PrivateCopies);
2895 };
2896
2897 auto FinalizeNontemporal = [&](OMPNontemporalClause *Clause) {
2898 // Finalize nontemporal clause by handling private copies, if any.
2899 SmallVector<Expr *, 8> PrivateRefs;
2900 for (Expr *RefExpr : Clause->varlist()) {
2901 assert(RefExpr && "NULL expr in OpenMP nontemporal clause.");
2902 SourceLocation ELoc;
2903 SourceRange ERange;
2904 Expr *SimpleRefExpr = RefExpr;
2905 auto Res = getPrivateItem(SemaRef, SimpleRefExpr, ELoc, ERange);
2906 if (Res.second)
2907 // It will be analyzed later.
2908 PrivateRefs.push_back(RefExpr);
2909 ValueDecl *D = Res.first;
2910 if (!D)
2911 continue;
2912
2913 const DSAStackTy::DSAVarData DVar =
2914 DSAStack->getTopDSA(D, /*FromParent=*/false);
2915 PrivateRefs.push_back(DVar.PrivateCopy ? DVar.PrivateCopy
2916 : SimpleRefExpr);
2917 }
2918 Clause->setPrivateRefs(PrivateRefs);
2919 };
2920
2921 auto FinalizeAllocators = [&](OMPUsesAllocatorsClause *Clause) {
2922 for (unsigned I = 0, E = Clause->getNumberOfAllocators(); I < E; ++I) {
2923 OMPUsesAllocatorsClause::Data D = Clause->getAllocatorData(I);
2924 auto *DRE = dyn_cast<DeclRefExpr>(D.Allocator->IgnoreParenImpCasts());
2925 if (!DRE)
2926 continue;
2927 ValueDecl *VD = DRE->getDecl();
2928 if (!VD || !isa<VarDecl>(VD))
2929 continue;
2930 DSAStackTy::DSAVarData DVar =
2931 DSAStack->getTopDSA(VD, /*FromParent=*/false);
2932 // OpenMP [2.12.5, target Construct]
2933 // Memory allocators that appear in a uses_allocators clause cannot
2934 // appear in other data-sharing attribute clauses or data-mapping
2935 // attribute clauses in the same construct.
2936 Expr *MapExpr = nullptr;
2937 if (DVar.RefExpr ||
2938 DSAStack->checkMappableExprComponentListsForDecl(
2939 VD, /*CurrentRegionOnly=*/true,
2940 [VD, &MapExpr](
2942 MapExprComponents,
2944 auto MI = MapExprComponents.rbegin();
2945 auto ME = MapExprComponents.rend();
2946 if (MI != ME &&
2947 MI->getAssociatedDeclaration()->getCanonicalDecl() ==
2948 VD->getCanonicalDecl()) {
2949 MapExpr = MI->getAssociatedExpression();
2950 return true;
2951 }
2952 return false;
2953 })) {
2954 Diag(D.Allocator->getExprLoc(), diag::err_omp_allocator_used_in_clauses)
2955 << D.Allocator->getSourceRange();
2956 if (DVar.RefExpr)
2958 else
2959 Diag(MapExpr->getExprLoc(), diag::note_used_here)
2960 << MapExpr->getSourceRange();
2961 }
2962 }
2963 };
2964
2965 if (const auto *D = dyn_cast_or_null<OMPExecutableDirective>(CurDirective)) {
2966 for (OMPClause *C : D->clauses()) {
2967 if (auto *Clause = dyn_cast<OMPLastprivateClause>(C)) {
2968 FinalizeLastprivate(Clause);
2969 } else if (auto *Clause = dyn_cast<OMPNontemporalClause>(C)) {
2970 FinalizeNontemporal(Clause);
2971 } else if (auto *Clause = dyn_cast<OMPUsesAllocatorsClause>(C)) {
2972 FinalizeAllocators(Clause);
2973 }
2974 }
2975 // Check allocate clauses.
2976 if (!SemaRef.CurContext->isDependentContext())
2977 checkAllocateClauses(SemaRef, DSAStack, D->clauses());
2978 checkReductionClauses(SemaRef, DSAStack, D->clauses());
2979 }
2980
2981 DSAStack->pop();
2984}
2985
2987 Expr *NumIterations, Sema &SemaRef,
2988 Scope *S, DSAStackTy *Stack);
2989
2990static bool finishLinearClauses(Sema &SemaRef, ArrayRef<OMPClause *> Clauses,
2992 DSAStackTy *Stack) {
2993 assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) &&
2994 "loop exprs were not built");
2995
2996 if (SemaRef.CurContext->isDependentContext())
2997 return false;
2998
2999 // Finalize the clauses that need pre-built expressions for CodeGen.
3000 for (OMPClause *C : Clauses) {
3001 auto *LC = dyn_cast<OMPLinearClause>(C);
3002 if (!LC)
3003 continue;
3004 if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
3005 B.NumIterations, SemaRef,
3006 SemaRef.getCurScope(), Stack))
3007 return true;
3008 }
3009
3010 return false;
3011}
3012
3013namespace {
3014
3015class VarDeclFilterCCC final : public CorrectionCandidateCallback {
3016private:
3017 Sema &SemaRef;
3018
3019public:
3020 explicit VarDeclFilterCCC(Sema &S) : SemaRef(S) {}
3021 bool ValidateCandidate(const TypoCorrection &Candidate) override {
3022 NamedDecl *ND = Candidate.getCorrectionDecl();
3023 if (const auto *VD = dyn_cast_or_null<VarDecl>(ND)) {
3024 return VD->hasGlobalStorage() &&
3025 SemaRef.isDeclInScope(ND, SemaRef.getCurLexicalContext(),
3026 SemaRef.getCurScope());
3027 }
3028 return false;
3029 }
3030
3031 std::unique_ptr<CorrectionCandidateCallback> clone() override {
3032 return std::make_unique<VarDeclFilterCCC>(*this);
3033 }
3034};
3035
3036class VarOrFuncDeclFilterCCC final : public CorrectionCandidateCallback {
3037private:
3038 Sema &SemaRef;
3039
3040public:
3041 explicit VarOrFuncDeclFilterCCC(Sema &S) : SemaRef(S) {}
3042 bool ValidateCandidate(const TypoCorrection &Candidate) override {
3043 NamedDecl *ND = Candidate.getCorrectionDecl();
3044 if (ND && ((isa<VarDecl>(ND) && ND->getKind() == Decl::Var) ||
3045 isa<FunctionDecl>(ND))) {
3046 return SemaRef.isDeclInScope(ND, SemaRef.getCurLexicalContext(),
3047 SemaRef.getCurScope());
3048 }
3049 return false;
3050 }
3051
3052 std::unique_ptr<CorrectionCandidateCallback> clone() override {
3053 return std::make_unique<VarOrFuncDeclFilterCCC>(*this);
3054 }
3055};
3056
3057} // namespace
3058
3060 CXXScopeSpec &ScopeSpec,
3061 const DeclarationNameInfo &Id,
3062 OpenMPDirectiveKind Kind) {
3063 ASTContext &Context = getASTContext();
3065 SemaRef.LookupParsedName(Lookup, CurScope, &ScopeSpec,
3066 /*ObjectType=*/QualType(),
3067 /*AllowBuiltinCreation=*/true);
3068
3069 if (Lookup.isAmbiguous())
3070 return ExprError();
3071
3072 VarDecl *VD;
3073 if (!Lookup.isSingleResult()) {
3074 VarDeclFilterCCC CCC(SemaRef);
3075 if (TypoCorrection Corrected =
3076 SemaRef.CorrectTypo(Id, Sema::LookupOrdinaryName, CurScope, nullptr,
3079 Corrected,
3080 SemaRef.PDiag(Lookup.empty() ? diag::err_undeclared_var_use_suggest
3081 : diag::err_omp_expected_var_arg_suggest)
3082 << Id.getName());
3083 VD = Corrected.getCorrectionDeclAs<VarDecl>();
3084 } else {
3085 Diag(Id.getLoc(), Lookup.empty() ? diag::err_undeclared_var_use
3086 : diag::err_omp_expected_var_arg)
3087 << Id.getName();
3088 return ExprError();
3089 }
3090 } else if (!(VD = Lookup.getAsSingle<VarDecl>())) {
3091 Diag(Id.getLoc(), diag::err_omp_expected_var_arg) << Id.getName();
3092 Diag(Lookup.getFoundDecl()->getLocation(), diag::note_declared_at);
3093 return ExprError();
3094 }
3095 Lookup.suppressDiagnostics();
3096
3097 // OpenMP [2.9.2, Syntax, C/C++]
3098 // Variables must be file-scope, namespace-scope, or static block-scope.
3099 if (Kind == OMPD_threadprivate && !VD->hasGlobalStorage()) {
3100 Diag(Id.getLoc(), diag::err_omp_global_var_arg)
3101 << getOpenMPDirectiveName(Kind) << !VD->isStaticLocal();
3102 bool IsDecl =
3103 VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
3104 Diag(VD->getLocation(),
3105 IsDecl ? diag::note_previous_decl : diag::note_defined_here)
3106 << VD;
3107 return ExprError();
3108 }
3109
3110 VarDecl *CanonicalVD = VD->getCanonicalDecl();
3111 NamedDecl *ND = CanonicalVD;
3112 // OpenMP [2.9.2, Restrictions, C/C++, p.2]
3113 // A threadprivate directive for file-scope variables must appear outside
3114 // any definition or declaration.
3115 if (CanonicalVD->getDeclContext()->isTranslationUnit() &&
3116 !SemaRef.getCurLexicalContext()->isTranslationUnit()) {
3117 Diag(Id.getLoc(), diag::err_omp_var_scope)
3118 << getOpenMPDirectiveName(Kind) << VD;
3119 bool IsDecl =
3120 VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
3121 Diag(VD->getLocation(),
3122 IsDecl ? diag::note_previous_decl : diag::note_defined_here)
3123 << VD;
3124 return ExprError();
3125 }
3126 // OpenMP [2.9.2, Restrictions, C/C++, p.3]
3127 // A threadprivate directive for static class member variables must appear
3128 // in the class definition, in the same scope in which the member
3129 // variables are declared.
3130 if (CanonicalVD->isStaticDataMember() &&
3131 !CanonicalVD->getDeclContext()->Equals(SemaRef.getCurLexicalContext())) {
3132 Diag(Id.getLoc(), diag::err_omp_var_scope)
3133 << getOpenMPDirectiveName(Kind) << VD;
3134 bool IsDecl =
3135 VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
3136 Diag(VD->getLocation(),
3137 IsDecl ? diag::note_previous_decl : diag::note_defined_here)
3138 << VD;
3139 return ExprError();
3140 }
3141 // OpenMP [2.9.2, Restrictions, C/C++, p.4]
3142 // A threadprivate directive for namespace-scope variables must appear
3143 // outside any definition or declaration other than the namespace
3144 // definition itself.
3145 if (CanonicalVD->getDeclContext()->isNamespace() &&
3146 (!SemaRef.getCurLexicalContext()->isFileContext() ||
3147 !SemaRef.getCurLexicalContext()->Encloses(
3148 CanonicalVD->getDeclContext()))) {
3149 Diag(Id.getLoc(), diag::err_omp_var_scope)
3150 << getOpenMPDirectiveName(Kind) << VD;
3151 bool IsDecl =
3152 VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
3153 Diag(VD->getLocation(),
3154 IsDecl ? diag::note_previous_decl : diag::note_defined_here)
3155 << VD;
3156 return ExprError();
3157 }
3158 // OpenMP [2.9.2, Restrictions, C/C++, p.6]
3159 // A threadprivate directive for static block-scope variables must appear
3160 // in the scope of the variable and not in a nested scope.
3161 if (CanonicalVD->isLocalVarDecl() && CurScope &&
3162 !SemaRef.isDeclInScope(ND, SemaRef.getCurLexicalContext(), CurScope)) {
3163 Diag(Id.getLoc(), diag::err_omp_var_scope)
3164 << getOpenMPDirectiveName(Kind) << VD;
3165 bool IsDecl =
3166 VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
3167 Diag(VD->getLocation(),
3168 IsDecl ? diag::note_previous_decl : diag::note_defined_here)
3169 << VD;
3170 return ExprError();
3171 }
3172
3173 // OpenMP [2.9.2, Restrictions, C/C++, p.2-6]
3174 // A threadprivate directive must lexically precede all references to any
3175 // of the variables in its list.
3176 if (Kind == OMPD_threadprivate && VD->isUsed() &&
3177 !DSAStack->isThreadPrivate(VD)) {
3178 Diag(Id.getLoc(), diag::err_omp_var_used)
3179 << getOpenMPDirectiveName(Kind) << VD;
3180 return ExprError();
3181 }
3182
3183 QualType ExprType = VD->getType().getNonReferenceType();
3184 return DeclRefExpr::Create(Context, NestedNameSpecifierLoc(),
3185 SourceLocation(), VD,
3186 /*RefersToEnclosingVariableOrCapture=*/false,
3187 Id.getLoc(), ExprType, VK_LValue);
3188}
3189
3190OMPThreadPrivateDecl *
3191SemaOpenMP::ActOnOpenMPThreadprivateDirective(SourceLocation Loc,
3192 ArrayRef<Expr *> VarList) {
3193 if (OMPThreadPrivateDecl *D = CheckOMPThreadPrivateDecl(Loc, VarList)) {
3194 SemaRef.CurContext->addDecl(D);
3195 return D;
3196 }
3197 return nullptr;
3198}
3199
3200namespace {
3201class LocalVarRefChecker final
3202 : public ConstStmtVisitor<LocalVarRefChecker, bool> {
3203 Sema &SemaRef;
3204
3205public:
3206 bool VisitDeclRefExpr(const DeclRefExpr *E) {
3207 if (const auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
3208 if (VD->hasLocalStorage()) {
3209 SemaRef.Diag(E->getBeginLoc(),
3210 diag::err_omp_local_var_in_threadprivate_init)
3211 << E->getSourceRange();
3212 SemaRef.Diag(VD->getLocation(), diag::note_defined_here)
3213 << VD << VD->getSourceRange();
3214 return true;
3215 }
3216 }
3217 return false;
3218 }
3219 bool VisitStmt(const Stmt *S) {
3220 for (const Stmt *Child : S->children()) {
3221 if (Child && Visit(Child))
3222 return true;
3223 }
3224 return false;
3225 }
3226 explicit LocalVarRefChecker(Sema &SemaRef) : SemaRef(SemaRef) {}
3227};
3228} // namespace
3229
3230OMPThreadPrivateDecl *
3231SemaOpenMP::CheckOMPThreadPrivateDecl(SourceLocation Loc,
3232 ArrayRef<Expr *> VarList) {
3233 ASTContext &Context = getASTContext();
3234 SmallVector<Expr *, 8> Vars;
3235 for (Expr *RefExpr : VarList) {
3236 auto *DE = cast<DeclRefExpr>(RefExpr);
3237 auto *VD = cast<VarDecl>(DE->getDecl());
3238 SourceLocation ILoc = DE->getExprLoc();
3239
3240 // Mark variable as used.
3241 VD->setReferenced();
3242 VD->markUsed(Context);
3243
3244 QualType QType = VD->getType();
3245 if (QType->isDependentType() || QType->isInstantiationDependentType()) {
3246 // It will be analyzed later.
3247 Vars.push_back(DE);
3248 continue;
3249 }
3250
3251 // OpenMP [2.9.2, Restrictions, C/C++, p.10]
3252 // A threadprivate variable must not have an incomplete type.
3253 if (SemaRef.RequireCompleteType(
3254 ILoc, VD->getType(), diag::err_omp_threadprivate_incomplete_type)) {
3255 continue;
3256 }
3257
3258 // OpenMP [2.9.2, Restrictions, C/C++, p.10]
3259 // A threadprivate variable must not have a reference type.
3260 if (VD->getType()->isReferenceType()) {
3261 Diag(ILoc, diag::err_omp_ref_type_arg)
3262 << getOpenMPDirectiveName(OMPD_threadprivate) << VD->getType();
3263 bool IsDecl =
3264 VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
3265 Diag(VD->getLocation(),
3266 IsDecl ? diag::note_previous_decl : diag::note_defined_here)
3267 << VD;
3268 continue;
3269 }
3270
3271 // Check if this is a TLS variable. If TLS is not being supported, produce
3272 // the corresponding diagnostic.
3273 if ((VD->getTLSKind() != VarDecl::TLS_None &&
3274 !(VD->hasAttr<OMPThreadPrivateDeclAttr>() &&
3275 getLangOpts().OpenMPUseTLS &&
3276 getASTContext().getTargetInfo().isTLSSupported())) ||
3277 (VD->getStorageClass() == SC_Register && VD->hasAttr<AsmLabelAttr>() &&
3278 !VD->isLocalVarDecl())) {
3279 Diag(ILoc, diag::err_omp_var_thread_local)
3280 << VD << ((VD->getTLSKind() != VarDecl::TLS_None) ? 0 : 1);
3281 bool IsDecl =
3282 VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
3283 Diag(VD->getLocation(),
3284 IsDecl ? diag::note_previous_decl : diag::note_defined_here)
3285 << VD;
3286 continue;
3287 }
3288
3289 // Check if the initial value of the threadprivate variable references a
3290 // variable with local storage (this is not supported by the runtime).
3291 if (const Expr *Init = VD->getAnyInitializer()) {
3292 LocalVarRefChecker Checker(SemaRef);
3293 if (Checker.Visit(Init))
3294 continue;
3295 }
3296
3297 Vars.push_back(RefExpr);
3298 DSAStack->addDSA(VD, DE, OMPC_threadprivate);
3299 VD->addAttr(OMPThreadPrivateDeclAttr::CreateImplicit(
3300 Context, SourceRange(Loc, Loc)));
3301 if (ASTMutationListener *ML = Context.getASTMutationListener())
3302 ML->DeclarationMarkedOpenMPThreadPrivate(VD);
3303 }
3304 OMPThreadPrivateDecl *D = nullptr;
3305 if (!Vars.empty()) {
3306 D = OMPThreadPrivateDecl::Create(Context, SemaRef.getCurLexicalContext(),
3307 Loc, Vars);
3308 D->setAccess(AS_public);
3309 }
3310 return D;
3311}
3312
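// getAllocatorKind (below) canonicalizes the allocator expression and
// compares it against the predefined allocators recorded on the DSA stack;
// anything that does not match (e.g. an omp_allocator_handle_t obtained from
// omp_init_allocator) is treated as OMPUserDefinedMemAlloc.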
3313static OMPAllocateDeclAttr::AllocatorTypeTy
3314getAllocatorKind(Sema &S, DSAStackTy *Stack, Expr *Allocator) {
3315 if (!Allocator)
3316 return OMPAllocateDeclAttr::OMPNullMemAlloc;
3317 if (Allocator->isTypeDependent() || Allocator->isValueDependent() ||
3318 Allocator->isInstantiationDependent() ||
3319 Allocator->containsUnexpandedParameterPack())
3320 return OMPAllocateDeclAttr::OMPUserDefinedMemAlloc;
3321 auto AllocatorKindRes = OMPAllocateDeclAttr::OMPUserDefinedMemAlloc;
3322 llvm::FoldingSetNodeID AEId;
3323 const Expr *AE = Allocator->IgnoreParenImpCasts();
3324 AE->IgnoreImpCasts()->Profile(AEId, S.getASTContext(), /*Canonical=*/true);
3325 for (int I = 0; I < OMPAllocateDeclAttr::OMPUserDefinedMemAlloc; ++I) {
3326 auto AllocatorKind = static_cast<OMPAllocateDeclAttr::AllocatorTypeTy>(I);
3327 const Expr *DefAllocator = Stack->getAllocator(AllocatorKind);
3328 llvm::FoldingSetNodeID DAEId;
3329 DefAllocator->IgnoreImpCasts()->Profile(DAEId, S.getASTContext(),
3330 /*Canonical=*/true);
3331 if (AEId == DAEId) {
3332 AllocatorKindRes = AllocatorKind;
3333 break;
3334 }
3335 }
3336 return AllocatorKindRes;
3337}
3338
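// Returns true (and warns) when a variable that already carries an
// OMPAllocateDeclAttr is listed again with a different allocator, e.g. when
// two 'omp allocate' directives name the same variable but disagree on the
// allocator clause.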
3339static bool checkPreviousOMPAllocateAttribute(
3340 Sema &S, DSAStackTy *Stack, Expr *RefExpr, VarDecl *VD,
3341 OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind, Expr *Allocator) {
3342 if (!VD->hasAttr<OMPAllocateDeclAttr>())
3343 return false;
3344 const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
3345 Expr *PrevAllocator = A->getAllocator();
3346 OMPAllocateDeclAttr::AllocatorTypeTy PrevAllocatorKind =
3347 getAllocatorKind(S, Stack, PrevAllocator);
3348 bool AllocatorsMatch = AllocatorKind == PrevAllocatorKind;
3349 if (AllocatorsMatch &&
3350 AllocatorKind == OMPAllocateDeclAttr::OMPUserDefinedMemAlloc &&
3351 Allocator && PrevAllocator) {
3352 const Expr *AE = Allocator->IgnoreParenImpCasts();
3353 const Expr *PAE = PrevAllocator->IgnoreParenImpCasts();
3354 llvm::FoldingSetNodeID AEId, PAEId;
3355 AE->Profile(AEId, S.Context, /*Canonical=*/true);
3356 PAE->Profile(PAEId, S.Context, /*Canonical=*/true);
3357 AllocatorsMatch = AEId == PAEId;
3358 }
3359 if (!AllocatorsMatch) {
3360 SmallString<256> AllocatorBuffer;
3361 llvm::raw_svector_ostream AllocatorStream(AllocatorBuffer);
3362 if (Allocator)
3363 Allocator->printPretty(AllocatorStream, nullptr, S.getPrintingPolicy());
3364 SmallString<256> PrevAllocatorBuffer;
3365 llvm::raw_svector_ostream PrevAllocatorStream(PrevAllocatorBuffer);
3366 if (PrevAllocator)
3367 PrevAllocator->printPretty(PrevAllocatorStream, nullptr,
3368 S.getPrintingPolicy());
3369
3370 SourceLocation AllocatorLoc =
3371 Allocator ? Allocator->getExprLoc() : RefExpr->getExprLoc();
3372 SourceRange AllocatorRange =
3373 Allocator ? Allocator->getSourceRange() : RefExpr->getSourceRange();
3374 SourceLocation PrevAllocatorLoc =
3375 PrevAllocator ? PrevAllocator->getExprLoc() : A->getLocation();
3376 SourceRange PrevAllocatorRange =
3377 PrevAllocator ? PrevAllocator->getSourceRange() : A->getRange();
3378 S.Diag(AllocatorLoc, diag::warn_omp_used_different_allocator)
3379 << (Allocator ? 1 : 0) << AllocatorStream.str()
3380 << (PrevAllocator ? 1 : 0) << PrevAllocatorStream.str()
3381 << AllocatorRange;
3382 S.Diag(PrevAllocatorLoc, diag::note_omp_previous_allocator)
3383 << PrevAllocatorRange;
3384 return true;
3385 }
3386 return false;
3387}
3388
3389static void
3390applyOMPAllocateAttribute(Sema &S, VarDecl *VD,
3391 OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind,
3392 Expr *Allocator, Expr *Alignment, SourceRange SR) {
3393 if (VD->hasAttr<OMPAllocateDeclAttr>())
3394 return;
3395 if (Alignment &&
3396 (Alignment->isTypeDependent() || Alignment->isValueDependent() ||
3397 Alignment->isInstantiationDependent() ||
3398 Alignment->containsUnexpandedParameterPack()))
3399 // Apply later when we have a usable value.
3400 return;
3401 if (Allocator &&
3402 (Allocator->isTypeDependent() || Allocator->isValueDependent() ||
3403 Allocator->isInstantiationDependent() ||
3404 Allocator->containsUnexpandedParameterPack()))
3405 return;
3406 auto *A = OMPAllocateDeclAttr::CreateImplicit(S.Context, AllocatorKind,
3407 Allocator, Alignment, SR);
3408 VD->addAttr(A);
3409 if (ASTMutationListener *ML = S.Context.getASTMutationListener())
3410 ML->DeclarationMarkedOpenMPAllocate(VD, A);
3411}
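// Illustrative example (not taken from this file) of the restriction checked
// below: a list item with static storage duration must use a predefined
// allocator. Assuming 'my_handle' is a user-defined omp_allocator_handle_t:
//   static int buf[64];
//   #pragma omp allocate(buf) allocator(omp_high_bw_mem_alloc)  // accepted
// whereas 'allocator(my_handle)' on the same directive is diagnosed.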
3412
3413SemaOpenMP::DeclGroupPtrTy SemaOpenMP::ActOnOpenMPAllocateDirective(
3414 SourceLocation Loc, ArrayRef<Expr *> VarList, ArrayRef<OMPClause *> Clauses,
3415 DeclContext *Owner) {
3416 assert(Clauses.size() <= 2 && "Expected at most two clauses.");
3417 Expr *Alignment = nullptr;
3418 Expr *Allocator = nullptr;
3419 if (Clauses.empty()) {
3420 // OpenMP 5.0, 2.11.3 allocate Directive, Restrictions.
3421 // allocate directives that appear in a target region must specify an
3422 // allocator clause unless a requires directive with the dynamic_allocators
3423 // clause is present in the same compilation unit.
3424 if (getLangOpts().OpenMPIsTargetDevice &&
3425 !DSAStack->hasRequiresDeclWithClause<OMPDynamicAllocatorsClause>())
3426 SemaRef.targetDiag(Loc, diag::err_expected_allocator_clause);
3427 } else {
3428 for (const OMPClause *C : Clauses)
3429 if (const auto *AC = dyn_cast<OMPAllocatorClause>(C))
3430 Allocator = AC->getAllocator();
3431 else if (const auto *AC = dyn_cast<OMPAlignClause>(C))
3432 Alignment = AC->getAlignment();
3433 else
3434 llvm_unreachable("Unexpected clause on allocate directive");
3435 }
3436 OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind =
3437 getAllocatorKind(SemaRef, DSAStack, Allocator);
3438 SmallVector<Expr *, 8> Vars;
3439 for (Expr *RefExpr : VarList) {
3440 auto *DE = cast<DeclRefExpr>(RefExpr);
3441 auto *VD = cast<VarDecl>(DE->getDecl());
3442
3443 // Check if this is a TLS variable or global register.
3444 if (VD->getTLSKind() != VarDecl::TLS_None ||
3445 VD->hasAttr<OMPThreadPrivateDeclAttr>() ||
3446 (VD->getStorageClass() == SC_Register && VD->hasAttr<AsmLabelAttr>() &&
3447 !VD->isLocalVarDecl()))
3448 continue;
3449
3450 // If the variable is used several times in allocate directives, the same
3451 // allocator must be used.
3452 if (checkPreviousOMPAllocateAttribute(SemaRef, DSAStack, RefExpr, VD,
3453 AllocatorKind, Allocator))
3454 continue;
3455
3456 // OpenMP, 2.11.3 allocate Directive, Restrictions, C / C++
3457 // If a list item has a static storage type, the allocator expression in the
3458 // allocator clause must be a constant expression that evaluates to one of
3459 // the predefined memory allocator values.
3460 if (Allocator && VD->hasGlobalStorage()) {
3461 if (AllocatorKind == OMPAllocateDeclAttr::OMPUserDefinedMemAlloc) {
3462 Diag(Allocator->getExprLoc(),
3463 diag::err_omp_expected_predefined_allocator)
3464 << Allocator->getSourceRange();
3465 bool IsDecl = VD->isThisDeclarationADefinition(getASTContext()) ==
3466 VarDecl::DeclarationOnly;
3467 Diag(VD->getLocation(),
3468 IsDecl ? diag::note_previous_decl : diag::note_defined_here)
3469 << VD;
3470 continue;
3471 }
3472 }
3473
3474 Vars.push_back(RefExpr);
3475 applyOMPAllocateAttribute(SemaRef, VD, AllocatorKind, Allocator, Alignment,
3476 DE->getSourceRange());
3477 }
3478 if (Vars.empty())
3479 return nullptr;
3480 if (!Owner)
3481 Owner = SemaRef.getCurLexicalContext();
3482 auto *D = OMPAllocateDecl::Create(getASTContext(), Owner, Loc, Vars, Clauses);
3483 D->setAccess(AS_public);
3484 Owner->addDecl(D);
3485 return DeclGroupPtrTy::make(DeclGroupRef(D));
3486}
3487
3488SemaOpenMP::DeclGroupPtrTy
3489SemaOpenMP::ActOnOpenMPRequiresDirective(SourceLocation Loc,
3490 ArrayRef<OMPClause *> ClauseList) {
3491 OMPRequiresDecl *D = nullptr;
3493 Diag(Loc, diag::err_omp_invalid_scope) << "requires";
3494 } else {
3495 D = CheckOMPRequiresDecl(Loc, ClauseList);
3496 if (D) {
3498 DSAStack->addRequiresDecl(D);
3499 }
3500 }
3502}
3503
3504void SemaOpenMP::ActOnOpenMPAssumesDirective(SourceLocation Loc,
3505 OpenMPDirectiveKind DKind,
3506 ArrayRef<std::string> Assumptions,
3507 bool SkippedClauses) {
3508 if (!SkippedClauses && Assumptions.empty())
3509 Diag(Loc, diag::err_omp_no_clause_for_directive)
3510 << llvm::omp::getAllAssumeClauseOptions()
3511 << llvm::omp::getOpenMPDirectiveName(DKind);
3512
3513 auto *AA =
3514 OMPAssumeAttr::Create(getASTContext(), llvm::join(Assumptions, ","), Loc);
3515 if (DKind == llvm::omp::Directive::OMPD_begin_assumes) {
3516 OMPAssumeScoped.push_back(AA);
3517 return;
3518 }
3519
3520 // Global assumes without assumption clauses are ignored.
3521 if (Assumptions.empty())
3522 return;
3523
3524 assert(DKind == llvm::omp::Directive::OMPD_assumes &&
3525 "Unexpected omp assumption directive!");
3526 OMPAssumeGlobal.push_back(AA);
3527
3528 // The OMPAssumeGlobal scope above will take care of new declarations but
3529 // we also want to apply the assumption to existing ones, e.g., to
3530 // declarations in included headers. To this end, we traverse all existing
3531 // declaration contexts and annotate function declarations here.
3532 SmallVector<DeclContext *, 8> DeclContexts;
3533 auto *Ctx = SemaRef.CurContext;
3534 while (Ctx->getLexicalParent())
3535 Ctx = Ctx->getLexicalParent();
3536 DeclContexts.push_back(Ctx);
3537 while (!DeclContexts.empty()) {
3538 DeclContext *DC = DeclContexts.pop_back_val();
3539 for (auto *SubDC : DC->decls()) {
3540 if (SubDC->isInvalidDecl())
3541 continue;
3542 if (auto *CTD = dyn_cast<ClassTemplateDecl>(SubDC)) {
3543 DeclContexts.push_back(CTD->getTemplatedDecl());
3544 llvm::append_range(DeclContexts, CTD->specializations());
3545 continue;
3546 }
3547 if (auto *DC = dyn_cast<DeclContext>(SubDC))
3548 DeclContexts.push_back(DC);
3549 if (auto *F = dyn_cast<FunctionDecl>(SubDC)) {
3550 F->addAttr(AA);
3551 continue;
3552 }
3553 }
3554 }
3555}
3556
3557void SemaOpenMP::ActOnOpenMPEndAssumesDirective() {
3558 assert(isInOpenMPAssumeScope() && "Not in OpenMP assumes scope!");
3559 OMPAssumeScoped.pop_back();
3560}
3561
3562StmtResult SemaOpenMP::ActOnOpenMPAssumeDirective(ArrayRef<OMPClause *> Clauses,
3563 Stmt *AStmt,
3564 SourceLocation StartLoc,
3565 SourceLocation EndLoc) {
3566 if (!AStmt)
3567 return StmtError();
3568
3569 return OMPAssumeDirective::Create(getASTContext(), StartLoc, EndLoc, Clauses,
3570 AStmt);
3571}
3572
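// Illustrative example (not taken from this file) of the check below: a
// 'requires' directive with a target-affecting clause must lexically precede
// any target region, so
//   #pragma omp target
//   { /* ... */ }
//   #pragma omp requires unified_shared_memory   // diagnosed here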
3573OMPRequiresDecl *
3574SemaOpenMP::CheckOMPRequiresDecl(SourceLocation Loc,
3575 ArrayRef<OMPClause *> ClauseList) {
3576 /// For target specific clauses, the requires directive cannot be
3577 /// specified after the handling of any of the target regions in the
3578 /// current compilation unit.
3579 ArrayRef<SourceLocation> TargetLocations =
3580 DSAStack->getEncounteredTargetLocs();
3581 SourceLocation AtomicLoc = DSAStack->getAtomicDirectiveLoc();
3582 if (!TargetLocations.empty() || !AtomicLoc.isInvalid()) {
3583 for (const OMPClause *CNew : ClauseList) {
3584 // Check if any of the requires clauses affect target regions.
3585 if (isa<OMPUnifiedSharedMemoryClause>(CNew) ||
3586 isa<OMPUnifiedAddressClause>(CNew) ||
3587 isa<OMPReverseOffloadClause>(CNew) ||
3588 isa<OMPDynamicAllocatorsClause>(CNew)) {
3589 Diag(Loc, diag::err_omp_directive_before_requires)
3590 << "target" << getOpenMPClauseName(CNew->getClauseKind());
3591 for (SourceLocation TargetLoc : TargetLocations) {
3592 Diag(TargetLoc, diag::note_omp_requires_encountered_directive)
3593 << "target";
3594 }
3595 } else if (!AtomicLoc.isInvalid() &&
3596 isa<OMPAtomicDefaultMemOrderClause>(CNew)) {
3597 Diag(Loc, diag::err_omp_directive_before_requires)
3598 << "atomic" << getOpenMPClauseName(CNew->getClauseKind());
3599 Diag(AtomicLoc, diag::note_omp_requires_encountered_directive)
3600 << "atomic";
3601 }
3602 }
3603 }
3604
3605 if (!DSAStack->hasDuplicateRequiresClause(ClauseList))
3606 return OMPRequiresDecl::Create(
3607 getASTContext(), SemaRef.getCurLexicalContext(), Loc, ClauseList);
3608 return nullptr;
3609}
3610
3611static void reportOriginalDsa(Sema &SemaRef, const DSAStackTy *Stack,
3612 const ValueDecl *D,
3613 const DSAStackTy::DSAVarData &DVar,
3614 bool IsLoopIterVar) {
3615 if (DVar.RefExpr) {
3616 SemaRef.Diag(DVar.RefExpr->getExprLoc(), diag::note_omp_explicit_dsa)
3617 << getOpenMPClauseName(DVar.CKind);
3618 return;
3619 }
3620 enum {
3621 PDSA_StaticMemberShared,
3622 PDSA_StaticLocalVarShared,
3623 PDSA_LoopIterVarPrivate,
3624 PDSA_LoopIterVarLinear,
3625 PDSA_LoopIterVarLastprivate,
3626 PDSA_ConstVarShared,
3627 PDSA_GlobalVarShared,
3628 PDSA_TaskVarFirstprivate,
3629 PDSA_LocalVarPrivate,
3630 PDSA_Implicit
3631 } Reason = PDSA_Implicit;
3632 bool ReportHint = false;
3633 auto ReportLoc = D->getLocation();
3634 auto *VD = dyn_cast<VarDecl>(D);
3635 if (IsLoopIterVar) {
3636 if (DVar.CKind == OMPC_private)
3637 Reason = PDSA_LoopIterVarPrivate;
3638 else if (DVar.CKind == OMPC_lastprivate)
3639 Reason = PDSA_LoopIterVarLastprivate;
3640 else
3641 Reason = PDSA_LoopIterVarLinear;
3642 } else if (isOpenMPTaskingDirective(DVar.DKind) &&
3643 DVar.CKind == OMPC_firstprivate) {
3644 Reason = PDSA_TaskVarFirstprivate;
3645 ReportLoc = DVar.ImplicitDSALoc;
3646 } else if (VD && VD->isStaticLocal())
3647 Reason = PDSA_StaticLocalVarShared;
3648 else if (VD && VD->isStaticDataMember())
3649 Reason = PDSA_StaticMemberShared;
3650 else if (VD && VD->isFileVarDecl())
3651 Reason = PDSA_GlobalVarShared;
3652 else if (D->getType().isConstant(SemaRef.getASTContext()))
3653 Reason = PDSA_ConstVarShared;
3654 else if (VD && VD->isLocalVarDecl() && DVar.CKind == OMPC_private) {
3655 ReportHint = true;
3656 Reason = PDSA_LocalVarPrivate;
3657 }
3658 if (Reason != PDSA_Implicit) {
3659 SemaRef.Diag(ReportLoc, diag::note_omp_predetermined_dsa)
3660 << Reason << ReportHint
3661 << getOpenMPDirectiveName(Stack->getCurrentDirective());
3662 } else if (DVar.ImplicitDSALoc.isValid()) {
3663 SemaRef.Diag(DVar.ImplicitDSALoc, diag::note_omp_implicit_dsa)
3664 << getOpenMPClauseName(DVar.CKind);
3665 }
3666}
3667
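// Translates a defaultmap implicit-behavior modifier into the map type used
// for implicitly mapped variables, e.g. defaultmap(to: aggregate) makes every
// referenced aggregate behave as if it appeared in a map(to: ...) clause.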
3668static OpenMPMapClauseKind
3669getMapClauseKindFromModifier(OpenMPDefaultmapClauseModifier M,
3670 bool IsAggregateOrDeclareTarget) {
3671 OpenMPMapClauseKind Kind = OMPC_MAP_unknown;
3672 switch (M) {
3673 case OMPC_DEFAULTMAP_MODIFIER_alloc:
3674 Kind = OMPC_MAP_alloc;
3675 break;
3676 case OMPC_DEFAULTMAP_MODIFIER_to:
3677 Kind = OMPC_MAP_to;
3678 break;
3679 case OMPC_DEFAULTMAP_MODIFIER_from:
3680 Kind = OMPC_MAP_from;
3681 break;
3682 case OMPC_DEFAULTMAP_MODIFIER_tofrom:
3683 Kind = OMPC_MAP_tofrom;
3684 break;
3685 case OMPC_DEFAULTMAP_MODIFIER_present:
3686 // OpenMP 5.1 [2.21.7.3, defaultmap clause, Description]
3687 // If implicit-behavior is present, each variable referenced in the
3688 // construct in the category specified by variable-category is treated as if
3689 // it had been listed in a map clause with the map-type of alloc and
3690 // map-type-modifier of present.
3691 Kind = OMPC_MAP_alloc;
3692 break;
3693 case OMPC_DEFAULTMAP_MODIFIER_firstprivate:
3695 llvm_unreachable("Unexpected defaultmap implicit behavior");
3696 case OMPC_DEFAULTMAP_MODIFIER_none:
3697 case OMPC_DEFAULTMAP_MODIFIER_default:
3699 // IsAggregateOrDeclareTarget could be true if:
3700 // 1. the implicit behavior for aggregate is tofrom
3701 // 2. it's a declare target link
3702 if (IsAggregateOrDeclareTarget) {
3703 Kind = OMPC_MAP_tofrom;
3704 break;
3705 }
3706 llvm_unreachable("Unexpected defaultmap implicit behavior");
3707 }
3708 assert(Kind != OMPC_MAP_unknown && "Expect map kind to be known");
3709 return Kind;
3710}
3711
3712namespace {
3713struct VariableImplicitInfo {
3714 static const unsigned MapKindNum = OMPC_MAP_unknown;
3715 static const unsigned DefaultmapKindNum = OMPC_DEFAULTMAP_unknown + 1;
3716
3717 llvm::SetVector<Expr *> Privates;
3718 llvm::SetVector<Expr *> Firstprivates;
3719 llvm::SetVector<Expr *> Mappings[DefaultmapKindNum][MapKindNum];
3721 MapModifiers[DefaultmapKindNum];
3722};
3723
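// Walks the statement associated with a directive and records, for every
// referenced variable without an explicit data-sharing attribute, the
// implicit attribute it should receive (private, firstprivate, or an implicit
// map of a given kind), as well as variables whose DSA must be inherited.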
3724class DSAAttrChecker final : public StmtVisitor<DSAAttrChecker, void> {
3725 DSAStackTy *Stack;
3726 Sema &SemaRef;
3727 OpenMPDirectiveKind DKind = OMPD_unknown;
3728 bool ErrorFound = false;
3729 bool TryCaptureCXXThisMembers = false;
3730 CapturedStmt *CS = nullptr;
3731
3732 VariableImplicitInfo ImpInfo;
3733 SemaOpenMP::VarsWithInheritedDSAType VarsWithInheritedDSA;
3734 llvm::SmallDenseSet<const ValueDecl *, 4> ImplicitDeclarations;
3735
3736 void VisitSubCaptures(OMPExecutableDirective *S) {
3737 // Check implicitly captured variables.
3738 if (!S->hasAssociatedStmt() || !S->getAssociatedStmt())
3739 return;
3740 if (S->getDirectiveKind() == OMPD_atomic ||
3741 S->getDirectiveKind() == OMPD_critical ||
3742 S->getDirectiveKind() == OMPD_section ||
3743 S->getDirectiveKind() == OMPD_master ||
3744 S->getDirectiveKind() == OMPD_masked ||
3745 S->getDirectiveKind() == OMPD_scope ||
3746 S->getDirectiveKind() == OMPD_assume ||
3747 isOpenMPLoopTransformationDirective(S->getDirectiveKind())) {
3748 Visit(S->getAssociatedStmt());
3749 return;
3750 }
3751 visitSubCaptures(S->getInnermostCapturedStmt());
3752 // Try to capture inner this->member references to generate correct mappings
3753 // and diagnostics.
3754 if (TryCaptureCXXThisMembers ||
3756 llvm::any_of(S->getInnermostCapturedStmt()->captures(),
3757 [](const CapturedStmt::Capture &C) {
3758 return C.capturesThis();
3759 }))) {
3760 bool SavedTryCaptureCXXThisMembers = TryCaptureCXXThisMembers;
3761 TryCaptureCXXThisMembers = true;
3762 Visit(S->getInnermostCapturedStmt()->getCapturedStmt());
3763 TryCaptureCXXThisMembers = SavedTryCaptureCXXThisMembers;
3764 }
3765 // In tasks, firstprivates are no longer captured; they need to be analyzed
3766 // explicitly.
3767 if (isOpenMPTaskingDirective(S->getDirectiveKind()) &&
3768 !isOpenMPTaskLoopDirective(S->getDirectiveKind())) {
3769 for (OMPClause *C : S->clauses())
3770 if (auto *FC = dyn_cast<OMPFirstprivateClause>(C)) {
3771 for (Expr *Ref : FC->varlist())
3772 Visit(Ref);
3773 }
3774 }
3775 }
3776
3777public:
3778 void VisitDeclRefExpr(DeclRefExpr *E) {
3779 if (TryCaptureCXXThisMembers || E->isTypeDependent() ||
3782 E->isNonOdrUse() == clang::NOUR_Unevaluated)
3783 return;
3784 if (auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
3785 // Check the datasharing rules for the expressions in the clauses.
3786 if (!CS || (isa<OMPCapturedExprDecl>(VD) && !CS->capturesVariable(VD) &&
3787 !Stack->getTopDSA(VD, /*FromParent=*/false).RefExpr &&
3788 !Stack->isImplicitDefaultFirstprivateFD(VD))) {
3789 if (auto *CED = dyn_cast<OMPCapturedExprDecl>(VD))
3790 if (!CED->hasAttr<OMPCaptureNoInitAttr>()) {
3791 Visit(CED->getInit());
3792 return;
3793 }
3794 } else if (VD->isImplicit() || isa<OMPCapturedExprDecl>(VD))
3795 // Do not analyze internal variables and do not enclose them into
3796 // implicit clauses.
3797 if (!Stack->isImplicitDefaultFirstprivateFD(VD))
3798 return;
3799 VD = VD->getCanonicalDecl();
3800 // Skip internally declared variables.
3801 if (VD->hasLocalStorage() && CS && !CS->capturesVariable(VD) &&
3802 !Stack->isImplicitDefaultFirstprivateFD(VD) &&
3803 !Stack->isImplicitTaskFirstprivate(VD))
3804 return;
3805 // Skip allocators in uses_allocators clauses.
3806 if (Stack->isUsesAllocatorsDecl(VD))
3807 return;
3808
3809 DSAStackTy::DSAVarData DVar = Stack->getTopDSA(VD, /*FromParent=*/false);
3810 // Check if the variable has an explicit DSA set and stop the analysis if so.
3811 if (DVar.RefExpr || !ImplicitDeclarations.insert(VD).second)
3812 return;
3813
3814 // Skip internally declared static variables.
3815 std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
3816 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
3817 if (VD->hasGlobalStorage() && CS && !CS->capturesVariable(VD) &&
3818 (Stack->hasRequiresDeclWithClause<OMPUnifiedSharedMemoryClause>() ||
3819 !Res || *Res != OMPDeclareTargetDeclAttr::MT_Link) &&
3820 !Stack->isImplicitDefaultFirstprivateFD(VD) &&
3821 !Stack->isImplicitTaskFirstprivate(VD))
3822 return;
3823
3824 SourceLocation ELoc = E->getExprLoc();
3825 // The default(none) clause requires that each variable that is referenced
3826 // in the construct, and does not have a predetermined data-sharing
3827 // attribute, must have its data-sharing attribute explicitly determined
3828 // by being listed in a data-sharing attribute clause.
3829 if (DVar.CKind == OMPC_unknown &&
3830 (Stack->getDefaultDSA() == DSA_none ||
3831 Stack->getDefaultDSA() == DSA_private ||
3832 Stack->getDefaultDSA() == DSA_firstprivate) &&
3833 isImplicitOrExplicitTaskingRegion(DKind) &&
3834 VarsWithInheritedDSA.count(VD) == 0) {
3835 bool InheritedDSA = Stack->getDefaultDSA() == DSA_none;
3836 if (!InheritedDSA && (Stack->getDefaultDSA() == DSA_firstprivate ||
3837 Stack->getDefaultDSA() == DSA_private)) {
3838 DSAStackTy::DSAVarData DVar =
3839 Stack->getImplicitDSA(VD, /*FromParent=*/false);
3840 InheritedDSA = DVar.CKind == OMPC_unknown;
3841 }
3842 if (InheritedDSA)
3843 VarsWithInheritedDSA[VD] = E;
3844 if (Stack->getDefaultDSA() == DSA_none)
3845 return;
3846 }
3847
3848 // OpenMP 5.0 [2.19.7.2, defaultmap clause, Description]
3849 // If implicit-behavior is none, each variable referenced in the
3850 // construct that does not have a predetermined data-sharing attribute
3851 // and does not appear in a to or link clause on a declare target
3852 // directive must be listed in a data-mapping attribute clause, a
3853 // data-sharing attribute clause (including a data-sharing attribute
3854 // clause on a combined construct where target is one of the
3855 // constituent constructs), or an is_device_ptr clause.
3856 OpenMPDefaultmapClauseKind ClauseKind =
3858 if (SemaRef.getLangOpts().OpenMP >= 50) {
3859 bool IsModifierNone = Stack->getDefaultmapModifier(ClauseKind) ==
3860 OMPC_DEFAULTMAP_MODIFIER_none;
3861 if (DVar.CKind == OMPC_unknown && IsModifierNone &&
3862 VarsWithInheritedDSA.count(VD) == 0 && !Res) {
3863 // Only check for data-mapping attribute and is_device_ptr here
3864 // since we have already made sure that the declaration does not
3865 // have a data-sharing attribute above
3866 if (!Stack->checkMappableExprComponentListsForDecl(
3867 VD, /*CurrentRegionOnly=*/true,
3869 MapExprComponents,
3871 auto MI = MapExprComponents.rbegin();
3872 auto ME = MapExprComponents.rend();
3873 return MI != ME && MI->getAssociatedDeclaration() == VD;
3874 })) {
3875 VarsWithInheritedDSA[VD] = E;
3876 return;
3877 }
3878 }
3879 }
3880 if (SemaRef.getLangOpts().OpenMP > 50) {
3881 bool IsModifierPresent = Stack->getDefaultmapModifier(ClauseKind) ==
3882 OMPC_DEFAULTMAP_MODIFIER_present;
3883 if (IsModifierPresent) {
3884 if (!llvm::is_contained(ImpInfo.MapModifiers[ClauseKind],
3885 OMPC_MAP_MODIFIER_present)) {
3886 ImpInfo.MapModifiers[ClauseKind].push_back(
3887 OMPC_MAP_MODIFIER_present);
3888 }
3889 }
3890 }
3891
3893 !Stack->isLoopControlVariable(VD).first) {
3894 if (!Stack->checkMappableExprComponentListsForDecl(
3895 VD, /*CurrentRegionOnly=*/true,
3897 StackComponents,
3899 if (SemaRef.LangOpts.OpenMP >= 50)
3900 return !StackComponents.empty();
3901 // Variable is used if it has been marked as an array, array
3902 // section, array shaping or the variable itself.
3903 return StackComponents.size() == 1 ||
3904 llvm::all_of(
3905 llvm::drop_begin(llvm::reverse(StackComponents)),
3906 [](const OMPClauseMappableExprCommon::
3907 MappableComponent &MC) {
3908 return MC.getAssociatedDeclaration() ==
3909 nullptr &&
3910 (isa<ArraySectionExpr>(
3911 MC.getAssociatedExpression()) ||
3912 isa<OMPArrayShapingExpr>(
3913 MC.getAssociatedExpression()) ||
3914 isa<ArraySubscriptExpr>(
3915 MC.getAssociatedExpression()));
3916 });
3917 })) {
3918 bool IsFirstprivate = false;
3919 // By default lambdas are captured as firstprivates.
3920 if (const auto *RD =
3922 IsFirstprivate = RD->isLambda();
3923 IsFirstprivate =
3924 IsFirstprivate || (Stack->mustBeFirstprivate(ClauseKind) && !Res);
3925 if (IsFirstprivate) {
3926 ImpInfo.Firstprivates.insert(E);
3927 } else {
3929 Stack->getDefaultmapModifier(ClauseKind);
3931 M, ClauseKind == OMPC_DEFAULTMAP_aggregate || Res);
3932 ImpInfo.Mappings[ClauseKind][Kind].insert(E);
3933 }
3934 return;
3935 }
3936 }
3937
3938 // OpenMP [2.9.3.6, Restrictions, p.2]
3939 // A list item that appears in a reduction clause of the innermost
3940 // enclosing worksharing or parallel construct may not be accessed in an
3941 // explicit task.
3942 DVar = Stack->hasInnermostDSA(
3943 VD,
3944 [](OpenMPClauseKind C, bool AppliedToPointee) {
3945 return C == OMPC_reduction && !AppliedToPointee;
3946 },
3947 [](OpenMPDirectiveKind K) {
3948 return isOpenMPParallelDirective(K) ||
3950 },
3951 /*FromParent=*/true);
3952 if (isOpenMPTaskingDirective(DKind) && DVar.CKind == OMPC_reduction) {
3953 ErrorFound = true;
3954 SemaRef.Diag(ELoc, diag::err_omp_reduction_in_task);
3955 reportOriginalDsa(SemaRef, Stack, VD, DVar);
3956 return;
3957 }
3958
3959 // Define implicit data-sharing attributes for task.
3960 DVar = Stack->getImplicitDSA(VD, /*FromParent=*/false);
3961 if (((isOpenMPTaskingDirective(DKind) && DVar.CKind != OMPC_shared) ||
3962 (((Stack->getDefaultDSA() == DSA_firstprivate &&
3963 DVar.CKind == OMPC_firstprivate) ||
3964 (Stack->getDefaultDSA() == DSA_private &&
3965 DVar.CKind == OMPC_private)) &&
3966 !DVar.RefExpr)) &&
3967 !Stack->isLoopControlVariable(VD).first) {
3968 if (Stack->getDefaultDSA() == DSA_private)
3969 ImpInfo.Privates.insert(E);
3970 else
3971 ImpInfo.Firstprivates.insert(E);
3972 return;
3973 }
3974
3975 // Store implicitly used globals with declare target link for parent
3976 // target.
3977 if (!isOpenMPTargetExecutionDirective(DKind) && Res &&
3978 *Res == OMPDeclareTargetDeclAttr::MT_Link) {
3979 Stack->addToParentTargetRegionLinkGlobals(E);
3980 return;
3981 }
3982 }
3983 }
3984 void VisitMemberExpr(MemberExpr *E) {
3985 if (E->isTypeDependent() || E->isValueDependent() ||
3987 return;
3988 auto *FD = dyn_cast<FieldDecl>(E->getMemberDecl());
3989 if (auto *TE = dyn_cast<CXXThisExpr>(E->getBase()->IgnoreParenCasts())) {
3990 if (!FD)
3991 return;
3992 DSAStackTy::DSAVarData DVar = Stack->getTopDSA(FD, /*FromParent=*/false);
3993 // Check if the variable has an explicit DSA set and stop the analysis
3994 // if so.
3995 if (DVar.RefExpr || !ImplicitDeclarations.insert(FD).second)
3996 return;
3997
3999 !Stack->isLoopControlVariable(FD).first &&
4000 !Stack->checkMappableExprComponentListsForDecl(
4001 FD, /*CurrentRegionOnly=*/true,
4003 StackComponents,
4005 return isa<CXXThisExpr>(
4006 cast<MemberExpr>(
4007 StackComponents.back().getAssociatedExpression())
4008 ->getBase()
4009 ->IgnoreParens());
4010 })) {
4011 // OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, C/C++, p.3]
4012 // A bit-field cannot appear in a map clause.
4013 //
4014 if (FD->isBitField())
4015 return;
4016
4017 // Check to see if the member expression is referencing a class that
4018 // has already been explicitly mapped
4019 if (Stack->isClassPreviouslyMapped(TE->getType()))
4020 return;
4021
4023 Stack->getDefaultmapModifier(OMPC_DEFAULTMAP_aggregate);
4024 OpenMPDefaultmapClauseKind ClauseKind =
4027 Modifier, /*IsAggregateOrDeclareTarget=*/true);
4028 ImpInfo.Mappings[ClauseKind][Kind].insert(E);
4029 return;
4030 }
4031
4032 SourceLocation ELoc = E->getExprLoc();
4033 // OpenMP [2.9.3.6, Restrictions, p.2]
4034 // A list item that appears in a reduction clause of the innermost
4035 // enclosing worksharing or parallel construct may not be accessed in
4036 // an explicit task.
4037 DVar = Stack->hasInnermostDSA(
4038 FD,
4039 [](OpenMPClauseKind C, bool AppliedToPointee) {
4040 return C == OMPC_reduction && !AppliedToPointee;
4041 },
4042 [](OpenMPDirectiveKind K) {
4043 return isOpenMPParallelDirective(K) ||
4045 },
4046 /*FromParent=*/true);
4047 if (isOpenMPTaskingDirective(DKind) && DVar.CKind == OMPC_reduction) {
4048 ErrorFound = true;
4049 SemaRef.Diag(ELoc, diag::err_omp_reduction_in_task);
4050 reportOriginalDsa(SemaRef, Stack, FD, DVar);
4051 return;
4052 }
4053
4054 // Define implicit data-sharing attributes for task.
4055 DVar = Stack->getImplicitDSA(FD, /*FromParent=*/false);
4056 if (isOpenMPTaskingDirective(DKind) && DVar.CKind != OMPC_shared &&
4057 !Stack->isLoopControlVariable(FD).first) {
4058 // Check if there is a captured expression for the current field in the
4059 // region. Do not mark it as firstprivate unless there is no captured
4060 // expression.
4061 // TODO: try to make it firstprivate.
4062 if (DVar.CKind != OMPC_unknown)
4063 ImpInfo.Firstprivates.insert(E);
4064 }
4065 return;
4066 }
4069 if (!checkMapClauseExpressionBase(SemaRef, E, CurComponents, OMPC_map,
4070 DKind, /*NoDiagnose=*/true))
4071 return;
4072 const auto *VD = cast<ValueDecl>(
4073 CurComponents.back().getAssociatedDeclaration()->getCanonicalDecl());
4074 if (!Stack->checkMappableExprComponentListsForDecl(
4075 VD, /*CurrentRegionOnly=*/true,
4076 [&CurComponents](
4078 StackComponents,
4080 auto CCI = CurComponents.rbegin();
4081 auto CCE = CurComponents.rend();
4082 for (const auto &SC : llvm::reverse(StackComponents)) {
4083 // Do both expressions have the same kind?
4084 if (CCI->getAssociatedExpression()->getStmtClass() !=
4085 SC.getAssociatedExpression()->getStmtClass())
4086 if (!((isa<ArraySectionExpr>(
4087 SC.getAssociatedExpression()) ||
4088 isa<OMPArrayShapingExpr>(
4089 SC.getAssociatedExpression())) &&
4090 isa<ArraySubscriptExpr>(
4091 CCI->getAssociatedExpression())))
4092 return false;
4093
4094 const Decl *CCD = CCI->getAssociatedDeclaration();
4095 const Decl *SCD = SC.getAssociatedDeclaration();
4096 CCD = CCD ? CCD->getCanonicalDecl() : nullptr;
4097 SCD = SCD ? SCD->getCanonicalDecl() : nullptr;
4098 if (SCD != CCD)
4099 return false;
4100 std::advance(CCI, 1);
4101 if (CCI == CCE)
4102 break;
4103 }
4104 return true;
4105 })) {
4106 Visit(E->getBase());
4107 }
4108 } else if (!TryCaptureCXXThisMembers) {
4109 Visit(E->getBase());
4110 }
4111 }
4112 void VisitOMPExecutableDirective(OMPExecutableDirective *S) {
4113 for (OMPClause *C : S->clauses()) {
4114 // Skip analysis of arguments of private clauses for task|target
4115 // directives.
4116 if (isa_and_nonnull<OMPPrivateClause>(C))
4117 continue;
4118 // Skip analysis of arguments of implicitly defined firstprivate clause
4119 // for task|target directives.
4120 // Skip analysis of arguments of implicitly defined map clause for target
4121 // directives.
4122 if (C && !((isa<OMPFirstprivateClause>(C) || isa<OMPMapClause>(C)) &&
4123 C->isImplicit() && !isOpenMPTaskingDirective(DKind))) {
4124 for (Stmt *CC : C->children()) {
4125 if (CC)
4126 Visit(CC);
4127 }
4128 }
4129 }
4130 // Check implicitly captured variables.
4131 VisitSubCaptures(S);
4132 }
4133
4134 void VisitOMPLoopTransformationDirective(OMPLoopTransformationDirective *S) {
4135 // Loop transformation directives do not introduce data sharing
4136 VisitStmt(S);
4137 }
4138
4139 void VisitCallExpr(CallExpr *S) {
4140 for (Stmt *C : S->arguments()) {
4141 if (C) {
4142 // Check implicitly captured variables in the task-based directives to
4143 // check if they must be firstprivatized.
4144 Visit(C);
4145 }
4146 }
4147 if (Expr *Callee = S->getCallee()) {
4148 auto *CI = Callee->IgnoreParenImpCasts();
4149 if (auto *CE = dyn_cast<MemberExpr>(CI))
4150 Visit(CE->getBase());
4151 else if (auto *CE = dyn_cast<DeclRefExpr>(CI))
4152 Visit(CE);
4153 }
4154 }
4155 void VisitStmt(Stmt *S) {
4156 for (Stmt *C : S->children()) {
4157 if (C) {
4158 // Check implicitly captured variables in the task-based directives to
4159 // check if they must be firstprivatized.
4160 Visit(C);
4161 }
4162 }
4163 }
4164
4165 void visitSubCaptures(CapturedStmt *S) {
4166 for (const CapturedStmt::Capture &Cap : S->captures()) {
4167 if (!Cap.capturesVariable() && !Cap.capturesVariableByCopy())
4168 continue;
4169 VarDecl *VD = Cap.getCapturedVar();
4170 // Do not try to map the variable if it or its sub-component was mapped
4171 // already.
4173 Stack->checkMappableExprComponentListsForDecl(
4174 VD, /*CurrentRegionOnly=*/true,
4176 OpenMPClauseKind) { return true; }))
4177 continue;
4179 SemaRef, VD, VD->getType().getNonLValueExprType(SemaRef.Context),
4180 Cap.getLocation(), /*RefersToCapture=*/true);
4181 Visit(DRE);
4182 }
4183 }
4184 bool isErrorFound() const { return ErrorFound; }
4185 const VariableImplicitInfo &getImplicitInfo() const { return ImpInfo; }
4186 const SemaOpenMP::VarsWithInheritedDSAType &getVarsWithInheritedDSA() const {
4187 return VarsWithInheritedDSA;
4188 }
4189
4190 DSAAttrChecker(DSAStackTy *S, Sema &SemaRef, CapturedStmt *CS)
4191 : Stack(S), SemaRef(SemaRef), ErrorFound(false), CS(CS) {
4192 DKind = S->getCurrentDirective();
4193 // Process declare target link variables for the target directives.
4195 for (DeclRefExpr *E : Stack->getLinkGlobals())
4196 Visit(E);
4197 }
4198 }
4199};
4200} // namespace
4201
4202static void handleDeclareVariantConstructTrait(DSAStackTy *Stack,
4203 OpenMPDirectiveKind DKind,
4204 bool ScopeEntry) {
4207 Traits.emplace_back(llvm::omp::TraitProperty::construct_target_target);
4208 if (isOpenMPTeamsDirective(DKind))
4209 Traits.emplace_back(llvm::omp::TraitProperty::construct_teams_teams);
4210 if (isOpenMPParallelDirective(DKind))
4211 Traits.emplace_back(llvm::omp::TraitProperty::construct_parallel_parallel);
4213 Traits.emplace_back(llvm::omp::TraitProperty::construct_for_for);
4214 if (isOpenMPSimdDirective(DKind))
4215 Traits.emplace_back(llvm::omp::TraitProperty::construct_simd_simd);
4216 Stack->handleConstructTrait(Traits, ScopeEntry);
4217}
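// The helpers below describe the implicit parameters of the outlined
// functions for each capture region kind: a parallel region receives the
// global/bound thread ids (plus the previous loop bounds when loop-bound
// sharing is needed), while task-based regions additionally receive the part
// id, privates, copy function and task descriptor and, for taskloop, the
// bounds, stride, last-iteration flag and reductions; the trailing unnamed
// parameter is the '__context' holding the shared variables.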
4218
4220getParallelRegionParams(Sema &SemaRef, bool LoopBoundSharing) {
4221 ASTContext &Context = SemaRef.getASTContext();
4222 QualType KmpInt32Ty =
4223 Context.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1).withConst();
4224 QualType KmpInt32PtrTy =
4225 Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
4227 std::make_pair(".global_tid.", KmpInt32PtrTy),
4228 std::make_pair(".bound_tid.", KmpInt32PtrTy),
4229 };
4230 if (LoopBoundSharing) {
4231 QualType KmpSizeTy = Context.getSizeType().withConst();
4232 Params.push_back(std::make_pair(".previous.lb.", KmpSizeTy));
4233 Params.push_back(std::make_pair(".previous.ub.", KmpSizeTy));
4234 }
4235
4236 // __context with shared vars
4237 Params.push_back(std::make_pair(StringRef(), QualType()));
4238 return Params;
4239}
4240
4243 return getParallelRegionParams(SemaRef, /*LoopBoundSharing=*/false);
4244}
4245
4248 ASTContext &Context = SemaRef.getASTContext();
4249 QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
4250 QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
4251 QualType KmpInt32PtrTy =
4252 Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
4253 QualType Args[] = {VoidPtrTy};
4255 EPI.Variadic = true;
4256 QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
4258 std::make_pair(".global_tid.", KmpInt32Ty),
4259 std::make_pair(".part_id.", KmpInt32PtrTy),
4260 std::make_pair(".privates.", VoidPtrTy),
4261 std::make_pair(
4262 ".copy_fn.",
4263 Context.getPointerType(CopyFnType).withConst().withRestrict()),
4264 std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
4265 std::make_pair(StringRef(), QualType()) // __context with shared vars
4266 };
4267 return Params;
4268}
4269
4272 ASTContext &Context = SemaRef.getASTContext();
4274 if (SemaRef.getLangOpts().OpenMPIsTargetDevice) {
4275 QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
4276 Params.push_back(std::make_pair(StringRef("dyn_ptr"), VoidPtrTy));
4277 }
4278 // __context with shared vars
4279 Params.push_back(std::make_pair(StringRef(), QualType()));
4280 return Params;
4281}
4282
4286 std::make_pair(StringRef(), QualType()) // __context with shared vars
4287 };
4288 return Params;
4289}
4290
4293 ASTContext &Context = SemaRef.getASTContext();
4294 QualType KmpInt32Ty =
4295 Context.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1).withConst();
4296 QualType KmpUInt64Ty =
4297 Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0).withConst();
4298 QualType KmpInt64Ty =
4299 Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1).withConst();
4300 QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
4301 QualType KmpInt32PtrTy =
4302 Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
4303 QualType Args[] = {VoidPtrTy};
4305 EPI.Variadic = true;
4306 QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
4308 std::make_pair(".global_tid.", KmpInt32Ty),
4309 std::make_pair(".part_id.", KmpInt32PtrTy),
4310 std::make_pair(".privates.", VoidPtrTy),
4311 std::make_pair(
4312 ".copy_fn.",
4313 Context.getPointerType(CopyFnType).withConst().withRestrict()),
4314 std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
4315 std::make_pair(".lb.", KmpUInt64Ty),
4316 std::make_pair(".ub.", KmpUInt64Ty),
4317 std::make_pair(".st.", KmpInt64Ty),
4318 std::make_pair(".liter.", KmpInt32Ty),
4319 std::make_pair(".reductions.", VoidPtrTy),
4320 std::make_pair(StringRef(), QualType()) // __context with shared vars
4321 };
4322 return Params;
4323}
4324
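// For each capture region that the (possibly combined) directive expands to,
// open a corresponding CapturedRegion; task and taskloop regions are marked
// always_inline because their outlined functions are not used directly.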
4325static void processCapturedRegions(Sema &SemaRef, OpenMPDirectiveKind DKind,
4326 Scope *CurScope, SourceLocation Loc) {
4327 SmallVector<OpenMPDirectiveKind, 4> Regions;
4328 getOpenMPCaptureRegions(Regions, DKind);
4329
4330 bool LoopBoundSharing = isOpenMPLoopBoundSharingDirective(DKind);
4331
4332 auto MarkAsInlined = [&](CapturedRegionScopeInfo *CSI) {
4333 CSI->TheCapturedDecl->addAttr(AlwaysInlineAttr::CreateImplicit(
4334 SemaRef.getASTContext(), {}, AlwaysInlineAttr::Keyword_forceinline));
4335 };
4336
4337 for (auto [Level, RKind] : llvm::enumerate(Regions)) {
4338 switch (RKind) {
4339 // All region kinds that can be returned from `getOpenMPCaptureRegions`
4340 // are listed here.
4341 case OMPD_parallel:
4342 SemaRef.ActOnCapturedRegionStart(
4343 Loc, CurScope, CR_OpenMP,
4344 getParallelRegionParams(SemaRef, LoopBoundSharing), Level);
4345 break;
4346 case OMPD_teams:
4347 SemaRef.ActOnCapturedRegionStart(Loc, CurScope, CR_OpenMP,
4348 getTeamsRegionParams(SemaRef), Level);
4349 break;
4350 case OMPD_task:
4351 SemaRef.ActOnCapturedRegionStart(Loc, CurScope, CR_OpenMP,
4352 getTaskRegionParams(SemaRef), Level);
4353 // Mark this captured region as inlined, because we don't use the
4354 // outlined function directly.
4355 MarkAsInlined(SemaRef.getCurCapturedRegion());
4356 break;
4357 case OMPD_taskloop:
4358 SemaRef.ActOnCapturedRegionStart(Loc, CurScope, CR_OpenMP,
4359 getTaskloopRegionParams(SemaRef), Level);
4360 // Mark this captured region as inlined, because we don't use the
4361 // outlined function directly.
4362 MarkAsInlined(SemaRef.getCurCapturedRegion());
4363 break;
4364 case OMPD_target:
4365 SemaRef.ActOnCapturedRegionStart(Loc, CurScope, CR_OpenMP,
4366 getTargetRegionParams(SemaRef), Level);
4367 break;
4368 case OMPD_unknown:
4369 SemaRef.ActOnCapturedRegionStart(Loc, CurScope, CR_OpenMP,
4370 getUnknownRegionParams(SemaRef));
4371 break;
4372 case OMPD_metadirective:
4373 case OMPD_nothing:
4374 default:
4375 llvm_unreachable("Unexpected capture region");
4376 }
4377 }
4378}
4379
4381 Scope *CurScope) {
4382 switch (DKind) {
4383 case OMPD_atomic:
4384 case OMPD_critical:
4385 case OMPD_masked:
4386 case OMPD_master:
4387 case OMPD_section:
4388 case OMPD_tile:
4389 case OMPD_unroll:
4390 case OMPD_reverse:
4391 case OMPD_interchange:
4392 case OMPD_assume:
4393 break;
4394 default:
4395 processCapturedRegions(SemaRef, DKind, CurScope,
4396 DSAStack->getConstructLoc());
4397 break;
4398 }
4399
4400 DSAStack->setContext(SemaRef.CurContext);
4401 handleDeclareVariantConstructTrait(DSAStack, DKind, /*ScopeEntry=*/true);
4402}
4403
4404int SemaOpenMP::getNumberOfConstructScopes(unsigned Level) const {
4405 return getOpenMPCaptureLevels(DSAStack->getDirective(Level));
4406}
4407
4410 getOpenMPCaptureRegions(CaptureRegions, DKind);
4411 return CaptureRegions.size();
4412}
4413
4414static OMPCapturedExprDecl *buildCaptureDecl(Sema &S, IdentifierInfo *Id,
4415 Expr *CaptureExpr, bool WithInit,
4416 DeclContext *CurContext,
4417 bool AsExpression) {
4418 assert(CaptureExpr);
4419 ASTContext &C = S.getASTContext();
4420 Expr *Init = AsExpression ? CaptureExpr : CaptureExpr->IgnoreImpCasts();
4421 QualType Ty = Init->getType();
4422 if (CaptureExpr->getObjectKind() == OK_Ordinary && CaptureExpr->isGLValue()) {
4423 if (S.getLangOpts().CPlusPlus) {
4424 Ty = C.getLValueReferenceType(Ty);
4425 } else {
4426 Ty = C.getPointerType(Ty);
4427 ExprResult Res =
4428 S.CreateBuiltinUnaryOp(CaptureExpr->getExprLoc(), UO_AddrOf, Init);
4429 if (!Res.isUsable())
4430 return nullptr;
4431 Init = Res.get();
4432 }
4433 WithInit = true;
4434 }
4435 auto *CED = OMPCapturedExprDecl::Create(C, CurContext, Id, Ty,
4436 CaptureExpr->getBeginLoc());
4437 if (!WithInit)
4438 CED->addAttr(OMPCaptureNoInitAttr::CreateImplicit(C));
4439 CurContext->addHiddenDecl(CED);
4441 S.AddInitializerToDecl(CED, Init, /*DirectInit=*/false);
4442 return CED;
4443}
4444
4445static DeclRefExpr *buildCapture(Sema &S, ValueDecl *D, Expr *CaptureExpr,
4446 bool WithInit) {
4447 OMPCapturedExprDecl *CD;
4448 if (VarDecl *VD = S.OpenMP().isOpenMPCapturedDecl(D))
4449 CD = cast<OMPCapturedExprDecl>(VD);
4450 else
4451 CD = buildCaptureDecl(S, D->getIdentifier(), CaptureExpr, WithInit,
4452 S.CurContext,
4453 /*AsExpression=*/false);
4454 return buildDeclRefExpr(S, CD, CD->getType().getNonReferenceType(),
4455 CaptureExpr->getExprLoc());
4456}
4457
4458static ExprResult buildCapture(Sema &S, Expr *CaptureExpr, DeclRefExpr *&Ref,
4459 StringRef Name) {
4460 CaptureExpr = S.DefaultLvalueConversion(CaptureExpr).get();
4461 if (!Ref) {
4462 OMPCapturedExprDecl *CD = buildCaptureDecl(
4463 S, &S.getASTContext().Idents.get(Name), CaptureExpr,
4464 /*WithInit=*/true, S.CurContext, /*AsExpression=*/true);
4465 Ref = buildDeclRefExpr(S, CD, CD->getType().getNonReferenceType(),
4466 CaptureExpr->getExprLoc());
4467 }
4468 ExprResult Res = Ref;
4469 if (!S.getLangOpts().CPlusPlus &&
4470 CaptureExpr->getObjectKind() == OK_Ordinary && CaptureExpr->isGLValue() &&
4471 Ref->getType()->isPointerType()) {
4472 Res = S.CreateBuiltinUnaryOp(CaptureExpr->getExprLoc(), UO_Deref, Ref);
4473 if (!Res.isUsable())
4474 return ExprError();
4475 }
4476 return S.DefaultLvalueConversion(Res.get());
4477}
4478
4479namespace {
4480// OpenMP directives parsed in this section are represented as a
4481// CapturedStatement with an associated statement. If a syntax error
4482// is detected during the parsing of the associated statement, the
4483// compiler must abort processing and close the CapturedStatement.
4484//
4485// Combined directives such as 'target parallel' have more than one
4486// nested CapturedStatements. This RAII ensures that we unwind out
4487// of all the nested CapturedStatements when an error is found.
4488class CaptureRegionUnwinderRAII {
4489private:
4490 Sema &S;
4491 bool &ErrorFound;
4492 OpenMPDirectiveKind DKind = OMPD_unknown;
4493
4494public:
4495 CaptureRegionUnwinderRAII(Sema &S, bool &ErrorFound,
4496 OpenMPDirectiveKind DKind)
4497 : S(S), ErrorFound(ErrorFound), DKind(DKind) {}
4498 ~CaptureRegionUnwinderRAII() {
4499 if (ErrorFound) {
4500 int ThisCaptureLevel = S.OpenMP().getOpenMPCaptureLevels(DKind);
4501 while (--ThisCaptureLevel >= 0)
4503 }
4504 }
4505};
4506} // namespace
4507
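// For target-based directives, walk lambdas captured in the region and mark
// their by-reference captures (and 'this', when its type is compatible) as
// referenced so that they get mapped into the target region.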
4508void SemaOpenMP::tryCaptureOpenMPLambdas(ValueDecl *V) {
4509 // Capture variables captured by reference in lambdas for target-based
4510 // directives.
4512 (isOpenMPTargetExecutionDirective(DSAStack->getCurrentDirective()) ||
4514 DSAStack->getCurrentDirective()))) {
4515 QualType Type = V->getType();
4516 if (const auto *RD = Type.getCanonicalType()
4517 .getNonReferenceType()
4518 ->getAsCXXRecordDecl()) {
4519 bool SavedForceCaptureByReferenceInTargetExecutable =
4520 DSAStack->isForceCaptureByReferenceInTargetExecutable();
4521 DSAStack->setForceCaptureByReferenceInTargetExecutable(
4522 /*V=*/true);
4523 if (RD->isLambda()) {
4524 llvm::DenseMap<const ValueDecl *, FieldDecl *> Captures;
4525 FieldDecl *ThisCapture;
4526 RD->getCaptureFields(Captures, ThisCapture);
4527 for (const LambdaCapture &LC : RD->captures()) {
4528 if (LC.getCaptureKind() == LCK_ByRef) {
4529 VarDecl *VD = cast<VarDecl>(LC.getCapturedVar());
4530 DeclContext *VDC = VD->getDeclContext();
4531 if (!VDC->Encloses(SemaRef.CurContext))
4532 continue;
4533 SemaRef.MarkVariableReferenced(LC.getLocation(), VD);
4534 } else if (LC.getCaptureKind() == LCK_This) {
4536 if (!ThisTy.isNull() && getASTContext().typesAreCompatible(
4537 ThisTy, ThisCapture->getType()))
4538 SemaRef.CheckCXXThisCapture(LC.getLocation());
4539 }
4540 }
4541 }
4542 DSAStack->setForceCaptureByReferenceInTargetExecutable(
4543 SavedForceCaptureByReferenceInTargetExecutable);
4544 }
4545 }
4546}
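// Illustrative example (not taken from this file) of the restriction checked
// below: 'ordered' and 'order(concurrent)' are mutually exclusive, so
//   #pragma omp for ordered order(concurrent)
// is diagnosed.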
4547
4548static bool checkOrderedOrderSpecified(Sema &S,
4549 const ArrayRef<OMPClause *> Clauses) {
4550 const OMPOrderedClause *Ordered = nullptr;
4551 const OMPOrderClause *Order = nullptr;
4552
4553 for (const OMPClause *Clause : Clauses) {
4554 if (Clause->getClauseKind() == OMPC_ordered)
4555 Ordered = cast<OMPOrderedClause>(Clause);
4556 else if (Clause->getClauseKind() == OMPC_order) {
4557 Order = cast<OMPOrderClause>(Clause);
4558 if (Order->getKind() != OMPC_ORDER_concurrent)
4559 Order = nullptr;
4560 }
4561 if (Ordered && Order)
4562 break;
4563 }
4564
4565 if (Ordered && Order) {
4566 S.Diag(Order->getKindKwLoc(),
4567 diag::err_omp_simple_clause_incompatible_with_ordered)
4568 << getOpenMPClauseName(OMPC_order)
4569 << getOpenMPSimpleClauseTypeName(OMPC_order, OMPC_ORDER_concurrent)
4570 << SourceRange(Order->getBeginLoc(), Order->getEndLoc());
4571 S.Diag(Ordered->getBeginLoc(), diag::note_omp_ordered_param)
4572 << 0 << SourceRange(Ordered->getBeginLoc(), Ordered->getEndLoc());
4573 return true;
4574 }
4575 return false;
4576}
4577
4579 ArrayRef<OMPClause *> Clauses) {
4581 /*ScopeEntry=*/false);
4582 if (!isOpenMPCapturingDirective(DSAStack->getCurrentDirective()))
4583 return S;
4584
4585 bool ErrorFound = false;
4586 CaptureRegionUnwinderRAII CaptureRegionUnwinder(
4587 SemaRef, ErrorFound, DSAStack->getCurrentDirective());
4588 if (!S.isUsable()) {
4589 ErrorFound = true;
4590 return StmtError();
4591 }
4592
4594 getOpenMPCaptureRegions(CaptureRegions, DSAStack->getCurrentDirective());
4595 OMPOrderedClause *OC = nullptr;
4596 OMPScheduleClause *SC = nullptr;
4599 // This is required for proper codegen.
4600 for (OMPClause *Clause : Clauses) {
4601 if (!getLangOpts().OpenMPSimd &&
4602 (isOpenMPTaskingDirective(DSAStack->getCurrentDirective()) ||
4603 DSAStack->getCurrentDirective() == OMPD_target) &&
4604 Clause->getClauseKind() == OMPC_in_reduction) {
4605 // Capture taskgroup task_reduction descriptors inside the tasking regions
4606 // with the corresponding in_reduction items.
4607 auto *IRC = cast<OMPInReductionClause>(Clause);
4608 for (Expr *E : IRC->taskgroup_descriptors())
4609 if (E)
4611 }
4612 if (isOpenMPPrivate(Clause->getClauseKind()) ||
4613 Clause->getClauseKind() == OMPC_copyprivate ||
4614 (getLangOpts().OpenMPUseTLS &&
4615 getASTContext().getTargetInfo().isTLSSupported() &&
4616 Clause->getClauseKind() == OMPC_copyin)) {
4617 DSAStack->setForceVarCapturing(Clause->getClauseKind() == OMPC_copyin);
4618 // Mark all variables in private list clauses as used in inner region.
4619 for (Stmt *VarRef : Clause->children()) {
4620 if (auto *E = cast_or_null<Expr>(VarRef)) {
4622 }
4623 }
4624 DSAStack->setForceVarCapturing(/*V=*/false);
4625 } else if (CaptureRegions.size() > 1 ||
4626 CaptureRegions.back() != OMPD_unknown) {
4627 if (auto *C = OMPClauseWithPreInit::get(Clause))
4628 PICs.push_back(C);
4629 if (auto *C = OMPClauseWithPostUpdate::get(Clause)) {
4630 if (Expr *E = C->getPostUpdateExpr())
4632 }
4633 }
4634 if (Clause->getClauseKind() == OMPC_schedule)
4635 SC = cast<OMPScheduleClause>(Clause);
4636 else if (Clause->getClauseKind() == OMPC_ordered)
4637 OC = cast<OMPOrderedClause>(Clause);
4638 else if (Clause->getClauseKind() == OMPC_linear)
4639 LCs.push_back(cast<OMPLinearClause>(Clause));
4640 }
4641 // Capture allocator expressions if used.
4642 for (Expr *E : DSAStack->getInnerAllocators())
4644 // OpenMP, 2.7.1 Loop Construct, Restrictions
4645 // The nonmonotonic modifier cannot be specified if an ordered clause is
4646 // specified.
4647 if (SC &&
4648 (SC->getFirstScheduleModifier() == OMPC_SCHEDULE_MODIFIER_nonmonotonic ||
4650 OMPC_SCHEDULE_MODIFIER_nonmonotonic) &&
4651 OC) {
4652 Diag(SC->getFirstScheduleModifier() == OMPC_SCHEDULE_MODIFIER_nonmonotonic
4655 diag::err_omp_simple_clause_incompatible_with_ordered)
4656 << getOpenMPClauseName(OMPC_schedule)
4657 << getOpenMPSimpleClauseTypeName(OMPC_schedule,
4658 OMPC_SCHEDULE_MODIFIER_nonmonotonic)
4659 << SourceRange(OC->getBeginLoc(), OC->getEndLoc());
4660 ErrorFound = true;
4661 }
4662 // OpenMP 5.0, 2.9.2 Worksharing-Loop Construct, Restrictions.
4663 // If an order(concurrent) clause is present, an ordered clause may not appear
4664 // on the same directive.
4665 if (checkOrderedOrderSpecified(SemaRef, Clauses))
4666 ErrorFound = true;
4667 if (!LCs.empty() && OC && OC->getNumForLoops()) {
4668 for (const OMPLinearClause *C : LCs) {
4669 Diag(C->getBeginLoc(), diag::err_omp_linear_ordered)
4670 << SourceRange(OC->getBeginLoc(), OC->getEndLoc());
4671 }
4672 ErrorFound = true;
4673 }
4674 if (isOpenMPWorksharingDirective(DSAStack->getCurrentDirective()) &&
4675 isOpenMPSimdDirective(DSAStack->getCurrentDirective()) && OC &&
4676 OC->getNumForLoops()) {
4677 Diag(OC->getBeginLoc(), diag::err_omp_ordered_simd)
4678 << getOpenMPDirectiveName(DSAStack->getCurrentDirective());
4679 ErrorFound = true;
4680 }
4681 if (ErrorFound) {
4682 return StmtError();
4683 }
4684 StmtResult SR = S;
4685 unsigned CompletedRegions = 0;
4686 for (OpenMPDirectiveKind ThisCaptureRegion : llvm::reverse(CaptureRegions)) {
4687 // Mark all variables in private list clauses as used in inner region.
4688 // Required for proper codegen of combined directives.
4689 // TODO: add processing for other clauses.
4690 if (ThisCaptureRegion != OMPD_unknown) {
4691 for (const clang::OMPClauseWithPreInit *C : PICs) {
4692 OpenMPDirectiveKind CaptureRegion = C->getCaptureRegion();
4693 // Find the particular capture region for the clause if the
4694 // directive is a combined one with multiple capture regions.
4695 // If the directive is not a combined one, the capture region
4696 // associated with the clause is OMPD_unknown and is generated
4697 // only once.
4698 if (CaptureRegion == ThisCaptureRegion ||
4699 CaptureRegion == OMPD_unknown) {
4700 if (auto *DS = cast_or_null<DeclStmt>(C->getPreInitStmt())) {
4701 for (Decl *D : DS->decls())
4703 cast<VarDecl>(D));
4704 }
4705 }
4706 }
4707 }
4708 if (ThisCaptureRegion == OMPD_target) {
4709 // Capture allocator traits in the target region. They are used implicitly
4710 // and, thus, are not captured by default.
4711 for (OMPClause *C : Clauses) {
4712 if (const auto *UAC = dyn_cast<OMPUsesAllocatorsClause>(C)) {
4713 for (unsigned I = 0, End = UAC->getNumberOfAllocators(); I < End;
4714 ++I) {
4715 OMPUsesAllocatorsClause::Data D = UAC->getAllocatorData(I);
4716 if (Expr *E = D.AllocatorTraits)
4718 }
4719 continue;
4720 }
4721 }
4722 }
4723 if (ThisCaptureRegion == OMPD_parallel) {
4724 // Capture temp arrays for inscan reductions and locals in aligned
4725 // clauses.
4726 for (OMPClause *C : Clauses) {
4727 if (auto *RC = dyn_cast<OMPReductionClause>(C)) {
4728 if (RC->getModifier() != OMPC_REDUCTION_inscan)
4729 continue;
4730 for (Expr *E : RC->copy_array_temps())
4731 if (E)
4732 SemaRef.MarkDeclarationsReferencedInExpr(E);
4733 }
4734 if (auto *AC = dyn_cast<OMPAlignedClause>(C)) {
4735 for (Expr *E : AC->varlist())
4736 SemaRef.MarkDeclarationsReferencedInExpr(E);
4737 }
4738 }
4739 }
4740 if (++CompletedRegions == CaptureRegions.size())
4741 DSAStack->setBodyComplete();
4742 SR = SemaRef.ActOnCapturedRegionEnd(SR.get());
4743 }
4744 return SR;
4745}
4746
4747static bool checkCancelRegion(Sema &SemaRef, OpenMPDirectiveKind CurrentRegion,
4748 OpenMPDirectiveKind CancelRegion,
4749 SourceLocation StartLoc) {
4750 // CancelRegion is only needed for cancel and cancellation_point.
4751 if (CurrentRegion != OMPD_cancel && CurrentRegion != OMPD_cancellation_point)
4752 return false;
4753
4754 if (CancelRegion == OMPD_parallel || CancelRegion == OMPD_for ||
4755 CancelRegion == OMPD_sections || CancelRegion == OMPD_taskgroup)
4756 return false;
4757
4758 SemaRef.Diag(StartLoc, diag::err_omp_wrong_cancel_region)
4759 << getOpenMPDirectiveName(CancelRegion);
4760 return true;
4761}
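// Illustrative sketch: checkCancelRegion accepts only parallel, for,
// sections, and taskgroup as the construct-type-clause, so a directive such
// as
//   #pragma omp cancel simd
// would be rejected with err_omp_wrong_cancel_region, while
//   #pragma omp cancel parallel
// passes this particular check (its nesting is validated separately below).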
4762
4763static bool checkNestingOfRegions(Sema &SemaRef, const DSAStackTy *Stack,
4764 OpenMPDirectiveKind CurrentRegion,
4765 const DeclarationNameInfo &CurrentName,
4766 OpenMPDirectiveKind CancelRegion,
4767 OpenMPBindClauseKind BindKind,
4768 SourceLocation StartLoc) {
4769 if (!Stack->getCurScope())
4770 return false;
4771
4772 OpenMPDirectiveKind ParentRegion = Stack->getParentDirective();
4773 OpenMPDirectiveKind OffendingRegion = ParentRegion;
4774 bool NestingProhibited = false;
4775 bool CloseNesting = true;
4776 bool OrphanSeen = false;
4777 enum {
4778 NoRecommend,
4779 ShouldBeInParallelRegion,
4780 ShouldBeInOrderedRegion,
4781 ShouldBeInTargetRegion,
4782 ShouldBeInTeamsRegion,
4783 ShouldBeInLoopSimdRegion,
4784 } Recommend = NoRecommend;
4785
4786 SmallVector<OpenMPDirectiveKind, 4> LeafOrComposite;
4787 ArrayRef<OpenMPDirectiveKind> ParentLOC =
4788 getLeafOrCompositeConstructs(ParentRegion, LeafOrComposite);
4789 OpenMPDirectiveKind EnclosingConstruct = ParentLOC.back();
4790
4791 if (SemaRef.LangOpts.OpenMP >= 51 && Stack->isParentOrderConcurrent() &&
4792 CurrentRegion != OMPD_simd && CurrentRegion != OMPD_loop &&
4793 CurrentRegion != OMPD_parallel &&
4794 !isOpenMPCombinedParallelADirective(CurrentRegion)) {
4795 SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region_order)
4796 << getOpenMPDirectiveName(CurrentRegion);
4797 return true;
4798 }
4799 if (isOpenMPSimdDirective(ParentRegion) &&
4800 ((SemaRef.LangOpts.OpenMP <= 45 && CurrentRegion != OMPD_ordered) ||
4801 (SemaRef.LangOpts.OpenMP >= 50 && CurrentRegion != OMPD_ordered &&
4802 CurrentRegion != OMPD_simd && CurrentRegion != OMPD_atomic &&
4803 CurrentRegion != OMPD_scan))) {
4804 // OpenMP [2.16, Nesting of Regions]
4805 // OpenMP constructs may not be nested inside a simd region.
4806 // OpenMP [2.8.1,simd Construct, Restrictions]
4807 // An ordered construct with the simd clause is the only OpenMP
4808 // construct that can appear in the simd region.
4809 // Allowing a SIMD construct nested in another SIMD construct is an
4810 // extension. The OpenMP 4.5 spec does not allow it. Issue a warning
4811 // message.
4812 // OpenMP 5.0 [2.9.3.1, simd Construct, Restrictions]
4813 // The only OpenMP constructs that can be encountered during execution of
4814 // a simd region are the atomic construct, the loop construct, the simd
4815 // construct and the ordered construct with the simd clause.
4816 SemaRef.Diag(StartLoc, (CurrentRegion != OMPD_simd)
4817 ? diag::err_omp_prohibited_region_simd
4818 : diag::warn_omp_nesting_simd)
4819 << (SemaRef.LangOpts.OpenMP >= 50 ? 1 : 0);
4820 return CurrentRegion != OMPD_simd;
4821 }
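// Illustrative sketch: nesting another construct in a simd region is rejected
// (simd-in-simd is only warned about, as an extension), e.g.
//   #pragma omp simd
//   for (int i = 0; i < n; ++i) {
//     #pragma omp for          // err_omp_prohibited_region_simd
//     for (int j = 0; j < n; ++j)
//       work(i, j);
//   }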
4822 if (EnclosingConstruct == OMPD_atomic) {
4823 // OpenMP [2.16, Nesting of Regions]
4824 // OpenMP constructs may not be nested inside an atomic region.
4825 SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region_atomic);
4826 return true;
4827 }
4828 if (CurrentRegion == OMPD_section) {
4829 // OpenMP [2.7.2, sections Construct, Restrictions]
4830 // Orphaned section directives are prohibited. That is, the section
4831 // directives must appear within the sections construct and must not be
4832 // encountered elsewhere in the sections region.
4833 if (EnclosingConstruct != OMPD_sections) {
4834 SemaRef.Diag(StartLoc, diag::err_omp_orphaned_section_directive)
4835 << (ParentRegion != OMPD_unknown)
4836 << getOpenMPDirectiveName(ParentRegion);
4837 return true;
4838 }
4839 return false;
4840 }
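// Illustrative sketch: a section directive that is not closely nested in a
// sections construct is an orphaned section, e.g.
//   void f() {
//     #pragma omp section      // err_omp_orphaned_section_directive
//     { work(); }
//   }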
4841 // Allow some constructs (except teams and cancellation constructs) to be
4842 // orphaned (they could be used in functions, called from OpenMP regions
4843 // with the required preconditions).
4844 if (ParentRegion == OMPD_unknown &&
4845 !isOpenMPNestingTeamsDirective(CurrentRegion) &&
4846 CurrentRegion != OMPD_cancellation_point &&
4847 CurrentRegion != OMPD_cancel && CurrentRegion != OMPD_scan)
4848 return false;
4849 // Checks needed for mapping "loop" construct. Please check mapLoopConstruct
4850 // for a detailed explanation
4851 if (SemaRef.LangOpts.OpenMP >= 50 && CurrentRegion == OMPD_loop &&
4852 (BindKind == OMPC_BIND_parallel || BindKind == OMPC_BIND_teams) &&
4853 (isOpenMPWorksharingDirective(ParentRegion) ||
4854 EnclosingConstruct == OMPD_loop)) {
4855 int ErrorMsgNumber = (BindKind == OMPC_BIND_parallel) ? 1 : 4;
4856 SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region)
4857 << true << getOpenMPDirectiveName(ParentRegion) << ErrorMsgNumber
4858 << getOpenMPDirectiveName(CurrentRegion);
4859 return true;
4860 }
4861 if (CurrentRegion == OMPD_cancellation_point ||
4862 CurrentRegion == OMPD_cancel) {
4863 // OpenMP [2.16, Nesting of Regions]
4864 // A cancellation point construct for which construct-type-clause is
4865 // taskgroup must be nested inside a task construct. A cancellation
4866 // point construct for which construct-type-clause is not taskgroup must
4867 // be closely nested inside an OpenMP construct that matches the type
4868 // specified in construct-type-clause.
4869 // A cancel construct for which construct-type-clause is taskgroup must be
4870 // nested inside a task construct. A cancel construct for which
4871 // construct-type-clause is not taskgroup must be closely nested inside an
4872 // OpenMP construct that matches the type specified in
4873 // construct-type-clause.
4874 ArrayRef<OpenMPDirectiveKind> Leafs = getLeafConstructsOrSelf(ParentRegion);
4875 if (CancelRegion == OMPD_taskgroup) {
4876 NestingProhibited = EnclosingConstruct != OMPD_task &&
4877 (SemaRef.getLangOpts().OpenMP < 50 ||
4878 EnclosingConstruct != OMPD_taskloop);
4879 } else if (CancelRegion == OMPD_sections) {
4880 NestingProhibited = EnclosingConstruct != OMPD_section &&
4881 EnclosingConstruct != OMPD_sections;
4882 } else {
4883 NestingProhibited = CancelRegion != Leafs.back();
4884 }
4885 OrphanSeen = ParentRegion == OMPD_unknown;
4886 } else if (CurrentRegion == OMPD_master || CurrentRegion == OMPD_masked) {
4887 // OpenMP 5.1 [2.22, Nesting of Regions]
4888 // A masked region may not be closely nested inside a worksharing, loop,
4889 // atomic, task, or taskloop region.
4890 NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
4891 isOpenMPGenericLoopDirective(ParentRegion) ||
4892 isOpenMPTaskingDirective(ParentRegion);
4893 } else if (CurrentRegion == OMPD_critical && CurrentName.getName()) {
4894 // OpenMP [2.16, Nesting of Regions]
4895 // A critical region may not be nested (closely or otherwise) inside a
4896 // critical region with the same name. Note that this restriction is not
4897 // sufficient to prevent deadlock.
4898 SourceLocation PreviousCriticalLoc;
4899 bool DeadLock = Stack->hasDirective(
4900 [CurrentName, &PreviousCriticalLoc](OpenMPDirectiveKind K,
4901 const DeclarationNameInfo &DNI,
4902 SourceLocation Loc) {
4903 if (K == OMPD_critical && DNI.getName() == CurrentName.getName()) {
4904 PreviousCriticalLoc = Loc;
4905 return true;
4906 }
4907 return false;
4908 },
4909 false /* skip top directive */);
4910 if (DeadLock) {
4911 SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region_critical_same_name)
4912 << CurrentName.getName();
4913 if (PreviousCriticalLoc.isValid())
4914 SemaRef.Diag(PreviousCriticalLoc,
4915 diag::note_omp_previous_critical_region);
4916 return true;
4917 }
4918 } else if (CurrentRegion == OMPD_barrier || CurrentRegion == OMPD_scope) {
4919 // OpenMP 5.1 [2.22, Nesting of Regions]
4920 // A scope region may not be closely nested inside a worksharing, loop,
4921 // task, taskloop, critical, ordered, atomic, or masked region.
4922 // OpenMP 5.1 [2.22, Nesting of Regions]
4923 // A barrier region may not be closely nested inside a worksharing, loop,
4924 // task, taskloop, critical, ordered, atomic, or masked region.
4925 NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
4926 isOpenMPGenericLoopDirective(ParentRegion) ||
4927 isOpenMPTaskingDirective(ParentRegion) ||
4928 llvm::is_contained({OMPD_masked, OMPD_master,
4929 OMPD_critical, OMPD_ordered},
4930 EnclosingConstruct);
4931 } else if (isOpenMPWorksharingDirective(CurrentRegion) &&
4932 !isOpenMPParallelDirective(CurrentRegion) &&
4933 !isOpenMPTeamsDirective(CurrentRegion)) {
4934 // OpenMP 5.1 [2.22, Nesting of Regions]
4935 // A loop region that binds to a parallel region or a worksharing region
4936 // may not be closely nested inside a worksharing, loop, task, taskloop,
4937 // critical, ordered, atomic, or masked region.
4938 NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
4939 isOpenMPGenericLoopDirective(ParentRegion) ||
4940 isOpenMPTaskingDirective(ParentRegion) ||
4941 llvm::is_contained({OMPD_masked, OMPD_master,
4942 OMPD_critical, OMPD_ordered},
4943 EnclosingConstruct);
4944 Recommend = ShouldBeInParallelRegion;
4945 } else if (CurrentRegion == OMPD_ordered) {
4946 // OpenMP [2.16, Nesting of Regions]
4947 // An ordered region may not be closely nested inside a critical,
4948 // atomic, or explicit task region.
4949 // An ordered region must be closely nested inside a loop region (or
4950 // parallel loop region) with an ordered clause.
4951 // OpenMP [2.8.1,simd Construct, Restrictions]
4952 // An ordered construct with the simd clause is the only OpenMP construct
4953 // that can appear in the simd region.
4954 NestingProhibited = EnclosingConstruct == OMPD_critical ||
4955 isOpenMPTaskingDirective(ParentRegion) ||
4956 !(isOpenMPSimdDirective(ParentRegion) ||
4957 Stack->isParentOrderedRegion());
4958 Recommend = ShouldBeInOrderedRegion;
4959 } else if (isOpenMPNestingTeamsDirective(CurrentRegion)) {
4960 // OpenMP [2.16, Nesting of Regions]
4961 // If specified, a teams construct must be contained within a target
4962 // construct.
4963 NestingProhibited =
4964 (SemaRef.LangOpts.OpenMP <= 45 && EnclosingConstruct != OMPD_target) ||
4965 (SemaRef.LangOpts.OpenMP >= 50 && EnclosingConstruct != OMPD_unknown &&
4966 EnclosingConstruct != OMPD_target);
4967 OrphanSeen = ParentRegion == OMPD_unknown;
4968 Recommend = ShouldBeInTargetRegion;
4969 } else if (CurrentRegion == OMPD_scan) {
4970 if (SemaRef.LangOpts.OpenMP >= 50) {
4971 // OpenMP spec 5.0 and 5.1 require scan to be directly enclosed by for,
4972 // simd, or for simd. This has to take into account combined directives.
4973 // In 5.2 this seems to be implied by the fact that the specified
4974 // separated constructs are do, for, and simd.
4975 NestingProhibited = !llvm::is_contained(
4976 {OMPD_for, OMPD_simd, OMPD_for_simd}, EnclosingConstruct);
4977 } else {
4978 NestingProhibited = true;
4979 }
4980 OrphanSeen = ParentRegion == OMPD_unknown;
4981 Recommend = ShouldBeInLoopSimdRegion;
4982 }
4983 if (!NestingProhibited && !isOpenMPTargetExecutionDirective(CurrentRegion) &&
4984 !isOpenMPTargetDataManagementDirective(CurrentRegion) &&
4985 EnclosingConstruct == OMPD_teams) {
4986 // OpenMP [5.1, 2.22, Nesting of Regions]
4987 // distribute, distribute simd, distribute parallel worksharing-loop,
4988 // distribute parallel worksharing-loop SIMD, loop, parallel regions,
4989 // including any parallel regions arising from combined constructs,
4990 // omp_get_num_teams() regions, and omp_get_team_num() regions are the
4991 // only OpenMP regions that may be strictly nested inside the teams
4992 // region.
4993 //
4994 // As an extension, we permit atomic within teams as well.
4995 NestingProhibited = !isOpenMPParallelDirective(CurrentRegion) &&
4996 !isOpenMPDistributeDirective(CurrentRegion) &&
4997 CurrentRegion != OMPD_loop &&
4998 !(SemaRef.getLangOpts().OpenMPExtensions &&
4999 CurrentRegion == OMPD_atomic);
5000 Recommend = ShouldBeInParallelRegion;
5001 }
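// Illustrative sketch: the teams restriction above rejects, for example,
//   #pragma omp target teams
//   #pragma omp single         // prohibited: only parallel, distribute, and
//   { work(); }                // loop regions (plus atomic as an extension)
//                              // may be strictly nested inside teams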
5002 if (!NestingProhibited && CurrentRegion == OMPD_loop) {
5003 // OpenMP [5.1, 2.11.7, loop Construct, Restrictions]
5004 // If the bind clause is present on the loop construct and binding is
5005 // teams then the corresponding loop region must be strictly nested inside
5006 // a teams region.
5007 NestingProhibited =
5008 BindKind == OMPC_BIND_teams && EnclosingConstruct != OMPD_teams;
5009 Recommend = ShouldBeInTeamsRegion;
5010 }
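// Illustrative sketch: with bind(teams) the loop region must be strictly
// nested in a teams region, so
//   #pragma omp parallel
//   #pragma omp loop bind(teams)   // diagnosed: enclosing region is parallel,
//   for (int i = 0; i < n; ++i)    // not teams
//     work(i);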
5011 if (!NestingProhibited && isOpenMPNestingDistributeDirective(CurrentRegion)) {
5012 // OpenMP 4.5 [2.17 Nesting of Regions]
5013 // The region associated with the distribute construct must be strictly
5014 // nested inside a teams region
5015 NestingProhibited = EnclosingConstruct != OMPD_teams;
5016 Recommend = ShouldBeInTeamsRegion;
5017 }
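// Illustrative sketch: distribute outside a teams region is rejected, e.g.
//   #pragma omp target
//   #pragma omp distribute         // diagnosed: must be strictly nested in a
//   for (int i = 0; i < n; ++i)    // teams region
//     work(i);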
5018 if (!NestingProhibited &&
5019 (isOpenMPTargetExecutionDirective(CurrentRegion) ||
5020 isOpenMPTargetDataManagementDirective(CurrentRegion))) {
5021 // OpenMP 4.5 [2.17 Nesting of Regions]
5022 // If a target, target update, target data, target enter data, or
5023 // target exit data construct is encountered during execution of a
5024 // target region, the behavior is unspecified.
5025 NestingProhibited = Stack->hasDirective(
5026 [&OffendingRegion](OpenMPDirectiveKind K, const DeclarationNameInfo &,
5027 SourceLocation) {
5028 if (isOpenMPTargetExecutionDirective(K)) {
5029 OffendingRegion = K;
5030 return true;
5031 }
5032 return false;
5033 },
5034 false /* don't skip top directive */);
5035 CloseNesting = false;
5036 }
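// Illustrative sketch: the stack walk above flags any target-related
// construct encountered inside an enclosing target region, e.g.
//   #pragma omp target
//   {
//     #pragma omp target update to(x)   // diagnosed: target update inside a
//   }                                   // target region ('x' is a placeholder)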
5037 if (NestingProhibited) {
5038 if (OrphanSeen) {
5039 SemaRef.Diag(StartLoc, diag::err_omp_orphaned_device_directive)
5040 << getOpenMPDirectiveName(CurrentRegion) << Recommend;
5041 } else {
5042 SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region)
5043 << CloseNesting << getOpenMPDirectiveName(OffendingRegion)
5044 << Recommend << getOpenMPDirectiveName(CurrentRegion);
5045 }
5046 return true;
5047 }
5048 return false;
5049}
5050
5051struct Kind2Unsigned {
5052 using argument_type = OpenMPDirectiveKind;
5053 unsigned operator()(argument_type DK) { return unsigned(DK); }
5054};
5055static bool checkIfClauses(Sema &S, OpenMPDirectiveKind Kind,
5056 ArrayRef<OMPClause *> Clauses,
5057 ArrayRef<OpenMPDirectiveKind> AllowedNameModifiers) {
5058 bool ErrorFound = false;
5059 unsigned NamedModifiersNumber = 0;
5060 llvm::IndexedMap<const OMPIfClause *, Kind2Unsigned> FoundNameModifiers;
5061 FoundNameModifiers.resize(llvm::omp::Directive_enumSize + 1);
5062 SmallVector<SourceLocation, 4> NameModifierLoc;
5063 for (const OMPClause *C : Clauses) {
5064 if (const auto *IC = dyn_cast_or_null<OMPIfClause>(C)) {
5065 // At most one if clause without a directive-name-modifier can appear on
5066 // the directive.
5067 OpenMPDirectiveKind CurNM = IC->getNameModifier();
5068 if (FoundNameModifiers[CurNM]) {
5069 S.Diag(C->getBeginLoc(), diag::err_omp_more_one_clause)
5070 << getOpenMPDirectiveName(Kind) << getOpenMPClauseName(OMPC_if)
5071 << (CurNM != OMPD_unknown) << getOpenMPDirectiveName(CurNM);
5072 ErrorFound = true;
5073 } else if (CurNM != OMPD_unknown) {
5074 NameModifierLoc.push_back(IC->getNameModifierLoc());
5075 ++NamedModifiersNumber;
5076 }
5077 FoundNameModifiers[CurNM] = IC;
5078 if (CurNM == OMPD_unknown)
5079 continue;
5080 // Check if the specified name modifier is allowed for the current
5081 // directive.
5082 // At most one if clause with the particular directive-name-modifier can
5083 // appear on the directive.
5084 if (!llvm::is_contained(AllowedNameModifiers, CurNM)) {
5085 S.Diag(IC->getNameModifierLoc(),
5086 diag::err_omp_wrong_if_directive_name_modifier)
5087 << getOpenMPDirectiveName(CurNM) << getOpenMPDirectiveName(Kind);
5088 ErrorFound = true;
5089 }
5090 }
5091 }
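// Illustrative sketch: the loop above rejects repeated if clauses for the
// same directive-name-modifier and modifiers that do not name a constituent
// directive, e.g.
//   #pragma omp parallel for if(target : cond)   // 'target' is not a valid
//   for (int i = 0; i < n; ++i)                  // name modifier here
//     work(i);                                   // ('cond', 'n' placeholders)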
5092 // If any if clause on the directive includes a directive-name-modifier then
5093 // all if clauses on the directive must include a directive-name-modifier.
5094 if (FoundNameModifiers[OMPD_unknown] && NamedModifiersNumber > 0) {
5095 if (NamedModifiersNumber == AllowedNameModifiers.size()) {
5096 S.Diag(FoundNameModifiers[OMPD_unknown]->getBeginLoc(),
5097 diag::err_omp_no_more_if_clause);
5098 } else {
5099 std::string Values;
5100 std::string Sep(", ");
5101 unsigned AllowedCnt = 0;
5102 unsigned TotalAllowedNum =
5103 AllowedNameModifiers.size() - NamedModifiersNumber;
5104 for (unsigned Cnt = 0, End = AllowedNameModifiers.size(); Cnt < End;
5105 ++Cnt) {
5106 OpenMPDirectiveKind NM = AllowedNameModifiers[Cnt];
5107 if (!FoundNameModifiers[NM]) {
5108 Values += "'";
5109 Values += getOpenMPDirectiveName(NM);
5110 Values += "'";
5111 if (AllowedCnt + 2 == TotalAllowedNum)
5112 Values += " or ";
5113 else if (AllowedCnt + 1 != TotalAllowedNum)
5114 Values += Sep;
5115 ++AllowedCnt;
5116 }
5117 }
5118 S.Diag(FoundNameModifiers[OMPD_unknown]->getCondition()->getBeginLoc(),
5119 diag::err_omp_unnamed_if_clause)
5120 << (TotalAllowedNum > 1) << Values;
5121 }
5122 for (SourceLocation Loc : NameModifierLoc) {
5123 S.Diag(Loc, diag::note_omp_previous_named_if_clause);
5124 }