clang  15.0.0git
SemaOpenMP.cpp
1 //===--- SemaOpenMP.cpp - Semantic Analysis for OpenMP constructs ---------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements semantic analysis for OpenMP directives and
10 /// clauses.
11 ///
12 //===----------------------------------------------------------------------===//
13 
14 #include "TreeTransform.h"
15 #include "clang/AST/ASTContext.h"
18 #include "clang/AST/Decl.h"
19 #include "clang/AST/DeclCXX.h"
20 #include "clang/AST/DeclOpenMP.h"
21 #include "clang/AST/OpenMPClause.h"
22 #include "clang/AST/StmtCXX.h"
23 #include "clang/AST/StmtOpenMP.h"
24 #include "clang/AST/StmtVisitor.h"
25 #include "clang/AST/TypeOrdering.h"
29 #include "clang/Basic/TargetInfo.h"
31 #include "clang/Sema/Lookup.h"
32 #include "clang/Sema/Scope.h"
33 #include "clang/Sema/ScopeInfo.h"
35 #include "llvm/ADT/IndexedMap.h"
36 #include "llvm/ADT/PointerEmbeddedInt.h"
37 #include "llvm/ADT/STLExtras.h"
38 #include "llvm/ADT/SmallSet.h"
39 #include "llvm/ADT/StringExtras.h"
40 #include "llvm/Frontend/OpenMP/OMPAssume.h"
41 #include "llvm/Frontend/OpenMP/OMPConstants.h"
42 #include <set>
43 
44 using namespace clang;
45 using namespace llvm::omp;
46 
47 //===----------------------------------------------------------------------===//
48 // Stack of data-sharing attributes for variables
49 //===----------------------------------------------------------------------===//
50 
51 static const Expr *checkMapClauseExpressionBase(
52  Sema &SemaRef, Expr *E,
53  OMPClauseMappableExprCommon::MappableExprComponentListRef CurComponents,
54  OpenMPClauseKind CKind, OpenMPDirectiveKind DKind, bool NoDiagnose);
55 
56 namespace {
57 /// Default data sharing attributes, which can be applied to a directive.
58 enum DefaultDataSharingAttributes {
59  DSA_unspecified = 0, /// Data sharing attribute not specified.
60  DSA_none = 1 << 0, /// Default data sharing attribute 'none'.
61  DSA_shared = 1 << 1, /// Default data sharing attribute 'shared'.
62  DSA_private = 1 << 2, /// Default data sharing attribute 'private'.
63  DSA_firstprivate = 1 << 3, /// Default data sharing attribute 'firstprivate'.
64 };
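// Illustrative note (added for exposition; not part of the original file):
// these flags mirror the argument of the 'default' clause, e.g.
//
//   #pragma omp parallel default(none)
//
// records DSA_none for the region (via setDefaultDSANone() below), which then
// forces every referenced variable to carry an explicit data-sharing attribute.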
65 
66 /// Stack for tracking declarations used in OpenMP directives and
67 /// clauses and their data-sharing attributes.
68 class DSAStackTy {
69 public:
70  struct DSAVarData {
71  OpenMPDirectiveKind DKind = OMPD_unknown;
72  OpenMPClauseKind CKind = OMPC_unknown;
73  unsigned Modifier = 0;
74  const Expr *RefExpr = nullptr;
75  DeclRefExpr *PrivateCopy = nullptr;
76  SourceLocation ImplicitDSALoc;
77  bool AppliedToPointee = false;
78  DSAVarData() = default;
79  DSAVarData(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind,
80  const Expr *RefExpr, DeclRefExpr *PrivateCopy,
81  SourceLocation ImplicitDSALoc, unsigned Modifier,
82  bool AppliedToPointee)
83  : DKind(DKind), CKind(CKind), Modifier(Modifier), RefExpr(RefExpr),
84  PrivateCopy(PrivateCopy), ImplicitDSALoc(ImplicitDSALoc),
85  AppliedToPointee(AppliedToPointee) {}
86  };
87  using OperatorOffsetTy =
88  llvm::SmallVector<std::pair<Expr *, OverloadedOperatorKind>, 4>;
89  using DoacrossDependMapTy =
90  llvm::DenseMap<OMPDependClause *, OperatorOffsetTy>;
91  /// Kind of the declaration used in the uses_allocators clauses.
92  enum class UsesAllocatorsDeclKind {
93  /// Predefined allocator
94  PredefinedAllocator,
95  /// User-defined allocator
96  UserDefinedAllocator,
97  /// The declaration that represents an allocator trait
98  AllocatorTrait,
99  };
100 
101 private:
102  struct DSAInfo {
103  OpenMPClauseKind Attributes = OMPC_unknown;
104  unsigned Modifier = 0;
105  /// Pointer to a reference expression and a flag which shows that the
106  /// variable is marked as lastprivate(true) or not (false).
107  llvm::PointerIntPair<const Expr *, 1, bool> RefExpr;
108  DeclRefExpr *PrivateCopy = nullptr;
109  /// true if the attribute is applied to the pointee, not the variable
110  /// itself.
111  bool AppliedToPointee = false;
112  };
113  using DeclSAMapTy = llvm::SmallDenseMap<const ValueDecl *, DSAInfo, 8>;
114  using UsedRefMapTy = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 8>;
115  using LCDeclInfo = std::pair<unsigned, VarDecl *>;
116  using LoopControlVariablesMapTy =
117  llvm::SmallDenseMap<const ValueDecl *, LCDeclInfo, 8>;
118  /// Struct that associates a component with the clause kind where they are
119  /// found.
120  struct MappedExprComponentTy {
121  OMPClauseMappableExprCommon::MappableExprComponentLists Components;
122  OpenMPClauseKind Kind = OMPC_unknown;
123  };
124  using MappedExprComponentsTy =
125  llvm::DenseMap<const ValueDecl *, MappedExprComponentTy>;
126  using CriticalsWithHintsTy =
127  llvm::StringMap<std::pair<const OMPCriticalDirective *, llvm::APSInt>>;
128  struct ReductionData {
129  using BOKPtrType = llvm::PointerEmbeddedInt<BinaryOperatorKind, 16>;
130  SourceRange ReductionRange;
131  llvm::PointerUnion<const Expr *, BOKPtrType> ReductionOp;
132  ReductionData() = default;
133  void set(BinaryOperatorKind BO, SourceRange RR) {
134  ReductionRange = RR;
135  ReductionOp = BO;
136  }
137  void set(const Expr *RefExpr, SourceRange RR) {
138  ReductionRange = RR;
139  ReductionOp = RefExpr;
140  }
141  };
142  using DeclReductionMapTy =
143  llvm::SmallDenseMap<const ValueDecl *, ReductionData, 4>;
144  struct DefaultmapInfo {
145  OpenMPDefaultmapClauseModifier ImplicitBehavior =
146  OMPC_DEFAULTMAP_MODIFIER_unknown;
147  SourceLocation SLoc;
148  DefaultmapInfo() = default;
149  DefaultmapInfo(OpenMPDefaultmapClauseModifier M, SourceLocation Loc)
150  : ImplicitBehavior(M), SLoc(Loc) {}
151  };
152 
153  struct SharingMapTy {
154  DeclSAMapTy SharingMap;
155  DeclReductionMapTy ReductionMap;
156  UsedRefMapTy AlignedMap;
157  UsedRefMapTy NontemporalMap;
158  MappedExprComponentsTy MappedExprComponents;
159  LoopControlVariablesMapTy LCVMap;
160  DefaultDataSharingAttributes DefaultAttr = DSA_unspecified;
161  SourceLocation DefaultAttrLoc;
162  DefaultmapInfo DefaultmapMap[OMPC_DEFAULTMAP_unknown];
163  OpenMPDirectiveKind Directive = OMPD_unknown;
164  DeclarationNameInfo DirectiveName;
165  Scope *CurScope = nullptr;
166  DeclContext *Context = nullptr;
167  SourceLocation ConstructLoc;
168  /// Set of 'depend' clauses with 'sink|source' dependence kind. Required to
169  /// get the data (loop counters etc.) about enclosing loop-based construct.
170  /// This data is required during codegen.
171  DoacrossDependMapTy DoacrossDepends;
172  /// First argument (Expr *) contains optional argument of the
173  /// 'ordered' clause, the second one is true if the region has 'ordered'
174  /// clause, false otherwise.
175  llvm::Optional<std::pair<const Expr *, OMPOrderedClause *>> OrderedRegion;
176  unsigned AssociatedLoops = 1;
177  bool HasMutipleLoops = false;
178  const Decl *PossiblyLoopCounter = nullptr;
179  bool NowaitRegion = false;
180  bool UntiedRegion = false;
181  bool CancelRegion = false;
182  bool LoopStart = false;
183  bool BodyComplete = false;
184  SourceLocation PrevScanLocation;
185  SourceLocation PrevOrderedLocation;
186  SourceLocation InnerTeamsRegionLoc;
187  /// Reference to the taskgroup task_reduction reference expression.
188  Expr *TaskgroupReductionRef = nullptr;
189  llvm::DenseSet<QualType> MappedClassesQualTypes;
190  SmallVector<Expr *, 4> InnerUsedAllocators;
191  llvm::DenseSet<CanonicalDeclPtr<Decl>> ImplicitTaskFirstprivates;
192  /// List of globals marked as declare target link in this target region
193  /// (isOpenMPTargetExecutionDirective(Directive) == true).
194  llvm::SmallVector<DeclRefExpr *, 4> DeclareTargetLinkVarDecls;
195  /// List of decls used in inclusive/exclusive clauses of the scan directive.
196  llvm::DenseSet<CanonicalDeclPtr<Decl>> UsedInScanDirective;
197  llvm::DenseMap<CanonicalDeclPtr<const Decl>, UsesAllocatorsDeclKind>
198  UsesAllocatorsDecls;
199  Expr *DeclareMapperVar = nullptr;
200  SharingMapTy(OpenMPDirectiveKind DKind, DeclarationNameInfo Name,
201  Scope *CurScope, SourceLocation Loc)
202  : Directive(DKind), DirectiveName(Name), CurScope(CurScope),
203  ConstructLoc(Loc) {}
204  SharingMapTy() = default;
205  };
206 
207  using StackTy = SmallVector<SharingMapTy, 4>;
208 
209  /// Stack of used declaration and their data-sharing attributes.
210  DeclSAMapTy Threadprivates;
211  const FunctionScopeInfo *CurrentNonCapturingFunctionScope = nullptr;
212  SmallVector<std::pair<StackTy, const FunctionScopeInfo *>, 4> Stack;
213  /// true, if check for DSA must be from parent directive, false, if
214  /// from current directive.
215  OpenMPClauseKind ClauseKindMode = OMPC_unknown;
216  Sema &SemaRef;
217  bool ForceCapturing = false;
218  /// true if all the variables in the target executable directives must be
219  /// captured by reference.
220  bool ForceCaptureByReferenceInTargetExecutable = false;
221  CriticalsWithHintsTy Criticals;
222  unsigned IgnoredStackElements = 0;
223 
224  /// Iterators over the stack iterate in order from innermost to outermost
225  /// directive.
226  using const_iterator = StackTy::const_reverse_iterator;
227  const_iterator begin() const {
228  return Stack.empty() ? const_iterator()
229  : Stack.back().first.rbegin() + IgnoredStackElements;
230  }
231  const_iterator end() const {
232  return Stack.empty() ? const_iterator() : Stack.back().first.rend();
233  }
234  using iterator = StackTy::reverse_iterator;
235  iterator begin() {
236  return Stack.empty() ? iterator()
237  : Stack.back().first.rbegin() + IgnoredStackElements;
238  }
239  iterator end() {
240  return Stack.empty() ? iterator() : Stack.back().first.rend();
241  }
242 
243  // Convenience operations to get at the elements of the stack.
244 
245  bool isStackEmpty() const {
246  return Stack.empty() ||
247  Stack.back().second != CurrentNonCapturingFunctionScope ||
248  Stack.back().first.size() <= IgnoredStackElements;
249  }
250  size_t getStackSize() const {
251  return isStackEmpty() ? 0
252  : Stack.back().first.size() - IgnoredStackElements;
253  }
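 // Illustrative note (added for exposition; not part of the original file):
 // each Stack entry pairs a directive stack (StackTy) with the non-capturing
 // function scope it was pushed for, so entries that belong to a different
 // function scope, or that fall within IgnoredStackElements, are treated as
 // absent by the two helpers above.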
254 
255  SharingMapTy *getTopOfStackOrNull() {
256  size_t Size = getStackSize();
257  if (Size == 0)
258  return nullptr;
259  return &Stack.back().first[Size - 1];
260  }
261  const SharingMapTy *getTopOfStackOrNull() const {
262  return const_cast<DSAStackTy &>(*this).getTopOfStackOrNull();
263  }
264  SharingMapTy &getTopOfStack() {
265  assert(!isStackEmpty() && "no current directive");
266  return *getTopOfStackOrNull();
267  }
268  const SharingMapTy &getTopOfStack() const {
269  return const_cast<DSAStackTy &>(*this).getTopOfStack();
270  }
271 
272  SharingMapTy *getSecondOnStackOrNull() {
273  size_t Size = getStackSize();
274  if (Size <= 1)
275  return nullptr;
276  return &Stack.back().first[Size - 2];
277  }
278  const SharingMapTy *getSecondOnStackOrNull() const {
279  return const_cast<DSAStackTy &>(*this).getSecondOnStackOrNull();
280  }
281 
282  /// Get the stack element at a certain level (previously returned by
283  /// \c getNestingLevel).
284  ///
285  /// Note that nesting levels count from outermost to innermost, and this is
286  /// the reverse of our iteration order where new inner levels are pushed at
287  /// the front of the stack.
288  SharingMapTy &getStackElemAtLevel(unsigned Level) {
289  assert(Level < getStackSize() && "no such stack element");
290  return Stack.back().first[Level];
291  }
292  const SharingMapTy &getStackElemAtLevel(unsigned Level) const {
293  return const_cast<DSAStackTy &>(*this).getStackElemAtLevel(Level);
294  }
295 
296  DSAVarData getDSA(const_iterator &Iter, ValueDecl *D) const;
297 
298  /// Checks if the variable is a local for OpenMP region.
299  bool isOpenMPLocal(VarDecl *D, const_iterator Iter) const;
300 
301  /// Vector of previously declared requires directives
302  SmallVector<const OMPRequiresDecl *, 2> RequiresDecls;
303  /// omp_allocator_handle_t type.
304  QualType OMPAllocatorHandleT;
305  /// omp_depend_t type.
306  QualType OMPDependT;
307  /// omp_event_handle_t type.
308  QualType OMPEventHandleT;
309  /// omp_alloctrait_t type.
310  QualType OMPAlloctraitT;
311  /// Expression for the predefined allocators.
312  Expr *OMPPredefinedAllocators[OMPAllocateDeclAttr::OMPUserDefinedMemAlloc] = {
313  nullptr};
314  /// Vector of previously encountered target directives
315  SmallVector<SourceLocation, 2> TargetLocations;
316  SourceLocation AtomicLocation;
317  /// Vector of declare variant construct traits.
318  SmallVector<llvm::omp::TraitProperty, 8> ConstructTraits;
319 
320 public:
321  explicit DSAStackTy(Sema &S) : SemaRef(S) {}
322 
323  /// Sets omp_allocator_handle_t type.
324  void setOMPAllocatorHandleT(QualType Ty) { OMPAllocatorHandleT = Ty; }
325  /// Gets omp_allocator_handle_t type.
326  QualType getOMPAllocatorHandleT() const { return OMPAllocatorHandleT; }
327  /// Sets omp_alloctrait_t type.
328  void setOMPAlloctraitT(QualType Ty) { OMPAlloctraitT = Ty; }
329  /// Gets omp_alloctrait_t type.
330  QualType getOMPAlloctraitT() const { return OMPAlloctraitT; }
331  /// Sets the given default allocator.
332  void setAllocator(OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind,
333  Expr *Allocator) {
334  OMPPredefinedAllocators[AllocatorKind] = Allocator;
335  }
336  /// Returns the specified default allocator.
337  Expr *getAllocator(OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind) const {
338  return OMPPredefinedAllocators[AllocatorKind];
339  }
340  /// Sets omp_depend_t type.
341  void setOMPDependT(QualType Ty) { OMPDependT = Ty; }
342  /// Gets omp_depend_t type.
343  QualType getOMPDependT() const { return OMPDependT; }
344 
345  /// Sets omp_event_handle_t type.
346  void setOMPEventHandleT(QualType Ty) { OMPEventHandleT = Ty; }
347  /// Gets omp_event_handle_t type.
348  QualType getOMPEventHandleT() const { return OMPEventHandleT; }
349 
350  bool isClauseParsingMode() const { return ClauseKindMode != OMPC_unknown; }
351  OpenMPClauseKind getClauseParsingMode() const {
352  assert(isClauseParsingMode() && "Must be in clause parsing mode.");
353  return ClauseKindMode;
354  }
355  void setClauseParsingMode(OpenMPClauseKind K) { ClauseKindMode = K; }
356 
357  bool isBodyComplete() const {
358  const SharingMapTy *Top = getTopOfStackOrNull();
359  return Top && Top->BodyComplete;
360  }
361  void setBodyComplete() { getTopOfStack().BodyComplete = true; }
362 
363  bool isForceVarCapturing() const { return ForceCapturing; }
364  void setForceVarCapturing(bool V) { ForceCapturing = V; }
365 
366  void setForceCaptureByReferenceInTargetExecutable(bool V) {
367  ForceCaptureByReferenceInTargetExecutable = V;
368  }
369  bool isForceCaptureByReferenceInTargetExecutable() const {
370  return ForceCaptureByReferenceInTargetExecutable;
371  }
372 
373  void push(OpenMPDirectiveKind DKind, const DeclarationNameInfo &DirName,
374  Scope *CurScope, SourceLocation Loc) {
375  assert(!IgnoredStackElements &&
376  "cannot change stack while ignoring elements");
377  if (Stack.empty() ||
378  Stack.back().second != CurrentNonCapturingFunctionScope)
379  Stack.emplace_back(StackTy(), CurrentNonCapturingFunctionScope);
380  Stack.back().first.emplace_back(DKind, DirName, CurScope, Loc);
381  Stack.back().first.back().DefaultAttrLoc = Loc;
382  }
383 
384  void pop() {
385  assert(!IgnoredStackElements &&
386  "cannot change stack while ignoring elements");
387  assert(!Stack.back().first.empty() &&
388  "Data-sharing attributes stack is empty!");
389  Stack.back().first.pop_back();
390  }
391 
392  /// RAII object to temporarily leave the scope of a directive when we want to
393  /// logically operate in its parent.
394  class ParentDirectiveScope {
395  DSAStackTy &Self;
396  bool Active;
397 
398  public:
399  ParentDirectiveScope(DSAStackTy &Self, bool Activate)
400  : Self(Self), Active(false) {
401  if (Activate)
402  enable();
403  }
404  ~ParentDirectiveScope() { disable(); }
405  void disable() {
406  if (Active) {
407  --Self.IgnoredStackElements;
408  Active = false;
409  }
410  }
411  void enable() {
412  if (!Active) {
413  ++Self.IgnoredStackElements;
414  Active = true;
415  }
416  }
417  };
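 // Illustrative sketch (added for exposition; not part of the original file).
 // Typical use while a clause of the innermost directive must be interpreted
 // against the enclosing directive, assuming a DSAStackTy &DSAS (hypothetical
 // name) for the current stack:
 //
 //   {
 //     ParentDirectiveScope ParentScope(DSAS, /*Activate=*/true);
 //     // Queries such as getCurrentDirective() now see the parent region,
 //     // because the innermost stack entry is temporarily ignored.
 //   } // destructor decrements IgnoredStackElements again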
418 
419  /// Marks that we've started loop parsing.
420  void loopInit() {
421  assert(isOpenMPLoopDirective(getCurrentDirective()) &&
422  "Expected loop-based directive.");
423  getTopOfStack().LoopStart = true;
424  }
425  /// Start capturing of the variables in the loop context.
426  void loopStart() {
427  assert(isOpenMPLoopDirective(getCurrentDirective()) &&
428  "Expected loop-based directive.");
429  getTopOfStack().LoopStart = false;
430  }
431  /// true, if variables are captured, false otherwise.
432  bool isLoopStarted() const {
433  assert(isOpenMPLoopDirective(getCurrentDirective()) &&
434  "Expected loop-based directive.");
435  return !getTopOfStack().LoopStart;
436  }
437  /// Marks (or clears) declaration as possibly loop counter.
438  void resetPossibleLoopCounter(const Decl *D = nullptr) {
439  getTopOfStack().PossiblyLoopCounter = D ? D->getCanonicalDecl() : D;
440  }
441  /// Gets the possible loop counter decl.
442  const Decl *getPossiblyLoopCunter() const {
443  return getTopOfStack().PossiblyLoopCounter;
444  }
445  /// Start new OpenMP region stack in new non-capturing function.
446  void pushFunction() {
447  assert(!IgnoredStackElements &&
448  "cannot change stack while ignoring elements");
449  const FunctionScopeInfo *CurFnScope = SemaRef.getCurFunction();
450  assert(!isa<CapturingScopeInfo>(CurFnScope));
451  CurrentNonCapturingFunctionScope = CurFnScope;
452  }
453  /// Pop region stack for non-capturing function.
454  void popFunction(const FunctionScopeInfo *OldFSI) {
455  assert(!IgnoredStackElements &&
456  "cannot change stack while ignoring elements");
457  if (!Stack.empty() && Stack.back().second == OldFSI) {
458  assert(Stack.back().first.empty());
459  Stack.pop_back();
460  }
461  CurrentNonCapturingFunctionScope = nullptr;
462  for (const FunctionScopeInfo *FSI : llvm::reverse(SemaRef.FunctionScopes)) {
463  if (!isa<CapturingScopeInfo>(FSI)) {
464  CurrentNonCapturingFunctionScope = FSI;
465  break;
466  }
467  }
468  }
469 
470  void addCriticalWithHint(const OMPCriticalDirective *D, llvm::APSInt Hint) {
471  Criticals.try_emplace(D->getDirectiveName().getAsString(), D, Hint);
472  }
473  const std::pair<const OMPCriticalDirective *, llvm::APSInt>
474  getCriticalWithHint(const DeclarationNameInfo &Name) const {
475  auto I = Criticals.find(Name.getAsString());
476  if (I != Criticals.end())
477  return I->second;
478  return std::make_pair(nullptr, llvm::APSInt());
479  }
480  /// If 'aligned' declaration for given variable \a D was not seen yet,
481  /// add it and return NULL; otherwise return previous occurrence's expression
482  /// for diagnostics.
483  const Expr *addUniqueAligned(const ValueDecl *D, const Expr *NewDE);
484  /// If 'nontemporal' declaration for given variable \a D was not seen yet,
485  /// add it and return NULL; otherwise return previous occurrence's expression
486  /// for diagnostics.
487  const Expr *addUniqueNontemporal(const ValueDecl *D, const Expr *NewDE);
488 
489  /// Register specified variable as loop control variable.
490  void addLoopControlVariable(const ValueDecl *D, VarDecl *Capture);
491  /// Check if the specified variable is a loop control variable for
492  /// current region.
493  /// \return The index of the loop control variable in the list of associated
494  /// for-loops (from outer to inner).
495  const LCDeclInfo isLoopControlVariable(const ValueDecl *D) const;
496  /// Check if the specified variable is a loop control variable for
497  /// parent region.
498  /// \return The index of the loop control variable in the list of associated
499  /// for-loops (from outer to inner).
500  const LCDeclInfo isParentLoopControlVariable(const ValueDecl *D) const;
501  /// Check if the specified variable is a loop control variable for
502  /// current region.
503  /// \return The index of the loop control variable in the list of associated
504  /// for-loops (from outer to inner).
505  const LCDeclInfo isLoopControlVariable(const ValueDecl *D,
506  unsigned Level) const;
507  /// Get the loop control variable for the I-th loop (or nullptr) in
508  /// parent directive.
509  const ValueDecl *getParentLoopControlVariable(unsigned I) const;
510 
511  /// Marks the specified decl \p D as used in scan directive.
512  void markDeclAsUsedInScanDirective(ValueDecl *D) {
513  if (SharingMapTy *Stack = getSecondOnStackOrNull())
514  Stack->UsedInScanDirective.insert(D);
515  }
516 
517  /// Checks if the specified declaration was used in the inner scan directive.
518  bool isUsedInScanDirective(ValueDecl *D) const {
519  if (const SharingMapTy *Stack = getTopOfStackOrNull())
520  return Stack->UsedInScanDirective.contains(D);
521  return false;
522  }
523 
524  /// Adds explicit data sharing attribute to the specified declaration.
525  void addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
526  DeclRefExpr *PrivateCopy = nullptr, unsigned Modifier = 0,
527  bool AppliedToPointee = false);
528 
529  /// Adds additional information for the reduction items with the reduction id
530  /// represented as an operator.
531  void addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
532  BinaryOperatorKind BOK);
533  /// Adds additional information for the reduction items with the reduction id
534  /// represented as reduction identifier.
535  void addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
536  const Expr *ReductionRef);
537  /// Returns the location and reduction operation from the innermost parent
538  /// region for the given \p D.
539  const DSAVarData
540  getTopMostTaskgroupReductionData(const ValueDecl *D, SourceRange &SR,
541  BinaryOperatorKind &BOK,
542  Expr *&TaskgroupDescriptor) const;
543  /// Returns the location and reduction operation from the innermost parent
544  /// region for the given \p D.
545  const DSAVarData
546  getTopMostTaskgroupReductionData(const ValueDecl *D, SourceRange &SR,
547  const Expr *&ReductionRef,
548  Expr *&TaskgroupDescriptor) const;
549  /// Return reduction reference expression for the current taskgroup or
550  /// parallel/worksharing directives with task reductions.
551  Expr *getTaskgroupReductionRef() const {
552  assert((getTopOfStack().Directive == OMPD_taskgroup ||
553  ((isOpenMPParallelDirective(getTopOfStack().Directive) ||
554  isOpenMPWorksharingDirective(getTopOfStack().Directive)) &&
555  !isOpenMPSimdDirective(getTopOfStack().Directive))) &&
556  "taskgroup reference expression requested for non taskgroup or "
557  "parallel/worksharing directive.");
558  return getTopOfStack().TaskgroupReductionRef;
559  }
560  /// Checks if the given \p VD declaration is actually a taskgroup reduction
561  /// descriptor variable at the \p Level of OpenMP regions.
562  bool isTaskgroupReductionRef(const ValueDecl *VD, unsigned Level) const {
563  return getStackElemAtLevel(Level).TaskgroupReductionRef &&
564  cast<DeclRefExpr>(getStackElemAtLevel(Level).TaskgroupReductionRef)
565  ->getDecl() == VD;
566  }
567 
568  /// Returns data sharing attributes from top of the stack for the
569  /// specified declaration.
570  const DSAVarData getTopDSA(ValueDecl *D, bool FromParent);
571  /// Returns data-sharing attributes for the specified declaration.
572  const DSAVarData getImplicitDSA(ValueDecl *D, bool FromParent) const;
573  /// Returns data-sharing attributes for the specified declaration.
574  const DSAVarData getImplicitDSA(ValueDecl *D, unsigned Level) const;
575  /// Checks if the specified variable has data-sharing attributes which
576  /// match specified \a CPred predicate in any directive which matches \a DPred
577  /// predicate.
578  const DSAVarData
579  hasDSA(ValueDecl *D,
580  const llvm::function_ref<bool(OpenMPClauseKind, bool)> CPred,
581  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
582  bool FromParent) const;
583  /// Checks if the specified variable has data-sharing attributes which
584  /// match specified \a CPred predicate in any innermost directive which
585  /// matches \a DPred predicate.
586  const DSAVarData
587  hasInnermostDSA(ValueDecl *D,
588  const llvm::function_ref<bool(OpenMPClauseKind, bool)> CPred,
589  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
590  bool FromParent) const;
591  /// Checks if the specified variable has explicit data-sharing
592  /// attributes which match specified \a CPred predicate at the specified
593  /// OpenMP region.
594  bool
595  hasExplicitDSA(const ValueDecl *D,
596  const llvm::function_ref<bool(OpenMPClauseKind, bool)> CPred,
597  unsigned Level, bool NotLastprivate = false) const;
598 
599  /// Returns true if the directive at level \p Level matches the
600  /// specified \a DPred predicate.
601  bool hasExplicitDirective(
602  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
603  unsigned Level) const;
604 
605  /// Finds a directive which matches specified \a DPred predicate.
606  bool hasDirective(
607  const llvm::function_ref<bool(
608  OpenMPDirectiveKind, const DeclarationNameInfo &, SourceLocation)>
609  DPred,
610  bool FromParent) const;
611 
612  /// Returns currently analyzed directive.
613  OpenMPDirectiveKind getCurrentDirective() const {
614  const SharingMapTy *Top = getTopOfStackOrNull();
615  return Top ? Top->Directive : OMPD_unknown;
616  }
617  /// Returns directive kind at specified level.
618  OpenMPDirectiveKind getDirective(unsigned Level) const {
619  assert(!isStackEmpty() && "No directive at specified level.");
620  return getStackElemAtLevel(Level).Directive;
621  }
622  /// Returns the capture region at the specified level.
623  OpenMPDirectiveKind getCaptureRegion(unsigned Level,
624  unsigned OpenMPCaptureLevel) const {
625  SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
626  getOpenMPCaptureRegions(CaptureRegions, getDirective(Level));
627  return CaptureRegions[OpenMPCaptureLevel];
628  }
629  /// Returns parent directive.
630  OpenMPDirectiveKind getParentDirective() const {
631  const SharingMapTy *Parent = getSecondOnStackOrNull();
632  return Parent ? Parent->Directive : OMPD_unknown;
633  }
634 
635  /// Add requires decl to internal vector
636  void addRequiresDecl(OMPRequiresDecl *RD) { RequiresDecls.push_back(RD); }
637 
638  /// Checks if the defined 'requires' directive has specified type of clause.
639  template <typename ClauseType> bool hasRequiresDeclWithClause() const {
640  return llvm::any_of(RequiresDecls, [](const OMPRequiresDecl *D) {
641  return llvm::any_of(D->clauselists(), [](const OMPClause *C) {
642  return isa<ClauseType>(C);
643  });
644  });
645  }
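 // Illustrative sketch (added for exposition; not part of the original file).
 // The template is instantiated with a concrete 'requires' clause class, e.g.
 //
 //   if (hasRequiresDeclWithClause<OMPUnifiedSharedMemoryClause>())
 //     ...; // a 'requires unified_shared_memory' directive was seen earlier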
646 
647  /// Checks for a duplicate clause amongst previously declared requires
648  /// directives
649  bool hasDuplicateRequiresClause(ArrayRef<OMPClause *> ClauseList) const {
650  bool IsDuplicate = false;
651  for (OMPClause *CNew : ClauseList) {
652  for (const OMPRequiresDecl *D : RequiresDecls) {
653  for (const OMPClause *CPrev : D->clauselists()) {
654  if (CNew->getClauseKind() == CPrev->getClauseKind()) {
655  SemaRef.Diag(CNew->getBeginLoc(),
656  diag::err_omp_requires_clause_redeclaration)
657  << getOpenMPClauseName(CNew->getClauseKind());
658  SemaRef.Diag(CPrev->getBeginLoc(),
659  diag::note_omp_requires_previous_clause)
660  << getOpenMPClauseName(CPrev->getClauseKind());
661  IsDuplicate = true;
662  }
663  }
664  }
665  }
666  return IsDuplicate;
667  }
668 
669  /// Add location of previously encountered target to internal vector
670  void addTargetDirLocation(SourceLocation LocStart) {
671  TargetLocations.push_back(LocStart);
672  }
673 
674  /// Add location for the first encountered atomic directive.
675  void addAtomicDirectiveLoc(SourceLocation Loc) {
676  if (AtomicLocation.isInvalid())
677  AtomicLocation = Loc;
678  }
679 
680  /// Returns the location of the first encountered atomic directive in the
681  /// module.
682  SourceLocation getAtomicDirectiveLoc() const { return AtomicLocation; }
683 
684  // Return previously encountered target region locations.
685  ArrayRef<SourceLocation> getEncounteredTargetLocs() const {
686  return TargetLocations;
687  }
688 
689  /// Set default data sharing attribute to none.
690  void setDefaultDSANone(SourceLocation Loc) {
691  getTopOfStack().DefaultAttr = DSA_none;
692  getTopOfStack().DefaultAttrLoc = Loc;
693  }
694  /// Set default data sharing attribute to shared.
695  void setDefaultDSAShared(SourceLocation Loc) {
696  getTopOfStack().DefaultAttr = DSA_shared;
697  getTopOfStack().DefaultAttrLoc = Loc;
698  }
699  /// Set default data sharing attribute to private.
700  void setDefaultDSAPrivate(SourceLocation Loc) {
701  getTopOfStack().DefaultAttr = DSA_private;
702  getTopOfStack().DefaultAttrLoc = Loc;
703  }
704  /// Set default data sharing attribute to firstprivate.
705  void setDefaultDSAFirstPrivate(SourceLocation Loc) {
706  getTopOfStack().DefaultAttr = DSA_firstprivate;
707  getTopOfStack().DefaultAttrLoc = Loc;
708  }
709  /// Set default data mapping attribute to Modifier:Kind
710  void setDefaultDMAAttr(OpenMPDefaultmapClauseModifier M,
711  OpenMPDefaultmapClauseKind Kind, SourceLocation Loc) {
712  DefaultmapInfo &DMI = getTopOfStack().DefaultmapMap[Kind];
713  DMI.ImplicitBehavior = M;
714  DMI.SLoc = Loc;
715  }
716  /// Check whether the implicit-behavior has been set in defaultmap
717  bool checkDefaultmapCategory(OpenMPDefaultmapClauseKind VariableCategory) {
718  if (VariableCategory == OMPC_DEFAULTMAP_unknown)
719  return getTopOfStack()
720  .DefaultmapMap[OMPC_DEFAULTMAP_aggregate]
721  .ImplicitBehavior != OMPC_DEFAULTMAP_MODIFIER_unknown ||
722  getTopOfStack()
723  .DefaultmapMap[OMPC_DEFAULTMAP_scalar]
724  .ImplicitBehavior != OMPC_DEFAULTMAP_MODIFIER_unknown ||
725  getTopOfStack()
726  .DefaultmapMap[OMPC_DEFAULTMAP_pointer]
727  .ImplicitBehavior != OMPC_DEFAULTMAP_MODIFIER_unknown;
728  return getTopOfStack().DefaultmapMap[VariableCategory].ImplicitBehavior !=
729  OMPC_DEFAULTMAP_MODIFIER_unknown;
730  }
731 
732  ArrayRef<llvm::omp::TraitProperty> getConstructTraits() {
733  return ConstructTraits;
734  }
735  void handleConstructTrait(ArrayRef<llvm::omp::TraitProperty> Traits,
736  bool ScopeEntry) {
737  if (ScopeEntry)
738  ConstructTraits.append(Traits.begin(), Traits.end());
739  else
740  for (llvm::omp::TraitProperty Trait : llvm::reverse(Traits)) {
741  llvm::omp::TraitProperty Top = ConstructTraits.pop_back_val();
742  assert(Top == Trait && "Something left a trait on the stack!");
743  (void)Trait;
744  (void)Top;
745  }
746  }
747 
748  DefaultDataSharingAttributes getDefaultDSA(unsigned Level) const {
749  return getStackSize() <= Level ? DSA_unspecified
750  : getStackElemAtLevel(Level).DefaultAttr;
751  }
752  DefaultDataSharingAttributes getDefaultDSA() const {
753  return isStackEmpty() ? DSA_unspecified : getTopOfStack().DefaultAttr;
754  }
755  SourceLocation getDefaultDSALocation() const {
756  return isStackEmpty() ? SourceLocation() : getTopOfStack().DefaultAttrLoc;
757  }
758  OpenMPDefaultmapClauseModifier
759  getDefaultmapModifier(OpenMPDefaultmapClauseKind Kind) const {
760  return isStackEmpty()
761  ? OMPC_DEFAULTMAP_MODIFIER_unknown
762  : getTopOfStack().DefaultmapMap[Kind].ImplicitBehavior;
763  }
764  OpenMPDefaultmapClauseModifier
765  getDefaultmapModifierAtLevel(unsigned Level,
766  OpenMPDefaultmapClauseKind Kind) const {
767  return getStackElemAtLevel(Level).DefaultmapMap[Kind].ImplicitBehavior;
768  }
769  bool isDefaultmapCapturedByRef(unsigned Level,
770  OpenMPDefaultmapClauseKind Kind) const {
771  OpenMPDefaultmapClauseModifier M =
772  getDefaultmapModifierAtLevel(Level, Kind);
773  if (Kind == OMPC_DEFAULTMAP_scalar || Kind == OMPC_DEFAULTMAP_pointer) {
774  return (M == OMPC_DEFAULTMAP_MODIFIER_alloc) ||
775  (M == OMPC_DEFAULTMAP_MODIFIER_to) ||
776  (M == OMPC_DEFAULTMAP_MODIFIER_from) ||
777  (M == OMPC_DEFAULTMAP_MODIFIER_tofrom);
778  }
779  return true;
780  }
781  static bool mustBeFirstprivateBase(OpenMPDefaultmapClauseModifier M,
782  OpenMPDefaultmapClauseKind Kind) {
783  switch (Kind) {
784  case OMPC_DEFAULTMAP_scalar:
785  case OMPC_DEFAULTMAP_pointer:
786  return (M == OMPC_DEFAULTMAP_MODIFIER_unknown) ||
787  (M == OMPC_DEFAULTMAP_MODIFIER_firstprivate) ||
788  (M == OMPC_DEFAULTMAP_MODIFIER_default);
789  case OMPC_DEFAULTMAP_aggregate:
790  return M == OMPC_DEFAULTMAP_MODIFIER_firstprivate;
791  default:
792  break;
793  }
794  llvm_unreachable("Unexpected OpenMPDefaultmapClauseKind enum");
795  }
796  bool mustBeFirstprivateAtLevel(unsigned Level,
797  OpenMPDefaultmapClauseKind Kind) const {
798  OpenMPDefaultmapClauseModifier M =
799  getDefaultmapModifierAtLevel(Level, Kind);
800  return mustBeFirstprivateBase(M, Kind);
801  }
802  bool mustBeFirstprivate(OpenMPDefaultmapClauseKind Kind) const {
803  OpenMPDefaultmapClauseModifier M = getDefaultmapModifier(Kind);
804  return mustBeFirstprivateBase(M, Kind);
805  }
806 
807  /// Checks if the specified variable is a threadprivate.
808  bool isThreadPrivate(VarDecl *D) {
809  const DSAVarData DVar = getTopDSA(D, false);
810  return isOpenMPThreadPrivate(DVar.CKind);
811  }
812 
813  /// Marks current region as ordered (it has an 'ordered' clause).
814  void setOrderedRegion(bool IsOrdered, const Expr *Param,
815  OMPOrderedClause *Clause) {
816  if (IsOrdered)
817  getTopOfStack().OrderedRegion.emplace(Param, Clause);
818  else
819  getTopOfStack().OrderedRegion.reset();
820  }
821  /// Returns true, if region is ordered (has associated 'ordered' clause),
822  /// false - otherwise.
823  bool isOrderedRegion() const {
824  if (const SharingMapTy *Top = getTopOfStackOrNull())
825  return Top->OrderedRegion.hasValue();
826  return false;
827  }
828  /// Returns optional parameter for the ordered region.
829  std::pair<const Expr *, OMPOrderedClause *> getOrderedRegionParam() const {
830  if (const SharingMapTy *Top = getTopOfStackOrNull())
831  if (Top->OrderedRegion.hasValue())
832  return Top->OrderedRegion.getValue();
833  return std::make_pair(nullptr, nullptr);
834  }
835  /// Returns true, if parent region is ordered (has associated
836  /// 'ordered' clause), false - otherwise.
837  bool isParentOrderedRegion() const {
838  if (const SharingMapTy *Parent = getSecondOnStackOrNull())
839  return Parent->OrderedRegion.hasValue();
840  return false;
841  }
842  /// Returns optional parameter for the ordered region.
843  std::pair<const Expr *, OMPOrderedClause *>
844  getParentOrderedRegionParam() const {
845  if (const SharingMapTy *Parent = getSecondOnStackOrNull())
846  if (Parent->OrderedRegion.hasValue())
847  return Parent->OrderedRegion.getValue();
848  return std::make_pair(nullptr, nullptr);
849  }
850  /// Marks current region as nowait (it has a 'nowait' clause).
851  void setNowaitRegion(bool IsNowait = true) {
852  getTopOfStack().NowaitRegion = IsNowait;
853  }
854  /// Returns true, if parent region is nowait (has associated
855  /// 'nowait' clause), false - otherwise.
856  bool isParentNowaitRegion() const {
857  if (const SharingMapTy *Parent = getSecondOnStackOrNull())
858  return Parent->NowaitRegion;
859  return false;
860  }
861  /// Marks current region as untied (it has a 'untied' clause).
862  void setUntiedRegion(bool IsUntied = true) {
863  getTopOfStack().UntiedRegion = IsUntied;
864  }
865  /// Return true if current region is untied.
866  bool isUntiedRegion() const {
867  const SharingMapTy *Top = getTopOfStackOrNull();
868  return Top ? Top->UntiedRegion : false;
869  }
870  /// Marks parent region as cancel region.
871  void setParentCancelRegion(bool Cancel = true) {
872  if (SharingMapTy *Parent = getSecondOnStackOrNull())
873  Parent->CancelRegion |= Cancel;
874  }
875  /// Return true if current region has inner cancel construct.
876  bool isCancelRegion() const {
877  const SharingMapTy *Top = getTopOfStackOrNull();
878  return Top ? Top->CancelRegion : false;
879  }
880 
881  /// Mark that parent region already has scan directive.
882  void setParentHasScanDirective(SourceLocation Loc) {
883  if (SharingMapTy *Parent = getSecondOnStackOrNull())
884  Parent->PrevScanLocation = Loc;
885  }
886  /// Return true if the parent region already has a scan directive.
887  bool doesParentHasScanDirective() const {
888  const SharingMapTy *Top = getSecondOnStackOrNull();
889  return Top ? Top->PrevScanLocation.isValid() : false;
890  }
891  /// Returns the location of the scan directive previously seen in the parent region.
892  SourceLocation getParentScanDirectiveLoc() const {
893  const SharingMapTy *Top = getSecondOnStackOrNull();
894  return Top ? Top->PrevScanLocation : SourceLocation();
895  }
896  /// Mark that parent region already has ordered directive.
897  void setParentHasOrderedDirective(SourceLocation Loc) {
898  if (SharingMapTy *Parent = getSecondOnStackOrNull())
899  Parent->PrevOrderedLocation = Loc;
900  }
901  /// Return true if the parent region already has an ordered directive.
902  bool doesParentHasOrderedDirective() const {
903  const SharingMapTy *Top = getSecondOnStackOrNull();
904  return Top ? Top->PrevOrderedLocation.isValid() : false;
905  }
906  /// Returns the location of the previously specified ordered directive.
907  SourceLocation getParentOrderedDirectiveLoc() const {
908  const SharingMapTy *Top = getSecondOnStackOrNull();
909  return Top ? Top->PrevOrderedLocation : SourceLocation();
910  }
911 
912  /// Set collapse value for the region.
913  void setAssociatedLoops(unsigned Val) {
914  getTopOfStack().AssociatedLoops = Val;
915  if (Val > 1)
916  getTopOfStack().HasMutipleLoops = true;
917  }
918  /// Return collapse value for region.
919  unsigned getAssociatedLoops() const {
920  const SharingMapTy *Top = getTopOfStackOrNull();
921  return Top ? Top->AssociatedLoops : 0;
922  }
923  /// Returns true if the construct is associated with multiple loops.
924  bool hasMutipleLoops() const {
925  const SharingMapTy *Top = getTopOfStackOrNull();
926  return Top ? Top->HasMutipleLoops : false;
927  }
928 
929  /// Marks current target region as one with closely nested teams
930  /// region.
931  void setParentTeamsRegionLoc(SourceLocation TeamsRegionLoc) {
932  if (SharingMapTy *Parent = getSecondOnStackOrNull())
933  Parent->InnerTeamsRegionLoc = TeamsRegionLoc;
934  }
935  /// Returns true, if current region has closely nested teams region.
936  bool hasInnerTeamsRegion() const {
937  return getInnerTeamsRegionLoc().isValid();
938  }
939  /// Returns location of the nested teams region (if any).
940  SourceLocation getInnerTeamsRegionLoc() const {
941  const SharingMapTy *Top = getTopOfStackOrNull();
942  return Top ? Top->InnerTeamsRegionLoc : SourceLocation();
943  }
944 
945  Scope *getCurScope() const {
946  const SharingMapTy *Top = getTopOfStackOrNull();
947  return Top ? Top->CurScope : nullptr;
948  }
949  void setContext(DeclContext *DC) { getTopOfStack().Context = DC; }
950  SourceLocation getConstructLoc() const {
951  const SharingMapTy *Top = getTopOfStackOrNull();
952  return Top ? Top->ConstructLoc : SourceLocation();
953  }
954 
955  /// Do the check specified in \a Check to all component lists and return true
956  /// if any issue is found.
957  bool checkMappableExprComponentListsForDecl(
958  const ValueDecl *VD, bool CurrentRegionOnly,
959  const llvm::function_ref<
960  bool(OMPClauseMappableExprCommon::MappableExprComponentListRef,
961  OpenMPClauseKind)>
962  Check) const {
963  if (isStackEmpty())
964  return false;
965  auto SI = begin();
966  auto SE = end();
967 
968  if (SI == SE)
969  return false;
970 
971  if (CurrentRegionOnly)
972  SE = std::next(SI);
973  else
974  std::advance(SI, 1);
975 
976  for (; SI != SE; ++SI) {
977  auto MI = SI->MappedExprComponents.find(VD);
978  if (MI != SI->MappedExprComponents.end())
979  for (OMPClauseMappableExprCommon::MappableExprComponentListRef L :
980  MI->second.Components)
981  if (Check(L, MI->second.Kind))
982  return true;
983  }
984  return false;
985  }
986 
987  /// Do the check specified in \a Check to all component lists at a given level
988  /// and return true if any issue is found.
989  bool checkMappableExprComponentListsForDeclAtLevel(
990  const ValueDecl *VD, unsigned Level,
991  const llvm::function_ref<
992  bool(OMPClauseMappableExprCommon::MappableExprComponentListRef,
993  OpenMPClauseKind)>
994  Check) const {
995  if (getStackSize() <= Level)
996  return false;
997 
998  const SharingMapTy &StackElem = getStackElemAtLevel(Level);
999  auto MI = StackElem.MappedExprComponents.find(VD);
1000  if (MI != StackElem.MappedExprComponents.end())
1001  for (OMPClauseMappableExprCommon::MappableExprComponentListRef L :
1002  MI->second.Components)
1003  if (Check(L, MI->second.Kind))
1004  return true;
1005  return false;
1006  }
1007 
1008  /// Create a new mappable expression component list associated with a given
1009  /// declaration and initialize it with the provided list of components.
1010  void addMappableExpressionComponents(
1011  const ValueDecl *VD,
1012  OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
1013  OpenMPClauseKind WhereFoundClauseKind) {
1014  MappedExprComponentTy &MEC = getTopOfStack().MappedExprComponents[VD];
1015  // Create new entry and append the new components there.
1016  MEC.Components.resize(MEC.Components.size() + 1);
1017  MEC.Components.back().append(Components.begin(), Components.end());
1018  MEC.Kind = WhereFoundClauseKind;
1019  }
1020 
1021  unsigned getNestingLevel() const {
1022  assert(!isStackEmpty());
1023  return getStackSize() - 1;
1024  }
1025  void addDoacrossDependClause(OMPDependClause *C,
1026  const OperatorOffsetTy &OpsOffs) {
1027  SharingMapTy *Parent = getSecondOnStackOrNull();
1028  assert(Parent && isOpenMPWorksharingDirective(Parent->Directive));
1029  Parent->DoacrossDepends.try_emplace(C, OpsOffs);
1030  }
1031  llvm::iterator_range<DoacrossDependMapTy::const_iterator>
1032  getDoacrossDependClauses() const {
1033  const SharingMapTy &StackElem = getTopOfStack();
1034  if (isOpenMPWorksharingDirective(StackElem.Directive)) {
1035  const DoacrossDependMapTy &Ref = StackElem.DoacrossDepends;
1036  return llvm::make_range(Ref.begin(), Ref.end());
1037  }
1038  return llvm::make_range(StackElem.DoacrossDepends.end(),
1039  StackElem.DoacrossDepends.end());
1040  }
1041 
1042  // Store types of classes which have been explicitly mapped
1043  void addMappedClassesQualTypes(QualType QT) {
1044  SharingMapTy &StackElem = getTopOfStack();
1045  StackElem.MappedClassesQualTypes.insert(QT);
1046  }
1047 
1048  // Return set of mapped classes types
1049  bool isClassPreviouslyMapped(QualType QT) const {
1050  const SharingMapTy &StackElem = getTopOfStack();
1051  return StackElem.MappedClassesQualTypes.contains(QT);
1052  }
1053 
1054  /// Adds global declare target to the parent target region.
1055  void addToParentTargetRegionLinkGlobals(DeclRefExpr *E) {
1056  assert(*OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(
1057  E->getDecl()) == OMPDeclareTargetDeclAttr::MT_Link &&
1058  "Expected declare target link global.");
1059  for (auto &Elem : *this) {
1060  if (isOpenMPTargetExecutionDirective(Elem.Directive)) {
1061  Elem.DeclareTargetLinkVarDecls.push_back(E);
1062  return;
1063  }
1064  }
1065  }
1066 
1067  /// Returns the list of globals with declare target link if current directive
1068  /// is target.
1069  ArrayRef<DeclRefExpr *> getLinkGlobals() const {
1070  assert(isOpenMPTargetExecutionDirective(getCurrentDirective()) &&
1071  "Expected target executable directive.");
1072  return getTopOfStack().DeclareTargetLinkVarDecls;
1073  }
1074 
1075  /// Adds list of allocators expressions.
1076  void addInnerAllocatorExpr(Expr *E) {
1077  getTopOfStack().InnerUsedAllocators.push_back(E);
1078  }
1079  /// Return list of used allocators.
1080  ArrayRef<Expr *> getInnerAllocators() const {
1081  return getTopOfStack().InnerUsedAllocators;
1082  }
1083  /// Marks the declaration as implicitly firstprivate in the task-based
1084  /// regions.
1085  void addImplicitTaskFirstprivate(unsigned Level, Decl *D) {
1086  getStackElemAtLevel(Level).ImplicitTaskFirstprivates.insert(D);
1087  }
1088  /// Checks if the decl is implicitly firstprivate in the task-based region.
1089  bool isImplicitTaskFirstprivate(Decl *D) const {
1090  return getTopOfStack().ImplicitTaskFirstprivates.contains(D);
1091  }
1092 
1093  /// Marks decl as used in uses_allocators clause as the allocator.
1094  void addUsesAllocatorsDecl(const Decl *D, UsesAllocatorsDeclKind Kind) {
1095  getTopOfStack().UsesAllocatorsDecls.try_emplace(D, Kind);
1096  }
1097  /// Checks if specified decl is used in uses allocator clause as the
1098  /// allocator.
1099  Optional<UsesAllocatorsDeclKind> isUsesAllocatorsDecl(unsigned Level,
1100  const Decl *D) const {
1101  const SharingMapTy &StackElem = getTopOfStack();
1102  auto I = StackElem.UsesAllocatorsDecls.find(D);
1103  if (I == StackElem.UsesAllocatorsDecls.end())
1104  return None;
1105  return I->getSecond();
1106  }
1107  Optional<UsesAllocatorsDeclKind> isUsesAllocatorsDecl(const Decl *D) const {
1108  const SharingMapTy &StackElem = getTopOfStack();
1109  auto I = StackElem.UsesAllocatorsDecls.find(D);
1110  if (I == StackElem.UsesAllocatorsDecls.end())
1111  return None;
1112  return I->getSecond();
1113  }
1114 
1115  void addDeclareMapperVarRef(Expr *Ref) {
1116  SharingMapTy &StackElem = getTopOfStack();
1117  StackElem.DeclareMapperVar = Ref;
1118  }
1119  const Expr *getDeclareMapperVarRef() const {
1120  const SharingMapTy *Top = getTopOfStackOrNull();
1121  return Top ? Top->DeclareMapperVar : nullptr;
1122  }
1123 };
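// Illustrative sketch (added for exposition; not part of the original file).
// Rough lifecycle of the stack while a directive is analyzed, assuming an
// existing DSAStackTy *Stack owned by Sema, a ValueDecl *VD referenced in the
// region, and placeholder names (DirName, CurScope, Loc, DefaultLoc, RefExpr):
//
//   Stack->push(OMPD_parallel, DirName, CurScope, Loc); // enter the region
//   Stack->setDefaultDSAShared(DefaultLoc);             // 'default(shared)'
//   Stack->addDSA(VD, RefExpr, OMPC_private);           // explicit 'private(v)'
//   DSAStackTy::DSAVarData DVar = Stack->getTopDSA(VD, /*FromParent=*/false);
//   Stack->pop();                                       // leave the region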
1124 
1125 bool isImplicitTaskingRegion(OpenMPDirectiveKind DKind) {
1126  return isOpenMPParallelDirective(DKind) || isOpenMPTeamsDirective(DKind);
1127 }
1128 
1129 bool isImplicitOrExplicitTaskingRegion(OpenMPDirectiveKind DKind) {
1130  return isImplicitTaskingRegion(DKind) || isOpenMPTaskingDirective(DKind) ||
1131  DKind == OMPD_unknown;
1132 }
1133 
1134 } // namespace
1135 
1136 static const Expr *getExprAsWritten(const Expr *E) {
1137  if (const auto *FE = dyn_cast<FullExpr>(E))
1138  E = FE->getSubExpr();
1139 
1140  if (const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
1141  E = MTE->getSubExpr();
1142 
1143  while (const auto *Binder = dyn_cast<CXXBindTemporaryExpr>(E))
1144  E = Binder->getSubExpr();
1145 
1146  if (const auto *ICE = dyn_cast<ImplicitCastExpr>(E))
1147  E = ICE->getSubExprAsWritten();
1148  return E->IgnoreParens();
1149 }
1150 
1151 static Expr *getExprAsWritten(Expr *E) {
1152  return const_cast<Expr *>(getExprAsWritten(const_cast<const Expr *>(E)));
1153 }
1154 
1155 static const ValueDecl *getCanonicalDecl(const ValueDecl *D) {
1156  if (const auto *CED = dyn_cast<OMPCapturedExprDecl>(D))
1157  if (const auto *ME = dyn_cast<MemberExpr>(getExprAsWritten(CED->getInit())))
1158  D = ME->getMemberDecl();
1159  const auto *VD = dyn_cast<VarDecl>(D);
1160  const auto *FD = dyn_cast<FieldDecl>(D);
1161  if (VD != nullptr) {
1162  VD = VD->getCanonicalDecl();
1163  D = VD;
1164  } else {
1165  assert(FD);
1166  FD = FD->getCanonicalDecl();
1167  D = FD;
1168  }
1169  return D;
1170 }
1171 
1172 static ValueDecl *getCanonicalDecl(ValueDecl *D) {
1173  return const_cast<ValueDecl *>(
1174  getCanonicalDecl(const_cast<const ValueDecl *>(D)));
1175 }
1176 
1177 DSAStackTy::DSAVarData DSAStackTy::getDSA(const_iterator &Iter,
1178  ValueDecl *D) const {
1179  D = getCanonicalDecl(D);
1180  auto *VD = dyn_cast<VarDecl>(D);
1181  const auto *FD = dyn_cast<FieldDecl>(D);
1182  DSAVarData DVar;
1183  if (Iter == end()) {
1184  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1185  // in a region but not in construct]
1186  // File-scope or namespace-scope variables referenced in called routines
1187  // in the region are shared unless they appear in a threadprivate
1188  // directive.
1189  if (VD && !VD->isFunctionOrMethodVarDecl() && !isa<ParmVarDecl>(VD))
1190  DVar.CKind = OMPC_shared;
1191 
1192  // OpenMP [2.9.1.2, Data-sharing Attribute Rules for Variables Referenced
1193  // in a region but not in construct]
1194  // Variables with static storage duration that are declared in called
1195  // routines in the region are shared.
1196  if (VD && VD->hasGlobalStorage())
1197  DVar.CKind = OMPC_shared;
1198 
1199  // Non-static data members are shared by default.
1200  if (FD)
1201  DVar.CKind = OMPC_shared;
1202 
1203  return DVar;
1204  }
1205 
1206  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1207  // in a Construct, C/C++, predetermined, p.1]
1208  // Variables with automatic storage duration that are declared in a scope
1209  // inside the construct are private.
1210  if (VD && isOpenMPLocal(VD, Iter) && VD->isLocalVarDecl() &&
1211  (VD->getStorageClass() == SC_Auto || VD->getStorageClass() == SC_None)) {
1212  DVar.CKind = OMPC_private;
1213  return DVar;
1214  }
1215 
1216  DVar.DKind = Iter->Directive;
1217  // Explicitly specified attributes and local variables with predetermined
1218  // attributes.
1219  if (Iter->SharingMap.count(D)) {
1220  const DSAInfo &Data = Iter->SharingMap.lookup(D);
1221  DVar.RefExpr = Data.RefExpr.getPointer();
1222  DVar.PrivateCopy = Data.PrivateCopy;
1223  DVar.CKind = Data.Attributes;
1224  DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
1225  DVar.Modifier = Data.Modifier;
1226  DVar.AppliedToPointee = Data.AppliedToPointee;
1227  return DVar;
1228  }
1229 
1230  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1231  // in a Construct, C/C++, implicitly determined, p.1]
1232  // In a parallel or task construct, the data-sharing attributes of these
1233  // variables are determined by the default clause, if present.
1234  switch (Iter->DefaultAttr) {
1235  case DSA_shared:
1236  DVar.CKind = OMPC_shared;
1237  DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
1238  return DVar;
1239  case DSA_none:
1240  return DVar;
1241  case DSA_firstprivate:
1242  if (VD && VD->getStorageDuration() == SD_Static &&
1243  VD->getDeclContext()->isFileContext()) {
1244  DVar.CKind = OMPC_unknown;
1245  } else {
1246  DVar.CKind = OMPC_firstprivate;
1247  }
1248  DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
1249  return DVar;
1250  case DSA_private:
1251  // each variable with static storage duration that is declared
1252  // in a namespace or global scope and referenced in the construct,
1253  // and that does not have a predetermined data-sharing attribute
1254  if (VD && VD->getStorageDuration() == SD_Static &&
1255  VD->getDeclContext()->isFileContext()) {
1256  DVar.CKind = OMPC_unknown;
1257  } else {
1258  DVar.CKind = OMPC_private;
1259  }
1260  DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
1261  return DVar;
1262  case DSA_unspecified:
1263  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1264  // in a Construct, implicitly determined, p.2]
1265  // In a parallel construct, if no default clause is present, these
1266  // variables are shared.
1267  DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
1268  if ((isOpenMPParallelDirective(DVar.DKind) &&
1269  !isOpenMPTaskLoopDirective(DVar.DKind)) ||
1270  isOpenMPTeamsDirective(DVar.DKind)) {
1271  DVar.CKind = OMPC_shared;
1272  return DVar;
1273  }
1274 
1275  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1276  // in a Construct, implicitly determined, p.4]
1277  // In a task construct, if no default clause is present, a variable that in
1278  // the enclosing context is determined to be shared by all implicit tasks
1279  // bound to the current team is shared.
1280  if (isOpenMPTaskingDirective(DVar.DKind)) {
1281  DSAVarData DVarTemp;
1282  const_iterator I = Iter, E = end();
1283  do {
1284  ++I;
1285  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables
1286  // Referenced in a Construct, implicitly determined, p.6]
1287  // In a task construct, if no default clause is present, a variable
1288  // whose data-sharing attribute is not determined by the rules above is
1289  // firstprivate.
1290  DVarTemp = getDSA(I, D);
1291  if (DVarTemp.CKind != OMPC_shared) {
1292  DVar.RefExpr = nullptr;
1293  DVar.CKind = OMPC_firstprivate;
1294  return DVar;
1295  }
1296  } while (I != E && !isImplicitTaskingRegion(I->Directive));
1297  DVar.CKind =
1298  (DVarTemp.CKind == OMPC_unknown) ? OMPC_firstprivate : OMPC_shared;
1299  return DVar;
1300  }
1301  }
1302  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1303  // in a Construct, implicitly determined, p.3]
1304  // For constructs other than task, if no default clause is present, these
1305  // variables inherit their data-sharing attributes from the enclosing
1306  // context.
1307  return getDSA(++Iter, D);
1308 }
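// Illustrative example (added for exposition; not part of the original file).
// How the implicit rules above compose:
//
//   int x = 0;
//   #pragma omp parallel        // no 'default' clause -> DSA_unspecified
//   {
//     #pragma omp task          // no 'default' clause
//     x++;
//   }
//
// 'x' is implicitly shared in the parallel region, so the tasking rule above
// keeps it shared for the task; had it not been shared in the enclosing
// context, the task would make it firstprivate.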
1309 
1310 const Expr *DSAStackTy::addUniqueAligned(const ValueDecl *D,
1311  const Expr *NewDE) {
1312  assert(!isStackEmpty() && "Data sharing attributes stack is empty");
1313  D = getCanonicalDecl(D);
1314  SharingMapTy &StackElem = getTopOfStack();
1315  auto It = StackElem.AlignedMap.find(D);
1316  if (It == StackElem.AlignedMap.end()) {
1317  assert(NewDE && "Unexpected nullptr expr to be added into aligned map");
1318  StackElem.AlignedMap[D] = NewDE;
1319  return nullptr;
1320  }
1321  assert(It->second && "Unexpected nullptr expr in the aligned map");
1322  return It->second;
1323 }
1324 
1325 const Expr *DSAStackTy::addUniqueNontemporal(const ValueDecl *D,
1326  const Expr *NewDE) {
1327  assert(!isStackEmpty() && "Data sharing attributes stack is empty");
1328  D = getCanonicalDecl(D);
1329  SharingMapTy &StackElem = getTopOfStack();
1330  auto It = StackElem.NontemporalMap.find(D);
1331  if (It == StackElem.NontemporalMap.end()) {
1332  assert(NewDE && "Unexpected nullptr expr to be added into nontemporal map");
1333  StackElem.NontemporalMap[D] = NewDE;
1334  return nullptr;
1335  }
1336  assert(It->second && "Unexpected nullptr expr in the nontemporal map");
1337  return It->second;
1338 }
1339 
1340 void DSAStackTy::addLoopControlVariable(const ValueDecl *D, VarDecl *Capture) {
1341  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
1342  D = getCanonicalDecl(D);
1343  SharingMapTy &StackElem = getTopOfStack();
1344  StackElem.LCVMap.try_emplace(
1345  D, LCDeclInfo(StackElem.LCVMap.size() + 1, Capture));
1346 }
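// Illustrative note (added for exposition; not part of the original file):
// loop-control-variable indices are 1-based. For '#pragma omp for collapse(2)'
// the outer loop's iteration variable is registered with index 1 and the inner
// one with index 2; isLoopControlVariable() returns {0, nullptr} for variables
// that are not loop control variables of the region.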
1347 
1348 const DSAStackTy::LCDeclInfo
1349 DSAStackTy::isLoopControlVariable(const ValueDecl *D) const {
1350  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
1351  D = getCanonicalDecl(D);
1352  const SharingMapTy &StackElem = getTopOfStack();
1353  auto It = StackElem.LCVMap.find(D);
1354  if (It != StackElem.LCVMap.end())
1355  return It->second;
1356  return {0, nullptr};
1357 }
1358 
1359 const DSAStackTy::LCDeclInfo
1360 DSAStackTy::isLoopControlVariable(const ValueDecl *D, unsigned Level) const {
1361  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
1362  D = getCanonicalDecl(D);
1363  for (unsigned I = Level + 1; I > 0; --I) {
1364  const SharingMapTy &StackElem = getStackElemAtLevel(I - 1);
1365  auto It = StackElem.LCVMap.find(D);
1366  if (It != StackElem.LCVMap.end())
1367  return It->second;
1368  }
1369  return {0, nullptr};
1370 }
1371 
1372 const DSAStackTy::LCDeclInfo
1373 DSAStackTy::isParentLoopControlVariable(const ValueDecl *D) const {
1374  const SharingMapTy *Parent = getSecondOnStackOrNull();
1375  assert(Parent && "Data-sharing attributes stack is empty");
1376  D = getCanonicalDecl(D);
1377  auto It = Parent->LCVMap.find(D);
1378  if (It != Parent->LCVMap.end())
1379  return It->second;
1380  return {0, nullptr};
1381 }
1382 
1383 const ValueDecl *DSAStackTy::getParentLoopControlVariable(unsigned I) const {
1384  const SharingMapTy *Parent = getSecondOnStackOrNull();
1385  assert(Parent && "Data-sharing attributes stack is empty");
1386  if (Parent->LCVMap.size() < I)
1387  return nullptr;
1388  for (const auto &Pair : Parent->LCVMap)
1389  if (Pair.second.first == I)
1390  return Pair.first;
1391  return nullptr;
1392 }
1393 
1394 void DSAStackTy::addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
1395  DeclRefExpr *PrivateCopy, unsigned Modifier,
1396  bool AppliedToPointee) {
1397  D = getCanonicalDecl(D);
1398  if (A == OMPC_threadprivate) {
1399  DSAInfo &Data = Threadprivates[D];
1400  Data.Attributes = A;
1401  Data.RefExpr.setPointer(E);
1402  Data.PrivateCopy = nullptr;
1403  Data.Modifier = Modifier;
1404  } else {
1405  DSAInfo &Data = getTopOfStack().SharingMap[D];
1406  assert(Data.Attributes == OMPC_unknown || (A == Data.Attributes) ||
1407  (A == OMPC_firstprivate && Data.Attributes == OMPC_lastprivate) ||
1408  (A == OMPC_lastprivate && Data.Attributes == OMPC_firstprivate) ||
1409  (isLoopControlVariable(D).first && A == OMPC_private));
1410  Data.Modifier = Modifier;
1411  if (A == OMPC_lastprivate && Data.Attributes == OMPC_firstprivate) {
1412  Data.RefExpr.setInt(/*IntVal=*/true);
1413  return;
1414  }
1415  const bool IsLastprivate =
1416  A == OMPC_lastprivate || Data.Attributes == OMPC_lastprivate;
1417  Data.Attributes = A;
1418  Data.RefExpr.setPointerAndInt(E, IsLastprivate);
1419  Data.PrivateCopy = PrivateCopy;
1420  Data.AppliedToPointee = AppliedToPointee;
1421  if (PrivateCopy) {
1422  DSAInfo &Data = getTopOfStack().SharingMap[PrivateCopy->getDecl()];
1423  Data.Modifier = Modifier;
1424  Data.Attributes = A;
1425  Data.RefExpr.setPointerAndInt(PrivateCopy, IsLastprivate);
1426  Data.PrivateCopy = nullptr;
1427  Data.AppliedToPointee = AppliedToPointee;
1428  }
1429  }
1430 }
1431 
1432 /// Build a variable declaration for OpenMP loop iteration variable.
1433 static VarDecl *buildVarDecl(Sema &SemaRef, SourceLocation Loc, QualType Type,
1434  StringRef Name, const AttrVec *Attrs = nullptr,
1435  DeclRefExpr *OrigRef = nullptr) {
1436  DeclContext *DC = SemaRef.CurContext;
1437  IdentifierInfo *II = &SemaRef.PP.getIdentifierTable().get(Name);
1438  TypeSourceInfo *TInfo = SemaRef.Context.getTrivialTypeSourceInfo(Type, Loc);
1439  auto *Decl =
1440  VarDecl::Create(SemaRef.Context, DC, Loc, Loc, II, Type, TInfo, SC_None);
1441  if (Attrs) {
1442  for (specific_attr_iterator<AlignedAttr> I(Attrs->begin()), E(Attrs->end());
1443  I != E; ++I)
1444  Decl->addAttr(*I);
1445  }
1446  Decl->setImplicit();
1447  if (OrigRef) {
1448  Decl->addAttr(
1449  OMPReferencedVarAttr::CreateImplicit(SemaRef.Context, OrigRef));
1450  }
1451  return Decl;
1452 }
1453 
1454 static DeclRefExpr *buildDeclRefExpr(Sema &S, VarDecl *D, QualType Ty,
1455  SourceLocation Loc,
1456  bool RefersToCapture = false) {
1457  D->setReferenced();
1458  D->markUsed(S.Context);
1459  return DeclRefExpr::Create(S.getASTContext(), NestedNameSpecifierLoc(),
1460  SourceLocation(), D, RefersToCapture, Loc, Ty,
1461  VK_LValue);
1462 }
1463 
1464 void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
1465  BinaryOperatorKind BOK) {
1466  D = getCanonicalDecl(D);
1467  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
1468  assert(
1469  getTopOfStack().SharingMap[D].Attributes == OMPC_reduction &&
1470  "Additional reduction info may be specified only for reduction items.");
1471  ReductionData &ReductionData = getTopOfStack().ReductionMap[D];
1472  assert(ReductionData.ReductionRange.isInvalid() &&
1473  (getTopOfStack().Directive == OMPD_taskgroup ||
1474  ((isOpenMPParallelDirective(getTopOfStack().Directive) ||
1475  isOpenMPWorksharingDirective(getTopOfStack().Directive)) &&
1476  !isOpenMPSimdDirective(getTopOfStack().Directive))) &&
1477  "Additional reduction info may be specified only once for reduction "
1478  "items.");
1479  ReductionData.set(BOK, SR);
1480  Expr *&TaskgroupReductionRef = getTopOfStack().TaskgroupReductionRef;
1481  if (!TaskgroupReductionRef) {
1482  VarDecl *VD = buildVarDecl(SemaRef, SR.getBegin(),
1483  SemaRef.Context.VoidPtrTy, ".task_red.");
1484  TaskgroupReductionRef =
1485  buildDeclRefExpr(SemaRef, VD, SemaRef.Context.VoidPtrTy, SR.getBegin());
1486  }
1487 }
1488 
1489 void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
1490  const Expr *ReductionRef) {
1491  D = getCanonicalDecl(D);
1492  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
1493  assert(
1494  getTopOfStack().SharingMap[D].Attributes == OMPC_reduction &&
1495  "Additional reduction info may be specified only for reduction items.");
1496  ReductionData &ReductionData = getTopOfStack().ReductionMap[D];
1497  assert(ReductionData.ReductionRange.isInvalid() &&
1498  (getTopOfStack().Directive == OMPD_taskgroup ||
1499  ((isOpenMPParallelDirective(getTopOfStack().Directive) ||
1500  isOpenMPWorksharingDirective(getTopOfStack().Directive)) &&
1501  !isOpenMPSimdDirective(getTopOfStack().Directive))) &&
1502  "Additional reduction info may be specified only once for reduction "
1503  "items.");
1504  ReductionData.set(ReductionRef, SR);
1505  Expr *&TaskgroupReductionRef = getTopOfStack().TaskgroupReductionRef;
1506  if (!TaskgroupReductionRef) {
1507  VarDecl *VD = buildVarDecl(SemaRef, SR.getBegin(),
1508  SemaRef.Context.VoidPtrTy, ".task_red.");
1509  TaskgroupReductionRef =
1510  buildDeclRefExpr(SemaRef, VD, SemaRef.Context.VoidPtrTy, SR.getBegin());
1511  }
1512 }
1513 
1514 const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
1515  const ValueDecl *D, SourceRange &SR, BinaryOperatorKind &BOK,
1516  Expr *&TaskgroupDescriptor) const {
1517  D = getCanonicalDecl(D);
1518  assert(!isStackEmpty() && "Data-sharing attributes stack is empty.");
1519  for (const_iterator I = begin() + 1, E = end(); I != E; ++I) {
1520  const DSAInfo &Data = I->SharingMap.lookup(D);
1521  if (Data.Attributes != OMPC_reduction ||
1522  Data.Modifier != OMPC_REDUCTION_task)
1523  continue;
1524  const ReductionData &ReductionData = I->ReductionMap.lookup(D);
1525  if (!ReductionData.ReductionOp ||
1526  ReductionData.ReductionOp.is<const Expr *>())
1527  return DSAVarData();
1528  SR = ReductionData.ReductionRange;
1529  BOK = ReductionData.ReductionOp.get<ReductionData::BOKPtrType>();
1530  assert(I->TaskgroupReductionRef && "taskgroup reduction reference "
1531  "expression for the descriptor is not "
1532  "set.");
1533  TaskgroupDescriptor = I->TaskgroupReductionRef;
1534  return DSAVarData(I->Directive, OMPC_reduction, Data.RefExpr.getPointer(),
1535  Data.PrivateCopy, I->DefaultAttrLoc, OMPC_REDUCTION_task,
1536  /*AppliedToPointee=*/false);
1537  }
1538  return DSAVarData();
1539 }
1540 
1541 const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
1542  const ValueDecl *D, SourceRange &SR, const Expr *&ReductionRef,
1543  Expr *&TaskgroupDescriptor) const {
1544  D = getCanonicalDecl(D);
1545  assert(!isStackEmpty() && "Data-sharing attributes stack is empty.");
1546  for (const_iterator I = begin() + 1, E = end(); I != E; ++I) {
1547  const DSAInfo &Data = I->SharingMap.lookup(D);
1548  if (Data.Attributes != OMPC_reduction ||
1549  Data.Modifier != OMPC_REDUCTION_task)
1550  continue;
1551  const ReductionData &ReductionData = I->ReductionMap.lookup(D);
1552  if (!ReductionData.ReductionOp ||
1553  !ReductionData.ReductionOp.is<const Expr *>())
1554  return DSAVarData();
1555  SR = ReductionData.ReductionRange;
1556  ReductionRef = ReductionData.ReductionOp.get<const Expr *>();
1557  assert(I->TaskgroupReductionRef && "taskgroup reduction reference "
1558  "expression for the descriptor is not "
1559  "set.");
1560  TaskgroupDescriptor = I->TaskgroupReductionRef;
1561  return DSAVarData(I->Directive, OMPC_reduction, Data.RefExpr.getPointer(),
1562  Data.PrivateCopy, I->DefaultAttrLoc, OMPC_REDUCTION_task,
1563  /*AppliedToPointee=*/false);
1564  }
1565  return DSAVarData();
1566 }
1567 
1568 bool DSAStackTy::isOpenMPLocal(VarDecl *D, const_iterator I) const {
1569  D = D->getCanonicalDecl();
1570  for (const_iterator E = end(); I != E; ++I) {
1571  if (isImplicitOrExplicitTaskingRegion(I->Directive) ||
1572  isOpenMPTargetExecutionDirective(I->Directive)) {
1573  if (I->CurScope) {
1574  Scope *TopScope = I->CurScope->getParent();
1575  Scope *CurScope = getCurScope();
1576  while (CurScope && CurScope != TopScope && !CurScope->isDeclScope(D))
1577  CurScope = CurScope->getParent();
1578  return CurScope != TopScope;
1579  }
1580  for (DeclContext *DC = D->getDeclContext(); DC; DC = DC->getParent())
1581  if (I->Context == DC)
1582  return true;
1583  return false;
1584  }
1585  }
1586  return false;
1587 }
1588 
1589 static bool isConstNotMutableType(Sema &SemaRef, QualType Type,
1590  bool AcceptIfMutable = true,
1591  bool *IsClassType = nullptr) {
1592  ASTContext &Context = SemaRef.getASTContext();
1593  Type = Type.getNonReferenceType().getCanonicalType();
1594  bool IsConstant = Type.isConstant(Context);
1595  Type = Context.getBaseElementType(Type);
1596  const CXXRecordDecl *RD = AcceptIfMutable && SemaRef.getLangOpts().CPlusPlus
1597  ? Type->getAsCXXRecordDecl()
1598  : nullptr;
1599  if (const auto *CTSD = dyn_cast_or_null<ClassTemplateSpecializationDecl>(RD))
1600  if (const ClassTemplateDecl *CTD = CTSD->getSpecializedTemplate())
1601  RD = CTD->getTemplatedDecl();
1602  if (IsClassType)
1603  *IsClassType = RD;
1604  return IsConstant && !(SemaRef.getLangOpts().CPlusPlus && RD &&
1605  RD->hasDefinition() && RD->hasMutableFields());
1606 }
1607 
1608 static bool rejectConstNotMutableType(Sema &SemaRef, const ValueDecl *D,
1609  QualType Type, OpenMPClauseKind CKind,
1610  SourceLocation ELoc,
1611  bool AcceptIfMutable = true,
1612  bool ListItemNotVar = false) {
1613  ASTContext &Context = SemaRef.getASTContext();
1614  bool IsClassType;
1615  if (isConstNotMutableType(SemaRef, Type, AcceptIfMutable, &IsClassType)) {
1616  unsigned Diag = ListItemNotVar ? diag::err_omp_const_list_item
1617  : IsClassType ? diag::err_omp_const_not_mutable_variable
1618  : diag::err_omp_const_variable;
1619  SemaRef.Diag(ELoc, Diag) << getOpenMPClauseName(CKind);
1620  if (!ListItemNotVar && D) {
1621  const VarDecl *VD = dyn_cast<VarDecl>(D);
1622  bool IsDecl = !VD || VD->isThisDeclarationADefinition(Context) ==
1623  VarDecl::DeclarationOnly;
1624  SemaRef.Diag(D->getLocation(),
1625  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
1626  << D;
1627  }
1628  return true;
1629  }
1630  return false;
1631 }
1632 
1633 const DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D,
1634  bool FromParent) {
1635  D = getCanonicalDecl(D);
1636  DSAVarData DVar;
1637 
1638  auto *VD = dyn_cast<VarDecl>(D);
1639  auto TI = Threadprivates.find(D);
1640  if (TI != Threadprivates.end()) {
1641  DVar.RefExpr = TI->getSecond().RefExpr.getPointer();
1642  DVar.CKind = OMPC_threadprivate;
1643  DVar.Modifier = TI->getSecond().Modifier;
1644  return DVar;
1645  }
1646  if (VD && VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
1647  DVar.RefExpr = buildDeclRefExpr(
1648  SemaRef, VD, D->getType().getNonReferenceType(),
1649  VD->getAttr<OMPThreadPrivateDeclAttr>()->getLocation());
1650  DVar.CKind = OMPC_threadprivate;
1651  addDSA(D, DVar.RefExpr, OMPC_threadprivate);
1652  return DVar;
1653  }
1654  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1655  // in a Construct, C/C++, predetermined, p.1]
1656  // Variables appearing in threadprivate directives are threadprivate.
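  // Illustrative example (editor's addition, not part of the original source):
  //   thread_local int Counter = 0;
  //   void f() {
  //   #pragma omp parallel
  //     ++Counter;   // treated as threadprivate: each thread has its own copy
  //   }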
1657  if ((VD && VD->getTLSKind() != VarDecl::TLS_None &&
1658  !(VD->hasAttr<OMPThreadPrivateDeclAttr>() &&
1659  SemaRef.getLangOpts().OpenMPUseTLS &&
1660  SemaRef.getASTContext().getTargetInfo().isTLSSupported())) ||
1661  (VD && VD->getStorageClass() == SC_Register &&
1662  VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())) {
1663  DVar.RefExpr = buildDeclRefExpr(
1664  SemaRef, VD, D->getType().getNonReferenceType(), D->getLocation());
1665  DVar.CKind = OMPC_threadprivate;
1666  addDSA(D, DVar.RefExpr, OMPC_threadprivate);
1667  return DVar;
1668  }
1669  if (SemaRef.getLangOpts().OpenMPCUDAMode && VD &&
1670  VD->isLocalVarDeclOrParm() && !isStackEmpty() &&
1671  !isLoopControlVariable(D).first) {
1672  const_iterator IterTarget =
1673  std::find_if(begin(), end(), [](const SharingMapTy &Data) {
1674  return isOpenMPTargetExecutionDirective(Data.Directive);
1675  });
1676  if (IterTarget != end()) {
1677  const_iterator ParentIterTarget = IterTarget + 1;
1678  for (const_iterator Iter = begin(); Iter != ParentIterTarget; ++Iter) {
1679  if (isOpenMPLocal(VD, Iter)) {
1680  DVar.RefExpr =
1681  buildDeclRefExpr(SemaRef, VD, D->getType().getNonReferenceType(),
1682  D->getLocation());
1683  DVar.CKind = OMPC_threadprivate;
1684  return DVar;
1685  }
1686  }
1687  if (!isClauseParsingMode() || IterTarget != begin()) {
1688  auto DSAIter = IterTarget->SharingMap.find(D);
1689  if (DSAIter != IterTarget->SharingMap.end() &&
1690  isOpenMPPrivate(DSAIter->getSecond().Attributes)) {
1691  DVar.RefExpr = DSAIter->getSecond().RefExpr.getPointer();
1692  DVar.CKind = OMPC_threadprivate;
1693  return DVar;
1694  }
1695  const_iterator End = end();
1696  if (!SemaRef.isOpenMPCapturedByRef(D,
1697  std::distance(ParentIterTarget, End),
1698  /*OpenMPCaptureLevel=*/0)) {
1699  DVar.RefExpr =
1700  buildDeclRefExpr(SemaRef, VD, D->getType().getNonReferenceType(),
1701  IterTarget->ConstructLoc);
1702  DVar.CKind = OMPC_threadprivate;
1703  return DVar;
1704  }
1705  }
1706  }
1707  }
1708 
1709  if (isStackEmpty())
1710  // Not in OpenMP execution region and top scope was already checked.
1711  return DVar;
1712 
1713  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1714  // in a Construct, C/C++, predetermined, p.4]
1715  // Static data members are shared.
1716  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1717  // in a Construct, C/C++, predetermined, p.7]
1718  // Variables with static storage duration that are declared in a scope
1719  // inside the construct are shared.
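  // Illustrative example (editor's addition, not part of the original source):
  //   struct C { static int SDM; };
  //   void g() {
  //   #pragma omp parallel
  //     C::SDM = 1;   // predetermined shared: every thread sees the same SDM
  //   }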
1720  if (VD && VD->isStaticDataMember()) {
1721  // Check for explicitly specified attributes.
1722  const_iterator I = begin();
1723  const_iterator EndI = end();
1724  if (FromParent && I != EndI)
1725  ++I;
1726  if (I != EndI) {
1727  auto It = I->SharingMap.find(D);
1728  if (It != I->SharingMap.end()) {
1729  const DSAInfo &Data = It->getSecond();
1730  DVar.RefExpr = Data.RefExpr.getPointer();
1731  DVar.PrivateCopy = Data.PrivateCopy;
1732  DVar.CKind = Data.Attributes;
1733  DVar.ImplicitDSALoc = I->DefaultAttrLoc;
1734  DVar.DKind = I->Directive;
1735  DVar.Modifier = Data.Modifier;
1736  DVar.AppliedToPointee = Data.AppliedToPointee;
1737  return DVar;
1738  }
1739  }
1740 
1741  DVar.CKind = OMPC_shared;
1742  return DVar;
1743  }
1744 
1745  auto &&MatchesAlways = [](OpenMPDirectiveKind) { return true; };
1746  // The predetermined shared attribute for const-qualified types having no
1747  // mutable members was removed after OpenMP 3.1.
1748  if (SemaRef.LangOpts.OpenMP <= 31) {
1749  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1750  // in a Construct, C/C++, predetermined, p.6]
1751  // Variables with const qualified type having no mutable member are
1752  // shared.
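      // Illustrative example (editor's addition, not part of the original
      // source):
      //   const int N = 10;
      //   #pragma omp parallel default(none)
      //   { int x = N; }   // accepted with -fopenmp-version=31 (N is
      //                    // predetermined shared); with OpenMP >= 4.0 N needs
      //                    // an explicit data-sharing clause here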
1753  if (isConstNotMutableType(SemaRef, D->getType())) {
1754  // Variables with const-qualified type having no mutable member may be
1755  // listed in a firstprivate clause, even if they are static data members.
1756  DSAVarData DVarTemp = hasInnermostDSA(
1757  D,
1758  [](OpenMPClauseKind C, bool) {
1759  return C == OMPC_firstprivate || C == OMPC_shared;
1760  },
1761  MatchesAlways, FromParent);
1762  if (DVarTemp.CKind != OMPC_unknown && DVarTemp.RefExpr)
1763  return DVarTemp;
1764 
1765  DVar.CKind = OMPC_shared;
1766  return DVar;
1767  }
1768  }
1769 
1770  // Explicitly specified attributes and local variables with predetermined
1771  // attributes.
1772  const_iterator I = begin();
1773  const_iterator EndI = end();
1774  if (FromParent && I != EndI)
1775  ++I;
1776  if (I == EndI)
1777  return DVar;
1778  auto It = I->SharingMap.find(D);
1779  if (It != I->SharingMap.end()) {
1780  const DSAInfo &Data = It->getSecond();
1781  DVar.RefExpr = Data.RefExpr.getPointer();
1782  DVar.PrivateCopy = Data.PrivateCopy;
1783  DVar.CKind = Data.Attributes;
1784  DVar.ImplicitDSALoc = I->DefaultAttrLoc;
1785  DVar.DKind = I->Directive;
1786  DVar.Modifier = Data.Modifier;
1787  DVar.AppliedToPointee = Data.AppliedToPointee;
1788  }
1789 
1790  return DVar;
1791 }
1792 
1793 const DSAStackTy::DSAVarData DSAStackTy::getImplicitDSA(ValueDecl *D,
1794  bool FromParent) const {
1795  if (isStackEmpty()) {
1796  const_iterator I;
1797  return getDSA(I, D);
1798  }
1799  D = getCanonicalDecl(D);
1800  const_iterator StartI = begin();
1801  const_iterator EndI = end();
1802  if (FromParent && StartI != EndI)
1803  ++StartI;
1804  return getDSA(StartI, D);
1805 }
1806 
1807 const DSAStackTy::DSAVarData DSAStackTy::getImplicitDSA(ValueDecl *D,
1808  unsigned Level) const {
1809  if (getStackSize() <= Level)
1810  return DSAVarData();
1811  D = getCanonicalDecl(D);
1812  const_iterator StartI = std::next(begin(), getStackSize() - 1 - Level);
1813  return getDSA(StartI, D);
1814 }
1815 
1816 const DSAStackTy::DSAVarData
1817 DSAStackTy::hasDSA(ValueDecl *D,
1818  const llvm::function_ref<bool(OpenMPClauseKind, bool)> CPred,
1819  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
1820  bool FromParent) const {
1821  if (isStackEmpty())
1822  return {};
1823  D = getCanonicalDecl(D);
1824  const_iterator I = begin();
1825  const_iterator EndI = end();
1826  if (FromParent && I != EndI)
1827  ++I;
1828  for (; I != EndI; ++I) {
1829  if (!DPred(I->Directive) &&
1830  !isImplicitOrExplicitTaskingRegion(I->Directive))
1831  continue;
1832  const_iterator NewI = I;
1833  DSAVarData DVar = getDSA(NewI, D);
1834  if (I == NewI && CPred(DVar.CKind, DVar.AppliedToPointee))
1835  return DVar;
1836  }
1837  return {};
1838 }
1839 
1840 const DSAStackTy::DSAVarData DSAStackTy::hasInnermostDSA(
1841  ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind, bool)> CPred,
1842  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
1843  bool FromParent) const {
1844  if (isStackEmpty())
1845  return {};
1846  D = getCanonicalDecl(D);
1847  const_iterator StartI = begin();
1848  const_iterator EndI = end();
1849  if (FromParent && StartI != EndI)
1850  ++StartI;
1851  if (StartI == EndI || !DPred(StartI->Directive))
1852  return {};
1853  const_iterator NewI = StartI;
1854  DSAVarData DVar = getDSA(NewI, D);
1855  return (NewI == StartI && CPred(DVar.CKind, DVar.AppliedToPointee))
1856  ? DVar
1857  : DSAVarData();
1858 }
1859 
1860 bool DSAStackTy::hasExplicitDSA(
1861  const ValueDecl *D,
1862  const llvm::function_ref<bool(OpenMPClauseKind, bool)> CPred,
1863  unsigned Level, bool NotLastprivate) const {
1864  if (getStackSize() <= Level)
1865  return false;
1866  D = getCanonicalDecl(D);
1867  const SharingMapTy &StackElem = getStackElemAtLevel(Level);
1868  auto I = StackElem.SharingMap.find(D);
1869  if (I != StackElem.SharingMap.end() && I->getSecond().RefExpr.getPointer() &&
1870  CPred(I->getSecond().Attributes, I->getSecond().AppliedToPointee) &&
1871  (!NotLastprivate || !I->getSecond().RefExpr.getInt()))
1872  return true;
1873  // Check predetermined rules for the loop control variables.
1874  auto LI = StackElem.LCVMap.find(D);
1875  if (LI != StackElem.LCVMap.end())
1876  return CPred(OMPC_private, /*AppliedToPointee=*/false);
1877  return false;
1878 }
1879 
1880 bool DSAStackTy::hasExplicitDirective(
1881  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
1882  unsigned Level) const {
1883  if (getStackSize() <= Level)
1884  return false;
1885  const SharingMapTy &StackElem = getStackElemAtLevel(Level);
1886  return DPred(StackElem.Directive);
1887 }
1888 
1889 bool DSAStackTy::hasDirective(
1890  const llvm::function_ref<bool(OpenMPDirectiveKind,
1891  const DeclarationNameInfo &, SourceLocation)>
1892  DPred,
1893  bool FromParent) const {
1894  // We look only in the enclosing region.
1895  size_t Skip = FromParent ? 2 : 1;
1896  for (const_iterator I = begin() + std::min(Skip, getStackSize()), E = end();
1897  I != E; ++I) {
1898  if (DPred(I->Directive, I->DirectiveName, I->ConstructLoc))
1899  return true;
1900  }
1901  return false;
1902 }
1903 
1904 void Sema::InitDataSharingAttributesStack() {
1905  VarDataSharingAttributesStack = new DSAStackTy(*this);
1906 }
1907 
1908 #define DSAStack static_cast<DSAStackTy *>(VarDataSharingAttributesStack)
1909 
1910 void Sema::pushOpenMPFunctionRegion() { DSAStack->pushFunction(); }
1911 
1912 void Sema::popOpenMPFunctionRegion(const FunctionScopeInfo *OldFSI) {
1913  DSAStack->popFunction(OldFSI);
1914 }
1915 
1916 static bool isOpenMPDeviceDelayedContext(Sema &S) {
1917  assert(S.LangOpts.OpenMP && S.LangOpts.OpenMPIsDevice &&
1918  "Expected OpenMP device compilation.");
1919  return !S.isInOpenMPTargetExecutionDirective();
1920 }
1921 
1922 namespace {
1923 /// Status of the function emission on the host/device.
1924 enum class FunctionEmissionStatus {
1925  Emitted,
1926  Discarded,
1927  Unknown,
1928 };
1929 } // anonymous namespace
1930 
1931 Sema::SemaDiagnosticBuilder Sema::diagIfOpenMPDeviceCode(SourceLocation Loc,
1932  unsigned DiagID,
1933  FunctionDecl *FD) {
1934  assert(LangOpts.OpenMP && LangOpts.OpenMPIsDevice &&
1935  "Expected OpenMP device compilation.");
1936 
1937  SemaDiagnosticBuilder::Kind Kind = SemaDiagnosticBuilder::K_Nop;
1938  if (FD) {
1939  FunctionEmissionStatus FES = getEmissionStatus(FD);
1940  switch (FES) {
1941  case FunctionEmissionStatus::Emitted:
1942  Kind = SemaDiagnosticBuilder::K_Immediate;
1943  break;
1944  case FunctionEmissionStatus::Unknown:
1945  // TODO: We should always delay diagnostics here in case a target
1946  // region is in a function we do not emit. However, as the
1947  // current diagnostics are associated with the function containing
1948  // the target region and we do not emit that one, we would miss out
1949  // on diagnostics for the target region itself. We need to anchor
1950  // the diagnostics with the new generated function *or* ensure we
1951  // emit diagnostics associated with the surrounding function.
1952  Kind = isOpenMPDeviceDelayedContext(*this)
1953  ? SemaDiagnosticBuilder::K_Deferred
1954  : SemaDiagnosticBuilder::K_Immediate;
1955  break;
1956  case FunctionEmissionStatus::TemplateDiscarded:
1957  case FunctionEmissionStatus::OMPDiscarded:
1958  Kind = SemaDiagnosticBuilder::K_Nop;
1959  break;
1960  case FunctionEmissionStatus::CUDADiscarded:
1961  llvm_unreachable("CUDADiscarded unexpected in OpenMP device compilation");
1962  break;
1963  }
1964  }
1965 
1966  return SemaDiagnosticBuilder(Kind, Loc, DiagID, FD, *this);
1967 }
1968 
1969 Sema::SemaDiagnosticBuilder Sema::diagIfOpenMPHostCode(SourceLocation Loc,
1970  unsigned DiagID,
1971  FunctionDecl *FD) {
1972  assert(LangOpts.OpenMP && !LangOpts.OpenMPIsDevice &&
1973  "Expected OpenMP host compilation.");
1974 
1975  SemaDiagnosticBuilder::Kind Kind = SemaDiagnosticBuilder::K_Nop;
1976  if (FD) {
1977  FunctionEmissionStatus FES = getEmissionStatus(FD);
1978  switch (FES) {
1979  case FunctionEmissionStatus::Emitted:
1980  Kind = SemaDiagnosticBuilder::K_Immediate;
1981  break;
1982  case FunctionEmissionStatus::Unknown:
1983  Kind = SemaDiagnosticBuilder::K_Deferred;
1984  break;
1985  case FunctionEmissionStatus::TemplateDiscarded:
1986  case FunctionEmissionStatus::OMPDiscarded:
1987  case FunctionEmissionStatus::CUDADiscarded:
1988  Kind = SemaDiagnosticBuilder::K_Nop;
1989  break;
1990  }
1991  }
1992 
1993  return SemaDiagnosticBuilder(Kind, Loc, DiagID, FD, *this);
1994 }
1995 
1996 static OpenMPDefaultmapClauseKind
1997 getVariableCategoryFromDecl(const LangOptions &LO, const ValueDecl *VD) {
1998  if (LO.OpenMP <= 45) {
1999  if (VD->getType().getNonReferenceType()->isScalarType())
2000  return OMPC_DEFAULTMAP_scalar;
2001  return OMPC_DEFAULTMAP_aggregate;
2002  }
2003  if (VD->getType().getNonReferenceType()->isAnyPointerType())
2004  return OMPC_DEFAULTMAP_pointer;
2005  if (VD->getType().getNonReferenceType()->isScalarType())
2006  return OMPC_DEFAULTMAP_scalar;
2007  return OMPC_DEFAULTMAP_aggregate;
2008 }
2009 
2010 bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
2011  unsigned OpenMPCaptureLevel) const {
2012  assert(LangOpts.OpenMP && "OpenMP is not allowed");
2013 
2014  ASTContext &Ctx = getASTContext();
2015  bool IsByRef = true;
2016 
2017  // Find the directive that is associated with the provided scope.
2018  D = cast<ValueDecl>(D->getCanonicalDecl());
2019  QualType Ty = D->getType();
2020 
2021  bool IsVariableUsedInMapClause = false;
2022  if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective, Level)) {
2023  // This table summarizes how a given variable should be passed to the device
2024  // given its type and the clauses where it appears. This table is based on
2025  // the description in OpenMP 4.5 [2.10.4, target Construct] and
2026  // OpenMP 4.5 [2.15.5, Data-mapping Attribute Rules and Clauses].
2027  //
2028  // =========================================================================
2029  // | type | defaultmap | pvt | first | is_device_ptr | map | res. |
2030  // | |(tofrom:scalar)| | pvt | | | |
2031  // =========================================================================
2032  // | scl | | | | - | | bycopy|
2033  // | scl | | - | x | - | - | bycopy|
2034  // | scl | | x | - | - | - | null |
2035  // | scl | x | | | - | | byref |
2036  // | scl | x | - | x | - | - | bycopy|
2037  // | scl | x | x | - | - | - | null |
2038  // | scl | | - | - | - | x | byref |
2039  // | scl | x | - | - | - | x | byref |
2040  //
2041  // | agg | n.a. | | | - | | byref |
2042  // | agg | n.a. | - | x | - | - | byref |
2043  // | agg | n.a. | x | - | - | - | null |
2044  // | agg | n.a. | - | - | - | x | byref |
2045  // | agg | n.a. | - | - | - | x[] | byref |
2046  //
2047  // | ptr | n.a. | | | - | | bycopy|
2048  // | ptr | n.a. | - | x | - | - | bycopy|
2049  // | ptr | n.a. | x | - | - | - | null |
2050  // | ptr | n.a. | - | - | - | x | byref |
2051  // | ptr | n.a. | - | - | - | x[] | bycopy|
2052  // | ptr | n.a. | - | - | x | | bycopy|
2053  // | ptr | n.a. | - | - | x | x | bycopy|
2054  // | ptr | n.a. | - | - | x | x[] | bycopy|
2055  // =========================================================================
2056  // Legend:
2057  // scl - scalar
2058  // ptr - pointer
2059  // agg - aggregate
2060  // x - applies
2061  // - - invalid in this combination
2062  // [] - mapped with an array section
2063  // byref - should be mapped by reference
2064  // bycopy - should be mapped by value
2065  // null - initialize a local variable to null on the device
2066  //
2067  // Observations:
2068  // - All scalar declarations that show up in a map clause have to be passed
2069  // by reference, because they may have been mapped in the enclosing data
2070  // environment.
2071  // - If the scalar value does not fit the size of uintptr, it has to be
2072  // passed by reference, regardless of the result in the table above.
2073  // - For pointers mapped by value that have either an implicit map or an
2074  // array section, the runtime library may pass the NULL value to the
2075  // device instead of the value passed to it by the compiler.
2076 
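  // Illustrative example (editor's addition, not part of the original source):
  //   int s = 0;
  //   #pragma omp target map(tofrom : s)
  //   { s += 1; }   // scalar listed in a map clause => captured by reference
  //   #pragma omp target
  //   { s += 1; }   // scalar not mapped => implicitly firstprivate, by copy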
2077  if (Ty->isReferenceType())
2078  Ty = Ty->castAs<ReferenceType>()->getPointeeType();
2079 
2080  // Locate map clauses and see if the variable being captured is referred to
2081  // in any of those clauses. Here we only care about variables, not fields,
2082  // because fields are part of aggregates.
2083  bool IsVariableAssociatedWithSection = false;
2084 
2085  DSAStack->checkMappableExprComponentListsForDeclAtLevel(
2086  D, Level,
2087  [&IsVariableUsedInMapClause, &IsVariableAssociatedWithSection,
2088  D](OMPClauseMappableExprCommon::MappableExprComponentListRef
2089  MapExprComponents,
2090  OpenMPClauseKind WhereFoundClauseKind) {
2091  // Only the map clause information influences how a variable is
2092  // captured. E.g. is_device_ptr does not require changing the default
2093  // behavior.
2094  if (WhereFoundClauseKind != OMPC_map)
2095  return false;
2096 
2097  auto EI = MapExprComponents.rbegin();
2098  auto EE = MapExprComponents.rend();
2099 
2100  assert(EI != EE && "Invalid map expression!");
2101 
2102  if (isa<DeclRefExpr>(EI->getAssociatedExpression()))
2103  IsVariableUsedInMapClause |= EI->getAssociatedDeclaration() == D;
2104 
2105  ++EI;
2106  if (EI == EE)
2107  return false;
2108 
2109  if (isa<ArraySubscriptExpr>(EI->getAssociatedExpression()) ||
2110  isa<OMPArraySectionExpr>(EI->getAssociatedExpression()) ||
2111  isa<MemberExpr>(EI->getAssociatedExpression()) ||
2112  isa<OMPArrayShapingExpr>(EI->getAssociatedExpression())) {
2113  IsVariableAssociatedWithSection = true;
2114  // There is nothing more we need to know about this variable.
2115  return true;
2116  }
2117 
2118  // Keep looking for more map info.
2119  return false;
2120  });
2121 
2122  if (IsVariableUsedInMapClause) {
2123  // If variable is identified in a map clause it is always captured by
2124  // reference except if it is a pointer that is dereferenced somehow.
2125  IsByRef = !(Ty->isPointerType() && IsVariableAssociatedWithSection);
2126  } else {
2127  // By default, all the data that has a scalar type is mapped by copy
2128  // (except for reduction variables).
2129  // Defaultmap scalar is mutually exclusive with defaultmap pointer.
2130  IsByRef = (DSAStack->isForceCaptureByReferenceInTargetExecutable() &&
2131  !Ty->isAnyPointerType()) ||
2132  !Ty->isScalarType() ||
2133  DSAStack->isDefaultmapCapturedByRef(
2134  Level, getVariableCategoryFromDecl(LangOpts, D)) ||
2135  DSAStack->hasExplicitDSA(
2136  D,
2137  [](OpenMPClauseKind K, bool AppliedToPointee) {
2138  return K == OMPC_reduction && !AppliedToPointee;
2139  },
2140  Level);
2141  }
2142  }
2143 
2144  if (IsByRef && Ty.getNonReferenceType()->isScalarType()) {
2145  IsByRef =
2146  ((IsVariableUsedInMapClause &&
2147  DSAStack->getCaptureRegion(Level, OpenMPCaptureLevel) ==
2148  OMPD_target) ||
2149  !(DSAStack->hasExplicitDSA(
2150  D,
2151  [](OpenMPClauseKind K, bool AppliedToPointee) -> bool {
2152  return K == OMPC_firstprivate ||
2153  (K == OMPC_reduction && AppliedToPointee);
2154  },
2155  Level, /*NotLastprivate=*/true) ||
2156  DSAStack->isUsesAllocatorsDecl(Level, D))) &&
2157  // If the variable is artificial and must be captured by value - try to
2158  // capture by value.
2159  !(isa<OMPCapturedExprDecl>(D) && !D->hasAttr<OMPCaptureNoInitAttr>() &&
2160  !cast<OMPCapturedExprDecl>(D)->getInit()->isGLValue()) &&
2161  // If the variable is implicitly firstprivate and scalar - capture by
2162  // copy
2163  !((DSAStack->getDefaultDSA() == DSA_firstprivate ||
2164  DSAStack->getDefaultDSA() == DSA_private) &&
2165  !DSAStack->hasExplicitDSA(
2166  D, [](OpenMPClauseKind K, bool) { return K != OMPC_unknown; },
2167  Level) &&
2168  !DSAStack->isLoopControlVariable(D, Level).first);
2169  }
2170 
2171  // When passing data by copy, we need to make sure it fits the uintptr size
2172  // and alignment, because the runtime library only deals with uintptr types.
2173  // If it does not fit the uintptr size, we need to pass the data by reference
2174  // instead.
2175  if (!IsByRef &&
2176  (Ctx.getTypeSizeInChars(Ty) >
2177  Ctx.getTypeSizeInChars(Ctx.getUIntPtrType()) ||
2178  Ctx.getDeclAlign(D) > Ctx.getTypeAlignInChars(Ctx.getUIntPtrType()))) {
2179  IsByRef = true;
2180  }
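  // Illustrative example (editor's addition; behavior depends on the target's
  // uintptr_t width):
  //   long double x = 0;
  //   #pragma omp target firstprivate(x)
  //   { ; }   // if x is wider than uintptr_t it cannot travel in a single
  //           // uintptr-sized slot, so it is passed by reference instead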
2181 
2182  return IsByRef;
2183 }
2184 
2185 unsigned Sema::getOpenMPNestingLevel() const {
2186  assert(getLangOpts().OpenMP);
2187  return DSAStack->getNestingLevel();
2188 }
2189 
2190 bool Sema::isInOpenMPTaskUntiedContext() const {
2191  return isOpenMPTaskingDirective(DSAStack->getCurrentDirective()) &&
2192  DSAStack->isUntiedRegion();
2193 }
2194 
2195 bool Sema::isInOpenMPTargetExecutionDirective() const {
2196  return (isOpenMPTargetExecutionDirective(DSAStack->getCurrentDirective()) &&
2197  !DSAStack->isClauseParsingMode()) ||
2198  DSAStack->hasDirective(
2199  [](OpenMPDirectiveKind K, const DeclarationNameInfo &,
2200  SourceLocation) -> bool {
2201  return isOpenMPTargetExecutionDirective(K);
2202  },
2203  false);
2204 }
2205 
2206 VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo,
2207  unsigned StopAt) {
2208  assert(LangOpts.OpenMP && "OpenMP is not allowed");
2209  D = getCanonicalDecl(D);
2210 
2211  auto *VD = dyn_cast<VarDecl>(D);
2212  // Do not capture constexpr variables.
2213  if (VD && VD->isConstexpr())
2214  return nullptr;
2215 
2216  // If we want to determine whether the variable should be captured from the
2217  // perspective of the current capturing scope, and we've already left all the
2218  // capturing scopes of the top directive on the stack, check from the
2219  // perspective of its parent directive (if any) instead.
2220  DSAStackTy::ParentDirectiveScope InParentDirectiveRAII(
2221  *DSAStack, CheckScopeInfo && DSAStack->isBodyComplete());
2222 
2223  // If we are attempting to capture a global variable in a directive with
2224  // 'target', we return the declaration so that this global is also mapped
2225  // to the device.
2226  if (VD && !VD->hasLocalStorage() &&
2227  (getCurCapturedRegion() || getCurBlock() || getCurLambda())) {
2228  if (isInOpenMPTargetExecutionDirective()) {
2229  DSAStackTy::DSAVarData DVarTop =
2230  DSAStack->getTopDSA(D, DSAStack->isClauseParsingMode());
2231  if (DVarTop.CKind != OMPC_unknown && DVarTop.RefExpr)
2232  return VD;
2233  // If the declaration is enclosed in a 'declare target' directive,
2234  // then it should not be captured.
2235  //
2236  if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
2237  return nullptr;
2238  CapturedRegionScopeInfo *CSI = nullptr;
2239  for (FunctionScopeInfo *FSI : llvm::drop_begin(
2240  llvm::reverse(FunctionScopes),
2241  CheckScopeInfo ? (FunctionScopes.size() - (StopAt + 1)) : 0)) {
2242  if (!isa<CapturingScopeInfo>(FSI))
2243  return nullptr;
2244  if (auto *RSI = dyn_cast<CapturedRegionScopeInfo>(FSI))
2245  if (RSI->CapRegionKind == CR_OpenMP) {
2246  CSI = RSI;
2247  break;
2248  }
2249  }
2250  assert(CSI && "Failed to find CapturedRegionScopeInfo");
2251  SmallVector<OpenMPDirectiveKind, 4> Regions;
2252  getOpenMPCaptureRegions(Regions,
2253  DSAStack->getDirective(CSI->OpenMPLevel));
2254  if (Regions[CSI->OpenMPCaptureLevel] != OMPD_task)
2255  return VD;
2256  }
2257  if (isInOpenMPDeclareTargetContext()) {
2258  // Try to mark variable as declare target if it is used in capturing
2259  // regions.
2260  if (LangOpts.OpenMP <= 45 &&
2261  !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
2262  checkDeclIsAllowedInOpenMPTarget(nullptr, VD);
2263  return nullptr;
2264  }
2265  }
2266 
2267  if (CheckScopeInfo) {
2268  bool OpenMPFound = false;
2269  for (unsigned I = StopAt + 1; I > 0; --I) {
2270  FunctionScopeInfo *FSI = FunctionScopes[I - 1];
2271  if (!isa<CapturingScopeInfo>(FSI))
2272  return nullptr;
2273  if (auto *RSI = dyn_cast<CapturedRegionScopeInfo>(FSI))
2274  if (RSI->CapRegionKind == CR_OpenMP) {
2275  OpenMPFound = true;
2276  break;
2277  }
2278  }
2279  if (!OpenMPFound)
2280  return nullptr;
2281  }
2282 
2283  if (DSAStack->getCurrentDirective() != OMPD_unknown &&
2284  (!DSAStack->isClauseParsingMode() ||
2285  DSAStack->getParentDirective() != OMPD_unknown)) {
2286  auto &&Info = DSAStack->isLoopControlVariable(D);
2287  if (Info.first ||
2288  (VD && VD->hasLocalStorage() &&
2289  isImplicitOrExplicitTaskingRegion(DSAStack->getCurrentDirective())) ||
2290  (VD && DSAStack->isForceVarCapturing()))
2291  return VD ? VD : Info.second;
2292  DSAStackTy::DSAVarData DVarTop =
2293  DSAStack->getTopDSA(D, DSAStack->isClauseParsingMode());
2294  if (DVarTop.CKind != OMPC_unknown && isOpenMPPrivate(DVarTop.CKind) &&
2295  (!VD || VD->hasLocalStorage() || !DVarTop.AppliedToPointee))
2296  return VD ? VD : cast<VarDecl>(DVarTop.PrivateCopy->getDecl());
2297  // Threadprivate variables must not be captured.
2298  if (isOpenMPThreadPrivate(DVarTop.CKind))
2299  return nullptr;
2300  // The variable is not private or it is the variable in the directive with
2301  // default(none) clause and not used in any clause.
2302  DSAStackTy::DSAVarData DVarPrivate = DSAStack->hasDSA(
2303  D,
2304  [](OpenMPClauseKind C, bool AppliedToPointee) {
2305  return isOpenMPPrivate(C) && !AppliedToPointee;
2306  },
2307  [](OpenMPDirectiveKind) { return true; },
2308  DSAStack->isClauseParsingMode());
2309  // Global shared must not be captured.
2310  if (VD && !VD->hasLocalStorage() && DVarPrivate.CKind == OMPC_unknown &&
2311  ((DSAStack->getDefaultDSA() != DSA_none &&
2312  DSAStack->getDefaultDSA() != DSA_private &&
2313  DSAStack->getDefaultDSA() != DSA_firstprivate) ||
2314  DVarTop.CKind == OMPC_shared))
2315  return nullptr;
2316  if (DVarPrivate.CKind != OMPC_unknown ||
2317  (VD && (DSAStack->getDefaultDSA() == DSA_none ||
2318  DSAStack->getDefaultDSA() == DSA_private ||
2319  DSAStack->getDefaultDSA() == DSA_firstprivate)))
2320  return VD ? VD : cast<VarDecl>(DVarPrivate.PrivateCopy->getDecl());
2321  }
2322  return nullptr;
2323 }
2324 
2325 void Sema::adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
2326  unsigned Level) const {
2327  FunctionScopesIndex -= getOpenMPCaptureLevels(DSAStack->getDirective(Level));
2328 }
2329 
2330 void Sema::startOpenMPLoop() {
2331  assert(LangOpts.OpenMP && "OpenMP must be enabled.");
2332  if (isOpenMPLoopDirective(DSAStack->getCurrentDirective()))
2333  DSAStack->loopInit();
2334 }
2335 
2336 void Sema::startOpenMPCXXRangeFor() {
2337  assert(LangOpts.OpenMP && "OpenMP must be enabled.");
2338  if (isOpenMPLoopDirective(DSAStack->getCurrentDirective())) {
2339  DSAStack->resetPossibleLoopCounter();
2340  DSAStack->loopStart();
2341  }
2342 }
2343 
2344 OpenMPClauseKind Sema::isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
2345  unsigned CapLevel) const {
2346  assert(LangOpts.OpenMP && "OpenMP is not allowed");
2347  if (DSAStack->hasExplicitDirective(isOpenMPTaskingDirective, Level)) {
2348  bool IsTriviallyCopyable =
2349  D->getType().getNonReferenceType().isTriviallyCopyableType(Context) &&
2350  !D->getType()
2351  .getNonReferenceType()
2352  .getCanonicalType()
2353  ->getAsCXXRecordDecl();
2354  OpenMPDirectiveKind DKind = DSAStack->getDirective(Level);
2355  SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
2356  getOpenMPCaptureRegions(CaptureRegions, DKind);
2357  if (isOpenMPTaskingDirective(CaptureRegions[CapLevel]) &&
2358  (IsTriviallyCopyable ||
2359  !isOpenMPTaskLoopDirective(CaptureRegions[CapLevel]))) {
2360  if (DSAStack->hasExplicitDSA(
2361  D,
2362  [](OpenMPClauseKind K, bool) { return K == OMPC_firstprivate; },
2363  Level, /*NotLastprivate=*/true))
2364  return OMPC_firstprivate;
2365  DSAStackTy::DSAVarData DVar = DSAStack->getImplicitDSA(D, Level);
2366  if (DVar.CKind != OMPC_shared &&
2367  !DSAStack->isLoopControlVariable(D, Level).first && !DVar.RefExpr) {
2368  DSAStack->addImplicitTaskFirstprivate(Level, D);
2369  return OMPC_firstprivate;
2370  }
2371  }
2372  }
2373  if (isOpenMPLoopDirective(DSAStack->getCurrentDirective())) {
2374  if (DSAStack->getAssociatedLoops() > 0 && !DSAStack->isLoopStarted()) {
2375  DSAStack->resetPossibleLoopCounter(D);
2376  DSAStack->loopStart();
2377  return OMPC_private;
2378  }
2379  if ((DSAStack->getPossiblyLoopCunter() == D->getCanonicalDecl() ||
2380  DSAStack->isLoopControlVariable(D).first) &&
2381  !DSAStack->hasExplicitDSA(
2382  D, [](OpenMPClauseKind K, bool) { return K != OMPC_private; },
2383  Level) &&
2384  !isOpenMPSimdDirective(DSAStack->getCurrentDirective()))
2385  return OMPC_private;
2386  }
2387  if (const auto *VD = dyn_cast<VarDecl>(D)) {
2388  if (DSAStack->isThreadPrivate(const_cast<VarDecl *>(VD)) &&
2389  DSAStack->isForceVarCapturing() &&
2390  !DSAStack->hasExplicitDSA(
2391  D, [](OpenMPClauseKind K, bool) { return K == OMPC_copyin; },
2392  Level))
2393  return OMPC_private;
2394  }
2395  // User-defined allocators are private since they must be defined in the
2396  // context of the target region.
2397  if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective, Level) &&
2398  DSAStack->isUsesAllocatorsDecl(Level, D).getValueOr(
2399  DSAStackTy::UsesAllocatorsDeclKind::AllocatorTrait) ==
2400  DSAStackTy::UsesAllocatorsDeclKind::UserDefinedAllocator)
2401  return OMPC_private;
2402  return (DSAStack->hasExplicitDSA(
2403  D, [](OpenMPClauseKind K, bool) { return K == OMPC_private; },
2404  Level) ||
2405  (DSAStack->isClauseParsingMode() &&
2406  DSAStack->getClauseParsingMode() == OMPC_private) ||
2407  // Consider the taskgroup reduction descriptor variable private
2408  // to avoid a possible capture in the region.
2409  (DSAStack->hasExplicitDirective(
2410  [](OpenMPDirectiveKind K) {
2411  return K == OMPD_taskgroup ||
2412  ((isOpenMPParallelDirective(K) ||
2413  isOpenMPWorksharingDirective(K)) &&
2414  !isOpenMPSimdDirective(K));
2415  },
2416  Level) &&
2417  DSAStack->isTaskgroupReductionRef(D, Level)))
2418  ? OMPC_private
2419  : OMPC_unknown;
2420 }
2421 
2422 void Sema::setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D,
2423  unsigned Level) {
2424  assert(LangOpts.OpenMP && "OpenMP is not allowed");
2425  D = getCanonicalDecl(D);
2426  OpenMPClauseKind OMPC = OMPC_unknown;
2427  for (unsigned I = DSAStack->getNestingLevel() + 1; I > Level; --I) {
2428  const unsigned NewLevel = I - 1;
2429  if (DSAStack->hasExplicitDSA(
2430  D,
2431  [&OMPC](const OpenMPClauseKind K, bool AppliedToPointee) {
2432  if (isOpenMPPrivate(K) && !AppliedToPointee) {
2433  OMPC = K;
2434  return true;
2435  }
2436  return false;
2437  },
2438  NewLevel))
2439  break;
2440  if (DSAStack->checkMappableExprComponentListsForDeclAtLevel(
2441  D, NewLevel,
2442  [](OMPClauseMappableExprCommon::MappableExprComponentListRef,
2443  OpenMPClauseKind) { return true; })) {
2444  OMPC = OMPC_map;
2445  break;
2446  }
2447  if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective,
2448  NewLevel)) {
2449  OMPC = OMPC_map;
2450  if (DSAStack->mustBeFirstprivateAtLevel(
2451  NewLevel, getVariableCategoryFromDecl(LangOpts, D)))
2452  OMPC = OMPC_firstprivate;
2453  break;
2454  }
2455  }
2456  if (OMPC != OMPC_unknown)
2457  FD->addAttr(OMPCaptureKindAttr::CreateImplicit(Context, unsigned(OMPC)));
2458 }
2459 
2460 bool Sema::isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level,
2461  unsigned CaptureLevel) const {
2462  assert(LangOpts.OpenMP && "OpenMP is not allowed");
2463  // Return true if the current level is no longer enclosed in a target region.
2464 
2465  SmallVector<OpenMPDirectiveKind, 4> Regions;
2466  getOpenMPCaptureRegions(Regions, DSAStack->getDirective(Level));
2467  const auto *VD = dyn_cast<VarDecl>(D);
2468  return VD && !VD->hasLocalStorage() &&
2469  DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective,
2470  Level) &&
2471  Regions[CaptureLevel] != OMPD_task;
2472 }
2473 
2474 bool Sema::isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level,
2475  unsigned CaptureLevel) const {
2476  assert(LangOpts.OpenMP && "OpenMP is not allowed");
2477  // Return true if the current level is no longer enclosed in a target region.
2478 
2479  if (const auto *VD = dyn_cast<VarDecl>(D)) {
2480  if (!VD->hasLocalStorage()) {
2481  if (isInOpenMPTargetExecutionDirective())
2482  return true;
2483  DSAStackTy::DSAVarData TopDVar =
2484  DSAStack->getTopDSA(D, /*FromParent=*/false);
2485  unsigned NumLevels =
2486  getOpenMPCaptureLevels(DSAStack->getDirective(Level));
2487  if (Level == 0)
2488  // A non-file-scope static variable with default(firstprivate)
2489  // should be globally captured.
2490  return (NumLevels == CaptureLevel + 1 &&
2491  (TopDVar.CKind != OMPC_shared ||
2492  DSAStack->getDefaultDSA() == DSA_firstprivate));
2493  do {
2494  --Level;
2495  DSAStackTy::DSAVarData DVar = DSAStack->getImplicitDSA(D, Level);
2496  if (DVar.CKind != OMPC_shared)
2497  return true;
2498  } while (Level > 0);
2499  }
2500  }
2501  return true;
2502 }
2503 
2504 void Sema::DestroyDataSharingAttributesStack() { delete DSAStack; }
2505 
2506 void Sema::ActOnOpenMPBeginDeclareVariant(SourceLocation Loc,
2507  OMPTraitInfo &TI) {
2508  OMPDeclareVariantScopes.push_back(OMPDeclareVariantScope(TI));
2509 }
2510 
2511 void Sema::ActOnOpenMPEndDeclareVariant() {
2512  assert(isInOpenMPDeclareVariantScope() &&
2513  "Not in OpenMP declare variant scope!");
2514 
2515  OMPDeclareVariantScopes.pop_back();
2516 }
2517 
2518 void Sema::finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
2519  const FunctionDecl *Callee,
2520  SourceLocation Loc) {
2521  assert(LangOpts.OpenMP && "Expected OpenMP compilation mode.");
2522  Optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
2523  OMPDeclareTargetDeclAttr::getDeviceType(Caller->getMostRecentDecl());
2524  // Ignore host functions during device analysis.
2525  if (LangOpts.OpenMPIsDevice &&
2526  (!DevTy || *DevTy == OMPDeclareTargetDeclAttr::DT_Host))
2527  return;
2528  // Ignore nohost functions during host analysis.
2529  if (!LangOpts.OpenMPIsDevice && DevTy &&
2530  *DevTy == OMPDeclareTargetDeclAttr::DT_NoHost)
2531  return;
2532  const FunctionDecl *FD = Callee->getMostRecentDecl();
2533  DevTy = OMPDeclareTargetDeclAttr::getDeviceType(FD);
2534  if (LangOpts.OpenMPIsDevice && DevTy &&
2535  *DevTy == OMPDeclareTargetDeclAttr::DT_Host) {
2536  // Diagnose host function called during device codegen.
2537  StringRef HostDevTy =
2538  getOpenMPSimpleClauseTypeName(OMPC_device_type, OMPC_DEVICE_TYPE_host);
2539  Diag(Loc, diag::err_omp_wrong_device_function_call) << HostDevTy << 0;
2540  Diag(*OMPDeclareTargetDeclAttr::getLocation(FD),
2541  diag::note_omp_marked_device_type_here)
2542  << HostDevTy;
2543  return;
2544  }
2545  if (!LangOpts.OpenMPIsDevice && !LangOpts.OpenMPOffloadMandatory && DevTy &&
2546  *DevTy == OMPDeclareTargetDeclAttr::DT_NoHost) {
2547  // Diagnose nohost function called during host codegen.
2548  StringRef NoHostDevTy = getOpenMPSimpleClauseTypeName(
2549  OMPC_device_type, OMPC_DEVICE_TYPE_nohost);
2550  Diag(Loc, diag::err_omp_wrong_device_function_call) << NoHostDevTy << 1;
2551  Diag(*OMPDeclareTargetDeclAttr::getLocation(FD),
2552  diag::note_omp_marked_device_type_here)
2553  << NoHostDevTy;
2554  }
2555 }
2556 
2557 void Sema::StartOpenMPDSABlock(OpenMPDirectiveKind DKind,
2558  const DeclarationNameInfo &DirName,
2559  Scope *CurScope, SourceLocation Loc) {
2560  DSAStack->push(DKind, DirName, CurScope, Loc);
2561  PushExpressionEvaluationContext(
2562  ExpressionEvaluationContext::PotentiallyEvaluated);
2563 }
2564 
2565 void Sema::StartOpenMPClause(OpenMPClauseKind K) {
2566  DSAStack->setClauseParsingMode(K);
2567 }
2568 
2569 void Sema::EndOpenMPClause() {
2570  DSAStack->setClauseParsingMode(/*K=*/OMPC_unknown);
2571  CleanupVarDeclMarking();
2572 }
2573 
2574 static std::pair<ValueDecl *, bool>
2575 getPrivateItem(Sema &S, Expr *&RefExpr, SourceLocation &ELoc,
2576  SourceRange &ERange, bool AllowArraySection = false);
2577 
2578 /// Check consistency of the reduction clauses.
2579 static void checkReductionClauses(Sema &S, DSAStackTy *Stack,
2580  ArrayRef<OMPClause *> Clauses) {
2581  bool InscanFound = false;
2582  SourceLocation InscanLoc;
2583  // OpenMP 5.0, 2.19.5.4 reduction Clause, Restrictions.
2584  // A reduction clause without the inscan reduction-modifier may not appear on
2585  // a construct on which a reduction clause with the inscan reduction-modifier
2586  // appears.
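  // Illustrative example (editor's addition, not part of the original source):
  //   #pragma omp for reduction(inscan, + : a) reduction(+ : b)   // error:
  //   for (int i = 0; i < n; ++i) {}   // once inscan is used, every reduction
  //                                    // clause on the construct must be inscan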
2587  for (OMPClause *C : Clauses) {
2588  if (C->getClauseKind() != OMPC_reduction)
2589  continue;
2590  auto *RC = cast<OMPReductionClause>(C);
2591  if (RC->getModifier() == OMPC_REDUCTION_inscan) {
2592  InscanFound = true;
2593  InscanLoc = RC->getModifierLoc();
2594  continue;
2595  }
2596  if (RC->getModifier() == OMPC_REDUCTION_task) {
2597  // OpenMP 5.0, 2.19.5.4 reduction Clause.
2598  // A reduction clause with the task reduction-modifier may only appear on
2599  // a parallel construct, a worksharing construct or a combined or
2600  // composite construct for which any of the aforementioned constructs is a
2601  // constituent construct and simd or loop are not constituent constructs.
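      // Illustrative example (editor's addition, not part of the original
      // source):
      //   #pragma omp parallel reduction(task, + : x)   // OK
      //   #pragma omp simd reduction(task, + : x)       // error: task modifier
      //                                                 // not allowed on simd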
2602  OpenMPDirectiveKind CurDir = Stack->getCurrentDirective();
2603  if (!(isOpenMPParallelDirective(CurDir) ||
2604  isOpenMPWorksharingDirective(CurDir)) ||
2605  isOpenMPSimdDirective(CurDir))
2606  S.Diag(RC->getModifierLoc(),
2607  diag::err_omp_reduction_task_not_parallel_or_worksharing);
2608  continue;
2609  }
2610  }
2611  if (InscanFound) {
2612  for (OMPClause *C : Clauses) {
2613  if (C->getClauseKind() != OMPC_reduction)
2614  continue;
2615  auto *RC = cast<OMPReductionClause>(C);
2616  if (RC->getModifier() != OMPC_REDUCTION_inscan) {
2617  S.Diag(RC->getModifier() == OMPC_REDUCTION_unknown
2618  ? RC->getBeginLoc()
2619  : RC->getModifierLoc(),
2620  diag::err_omp_inscan_reduction_expected);
2621  S.Diag(InscanLoc, diag::note_omp_previous_inscan_reduction);
2622  continue;
2623  }
2624  for (Expr *Ref : RC->varlists()) {
2625  assert(Ref && "NULL expr in OpenMP reduction clause.");
2626  SourceLocation ELoc;
2627  SourceRange ERange;
2628  Expr *SimpleRefExpr = Ref;
2629  auto Res = getPrivateItem(S, SimpleRefExpr, ELoc, ERange,
2630  /*AllowArraySection=*/true);
2631  ValueDecl *D = Res.first;
2632  if (!D)
2633  continue;
2634  if (!Stack->isUsedInScanDirective(getCanonicalDecl(D))) {
2635  S.Diag(Ref->getExprLoc(),
2636  diag::err_omp_reduction_not_inclusive_exclusive)
2637  << Ref->getSourceRange();
2638  }
2639  }
2640  }
2641  }
2642 }
2643 
2644 static void checkAllocateClauses(Sema &S, DSAStackTy *Stack,
2645  ArrayRef<OMPClause *> Clauses);
2646 static DeclRefExpr *buildCapture(Sema &S, ValueDecl *D, Expr *CaptureExpr,
2647  bool WithInit);
2648 
2649 static void reportOriginalDsa(Sema &SemaRef, const DSAStackTy *Stack,
2650  const ValueDecl *D,
2651  const DSAStackTy::DSAVarData &DVar,
2652  bool IsLoopIterVar = false);
2653 
2654 void Sema::EndOpenMPDSABlock(Stmt *CurDirective) {
2655  // OpenMP [2.14.3.5, Restrictions, C/C++, p.1]
2656  // A variable of class type (or array thereof) that appears in a lastprivate
2657  // clause requires an accessible, unambiguous default constructor for the
2658  // class type, unless the list item is also specified in a firstprivate
2659  // clause.
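  // Illustrative example (editor's addition; 'S' is hypothetical):
  //   struct S { S(int); };   // no default constructor
  //   S s(0);
  //   #pragma omp parallel for lastprivate(s)   // error unless 's' is also
  //   for (int i = 0; i < 10; ++i) {}           // listed in firstprivate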
2660  if (const auto *D = dyn_cast_or_null<OMPExecutableDirective>(CurDirective)) {
2661  for (OMPClause *C : D->clauses()) {
2662  if (auto *Clause = dyn_cast<OMPLastprivateClause>(C)) {
2663  SmallVector<Expr *, 8> PrivateCopies;
2664  for (Expr *DE : Clause->varlists()) {
2665  if (DE->isValueDependent() || DE->isTypeDependent()) {
2666  PrivateCopies.push_back(nullptr);
2667  continue;
2668  }
2669  auto *DRE = cast<DeclRefExpr>(DE->IgnoreParens());
2670  auto *VD = cast<VarDecl>(DRE->getDecl());
2671  QualType Type = VD->getType().getNonReferenceType();
2672  const DSAStackTy::DSAVarData DVar =
2673  DSAStack->getTopDSA(VD, /*FromParent=*/false);
2674  if (DVar.CKind == OMPC_lastprivate) {
2675  // Generate helper private variable and initialize it with the
2676  // default value. The address of the original variable is replaced
2677  // by the address of the new private variable in CodeGen. This new
2678  // variable is not added to IdResolver, so the code in the OpenMP
2679  // region uses the original variable for proper diagnostics.
2680  VarDecl *VDPrivate = buildVarDecl(
2681  *this, DE->getExprLoc(), Type.getUnqualifiedType(),
2682  VD->getName(), VD->hasAttrs() ? &VD->getAttrs() : nullptr, DRE);
2683  ActOnUninitializedDecl(VDPrivate);
2684  if (VDPrivate->isInvalidDecl()) {
2685  PrivateCopies.push_back(nullptr);
2686  continue;
2687  }
2688  PrivateCopies.push_back(buildDeclRefExpr(
2689  *this, VDPrivate, DE->getType(), DE->getExprLoc()));
2690  } else {
2691  // The variable is also a firstprivate, so initialization sequence
2692  // for private copy is generated already.
2693  PrivateCopies.push_back(nullptr);
2694  }
2695  }
2696  Clause->setPrivateCopies(PrivateCopies);
2697  continue;
2698  }
2699  // Finalize nontemporal clause by handling private copies, if any.
2700  if (auto *Clause = dyn_cast<OMPNontemporalClause>(C)) {
2701  SmallVector<Expr *, 8> PrivateRefs;
2702  for (Expr *RefExpr : Clause->varlists()) {
2703  assert(RefExpr && "NULL expr in OpenMP nontemporal clause.");
2704  SourceLocation ELoc;
2705  SourceRange ERange;
2706  Expr *SimpleRefExpr = RefExpr;
2707  auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange);
2708  if (Res.second)
2709  // It will be analyzed later.
2710  PrivateRefs.push_back(RefExpr);
2711  ValueDecl *D = Res.first;
2712  if (!D)
2713  continue;
2714 
2715  const DSAStackTy::DSAVarData DVar =
2716  DSAStack->getTopDSA(D, /*FromParent=*/false);
2717  PrivateRefs.push_back(DVar.PrivateCopy ? DVar.PrivateCopy
2718  : SimpleRefExpr);
2719  }
2720  Clause->setPrivateRefs(PrivateRefs);
2721  continue;
2722  }
2723  if (auto *Clause = dyn_cast<OMPUsesAllocatorsClause>(C)) {
2724  for (unsigned I = 0, E = Clause->getNumberOfAllocators(); I < E; ++I) {
2725  OMPUsesAllocatorsClause::Data D = Clause->getAllocatorData(I);
2726  auto *DRE = dyn_cast<DeclRefExpr>(D.Allocator->IgnoreParenImpCasts());
2727  if (!DRE)
2728  continue;
2729  ValueDecl *VD = DRE->getDecl();
2730  if (!VD || !isa<VarDecl>(VD))
2731  continue;
2732  DSAStackTy::DSAVarData DVar =
2733  DSAStack->getTopDSA(VD, /*FromParent=*/false);
2734  // OpenMP [2.12.5, target Construct]
2735  // Memory allocators that appear in a uses_allocators clause cannot
2736  // appear in other data-sharing attribute clauses or data-mapping
2737  // attribute clauses in the same construct.
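          // Illustrative example (editor's addition; 'MyAlloc' is
          // hypothetical):
          //   omp_allocator_handle_t MyAlloc;
          //   #pragma omp target uses_allocators(MyAlloc) firstprivate(MyAlloc)
          //   { ; }   // error: the allocator also appears in a data-sharing clause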
2738  Expr *MapExpr = nullptr;
2739  if (DVar.RefExpr ||
2740  DSAStack->checkMappableExprComponentListsForDecl(
2741  VD, /*CurrentRegionOnly=*/true,
2742  [VD, &MapExpr](
2743  OMPClauseMappableExprCommon::MappableExprComponentListRef
2744  MapExprComponents,
2745  OpenMPClauseKind C) {
2746  auto MI = MapExprComponents.rbegin();
2747  auto ME = MapExprComponents.rend();
2748  if (MI != ME &&
2749  MI->getAssociatedDeclaration()->getCanonicalDecl() ==
2750  VD->getCanonicalDecl()) {
2751  MapExpr = MI->getAssociatedExpression();
2752  return true;
2753  }
2754  return false;
2755  })) {
2756  Diag(D.Allocator->getExprLoc(),
2757  diag::err_omp_allocator_used_in_clauses)
2758  << D.Allocator->getSourceRange();
2759  if (DVar.RefExpr)
2760  reportOriginalDsa(*this, DSAStack, VD, DVar);
2761  else
2762  Diag(MapExpr->getExprLoc(), diag::note_used_here)
2763  << MapExpr->getSourceRange();
2764  }
2765  }
2766  continue;
2767  }
2768  }
2769  // Check allocate clauses.
2770  if (!CurContext->isDependentContext())
2771  checkAllocateClauses(*this, DSAStack, D->clauses());
2772  checkReductionClauses(*this, DSAStack, D->clauses());
2773  }
2774 
2775  DSAStack->pop();
2776  DiscardCleanupsInEvaluationContext();
2777  PopExpressionEvaluationContext();
2778 }
2779 
2780 static bool FinishOpenMPLinearClause(OMPLinearClause &Clause, DeclRefExpr *IV,
2781  Expr *NumIterations, Sema &SemaRef,
2782  Scope *S, DSAStackTy *Stack);
2783 
2784 namespace {
2785 
2786 class VarDeclFilterCCC final : public CorrectionCandidateCallback {
2787 private:
2788  Sema &SemaRef;
2789 
2790 public:
2791  explicit VarDeclFilterCCC(Sema &S) : SemaRef(S) {}
2792  bool ValidateCandidate(const TypoCorrection &Candidate) override {
2793  NamedDecl *ND = Candidate.getCorrectionDecl();
2794  if (const auto *VD = dyn_cast_or_null<VarDecl>(ND)) {
2795  return VD->hasGlobalStorage() &&
2796  SemaRef.isDeclInScope(ND, SemaRef.getCurLexicalContext(),
2797  SemaRef.getCurScope());
2798  }
2799  return false;
2800  }
2801 
2802  std::unique_ptr<CorrectionCandidateCallback> clone() override {
2803  return std::make_unique<VarDeclFilterCCC>(*this);
2804  }
2805 };
2806 
2807 class VarOrFuncDeclFilterCCC final : public CorrectionCandidateCallback {
2808 private:
2809  Sema &SemaRef;
2810 
2811 public:
2812  explicit VarOrFuncDeclFilterCCC(Sema &S) : SemaRef(S) {}
2813  bool ValidateCandidate(const TypoCorrection &Candidate) override {
2814  NamedDecl *ND = Candidate.getCorrectionDecl();
2815  if (ND && ((isa<VarDecl>(ND) && ND->getKind() == Decl::Var) ||
2816  isa<FunctionDecl>(ND))) {
2817  return SemaRef.isDeclInScope(ND, SemaRef.getCurLexicalContext(),
2818  SemaRef.getCurScope());
2819  }
2820  return false;
2821  }
2822 
2823  std::unique_ptr<CorrectionCandidateCallback> clone() override {
2824  return std::make_unique<VarOrFuncDeclFilterCCC>(*this);
2825  }
2826 };
2827 
2828 } // namespace
2829 
2830 ExprResult Sema::ActOnOpenMPIdExpression(Scope *CurScope,
2831  CXXScopeSpec &ScopeSpec,
2832  const DeclarationNameInfo &Id,
2833  OpenMPDirectiveKind Kind) {
2834  LookupResult Lookup(*this, Id, LookupOrdinaryName);
2835  LookupParsedName(Lookup, CurScope, &ScopeSpec, true);
2836 
2837  if (Lookup.isAmbiguous())
2838  return ExprError();
2839 
2840  VarDecl *VD;
2841  if (!Lookup.isSingleResult()) {
2842  VarDeclFilterCCC CCC(*this);
2843  if (TypoCorrection Corrected =
2844  CorrectTypo(Id, LookupOrdinaryName, CurScope, nullptr, CCC,
2845  CTK_ErrorRecovery)) {
2846  diagnoseTypo(Corrected,
2847  PDiag(Lookup.empty()
2848  ? diag::err_undeclared_var_use_suggest
2849  : diag::err_omp_expected_var_arg_suggest)
2850  << Id.getName());
2851  VD = Corrected.getCorrectionDeclAs<VarDecl>();
2852  } else {
2853  Diag(Id.getLoc(), Lookup.empty() ? diag::err_undeclared_var_use
2854  : diag::err_omp_expected_var_arg)
2855  << Id.getName();
2856  return ExprError();
2857  }
2858  } else if (!(VD = Lookup.getAsSingle<VarDecl>())) {
2859  Diag(Id.getLoc(), diag::err_omp_expected_var_arg) << Id.getName();
2860  Diag(Lookup.getFoundDecl()->getLocation(), diag::note_declared_at);
2861  return ExprError();
2862  }
2863  Lookup.suppressDiagnostics();
2864 
2865  // OpenMP [2.9.2, Syntax, C/C++]
2866  // Variables must be file-scope, namespace-scope, or static block-scope.
2867  if (Kind == OMPD_threadprivate && !VD->hasGlobalStorage()) {
2868  Diag(Id.getLoc(), diag::err_omp_global_var_arg)
2869  << getOpenMPDirectiveName(Kind) << !VD->isStaticLocal();
2870  bool IsDecl =
2871  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
2872  Diag(VD->getLocation(),
2873  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2874  << VD;
2875  return ExprError();
2876  }
2877 
2878  VarDecl *CanonicalVD = VD->getCanonicalDecl();
2879  NamedDecl *ND = CanonicalVD;
2880  // OpenMP [2.9.2, Restrictions, C/C++, p.2]
2881  // A threadprivate directive for file-scope variables must appear outside
2882  // any definition or declaration.
2883  if (CanonicalVD->getDeclContext()->isTranslationUnit() &&
2884  !getCurLexicalContext()->isTranslationUnit()) {
2885  Diag(Id.getLoc(), diag::err_omp_var_scope)
2886  << getOpenMPDirectiveName(Kind) << VD;
2887  bool IsDecl =
2888  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
2889  Diag(VD->getLocation(),
2890  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2891  << VD;
2892  return ExprError();
2893  }
2894  // OpenMP [2.9.2, Restrictions, C/C++, p.3]
2895  // A threadprivate directive for static class member variables must appear
2896  // in the class definition, in the same scope in which the member
2897  // variables are declared.
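  // Illustrative example (editor's addition, not part of the original source):
  //   struct T {
  //     static int S;
  //     #pragma omp threadprivate(S)    // OK: same scope as the declaration
  //   };
  //   #pragma omp threadprivate(T::S)   // error: outside the class definition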
2898  if (CanonicalVD->isStaticDataMember() &&
2899  !CanonicalVD->getDeclContext()->Equals(getCurLexicalContext())) {
2900  Diag(Id.getLoc(), diag::err_omp_var_scope)
2901  << getOpenMPDirectiveName(Kind) << VD;
2902  bool IsDecl =
2903  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
2904  Diag(VD->getLocation(),
2905  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2906  << VD;
2907  return ExprError();
2908  }
2909  // OpenMP [2.9.2, Restrictions, C/C++, p.4]
2910  // A threadprivate directive for namespace-scope variables must appear
2911  // outside any definition or declaration other than the namespace
2912  // definition itself.
2913  if (CanonicalVD->getDeclContext()->isNamespace() &&
2914  (!getCurLexicalContext()->isFileContext() ||
2915  !getCurLexicalContext()->Encloses(CanonicalVD->getDeclContext()))) {
2916  Diag(Id.getLoc(), diag::err_omp_var_scope)
2917  << getOpenMPDirectiveName(Kind) << VD;
2918  bool IsDecl =
2919  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
2920  Diag(VD->getLocation(),
2921  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2922  << VD;
2923  return ExprError();
2924  }
2925  // OpenMP [2.9.2, Restrictions, C/C++, p.6]
2926  // A threadprivate directive for static block-scope variables must appear
2927  // in the scope of the variable and not in a nested scope.
2928  if (CanonicalVD->isLocalVarDecl() && CurScope &&
2929  !isDeclInScope(ND, getCurLexicalContext(), CurScope)) {
2930  Diag(Id.getLoc(), diag::err_omp_var_scope)
2931  << getOpenMPDirectiveName(Kind) << VD;
2932  bool IsDecl =
2933  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
2934  Diag(VD->getLocation(),
2935  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2936  << VD;
2937  return ExprError();
2938  }
2939 
2940  // OpenMP [2.9.2, Restrictions, C/C++, p.2-6]
2941  // A threadprivate directive must lexically precede all references to any
2942  // of the variables in its list.
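  // Illustrative example (editor's addition, not part of the original source):
  //   int G;
  //   void use() { G = 1; }          // G is referenced here ...
  //   #pragma omp threadprivate(G)   // ... so this later directive is diagnosed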
2943  if (Kind == OMPD_threadprivate && VD->isUsed() &&
2944  !DSAStack->isThreadPrivate(VD)) {
2945  Diag(Id.getLoc(), diag::err_omp_var_used)
2946  << getOpenMPDirectiveName(Kind) << VD;
2947  return ExprError();
2948  }
2949 
2950  QualType ExprType = VD->getType().getNonReferenceType();
2951  return DeclRefExpr::Create(Context, NestedNameSpecifierLoc(),
2952  SourceLocation(), VD,
2953  /*RefersToEnclosingVariableOrCapture=*/false,
2954  Id.getLoc(), ExprType, VK_LValue);
2955 }
2956 
2957 Sema::DeclGroupPtrTy
2958 Sema::ActOnOpenMPThreadprivateDirective(SourceLocation Loc,
2959  ArrayRef<Expr *> VarList) {
2960  if (OMPThreadPrivateDecl *D = CheckOMPThreadPrivateDecl(Loc, VarList)) {
2961  CurContext->addDecl(D);
2962  return DeclGroupPtrTy::make(DeclGroupRef(D));
2963  }
2964  return nullptr;
2965 }
2966 
2967 namespace {
2968 class LocalVarRefChecker final
2969  : public ConstStmtVisitor<LocalVarRefChecker, bool> {
2970  Sema &SemaRef;
2971 
2972 public:
2973  bool VisitDeclRefExpr(const DeclRefExpr *E) {
2974  if (const auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
2975  if (VD->hasLocalStorage()) {
2976  SemaRef.Diag(E->getBeginLoc(),
2977  diag::err_omp_local_var_in_threadprivate_init)
2978  << E->getSourceRange();
2979  SemaRef.Diag(VD->getLocation(), diag::note_defined_here)
2980  << VD << VD->getSourceRange();
2981  return true;
2982  }
2983  }
2984  return false;
2985  }
2986  bool VisitStmt(const Stmt *S) {
2987  for (const Stmt *Child : S->children()) {
2988  if (Child && Visit(Child))
2989  return true;
2990  }
2991  return false;
2992  }
2993  explicit LocalVarRefChecker(Sema &SemaRef) : SemaRef(SemaRef) {}
2994 };
2995 } // namespace
2996 
2997 OMPThreadPrivateDecl *
2998 Sema::CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList) {
2999  SmallVector<Expr *, 8> Vars;
3000  for (Expr *RefExpr : VarList) {
3001  auto *DE = cast<DeclRefExpr>(RefExpr);
3002  auto *VD = cast<VarDecl>(DE->getDecl());
3003  SourceLocation ILoc = DE->getExprLoc();
3004 
3005  // Mark variable as used.
3006  VD->setReferenced();
3007  VD->markUsed(Context);
3008 
3009  QualType QType = VD->getType();
3010  if (QType->isDependentType() || QType->isInstantiationDependentType()) {
3011  // It will be analyzed later.
3012  Vars.push_back(DE);
3013  continue;
3014  }
3015 
3016  // OpenMP [2.9.2, Restrictions, C/C++, p.10]
3017  // A threadprivate variable must not have an incomplete type.
3018  if (RequireCompleteType(ILoc, VD->getType(),
3019  diag::err_omp_threadprivate_incomplete_type)) {
3020  continue;
3021  }
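  // Editor's note, illustrative sketch (not part of the original source): the
  // complete-type requirement enforced above; names are hypothetical.
  //
  //   struct Incomplete;
  //   extern Incomplete Obj;
  //   #pragma omp threadprivate(Obj) // error: incomplete type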
3022 
3023  // OpenMP [2.9.2, Restrictions, C/C++, p.10]
3024  // A threadprivate variable must not have a reference type.
3025  if (VD->getType()->isReferenceType()) {
3026  Diag(ILoc, diag::err_omp_ref_type_arg)
3027  << getOpenMPDirectiveName(OMPD_threadprivate) << VD->getType();
3028  bool IsDecl =
3029  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
3030  Diag(VD->getLocation(),
3031  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
3032  << VD;
3033  continue;
3034  }
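  // Editor's note, illustrative sketch (not part of the original source): the
  // reference-type restriction enforced above.
  //
  //   int Buf;
  //   int &Ref = Buf;
  //   #pragma omp threadprivate(Ref) // error: reference type not allowed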
3035 
3036  // Check if this is a TLS variable. If TLS is not supported, produce the
3037  // corresponding diagnostic.
3038  if ((VD->getTLSKind() != VarDecl::TLS_None &&
3039  !(VD->hasAttr<OMPThreadPrivateDeclAttr>() &&
3040  getLangOpts().OpenMPUseTLS &&
3041  getASTContext().getTargetInfo().isTLSSupported())) ||
3042  (VD->getStorageClass() == SC_Register && VD->hasAttr<AsmLabelAttr>() &&
3043  !VD->isLocalVarDecl())) {
3044  Diag(ILoc, diag::err_omp_var_thread_local)
3045  << VD << ((VD->getTLSKind() != VarDecl::TLS_None) ? 0 : 1);
3046  bool IsDecl =
3047  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
3048  Diag(VD->getLocation(),
3049  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
3050  << VD;
3051  continue;
3052  }
3053 
3054  // Check if the initial value of the threadprivate variable references a
3055  // variable with local storage (this is not supported by the runtime).
3056  if (const Expr *Init = VD->getAnyInitializer()) {
3057  LocalVarRefChecker Checker(*this);
3058  if (Checker.Visit(Init))
3059  continue;
3060  }
3061 
3062  Vars.push_back(RefExpr);
3063  DSAStack->addDSA(VD, DE, OMPC_threadprivate);
3064  VD->addAttr(OMPThreadPrivateDeclAttr::CreateImplicit(
3065  Context, SourceRange(Loc, Loc)));
3066  if (ASTMutationListener *ML = Context.getASTMutationListener())
3067  ML->DeclarationMarkedOpenMPThreadPrivate(VD);
3068  }
3069  OMPThreadPrivateDecl *D = nullptr;
3070  if (!Vars.empty()) {
3071  D = OMPThreadPrivateDecl::Create(Context, getCurLexicalContext(), Loc,
3072  Vars);
3073  D->setAccess(AS_public);
3074  }
3075  return D;
3076 }
3077 
3078 static OMPAllocateDeclAttr::AllocatorTypeTy
3079 getAllocatorKind(Sema &S, DSAStackTy *Stack, Expr *Allocator) {
3080  if (!Allocator)
3081  return OMPAllocateDeclAttr::OMPNullMemAlloc;
3082  if (Allocator->isTypeDependent() || Allocator->isValueDependent() ||
3083  Allocator->isInstantiationDependent() ||
3084  Allocator->containsUnexpandedParameterPack())
3085  return OMPAllocateDeclAttr::OMPUserDefinedMemAlloc;
3086  auto AllocatorKindRes = OMPAllocateDeclAttr::OMPUserDefinedMemAlloc;
3087  const Expr *AE = Allocator->IgnoreParenImpCasts();
3088  for (int I = 0; I < OMPAllocateDeclAttr::OMPUserDefinedMemAlloc; ++I) {
3089  auto AllocatorKind = static_cast<OMPAllocateDeclAttr::AllocatorTypeTy>(I);
3090  const Expr *DefAllocator = Stack->getAllocator(AllocatorKind);
3091  llvm::FoldingSetNodeID AEId, DAEId;
3092  AE->Profile(AEId, S.getASTContext(), /*Canonical=*/true);
3093  DefAllocator->Profile(DAEId, S.getASTContext(), /*Canonical=*/true);
3094  if (AEId == DAEId) {
3095  AllocatorKindRes = AllocatorKind;
3096  break;
3097  }
3098  }
3099  return AllocatorKindRes;
3100 }
3101 
3102 static bool checkPreviousOMPAllocateAttribute(
3103  Sema &S, DSAStackTy *Stack, Expr *RefExpr, VarDecl *VD,
3104  OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind, Expr *Allocator) {
3105  if (!VD->hasAttr<OMPAllocateDeclAttr>())
3106  return false;
3107  const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
3108  Expr *PrevAllocator = A->getAllocator();
3109  OMPAllocateDeclAttr::AllocatorTypeTy PrevAllocatorKind =
3110  getAllocatorKind(S, Stack, PrevAllocator);
3111  bool AllocatorsMatch = AllocatorKind == PrevAllocatorKind;
3112  if (AllocatorsMatch &&
3113  AllocatorKind == OMPAllocateDeclAttr::OMPUserDefinedMemAlloc &&
3114  Allocator && PrevAllocator) {
3115  const Expr *AE = Allocator->IgnoreParenImpCasts();
3116  const Expr *PAE = PrevAllocator->IgnoreParenImpCasts();
3117  llvm::FoldingSetNodeID AEId, PAEId;
3118  AE->Profile(AEId, S.Context, /*Canonical=*/true);
3119  PAE->Profile(PAEId, S.Context, /*Canonical=*/true);
3120  AllocatorsMatch = AEId == PAEId;
3121  }
3122  if (!AllocatorsMatch) {
3123  SmallString<256> AllocatorBuffer;
3124  llvm::raw_svector_ostream AllocatorStream(AllocatorBuffer);
3125  if (Allocator)
3126  Allocator->printPretty(AllocatorStream, nullptr, S.getPrintingPolicy());
3127  SmallString<256> PrevAllocatorBuffer;
3128  llvm::raw_svector_ostream PrevAllocatorStream(PrevAllocatorBuffer);
3129  if (PrevAllocator)
3130  PrevAllocator->printPretty(PrevAllocatorStream, nullptr,
3131  S.getPrintingPolicy());
3132 
3133  SourceLocation AllocatorLoc =
3134  Allocator ? Allocator->getExprLoc() : RefExpr->getExprLoc();
3135  SourceRange AllocatorRange =
3136  Allocator ? Allocator->getSourceRange() : RefExpr->getSourceRange();
3137  SourceLocation PrevAllocatorLoc =
3138  PrevAllocator ? PrevAllocator->getExprLoc() : A->getLocation();
3139  SourceRange PrevAllocatorRange =
3140  PrevAllocator ? PrevAllocator->getSourceRange() : A->getRange();
3141  S.Diag(AllocatorLoc, diag::warn_omp_used_different_allocator)
3142  << (Allocator ? 1 : 0) << AllocatorStream.str()
3143  << (PrevAllocator ? 1 : 0) << PrevAllocatorStream.str()
3144  << AllocatorRange;
3145  S.Diag(PrevAllocatorLoc, diag::note_omp_previous_allocator)
3146  << PrevAllocatorRange;
3147  return true;
3148  }
3149  return false;
3150 }
3151 
3152 static void
3153 applyOMPAllocateAttribute(Sema &S, VarDecl *VD,
3154  OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind,
3155  Expr *Allocator, Expr *Alignment, SourceRange SR) {
3156  if (VD->hasAttr<OMPAllocateDeclAttr>())
3157  return;
3158  if (Alignment &&
3159  (Alignment->isTypeDependent() || Alignment->isValueDependent() ||
3160  Alignment->isInstantiationDependent() ||
3161  Alignment->containsUnexpandedParameterPack()))
3162  // Apply later when we have a usable value.
3163  return;
3164  if (Allocator &&
3165  (Allocator->isTypeDependent() || Allocator->isValueDependent() ||
3166  Allocator->isInstantiationDependent() ||
3167  Allocator->containsUnexpandedParameterPack()))
3168  return;
3169  auto *A = OMPAllocateDeclAttr::CreateImplicit(S.Context, AllocatorKind,
3170  Allocator, Alignment, SR);
3171  VD->addAttr(A);
3172  if (ASTMutationListener *ML = S.Context.getASTMutationListener())
3173  ML->DeclarationMarkedOpenMPAllocate(VD, A);
3174 }
3175 
3176 Sema::DeclGroupPtrTy
3177 Sema::ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList,
3178  ArrayRef<OMPClause *> Clauses,
3179  DeclContext *Owner) {
3180  assert(Clauses.size() <= 2 && "Expected at most two clauses.");
3181  Expr *Alignment = nullptr;
3182  Expr *Allocator = nullptr;
3183  if (Clauses.empty()) {
3184  // OpenMP 5.0, 2.11.3 allocate Directive, Restrictions.
3185  // allocate directives that appear in a target region must specify an
3186  // allocator clause unless a requires directive with the dynamic_allocators
3187  // clause is present in the same compilation unit.
3188  if (LangOpts.OpenMPIsDevice &&
3189  !DSAStack->hasRequiresDeclWithClause<OMPDynamicAllocatorsClause>())
3190  targetDiag(Loc, diag::err_expected_allocator_clause);
3191  } else {
3192  for (const OMPClause *C : Clauses)
3193  if (const auto *AC = dyn_cast<OMPAllocatorClause>(C))
3194  Allocator = AC->getAllocator();
3195  else if (const auto *AC = dyn_cast<OMPAlignClause>(C))
3196  Alignment = AC->getAlignment();
3197  else
3198  llvm_unreachable("Unexpected clause on allocate directive");
3199  }
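  // Editor's note, illustrative sketch (not part of the original source): the
  // device-side restriction handled above; 'DevVar' is a hypothetical name.
  //
  //   #pragma omp declare target
  //   int DevVar;
  //   #pragma omp allocate(DevVar)   // device compilation: error, an
  //                                  // allocator(...) clause is required
  //   #pragma omp end declare target
  //
  //   // ...unless the unit also has: #pragma omp requires dynamic_allocators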
3200  OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind =
3201  getAllocatorKind(*this, DSAStack, Allocator);
3202  SmallVector<Expr *, 8> Vars;
3203  for (Expr *RefExpr : VarList) {
3204  auto *DE = cast<DeclRefExpr>(RefExpr);
3205  auto *VD = cast<VarDecl>(DE->getDecl());
3206 
3207  // Check if this is a TLS variable or global register.
3208  if (VD->getTLSKind() != VarDecl::TLS_None ||
3209  VD->hasAttr<OMPThreadPrivateDeclAttr>() ||
3210  (VD->getStorageClass() == SC_Register && VD->hasAttr<AsmLabelAttr>() &&
3211  !VD->isLocalVarDecl()))
3212  continue;
3213 
3214  // If the variable is used several times in the allocate directive, the
3215  // same allocator must be used.
3216  if (checkPreviousOMPAllocateAttribute(*this, DSAStack, RefExpr, VD,
3217  AllocatorKind, Allocator))
3218  continue;
3219 
3220  // OpenMP, 2.11.3 allocate Directive, Restrictions, C / C++
3221  // If a list item has a static storage type, the allocator expression in the
3222  // allocator clause must be a constant expression that evaluates to one of
3223  // the predefined memory allocator values.
3224  if (Allocator && VD->hasGlobalStorage()) {
3225  if (AllocatorKind == OMPAllocateDeclAttr::OMPUserDefinedMemAlloc) {
3226  Diag(Allocator->getExprLoc(),
3227  diag::err_omp_expected_predefined_allocator)
3228  << Allocator->getSourceRange();
3229  bool IsDecl = VD->isThisDeclarationADefinition(Context) ==
3230  VarDecl::DeclarationOnly;
3231  Diag(VD->getLocation(),
3232  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
3233  << VD;
3234  continue;
3235  }
3236  }
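  // Editor's note, illustrative sketch (not part of the original source): the
  // predefined-allocator requirement enforced above; 'MyAlloc' is a
  // hypothetical user-defined allocator.
  //
  //   static int StaticVar;
  //   #pragma omp allocate(StaticVar) allocator(omp_cgroup_mem_alloc) // OK
  //   static int OtherVar;
  //   #pragma omp allocate(OtherVar) allocator(MyAlloc) // error: not one of
  //                                                     // the predefined
  //                                                     // allocators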
3237 
3238  Vars.push_back(RefExpr);
3239  applyOMPAllocateAttribute(*this, VD, AllocatorKind, Allocator, Alignment,
3240  DE->getSourceRange());
3241  }
3242  if (Vars.empty())
3243  return nullptr;
3244  if (!Owner)
3245  Owner = getCurLexicalContext();
3246  auto *D = OMPAllocateDecl::Create(Context, Owner, Loc, Vars, Clauses);
3247  D->setAccess(AS_public);
3248  Owner->addDecl(D);
3249  return DeclGroupPtrTy::make(DeclGroupRef(D));
3250 }
3251 
3252 Sema::DeclGroupPtrTy
3253 Sema::ActOnOpenMPRequiresDirective(SourceLocation Loc,
3254  ArrayRef<OMPClause *> ClauseList) {
3255  OMPRequiresDecl *D = nullptr;
3256  if (!CurContext->isFileContext()) {
3257  Diag(Loc, diag::err_omp_invalid_scope) << "requires";
3258  } else {
3259  D = CheckOMPRequiresDecl(Loc, ClauseList);
3260  if (D) {
3261  CurContext->addDecl(D);
3262  DSAStack->addRequiresDecl(D);
3263  }
3264  }
3265  return DeclGroupPtrTy::make(DeclGroupRef(D));
3266 }
3267 
3268 void Sema::ActOnOpenMPAssumesDirective(SourceLocation Loc,
3269  OpenMPDirectiveKind DKind,
3270  ArrayRef<std::string> Assumptions,
3271  bool SkippedClauses) {
3272  if (!SkippedClauses && Assumptions.empty())
3273  Diag(Loc, diag::err_omp_no_clause_for_directive)
3274  << llvm::omp::getAllAssumeClauseOptions()
3275  << llvm::omp::getOpenMPDirectiveName(DKind);
3276 
3277  auto *AA = AssumptionAttr::Create(Context, llvm::join(Assumptions, ","), Loc);
3278  if (DKind == llvm::omp::Directive::OMPD_begin_assumes) {
3279  OMPAssumeScoped.push_back(AA);
3280  return;
3281  }
3282 
3283  // Global assumes without assumption clauses are ignored.
3284  if (Assumptions.empty())
3285  return;
3286 
3287  assert(DKind == llvm::omp::Directive::OMPD_assumes &&
3288  "Unexpected omp assumption directive!");
3289  OMPAssumeGlobal.push_back(AA);
3290 
3291  // The OMPAssumeGlobal scope above will take care of new declarations but
3292  // we also want to apply the assumption to existing ones, e.g., to
3293  // declarations in included headers. To this end, we traverse all existing
3294  // declaration contexts and annotate function declarations here.
3295  SmallVector<DeclContext *, 8> DeclContexts;
3296  auto *Ctx = CurContext;
3297  while (Ctx->getLexicalParent())
3298  Ctx = Ctx->getLexicalParent();
3299  DeclContexts.push_back(Ctx);
3300  while (!DeclContexts.empty()) {
3301  DeclContext *DC = DeclContexts.pop_back_val();
3302  for (auto *SubDC : DC->decls()) {
3303  if (SubDC->isInvalidDecl())
3304  continue;
3305  if (auto *CTD = dyn_cast<ClassTemplateDecl>(SubDC)) {
3306  DeclContexts.push_back(CTD->getTemplatedDecl());
3307  llvm::append_range(DeclContexts, CTD->specializations());
3308  continue;
3309  }
3310  if (auto *DC = dyn_cast<DeclContext>(SubDC))
3311  DeclContexts.push_back(DC);
3312  if (auto *F = dyn_cast<FunctionDecl>(SubDC)) {
3313  F->addAttr(AA);
3314  continue;
3315  }
3316  }
3317  }
3318 }
3319 
3320 void Sema::ActOnOpenMPEndAssumesDirective() {
3321  assert(isInOpenMPAssumeScope() && "Not in OpenMP assumes scope!");
3322  OMPAssumeScoped.pop_back();
3323 }
3324 
3325 OMPRequiresDecl *Sema::CheckOMPRequiresDecl(SourceLocation Loc,
3326  ArrayRef<OMPClause *> ClauseList) {
3327  /// For target specific clauses, the requires directive cannot be
3328  /// specified after the handling of any of the target regions in the
3329  /// current compilation unit.
3330  ArrayRef<SourceLocation> TargetLocations =
3331  DSAStack->getEncounteredTargetLocs();
3332  SourceLocation AtomicLoc = DSAStack->getAtomicDirectiveLoc();
3333  if (!TargetLocations.empty() || !AtomicLoc.isInvalid()) {
3334  for (const OMPClause *CNew : ClauseList) {
3335  // Check if any of the requires clauses affect target regions.
3336  if (isa<OMPUnifiedSharedMemoryClause>(CNew) ||
3337  isa<OMPUnifiedAddressClause>(CNew) ||
3338  isa<OMPReverseOffloadClause>(CNew) ||
3339  isa<OMPDynamicAllocatorsClause>(CNew)) {
3340  Diag(Loc, diag::err_omp_directive_before_requires)
3341  << "target" << getOpenMPClauseName(CNew->getClauseKind());
3342  for (SourceLocation TargetLoc : TargetLocations) {
3343  Diag(TargetLoc, diag::note_omp_requires_encountered_directive)
3344  << "target";
3345  }
3346  } else if (!AtomicLoc.isInvalid() &&
3347  isa<OMPAtomicDefaultMemOrderClause>(CNew)) {
3348  Diag(Loc, diag::err_omp_directive_before_requires)
3349  << "atomic" << getOpenMPClauseName(CNew->getClauseKind());
3350  Diag(AtomicLoc, diag::note_omp_requires_encountered_directive)
3351  << "atomic";
3352  }
3353  }
3354  }
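  // Editor's note, illustrative sketch (not part of the original source): the
  // ordering rule diagnosed above.
  //
  //   void work() {
  //   #pragma omp target
  //     ;
  //   }
  //   #pragma omp requires unified_shared_memory // error: a target region
  //                                              // was already encountered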
3355 
3356  if (!DSAStack->hasDuplicateRequiresClause(ClauseList))
3357  return OMPRequiresDecl::Create(Context, getCurLexicalContext(), Loc,
3358  ClauseList);
3359  return nullptr;
3360 }
3361 
3362 static void reportOriginalDsa(Sema &SemaRef, const DSAStackTy *Stack,
3363  const ValueDecl *D,
3364  const DSAStackTy::DSAVarData &DVar,
3365  bool IsLoopIterVar) {
3366  if (DVar.RefExpr) {
3367  SemaRef.Diag(DVar.RefExpr->getExprLoc(), diag::note_omp_explicit_dsa)
3368  << getOpenMPClauseName(DVar.CKind);
3369  return;
3370  }
3371  enum {
3372  PDSA_StaticMemberShared,
3373  PDSA_StaticLocalVarShared,
3374  PDSA_LoopIterVarPrivate,
3375  PDSA_LoopIterVarLinear,
3376  PDSA_LoopIterVarLastprivate,
3377  PDSA_ConstVarShared,
3378  PDSA_GlobalVarShared,
3379  PDSA_TaskVarFirstprivate,
3380  PDSA_LocalVarPrivate,
3381  PDSA_Implicit
3382  } Reason = PDSA_Implicit;
3383  bool ReportHint = false;
3384  auto ReportLoc = D->getLocation();
3385  auto *VD = dyn_cast<VarDecl>(D);
3386  if (IsLoopIterVar) {
3387  if (DVar.CKind == OMPC_private)
3388  Reason = PDSA_LoopIterVarPrivate;
3389  else if (DVar.CKind == OMPC_lastprivate)
3390  Reason = PDSA_LoopIterVarLastprivate;
3391  else
3392  Reason = PDSA_LoopIterVarLinear;
3393  } else if (isOpenMPTaskingDirective(DVar.DKind) &&
3394  DVar.CKind == OMPC_firstprivate) {
3395  Reason = PDSA_TaskVarFirstprivate;
3396  ReportLoc = DVar.ImplicitDSALoc;
3397  } else if (VD && VD->isStaticLocal())
3398  Reason = PDSA_StaticLocalVarShared;
3399  else if (VD && VD->isStaticDataMember())
3400  Reason = PDSA_StaticMemberShared;
3401  else if (VD && VD->isFileVarDecl())
3402  Reason = PDSA_GlobalVarShared;
3403  else if (D->getType().isConstant(SemaRef.getASTContext()))
3404  Reason = PDSA_ConstVarShared;
3405  else if (VD && VD->isLocalVarDecl() && DVar.CKind == OMPC_private) {
3406  ReportHint = true;
3407  Reason = PDSA_LocalVarPrivate;
3408  }
3409  if (Reason != PDSA_Implicit) {
3410  SemaRef.Diag(ReportLoc, diag::note_omp_predetermined_dsa)
3411  << Reason << ReportHint
3412  << getOpenMPDirectiveName(Stack->getCurrentDirective());
3413  } else if (DVar.ImplicitDSALoc.isValid()) {
3414  SemaRef.Diag(DVar.ImplicitDSALoc, diag::note_omp_implicit_dsa)
3415  << getOpenMPClauseName(DVar.CKind);
3416  }
3417 }
3418 
3419 static OpenMPMapClauseKind
3420 getMapClauseKindFromModifier(OpenMPDefaultmapClauseModifier M,
3421  bool IsAggregateOrDeclareTarget) {
3422  OpenMPMapClauseKind Kind = OMPC_MAP_unknown;
3423  switch (M) {
3424  case OMPC_DEFAULTMAP_MODIFIER_alloc:
3425  Kind = OMPC_MAP_alloc;
3426  break;
3427  case OMPC_DEFAULTMAP_MODIFIER_to:
3428  Kind = OMPC_MAP_to;
3429  break;
3430  case OMPC_DEFAULTMAP_MODIFIER_from:
3431  Kind = OMPC_MAP_from;
3432  break;
3433  case OMPC_DEFAULTMAP_MODIFIER_tofrom:
3434  Kind = OMPC_MAP_tofrom;
3435  break;
3436  case OMPC_DEFAULTMAP_MODIFIER_present:
3437  // OpenMP 5.1 [2.21.7.3, defaultmap clause, Description]
3438  // If implicit-behavior is present, each variable referenced in the
3439  // construct in the category specified by variable-category is treated as if
3440  // it had been listed in a map clause with the map-type of alloc and
3441  // map-type-modifier of present.
3442  Kind = OMPC_MAP_alloc;
3443  break;
3444  case OMPC_DEFAULTMAP_MODIFIER_firstprivate:
3445  case OMPC_DEFAULTMAP_MODIFIER_last:
3446  llvm_unreachable("Unexpected defaultmap implicit behavior");
3447  case OMPC_DEFAULTMAP_MODIFIER_none:
3448  case OMPC_DEFAULTMAP_MODIFIER_default:
3449  case OMPC_DEFAULTMAP_MODIFIER_unknown:
3450  // IsAggregateOrDeclareTarget could be true if:
3451  // 1. the implicit behavior for aggregate is tofrom
3452  // 2. it's a declare target link
3453  if (IsAggregateOrDeclareTarget) {
3454  Kind = OMPC_MAP_tofrom;
3455  break;
3456  }
3457  llvm_unreachable("Unexpected defaultmap implicit behavior");
3458  }
3459  assert(Kind != OMPC_MAP_unknown && "Expect map kind to be known");
3460  return Kind;
3461 }
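// Editor's note, illustrative sketch (not part of the original source): how the
// 'present' implicit behavior mapped above shows up in user code; 'X' is a
// hypothetical scalar.
//
//   int X;
//   #pragma omp target defaultmap(present: scalar)
//   { X = 1; } // X is treated as if listed in map(present, alloc: X)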
3462 
3463 namespace {
3464 class DSAAttrChecker final : public StmtVisitor<DSAAttrChecker, void> {
3465  DSAStackTy *Stack;
3466  Sema &SemaRef;
3467  bool ErrorFound = false;
3468  bool TryCaptureCXXThisMembers = false;
3469  CapturedStmt *CS = nullptr;
3470  const static unsigned DefaultmapKindNum = OMPC_DEFAULTMAP_pointer + 1;
3471  llvm::SmallVector<Expr *, 4> ImplicitFirstprivate;
3472  llvm::SmallVector<Expr *, 4> ImplicitPrivate;
3473  llvm::SmallVector<Expr *, 4> ImplicitMap[DefaultmapKindNum][OMPC_MAP_delete];
3474  llvm::SmallVector<OpenMPMapModifierKind, NumberOfOMPMapClauseModifiers>
3475  ImplicitMapModifier[DefaultmapKindNum];
3476  Sema::VarsWithInheritedDSAType VarsWithInheritedDSA;
3477  llvm::SmallDenseSet<const ValueDecl *, 4> ImplicitDeclarations;
3478 
3479  void VisitSubCaptures(OMPExecutableDirective *S) {
3480  // Check implicitly captured variables.
3481  if (!S->hasAssociatedStmt() || !S->getAssociatedStmt())
3482  return;
3483  if (S->getDirectiveKind() == OMPD_atomic ||
3484  S->getDirectiveKind() == OMPD_critical ||
3485  S->getDirectiveKind() == OMPD_section ||
3486  S->getDirectiveKind() == OMPD_master ||
3487  S->getDirectiveKind() == OMPD_masked ||
3488  isOpenMPLoopTransformationDirective(S->getDirectiveKind())) {
3489  Visit(S->getAssociatedStmt());
3490  return;
3491  }
3492  visitSubCaptures(S->getInnermostCapturedStmt());
3493  // Try to capture inner this->member references to generate correct mappings
3494  // and diagnostics.
3495  if (TryCaptureCXXThisMembers ||
3496  (isOpenMPTargetExecutionDirective(Stack->getCurrentDirective()) &&
3497  llvm::any_of(S->getInnermostCapturedStmt()->captures(),
3498  [](const CapturedStmt::Capture &C) {
3499  return C.capturesThis();
3500  }))) {
3501  bool SavedTryCaptureCXXThisMembers = TryCaptureCXXThisMembers;
3502  TryCaptureCXXThisMembers = true;
3503  Visit(S->getInnermostCapturedStmt()->getCapturedStmt());
3504  TryCaptureCXXThisMembers = SavedTryCaptureCXXThisMembers;
3505  }
3506  // In tasks, firstprivates are not captured anymore, so they need to be
3507  // analyzed explicitly.
3508  if (isOpenMPTaskingDirective(S->getDirectiveKind()) &&
3509  !isOpenMPTaskLoopDirective(S->getDirectiveKind())) {
3510  for (OMPClause *C : S->clauses())
3511  if (auto *FC = dyn_cast<OMPFirstprivateClause>(C)) {
3512  for (Expr *Ref : FC->varlists())
3513  Visit(Ref);
3514  }
3515  }
3516  }
3517 
3518 public:
3519  void VisitDeclRefExpr(DeclRefExpr *E) {
3520  if (TryCaptureCXXThisMembers || E->isTypeDependent() ||
3521  E->isValueDependent() || E->containsUnexpandedParameterPack() ||
3522  E->isInstantiationDependent())
3523  return;
3524  if (auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
3525  // Check the datasharing rules for the expressions in the clauses.
3526  if (!CS || (isa<OMPCapturedExprDecl>(VD) && !CS->capturesVariable(VD) &&
3527  !Stack->getTopDSA(VD, /*FromParent=*/false).RefExpr)) {
3528  if (auto *CED = dyn_cast<OMPCapturedExprDecl>(VD))
3529  if (!CED->hasAttr<OMPCaptureNoInitAttr>()) {
3530  Visit(CED->getInit());
3531  return;
3532  }
3533  } else if (VD->isImplicit() || isa<OMPCapturedExprDecl>(VD))
3534  // Do not analyze internal variables and do not enclose them into
3535  // implicit clauses.
3536  return;
3537  VD = VD->getCanonicalDecl();
3538  // Skip internally declared variables.
3539  if (VD->hasLocalStorage() && CS && !CS->capturesVariable(VD) &&
3540  !Stack->isImplicitTaskFirstprivate(VD))
3541  return;
3542  // Skip allocators in uses_allocators clauses.
3543  if (Stack->isUsesAllocatorsDecl(VD).hasValue())
3544  return;
3545 
3546  DSAStackTy::DSAVarData DVar = Stack->getTopDSA(VD, /*FromParent=*/false);
3547  // Check if the variable has an explicit DSA set and stop the analysis if so.
3548  if (DVar.RefExpr || !ImplicitDeclarations.insert(VD).second)
3549  return;
3550 
3551  // Skip internally declared static variables.
3552  llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
3553  OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
3554  if (VD->hasGlobalStorage() && CS && !CS->capturesVariable(VD) &&
3555  (Stack->hasRequiresDeclWithClause<OMPUnifiedSharedMemoryClause>() ||
3556  !Res || *Res != OMPDeclareTargetDeclAttr::MT_Link) &&
3557  !Stack->isImplicitTaskFirstprivate(VD))
3558  return;
3559 
3560  SourceLocation ELoc = E->getExprLoc();
3561  OpenMPDirectiveKind DKind = Stack->getCurrentDirective();
3562  // The default(none) clause requires that each variable that is referenced
3563  // in the construct, and does not have a predetermined data-sharing
3564  // attribute, must have its data-sharing attribute explicitly determined
3565  // by being listed in a data-sharing attribute clause.
3566  if (DVar.CKind == OMPC_unknown &&
3567  (Stack->getDefaultDSA() == DSA_none ||
3568  Stack->getDefaultDSA() == DSA_private ||
3569  Stack->getDefaultDSA() == DSA_firstprivate) &&
3570  isImplicitOrExplicitTaskingRegion(DKind) &&
3571  VarsWithInheritedDSA.count(VD) == 0) {
3572  bool InheritedDSA = Stack->getDefaultDSA() == DSA_none;
3573  if (!InheritedDSA && (Stack->getDefaultDSA() == DSA_firstprivate ||
3574  Stack->getDefaultDSA() == DSA_private)) {
3575  DSAStackTy::DSAVarData DVar =
3576  Stack->getImplicitDSA(VD, /*FromParent=*/false);
3577  InheritedDSA = DVar.CKind == OMPC_unknown;
3578  }
3579  if (InheritedDSA)
3580  VarsWithInheritedDSA[VD] = E;
3581  if (Stack->getDefaultDSA() == DSA_none)
3582  return;
3583  }
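      // Editor's note, illustrative sketch (not part of the original source):
      // the default(none)/default(private)/default(firstprivate) handling
      // above in user code; 'N' is a hypothetical variable.
      //
      //   int N = 10;
      //   #pragma omp parallel default(none)           // error: 'N' must be
      //   { int Use = N; }                             // listed explicitly
      //   #pragma omp parallel default(none) shared(N) // OK
      //   { int Use = N; }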
3584 
3585  // OpenMP 5.0 [2.19.7.2, defaultmap clause, Description]
3586  // If implicit-behavior is none, each variable referenced in the
3587  // construct that does not have a predetermined data-sharing attribute
3588  // and does not appear in a to or link clause on a declare target
3589  // directive must be listed in a data-mapping attribute clause, a
3590  // data-sharing attribute clause (including a data-sharing attribute
3591  // clause on a combined construct where target is one of the
3592  // constituent constructs), or an is_device_ptr clause.
3593  OpenMPDefaultmapClauseKind ClauseKind =
3594  getVariableCategoryFromDecl(SemaRef.getLangOpts(), VD);
3595  if (SemaRef.getLangOpts().OpenMP >= 50) {
3596  bool IsModifierNone = Stack->getDefaultmapModifier(ClauseKind) ==
3597  OMPC_DEFAULTMAP_MODIFIER_none;
3598  if (DVar.CKind == OMPC_unknown && IsModifierNone &&
3599  VarsWithInheritedDSA.count(VD) == 0 && !Res) {
3600  // Only check for data-mapping attribute and is_device_ptr here
3601  // since we have already made sure that the declaration does not
3602  // have a data-sharing attribute above
3603  if (!Stack->checkMappableExprComponentListsForDecl(
3604  VD, /*CurrentRegionOnly=*/true,
3605  [VD](OMPClauseMappableExprCommon::MappableExprComponentListRef
3606  MapExprComponents,
3607  OpenMPClauseKind) {
3608  auto MI = MapExprComponents.rbegin();
3609  auto ME = MapExprComponents.rend();
3610  return MI != ME && MI->getAssociatedDeclaration() == VD;
3611  })) {
3612  VarsWithInheritedDSA[VD] = E;
3613  return;
3614  }
3615  }
3616  }
3617  if (SemaRef.getLangOpts().OpenMP > 50) {
3618  bool IsModifierPresent = Stack->getDefaultmapModifier(ClauseKind) ==
3619  OMPC_DEFAULTMAP_MODIFIER_present;
3620  if (IsModifierPresent) {
3621  if (llvm::find(ImplicitMapModifier[ClauseKind],
3622  OMPC_MAP_MODIFIER_present) ==
3623  std::end(ImplicitMapModifier[ClauseKind])) {
3624  ImplicitMapModifier[ClauseKind].push_back(
3625  OMPC_MAP_MODIFIER_present);
3626  }
3627  }
3628  }
3629 
3630  if (isOpenMPTargetExecutionDirective(DKind) &&
3631  !Stack->isLoopControlVariable(VD).first) {
3632  if (!Stack->checkMappableExprComponentListsForDecl(
3633  VD, /*CurrentRegionOnly=*/true,
3634  [this](OMPClauseMappableExprCommon::MappableExprComponentListRef
3635  StackComponents,
3636  OpenMPClauseKind) {
3637  if (SemaRef.LangOpts.OpenMP >= 50)
3638  return !StackComponents.empty();
3639  // Variable is used if it has been marked as an array, array
3640  // section, array shaping or the variable itself.
3641  return StackComponents.size() == 1 ||
3642  std::all_of(
3643  std::next(StackComponents.rbegin()),
3644  StackComponents.rend(),
3645  [](const OMPClauseMappableExprCommon::
3646  MappableComponent &MC) {
3647  return MC.getAssociatedDeclaration() ==
3648  nullptr &&
3649  (isa<OMPArraySectionExpr>(
3650  MC.getAssociatedExpression()) ||
3651  isa<OMPArrayShapingExpr>(
3652  MC.getAssociatedExpression()) ||
3653  isa<ArraySubscriptExpr>(
3654  MC.getAssociatedExpression()));
3655  });
3656  })) {
3657  bool IsFirstprivate = false;
3658  // By default lambdas are captured as firstprivates.
3659  if (const auto *RD =
3660  VD->getType().getNonReferenceType()->getAsCXXRecordDecl())
3661  IsFirstprivate = RD->isLambda();
3662  IsFirstprivate =
3663  IsFirstprivate || (Stack->mustBeFirstprivate(ClauseKind) && !Res);
3664  if (IsFirstprivate) {
3665  ImplicitFirstprivate.emplace_back(E);
3666  } else {
3667  OpenMPDefaultmapClauseModifier M =
3668  Stack->getDefaultmapModifier(ClauseKind);
3669  OpenMPMapClauseKind Kind = getMapClauseKindFromModifier(
3670  M, ClauseKind == OMPC_DEFAULTMAP_aggregate || Res);
3671  ImplicitMap[ClauseKind][Kind].emplace_back(E);
3672  }
3673  return;
3674  }
3675  }
3676 
3677  // OpenMP [2.9.3.6, Restrictions, p.2]
3678  // A list item that appears in a reduction clause of the innermost
3679  // enclosing worksharing or parallel construct may not be accessed in an
3680  // explicit task.
3681  DVar = Stack->hasInnermostDSA(
3682  VD,
3683  [](OpenMPClauseKind C, bool AppliedToPointee) {
3684  return C == OMPC_reduction && !AppliedToPointee;
3685  },
3686  [](OpenMPDirectiveKind K) {
3687  return isOpenMPParallelDirective(K) ||
3688  isOpenMPWorksharingDirective(K);
3689  },
3690  /*FromParent=*/true);
3691  if (isOpenMPTaskingDirective(DKind) && DVar.CKind == OMPC_reduction) {
3692  ErrorFound = true;
3693  SemaRef.Diag(ELoc, diag::err_omp_reduction_in_task);
3694  reportOriginalDsa(SemaRef, Stack, VD, DVar);
3695  return;
3696  }
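      // Editor's note, illustrative sketch (not part of the original source):
      // the reduction-in-task restriction diagnosed above; 'Sum' is a
      // hypothetical variable.
      //
      //   int Sum = 0;
      //   #pragma omp parallel reduction(+ : Sum)
      //   {
      //   #pragma omp task
      //     Sum += 1; // error: reduction list item accessed in explicit task
      //   }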
3697 
3698  // Define implicit data-sharing attributes for task.
3699  DVar = Stack->getImplicitDSA(VD, /*FromParent=*/false);
3700  if (((isOpenMPTaskingDirective(DKind) && DVar.CKind != OMPC_shared) ||
3701  (((Stack->getDefaultDSA() == DSA_firstprivate &&
3702  DVar.CKind == OMPC_firstprivate) ||
3703  (Stack->getDefaultDSA() == DSA_private &&
3704  DVar.CKind == OMPC_private)) &&
3705  !DVar.RefExpr)) &&
3706  !Stack->isLoopControlVariable(VD).first) {
3707  if (Stack->getDefaultDSA() == DSA_private)
3708  ImplicitPrivate.push_back(E);
3709  else
3710  ImplicitFirstprivate.push_back(E);
3711  return;
3712  }
3713 
3714  // Store implicitly used globals with declare target link for parent
3715  // target.
3716  if (!isOpenMPTargetExecutionDirective(DKind) && Res &&
3717  *Res == OMPDeclareTargetDeclAttr::MT_Link) {
3718  Stack->addToParentTargetRegionLinkGlobals(E);
3719  return;
3720  }
3721  }
3722  }
3723  void VisitMemberExpr(MemberExpr *E) {
3724  if (E->isTypeDependent() || E->isValueDependent() ||
3725  E->containsUnexpandedParameterPack() || E->isInstantiationDependent())
3726  return;
3727  auto *FD = dyn_cast<FieldDecl>(E->getMemberDecl());
3728  OpenMPDirectiveKind DKind = Stack->getCurrentDirective();
3729  if (auto *TE = dyn_cast<CXXThisExpr>(E->getBase()->IgnoreParenCasts())) {
3730  if (!FD)
3731  return;
3732  DSAStackTy::DSAVarData DVar = Stack->getTopDSA(FD, /*FromParent=*/false);
3733  // Check if the variable has an explicit DSA set and stop the analysis
3734  // if so.
3735  if (DVar.RefExpr || !ImplicitDeclarations.insert(FD).second)
3736  return;
3737 
3738  if (isOpenMPTargetExecutionDirective(DKind) &&
3739  !Stack->isLoopControlVariable(FD).first &&
3740  !Stack->checkMappableExprComponentListsForDecl(
3741  FD, /*CurrentRegionOnly=*/true,
3742  [](OMPClauseMappableExprCommon::MappableExprComponentListRef
3743  StackComponents,
3744  OpenMPClauseKind) {
3745  return isa<CXXThisExpr>(
3746  cast<MemberExpr>(
3747  StackComponents.back().getAssociatedExpression())
3748  ->getBase()
3749  ->IgnoreParens());
3750  })) {
3751  // OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, C/C++, p.3]
3752  // A bit-field cannot appear in a map clause.
3753  //
3754  if (FD->isBitField())
3755  return;
3756 
3757  // Check to see if the member expression is referencing a class that
3758  // has already been explicitly mapped
3759  if (Stack->isClassPreviouslyMapped(TE->getType()))
3760  return;
3761 
3762  OpenMPDefaultmapClauseModifier Modifier =
3763  Stack->getDefaultmapModifier(OMPC_DEFAULTMAP_aggregate);
3764  OpenMPDefaultmapClauseKind ClauseKind =
3765  getVariableCategoryFromDecl(SemaRef.getLangOpts(), FD);
3766  OpenMPMapClauseKind Kind = getMapClauseKindFromModifier(
3767  Modifier, /*IsAggregateOrDeclareTarget*/ true);
3768  ImplicitMap[ClauseKind][Kind].emplace_back(E);
3769  return;
3770  }
3771 
3772  SourceLocation ELoc = E->getExprLoc();
3773  // OpenMP [2.9.3.6, Restrictions, p.2]
3774  // A list item that appears in a reduction clause of the innermost
3775  // enclosing worksharing or parallel construct may not be accessed in
3776  // an explicit task.
3777  DVar = Stack->hasInnermostDSA(
3778  FD,
3779  [](OpenMPClauseKind C, bool AppliedToPointee) {
3780  return C == OMPC_reduction && !AppliedToPointee;
3781  },
3782  [](OpenMPDirectiveKind K) {
3783  return isOpenMPParallelDirective(K) ||
3784  isOpenMPWorksharingDirective(K);
3785  },
3786  /*FromParent=*/true);
3787  if (isOpenMPTaskingDirective(DKind) && DVar.CKind == OMPC_reduction) {
3788  ErrorFound = true;
3789  SemaRef.Diag(ELoc, diag::err_omp_reduction_in_task);
3790  reportOriginalDsa(SemaRef, Stack, FD, DVar);
3791  return;
3792  }
3793 
3794  // Define implicit data-sharing attributes for task.
3795  DVar = Stack->getImplicitDSA(FD, /*FromParent=*/false);
3796  if (isOpenMPTaskingDirective(DKind) && DVar.CKind != OMPC_shared &&
3797  !Stack->isLoopControlVariable(FD).first) {
3798  // Check if there is a captured expression for the current field in the
3799  // region. Do not mark it as firstprivate unless there is no captured
3800  // expression.
3801  // TODO: try to make it firstprivate.
3802  if (DVar.CKind != OMPC_unknown)
3803  ImplicitFirstprivate.push_back(E);
3804  }
3805  return;
3806  }
3807  if (isOpenMPTargetExecutionDirective(DKind)) {
3808  OMPClauseMappableExprCommon::MappableExprComponentList CurComponents;
3809  if (!checkMapClauseExpressionBase(SemaRef, E, CurComponents, OMPC_map,
3810  Stack->getCurrentDirective(),
3811  /*NoDiagnose=*/true))
3812  return;
3813  const auto *VD = cast<ValueDecl>(
3814  CurComponents.back().getAssociatedDeclaration()->getCanonicalDecl());
3815  if (!Stack->checkMappableExprComponentListsForDecl(
3816  VD, /*CurrentRegionOnly=*/true,
3817  [&CurComponents](
3818  OMPClauseMappableExprCommon::MappableExprComponentListRef
3819  StackComponents,
3820  OpenMPClauseKind) {
3821  auto CCI = CurComponents.rbegin();
3822  auto CCE = CurComponents.rend();
3823  for (const auto &SC : llvm::reverse(StackComponents)) {
3824  // Do both expressions have the same kind?
3825  if (CCI->getAssociatedExpression()->getStmtClass() !=
3826  SC.getAssociatedExpression()->getStmtClass())
3827  if (!((isa<OMPArraySectionExpr>(
3828  SC.getAssociatedExpression()) ||
3829  isa<OMPArrayShapingExpr>(
3830  SC.getAssociatedExpression())) &&
3831  isa<ArraySubscriptExpr>(
3832  CCI->getAssociatedExpression())))
3833  return false;
3834 
3835  const Decl *CCD = CCI->getAssociatedDeclaration();
3836  const Decl *SCD = SC.getAssociatedDeclaration();
3837  CCD = CCD ? CCD->getCanonicalDecl() : nullptr;
3838  SCD = SCD ? SCD->getCanonicalDecl() : nullptr;
3839  if (SCD != CCD)
3840  return false;
3841  std::advance(CCI, 1);
3842  if (CCI == CCE)
3843  break;
3844  }
3845  return true;
3846  })) {
3847  Visit(E->getBase());
3848  }
3849  } else if (!TryCaptureCXXThisMembers) {
3850  Visit(E->getBase());
3851  }
3852  }
3853  void VisitOMPExecutableDirective(OMPExecutableDirective *S) {
3854  for (OMPClause *C : S->clauses()) {
3855  // Skip analysis of arguments of private clauses for task|target
3856  // directives.
3857  if (isa_and_nonnull<OMPPrivateClause>(C))
3858  continue;
3859  // Skip analysis of arguments of implicitly defined firstprivate clause
3860  // for task|target directives.
3861  // Skip analysis of arguments of implicitly defined map clause for target
3862  // directives.
3863  if (C && !((isa<OMPFirstprivateClause>(C) || isa<OMPMapClause>(C)) &&
3864  C->isImplicit() &&
3865  !isOpenMPTaskingDirective(Stack->getCurrentDirective()))) {
3866  for (Stmt *CC : C->children()) {
3867  if (CC)
3868  Visit(CC);
3869  }
3870  }
3871  }
3872  // Check implicitly captured variables.
3873  VisitSubCaptures(S);
3874  }
3875 
3876  void VisitOMPLoopTransformationDirective(OMPLoopTransformationDirective *S) {
3877  // Loop transformation directives do not introduce data sharing
3878  VisitStmt(S);
3879  }
3880 
3881  void VisitCallExpr(CallExpr *S) {
3882  for (Stmt *C : S->arguments()) {
3883  if (C) {
3884  // Check implicitly captured variables in the task-based directives to
3885  // check if they must be firstprivatized.
3886  Visit(C);
3887  }
3888  }
3889  if (Expr *Callee = S->getCallee())
3890  if (auto *CE = dyn_cast<MemberExpr>(Callee->IgnoreParenImpCasts()))
3891  Visit(CE->getBase());
3892  }
3893  void VisitStmt(Stmt *S) {
3894  for (Stmt *C : S->children()) {
3895  if (C) {
3896  // Check implicitly captured variables in the task-based directives to
3897  // check if they must be firstprivatized.
3898  Visit(C);
3899  }
3900  }
3901  }
3902 
3903  void visitSubCaptures(CapturedStmt *S) {
3904  for (const CapturedStmt::Capture &Cap : S->captures()) {
3905  if (!Cap.capturesVariable() && !Cap.capturesVariableByCopy())
3906  continue;
3907  VarDecl *VD = Cap.getCapturedVar();
3908  // Do not try to map the variable if it or its sub-component was mapped
3909  // already.
3910  if (isOpenMPTargetExecutionDirective(Stack->getCurrentDirective()) &&
3911  Stack->checkMappableExprComponentListsForDecl(
3912  VD, /*CurrentRegionOnly=*/true,
3913  [](OMPClauseMappableExprCommon::MappableExprComponentListRef,
3914  OpenMPClauseKind) { return true; }))
3915  continue;
3916  DeclRefExpr *DRE = buildDeclRefExpr(
3917  SemaRef, VD, VD->getType().getNonLValueExprType(SemaRef.Context),
3918  Cap.getLocation(), /*RefersToCapture=*/true);
3919  Visit(DRE);
3920  }
3921  }
3922  bool isErrorFound() const { return ErrorFound; }
3923  ArrayRef<Expr *> getImplicitFirstprivate() const {
3924  return ImplicitFirstprivate;
3925  }
3926  ArrayRef<Expr *> getImplicitPrivate() const { return ImplicitPrivate; }
3927  ArrayRef<Expr *> getImplicitMap(OpenMPDefaultmapClauseKind DK,
3928  OpenMPMapClauseKind MK) const {
3929  return ImplicitMap[DK][MK];
3930  }
3931  ArrayRef<OpenMPMapModifierKind>
3932  getImplicitMapModifier(OpenMPDefaultmapClauseKind Kind) const {
3933  return ImplicitMapModifier[Kind];
3934  }
3935  const Sema::VarsWithInheritedDSAType &getVarsWithInheritedDSA() const {
3936  return VarsWithInheritedDSA;
3937  }
3938 
3939  DSAAttrChecker(DSAStackTy *S, Sema &SemaRef, CapturedStmt *CS)
3940  : Stack(S), SemaRef(SemaRef), ErrorFound(false), CS(CS) {
3941  // Process declare target link variables for the target directives.
3942  if (isOpenMPTargetExecutionDirective(S->getCurrentDirective())) {
3943  for (DeclRefExpr *E : Stack->getLinkGlobals())
3944  Visit(E);
3945  }
3946  }
3947 };
3948 } // namespace
3949 
3950 static void handleDeclareVariantConstructTrait(DSAStackTy *Stack,
3951  OpenMPDirectiveKind DKind,
3952  bool ScopeEntry) {
3953  SmallVector<llvm::omp::TraitProperty, 8> Traits;
3954  if (isOpenMPTargetExecutionDirective(DKind))
3955  Traits.emplace_back(llvm::omp::TraitProperty::construct_target_target);
3956  if (isOpenMPTeamsDirective(DKind))
3957  Traits.emplace_back(llvm::omp::TraitProperty::construct_teams_teams);
3958  if (isOpenMPParallelDirective(DKind))
3959  Traits.emplace_back(llvm::omp::TraitProperty::construct_parallel_parallel);
3960  if (isOpenMPWorksharingDirective(DKind))
3961  Traits.emplace_back(llvm::omp::TraitProperty::construct_for_for);
3962  if (isOpenMPSimdDirective(DKind))
3963  Traits.emplace_back(llvm::omp::TraitProperty::construct_simd_simd);
3964  Stack->handleConstructTrait(Traits, ScopeEntry);
3965 }
3966 
3967 void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
3968  switch (DKind) {
3969  case OMPD_parallel:
3970  case OMPD_parallel_for:
3971  case OMPD_parallel_for_simd:
3972  case OMPD_parallel_sections:
3973  case OMPD_parallel_master:
3974  case OMPD_parallel_loop:
3975  case OMPD_teams:
3976  case OMPD_teams_distribute:
3977  case OMPD_teams_distribute_simd: {
3978  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
3979  QualType KmpInt32PtrTy =
3980  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
3981  Sema::CapturedParamNameType Params[] = {
3982  std::make_pair(".global_tid.", KmpInt32PtrTy),
3983  std::make_pair(".bound_tid.", KmpInt32PtrTy),
3984  std::make_pair(StringRef(), QualType()) // __context with shared vars
3985  };
3986  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3987  Params);
3988  break;
3989  }
3990  case OMPD_target_teams:
3991  case OMPD_target_parallel:
3992  case OMPD_target_parallel_for:
3993  case OMPD_target_parallel_for_simd:
3994  case OMPD_target_teams_loop:
3995  case OMPD_target_parallel_loop:
3996  case OMPD_target_teams_distribute:
3997  case OMPD_target_teams_distribute_simd: {
3998  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
3999  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
4000  QualType KmpInt32PtrTy =
4001  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
4002  QualType Args[] = {VoidPtrTy};
4003  FunctionProtoType::ExtProtoInfo EPI;
4004  EPI.Variadic = true;
4005  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
4006  Sema::CapturedParamNameType Params[] = {
4007  std::make_pair(".global_tid.", KmpInt32Ty),
4008  std::make_pair(".part_id.", KmpInt32PtrTy),
4009  std::make_pair(".privates.", VoidPtrTy),
4010  std::make_pair(
4011  ".copy_fn.",
4012  Context.getPointerType(CopyFnType).withConst().withRestrict()),
4013  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
4014  std::make_pair(StringRef(), QualType()) // __context with shared vars
4015  };
4016  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4017  Params, /*OpenMPCaptureLevel=*/0);
4018  // Mark this captured region as inlined, because we don't use outlined
4019  // function directly.
4020  getCurCapturedRegion()->TheCapturedDecl->addAttr(
4021  AlwaysInlineAttr::CreateImplicit(
4022  Context, {}, AttributeCommonInfo::AS_Keyword,
4023  AlwaysInlineAttr::Keyword_forceinline));
4024  Sema::CapturedParamNameType ParamsTarget[] = {
4025  std::make_pair(StringRef(), QualType()) // __context with shared vars
4026  };
4027  // Start a captured region for 'target' with no implicit parameters.
4028  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4029  ParamsTarget, /*OpenMPCaptureLevel=*/1);
4030  Sema::CapturedParamNameType ParamsTeamsOrParallel[] = {
4031  std::make_pair(".global_tid.", KmpInt32PtrTy),
4032  std::make_pair(".bound_tid.", KmpInt32PtrTy),
4033  std::make_pair(StringRef(), QualType()) // __context with shared vars
4034  };
4035  // Start a captured region for 'teams' or 'parallel'. Both regions have
4036  // the same implicit parameters.
4037  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4038  ParamsTeamsOrParallel, /*OpenMPCaptureLevel=*/2);
4039  break;
4040  }
4041  case OMPD_target:
4042  case OMPD_target_simd: {
4043  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
4044  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
4045  QualType KmpInt32PtrTy =
4046  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
4047  QualType Args[] = {VoidPtrTy};
4048  FunctionProtoType::ExtProtoInfo EPI;
4049  EPI.Variadic = true;
4050  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
4051  Sema::CapturedParamNameType Params[] = {
4052  std::make_pair(".global_tid.", KmpInt32Ty),
4053  std::make_pair(".part_id.", KmpInt32PtrTy),
4054  std::make_pair(".privates.", VoidPtrTy),
4055  std::make_pair(
4056  ".copy_fn.",
4057  Context.getPointerType(CopyFnType).withConst().withRestrict()),
4058  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
4059  std::make_pair(StringRef(), QualType()) // __context with shared vars
4060  };
4061  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4062  Params, /*OpenMPCaptureLevel=*/0);
4063  // Mark this captured region as inlined, because we don't use outlined
4064  // function directly.
4065  getCurCapturedRegion()->TheCapturedDecl->addAttr(
4066  AlwaysInlineAttr::CreateImplicit(
4067  Context, {}, AttributeCommonInfo::AS_Keyword,
4068  AlwaysInlineAttr::Keyword_forceinline));
4069  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4070  std::make_pair(StringRef(), QualType()),
4071  /*OpenMPCaptureLevel=*/1);
4072  break;
4073  }
4074  case OMPD_atomic:
4075  case OMPD_critical:
4076  case OMPD_section:
4077  case OMPD_master:
4078  case OMPD_masked:
4079  case OMPD_tile:
4080  case OMPD_unroll:
4081  break;
4082  case OMPD_loop:
4083  // TODO: 'loop' may require additional parameters depending on the binding.
4084  // Treat similar to OMPD_simd/OMPD_for for now.
4085  case OMPD_simd:
4086  case OMPD_for:
4087  case OMPD_for_simd:
4088  case OMPD_sections:
4089  case OMPD_single:
4090  case OMPD_taskgroup:
4091  case OMPD_distribute:
4092  case OMPD_distribute_simd:
4093  case OMPD_ordered:
4094  case OMPD_target_data:
4095  case OMPD_dispatch: {
4096  Sema::CapturedParamNameType Params[] = {
4097  std::make_pair(StringRef(), QualType()) // __context with shared vars
4098  };
4099  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4100  Params);
4101  break;
4102  }
4103  case OMPD_task: {
4104  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
4105  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
4106  QualType KmpInt32PtrTy =
4107  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
4108  QualType Args[] = {VoidPtrTy};
4109  FunctionProtoType::ExtProtoInfo EPI;
4110  EPI.Variadic = true;
4111  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
4112  Sema::CapturedParamNameType Params[] = {
4113  std::make_pair(".global_tid.", KmpInt32Ty),
4114  std::make_pair(".part_id.", KmpInt32PtrTy),
4115  std::make_pair(".privates.", VoidPtrTy),
4116  std::make_pair(
4117  ".copy_fn.",
4118  Context.getPointerType(CopyFnType).withConst().withRestrict()),
4119  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
4120  std::make_pair(StringRef(), QualType()) // __context with shared vars
4121  };
4122  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4123  Params);
4124  // Mark this captured region as inlined, because we don't use outlined
4125  // function directly.
4126  getCurCapturedRegion()->TheCapturedDecl->addAttr(
4127  AlwaysInlineAttr::CreateImplicit(
4128  Context, {}, AttributeCommonInfo::AS_Keyword,
4129  AlwaysInlineAttr::Keyword_forceinline));
4130  break;
4131  }
4132  case OMPD_taskloop:
4133  case OMPD_taskloop_simd:
4134  case OMPD_master_taskloop:
4135  case OMPD_master_taskloop_simd: {
4136  QualType KmpInt32Ty =
4137  Context.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1)
4138  .withConst();
4139  QualType KmpUInt64Ty =
4140  Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0)
4141  .withConst();
4142  QualType KmpInt64Ty =
4143  Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1)
4144  .withConst();
4145  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
4146  QualType KmpInt32PtrTy =
4147  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
4148  QualType Args[] = {VoidPtrTy};
4149  FunctionProtoType::ExtProtoInfo EPI;
4150  EPI.Variadic = true;
4151  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
4152  Sema::CapturedParamNameType Params[] = {
4153  std::make_pair(".global_tid.", KmpInt32Ty),
4154  std::make_pair(".part_id.", KmpInt32PtrTy),
4155  std::make_pair(".privates.", VoidPtrTy),
4156  std::make_pair(
4157  ".copy_fn.",
4158  Context.getPointerType(CopyFnType).withConst().withRestrict()),
4159  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
4160  std::make_pair(".lb.", KmpUInt64Ty),
4161  std::make_pair(".ub.", KmpUInt64Ty),
4162  std::make_pair(".st.", KmpInt64Ty),
4163  std::make_pair(".liter.", KmpInt32Ty),
4164  std::make_pair(".reductions.", VoidPtrTy),
4165  std::make_pair(StringRef(), QualType()) // __context with shared vars
4166  };
4167  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4168  Params);
4169  // Mark this captured region as inlined, because we don't use outlined
4170  // function directly.
4171  getCurCapturedRegion()->TheCapturedDecl->addAttr(
4172  AlwaysInlineAttr::CreateImplicit(
4173  Context, {}, AttributeCommonInfo::AS_Keyword,
4174  AlwaysInlineAttr::Keyword_forceinline));
4175  break;
4176  }
4177  case OMPD_parallel_master_taskloop:
4178  case OMPD_parallel_master_taskloop_simd: {
4179  QualType KmpInt32Ty =
4180  Context.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1)
4181  .withConst();
4182  QualType KmpUInt64Ty =
4183  Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0)
4184  .withConst();
4185  QualType KmpInt64Ty =
4186  Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1)
4187  .withConst();
4188  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
4189  QualType KmpInt32PtrTy =
4190  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
4191  Sema::CapturedParamNameType ParamsParallel[] = {
4192  std::make_pair(".global_tid.", KmpInt32PtrTy),
4193  std::make_pair(".bound_tid.", KmpInt32PtrTy),
4194  std::make_pair(StringRef(), QualType()) // __context with shared vars
4195  };
4196  // Start a captured region for 'parallel'.
4197  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4198  ParamsParallel, /*OpenMPCaptureLevel=*/0);
4199  QualType Args[] = {VoidPtrTy};
4200  FunctionProtoType::ExtProtoInfo EPI;
4201  EPI.Variadic = true;
4202  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
4203  Sema::CapturedParamNameType Params[] = {
4204  std::make_pair(".global_tid.", KmpInt32Ty),
4205  std::make_pair(".part_id.", KmpInt32PtrTy),
4206  std::make_pair(".privates.", VoidPtrTy),
4207  std::make_pair(
4208  ".copy_fn.",
4209  Context.getPointerType(CopyFnType).withConst().withRestrict()),
4210  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
4211  std::make_pair(".lb.", KmpUInt64Ty),
4212  std::make_pair(".ub.", KmpUInt64Ty),
4213  std::make_pair(".st.", KmpInt64Ty),
4214  std::make_pair(".liter.", KmpInt32Ty),
4215  std::make_pair(".reductions.", VoidPtrTy),
4216  std::make_pair(StringRef(), QualType()) // __context with shared vars
4217  };
4218  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4219  Params, /*OpenMPCaptureLevel=*/1);
4220  // Mark this captured region as inlined, because we don't use outlined
4221  // function directly.
4222  getCurCapturedRegion()->TheCapturedDecl->addAttr(
4223  AlwaysInlineAttr::CreateImplicit(
4224  Context, {}, AttributeCommonInfo::AS_Keyword,
4225  AlwaysInlineAttr::Keyword_forceinline));
4226  break;
4227  }
4228  case OMPD_distribute_parallel_for_simd:
4229  case OMPD_distribute_parallel_for: {
4230  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
4231  QualType KmpInt32PtrTy =
4232  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
4233  Sema::CapturedParamNameType Params[] = {
4234  std::make_pair(".global_tid.", KmpInt32PtrTy),
4235  std::make_pair(".bound_tid.", KmpInt32PtrTy),
4236  std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
4237  std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
4238  std::make_pair(StringRef(), QualType()) // __context with shared vars
4239  };
4240  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4241  Params);
4242  break;
4243  }
4244  case OMPD_target_teams_distribute_parallel_for:
4245  case OMPD_target_teams_distribute_parallel_for_simd: {
4246  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
4247  QualType KmpInt32PtrTy =
4248  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
4249  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
4250 
4251  QualType Args[] = {VoidPtrTy};
4252  FunctionProtoType::ExtProtoInfo EPI;
4253  EPI.Variadic = true;
4254  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
4255  Sema::CapturedParamNameType Params[] = {
4256  std::make_pair(".global_tid.", KmpInt32Ty),
4257  std::make_pair(".part_id.", KmpInt32PtrTy),
4258  std::make_pair(".privates.", VoidPtrTy),
4259  std::make_pair(
4260  ".copy_fn.",
4261  Context.getPointerType(CopyFnType).withConst().withRestrict()),
4262  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
4263  std::make_pair(StringRef(), QualType()) // __context with shared vars
4264  };
4265  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4266  Params, /*OpenMPCaptureLevel=*/0);
4267  // Mark this captured region as inlined, because we don't use outlined
4268  // function directly.
4269  getCurCapturedRegion()->TheCapturedDecl->addAttr(
4270  AlwaysInlineAttr::CreateImplicit(
4271  Context, {}, AttributeCommonInfo::AS_Keyword,
4272  AlwaysInlineAttr::Keyword_forceinline));
4273  Sema::CapturedParamNameType ParamsTarget[] = {
4274  std::make_pair(StringRef(), QualType()) // __context with shared vars
4275  };
4276  // Start a captured region for 'target' with no implicit parameters.
4277  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4278  ParamsTarget, /*OpenMPCaptureLevel=*/1);
4279 
4280  Sema::CapturedParamNameType ParamsTeams[] = {
4281  std::make_pair(".global_tid.", KmpInt32PtrTy),
4282  std::make_pair(".bound_tid.", KmpInt32PtrTy),
4283  std::make_pair(StringRef(), QualType()) // __context with shared vars
4284  };
4285  // Start a captured region for 'teams'.
4286  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4287  ParamsTeams, /*OpenMPCaptureLevel=*/2);
4288 
4289  Sema::CapturedParamNameType ParamsParallel[] = {
4290  std::make_pair(".global_tid.", KmpInt32PtrTy),
4291  std::make_pair(".bound_tid.", KmpInt32PtrTy),
4292  std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
4293  std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
4294  std::make_pair(StringRef(), QualType()) // __context with shared vars
4295  };
4296  // Start a captured region for 'teams' or 'parallel'. Both regions have
4297  // the same implicit parameters.
4298  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4299  ParamsParallel, /*OpenMPCaptureLevel=*/3);
4300  break;
4301  }
4302 
4303  case OMPD_teams_loop: {
4304  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
4305  QualType KmpInt32PtrTy =
4306  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
4307 
4308  Sema::CapturedParamNameType ParamsTeams[] = {
4309  std::make_pair(".global_tid.", KmpInt32PtrTy),
4310  std::make_pair(".bound_tid.", KmpInt32PtrTy),
4311  std::make_pair(StringRef(), QualType()) // __context with shared vars
4312  };
4313  // Start a captured region for 'teams'.
4314  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4315  ParamsTeams, /*OpenMPCaptureLevel=*/0);
4316  break;
4317  }
4318 
4319  case OMPD_teams_distribute_parallel_for:
4320  case OMPD_teams_distribute_parallel_for_simd: {
4321  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
4322  QualType KmpInt32PtrTy =
4323  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
4324 
4325  Sema::CapturedParamNameType ParamsTeams[] = {
4326  std::make_pair(".global_tid.", KmpInt32PtrTy),
4327  std::make_pair(".bound_tid.", KmpInt32PtrTy),
4328  std::make_pair(StringRef(), QualType()) // __context with shared vars
4329  };
4330  // Start a captured region for 'teams'.
4331  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4332  ParamsTeams, /*OpenMPCaptureLevel=*/0);
4333 
4334  Sema::CapturedParamNameType ParamsParallel[] = {
4335  std::make_pair(".global_tid.", KmpInt32PtrTy),
4336  std::make_pair(".bound_tid.", KmpInt32PtrTy),
4337  std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
4338  std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
4339  std::make_pair(StringRef(), QualType()) // __context with shared vars
4340  };
4341  // Start a captured region for 'teams' or 'parallel'. Both regions have
4342  // the same implicit parameters.
4343  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4344  ParamsParallel, /*OpenMPCaptureLevel=*/1);
4345  break;
4346  }
4347  case OMPD_target_update:
4348  case OMPD_target_enter_data:
4349  case OMPD_target_exit_data: {
4350  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
4351  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
4352  QualType KmpInt32PtrTy =
4353  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
4354  QualType Args[] = {VoidPtrTy};
4355  FunctionProtoType::ExtProtoInfo EPI;
4356  EPI.Variadic = true;
4357  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
4358  Sema::CapturedParamNameType Params[] = {
4359  std::make_pair(".global_tid.", KmpInt32Ty),
4360  std::make_pair(".part_id.", KmpInt32PtrTy),
4361  std::make_pair(".privates.", VoidPtrTy),
4362  std::make_pair(
4363  ".copy_fn.",
4364  Context.getPointerType(CopyFnType).withConst().withRestrict()),
4365  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
4366  std::make_pair(StringRef(), QualType()) // __context with shared vars
4367  };
4368  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4369  Params);
4370  // Mark this captured region as inlined, because we don't use outlined
4371  // function directly.
4372  getCurCapturedRegion()->TheCapturedDecl->addAttr(
4373  AlwaysInlineAttr::CreateImplicit(
4374  Context, {}, AttributeCommonInfo::AS_Keyword,
4375  AlwaysInlineAttr::Keyword_forceinline));
4376  break;
4377  }
4378  case OMPD_threadprivate:
4379  case OMPD_allocate:
4380  case OMPD_taskyield:
4381  case OMPD_barrier:
4382  case OMPD_taskwait:
4383  case OMPD_cancellation_point:
4384  case OMPD_cancel:
4385  case OMPD_flush:
4386  case OMPD_depobj:
4387  case OMPD_scan:
4388  case OMPD_declare_reduction:
4389  case OMPD_declare_mapper:
4390  case OMPD_declare_simd:
4391  case OMPD_declare_target:
4392  case OMPD_end_declare_target:
4393  case OMPD_requires:
4394  case OMPD_declare_variant:
4395  case OMPD_begin_declare_variant:
4396  case OMPD_end_declare_variant:
4397  case OMPD_metadirective:
4398  llvm_unreachable("OpenMP Directive is not allowed");
4399  case OMPD_unknown:
4400  default:
4401  llvm_unreachable("Unknown OpenMP directive");
4402  }
4403  DSAStack->setContext(CurContext);
4404  handleDeclareVariantConstructTrait(DSAStack, DKind, /* ScopeEntry */ true);
4405 }
4406 
4407 int Sema::getNumberOfConstructScopes(unsigned Level) const {
4408  return getOpenMPCaptureLevels(DSAStack->getDirective(Level));
4409 }
4410 
4411 int Sema::getOpenMPCaptureLevels(OpenMPDirectiveKind DKind) {
4412  SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
4413  getOpenMPCaptureRegions(CaptureRegions, DKind);
4414  return CaptureRegions.size();
4415 }
4416 
4417 static OMPCapturedExprDecl *buildCaptureDecl(Sema &S, IdentifierInfo *Id,
4418  Expr *CaptureExpr, bool WithInit,
4419  bool AsExpression) {
4420  assert(CaptureExpr);
4421  ASTContext &C = S.getASTContext();
4422  Expr *Init = AsExpression ? CaptureExpr : CaptureExpr->IgnoreImpCasts();
4423  QualType Ty = Init->getType();
4424  if (CaptureExpr->getObjectKind() == OK_Ordinary && CaptureExpr->isGLValue()) {
4425  if (S.getLangOpts().CPlusPlus) {
4426  Ty = C.getLValueReferenceType(Ty);
4427  } else {
4428  Ty = C.getPointerType(Ty);
4429  ExprResult Res =
4430  S.CreateBuiltinUnaryOp(CaptureExpr->getExprLoc(), UO_AddrOf, Init);
4431  if (!Res.isUsable())
4432  return nullptr;
4433  Init = Res.get();
4434  }
4435  WithInit = true;
4436  }
4437  auto *CED = OMPCapturedExprDecl::Create(C, S.CurContext, Id, Ty,
4438  CaptureExpr->getBeginLoc());
4439  if (!WithInit)
4440  CED->addAttr(OMPCaptureNoInitAttr::CreateImplicit(C));
4441  S.CurContext->addHiddenDecl(CED);
4442  Sema::TentativeAnalysisScope Trap(S);
4443  S.AddInitializerToDecl(CED, Init, /*DirectInit=*/false);
4444  return CED;
4445 }
4446 
4447 static DeclRefExpr *buildCapture(Sema &S, ValueDecl *D, Expr *CaptureExpr,
4448  bool WithInit) {
4449  OMPCapturedExprDecl *CD;
4450  if (VarDecl *VD = S.isOpenMPCapturedDecl(D))
4451  CD = cast<OMPCapturedExprDecl>(VD);
4452  else
4453  CD = buildCaptureDecl(S, D->getIdentifier(), CaptureExpr, WithInit,
4454  /*AsExpression=*/false);
4455  return buildDeclRefExpr(S, CD, CD->getType().getNonReferenceType(),
4456  CaptureExpr->getExprLoc());
4457 }
4458 
4459 static ExprResult buildCapture(Sema &S, Expr *CaptureExpr, DeclRefExpr *&Ref) {
4460  CaptureExpr = S.DefaultLvalueConversion(CaptureExpr).get();
4461  if (!Ref) {
4462  OMPCapturedExprDecl *CD = buildCaptureDecl(
4463  S, &S.getASTContext().Idents.get(".capture_expr."), CaptureExpr,
4464  /*WithInit=*/true, /*AsExpression=*/true);
4465  Ref = buildDeclRefExpr(S, CD, CD->getType().getNonReferenceType(),
4466  CaptureExpr->getExprLoc());
4467  }
4468  ExprResult Res = Ref;
4469  if (!S.getLangOpts().CPlusPlus &&
4470  CaptureExpr->getObjectKind() == OK_Ordinary && CaptureExpr->isGLValue() &&
4471  Ref->getType()->isPointerType()) {
4472  Res = S.CreateBuiltinUnaryOp(CaptureExpr->getExprLoc(), UO_Deref, Ref);
4473  if (!Res.isUsable())
4474  return ExprError();
4475  }
4476  return S.DefaultLvalueConversion(Res.get());
4477 }
4478 
4479 namespace {
4480 // OpenMP directives parsed in this section are represented as a
4481 // CapturedStatement with an associated statement. If a syntax error
4482 // is detected during the parsing of the associated statement, the
4483 // compiler must abort processing and close the CapturedStatement.
4484 //
4485 // Combined directives such as 'target parallel' have more than one
4486 // nested CapturedStatements. This RAII ensures that we unwind out
4487 // of all the nested CapturedStatements when an error is found.
4488 class CaptureRegionUnwinderRAII {
4489 private:
4490  Sema &S;
4491  bool &ErrorFound;
4492  OpenMPDirectiveKind DKind = OMPD_unknown;
4493 
4494 public:
4495  CaptureRegionUnwinderRAII(Sema &S, bool &ErrorFound,
4496  OpenMPDirectiveKind DKind)
4497  : S(S), ErrorFound(ErrorFound), DKind(DKind) {}
4498  ~CaptureRegionUnwinderRAII() {
4499  if (ErrorFound) {
4500  int ThisCaptureLevel = S.getOpenMPCaptureLevels(DKind);
4501  while (--ThisCaptureLevel >= 0)
4502  S.ActOnCapturedRegionError();
4503  }
4504  }
4505 };
4506 } // namespace
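// Illustrative sketch (an editorial example, not from the original source):
// a combined directive such as 'target parallel' opens one nested CapturedStmt
// per capture region, so on error the RAII destructor above calls
// ActOnCapturedRegionError() once per level reported by
// getOpenMPCaptureLevels(), e.g.
//
//   #pragma omp target parallel
//   { /* a parse error here unwinds every enclosing captured region */ }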
4507 
4508 void Sema::tryCaptureOpenMPLambdas(ValueDecl *V) {
4509  // Capture variables captured by reference in lambdas for target-based
4510  // directives.
4511  if (!CurContext->isDependentContext() &&
4512  (isOpenMPTargetExecutionDirective(DSAStack->getCurrentDirective()) ||
4513  isOpenMPTargetDataManagementDirective(
4514  DSAStack->getCurrentDirective()))) {
4515  QualType Type = V->getType();
4516  if (const auto *RD = Type.getCanonicalType()
4517  .getNonReferenceType()
4518  ->getAsCXXRecordDecl()) {
4519  bool SavedForceCaptureByReferenceInTargetExecutable =
4520  DSAStack->isForceCaptureByReferenceInTargetExecutable();
4521  DSAStack->setForceCaptureByReferenceInTargetExecutable(
4522  /*V=*/true);
4523  if (RD->isLambda()) {
4524  llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
4525  FieldDecl *ThisCapture;
4526  RD->getCaptureFields(Captures, ThisCapture);
4527  for (const LambdaCapture &LC : RD->captures()) {
4528  if (LC.getCaptureKind() == LCK_ByRef) {
4529  VarDecl *VD = LC.getCapturedVar();
4530  DeclContext *VDC = VD->getDeclContext();
4531  if (!VDC->Encloses(CurContext))
4532  continue;
4533  MarkVariableReferenced(LC.getLocation(), VD);
4534  } else if (LC.getCaptureKind() == LCK_This) {
4535  QualType ThisTy = getCurrentThisType();
4536  if (!ThisTy.isNull() &&
4537  Context.typesAreCompatible(ThisTy, ThisCapture->getType()))
4538  CheckCXXThisCapture(LC.getLocation());
4539  }
4540  }
4541  }
4542  DSAStack->setForceCaptureByReferenceInTargetExecutable(
4543  SavedForceCaptureByReferenceInTargetExecutable);
4544  }
4545  }
4546 }
4547 
4548 static bool checkOrderedOrderSpecified(Sema &S,
4549  const ArrayRef<OMPClause *> Clauses) {
4550  const OMPOrderedClause *Ordered = nullptr;
4551  const OMPOrderClause *Order = nullptr;
4552 
4553  for (const OMPClause *Clause : Clauses) {
4554  if (Clause->getClauseKind() == OMPC_ordered)
4555  Ordered = cast<OMPOrderedClause>(Clause);
4556  else if (Clause->getClauseKind() == OMPC_order) {
4557  Order = cast<OMPOrderClause>(Clause);
4558  if (Order->getKind() != OMPC_ORDER_concurrent)
4559  Order = nullptr;
4560  }
4561  if (Ordered && Order)
4562  break;
4563  }
4564 
4565  if (Ordered && Order) {
4566  S.Diag(Order->getKindKwLoc(),
4567  diag::err_omp_simple_clause_incompatible_with_ordered)
4568  << getOpenMPClauseName(OMPC_order)
4569  << getOpenMPSimpleClauseTypeName(OMPC_order, OMPC_ORDER_concurrent)
4570  << SourceRange(Order->getBeginLoc(), Order->getEndLoc());
4571  S.Diag(Ordered->getBeginLoc(), diag::note_omp_ordered_param)
4572  << 0 << SourceRange(Ordered->getBeginLoc(), Ordered->getEndLoc());
4573  return true;
4574  }
4575  return false;
4576 }
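// Illustrative example (editorial, not from the original source) of the
// incompatibility checked above: 'order(concurrent)' and 'ordered' may not
// appear on the same directive, so the following would be diagnosed with
// err_omp_simple_clause_incompatible_with_ordered:
//
//   #pragma omp for ordered order(concurrent)
//   for (int i = 0; i < n; ++i)
//     work(i);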
4577 
4578 StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
4579  ArrayRef<OMPClause *> Clauses) {
4580  handleDeclareVariantConstructTrait(DSAStack, DSAStack->getCurrentDirective(),
4581  /* ScopeEntry */ false);
4582  if (DSAStack->getCurrentDirective() == OMPD_atomic ||
4583  DSAStack->getCurrentDirective() == OMPD_critical ||
4584  DSAStack->getCurrentDirective() == OMPD_section ||
4585  DSAStack->getCurrentDirective() == OMPD_master ||
4586  DSAStack->getCurrentDirective() == OMPD_masked)
4587  return S;
4588 
4589  bool ErrorFound = false;
4590  CaptureRegionUnwinderRAII CaptureRegionUnwinder(
4591  *this, ErrorFound, DSAStack->getCurrentDirective());
4592  if (!S.isUsable()) {
4593  ErrorFound = true;
4594  return StmtError();
4595  }
4596 
4597  SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
4598  getOpenMPCaptureRegions(CaptureRegions, DSAStack->getCurrentDirective());
4599  OMPOrderedClause *OC = nullptr;
4600  OMPScheduleClause *SC = nullptr;
4601  SmallVector<const OMPLinearClause *, 4> LCs;
4602  SmallVector<const OMPClauseWithPreInit *, 4> PICs;
4603  // This is required for proper codegen.
4604  for (OMPClause *Clause : Clauses) {
4605  if (!LangOpts.OpenMPSimd &&
4606  isOpenMPTaskingDirective(DSAStack->getCurrentDirective()) &&
4607  Clause->getClauseKind() == OMPC_in_reduction) {
4608  // Capture taskgroup task_reduction descriptors inside the tasking regions
4609  // with the corresponding in_reduction items.
4610  auto *IRC = cast<OMPInReductionClause>(Clause);
4611  for (Expr *E : IRC->taskgroup_descriptors())
4612  if (E)
4613  MarkDeclarationsReferencedInExpr(E);
4614  }
4615  if (isOpenMPPrivate(Clause->getClauseKind()) ||
4616  Clause->getClauseKind() == OMPC_copyprivate ||
4617  (getLangOpts().OpenMPUseTLS &&
4618  getASTContext().getTargetInfo().isTLSSupported() &&
4619  Clause->getClauseKind() == OMPC_copyin)) {
4620  DSAStack->setForceVarCapturing(Clause->getClauseKind() == OMPC_copyin);
4621  // Mark all variables in private list clauses as used in inner region.
4622  for (Stmt *VarRef : Clause->children()) {
4623  if (auto *E = cast_or_null<Expr>(VarRef)) {
4624  MarkDeclarationsReferencedInExpr(E);
4625  }
4626  }
4627  DSAStack->setForceVarCapturing(/*V=*/false);
4628  } else if (isOpenMPLoopTransformationDirective(
4629  DSAStack->getCurrentDirective())) {
4630  assert(CaptureRegions.empty() &&
4631  "No captured regions in loop transformation directives.");
4632  } else if (CaptureRegions.size() > 1 ||
4633  CaptureRegions.back() != OMPD_unknown) {
4634  if (auto *C = OMPClauseWithPreInit::get(Clause))
4635  PICs.push_back(C);
4636  if (auto *C = OMPClauseWithPostUpdate::get(Clause)) {
4637  if (Expr *E = C->getPostUpdateExpr())
4638  MarkDeclarationsReferencedInExpr(E);
4639  }
4640  }
4641  if (Clause->getClauseKind() == OMPC_schedule)
4642  SC = cast<OMPScheduleClause>(Clause);
4643  else if (Clause->getClauseKind() == OMPC_ordered)
4644  OC = cast<OMPOrderedClause>(Clause);
4645  else if (Clause->getClauseKind() == OMPC_linear)
4646  LCs.push_back(cast<OMPLinearClause>(Clause));
4647  }
4648  // Capture allocator expressions if used.
4649  for (Expr *E : DSAStack->getInnerAllocators())
4650  MarkDeclarationsReferencedInExpr(E);
4651  // OpenMP, 2.7.1 Loop Construct, Restrictions
4652  // The nonmonotonic modifier cannot be specified if an ordered clause is
4653  // specified.
4654  if (SC &&
4655  (SC->getFirstScheduleModifier() == OMPC_SCHEDULE_MODIFIER_nonmonotonic ||
4656  SC->getSecondScheduleModifier() ==
4657  OMPC_SCHEDULE_MODIFIER_nonmonotonic) &&
4658  OC) {
4659  Diag(SC->getFirstScheduleModifier() == OMPC_SCHEDULE_MODIFIER_nonmonotonic
4660  ? SC->getFirstScheduleModifierLoc()
4661  : SC->getSecondScheduleModifierLoc(),
4662  diag::err_omp_simple_clause_incompatible_with_ordered)
4663  << getOpenMPClauseName(OMPC_schedule)
4664  << getOpenMPSimpleClauseTypeName(OMPC_schedule,
4665  OMPC_SCHEDULE_MODIFIER_nonmonotonic)
4666  << SourceRange(OC->getBeginLoc(), OC->getEndLoc());
4667  ErrorFound = true;
4668  }
4669  // OpenMP 5.0, 2.9.2 Worksharing-Loop Construct, Restrictions.
4670  // If an order(concurrent) clause is present, an ordered clause may not appear
4671  // on the same directive.
4672  if (checkOrderedOrderSpecified(*this, Clauses))
4673  ErrorFound = true;
4674  if (!LCs.empty() && OC && OC->getNumForLoops()) {
4675  for (const OMPLinearClause *C : LCs) {
4676  Diag(C->getBeginLoc(), diag::err_omp_linear_ordered)
4677  << SourceRange(OC->getBeginLoc(), OC->getEndLoc());
4678  }
4679  ErrorFound = true;
4680  }
4681  if (isOpenMPWorksharingDirective(DSAStack->getCurrentDirective()) &&
4682  isOpenMPSimdDirective(DSAStack->getCurrentDirective()) && OC &&
4683  OC->getNumForLoops()) {
4684  Diag(OC->getBeginLoc(), diag::err_omp_ordered_simd)
4685  << getOpenMPDirectiveName(DSAStack->getCurrentDirective());
4686  ErrorFound = true;
4687  }
4688  if (ErrorFound) {
4689  return StmtError();
4690  }
4691  StmtResult SR = S;
4692  unsigned CompletedRegions = 0;
4693  for (OpenMPDirectiveKind ThisCaptureRegion : llvm::reverse(CaptureRegions)) {
4694  // Mark all variables in private list clauses as used in inner region.
4695  // Required for proper codegen of combined directives.
4696  // TODO: add processing for other clauses.
4697  if (ThisCaptureRegion != OMPD_unknown) {
4698  for (const clang::OMPClauseWithPreInit *C : PICs) {
4699  OpenMPDirectiveKind CaptureRegion = C->getCaptureRegion();
4700  // Find the particular capture region for the clause if the
4701  // directive is a combined one with multiple capture regions.
4702  // If the directive is not a combined one, the capture region
4703  // associated with the clause is OMPD_unknown and is generated
4704  // only once.
4705  if (CaptureRegion == ThisCaptureRegion ||
4706  CaptureRegion == OMPD_unknown) {
4707  if (auto *DS = cast_or_null<DeclStmt>(C->getPreInitStmt())) {
4708  for (Decl *D : DS->decls())
4709  MarkVariableReferenced(D->getLocation(), cast<VarDecl>(D));
4710  }
4711  }
4712  }
4713  }
4714  if (ThisCaptureRegion == OMPD_target) {
4715  // Capture allocator traits in the target region. They are used implicitly
4716  // and, thus, are not captured by default.
4717  for (OMPClause *C : Clauses) {
4718  if (const auto *UAC = dyn_cast<OMPUsesAllocatorsClause>(C)) {
4719  for (unsigned I = 0, End = UAC->getNumberOfAllocators(); I < End;
4720  ++I) {
4721  OMPUsesAllocatorsClause::Data D = UAC->getAllocatorData(I);
4722  if (Expr *E = D.AllocatorTraits)
4723  MarkDeclarationsReferencedInExpr(E);
4724  }
4725  continue;
4726  }
4727  }
4728  }
4729  if (ThisCaptureRegion == OMPD_parallel) {
4730  // Capture temp arrays for inscan reductions and locals in aligned
4731  // clauses.
4732  for (OMPClause *C : Clauses) {
4733  if (auto *RC = dyn_cast<OMPReductionClause>(C)) {
4734  if (RC->getModifier() != OMPC_REDUCTION_inscan)
4735  continue;
4736  for (Expr *E : RC->copy_array_temps())
4737  MarkDeclarationsReferencedInExpr(E);
4738  }
4739  if (auto *AC = dyn_cast<OMPAlignedClause>(C)) {
4740  for (Expr *E : AC->varlists())
4741  MarkDeclarationsReferencedInExpr(E);
4742  }
4743  }
4744  }
4745  if (++CompletedRegions == CaptureRegions.size())
4746  DSAStack->setBodyComplete();
4747  SR = ActOnCapturedRegionEnd(SR.get());
4748  }
4749  return SR;
4750 }
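// Illustrative example (editorial, not from the original source) of the
// schedule/ordered restriction enforced above (OpenMP 2.7.1, Loop Construct,
// Restrictions): the nonmonotonic modifier cannot be combined with an ordered
// clause, so
//
//   #pragma omp for schedule(nonmonotonic: dynamic) ordered
//   for (int i = 0; i < n; ++i)
//     work(i);
//
// is rejected, while the same loop without 'ordered' is accepted.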
4751 
4752 static bool checkCancelRegion(Sema &SemaRef, OpenMPDirectiveKind CurrentRegion,
4753  OpenMPDirectiveKind CancelRegion,
4754  SourceLocation StartLoc) {
4755  // CancelRegion is only needed for cancel and cancellation_point.
4756  if (CurrentRegion != OMPD_cancel && CurrentRegion != OMPD_cancellation_point)
4757  return false;
4758 
4759  if (CancelRegion == OMPD_parallel || CancelRegion == OMPD_for ||
4760  CancelRegion == OMPD_sections || CancelRegion == OMPD_taskgroup)
4761  return false;
4762 
4763  SemaRef.Diag(StartLoc, diag::err_omp_wrong_cancel_region)
4764  << getOpenMPDirectiveName(CancelRegion);
4765  return true;
4766 }
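// Illustrative example (editorial, not from the original source): only
// parallel, for, sections, and taskgroup are accepted as construct-type
// clauses for cancellation, so a directive such as
//
//   #pragma omp cancel simd
//
// would be diagnosed with err_omp_wrong_cancel_region.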
4767 
4768 static bool checkNestingOfRegions(Sema &SemaRef, const DSAStackTy *Stack,
4769  OpenMPDirectiveKind CurrentRegion,
4770  const DeclarationNameInfo &CurrentName,
4771  OpenMPDirectiveKind CancelRegion,
4772  OpenMPBindClauseKind BindKind,
4773  SourceLocation StartLoc) {
4774  if (Stack->getCurScope()) {
4775  OpenMPDirectiveKind ParentRegion = Stack->getParentDirective();
4776  OpenMPDirectiveKind OffendingRegion = ParentRegion;
4777  bool NestingProhibited = false;
4778  bool CloseNesting = true;
4779  bool OrphanSeen = false;
4780  enum {
4781  NoRecommend,
4782  ShouldBeInParallelRegion,
4783  ShouldBeInOrderedRegion,
4784  ShouldBeInTargetRegion,
4785  ShouldBeInTeamsRegion,
4786  ShouldBeInLoopSimdRegion,
4787  } Recommend = NoRecommend;
4788  if (isOpenMPSimdDirective(ParentRegion) &&
4789  ((SemaRef.LangOpts.OpenMP <= 45 && CurrentRegion != OMPD_ordered) ||
4790  (SemaRef.LangOpts.OpenMP >= 50 && CurrentRegion != OMPD_ordered &&
4791  CurrentRegion != OMPD_simd && CurrentRegion != OMPD_atomic &&
4792  CurrentRegion != OMPD_scan))) {
4793  // OpenMP [2.16, Nesting of Regions]
4794  // OpenMP constructs may not be nested inside a simd region.
4795  // OpenMP [2.8.1,simd Construct, Restrictions]
4796  // An ordered construct with the simd clause is the only OpenMP
4797  // construct that can appear in the simd region.
4798  // Allowing a SIMD construct nested in another SIMD construct is an
4799  // extension. The OpenMP 4.5 spec does not allow it. Issue a warning
4800  // message.
4801  // OpenMP 5.0 [2.9.3.1, simd Construct, Restrictions]
4802  // The only OpenMP constructs that can be encountered during execution of
4803  // a simd region are the atomic construct, the loop construct, the simd
4804  // construct and the ordered construct with the simd clause.
4805  SemaRef.Diag(StartLoc, (CurrentRegion != OMPD_simd)
4806  ? diag::err_omp_prohibited_region_simd
4807  : diag::warn_omp_nesting_simd)
4808  << (SemaRef.LangOpts.OpenMP >= 50 ? 1 : 0);
4809  return CurrentRegion != OMPD_simd;
4810  }
4811  if (ParentRegion == OMPD_atomic) {
4812  // OpenMP [2.16, Nesting of Regions]
4813  // OpenMP constructs may not be nested inside an atomic region.
4814  SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region_atomic);
4815  return true;
4816  }
4817  if (CurrentRegion == OMPD_section) {
4818  // OpenMP [2.7.2, sections Construct, Restrictions]
4819  // Orphaned section directives are prohibited. That is, the section
4820  // directives must appear within the sections construct and must not be
4821  // encountered elsewhere in the sections region.
4822  if (ParentRegion != OMPD_sections &&
4823  ParentRegion != OMPD_parallel_sections) {
4824  SemaRef.Diag(StartLoc, diag::err_omp_orphaned_section_directive)
4825  << (ParentRegion != OMPD_unknown)
4826  << getOpenMPDirectiveName(ParentRegion);
4827  return true;
4828  }
4829  return false;
4830  }
4831  // Allow some constructs (except teams and cancellation constructs) to be
4832  // orphaned (they could be used in functions, called from OpenMP regions
4833  // with the required preconditions).
4834  if (ParentRegion == OMPD_unknown &&
4835  !isOpenMPNestingTeamsDirective(CurrentRegion) &&
4836  CurrentRegion != OMPD_cancellation_point &&
4837  CurrentRegion != OMPD_cancel && CurrentRegion != OMPD_scan)
4838  return false;
4839  if (CurrentRegion == OMPD_cancellation_point ||
4840  CurrentRegion == OMPD_cancel) {
4841  // OpenMP [2.16, Nesting of Regions]
4842  // A cancellation point construct for which construct-type-clause is
4843  // taskgroup must be nested inside a task construct. A cancellation
4844  // point construct for which construct-type-clause is not taskgroup must
4845  // be closely nested inside an OpenMP construct that matches the type
4846  // specified in construct-type-clause.
4847  // A cancel construct for which construct-type-clause is taskgroup must be
4848  // nested inside a task construct. A cancel construct for which
4849  // construct-type-clause is not taskgroup must be closely nested inside an
4850  // OpenMP construct that matches the type specified in
4851  // construct-type-clause.
4852  NestingProhibited =
4853  !((CancelRegion == OMPD_parallel &&
4854  (ParentRegion == OMPD_parallel ||
4855  ParentRegion == OMPD_target_parallel)) ||
4856  (CancelRegion == OMPD_for &&
4857  (ParentRegion == OMPD_for || ParentRegion == OMPD_parallel_for ||
4858  ParentRegion == OMPD_target_parallel_for ||
4859  ParentRegion == OMPD_distribute_parallel_for ||
4860  ParentRegion == OMPD_teams_distribute_parallel_for ||
4861  ParentRegion == OMPD_target_teams_distribute_parallel_for)) ||
4862  (CancelRegion == OMPD_taskgroup &&
4863  (ParentRegion == OMPD_task ||
4864  (SemaRef.getLangOpts().OpenMP >= 50 &&
4865  (ParentRegion == OMPD_taskloop ||
4866  ParentRegion == OMPD_master_taskloop ||
4867  ParentRegion == OMPD_parallel_master_taskloop)))) ||
4868  (CancelRegion == OMPD_sections &&
4869  (ParentRegion == OMPD_section || ParentRegion == OMPD_sections ||
4870  ParentRegion == OMPD_parallel_sections)));
4871  OrphanSeen = ParentRegion == OMPD_unknown;
4872  } else if (CurrentRegion == OMPD_master || CurrentRegion == OMPD_masked) {
4873  // OpenMP 5.1 [2.22, Nesting of Regions]
4874  // A masked region may not be closely nested inside a worksharing, loop,
4875  // atomic, task, or taskloop region.
4876  NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
4877  isOpenMPGenericLoopDirective(ParentRegion) ||
4878  isOpenMPTaskingDirective(ParentRegion);
4879  } else if (CurrentRegion == OMPD_critical && CurrentName.getName()) {
4880  // OpenMP [2.16, Nesting of Regions]
4881  // A critical region may not be nested (closely or otherwise) inside a
4882  // critical region with the same name. Note that this restriction is not
4883  // sufficient to prevent deadlock.
4884  SourceLocation PreviousCriticalLoc;
4885  bool DeadLock = Stack->hasDirective(
4886  [CurrentName, &PreviousCriticalLoc](OpenMPDirectiveKind K,
4887  const DeclarationNameInfo &DNI,
4888  SourceLocation Loc) {
4889  if (K == OMPD_critical && DNI.getName() == CurrentName.getName()) {
4890  PreviousCriticalLoc = Loc;
4891  return true;
4892  }
4893  return false;
4894  },
4895  false /* skip top directive */);
4896  if (DeadLock) {
4897  SemaRef.Diag(StartLoc,
4898  diag::err_omp_prohibited_region_critical_same_name)
4899  << CurrentName.getName();
4900  if (PreviousCriticalLoc.isValid())
4901  SemaRef.Diag(PreviousCriticalLoc,
4902  diag::note_omp_previous_critical_region);
4903  return true;
4904  }
4905  } else if (CurrentRegion == OMPD_barrier) {
4906  // OpenMP 5.1 [2.22, Nesting of Regions]
4907  // A barrier region may not be closely nested inside a worksharing, loop,
4908  // task, taskloop, critical, ordered, atomic, or masked region.
4909  NestingProhibited =
4910  isOpenMPWorksharingDirective(ParentRegion) ||
4911  isOpenMPGenericLoopDirective(ParentRegion) ||
4912  isOpenMPTaskingDirective(ParentRegion) ||
4913  ParentRegion == OMPD_master || ParentRegion == OMPD_masked ||
4914  ParentRegion == OMPD_parallel_master ||
4915  ParentRegion == OMPD_critical || ParentRegion == OMPD_ordered;
4916  } else if (isOpenMPWorksharingDirective(CurrentRegion) &&
4917  !isOpenMPParallelDirective(CurrentRegion) &&
4918  !isOpenMPTeamsDirective(CurrentRegion)) {
4919  // OpenMP 5.1 [2.22, Nesting of Regions]
4920  // A loop region that binds to a parallel region or a worksharing region
4921  // may not be closely nested inside a worksharing, loop, task, taskloop,
4922  // critical, ordered, atomic, or masked region.
4923  NestingProhibited =
4924  isOpenMPWorksharingDirective(ParentRegion) ||
4925  isOpenMPGenericLoopDirective(ParentRegion) ||
4926  isOpenMPTaskingDirective(ParentRegion) ||
4927  ParentRegion == OMPD_master || ParentRegion == OMPD_masked ||
4928  ParentRegion == OMPD_parallel_master ||
4929  ParentRegion == OMPD_critical || ParentRegion == OMPD_ordered;
4930  Recommend = ShouldBeInParallelRegion;
4931  } else if (CurrentRegion == OMPD_ordered) {
4932  // OpenMP [2.16, Nesting of Regions]
4933  // An ordered region may not be closely nested inside a critical,
4934  // atomic, or explicit task region.
4935  // An ordered region must be closely nested inside a loop region (or
4936  // parallel loop region) with an ordered clause.
4937  // OpenMP [2.8.1,simd Construct, Restrictions]
4938  // An ordered construct with the simd clause is the only OpenMP construct
4939  // that can appear in the simd region.
4940  NestingProhibited = ParentRegion == OMPD_critical ||
4941  isOpenMPTaskingDirective(ParentRegion) ||
4942  !(isOpenMPSimdDirective(ParentRegion) ||
4943  Stack->isParentOrderedRegion());
4944  Recommend = ShouldBeInOrderedRegion;
4945  } else if (isOpenMPNestingTeamsDirective(CurrentRegion)) {
4946  // OpenMP [2.16, Nesting of Regions]
4947  // If specified, a teams construct must be contained within a target
4948  // construct.
4949  NestingProhibited =
4950  (SemaRef.LangOpts.OpenMP <= 45 && ParentRegion != OMPD_target) ||
4951  (SemaRef.LangOpts.OpenMP >= 50 && ParentRegion != OMPD_unknown &&
4952  ParentRegion != OMPD_target);
4953  OrphanSeen = ParentRegion == OMPD_unknown;
4954  Recommend = ShouldBeInTargetRegion;
4955  } else if (CurrentRegion == OMPD_scan) {
4956  // OpenMP 5.0 [2.9.6, scan Directive, Restrictions]
4957  // A scan region must be closely nested inside a simd, for, for simd,
4958  // parallel for, or parallel for simd region.
4959  NestingProhibited =
4960  SemaRef.LangOpts.OpenMP < 50 ||
4961  (ParentRegion != OMPD_simd && ParentRegion != OMPD_for &&
4962  ParentRegion != OMPD_for_simd && ParentRegion != OMPD_parallel_for &&
4963  ParentRegion != OMPD_parallel_for_simd);
4964  OrphanSeen = ParentRegion == OMPD_unknown;
4965  Recommend = ShouldBeInLoopSimdRegion;
4966  }
4967  if (!NestingProhibited &&
4968  !isOpenMPTargetExecutionDirective(CurrentRegion) &&
4969  !isOpenMPTargetDataManagementDirective(CurrentRegion) &&
4970  (ParentRegion == OMPD_teams || ParentRegion == OMPD_target_teams)) {
4971  // OpenMP [5.1, 2.22, Nesting of Regions]
4972  // distribute, distribute simd, distribute parallel worksharing-loop,
4973  // distribute parallel worksharing-loop SIMD, loop, parallel regions,
4974  // including any parallel regions arising from combined constructs,
4975  // omp_get_num_teams() regions, and omp_get_team_num() regions are the
4976  // only OpenMP regions that may be strictly nested inside the teams
4977  // region.
4978  NestingProhibited = !isOpenMPParallelDirective(CurrentRegion) &&
4979  !isOpenMPDistributeDirective(CurrentRegion) &&
4980  CurrentRegion != OMPD_loop;
4981  Recommend = ShouldBeInParallelRegion;
4982  }
4983  if (!NestingProhibited && CurrentRegion == OMPD_loop) {
4984  // OpenMP [5.1, 2.11.7, loop Construct, Restrictions]
4985  // If the bind clause is present on the loop construct and binding is
4986  // teams then the corresponding loop region must be strictly nested inside
4987  // a teams region.
4988  NestingProhibited = BindKind == OMPC_BIND_teams &&
4989  ParentRegion != OMPD_teams &&
4990  ParentRegion != OMPD_target_teams;
4991  Recommend = ShouldBeInTeamsRegion;
4992  }
4993  if (!NestingProhibited &&
4994  isOpenMPNestingDistributeDirective(CurrentRegion)) {
4995  // OpenMP 4.5 [2.17 Nesting of Regions]
4996  // The region associated with the distribute construct must be strictly
4997  // nested inside a teams region
4998  NestingProhibited =
4999  (ParentRegion != OMPD_teams && ParentRegion != OMPD_target_teams);
5000  Recommend = ShouldBeInTeamsRegion;
5001  }
5002  if (!NestingProhibited &&
5003  (isOpenMPTargetExecutionDirective(CurrentRegion) ||
5004  isOpenMPTargetDataManagementDirective(CurrentRegion))) {
5005  // OpenMP 4.5 [2.17 Nesting of Regions]
5006  // If a target, target update, target data, target enter data, or
5007  // target exit data construct is encountered during execution of a
5008  // target region, the behavior is unspecified.
5009  NestingProhibited = Stack->hasDirective(
5010  [&OffendingRegion](OpenMPDirectiveKind K, const DeclarationNameInfo &,
5011  SourceLocation) {
5012  if (isOpenMPTargetExecutionDirective(K)) {
5013  OffendingRegion = K;
5014  return true;
5015  }
5016  return false;
5017  },
5018  false /* don't skip top directive */);
5019  CloseNesting = false;
5020  }
5021  if (NestingProhibited) {
5022  if (OrphanSeen) {
5023  SemaRef.Diag(StartLoc, diag::err_omp_orphaned_device_directive)
5024  << getOpenMPDirectiveName(CurrentRegion) << Recommend;
5025  } else {
5026  SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region)
5027  << CloseNesting << getOpenMPDirectiveName(OffendingRegion)
5028  << Recommend << getOpenMPDirectiveName(CurrentRegion);
5029  }
5030  return true;
5031  }
5032  }
5033  return false;
5034 }
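// Illustrative example (editorial, not from the original source) of a nesting
// violation this routine reports: a barrier region may not be closely nested
// inside a worksharing region, so the inner directive below triggers
// err_omp_prohibited_region:
//
//   #pragma omp for
//   for (int i = 0; i < n; ++i) {
//   #pragma omp barrier
//   }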
5035 
5036 struct Kind2Unsigned {
5037  using argument_type = OpenMPDirectiveKind;
5038  unsigned operator()(argument_type DK) { return unsigned(DK); }
5039 };
5040 static bool checkIfClauses(Sema &S, OpenMPDirectiveKind Kind,
5041  ArrayRef<OMPClause *> Clauses,
5042  ArrayRef<OpenMPDirectiveKind> AllowedNameModifiers) {
5043  bool ErrorFound = false;
5044  unsigned NamedModifiersNumber = 0;
5045  llvm::IndexedMap<const OMPIfClause *, Kind2Unsigned> FoundNameModifiers;
5046  FoundNameModifiers.resize(llvm::omp::Directive_enumSize + 1);
5047  SmallVector<SourceLocation, 4> NameModifierLoc;
5048  for (const OMPClause *C : Clauses) {
5049  if (const auto *IC = dyn_cast_or_null<OMPIfClause>(C)) {
5050  // At most one if clause without a directive-name-modifier can appear on
5051  // the directive.
5052  OpenMPDirectiveKind CurNM = IC->getNameModifier();
5053  if (FoundNameModifiers[CurNM]) {
5054  S.Diag(C->getBeginLoc(), diag::err_omp_more_one_clause)
5055  << getOpenMPDirectiveName(Kind) << getOpenMPClauseName(OMPC_if)
5056  << (CurNM != OMPD_unknown) << getOpenMPDirectiveName(CurNM);
5057  ErrorFound = true;
5058  } else if (CurNM != OMPD_unknown) {
5059  NameModifierLoc.push_back(IC->getNameModifierLoc());
5060  ++NamedModifiersNumber;
5061  }
5062  FoundNameModifiers[CurNM] = IC;
5063  if (CurNM == OMPD_unknown)
5064  continue;
5065  // Check if the specified name modifier is allowed for the current
5066  // directive.
5067  // At most one if clause with the particular directive-name-modifier can
5068  // appear on the directive.
5069  if (!llvm::is_contained(AllowedNameModifiers, CurNM)) {
5070  S.Diag(IC->getNameModifierLoc(),
5071  diag::err_omp_wrong_if_directive_name_modifier)
5072  << getOpenMPDirectiveName(CurNM) << getOpenMPDirectiveName(Kind);
5073  ErrorFound = true;
5074  }
5075  }
5076  }
5077  // If any if clause on the directive includes a directive-name-modifier then
5078  // all if clauses on the directive must include a directive-name-modifier.
5079  if (FoundNameModifiers[OMPD_unknown] && NamedModifiersNumber > 0) {
5080  if (NamedModifiersNumber == AllowedNameModifiers.size()) {
5081  S.Diag(FoundNameModifiers[OMPD_unknown]->getBeginLoc(),
5082  diag::err_omp_no_more_if_clause);
5083  } else {
5084  std::string Values;
5085  std::string Sep(", ");
5086  unsigned AllowedCnt = 0;
5087  unsigned TotalAllowedNum =
5088  AllowedNameModifiers.size() - NamedModifiersNumber;
5089  for (unsigned Cnt = 0, End = AllowedNameModifiers.size(); Cnt < End;
5090  ++Cnt) {
5091  OpenMPDirectiveKind NM = AllowedNameModifiers[Cnt];
5092  if (!FoundNameModifiers[NM]) {
5093  Values += "'";
5094  Values += getOpenMPDirectiveName(NM);
5095  Values += "'";
5096  if (AllowedCnt + 2 == TotalAllowedNum)
5097  Values += " or ";
5098  else if (AllowedCnt + 1 != TotalAllowedNum)
5099  Values += Sep;
5100  ++AllowedCnt;
5101  }
5102  }
5103  S.Diag(FoundNameModifiers[OMPD_unknown]->getCondition()->getBeginLoc(),
5104  diag::err_omp_unnamed_if_clause)
5105  << (TotalAllowedNum > 1) << Values;
5106  }
5107  for (SourceLocation Loc : NameModifierLoc) {
5108  S.Diag(Loc, diag::note_omp_previous_named_if_clause);
5109  }
5110  ErrorFound = true;
5111  }
5112  return ErrorFound;
5113 }
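// Illustrative example (editorial, not from the original source): on a
// combined directive, once one if clause carries a directive-name-modifier,
// all of them must, e.g.
//
//   #pragma omp target parallel if(target: c1) if(c2)            // error: err_omp_unnamed_if_clause
//   #pragma omp target parallel if(target: c1) if(parallel: c2)  // OK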
5114 
5115 static std::pair<ValueDecl *, bool> getPrivateItem(Sema &S, Expr *&RefExpr,
5116  SourceLocation &ELoc,
5117  SourceRange &ERange,
5118  bool AllowArraySection = false) {
5119  if (RefExpr->isTypeDependent() || RefExpr->isValueDependent() ||
5120  RefExpr->containsUnexpandedParameterPack())
5121  return std::make_pair(nullptr, true);
5122 
5123  // OpenMP [3.1, C/C++]
5124  // A list item is a variable name.
5125  // OpenMP [2.9.3.3, Restrictions, p.1]
5126  // A variable that is part of another variable (as an array or
5127  // structure element) cannot appear in a private clause.
5128  RefExpr = RefExpr->IgnoreParens();
5129  enum {
5130  NoArrayExpr = -1,
5131  ArraySubscript = 0,
5132  OMPArraySection = 1
5133  } IsArrayExpr = NoArrayExpr;
5134  if (AllowArraySection) {
5135  if (auto *ASE = dyn_cast_or_null<ArraySubscriptExpr>(RefExpr)) {
5136  Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
5137  while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
5138  Base = TempASE->getBase()->IgnoreParenImpCasts();
5139  RefExpr = Base;
5140  IsArrayExpr = ArraySubscript;
5141  } else if (auto *OASE = dyn_cast_or_null<OMPArraySectionExpr>(RefExpr)) {
5142  Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
5143  while (auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
5144  Base = TempOASE->getBase()->IgnoreParenImpCasts();
5145  while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
5146  Base = TempASE->getBase()->IgnoreParenImpCasts();
5147  RefExpr = Base;
5148  IsArrayExpr = OMPArraySection;
5149  }
5150  }
5151  ELoc = RefExpr->getExprLoc();
5152  ERange = RefExpr->getSourceRange();
5153  RefExpr = RefExpr->IgnoreParenImpCasts();
5154  auto *DE = dyn_cast_or_null<DeclRefExpr>(RefExpr);
5155  auto *ME = dyn_cast_or_null<MemberExpr>(RefExpr);
5156  if ((!DE || !isa<VarDecl>(DE->getDecl())) &&
5157  (S.getCurrentThisType().isNull() || !ME ||
5158  !isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts()) ||
5159  !isa<FieldDecl>(ME->getMemberDecl()))) {
5160  if (IsArrayExpr != NoArrayExpr) {
5161  S.Diag(ELoc, diag::err_omp_expected_base_var_name)
5162  << IsArrayExpr << ERange;
5163  } else {
5164  S.Diag(ELoc,
5165  AllowArraySection
5166  ? diag::err_omp_expected_var_name_member_expr_or_array_item
5167  : diag::err_omp_expected_var_name_member_expr)
5168  << (S.getCurrentThisType().isNull() ? 0 : 1) << ERange;
5169  }
5170  return std::make_pair(nullptr, false);
5171  }
5172  return std::make_pair(
5173  getCanonicalDecl(DE ? DE->getDecl() : ME->getMemberDecl()), false);
5174 }
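// Illustrative example (editorial, not from the original source): a list item
// must name a variable (or, when AllowArraySection is true, the base variable
// of an array subscript or section), so a clause argument such as
// 'private(s.x)' for an ordinary variable 's' is rejected
// (err_omp_expected_var_name_member_expr when array sections are not allowed).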
5175 
5176 namespace {
5177 /// Checks if the allocator is used in uses_allocators clause to be allowed in
5178 /// target regions.
5179 class AllocatorChecker final : public ConstStmtVisitor<AllocatorChecker, bool> {
5180  DSAStackTy *S = nullptr;
5181 
5182 public:
5183  bool VisitDeclRefExpr(const DeclRefExpr *E) {
5184  return S->isUsesAllocatorsDecl(E->getDecl())
5185  .getValueOr(
5186  DSAStackTy::UsesAllocatorsDeclKind::AllocatorTrait) ==
5187  DSAStackTy::UsesAllocatorsDeclKind::AllocatorTrait;
5188  }
5189  bool VisitStmt(const Stmt *S) {
5190  for (const Stmt *Child : S->children()) {
5191  if (Child && Visit(Child))
5192  return true;
5193  }
5194  return false;
5195  }
5196  explicit AllocatorChecker(DSAStackTy *S) : S(S) {}
5197 };
5198 } // namespace
5199 
5200 static void checkAllocateClauses(Sema &S, DSAStackTy *Stack,
5201  ArrayRef<OMPClause *> Clauses) {
5202  assert(!S.CurContext->isDependentContext() &&
5203  "Expected non-dependent context.");
5204  auto AllocateRange =
5205  llvm::make_filter_range(Clauses, OMPAllocateClause::classof);
5206  llvm::DenseMap<CanonicalDeclPtr<Decl>, CanonicalDeclPtr<VarDecl>> DeclToCopy;
5207  auto PrivateRange = llvm::make_filter_range(Clauses, [](const OMPClause *C) {
5208  return isOpenMPPrivate(C->getClauseKind());
5209  });
5210  for (OMPClause *Cl : PrivateRange) {
5211  MutableArrayRef<Expr *>::iterator I, It, Et;
5212  if (Cl->getClauseKind() == OMPC_private) {
5213  auto *PC = cast<OMPPrivateClause>(Cl);
5214  I = PC->private_copies().begin();
5215  It = PC->varlist_begin();
5216  Et = PC->varlist_end();
5217  } else if (Cl->getClauseKind() == OMPC_firstprivate) {
5218  auto *PC = cast<OMPFirstprivateClause>(Cl);
5219  I = PC->private_copies().begin();
5220  It = PC->varlist_begin();
5221  Et = PC->varlist_end();
5222  } else if (Cl->getClauseKind() == OMPC_lastprivate) {
5223  auto *PC = cast<OMPLastprivateClause>(Cl);
5224  I = PC->private_copies().begin();
5225  It = PC->varlist_begin();
5226  Et = PC->varlist_end();
5227  } else if (Cl->getClauseKind() == OMPC_linear) {
5228  auto *PC = cast<OMPLinearClause>(Cl);
5229  I = PC->privates().begin();
5230  It = PC->varlist_begin();
5231  Et = PC->varlist_end();
5232  } else if (Cl->getClauseKind() == OMPC_reduction) {
5233  auto *PC = cast<OMPReductionClause>(Cl);
5234  I = PC->privates().begin();
5235  It = PC->varlist_begin();
5236  Et = PC->varlist_end();
5237  } else if (Cl->getClauseKind() == OMPC_task_reduction) {
5238  auto *PC = cast<OMPTaskReductionClause>(Cl);
5239  I = PC->privates().begin();
5240  It = PC->varlist_begin();
5241  Et = PC->varlist_end();
5242  } else if (Cl->getClauseKind() == OMPC_in_reduction) {
5243  auto *PC = cast<OMPInReductionClause>(Cl);
5244  I = PC->privates().begin();
5245  It = PC->varlist_begin();
5246  Et = PC->varlist_end();
5247  } else {
5248  llvm_unreachable("Expected private clause.");
5249  }
5250  for (Expr *E : llvm::make_range(It, Et)) {
5251  if (!*I) {
5252  ++I;
5253  continue;
5254  }
5255  SourceLocation ELoc;
5256  SourceRange ERange;
5257  Expr *SimpleRefExpr = E;
5258  auto Res = getPrivateItem(S, SimpleRefExpr, ELoc, ERange,
5259  /*AllowArraySection=*/true);
5260  DeclToCopy.try_emplace(Res.first,
5261  cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()));
5262  ++I;
5263  }
5264  }
5265  for (OMPClause *C : AllocateRange) {
5266  auto *AC = cast<OMPAllocateClause>(C);
5267  if (S.getLangOpts().OpenMP >= 50 &&
5268  !Stack->hasRequiresDeclWithClause<OMPDynamicAllocatorsClause>() &&
5269  isOpenMPTargetExecutionDirective(Stack->getCurrentDirective()) &&
5270  AC->getAllocator()) {
5271  Expr *Allocator = AC->getAllocator();
5272  // OpenMP, 2.12.5 target Construct
5273  // Memory allocators that do not appear in a uses_allocators clause cannot
5274  // appear as an allocator in an allocate clause or be used in the target
5275  // region unless a requires directive with the dynamic_allocators clause
5276  // is present in the same compilation unit.
5277  AllocatorChecker Checker(Stack);
5278  if (Checker.Visit(Allocator))
5279  S.Diag(Allocator->getExprLoc(),
5280  diag::err_omp_allocator_not_in_uses_allocators)
5281  << Allocator->getSourceRange();
5282  }
5283  OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind =
5284  getAllocatorKind(S, Stack, AC->getAllocator());
5285  // OpenMP, 2.11.4 allocate Clause, Restrictions.
5286  // For task, taskloop or target directives, allocation requests to memory
5287  // allocators with the trait access set to thread result in unspecified
5288  // behavior.
5289  if (AllocatorKind == OMPAllocateDeclAttr::OMPThreadMemAlloc &&
5290  (isOpenMPTaskingDirective(Stack->getCurrentDirective()) ||
5291  isOpenMPTargetExecutionDirective(Stack->getCurrentDirective()))) {
5292  S.Diag(AC->getAllocator()->getExprLoc(),
5293  diag::warn_omp_allocate_thread_on_task_target_directive)
5294  << getOpenMPDirectiveName(Stack->getCurrentDirective());
5295  }
5296  for (Expr *E : AC->varlists()) {
5297  SourceLocation ELoc;
5298  SourceRange ERange;
5299  Expr *SimpleRefExpr = E;
5300  auto Res = getPrivateItem(S, SimpleRefExpr, ELoc, ERange);
5301  ValueDecl *VD = Res.first;
5302  DSAStackTy::DSAVarData Data = Stack->getTopDSA(VD, /*FromParent=*/false);
5303  if (!isOpenMPPrivate(Data.CKind)) {
5304  S.Diag(E->getExprLoc(),
5305  diag::err_omp_expected_private_copy_for_allocate);
5306  continue;
5307  }
5308  VarDecl *PrivateVD = DeclToCopy[VD];
5309  if (checkPreviousOMPAllocateAttribute(S, Stack, E, PrivateVD,
5310  AllocatorKind, AC->getAllocator()))
5311  continue;
5312  // Placeholder until allocate clause supports align modifier.
5313  Expr *Alignment = nullptr;
5314  applyOMPAllocateAttribute(S, PrivateVD, AllocatorKind, AC->getAllocator(),
5315  Alignment, E->getSourceRange());
5316  }
5317  }
5318 }
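// Illustrative example (editorial, not from the original source) of the
// allocate-clause checks above: each list item must have a private
// data-sharing attribute on the directive, and thread-scoped allocators are
// flagged on tasking/target directives, e.g.
//
//   #pragma omp task firstprivate(x) allocate(omp_thread_mem_alloc: x)
//
// warns with warn_omp_allocate_thread_on_task_target_directive, while an
// allocate list item without a matching private/firstprivate/... clause is
// rejected with err_omp_expected_private_copy_for_allocate.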
5319 
5320 namespace {
5321 /// Rewrite statements and expressions for Sema \p Actions CurContext.
5322 ///
5323 /// Used to wrap already parsed statements/expressions into a new CapturedStmt
5324 /// context. DeclRefExpr used inside the new context are changed to refer to the
5325 /// captured variable instead.
5326 class CaptureVars : public TreeTransform<CaptureVars> {
5327  using BaseTransform = TreeTransform<CaptureVars>;
5328 
5329 public:
5330  CaptureVars(Sema &Actions) : BaseTransform(Actions) {}
5331 
5332  bool AlwaysRebuild() { return true; }
5333 };
5334 } // namespace
5335 
5336 static VarDecl *precomputeExpr(Sema &Actions,
5337  SmallVectorImpl<Stmt *> &BodyStmts, Expr *E,
5338  StringRef Name) {
5339  Expr *NewE = AssertSuccess(CaptureVars(Actions).TransformExpr(E));
5340  VarDecl *NewVar = buildVarDecl(Actions, {}, NewE->getType(), Name, nullptr,
5341  dyn_cast<DeclRefExpr>(E->IgnoreImplicit()));
5342  auto *NewDeclStmt = cast<DeclStmt>(AssertSuccess(
5343  Actions.ActOnDeclStmt(Actions.ConvertDeclToDeclGroup(NewVar), {}, {})));
5344  Actions.AddInitializerToDecl(NewDeclStmt->getSingleDecl(), NewE, false);
5345  BodyStmts.push_back(NewDeclStmt);
5346  return NewVar;
5347 }
5348 
5349 /// Create a closure that computes the number of iterations of a loop.
5350 ///
5351 /// \param Actions The Sema object.
5352 /// \param LogicalTy Type for the logical iteration number.
5353 /// \param Rel Comparison operator of the loop condition.
5354 /// \param StartExpr Value of the loop counter at the first iteration.
5355 /// \param StopExpr Expression the loop counter is compared against in the loop condition.
5356 /// \param StepExpr Amount of increment after each iteration.
5357 ///
5358 /// \return Closure (CapturedStmt) of the distance calculation.
5359 static CapturedStmt *buildDistanceFunc(Sema &Actions, QualType LogicalTy,
5360  BinaryOperator::Opcode Rel,
5361  Expr *StartExpr, Expr *StopExpr,
5362  Expr *StepExpr) {
5363  ASTContext &Ctx = Actions.getASTContext();
5364  TypeSourceInfo *LogicalTSI = Ctx.getTrivialTypeSourceInfo(LogicalTy);
5365 
5366  // Captured regions currently don't support return values, so we use an
5367  // out-parameter instead. All inputs are implicit captures.
5368  // TODO: Instead of capturing each DeclRefExpr occurring in
5369  // StartExpr/StopExpr/Step, these could also be passed as a value capture.
5370  QualType ResultTy = Ctx.getLValueReferenceType(LogicalTy);
5371  Sema::CapturedParamNameType Params[] = {{"Distance", ResultTy},
5372  {StringRef(), QualType()}};
5373  Actions.ActOnCapturedRegionStart({}, nullptr, CR_Default, Params);
5374 
5375  Stmt *Body;
5376  {
5377  Sema::CompoundScopeRAII CompoundScope(Actions);
5378  CapturedDecl *CS = cast<CapturedDecl>(Actions.CurContext);
5379 
5380  // Get the LValue expression for the result.
5381  ImplicitParamDecl *DistParam = CS->getParam(0);
5382  DeclRefExpr *DistRef = Actions.BuildDeclRefExpr(
5383  DistParam, LogicalTy, VK_LValue, {}, nullptr, nullptr, {}, nullptr);
5384 
5385  SmallVector<Stmt *, 4> BodyStmts;
5386 
5387  // Capture all referenced variable references.
5388  // TODO: Instead of computing NewStart/NewStop/NewStep inside the
5389  // CapturedStmt, we could compute them before and capture the result, to be
5390  // used jointly with the LoopVar function.
5391  VarDecl *NewStart = precomputeExpr(Actions, BodyStmts, StartExpr, ".start");
5392  VarDecl *NewStop = precomputeExpr(Actions, BodyStmts, StopExpr, ".stop");
5393  VarDecl *NewStep = precomputeExpr(Actions, BodyStmts, StepExpr, ".step");
5394  auto BuildVarRef = [&](VarDecl *VD) {
5395  return buildDeclRefExpr(Actions, VD, VD->getType(), {});
5396  };
5397 
5398  IntegerLiteral *Zero = IntegerLiteral::Create(
5399  Ctx, llvm::APInt(Ctx.getIntWidth(LogicalTy), 0), LogicalTy, {});
5400  IntegerLiteral *One = IntegerLiteral::Create(
5401  Ctx, llvm::APInt(Ctx.getIntWidth(LogicalTy), 1), LogicalTy, {});
5402  Expr *Dist;
5403  if (Rel == BO_NE) {
5404  // When using a != comparison, the increment can be +1 or -1. This can be
5405  // dynamic at runtime, so we need to check for the direction.
5406  Expr *IsNegStep = AssertSuccess(
5407  Actions.BuildBinOp(nullptr, {}, BO_LT, BuildVarRef(NewStep), Zero));
5408 
5409  // Positive increment.
5410  Expr *ForwardRange = AssertSuccess(Actions.BuildBinOp(
5411  nullptr, {}, BO_Sub, BuildVarRef(NewStop), BuildVarRef(NewStart)));
5412  ForwardRange = AssertSuccess(
5413  Actions.BuildCStyleCastExpr({}, LogicalTSI, {}, ForwardRange));
5414  Expr *ForwardDist = AssertSuccess(Actions.BuildBinOp(
5415  nullptr, {}, BO_Div, ForwardRange, BuildVarRef(NewStep)));
5416 
5417  // Negative increment.
5418  Expr *BackwardRange = AssertSuccess(Actions.BuildBinOp(
5419  nullptr, {}, BO_Sub, BuildVarRef(NewStart), BuildVarRef(NewStop)));
5420  BackwardRange = AssertSuccess(
5421  Actions.BuildCStyleCastExpr({}, LogicalTSI, {}, BackwardRange));
5422  Expr *NegIncAmount = AssertSuccess(
5423  Actions.BuildUnaryOp(nullptr, {}, UO_Minus, BuildVarRef(NewStep)));
5424  Expr *BackwardDist = AssertSuccess(
5425  Actions.BuildBinOp(nullptr, {}, BO_Div, BackwardRange, NegIncAmount));
5426 
5427  // Use the appropriate case.
5428  Dist = AssertSuccess(Actions.ActOnConditionalOp(
5429  {}, {}, IsNegStep, BackwardDist, ForwardDist));
5430  } else {
5431  assert((Rel == BO_LT || Rel == BO_LE || Rel == BO_GE || Rel == BO_GT) &&
5432  "Expected one of these relational operators");
5433 
5434  // We can derive the direction from any other comparison operator. The
5435  // OpenMP loop is not well-formed if Step increments/decrements in the
5436  // other direction. First, check whether at least the first iteration
5437  // passes the loop condition.
5438  Expr *HasAnyIteration = AssertSuccess(Actions.BuildBinOp(
5439  nullptr, {}, Rel, BuildVarRef(NewStart), BuildVarRef(NewStop)));
5440 
5441  // Compute the range between first and last counter value.
5442  Expr *Range;
5443  if (Rel == BO_GE || Rel == BO_GT)
5444  Range = AssertSuccess(Actions.BuildBinOp(
5445  nullptr, {}, BO_Sub, BuildVarRef(NewStart), BuildVarRef(NewStop)));
5446  else
5447  Range = AssertSuccess(Actions.BuildBinOp(
5448  nullptr, {}, BO_Sub, BuildVarRef(NewStop), BuildVarRef(NewStart)));
5449 
5450  // Ensure unsigned range space.
5451  Range =
5452  AssertSuccess(Actions.BuildCStyleCastExpr({}, LogicalTSI, {}, Range));
5453 
5454  if (Rel == BO_LE || Rel == BO_GE) {
5455  // Add one to the range if the relational operator is inclusive.
5456  Range =
5457  AssertSuccess(Actions.BuildBinOp(nullptr, {}, BO_Add, Range, One));
5458  }
5459 
5460  // Divide by the absolute step amount. If the range is not a multiple of
5461  // the step size, rounding-up the effective upper bound ensures that the
5462  // last iteration is included.
5463  // Note that the rounding-up may cause an overflow in a temporary that
5464  // could be avoided, but would have occurred in a C-style for-loop as well.
5465  Expr *Divisor = BuildVarRef(NewStep);
5466  if (Rel == BO_GE || Rel == BO_GT)
5467  Divisor =
5468  AssertSuccess(Actions.BuildUnaryOp(nullptr, {}, UO_Minus, Divisor));
5469  Expr *DivisorMinusOne =
5470  AssertSuccess(Actions.BuildBinOp(nullptr, {}, BO_Sub, Divisor, One));
5471  Expr *RangeRoundUp = AssertSuccess(
5472  Actions.BuildBinOp(nullptr, {}, BO_Add, Range, DivisorMinusOne));
5473  Dist = AssertSuccess(
5474  Actions.BuildBinOp(nullptr, {}, BO_Div, RangeRoundUp, Divisor));
5475 
5476  // If there is not at least one iteration, the range contains garbage. Fix
5477  // to zero in this case.
5478  Dist = AssertSuccess(
5479  Actions.ActOnConditionalOp({}, {}, HasAnyIteration, Dist, Zero));
5480  }
5481 
5482  // Assign the result to the out-parameter.
5483  Stmt *ResultAssign = AssertSuccess(Actions.BuildBinOp(
5484  Actions.getCurScope(), {}, BO_Assign, DistRef, Dist));
5485  BodyStmts.push_back(ResultAssign);
5486 
5487  Body = AssertSuccess(Actions.ActOnCompoundStmt({}, {}, BodyStmts, false));
5488  }
5489 
5490  return cast<CapturedStmt>(
5491  AssertSuccess(Actions.ActOnCapturedRegionEnd(Body)));
5492 }
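// Worked example (editorial, not from the original source) of the distance
// computation built above: for the canonical loop
//
//   for (int i = 0; i < 10; i += 3)
//
// Rel is BO_LT, so Range = Stop - Start = 10 and
// Dist = (Range + (Step - 1)) / Step = (10 + 2) / 3 = 4 logical iterations
// (i = 0, 3, 6, 9), with HasAnyIteration forcing the result to 0 for loops
// whose first iteration already fails the condition.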
5493 
5494 /// Create a closure that computes the loop variable from the logical iteration
5495 /// number.
5496 ///
5497 /// \param Actions The Sema object.
5498 /// \param LoopVarTy Type for the loop variable used for result value.
5499 /// \param LogicalTy Type for the logical iteration number.
5500 /// \param StartExpr Value of the loop counter at the first iteration.
5501 /// \param Step Amount of increment after each iteration.
5502 /// \param Deref Whether the loop variable is a dereference of the loop
5503 /// counter variable.
5504 ///
5505 /// \return Closure (CapturedStmt) of the loop value calculation.
5506 static CapturedStmt *buildLoopVarFunc(Sema &Actions, QualType LoopVarTy,
5507  QualType LogicalTy,
5508  DeclRefExpr *StartExpr, Expr *Step,
5509  bool Deref) {
5510  ASTContext &Ctx = Actions.getASTContext();
5511 
5512  // Pass the result as an out-parameter. Passing as return value would require
5513  // the OpenMPIRBuilder to know additional C/C++ semantics, such as how to
5514  // invoke a copy constructor.
5515  QualType TargetParamTy = Ctx.getLValueReferenceType(LoopVarTy);
5516  Sema::CapturedParamNameType Params[] = {{"LoopVar", TargetParamTy},
5517  {"Logical", LogicalTy},
5518  {StringRef(), QualType()}};
5519  Actions.ActOnCapturedRegionStart({}, nullptr, CR_Default, Params);
5520 
5521  // Capture the initial iterator, which represents the LoopVar value at the
5522  // zeroth logical iteration. Since the original ForStmt/CXXForRangeStmt updates
5523  // it in every iteration, capture it by value before it is modified.
5524  VarDecl *StartVar = cast<VarDecl>(StartExpr->getDecl());
5525  bool Invalid = Actions.tryCaptureVariable(StartVar, {},
5526  Sema::TryCapture_ExplicitByVal, {});
5527  (void)Invalid;
5528  assert(!Invalid && "Expecting capture-by-value to work.");
5529 
5530  Expr *Body;
5531  {
5532  Sema::CompoundScopeRAII CompoundScope(Actions);
5533  auto *CS = cast<CapturedDecl>(Actions.CurContext);
5534 
5535  ImplicitParamDecl *TargetParam = CS->getParam(0);
5536  DeclRefExpr *TargetRef = Actions.BuildDeclRefExpr(
5537  TargetParam, LoopVarTy, VK_LValue, {}, nullptr, nullptr, {}, nullptr);
5538  ImplicitParamDecl *IndvarParam = CS->getParam(1);
5539  DeclRefExpr *LogicalRef = Actions.BuildDeclRefExpr(
5540  IndvarParam, LogicalTy, VK_LValue, {}, nullptr, nullptr, {}, nullptr);
5541 
5542  // Capture the Start expression.
5543  CaptureVars Recap(Actions);
5544  Expr *NewStart = AssertSuccess(Recap.TransformExpr(StartExpr));
5545  Expr *NewStep = AssertSuccess(Recap.TransformExpr(Step));
5546 
5547  Expr *Skip = AssertSuccess(
5548  Actions.BuildBinOp(nullptr, {}, BO_Mul, NewStep, LogicalRef));
5549  // TODO: Explicitly cast to the iterator's difference_type instead of
5550  // relying on implicit conversion.
5551  Expr *Advanced =
5552  AssertSuccess(Actions.BuildBinOp(nullptr, {}, BO_Add, NewStart, Skip));
5553 
5554  if (Deref) {
5555  // For range-based for-loops convert the loop counter value to a concrete
5556  // loop variable value by dereferencing the iterator.
5557  Advanced =
5558  AssertSuccess(Actions.BuildUnaryOp(nullptr, {}, UO_Deref, Advanced));
5559  }
5560 
5561  // Assign the result to the output parameter.
5562  Body = AssertSuccess(Actions.BuildBinOp(Actions.getCurScope(), {},
5563  BO_Assign, TargetRef, Advanced));
5564  }
5565  return cast<CapturedStmt>(
5566  AssertSuccess(Actions.ActOnCapturedRegionEnd(Body)));
5567 }
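// Worked example (editorial, not from the original source) of the loop-variable
// closure built above: for 'for (int i = 0; i < 10; i += 3)' it computes
// LoopVar = Start + Logical * Step, so logical iterations 0, 1, 2, 3 map to
// i = 0, 3, 6, 9; for range-based for loops the advanced iterator is also
// dereferenced (Deref == true) to produce the loop variable value.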
5568 
5569 StmtResult Sema::ActOnOpenMPCanonicalLoop(Stmt *AStmt) {
5570  ASTContext &Ctx = getASTContext();
5571 
5572  // Extract the common elements of ForStmt and CXXForRangeStmt:
5573  // Loop variable, repeat condition, increment
5574  Expr *Cond, *Inc;
5575  VarDecl *LIVDecl, *LUVDecl;
5576  if (auto *For = dyn_cast<ForStmt>(AStmt)) {
5577  Stmt *Init = For->getInit();
5578  if (auto *LCVarDeclStmt = dyn_cast<DeclStmt>(Init)) {
5579  // For statement declares loop variable.
5580  LIVDecl = cast<VarDecl>(LCVarDeclStmt->getSingleDecl());
5581  } else if (auto *LCAssign = dyn_cast<BinaryOperator>(Init)) {
5582  // For statement reuses variable.
5583  assert(LCAssign->getOpcode() == BO_Assign &&
5584  "init part must be a loop variable assignment");
5585  auto *CounterRef = cast<DeclRefExpr>(LCAssign->getLHS());
5586  LIVDecl = cast<VarDecl>(CounterRef->getDecl());
5587  } else
5588  llvm_unreachable("Cannot determine loop variable");
5589  LUVDecl = LIVDecl;
5590 
5591  Cond = For->getCond();
5592  Inc = For->getInc();
5593  } else if (auto *RangeFor = dyn_cast<CXXForRangeStmt>(AStmt)) {
5594  DeclStmt *BeginStmt = RangeFor->getBeginStmt();
5595  LIVDecl = cast<VarDecl>(BeginStmt->getSingleDecl());
5596  LUVDecl = RangeFor->getLoopVariable();
5597 
5598  Cond = RangeFor->getCond();
5599  Inc = RangeFor->getInc();
5600  } else
5601  llvm_unreachable("unhandled kind of loop");
5602 
5603  QualType CounterTy = LIVDecl->getType();
5604  QualType LVTy = LUVDecl->getType();
5605 
5606  // Analyze the loop condition.
5607  Expr *LHS, *RHS;
5608  BinaryOperator::Opcode CondRel;
5609  Cond = Cond->IgnoreImplicit();
5610  if (auto *CondBinExpr = dyn_cast<BinaryOperator>(Cond)) {
5611  LHS = CondBinExpr->getLHS();
5612  RHS = CondBinExpr->getRHS();
5613  CondRel = CondBinExpr->getOpcode();
5614  } else if (auto *CondCXXOp = dyn_cast<CXXOperatorCallExpr>(Cond)) {
5615  assert(CondCXXOp->getNumArgs() == 2 && "Comparison should have 2 operands");
5616  LHS = CondCXXOp->getArg(0);
5617  RHS = CondCXXOp->getArg(1);
5618  switch (CondCXXOp->getOperator()) {
5619  case OO_ExclaimEqual:
5620  CondRel = BO_NE;
5621  break;
5622  case OO_Less:
5623  CondRel = BO_LT;
5624  break;
5625  case OO_LessEqual:
5626  CondRel = BO_LE;
5627  break;
5628  case OO_Greater:
5629  CondRel = BO_GT;
5630  break;
5631  case OO_GreaterEqual:
5632  CondRel = BO_GE;