1 //===--- SemaOpenMP.cpp - Semantic Analysis for OpenMP constructs ---------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements semantic analysis for OpenMP directives and
10 /// clauses.
11 ///
12 //===----------------------------------------------------------------------===//
13 
14 #include "TreeTransform.h"
15 #include "clang/AST/ASTContext.h"
18 #include "clang/AST/Decl.h"
19 #include "clang/AST/DeclCXX.h"
20 #include "clang/AST/DeclOpenMP.h"
21 #include "clang/AST/OpenMPClause.h"
22 #include "clang/AST/StmtCXX.h"
23 #include "clang/AST/StmtOpenMP.h"
24 #include "clang/AST/StmtVisitor.h"
25 #include "clang/AST/TypeOrdering.h"
29 #include "clang/Basic/TargetInfo.h"
31 #include "clang/Sema/Lookup.h"
32 #include "clang/Sema/Scope.h"
33 #include "clang/Sema/ScopeInfo.h"
35 #include "llvm/ADT/IndexedMap.h"
36 #include "llvm/ADT/PointerEmbeddedInt.h"
37 #include "llvm/ADT/STLExtras.h"
38 #include "llvm/ADT/SmallSet.h"
39 #include "llvm/ADT/StringExtras.h"
40 #include "llvm/Frontend/OpenMP/OMPAssume.h"
41 #include "llvm/Frontend/OpenMP/OMPConstants.h"
42 #include <set>
43 
44 using namespace clang;
45 using namespace llvm::omp;
46 
47 //===----------------------------------------------------------------------===//
48 // Stack of data-sharing attributes for variables
49 //===----------------------------------------------------------------------===//
50 
51 static const Expr *checkMapClauseExpressionBase(
52  Sema &SemaRef, Expr *E,
53  OMPClauseMappableExprCommon::MappableExprComponentListRef CurComponents,
54  OpenMPClauseKind CKind, OpenMPDirectiveKind DKind, bool NoDiagnose);
55 
56 namespace {
57 /// Default data sharing attributes, which can be applied to a directive.
58 enum DefaultDataSharingAttributes {
59  DSA_unspecified = 0, /// Data sharing attribute not specified.
60  DSA_none = 1 << 0, /// Default data sharing attribute 'none'.
61  DSA_shared = 1 << 1, /// Default data sharing attribute 'shared'.
62  DSA_private = 1 << 2, /// Default data sharing attribute 'private'.
63  DSA_firstprivate = 1 << 3, /// Default data sharing attribute 'firstprivate'.
64 };
65 
66 /// Stack for tracking declarations used in OpenMP directives and
67 /// clauses and their data-sharing attributes.
68 class DSAStackTy {
69 public:
70  struct DSAVarData {
71  OpenMPDirectiveKind DKind = OMPD_unknown;
72  OpenMPClauseKind CKind = OMPC_unknown;
73  unsigned Modifier = 0;
74  const Expr *RefExpr = nullptr;
75  DeclRefExpr *PrivateCopy = nullptr;
76  SourceLocation ImplicitDSALoc;
77  bool AppliedToPointee = false;
78  DSAVarData() = default;
79  DSAVarData(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind,
80  const Expr *RefExpr, DeclRefExpr *PrivateCopy,
81  SourceLocation ImplicitDSALoc, unsigned Modifier,
82  bool AppliedToPointee)
83  : DKind(DKind), CKind(CKind), Modifier(Modifier), RefExpr(RefExpr),
84  PrivateCopy(PrivateCopy), ImplicitDSALoc(ImplicitDSALoc),
85  AppliedToPointee(AppliedToPointee) {}
86  };
87  using OperatorOffsetTy =
88  llvm::SmallVector<std::pair<Expr *, OverloadedOperatorKind>, 4>;
89  using DoacrossDependMapTy =
90  llvm::DenseMap<OMPDependClause *, OperatorOffsetTy>;
91  /// Kind of the declaration used in the uses_allocators clauses.
92  enum class UsesAllocatorsDeclKind {
93  /// Predefined allocator
94  PredefinedAllocator,
95  /// User-defined allocator
96  UserDefinedAllocator,
97  /// The declaration that represents the allocator trait
98  AllocatorTrait,
99  };
100 
101 private:
102  struct DSAInfo {
103  OpenMPClauseKind Attributes = OMPC_unknown;
104  unsigned Modifier = 0;
105  /// Pointer to a reference expression and a flag which shows that the
106  /// variable is marked as lastprivate(true) or not (false).
107  llvm::PointerIntPair<const Expr *, 1, bool> RefExpr;
108  DeclRefExpr *PrivateCopy = nullptr;
109  /// true if the attribute is applied to the pointee, not the variable
110  /// itself.
111  bool AppliedToPointee = false;
112  };
113  using DeclSAMapTy = llvm::SmallDenseMap<const ValueDecl *, DSAInfo, 8>;
114  using UsedRefMapTy = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 8>;
115  using LCDeclInfo = std::pair<unsigned, VarDecl *>;
116  using LoopControlVariablesMapTy =
117  llvm::SmallDenseMap<const ValueDecl *, LCDeclInfo, 8>;
118  /// Struct that associates a component with the clause kind where they are
119  /// found.
120  struct MappedExprComponentTy {
121  OMPClauseMappableExprCommon::MappableExprComponentLists Components;
122  OpenMPClauseKind Kind = OMPC_unknown;
123  };
124  using MappedExprComponentsTy =
125  llvm::DenseMap<const ValueDecl *, MappedExprComponentTy>;
126  using CriticalsWithHintsTy =
127  llvm::StringMap<std::pair<const OMPCriticalDirective *, llvm::APSInt>>;
128  struct ReductionData {
129  using BOKPtrType = llvm::PointerEmbeddedInt<BinaryOperatorKind, 16>;
130  SourceRange ReductionRange;
131  llvm::PointerUnion<const Expr *, BOKPtrType> ReductionOp;
132  ReductionData() = default;
133  void set(BinaryOperatorKind BO, SourceRange RR) {
134  ReductionRange = RR;
135  ReductionOp = BO;
136  }
137  void set(const Expr *RefExpr, SourceRange RR) {
138  ReductionRange = RR;
139  ReductionOp = RefExpr;
140  }
141  };
142  using DeclReductionMapTy =
143  llvm::SmallDenseMap<const ValueDecl *, ReductionData, 4>;
144  struct DefaultmapInfo {
145  OpenMPDefaultmapClauseModifier ImplicitBehavior =
146  OMPC_DEFAULTMAP_MODIFIER_unknown;
147  SourceLocation SLoc;
148  DefaultmapInfo() = default;
149  DefaultmapInfo(OpenMPDefaultmapClauseModifier M, SourceLocation Loc)
150  : ImplicitBehavior(M), SLoc(Loc) {}
151  };
152 
153  struct SharingMapTy {
154  DeclSAMapTy SharingMap;
155  DeclReductionMapTy ReductionMap;
156  UsedRefMapTy AlignedMap;
157  UsedRefMapTy NontemporalMap;
158  MappedExprComponentsTy MappedExprComponents;
159  LoopControlVariablesMapTy LCVMap;
160  DefaultDataSharingAttributes DefaultAttr = DSA_unspecified;
161  SourceLocation DefaultAttrLoc;
162  DefaultmapInfo DefaultmapMap[OMPC_DEFAULTMAP_unknown];
163  OpenMPDirectiveKind Directive = OMPD_unknown;
164  DeclarationNameInfo DirectiveName;
165  Scope *CurScope = nullptr;
166  DeclContext *Context = nullptr;
167  SourceLocation ConstructLoc;
168  /// Set of 'depend' clauses with 'sink|source' dependence kind. Required to
169  /// get the data (loop counters etc.) about enclosing loop-based construct.
170  /// This data is required during codegen.
171  DoacrossDependMapTy DoacrossDepends;
172  /// First argument (Expr *) contains optional argument of the
173  /// 'ordered' clause, the second one is true if the region has 'ordered'
174  /// clause, false otherwise.
175  llvm::Optional<std::pair<const Expr *, OMPOrderedClause *>> OrderedRegion;
176  unsigned AssociatedLoops = 1;
177  bool HasMutipleLoops = false;
178  const Decl *PossiblyLoopCounter = nullptr;
179  bool NowaitRegion = false;
180  bool UntiedRegion = false;
181  bool CancelRegion = false;
182  bool LoopStart = false;
183  bool BodyComplete = false;
184  SourceLocation PrevScanLocation;
185  SourceLocation PrevOrderedLocation;
186  SourceLocation InnerTeamsRegionLoc;
187  /// Reference to the taskgroup task_reduction reference expression.
188  Expr *TaskgroupReductionRef = nullptr;
189  llvm::DenseSet<QualType> MappedClassesQualTypes;
190  SmallVector<Expr *, 4> InnerUsedAllocators;
191  llvm::DenseSet<CanonicalDeclPtr<Decl>> ImplicitTaskFirstprivates;
192  /// List of globals marked as declare target link in this target region
193  /// (isOpenMPTargetExecutionDirective(Directive) == true).
194  llvm::SmallVector<DeclRefExpr *, 4> DeclareTargetLinkVarDecls;
195  /// List of decls used in inclusive/exclusive clauses of the scan directive.
196  llvm::DenseSet<CanonicalDeclPtr<Decl>> UsedInScanDirective;
197  llvm::DenseMap<CanonicalDeclPtr<const Decl>, UsesAllocatorsDeclKind>
198  UsesAllocatorsDecls;
199  /// Data is required on creating capture fields for implicit
200  /// default first|private clause.
201  struct ImplicitDefaultFDInfoTy {
202  /// Field decl.
203  const FieldDecl *FD = nullptr;
204  /// Nesting stack level
205  size_t StackLevel = 0;
206  /// Capture variable decl.
207  VarDecl *VD = nullptr;
208  ImplicitDefaultFDInfoTy(const FieldDecl *FD, size_t StackLevel,
209  VarDecl *VD)
210  : FD(FD), StackLevel(StackLevel), VD(VD) {}
211  };
212  /// List of captured fields
213  llvm::SmallVector<ImplicitDefaultFDInfoTy, 8>
214  ImplicitDefaultFirstprivateFDs;
215  Expr *DeclareMapperVar = nullptr;
216  SharingMapTy(OpenMPDirectiveKind DKind, DeclarationNameInfo Name,
217  Scope *CurScope, SourceLocation Loc)
218  : Directive(DKind), DirectiveName(Name), CurScope(CurScope),
219  ConstructLoc(Loc) {}
220  SharingMapTy() = default;
221  };
222 
223  using StackTy = SmallVector<SharingMapTy, 4>;
224 
225  /// Stack of used declarations and their data-sharing attributes.
226  DeclSAMapTy Threadprivates;
227  const FunctionScopeInfo *CurrentNonCapturingFunctionScope = nullptr;
228  SmallVector<std::pair<StackTy, const FunctionScopeInfo *>, 4> Stack;
229  /// true, if check for DSA must be from parent directive, false, if
230  /// from current directive.
231  OpenMPClauseKind ClauseKindMode = OMPC_unknown;
232  Sema &SemaRef;
233  bool ForceCapturing = false;
234  /// true if all the variables in the target executable directives must be
235  /// captured by reference.
236  bool ForceCaptureByReferenceInTargetExecutable = false;
237  CriticalsWithHintsTy Criticals;
238  unsigned IgnoredStackElements = 0;
239 
240  /// Iterators over the stack iterate in order from innermost to outermost
241  /// directive.
242  using const_iterator = StackTy::const_reverse_iterator;
243  const_iterator begin() const {
244  return Stack.empty() ? const_iterator()
245  : Stack.back().first.rbegin() + IgnoredStackElements;
246  }
247  const_iterator end() const {
248  return Stack.empty() ? const_iterator() : Stack.back().first.rend();
249  }
250  using iterator = StackTy::reverse_iterator;
251  iterator begin() {
252  return Stack.empty() ? iterator()
253  : Stack.back().first.rbegin() + IgnoredStackElements;
254  }
255  iterator end() {
256  return Stack.empty() ? iterator() : Stack.back().first.rend();
257  }
258 
259  // Convenience operations to get at the elements of the stack.
260 
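  // Note: the stack is treated as empty when its top entry belongs to a
  // different (enclosing) function scope, or when all remaining entries are
  // hidden by IgnoredStackElements (see ParentDirectiveScope below).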
261  bool isStackEmpty() const {
262  return Stack.empty() ||
263  Stack.back().second != CurrentNonCapturingFunctionScope ||
264  Stack.back().first.size() <= IgnoredStackElements;
265  }
266  size_t getStackSize() const {
267  return isStackEmpty() ? 0
268  : Stack.back().first.size() - IgnoredStackElements;
269  }
270 
271  SharingMapTy *getTopOfStackOrNull() {
272  size_t Size = getStackSize();
273  if (Size == 0)
274  return nullptr;
275  return &Stack.back().first[Size - 1];
276  }
277  const SharingMapTy *getTopOfStackOrNull() const {
278  return const_cast<DSAStackTy &>(*this).getTopOfStackOrNull();
279  }
280  SharingMapTy &getTopOfStack() {
281  assert(!isStackEmpty() && "no current directive");
282  return *getTopOfStackOrNull();
283  }
284  const SharingMapTy &getTopOfStack() const {
285  return const_cast<DSAStackTy &>(*this).getTopOfStack();
286  }
287 
288  SharingMapTy *getSecondOnStackOrNull() {
289  size_t Size = getStackSize();
290  if (Size <= 1)
291  return nullptr;
292  return &Stack.back().first[Size - 2];
293  }
294  const SharingMapTy *getSecondOnStackOrNull() const {
295  return const_cast<DSAStackTy &>(*this).getSecondOnStackOrNull();
296  }
297 
298  /// Get the stack element at a certain level (previously returned by
299  /// \c getNestingLevel).
300  ///
301  /// Note that nesting levels count from outermost to innermost, and this is
302  /// the reverse of our iteration order where new inner levels are pushed at
303  /// the front of the stack.
304  SharingMapTy &getStackElemAtLevel(unsigned Level) {
305  assert(Level < getStackSize() && "no such stack element");
306  return Stack.back().first[Level];
307  }
308  const SharingMapTy &getStackElemAtLevel(unsigned Level) const {
309  return const_cast<DSAStackTy &>(*this).getStackElemAtLevel(Level);
310  }
311 
312  DSAVarData getDSA(const_iterator &Iter, ValueDecl *D) const;
313 
314  /// Checks if the variable is a local for OpenMP region.
315  bool isOpenMPLocal(VarDecl *D, const_iterator Iter) const;
316 
317  /// Vector of previously declared requires directives
318  SmallVector<const OMPRequiresDecl *, 2> RequiresDecls;
319  /// omp_allocator_handle_t type.
320  QualType OMPAllocatorHandleT;
321  /// omp_depend_t type.
322  QualType OMPDependT;
323  /// omp_event_handle_t type.
324  QualType OMPEventHandleT;
325  /// omp_alloctrait_t type.
326  QualType OMPAlloctraitT;
327  /// Expression for the predefined allocators.
328  Expr *OMPPredefinedAllocators[OMPAllocateDeclAttr::OMPUserDefinedMemAlloc] = {
329  nullptr};
330  /// Vector of previously encountered target directives
331  SmallVector<SourceLocation, 2> TargetLocations;
332  SourceLocation AtomicLocation;
333  /// Vector of declare variant construct traits.
334  SmallVector<llvm::omp::TraitProperty, 8> ConstructTraits;
335 
336 public:
337  explicit DSAStackTy(Sema &S) : SemaRef(S) {}
338 
339  /// Sets omp_allocator_handle_t type.
340  void setOMPAllocatorHandleT(QualType Ty) { OMPAllocatorHandleT = Ty; }
341  /// Gets omp_allocator_handle_t type.
342  QualType getOMPAllocatorHandleT() const { return OMPAllocatorHandleT; }
343  /// Sets omp_alloctrait_t type.
344  void setOMPAlloctraitT(QualType Ty) { OMPAlloctraitT = Ty; }
345  /// Gets omp_alloctrait_t type.
346  QualType getOMPAlloctraitT() const { return OMPAlloctraitT; }
347  /// Sets the given default allocator.
348  void setAllocator(OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind,
349  Expr *Allocator) {
350  OMPPredefinedAllocators[AllocatorKind] = Allocator;
351  }
352  /// Returns the specified default allocator.
353  Expr *getAllocator(OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind) const {
354  return OMPPredefinedAllocators[AllocatorKind];
355  }
356  /// Sets omp_depend_t type.
357  void setOMPDependT(QualType Ty) { OMPDependT = Ty; }
358  /// Gets omp_depend_t type.
359  QualType getOMPDependT() const { return OMPDependT; }
360 
361  /// Sets omp_event_handle_t type.
362  void setOMPEventHandleT(QualType Ty) { OMPEventHandleT = Ty; }
363  /// Gets omp_event_handle_t type.
364  QualType getOMPEventHandleT() const { return OMPEventHandleT; }
365 
366  bool isClauseParsingMode() const { return ClauseKindMode != OMPC_unknown; }
367  OpenMPClauseKind getClauseParsingMode() const {
368  assert(isClauseParsingMode() && "Must be in clause parsing mode.");
369  return ClauseKindMode;
370  }
371  void setClauseParsingMode(OpenMPClauseKind K) { ClauseKindMode = K; }
372 
373  bool isBodyComplete() const {
374  const SharingMapTy *Top = getTopOfStackOrNull();
375  return Top && Top->BodyComplete;
376  }
377  void setBodyComplete() { getTopOfStack().BodyComplete = true; }
378 
379  bool isForceVarCapturing() const { return ForceCapturing; }
380  void setForceVarCapturing(bool V) { ForceCapturing = V; }
381 
382  void setForceCaptureByReferenceInTargetExecutable(bool V) {
383  ForceCaptureByReferenceInTargetExecutable = V;
384  }
385  bool isForceCaptureByReferenceInTargetExecutable() const {
386  return ForceCaptureByReferenceInTargetExecutable;
387  }
388 
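  // A fresh (StackTy, FunctionScopeInfo *) pair is created lazily on the first
  // push inside a new non-capturing function scope; the accessors above only
  // ever look at Stack.back().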
389  void push(OpenMPDirectiveKind DKind, const DeclarationNameInfo &DirName,
390  Scope *CurScope, SourceLocation Loc) {
391  assert(!IgnoredStackElements &&
392  "cannot change stack while ignoring elements");
393  if (Stack.empty() ||
394  Stack.back().second != CurrentNonCapturingFunctionScope)
395  Stack.emplace_back(StackTy(), CurrentNonCapturingFunctionScope);
396  Stack.back().first.emplace_back(DKind, DirName, CurScope, Loc);
397  Stack.back().first.back().DefaultAttrLoc = Loc;
398  }
399 
400  void pop() {
401  assert(!IgnoredStackElements &&
402  "cannot change stack while ignoring elements");
403  assert(!Stack.back().first.empty() &&
404  "Data-sharing attributes stack is empty!");
405  Stack.back().first.pop_back();
406  }
407 
408  /// RAII object to temporarily leave the scope of a directive when we want to
409  /// logically operate in its parent.
410  class ParentDirectiveScope {
411  DSAStackTy &Self;
412  bool Active;
413 
414  public:
415  ParentDirectiveScope(DSAStackTy &Self, bool Activate)
416  : Self(Self), Active(false) {
417  if (Activate)
418  enable();
419  }
420  ~ParentDirectiveScope() { disable(); }
421  void disable() {
422  if (Active) {
423  --Self.IgnoredStackElements;
424  Active = false;
425  }
426  }
427  void enable() {
428  if (!Active) {
429  ++Self.IgnoredStackElements;
430  Active = true;
431  }
432  }
433  };
434 
435  /// Marks that we've started loop parsing.
436  void loopInit() {
437  assert(isOpenMPLoopDirective(getCurrentDirective()) &&
438  "Expected loop-based directive.");
439  getTopOfStack().LoopStart = true;
440  }
441  /// Start capturing of the variables in the loop context.
442  void loopStart() {
443  assert(isOpenMPLoopDirective(getCurrentDirective()) &&
444  "Expected loop-based directive.");
445  getTopOfStack().LoopStart = false;
446  }
447  /// true, if variables are captured, false otherwise.
448  bool isLoopStarted() const {
449  assert(isOpenMPLoopDirective(getCurrentDirective()) &&
450  "Expected loop-based directive.");
451  return !getTopOfStack().LoopStart;
452  }
453  /// Marks (or clears) declaration as possibly loop counter.
454  void resetPossibleLoopCounter(const Decl *D = nullptr) {
455  getTopOfStack().PossiblyLoopCounter = D ? D->getCanonicalDecl() : D;
456  }
457  /// Gets the possible loop counter decl.
458  const Decl *getPossiblyLoopCunter() const {
459  return getTopOfStack().PossiblyLoopCounter;
460  }
461  /// Start new OpenMP region stack in new non-capturing function.
462  void pushFunction() {
463  assert(!IgnoredStackElements &&
464  "cannot change stack while ignoring elements");
465  const FunctionScopeInfo *CurFnScope = SemaRef.getCurFunction();
466  assert(!isa<CapturingScopeInfo>(CurFnScope));
467  CurrentNonCapturingFunctionScope = CurFnScope;
468  }
469  /// Pop region stack for non-capturing function.
470  void popFunction(const FunctionScopeInfo *OldFSI) {
471  assert(!IgnoredStackElements &&
472  "cannot change stack while ignoring elements");
473  if (!Stack.empty() && Stack.back().second == OldFSI) {
474  assert(Stack.back().first.empty());
475  Stack.pop_back();
476  }
477  CurrentNonCapturingFunctionScope = nullptr;
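  // Re-establish the innermost enclosing non-capturing function scope, which
  // the remaining stack entries (if any) belong to.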
478  for (const FunctionScopeInfo *FSI : llvm::reverse(SemaRef.FunctionScopes)) {
479  if (!isa<CapturingScopeInfo>(FSI)) {
480  CurrentNonCapturingFunctionScope = FSI;
481  break;
482  }
483  }
484  }
485 
486  void addCriticalWithHint(const OMPCriticalDirective *D, llvm::APSInt Hint) {
487  Criticals.try_emplace(D->getDirectiveName().getAsString(), D, Hint);
488  }
489  const std::pair<const OMPCriticalDirective *, llvm::APSInt>
490  getCriticalWithHint(const DeclarationNameInfo &Name) const {
491  auto I = Criticals.find(Name.getAsString());
492  if (I != Criticals.end())
493  return I->second;
494  return std::make_pair(nullptr, llvm::APSInt());
495  }
496  /// If 'aligned' declaration for given variable \a D was not seen yet,
497  /// add it and return NULL; otherwise return previous occurrence's expression
498  /// for diagnostics.
499  const Expr *addUniqueAligned(const ValueDecl *D, const Expr *NewDE);
500  /// If 'nontemporal' declaration for given variable \a D was not seen yet,
501  /// add it and return NULL; otherwise return previous occurrence's expression
502  /// for diagnostics.
503  const Expr *addUniqueNontemporal(const ValueDecl *D, const Expr *NewDE);
504 
505  /// Register specified variable as loop control variable.
506  void addLoopControlVariable(const ValueDecl *D, VarDecl *Capture);
507  /// Check if the specified variable is a loop control variable for
508  /// current region.
509  /// \return The index of the loop control variable in the list of associated
510  /// for-loops (from outer to inner).
511  const LCDeclInfo isLoopControlVariable(const ValueDecl *D) const;
512  /// Check if the specified variable is a loop control variable for
513  /// parent region.
514  /// \return The index of the loop control variable in the list of associated
515  /// for-loops (from outer to inner).
516  const LCDeclInfo isParentLoopControlVariable(const ValueDecl *D) const;
517  /// Check if the specified variable is a loop control variable for
518  /// current region.
519  /// \return The index of the loop control variable in the list of associated
520  /// for-loops (from outer to inner).
521  const LCDeclInfo isLoopControlVariable(const ValueDecl *D,
522  unsigned Level) const;
523  /// Get the loop control variable for the I-th loop (or nullptr) in
524  /// parent directive.
525  const ValueDecl *getParentLoopControlVariable(unsigned I) const;
526 
527  /// Marks the specified decl \p D as used in scan directive.
528  void markDeclAsUsedInScanDirective(ValueDecl *D) {
529  if (SharingMapTy *Stack = getSecondOnStackOrNull())
530  Stack->UsedInScanDirective.insert(D);
531  }
532 
533  /// Checks if the specified declaration was used in the inner scan directive.
534  bool isUsedInScanDirective(ValueDecl *D) const {
535  if (const SharingMapTy *Stack = getTopOfStackOrNull())
536  return Stack->UsedInScanDirective.contains(D);
537  return false;
538  }
539 
540  /// Adds explicit data sharing attribute to the specified declaration.
541  void addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
542  DeclRefExpr *PrivateCopy = nullptr, unsigned Modifier = 0,
543  bool AppliedToPointee = false);
544 
545  /// Adds additional information for the reduction items with the reduction id
546  /// represented as an operator.
547  void addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
548  BinaryOperatorKind BOK);
549  /// Adds additional information for the reduction items with the reduction id
550  /// represented as reduction identifier.
551  void addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
552  const Expr *ReductionRef);
553  /// Returns the location and reduction operation from the innermost parent
554  /// region for the given \p D.
555  const DSAVarData
556  getTopMostTaskgroupReductionData(const ValueDecl *D, SourceRange &SR,
557  BinaryOperatorKind &BOK,
558  Expr *&TaskgroupDescriptor) const;
559  /// Returns the location and reduction operation from the innermost parent
560  /// region for the given \p D.
561  const DSAVarData
562  getTopMostTaskgroupReductionData(const ValueDecl *D, SourceRange &SR,
563  const Expr *&ReductionRef,
564  Expr *&TaskgroupDescriptor) const;
565  /// Return reduction reference expression for the current taskgroup or
566  /// parallel/worksharing directives with task reductions.
567  Expr *getTaskgroupReductionRef() const {
568  assert((getTopOfStack().Directive == OMPD_taskgroup ||
569  ((isOpenMPParallelDirective(getTopOfStack().Directive) ||
570  isOpenMPWorksharingDirective(getTopOfStack().Directive)) &&
571  !isOpenMPSimdDirective(getTopOfStack().Directive))) &&
572  "taskgroup reference expression requested for non taskgroup or "
573  "parallel/worksharing directive.");
574  return getTopOfStack().TaskgroupReductionRef;
575  }
576  /// Checks if the given \p VD declaration is actually a taskgroup reduction
577  /// descriptor variable at the \p Level of OpenMP regions.
578  bool isTaskgroupReductionRef(const ValueDecl *VD, unsigned Level) const {
579  return getStackElemAtLevel(Level).TaskgroupReductionRef &&
580  cast<DeclRefExpr>(getStackElemAtLevel(Level).TaskgroupReductionRef)
581  ->getDecl() == VD;
582  }
583 
584  /// Returns data sharing attributes from top of the stack for the
585  /// specified declaration.
586  const DSAVarData getTopDSA(ValueDecl *D, bool FromParent);
587  /// Returns data-sharing attributes for the specified declaration.
588  const DSAVarData getImplicitDSA(ValueDecl *D, bool FromParent) const;
589  /// Returns data-sharing attributes for the specified declaration.
590  const DSAVarData getImplicitDSA(ValueDecl *D, unsigned Level) const;
591  /// Checks if the specified variable has data-sharing attributes which
592  /// match specified \a CPred predicate in any directive which matches \a DPred
593  /// predicate.
594  const DSAVarData
595  hasDSA(ValueDecl *D,
596  const llvm::function_ref<bool(OpenMPClauseKind, bool,
597  DefaultDataSharingAttributes)>
598  CPred,
599  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
600  bool FromParent) const;
601  /// Checks if the specified variable has data-sharing attributes which
602  /// match specified \a CPred predicate in any innermost directive which
603  /// matches \a DPred predicate.
604  const DSAVarData
605  hasInnermostDSA(ValueDecl *D,
606  const llvm::function_ref<bool(OpenMPClauseKind, bool)> CPred,
607  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
608  bool FromParent) const;
609  /// Checks if the specified variable has explicit data-sharing
610  /// attributes which match specified \a CPred predicate at the specified
611  /// OpenMP region.
612  bool
613  hasExplicitDSA(const ValueDecl *D,
614  const llvm::function_ref<bool(OpenMPClauseKind, bool)> CPred,
615  unsigned Level, bool NotLastprivate = false) const;
616 
617  /// Returns true if the directive at level \p Level matches the
618  /// specified \a DPred predicate.
619  bool hasExplicitDirective(
620  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
621  unsigned Level) const;
622 
623  /// Finds a directive which matches specified \a DPred predicate.
624  bool hasDirective(
625  const llvm::function_ref<bool(
626  OpenMPDirectiveKind, const DeclarationNameInfo &, SourceLocation)>
627  DPred,
628  bool FromParent) const;
629 
630  /// Returns currently analyzed directive.
631  OpenMPDirectiveKind getCurrentDirective() const {
632  const SharingMapTy *Top = getTopOfStackOrNull();
633  return Top ? Top->Directive : OMPD_unknown;
634  }
635  /// Returns directive kind at specified level.
636  OpenMPDirectiveKind getDirective(unsigned Level) const {
637  assert(!isStackEmpty() && "No directive at specified level.");
638  return getStackElemAtLevel(Level).Directive;
639  }
640  /// Returns the capture region at the specified level.
641  OpenMPDirectiveKind getCaptureRegion(unsigned Level,
642  unsigned OpenMPCaptureLevel) const {
643  SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
644  getOpenMPCaptureRegions(CaptureRegions, getDirective(Level));
645  return CaptureRegions[OpenMPCaptureLevel];
646  }
647  /// Returns parent directive.
648  OpenMPDirectiveKind getParentDirective() const {
649  const SharingMapTy *Parent = getSecondOnStackOrNull();
650  return Parent ? Parent->Directive : OMPD_unknown;
651  }
652 
653  /// Add requires decl to internal vector
654  void addRequiresDecl(OMPRequiresDecl *RD) { RequiresDecls.push_back(RD); }
655 
656  /// Checks if the defined 'requires' directive has specified type of clause.
657  template <typename ClauseType> bool hasRequiresDeclWithClause() const {
658  return llvm::any_of(RequiresDecls, [](const OMPRequiresDecl *D) {
659  return llvm::any_of(D->clauselists(), [](const OMPClause *C) {
660  return isa<ClauseType>(C);
661  });
662  });
663  }
664 
665  /// Checks for a duplicate clause amongst previously declared requires
666  /// directives
667  bool hasDuplicateRequiresClause(ArrayRef<OMPClause *> ClauseList) const {
668  bool IsDuplicate = false;
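  // Compare every clause of the new 'requires' directive against every clause
  // of each previously seen 'requires' directive and diagnose repeats.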
669  for (OMPClause *CNew : ClauseList) {
670  for (const OMPRequiresDecl *D : RequiresDecls) {
671  for (const OMPClause *CPrev : D->clauselists()) {
672  if (CNew->getClauseKind() == CPrev->getClauseKind()) {
673  SemaRef.Diag(CNew->getBeginLoc(),
674  diag::err_omp_requires_clause_redeclaration)
675  << getOpenMPClauseName(CNew->getClauseKind());
676  SemaRef.Diag(CPrev->getBeginLoc(),
677  diag::note_omp_requires_previous_clause)
678  << getOpenMPClauseName(CPrev->getClauseKind());
679  IsDuplicate = true;
680  }
681  }
682  }
683  }
684  return IsDuplicate;
685  }
686 
687  /// Add location of previously encountered target to internal vector
688  void addTargetDirLocation(SourceLocation LocStart) {
689  TargetLocations.push_back(LocStart);
690  }
691 
692  /// Add location for the first encountered atomic directive.
693  void addAtomicDirectiveLoc(SourceLocation Loc) {
694  if (AtomicLocation.isInvalid())
695  AtomicLocation = Loc;
696  }
697 
698  /// Returns the location of the first encountered atomic directive in the
699  /// module.
700  SourceLocation getAtomicDirectiveLoc() const { return AtomicLocation; }
701 
702  // Return previously encountered target region locations.
703  ArrayRef<SourceLocation> getEncounteredTargetLocs() const {
704  return TargetLocations;
705  }
706 
707  /// Set default data sharing attribute to none.
708  void setDefaultDSANone(SourceLocation Loc) {
709  getTopOfStack().DefaultAttr = DSA_none;
710  getTopOfStack().DefaultAttrLoc = Loc;
711  }
712  /// Set default data sharing attribute to shared.
713  void setDefaultDSAShared(SourceLocation Loc) {
714  getTopOfStack().DefaultAttr = DSA_shared;
715  getTopOfStack().DefaultAttrLoc = Loc;
716  }
717  /// Set default data sharing attribute to private.
718  void setDefaultDSAPrivate(SourceLocation Loc) {
719  getTopOfStack().DefaultAttr = DSA_private;
720  getTopOfStack().DefaultAttrLoc = Loc;
721  }
722  /// Set default data sharing attribute to firstprivate.
723  void setDefaultDSAFirstPrivate(SourceLocation Loc) {
724  getTopOfStack().DefaultAttr = DSA_firstprivate;
725  getTopOfStack().DefaultAttrLoc = Loc;
726  }
727  /// Set default data mapping attribute to Modifier:Kind
728  void setDefaultDMAAttr(OpenMPDefaultmapClauseModifier M,
729  OpenMPDefaultmapClauseKind Kind, SourceLocation Loc) {
730  DefaultmapInfo &DMI = getTopOfStack().DefaultmapMap[Kind];
731  DMI.ImplicitBehavior = M;
732  DMI.SLoc = Loc;
733  }
734  /// Check whether the implicit-behavior has been set in defaultmap
735  bool checkDefaultmapCategory(OpenMPDefaultmapClauseKind VariableCategory) {
736  if (VariableCategory == OMPC_DEFAULTMAP_unknown)
737  return getTopOfStack()
738  .DefaultmapMap[OMPC_DEFAULTMAP_aggregate]
739  .ImplicitBehavior != OMPC_DEFAULTMAP_MODIFIER_unknown ||
740  getTopOfStack()
741  .DefaultmapMap[OMPC_DEFAULTMAP_scalar]
742  .ImplicitBehavior != OMPC_DEFAULTMAP_MODIFIER_unknown ||
743  getTopOfStack()
744  .DefaultmapMap[OMPC_DEFAULTMAP_pointer]
745  .ImplicitBehavior != OMPC_DEFAULTMAP_MODIFIER_unknown;
746  return getTopOfStack().DefaultmapMap[VariableCategory].ImplicitBehavior !=
747  OMPC_DEFAULTMAP_MODIFIER_unknown;
748  }
749 
750  ArrayRef<llvm::omp::TraitProperty> getConstructTraits() {
751  return ConstructTraits;
752  }
753  void handleConstructTrait(ArrayRef<llvm::omp::TraitProperty> Traits,
754  bool ScopeEntry) {
755  if (ScopeEntry)
756  ConstructTraits.append(Traits.begin(), Traits.end());
757  else
758  for (llvm::omp::TraitProperty Trait : llvm::reverse(Traits)) {
759  llvm::omp::TraitProperty Top = ConstructTraits.pop_back_val();
760  assert(Top == Trait && "Something left a trait on the stack!");
761  (void)Trait;
762  (void)Top;
763  }
764  }
765 
766  DefaultDataSharingAttributes getDefaultDSA(unsigned Level) const {
767  return getStackSize() <= Level ? DSA_unspecified
768  : getStackElemAtLevel(Level).DefaultAttr;
769  }
770  DefaultDataSharingAttributes getDefaultDSA() const {
771  return isStackEmpty() ? DSA_unspecified : getTopOfStack().DefaultAttr;
772  }
773  SourceLocation getDefaultDSALocation() const {
774  return isStackEmpty() ? SourceLocation() : getTopOfStack().DefaultAttrLoc;
775  }
776  OpenMPDefaultmapClauseModifier
777  getDefaultmapModifier(OpenMPDefaultmapClauseKind Kind) const {
778  return isStackEmpty()
779  ? OMPC_DEFAULTMAP_MODIFIER_unknown
780  : getTopOfStack().DefaultmapMap[Kind].ImplicitBehavior;
781  }
782  OpenMPDefaultmapClauseModifier
783  getDefaultmapModifierAtLevel(unsigned Level,
784  OpenMPDefaultmapClauseKind Kind) const {
785  return getStackElemAtLevel(Level).DefaultmapMap[Kind].ImplicitBehavior;
786  }
787  bool isDefaultmapCapturedByRef(unsigned Level,
788  OpenMPDefaultmapClauseKind Kind) const {
789  OpenMPDefaultmapClauseModifier M =
790  getDefaultmapModifierAtLevel(Level, Kind);
791  if (Kind == OMPC_DEFAULTMAP_scalar || Kind == OMPC_DEFAULTMAP_pointer) {
792  return (M == OMPC_DEFAULTMAP_MODIFIER_alloc) ||
793  (M == OMPC_DEFAULTMAP_MODIFIER_to) ||
794  (M == OMPC_DEFAULTMAP_MODIFIER_from) ||
795  (M == OMPC_DEFAULTMAP_MODIFIER_tofrom);
796  }
797  return true;
798  }
799  static bool mustBeFirstprivateBase(OpenMPDefaultmapClauseModifier M,
800  OpenMPDefaultmapClauseKind Kind) {
801  switch (Kind) {
802  case OMPC_DEFAULTMAP_scalar:
803  case OMPC_DEFAULTMAP_pointer:
804  return (M == OMPC_DEFAULTMAP_MODIFIER_unknown) ||
805  (M == OMPC_DEFAULTMAP_MODIFIER_firstprivate) ||
806  (M == OMPC_DEFAULTMAP_MODIFIER_default);
807  case OMPC_DEFAULTMAP_aggregate:
808  return M == OMPC_DEFAULTMAP_MODIFIER_firstprivate;
809  default:
810  break;
811  }
812  llvm_unreachable("Unexpected OpenMPDefaultmapClauseKind enum");
813  }
814  bool mustBeFirstprivateAtLevel(unsigned Level,
815  OpenMPDefaultmapClauseKind Kind) const {
816  OpenMPDefaultmapClauseModifier M =
817  getDefaultmapModifierAtLevel(Level, Kind);
818  return mustBeFirstprivateBase(M, Kind);
819  }
820  bool mustBeFirstprivate(OpenMPDefaultmapClauseKind Kind) const {
821  OpenMPDefaultmapClauseModifier M = getDefaultmapModifier(Kind);
822  return mustBeFirstprivateBase(M, Kind);
823  }
824 
825  /// Checks if the specified variable is a threadprivate.
826  bool isThreadPrivate(VarDecl *D) {
827  const DSAVarData DVar = getTopDSA(D, false);
828  return isOpenMPThreadPrivate(DVar.CKind);
829  }
830 
831  /// Marks current region as ordered (it has an 'ordered' clause).
832  void setOrderedRegion(bool IsOrdered, const Expr *Param,
833  OMPOrderedClause *Clause) {
834  if (IsOrdered)
835  getTopOfStack().OrderedRegion.emplace(Param, Clause);
836  else
837  getTopOfStack().OrderedRegion.reset();
838  }
839  /// Returns true, if region is ordered (has associated 'ordered' clause),
840  /// false - otherwise.
841  bool isOrderedRegion() const {
842  if (const SharingMapTy *Top = getTopOfStackOrNull())
843  return Top->OrderedRegion.has_value();
844  return false;
845  }
846  /// Returns optional parameter for the ordered region.
847  std::pair<const Expr *, OMPOrderedClause *> getOrderedRegionParam() const {
848  if (const SharingMapTy *Top = getTopOfStackOrNull())
849  if (Top->OrderedRegion)
850  return Top->OrderedRegion.value();
851  return std::make_pair(nullptr, nullptr);
852  }
853  /// Returns true, if parent region is ordered (has associated
854  /// 'ordered' clause), false - otherwise.
855  bool isParentOrderedRegion() const {
856  if (const SharingMapTy *Parent = getSecondOnStackOrNull())
857  return Parent->OrderedRegion.has_value();
858  return false;
859  }
860  /// Returns optional parameter for the ordered region.
861  std::pair<const Expr *, OMPOrderedClause *>
862  getParentOrderedRegionParam() const {
863  if (const SharingMapTy *Parent = getSecondOnStackOrNull())
864  if (Parent->OrderedRegion)
865  return Parent->OrderedRegion.value();
866  return std::make_pair(nullptr, nullptr);
867  }
868  /// Marks current region as nowait (it has a 'nowait' clause).
869  void setNowaitRegion(bool IsNowait = true) {
870  getTopOfStack().NowaitRegion = IsNowait;
871  }
872  /// Returns true, if parent region is nowait (has associated
873  /// 'nowait' clause), false - otherwise.
874  bool isParentNowaitRegion() const {
875  if (const SharingMapTy *Parent = getSecondOnStackOrNull())
876  return Parent->NowaitRegion;
877  return false;
878  }
879  /// Marks current region as untied (it has an 'untied' clause).
880  void setUntiedRegion(bool IsUntied = true) {
881  getTopOfStack().UntiedRegion = IsUntied;
882  }
883  /// Return true if current region is untied.
884  bool isUntiedRegion() const {
885  const SharingMapTy *Top = getTopOfStackOrNull();
886  return Top ? Top->UntiedRegion : false;
887  }
888  /// Marks parent region as cancel region.
889  void setParentCancelRegion(bool Cancel = true) {
890  if (SharingMapTy *Parent = getSecondOnStackOrNull())
891  Parent->CancelRegion |= Cancel;
892  }
893  /// Return true if current region has inner cancel construct.
894  bool isCancelRegion() const {
895  const SharingMapTy *Top = getTopOfStackOrNull();
896  return Top ? Top->CancelRegion : false;
897  }
898 
899  /// Mark that parent region already has scan directive.
900  void setParentHasScanDirective(SourceLocation Loc) {
901  if (SharingMapTy *Parent = getSecondOnStackOrNull())
902  Parent->PrevScanLocation = Loc;
903  }
904  /// Return true if the parent region already has a scan directive.
905  bool doesParentHasScanDirective() const {
906  const SharingMapTy *Top = getSecondOnStackOrNull();
907  return Top ? Top->PrevScanLocation.isValid() : false;
908  }
909  /// Returns the location of the previously specified scan directive.
910  SourceLocation getParentScanDirectiveLoc() const {
911  const SharingMapTy *Top = getSecondOnStackOrNull();
912  return Top ? Top->PrevScanLocation : SourceLocation();
913  }
914  /// Mark that parent region already has ordered directive.
915  void setParentHasOrderedDirective(SourceLocation Loc) {
916  if (SharingMapTy *Parent = getSecondOnStackOrNull())
917  Parent->PrevOrderedLocation = Loc;
918  }
919  /// Return true if the parent region already has an ordered directive.
920  bool doesParentHasOrderedDirective() const {
921  const SharingMapTy *Top = getSecondOnStackOrNull();
922  return Top ? Top->PrevOrderedLocation.isValid() : false;
923  }
924  /// Returns the location of the previously specified ordered directive.
925  SourceLocation getParentOrderedDirectiveLoc() const {
926  const SharingMapTy *Top = getSecondOnStackOrNull();
927  return Top ? Top->PrevOrderedLocation : SourceLocation();
928  }
929 
930  /// Set collapse value for the region.
931  void setAssociatedLoops(unsigned Val) {
932  getTopOfStack().AssociatedLoops = Val;
933  if (Val > 1)
934  getTopOfStack().HasMutipleLoops = true;
935  }
936  /// Return collapse value for region.
937  unsigned getAssociatedLoops() const {
938  const SharingMapTy *Top = getTopOfStackOrNull();
939  return Top ? Top->AssociatedLoops : 0;
940  }
941  /// Returns true if the construct is associated with multiple loops.
942  bool hasMutipleLoops() const {
943  const SharingMapTy *Top = getTopOfStackOrNull();
944  return Top ? Top->HasMutipleLoops : false;
945  }
946 
947  /// Marks current target region as one with closely nested teams
948  /// region.
949  void setParentTeamsRegionLoc(SourceLocation TeamsRegionLoc) {
950  if (SharingMapTy *Parent = getSecondOnStackOrNull())
951  Parent->InnerTeamsRegionLoc = TeamsRegionLoc;
952  }
953  /// Returns true, if current region has closely nested teams region.
954  bool hasInnerTeamsRegion() const {
955  return getInnerTeamsRegionLoc().isValid();
956  }
957  /// Returns location of the nested teams region (if any).
958  SourceLocation getInnerTeamsRegionLoc() const {
959  const SharingMapTy *Top = getTopOfStackOrNull();
960  return Top ? Top->InnerTeamsRegionLoc : SourceLocation();
961  }
962 
963  Scope *getCurScope() const {
964  const SharingMapTy *Top = getTopOfStackOrNull();
965  return Top ? Top->CurScope : nullptr;
966  }
967  void setContext(DeclContext *DC) { getTopOfStack().Context = DC; }
968  SourceLocation getConstructLoc() const {
969  const SharingMapTy *Top = getTopOfStackOrNull();
970  return Top ? Top->ConstructLoc : SourceLocation();
971  }
972 
973  /// Do the check specified in \a Check to all component lists and return true
974  /// if any issue is found.
975  bool checkMappableExprComponentListsForDecl(
976  const ValueDecl *VD, bool CurrentRegionOnly,
977  const llvm::function_ref<
978  bool(OMPClauseMappableExprCommon::MappableExprComponentListRef,
979  OpenMPClauseKind)>
980  Check) const {
981  if (isStackEmpty())
982  return false;
983  auto SI = begin();
984  auto SE = end();
985 
986  if (SI == SE)
987  return false;
988 
989  if (CurrentRegionOnly)
990  SE = std::next(SI);
991  else
992  std::advance(SI, 1);
993 
994  for (; SI != SE; ++SI) {
995  auto MI = SI->MappedExprComponents.find(VD);
996  if (MI != SI->MappedExprComponents.end())
997  for (OMPClauseMappableExprCommon::MappableExprComponentListRef L :
998  MI->second.Components)
999  if (Check(L, MI->second.Kind))
1000  return true;
1001  }
1002  return false;
1003  }
1004 
1005  /// Do the check specified in \a Check to all component lists at a given level
1006  /// and return true if any issue is found.
1007  bool checkMappableExprComponentListsForDeclAtLevel(
1008  const ValueDecl *VD, unsigned Level,
1009  const llvm::function_ref<
1010  bool(OMPClauseMappableExprCommon::MappableExprComponentListRef,
1011  OpenMPClauseKind)>
1012  Check) const {
1013  if (getStackSize() <= Level)
1014  return false;
1015 
1016  const SharingMapTy &StackElem = getStackElemAtLevel(Level);
1017  auto MI = StackElem.MappedExprComponents.find(VD);
1018  if (MI != StackElem.MappedExprComponents.end())
1019  for (OMPClauseMappableExprCommon::MappableExprComponentListRef L :
1020  MI->second.Components)
1021  if (Check(L, MI->second.Kind))
1022  return true;
1023  return false;
1024  }
1025 
1026  /// Create a new mappable expression component list associated with a given
1027  /// declaration and initialize it with the provided list of components.
1028  void addMappableExpressionComponents(
1029  const ValueDecl *VD,
1030  OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
1031  OpenMPClauseKind WhereFoundClauseKind) {
1032  MappedExprComponentTy &MEC = getTopOfStack().MappedExprComponents[VD];
1033  // Create new entry and append the new components there.
1034  MEC.Components.resize(MEC.Components.size() + 1);
1035  MEC.Components.back().append(Components.begin(), Components.end());
1036  MEC.Kind = WhereFoundClauseKind;
1037  }
1038 
1039  unsigned getNestingLevel() const {
1040  assert(!isStackEmpty());
1041  return getStackSize() - 1;
1042  }
1043  void addDoacrossDependClause(OMPDependClause *C,
1044  const OperatorOffsetTy &OpsOffs) {
1045  SharingMapTy *Parent = getSecondOnStackOrNull();
1046  assert(Parent && isOpenMPWorksharingDirective(Parent->Directive));
1047  Parent->DoacrossDepends.try_emplace(C, OpsOffs);
1048  }
1049  llvm::iterator_range<DoacrossDependMapTy::const_iterator>
1050  getDoacrossDependClauses() const {
1051  const SharingMapTy &StackElem = getTopOfStack();
1052  if (isOpenMPWorksharingDirective(StackElem.Directive)) {
1053  const DoacrossDependMapTy &Ref = StackElem.DoacrossDepends;
1054  return llvm::make_range(Ref.begin(), Ref.end());
1055  }
1056  return llvm::make_range(StackElem.DoacrossDepends.end(),
1057  StackElem.DoacrossDepends.end());
1058  }
1059 
1060  // Store types of classes which have been explicitly mapped
1061  void addMappedClassesQualTypes(QualType QT) {
1062  SharingMapTy &StackElem = getTopOfStack();
1063  StackElem.MappedClassesQualTypes.insert(QT);
1064  }
1065 
1066  // Return set of mapped classes types
1067  bool isClassPreviouslyMapped(QualType QT) const {
1068  const SharingMapTy &StackElem = getTopOfStack();
1069  return StackElem.MappedClassesQualTypes.contains(QT);
1070  }
1071 
1072  /// Adds global declare target to the parent target region.
1073  void addToParentTargetRegionLinkGlobals(DeclRefExpr *E) {
1074  assert(*OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(
1075  E->getDecl()) == OMPDeclareTargetDeclAttr::MT_Link &&
1076  "Expected declare target link global.");
1077  for (auto &Elem : *this) {
1078  if (isOpenMPTargetExecutionDirective(Elem.Directive)) {
1079  Elem.DeclareTargetLinkVarDecls.push_back(E);
1080  return;
1081  }
1082  }
1083  }
1084 
1085  /// Returns the list of globals with declare target link if current directive
1086  /// is target.
1087  ArrayRef<DeclRefExpr *> getLinkGlobals() const {
1088  assert(isOpenMPTargetExecutionDirective(getCurrentDirective()) &&
1089  "Expected target executable directive.");
1090  return getTopOfStack().DeclareTargetLinkVarDecls;
1091  }
1092 
1093  /// Adds list of allocators expressions.
1094  void addInnerAllocatorExpr(Expr *E) {
1095  getTopOfStack().InnerUsedAllocators.push_back(E);
1096  }
1097  /// Return list of used allocators.
1098  ArrayRef<Expr *> getInnerAllocators() const {
1099  return getTopOfStack().InnerUsedAllocators;
1100  }
1101  /// Marks the declaration as implicitly firstprivate in the task-based
1102  /// regions.
1103  void addImplicitTaskFirstprivate(unsigned Level, Decl *D) {
1104  getStackElemAtLevel(Level).ImplicitTaskFirstprivates.insert(D);
1105  }
1106  /// Checks if the decl is implicitly firstprivate in the task-based region.
1107  bool isImplicitTaskFirstprivate(Decl *D) const {
1108  return getTopOfStack().ImplicitTaskFirstprivates.contains(D);
1109  }
1110 
1111  /// Marks decl as used in uses_allocators clause as the allocator.
1112  void addUsesAllocatorsDecl(const Decl *D, UsesAllocatorsDeclKind Kind) {
1113  getTopOfStack().UsesAllocatorsDecls.try_emplace(D, Kind);
1114  }
1115  /// Checks if specified decl is used in uses allocator clause as the
1116  /// allocator.
1117  Optional<UsesAllocatorsDeclKind> isUsesAllocatorsDecl(unsigned Level,
1118  const Decl *D) const {
1119  const SharingMapTy &StackElem = getTopOfStack();
1120  auto I = StackElem.UsesAllocatorsDecls.find(D);
1121  if (I == StackElem.UsesAllocatorsDecls.end())
1122  return None;
1123  return I->getSecond();
1124  }
1125  Optional<UsesAllocatorsDeclKind> isUsesAllocatorsDecl(const Decl *D) const {
1126  const SharingMapTy &StackElem = getTopOfStack();
1127  auto I = StackElem.UsesAllocatorsDecls.find(D);
1128  if (I == StackElem.UsesAllocatorsDecls.end())
1129  return None;
1130  return I->getSecond();
1131  }
1132 
1133  void addDeclareMapperVarRef(Expr *Ref) {
1134  SharingMapTy &StackElem = getTopOfStack();
1135  StackElem.DeclareMapperVar = Ref;
1136  }
1137  const Expr *getDeclareMapperVarRef() const {
1138  const SharingMapTy *Top = getTopOfStackOrNull();
1139  return Top ? Top->DeclareMapperVar : nullptr;
1140  }
1141  /// get captured field from ImplicitDefaultFirstprivateFDs
1142  VarDecl *getImplicitFDCapExprDecl(const FieldDecl *FD) const {
1143  const_iterator I = begin();
1144  const_iterator EndI = end();
1145  size_t StackLevel = getStackSize();
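  // Walk from the innermost region outwards to the first region with
  // default(firstprivate) or default(private); StackLevel ends up as the
  // 1-based depth of that region.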
1146  for (; I != EndI; ++I) {
1147  if (I->DefaultAttr == DSA_firstprivate || I->DefaultAttr == DSA_private)
1148  break;
1149  StackLevel--;
1150  }
1151  assert((StackLevel > 0 && I != EndI) || (StackLevel == 0 && I == EndI));
1152  if (I == EndI)
1153  return nullptr;
1154  for (const auto &IFD : I->ImplicitDefaultFirstprivateFDs)
1155  if (IFD.FD == FD && IFD.StackLevel == StackLevel)
1156  return IFD.VD;
1157  return nullptr;
1158  }
1159  /// Check if capture decl is field captured in ImplicitDefaultFirstprivateFDs
1160  bool isImplicitDefaultFirstprivateFD(VarDecl *VD) const {
1161  const_iterator I = begin();
1162  const_iterator EndI = end();
1163  for (; I != EndI; ++I)
1164  if (I->DefaultAttr == DSA_firstprivate || I->DefaultAttr == DSA_private)
1165  break;
1166  if (I == EndI)
1167  return false;
1168  for (const auto &IFD : I->ImplicitDefaultFirstprivateFDs)
1169  if (IFD.VD == VD)
1170  return true;
1171  return false;
1172  }
1173  /// Store capture FD info in ImplicitDefaultFirstprivateFDs
1174  void addImplicitDefaultFirstprivateFD(const FieldDecl *FD, VarDecl *VD) {
1175  iterator I = begin();
1176  const_iterator EndI = end();
1177  size_t StackLevel = getStackSize();
1178  for (; I != EndI; ++I) {
1179  if (I->DefaultAttr == DSA_private || I->DefaultAttr == DSA_firstprivate) {
1180  I->ImplicitDefaultFirstprivateFDs.emplace_back(FD, StackLevel, VD);
1181  break;
1182  }
1183  StackLevel--;
1184  }
1185  assert((StackLevel > 0 && I != EndI) || (StackLevel == 0 && I == EndI));
1186  }
1187 };
1188 
1189 bool isImplicitTaskingRegion(OpenMPDirectiveKind DKind) {
1190  return isOpenMPParallelDirective(DKind) || isOpenMPTeamsDirective(DKind);
1191 }
1192 
1193 bool isImplicitOrExplicitTaskingRegion(OpenMPDirectiveKind DKind) {
1194  return isImplicitTaskingRegion(DKind) || isOpenMPTaskingDirective(DKind) ||
1195  DKind == OMPD_unknown;
1196 }
1197 
1198 } // namespace
1199 
1200 static const Expr *getExprAsWritten(const Expr *E) {
1201  if (const auto *FE = dyn_cast<FullExpr>(E))
1202  E = FE->getSubExpr();
1203 
1204  if (const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
1205  E = MTE->getSubExpr();
1206 
1207  while (const auto *Binder = dyn_cast<CXXBindTemporaryExpr>(E))
1208  E = Binder->getSubExpr();
1209 
1210  if (const auto *ICE = dyn_cast<ImplicitCastExpr>(E))
1211  E = ICE->getSubExprAsWritten();
1212  return E->IgnoreParens();
1213 }
1214 
1215 static Expr *getExprAsWritten(Expr *E) {
1216  return const_cast<Expr *>(getExprAsWritten(const_cast<const Expr *>(E)));
1217 }
1218 
1219 static const ValueDecl *getCanonicalDecl(const ValueDecl *D) {
1220  if (const auto *CED = dyn_cast<OMPCapturedExprDecl>(D))
1221  if (const auto *ME = dyn_cast<MemberExpr>(getExprAsWritten(CED->getInit())))
1222  D = ME->getMemberDecl();
1223  const auto *VD = dyn_cast<VarDecl>(D);
1224  const auto *FD = dyn_cast<FieldDecl>(D);
1225  if (VD != nullptr) {
1226  VD = VD->getCanonicalDecl();
1227  D = VD;
1228  } else {
1229  assert(FD);
1230  FD = FD->getCanonicalDecl();
1231  D = FD;
1232  }
1233  return D;
1234 }
1235 
1236 static ValueDecl *getCanonicalDecl(ValueDecl *D) {
1237  return const_cast<ValueDecl *>(
1238  getCanonicalDecl(const_cast<const ValueDecl *>(D)));
1239 }
1240 
1241 DSAStackTy::DSAVarData DSAStackTy::getDSA(const_iterator &Iter,
1242  ValueDecl *D) const {
1243  D = getCanonicalDecl(D);
1244  auto *VD = dyn_cast<VarDecl>(D);
1245  const auto *FD = dyn_cast<FieldDecl>(D);
1246  DSAVarData DVar;
1247  if (Iter == end()) {
1248  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1249  // in a region but not in construct]
1250  // File-scope or namespace-scope variables referenced in called routines
1251  // in the region are shared unless they appear in a threadprivate
1252  // directive.
1253  if (VD && !VD->isFunctionOrMethodVarDecl() && !isa<ParmVarDecl>(VD))
1254  DVar.CKind = OMPC_shared;
1255 
1256  // OpenMP [2.9.1.2, Data-sharing Attribute Rules for Variables Referenced
1257  // in a region but not in construct]
1258  // Variables with static storage duration that are declared in called
1259  // routines in the region are shared.
1260  if (VD && VD->hasGlobalStorage())
1261  DVar.CKind = OMPC_shared;
1262 
1263  // Non-static data members are shared by default.
1264  if (FD)
1265  DVar.CKind = OMPC_shared;
1266 
1267  return DVar;
1268  }
1269 
1270  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1271  // in a Construct, C/C++, predetermined, p.1]
1272  // Variables with automatic storage duration that are declared in a scope
1273  // inside the construct are private.
1274  if (VD && isOpenMPLocal(VD, Iter) && VD->isLocalVarDecl() &&
1275  (VD->getStorageClass() == SC_Auto || VD->getStorageClass() == SC_None)) {
1276  DVar.CKind = OMPC_private;
1277  return DVar;
1278  }
1279 
1280  DVar.DKind = Iter->Directive;
1281  // Explicitly specified attributes and local variables with predetermined
1282  // attributes.
1283  if (Iter->SharingMap.count(D)) {
1284  const DSAInfo &Data = Iter->SharingMap.lookup(D);
1285  DVar.RefExpr = Data.RefExpr.getPointer();
1286  DVar.PrivateCopy = Data.PrivateCopy;
1287  DVar.CKind = Data.Attributes;
1288  DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
1289  DVar.Modifier = Data.Modifier;
1290  DVar.AppliedToPointee = Data.AppliedToPointee;
1291  return DVar;
1292  }
1293 
1294  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1295  // in a Construct, C/C++, implicitly determined, p.1]
1296  // In a parallel or task construct, the data-sharing attributes of these
1297  // variables are determined by the default clause, if present.
1298  switch (Iter->DefaultAttr) {
1299  case DSA_shared:
1300  DVar.CKind = OMPC_shared;
1301  DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
1302  return DVar;
1303  case DSA_none:
1304  return DVar;
1305  case DSA_firstprivate:
1306  if (VD && VD->getStorageDuration() == SD_Static &&
1307  VD->getDeclContext()->isFileContext()) {
1308  DVar.CKind = OMPC_unknown;
1309  } else {
1310  DVar.CKind = OMPC_firstprivate;
1311  }
1312  DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
1313  return DVar;
1314  case DSA_private:
1315  // each variable with static storage duration that is declared
1316  // in a namespace or global scope and referenced in the construct,
1317  // and that does not have a predetermined data-sharing attribute
1318  if (VD && VD->getStorageDuration() == SD_Static &&
1319  VD->getDeclContext()->isFileContext()) {
1320  DVar.CKind = OMPC_unknown;
1321  } else {
1322  DVar.CKind = OMPC_private;
1323  }
1324  DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
1325  return DVar;
1326  case DSA_unspecified:
1327  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1328  // in a Construct, implicitly determined, p.2]
1329  // In a parallel construct, if no default clause is present, these
1330  // variables are shared.
1331  DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
1332  if ((isOpenMPParallelDirective(DVar.DKind) &&
1333  !isOpenMPTaskLoopDirective(DVar.DKind)) ||
1334  isOpenMPTeamsDirective(DVar.DKind)) {
1335  DVar.CKind = OMPC_shared;
1336  return DVar;
1337  }
1338 
1339  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1340  // in a Construct, implicitly determined, p.4]
1341  // In a task construct, if no default clause is present, a variable that in
1342  // the enclosing context is determined to be shared by all implicit tasks
1343  // bound to the current team is shared.
1344  if (isOpenMPTaskingDirective(DVar.DKind)) {
1345  DSAVarData DVarTemp;
1346  const_iterator I = Iter, E = end();
1347  do {
1348  ++I;
1349  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables
1350  // Referenced in a Construct, implicitly determined, p.6]
1351  // In a task construct, if no default clause is present, a variable
1352  // whose data-sharing attribute is not determined by the rules above is
1353  // firstprivate.
1354  DVarTemp = getDSA(I, D);
1355  if (DVarTemp.CKind != OMPC_shared) {
1356  DVar.RefExpr = nullptr;
1357  DVar.CKind = OMPC_firstprivate;
1358  return DVar;
1359  }
1360  } while (I != E && !isImplicitTaskingRegion(I->Directive));
1361  DVar.CKind =
1362  (DVarTemp.CKind == OMPC_unknown) ? OMPC_firstprivate : OMPC_shared;
1363  return DVar;
1364  }
1365  }
1366  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1367  // in a Construct, implicitly determined, p.3]
1368  // For constructs other than task, if no default clause is present, these
1369  // variables inherit their data-sharing attributes from the enclosing
1370  // context.
1371  return getDSA(++Iter, D);
1372 }
1373 
1374 const Expr *DSAStackTy::addUniqueAligned(const ValueDecl *D,
1375  const Expr *NewDE) {
1376  assert(!isStackEmpty() && "Data sharing attributes stack is empty");
1377  D = getCanonicalDecl(D);
1378  SharingMapTy &StackElem = getTopOfStack();
1379  auto It = StackElem.AlignedMap.find(D);
1380  if (It == StackElem.AlignedMap.end()) {
1381  assert(NewDE && "Unexpected nullptr expr to be added into aligned map");
1382  StackElem.AlignedMap[D] = NewDE;
1383  return nullptr;
1384  }
1385  assert(It->second && "Unexpected nullptr expr in the aligned map");
1386  return It->second;
1387 }
1388 
1389 const Expr *DSAStackTy::addUniqueNontemporal(const ValueDecl *D,
1390  const Expr *NewDE) {
1391  assert(!isStackEmpty() && "Data sharing attributes stack is empty");
1392  D = getCanonicalDecl(D);
1393  SharingMapTy &StackElem = getTopOfStack();
1394  auto It = StackElem.NontemporalMap.find(D);
1395  if (It == StackElem.NontemporalMap.end()) {
1396  assert(NewDE && "Unexpected nullptr expr to be added into nontemporal map");
1397  StackElem.NontemporalMap[D] = NewDE;
1398  return nullptr;
1399  }
1400  assert(It->second && "Unexpected nullptr expr in the nontemporal map");
1401  return It->second;
1402 }
1403 
1404 void DSAStackTy::addLoopControlVariable(const ValueDecl *D, VarDecl *Capture) {
1405  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
1406  D = getCanonicalDecl(D);
1407  SharingMapTy &StackElem = getTopOfStack();
1408  StackElem.LCVMap.try_emplace(
1409  D, LCDeclInfo(StackElem.LCVMap.size() + 1, Capture));
1410 }
1411 
1412 const DSAStackTy::LCDeclInfo
1413 DSAStackTy::isLoopControlVariable(const ValueDecl *D) const {
1414  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
1415  D = getCanonicalDecl(D);
1416  const SharingMapTy &StackElem = getTopOfStack();
1417  auto It = StackElem.LCVMap.find(D);
1418  if (It != StackElem.LCVMap.end())
1419  return It->second;
1420  return {0, nullptr};
1421 }
1422 
1423 const DSAStackTy::LCDeclInfo
1424 DSAStackTy::isLoopControlVariable(const ValueDecl *D, unsigned Level) const {
1425  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
1426  D = getCanonicalDecl(D);
1427  for (unsigned I = Level + 1; I > 0; --I) {
1428  const SharingMapTy &StackElem = getStackElemAtLevel(I - 1);
1429  auto It = StackElem.LCVMap.find(D);
1430  if (It != StackElem.LCVMap.end())
1431  return It->second;
1432  }
1433  return {0, nullptr};
1434 }
1435 
1436 const DSAStackTy::LCDeclInfo
1437 DSAStackTy::isParentLoopControlVariable(const ValueDecl *D) const {
1438  const SharingMapTy *Parent = getSecondOnStackOrNull();
1439  assert(Parent && "Data-sharing attributes stack is empty");
1440  D = getCanonicalDecl(D);
1441  auto It = Parent->LCVMap.find(D);
1442  if (It != Parent->LCVMap.end())
1443  return It->second;
1444  return {0, nullptr};
1445 }
1446 
1447 const ValueDecl *DSAStackTy::getParentLoopControlVariable(unsigned I) const {
1448  const SharingMapTy *Parent = getSecondOnStackOrNull();
1449  assert(Parent && "Data-sharing attributes stack is empty");
1450  if (Parent->LCVMap.size() < I)
1451  return nullptr;
1452  for (const auto &Pair : Parent->LCVMap)
1453  if (Pair.second.first == I)
1454  return Pair.first;
1455  return nullptr;
1456 }
1457 
1458 void DSAStackTy::addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
1459  DeclRefExpr *PrivateCopy, unsigned Modifier,
1460  bool AppliedToPointee) {
1461  D = getCanonicalDecl(D);
1462  if (A == OMPC_threadprivate) {
1463  DSAInfo &Data = Threadprivates[D];
1464  Data.Attributes = A;
1465  Data.RefExpr.setPointer(E);
1466  Data.PrivateCopy = nullptr;
1467  Data.Modifier = Modifier;
1468  } else {
1469  DSAInfo &Data = getTopOfStack().SharingMap[D];
1470  assert(Data.Attributes == OMPC_unknown || (A == Data.Attributes) ||
1471  (A == OMPC_firstprivate && Data.Attributes == OMPC_lastprivate) ||
1472  (A == OMPC_lastprivate && Data.Attributes == OMPC_firstprivate) ||
1473  (isLoopControlVariable(D).first && A == OMPC_private));
1474  Data.Modifier = Modifier;
1475  if (A == OMPC_lastprivate && Data.Attributes == OMPC_firstprivate) {
1476  Data.RefExpr.setInt(/*IntVal=*/true);
1477  return;
1478  }
1479  const bool IsLastprivate =
1480  A == OMPC_lastprivate || Data.Attributes == OMPC_lastprivate;
1481  Data.Attributes = A;
1482  Data.RefExpr.setPointerAndInt(E, IsLastprivate);
1483  Data.PrivateCopy = PrivateCopy;
1484  Data.AppliedToPointee = AppliedToPointee;
1485  if (PrivateCopy) {
1486  DSAInfo &Data = getTopOfStack().SharingMap[PrivateCopy->getDecl()];
1487  Data.Modifier = Modifier;
1488  Data.Attributes = A;
1489  Data.RefExpr.setPointerAndInt(PrivateCopy, IsLastprivate);
1490  Data.PrivateCopy = nullptr;
1491  Data.AppliedToPointee = AppliedToPointee;
1492  }
1493  }
1494 }
1495 
1496 /// Build a variable declaration for OpenMP loop iteration variable.
1497 static VarDecl *buildVarDecl(Sema &SemaRef, SourceLocation Loc, QualType Type,
1498  StringRef Name, const AttrVec *Attrs = nullptr,
1499  DeclRefExpr *OrigRef = nullptr) {
1500  DeclContext *DC = SemaRef.CurContext;
1501  IdentifierInfo *II = &SemaRef.PP.getIdentifierTable().get(Name);
1502  TypeSourceInfo *TInfo = SemaRef.Context.getTrivialTypeSourceInfo(Type, Loc);
1503  auto *Decl =
1504  VarDecl::Create(SemaRef.Context, DC, Loc, Loc, II, Type, TInfo, SC_None);
1505  if (Attrs) {
1506  for (specific_attr_iterator<AlignedAttr> I(Attrs->begin()), E(Attrs->end());
1507  I != E; ++I)
1508  Decl->addAttr(*I);
1509  }
1510  Decl->setImplicit();
1511  if (OrigRef) {
1512  Decl->addAttr(
1513  OMPReferencedVarAttr::CreateImplicit(SemaRef.Context, OrigRef));
1514  }
1515  return Decl;
1516 }
1517 
1518 static DeclRefExpr *buildDeclRefExpr(Sema &S, VarDecl *D, QualType Ty,
1519  SourceLocation Loc,
1520  bool RefersToCapture = false) {
1521  D->setReferenced();
1522  D->markUsed(S.Context);
1523  return DeclRefExpr::Create(S.getASTContext(), NestedNameSpecifierLoc(),
1524  SourceLocation(), D, RefersToCapture, Loc, Ty,
1525  VK_LValue);
1526 }
1527 
1528 void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
1529  BinaryOperatorKind BOK) {
1530  D = getCanonicalDecl(D);
1531  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
1532  assert(
1533  getTopOfStack().SharingMap[D].Attributes == OMPC_reduction &&
1534  "Additional reduction info may be specified only for reduction items.");
1535  ReductionData &ReductionData = getTopOfStack().ReductionMap[D];
1536  assert(ReductionData.ReductionRange.isInvalid() &&
1537  (getTopOfStack().Directive == OMPD_taskgroup ||
1538  ((isOpenMPParallelDirective(getTopOfStack().Directive) ||
1539  isOpenMPWorksharingDirective(getTopOfStack().Directive)) &&
1540  !isOpenMPSimdDirective(getTopOfStack().Directive))) &&
1541  "Additional reduction info may be specified only once for reduction "
1542  "items.");
1543  ReductionData.set(BOK, SR);
1544  Expr *&TaskgroupReductionRef = getTopOfStack().TaskgroupReductionRef;
1545  if (!TaskgroupReductionRef) {
1546  VarDecl *VD = buildVarDecl(SemaRef, SR.getBegin(),
1547  SemaRef.Context.VoidPtrTy, ".task_red.");
1548  TaskgroupReductionRef =
1549  buildDeclRefExpr(SemaRef, VD, SemaRef.Context.VoidPtrTy, SR.getBegin());
1550  }
1551 }
1552 
1553 void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
1554  const Expr *ReductionRef) {
1555  D = getCanonicalDecl(D);
1556  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
1557  assert(
1558  getTopOfStack().SharingMap[D].Attributes == OMPC_reduction &&
1559  "Additional reduction info may be specified only for reduction items.");
1560  ReductionData &ReductionData = getTopOfStack().ReductionMap[D];
1561  assert(ReductionData.ReductionRange.isInvalid() &&
1562  (getTopOfStack().Directive == OMPD_taskgroup ||
1563  ((isOpenMPParallelDirective(getTopOfStack().Directive) ||
1564  isOpenMPWorksharingDirective(getTopOfStack().Directive)) &&
1565  !isOpenMPSimdDirective(getTopOfStack().Directive))) &&
1566  "Additional reduction info may be specified only once for reduction "
1567  "items.");
1568  ReductionData.set(ReductionRef, SR);
1569  Expr *&TaskgroupReductionRef = getTopOfStack().TaskgroupReductionRef;
1570  if (!TaskgroupReductionRef) {
1571  VarDecl *VD = buildVarDecl(SemaRef, SR.getBegin(),
1572  SemaRef.Context.VoidPtrTy, ".task_red.");
1573  TaskgroupReductionRef =
1574  buildDeclRefExpr(SemaRef, VD, SemaRef.Context.VoidPtrTy, SR.getBegin());
1575  }
1576 }
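// For illustration, the ".task_red." descriptor built above underpins task
// reductions such as the following sketch (OpenMP 5.0; work() is a
// placeholder):
//
//   int sum = 0;
//   #pragma omp taskgroup task_reduction(+ : sum)
//   {
//   #pragma omp task in_reduction(+ : sum)
//     sum += work();
//   }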
1577 
1578 const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
1579  const ValueDecl *D, SourceRange &SR, BinaryOperatorKind &BOK,
1580  Expr *&TaskgroupDescriptor) const {
1581  D = getCanonicalDecl(D);
1582  assert(!isStackEmpty() && "Data-sharing attributes stack is empty.");
1583  for (const_iterator I = begin() + 1, E = end(); I != E; ++I) {
1584  const DSAInfo &Data = I->SharingMap.lookup(D);
1585  if (Data.Attributes != OMPC_reduction ||
1586  Data.Modifier != OMPC_REDUCTION_task)
1587  continue;
1588  const ReductionData &ReductionData = I->ReductionMap.lookup(D);
1589  if (!ReductionData.ReductionOp ||
1590  ReductionData.ReductionOp.is<const Expr *>())
1591  return DSAVarData();
1592  SR = ReductionData.ReductionRange;
1593  BOK = ReductionData.ReductionOp.get<ReductionData::BOKPtrType>();
1594  assert(I->TaskgroupReductionRef && "taskgroup reduction reference "
1595  "expression for the descriptor is not "
1596  "set.");
1597  TaskgroupDescriptor = I->TaskgroupReductionRef;
1598  return DSAVarData(I->Directive, OMPC_reduction, Data.RefExpr.getPointer(),
1599  Data.PrivateCopy, I->DefaultAttrLoc, OMPC_REDUCTION_task,
1600  /*AppliedToPointee=*/false);
1601  }
1602  return DSAVarData();
1603 }
1604 
1605 const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
1606  const ValueDecl *D, SourceRange &SR, const Expr *&ReductionRef,
1607  Expr *&TaskgroupDescriptor) const {
1608  D = getCanonicalDecl(D);
1609  assert(!isStackEmpty() && "Data-sharing attributes stack is empty.");
1610  for (const_iterator I = begin() + 1, E = end(); I != E; ++I) {
1611  const DSAInfo &Data = I->SharingMap.lookup(D);
1612  if (Data.Attributes != OMPC_reduction ||
1613  Data.Modifier != OMPC_REDUCTION_task)
1614  continue;
1615  const ReductionData &ReductionData = I->ReductionMap.lookup(D);
1616  if (!ReductionData.ReductionOp ||
1617  !ReductionData.ReductionOp.is<const Expr *>())
1618  return DSAVarData();
1619  SR = ReductionData.ReductionRange;
1620  ReductionRef = ReductionData.ReductionOp.get<const Expr *>();
1621  assert(I->TaskgroupReductionRef && "taskgroup reduction reference "
1622  "expression for the descriptor is not "
1623  "set.");
1624  TaskgroupDescriptor = I->TaskgroupReductionRef;
1625  return DSAVarData(I->Directive, OMPC_reduction, Data.RefExpr.getPointer(),
1626  Data.PrivateCopy, I->DefaultAttrLoc, OMPC_REDUCTION_task,
1627  /*AppliedToPointee=*/false);
1628  }
1629  return DSAVarData();
1630 }
1631 
1632 bool DSAStackTy::isOpenMPLocal(VarDecl *D, const_iterator I) const {
1633  D = D->getCanonicalDecl();
1634  for (const_iterator E = end(); I != E; ++I) {
1635  if (isImplicitOrExplicitTaskingRegion(I->Directive) ||
1636  isOpenMPTargetExecutionDirective(I->Directive)) {
1637  if (I->CurScope) {
1638  Scope *TopScope = I->CurScope->getParent();
1639  Scope *CurScope = getCurScope();
1640  while (CurScope && CurScope != TopScope && !CurScope->isDeclScope(D))
1641  CurScope = CurScope->getParent();
1642  return CurScope != TopScope;
1643  }
1644  for (DeclContext *DC = D->getDeclContext(); DC; DC = DC->getParent())
1645  if (I->Context == DC)
1646  return true;
1647  return false;
1648  }
1649  }
1650  return false;
1651 }
1652 
1653 static bool isConstNotMutableType(Sema &SemaRef, QualType Type,
1654  bool AcceptIfMutable = true,
1655  bool *IsClassType = nullptr) {
1656  ASTContext &Context = SemaRef.getASTContext();
1657  Type = Type.getNonReferenceType().getCanonicalType();
1658  bool IsConstant = Type.isConstant(Context);
1659  Type = Context.getBaseElementType(Type);
1660  const CXXRecordDecl *RD = AcceptIfMutable && SemaRef.getLangOpts().CPlusPlus
1661  ? Type->getAsCXXRecordDecl()
1662  : nullptr;
1663  if (const auto *CTSD = dyn_cast_or_null<ClassTemplateSpecializationDecl>(RD))
1664  if (const ClassTemplateDecl *CTD = CTSD->getSpecializedTemplate())
1665  RD = CTD->getTemplatedDecl();
1666  if (IsClassType)
1667  *IsClassType = RD;
1668  return IsConstant && !(SemaRef.getLangOpts().CPlusPlus && RD &&
1669  RD->hasDefinition() && RD->hasMutableFields());
1670 }
1671 
1672 static bool rejectConstNotMutableType(Sema &SemaRef, const ValueDecl *D,
1673  QualType Type, OpenMPClauseKind CKind,
1674  SourceLocation ELoc,
1675  bool AcceptIfMutable = true,
1676  bool ListItemNotVar = false) {
1677  ASTContext &Context = SemaRef.getASTContext();
1678  bool IsClassType;
1679  if (isConstNotMutableType(SemaRef, Type, AcceptIfMutable, &IsClassType)) {
1680  unsigned Diag = ListItemNotVar ? diag::err_omp_const_list_item
1681  : IsClassType ? diag::err_omp_const_not_mutable_variable
1682  : diag::err_omp_const_variable;
1683  SemaRef.Diag(ELoc, Diag) << getOpenMPClauseName(CKind);
1684  if (!ListItemNotVar && D) {
1685  const VarDecl *VD = dyn_cast<VarDecl>(D);
1686  bool IsDecl = !VD || VD->isThisDeclarationADefinition(Context) ==
1687  VarDecl::DeclarationOnly;
1688  SemaRef.Diag(D->getLocation(),
1689  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
1690  << D;
1691  }
1692  return true;
1693  }
1694  return false;
1695 }
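// A sketch of what rejectConstNotMutableType rejects, assuming C++ compiled
// with -fopenmp:
//
//   const int c = 42;
//   #pragma omp parallel for lastprivate(c) // error: const-qualified list item
//   for (int i = 0; i < 8; ++i) {}          // cannot be written back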
1696 
1697 const DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D,
1698  bool FromParent) {
1699  D = getCanonicalDecl(D);
1700  DSAVarData DVar;
1701 
1702  auto *VD = dyn_cast<VarDecl>(D);
1703  auto TI = Threadprivates.find(D);
1704  if (TI != Threadprivates.end()) {
1705  DVar.RefExpr = TI->getSecond().RefExpr.getPointer();
1706  DVar.CKind = OMPC_threadprivate;
1707  DVar.Modifier = TI->getSecond().Modifier;
1708  return DVar;
1709  }
1710  if (VD && VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
1711  DVar.RefExpr = buildDeclRefExpr(
1712  SemaRef, VD, D->getType().getNonReferenceType(),
1713  VD->getAttr<OMPThreadPrivateDeclAttr>()->getLocation());
1714  DVar.CKind = OMPC_threadprivate;
1715  addDSA(D, DVar.RefExpr, OMPC_threadprivate);
1716  return DVar;
1717  }
1718  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1719  // in a Construct, C/C++, predetermined, p.1]
1720  // Variables appearing in threadprivate directives are threadprivate.
1721  if ((VD && VD->getTLSKind() != VarDecl::TLS_None &&
1722  !(VD->hasAttr<OMPThreadPrivateDeclAttr>() &&
1723  SemaRef.getLangOpts().OpenMPUseTLS &&
1724  SemaRef.getASTContext().getTargetInfo().isTLSSupported())) ||
1725  (VD && VD->getStorageClass() == SC_Register &&
1726  VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())) {
1727  DVar.RefExpr = buildDeclRefExpr(
1728  SemaRef, VD, D->getType().getNonReferenceType(), D->getLocation());
1729  DVar.CKind = OMPC_threadprivate;
1730  addDSA(D, DVar.RefExpr, OMPC_threadprivate);
1731  return DVar;
1732  }
1733  if (SemaRef.getLangOpts().OpenMPCUDAMode && VD &&
1734  VD->isLocalVarDeclOrParm() && !isStackEmpty() &&
1735  !isLoopControlVariable(D).first) {
1736  const_iterator IterTarget =
1737  std::find_if(begin(), end(), [](const SharingMapTy &Data) {
1738  return isOpenMPTargetExecutionDirective(Data.Directive);
1739  });
1740  if (IterTarget != end()) {
1741  const_iterator ParentIterTarget = IterTarget + 1;
1742  for (const_iterator Iter = begin(); Iter != ParentIterTarget; ++Iter) {
1743  if (isOpenMPLocal(VD, Iter)) {
1744  DVar.RefExpr =
1745  buildDeclRefExpr(SemaRef, VD, D->getType().getNonReferenceType(),
1746  D->getLocation());
1747  DVar.CKind = OMPC_threadprivate;
1748  return DVar;
1749  }
1750  }
1751  if (!isClauseParsingMode() || IterTarget != begin()) {
1752  auto DSAIter = IterTarget->SharingMap.find(D);
1753  if (DSAIter != IterTarget->SharingMap.end() &&
1754  isOpenMPPrivate(DSAIter->getSecond().Attributes)) {
1755  DVar.RefExpr = DSAIter->getSecond().RefExpr.getPointer();
1756  DVar.CKind = OMPC_threadprivate;
1757  return DVar;
1758  }
1759  const_iterator End = end();
1760  if (!SemaRef.isOpenMPCapturedByRef(D,
1761  std::distance(ParentIterTarget, End),
1762  /*OpenMPCaptureLevel=*/0)) {
1763  DVar.RefExpr =
1764  buildDeclRefExpr(SemaRef, VD, D->getType().getNonReferenceType(),
1765  IterTarget->ConstructLoc);
1766  DVar.CKind = OMPC_threadprivate;
1767  return DVar;
1768  }
1769  }
1770  }
1771  }
1772 
1773  if (isStackEmpty())
1774  // Not in OpenMP execution region and top scope was already checked.
1775  return DVar;
1776 
1777  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1778  // in a Construct, C/C++, predetermined, p.4]
1779  // Static data members are shared.
1780  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1781  // in a Construct, C/C++, predetermined, p.7]
1782  // Variables with static storage duration that are declared in a scope
1783  // inside the construct are shared.
1784  if (VD && VD->isStaticDataMember()) {
1785  // Check for explicitly specified attributes.
1786  const_iterator I = begin();
1787  const_iterator EndI = end();
1788  if (FromParent && I != EndI)
1789  ++I;
1790  if (I != EndI) {
1791  auto It = I->SharingMap.find(D);
1792  if (It != I->SharingMap.end()) {
1793  const DSAInfo &Data = It->getSecond();
1794  DVar.RefExpr = Data.RefExpr.getPointer();
1795  DVar.PrivateCopy = Data.PrivateCopy;
1796  DVar.CKind = Data.Attributes;
1797  DVar.ImplicitDSALoc = I->DefaultAttrLoc;
1798  DVar.DKind = I->Directive;
1799  DVar.Modifier = Data.Modifier;
1800  DVar.AppliedToPointee = Data.AppliedToPointee;
1801  return DVar;
1802  }
1803  }
1804 
1805  DVar.CKind = OMPC_shared;
1806  return DVar;
1807  }
1808 
1809  auto &&MatchesAlways = [](OpenMPDirectiveKind) { return true; };
1810  // The predetermined shared attribute for const-qualified types having no
1811  // mutable members was removed after OpenMP 3.1.
1812  if (SemaRef.LangOpts.OpenMP <= 31) {
1813  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1814  // in a Construct, C/C++, predetermined, p.6]
1815  // Variables with const qualified type having no mutable member are
1816  // shared.
1817  if (isConstNotMutableType(SemaRef, D->getType())) {
1818  // Variables with const-qualified type having no mutable member may be
1819  // listed in a firstprivate clause, even if they are static data members.
1820  DSAVarData DVarTemp = hasInnermostDSA(
1821  D,
1822  [](OpenMPClauseKind C, bool) {
1823  return C == OMPC_firstprivate || C == OMPC_shared;
1824  },
1825  MatchesAlways, FromParent);
1826  if (DVarTemp.CKind != OMPC_unknown && DVarTemp.RefExpr)
1827  return DVarTemp;
1828 
1829  DVar.CKind = OMPC_shared;
1830  return DVar;
1831  }
1832  }
1833 
1834  // Explicitly specified attributes and local variables with predetermined
1835  // attributes.
1836  const_iterator I = begin();
1837  const_iterator EndI = end();
1838  if (FromParent && I != EndI)
1839  ++I;
1840  if (I == EndI)
1841  return DVar;
1842  auto It = I->SharingMap.find(D);
1843  if (It != I->SharingMap.end()) {
1844  const DSAInfo &Data = It->getSecond();
1845  DVar.RefExpr = Data.RefExpr.getPointer();
1846  DVar.PrivateCopy = Data.PrivateCopy;
1847  DVar.CKind = Data.Attributes;
1848  DVar.ImplicitDSALoc = I->DefaultAttrLoc;
1849  DVar.DKind = I->Directive;
1850  DVar.Modifier = Data.Modifier;
1851  DVar.AppliedToPointee = Data.AppliedToPointee;
1852  }
1853 
1854  return DVar;
1855 }
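// Sketch of the predetermined threadprivate cases handled above:
//
//   static int counter;
//   #pragma omp threadprivate(counter) // explicit threadprivate directive
//   thread_local int tls_counter;      // TLS variables are likewise treated
//                                      // as threadprivate when TLS is usable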
1856 
1857 const DSAStackTy::DSAVarData DSAStackTy::getImplicitDSA(ValueDecl *D,
1858  bool FromParent) const {
1859  if (isStackEmpty()) {
1860  const_iterator I;
1861  return getDSA(I, D);
1862  }
1863  D = getCanonicalDecl(D);
1864  const_iterator StartI = begin();
1865  const_iterator EndI = end();
1866  if (FromParent && StartI != EndI)
1867  ++StartI;
1868  return getDSA(StartI, D);
1869 }
1870 
1871 const DSAStackTy::DSAVarData DSAStackTy::getImplicitDSA(ValueDecl *D,
1872  unsigned Level) const {
1873  if (getStackSize() <= Level)
1874  return DSAVarData();
1875  D = getCanonicalDecl(D);
1876  const_iterator StartI = std::next(begin(), getStackSize() - 1 - Level);
1877  return getDSA(StartI, D);
1878 }
1879 
1880 const DSAStackTy::DSAVarData
1881 DSAStackTy::hasDSA(ValueDecl *D,
1882  const llvm::function_ref<bool(OpenMPClauseKind, bool,
1883  DefaultDataSharingAttributes)>
1884  CPred,
1885  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
1886  bool FromParent) const {
1887  if (isStackEmpty())
1888  return {};
1889  D = getCanonicalDecl(D);
1890  const_iterator I = begin();
1891  const_iterator EndI = end();
1892  if (FromParent && I != EndI)
1893  ++I;
1894  for (; I != EndI; ++I) {
1895  if (!DPred(I->Directive) &&
1896  !isImplicitOrExplicitTaskingRegion(I->Directive))
1897  continue;
1898  const_iterator NewI = I;
1899  DSAVarData DVar = getDSA(NewI, D);
1900  if (I == NewI && CPred(DVar.CKind, DVar.AppliedToPointee, I->DefaultAttr))
1901  return DVar;
1902  }
1903  return {};
1904 }
1905 
1906 const DSAStackTy::DSAVarData DSAStackTy::hasInnermostDSA(
1907  ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind, bool)> CPred,
1908  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
1909  bool FromParent) const {
1910  if (isStackEmpty())
1911  return {};
1912  D = getCanonicalDecl(D);
1913  const_iterator StartI = begin();
1914  const_iterator EndI = end();
1915  if (FromParent && StartI != EndI)
1916  ++StartI;
1917  if (StartI == EndI || !DPred(StartI->Directive))
1918  return {};
1919  const_iterator NewI = StartI;
1920  DSAVarData DVar = getDSA(NewI, D);
1921  return (NewI == StartI && CPred(DVar.CKind, DVar.AppliedToPointee))
1922  ? DVar
1923  : DSAVarData();
1924 }
1925 
1926 bool DSAStackTy::hasExplicitDSA(
1927  const ValueDecl *D,
1928  const llvm::function_ref<bool(OpenMPClauseKind, bool)> CPred,
1929  unsigned Level, bool NotLastprivate) const {
1930  if (getStackSize() <= Level)
1931  return false;
1932  D = getCanonicalDecl(D);
1933  const SharingMapTy &StackElem = getStackElemAtLevel(Level);
1934  auto I = StackElem.SharingMap.find(D);
1935  if (I != StackElem.SharingMap.end() && I->getSecond().RefExpr.getPointer() &&
1936  CPred(I->getSecond().Attributes, I->getSecond().AppliedToPointee) &&
1937  (!NotLastprivate || !I->getSecond().RefExpr.getInt()))
1938  return true;
1939  // Check predetermined rules for the loop control variables.
1940  auto LI = StackElem.LCVMap.find(D);
1941  if (LI != StackElem.LCVMap.end())
1942  return CPred(OMPC_private, /*AppliedToPointee=*/false);
1943  return false;
1944 }
1945 
1946 bool DSAStackTy::hasExplicitDirective(
1947  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
1948  unsigned Level) const {
1949  if (getStackSize() <= Level)
1950  return false;
1951  const SharingMapTy &StackElem = getStackElemAtLevel(Level);
1952  return DPred(StackElem.Directive);
1953 }
1954 
1955 bool DSAStackTy::hasDirective(
1956  const llvm::function_ref<bool(OpenMPDirectiveKind,
1957  const DeclarationNameInfo &, SourceLocation)>
1958  DPred,
1959  bool FromParent) const {
1960  // We look only in the enclosing region.
1961  size_t Skip = FromParent ? 2 : 1;
1962  for (const_iterator I = begin() + std::min(Skip, getStackSize()), E = end();
1963  I != E; ++I) {
1964  if (DPred(I->Directive, I->DirectiveName, I->ConstructLoc))
1965  return true;
1966  }
1967  return false;
1968 }
1969 
1970 void Sema::InitDataSharingAttributesStack() {
1971  VarDataSharingAttributesStack = new DSAStackTy(*this);
1972 }
1973 
1974 #define DSAStack static_cast<DSAStackTy *>(VarDataSharingAttributesStack)
1975 
1976 void Sema::pushOpenMPFunctionRegion() { DSAStack->pushFunction(); }
1977 
1978 void Sema::popOpenMPFunctionRegion(const FunctionScopeInfo *OldFSI) {
1979  DSAStack->popFunction(OldFSI);
1980 }
1981 
1982 static bool isOpenMPDeviceDelayedContext(Sema &S) {
1983  assert(S.LangOpts.OpenMP && S.LangOpts.OpenMPIsDevice &&
1984  "Expected OpenMP device compilation.");
1985  return !S.isInOpenMPTargetExecutionDirective();
1986 }
1987 
1988 namespace {
1989 /// Status of the function emission on the host/device.
1990 enum class FunctionEmissionStatus {
1991  Emitted,
1992  Discarded,
1993  Unknown,
1994 };
1995 } // anonymous namespace
1996 
1997 Sema::SemaDiagnosticBuilder Sema::diagIfOpenMPDeviceCode(SourceLocation Loc,
1998  unsigned DiagID,
1999  FunctionDecl *FD) {
2000  assert(LangOpts.OpenMP && LangOpts.OpenMPIsDevice &&
2001  "Expected OpenMP device compilation.");
2002 
2003  SemaDiagnosticBuilder::Kind Kind = SemaDiagnosticBuilder::K_Nop;
2004  if (FD) {
2005  FunctionEmissionStatus FES = getEmissionStatus(FD);
2006  switch (FES) {
2007  case FunctionEmissionStatus::Emitted:
2008  Kind = SemaDiagnosticBuilder::K_Immediate;
2009  break;
2010  case FunctionEmissionStatus::Unknown:
2011  // TODO: We should always delay diagnostics here in case a target
2012  // region is in a function we do not emit. However, as the
2013  // current diagnostics are associated with the function containing
2014  // the target region and we do not emit that one, we would miss out
2015  // on diagnostics for the target region itself. We need to anchor
2016  // the diagnostics with the new generated function *or* ensure we
2017  // emit diagnostics associated with the surrounding function.
2018  Kind = isOpenMPDeviceDelayedContext(*this)
2019  ? SemaDiagnosticBuilder::K_Deferred
2020  : SemaDiagnosticBuilder::K_Immediate;
2021  break;
2022  case FunctionEmissionStatus::TemplateDiscarded:
2023  case FunctionEmissionStatus::OMPDiscarded:
2024  Kind = SemaDiagnosticBuilder::K_Nop;
2025  break;
2026  case FunctionEmissionStatus::CUDADiscarded:
2027  llvm_unreachable("CUDADiscarded unexpected in OpenMP device compilation");
2028  break;
2029  }
2030  }
2031 
2032  return SemaDiagnosticBuilder(Kind, Loc, DiagID, FD, *this);
2033 }
2034 
2035 Sema::SemaDiagnosticBuilder Sema::diagIfOpenMPHostCode(SourceLocation Loc,
2036  unsigned DiagID,
2037  FunctionDecl *FD) {
2038  assert(LangOpts.OpenMP && !LangOpts.OpenMPIsDevice &&
2039  "Expected OpenMP host compilation.");
2040 
2041  SemaDiagnosticBuilder::Kind Kind = SemaDiagnosticBuilder::K_Nop;
2042  if (FD) {
2043  FunctionEmissionStatus FES = getEmissionStatus(FD);
2044  switch (FES) {
2045  case FunctionEmissionStatus::Emitted:
2046  Kind = SemaDiagnosticBuilder::K_Immediate;
2047  break;
2048  case FunctionEmissionStatus::Unknown:
2049  Kind = SemaDiagnosticBuilder::K_Deferred;
2050  break;
2051  case FunctionEmissionStatus::TemplateDiscarded:
2052  case FunctionEmissionStatus::OMPDiscarded:
2053  case FunctionEmissionStatus::CUDADiscarded:
2054  Kind = SemaDiagnosticBuilder::K_Nop;
2055  break;
2056  }
2057  }
2058 
2059  return SemaDiagnosticBuilder(Kind, Loc, DiagID, FD, *this);
2060 }
2061 
2062 static OpenMPDefaultmapClauseKind
2063 getVariableCategoryFromDecl(const LangOptions &LO, const ValueDecl *VD) {
2064  if (LO.OpenMP <= 45) {
2065  if (VD->getType().getNonReferenceType()->isScalarType())
2066  return OMPC_DEFAULTMAP_scalar;
2067  return OMPC_DEFAULTMAP_aggregate;
2068  }
2069  if (VD->getType().getNonReferenceType()->isAnyPointerType())
2070  return OMPC_DEFAULTMAP_pointer;
2071  if (VD->getType().getNonReferenceType()->isScalarType())
2072  return OMPC_DEFAULTMAP_scalar;
2073  return OMPC_DEFAULTMAP_aggregate;
2074 }
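// The category computed here corresponds to the defaultmap variable-category
// modifiers; a sketch assuming OpenMP 5.0 or later:
//
//   int s; int *p; int a[4];
//   #pragma omp target defaultmap(firstprivate : scalar) \
//                      defaultmap(tofrom : pointer)      \
//                      defaultmap(tofrom : aggregate)
//   { s++; p = &s; a[0]++; }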
2075 
2077  unsigned OpenMPCaptureLevel) const {
2078  assert(LangOpts.OpenMP && "OpenMP is not allowed");
2079 
2080  ASTContext &Ctx = getASTContext();
2081  bool IsByRef = true;
2082 
2083  // Find the directive that is associated with the provided scope.
2084  D = cast<ValueDecl>(D->getCanonicalDecl());
2085  QualType Ty = D->getType();
2086 
2087  bool IsVariableUsedInMapClause = false;
2088  if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective, Level)) {
2089  // This table summarizes how a given variable should be passed to the device
2090  // given its type and the clauses where it appears. This table is based on
2091  // the description in OpenMP 4.5 [2.10.4, target Construct] and
2092  // OpenMP 4.5 [2.15.5, Data-mapping Attribute Rules and Clauses].
2093  //
2094  // =========================================================================
2095  // | type | defaultmap | pvt | first | is_device_ptr | map | res. |
2096  // | |(tofrom:scalar)| | pvt | |has_dv_adr| |
2097  // =========================================================================
2098  // | scl | | | | - | | bycopy|
2099  // | scl | | - | x | - | - | bycopy|
2100  // | scl | | x | - | - | - | null |
2101  // | scl | x | | | - | | byref |
2102  // | scl | x | - | x | - | - | bycopy|
2103  // | scl | x | x | - | - | - | null |
2104  // | scl | | - | - | - | x | byref |
2105  // | scl | x | - | - | - | x | byref |
2106  //
2107  // | agg | n.a. | | | - | | byref |
2108  // | agg | n.a. | - | x | - | - | byref |
2109  // | agg | n.a. | x | - | - | - | null |
2110  // | agg | n.a. | - | - | - | x | byref |
2111  // | agg | n.a. | - | - | - | x[] | byref |
2112  //
2113  // | ptr | n.a. | | | - | | bycopy|
2114  // | ptr | n.a. | - | x | - | - | bycopy|
2115  // | ptr | n.a. | x | - | - | - | null |
2116  // | ptr | n.a. | - | - | - | x | byref |
2117  // | ptr | n.a. | - | - | - | x[] | bycopy|
2118  // | ptr | n.a. | - | - | x | | bycopy|
2119  // | ptr | n.a. | - | - | x | x | bycopy|
2120  // | ptr | n.a. | - | - | x | x[] | bycopy|
2121  // =========================================================================
2122  // Legend:
2123  // scl - scalar
2124  // ptr - pointer
2125  // agg - aggregate
2126  // x - applies
2127  // - - invalid in this combination
2128  // [] - mapped with an array section
2129  // byref - should be mapped by reference
2130  // bycopy - should be mapped by value
2131  // null - initialize a local variable to null on the device
2132  //
2133  // Observations:
2134  // - All scalar declarations that show up in a map clause have to be passed
2135  // by reference, because they may have been mapped in the enclosing data
2136  // environment.
2137  // - If the scalar value does not fit the size of uintptr, it has to be
2138  // passed by reference, regardless the result in the table above.
2139  // - For pointers mapped by value that have either an implicit map or an
2140  // array section, the runtime library may pass the NULL value to the
2141  // device instead of the value passed to it by the compiler.
2142 
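  // A concrete (non-normative) sketch of a few rows of this table:
  //
  //   int s = 0;
  //   #pragma omp target                     // scalar, no clauses:
  //   s += 1;                                //   implicit firstprivate -> bycopy
  //   #pragma omp target map(tofrom : s)     // scalar in a map clause:
  //   s += 1;                                //   -> byref
  //   #pragma omp target defaultmap(tofrom : scalar)
  //   s += 1;                                //   -> byref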
2143  if (Ty->isReferenceType())
2144  Ty = Ty->castAs<ReferenceType>()->getPointeeType();
2145 
2146  // Locate map clauses and see if the variable being captured is referred to
2147  // in any of those clauses. Here we only care about variables, not fields,
2148  // because fields are part of aggregates.
2149  bool IsVariableAssociatedWithSection = false;
2150 
2151  DSAStack->checkMappableExprComponentListsForDeclAtLevel(
2152  D, Level,
2153  [&IsVariableUsedInMapClause, &IsVariableAssociatedWithSection,
2154  D](OMPClauseMappableExprCommon::MappableExprComponentListRef
2155  MapExprComponents,
2156  OpenMPClauseKind WhereFoundClauseKind) {
2157  // Both map and has_device_addr clauses information influences how a
2158  // variable is captured. E.g. is_device_ptr does not require changing
2159  // the default behavior.
2160  if (WhereFoundClauseKind != OMPC_map &&
2161  WhereFoundClauseKind != OMPC_has_device_addr)
2162  return false;
2163 
2164  auto EI = MapExprComponents.rbegin();
2165  auto EE = MapExprComponents.rend();
2166 
2167  assert(EI != EE && "Invalid map expression!");
2168 
2169  if (isa<DeclRefExpr>(EI->getAssociatedExpression()))
2170  IsVariableUsedInMapClause |= EI->getAssociatedDeclaration() == D;
2171 
2172  ++EI;
2173  if (EI == EE)
2174  return false;
2175 
2176  if (isa<ArraySubscriptExpr>(EI->getAssociatedExpression()) ||
2177  isa<OMPArraySectionExpr>(EI->getAssociatedExpression()) ||
2178  isa<MemberExpr>(EI->getAssociatedExpression()) ||
2179  isa<OMPArrayShapingExpr>(EI->getAssociatedExpression())) {
2180  IsVariableAssociatedWithSection = true;
2181  // There is nothing more we need to know about this variable.
2182  return true;
2183  }
2184 
2185  // Keep looking for more map info.
2186  return false;
2187  });
2188 
2189  if (IsVariableUsedInMapClause) {
2190  // If variable is identified in a map clause it is always captured by
2191  // reference except if it is a pointer that is dereferenced somehow.
2192  IsByRef = !(Ty->isPointerType() && IsVariableAssociatedWithSection);
2193  } else {
2194  // By default, all the data that has a scalar type is mapped by copy
2195  // (except for reduction variables).
2196  // Defaultmap scalar is mutually exclusive with defaultmap pointer.
2197  IsByRef = (DSAStack->isForceCaptureByReferenceInTargetExecutable() &&
2198  !Ty->isAnyPointerType()) ||
2199  !Ty->isScalarType() ||
2200  DSAStack->isDefaultmapCapturedByRef(
2201  Level, getVariableCategoryFromDecl(LangOpts, D)) ||
2202  DSAStack->hasExplicitDSA(
2203  D,
2204  [](OpenMPClauseKind K, bool AppliedToPointee) {
2205  return K == OMPC_reduction && !AppliedToPointee;
2206  },
2207  Level);
2208  }
2209  }
2210 
2211  if (IsByRef && Ty.getNonReferenceType()->isScalarType()) {
2212  IsByRef =
2213  ((IsVariableUsedInMapClause &&
2214  DSAStack->getCaptureRegion(Level, OpenMPCaptureLevel) ==
2215  OMPD_target) ||
2216  !(DSAStack->hasExplicitDSA(
2217  D,
2218  [](OpenMPClauseKind K, bool AppliedToPointee) -> bool {
2219  return K == OMPC_firstprivate ||
2220  (K == OMPC_reduction && AppliedToPointee);
2221  },
2222  Level, /*NotLastprivate=*/true) ||
2223  DSAStack->isUsesAllocatorsDecl(Level, D))) &&
2224  // If the variable is artificial and must be captured by value - try to
2225  // capture by value.
2226  !(isa<OMPCapturedExprDecl>(D) && !D->hasAttr<OMPCaptureNoInitAttr>() &&
2227  !cast<OMPCapturedExprDecl>(D)->getInit()->isGLValue()) &&
2228  // If the variable is implicitly firstprivate and scalar - capture by
2229  // copy
2230  !((DSAStack->getDefaultDSA() == DSA_firstprivate ||
2231  DSAStack->getDefaultDSA() == DSA_private) &&
2232  !DSAStack->hasExplicitDSA(
2233  D, [](OpenMPClauseKind K, bool) { return K != OMPC_unknown; },
2234  Level) &&
2235  !DSAStack->isLoopControlVariable(D, Level).first);
2236  }
2237 
2238  // When passing data by copy, we need to make sure it fits the uintptr size
2239  // and alignment, because the runtime library only deals with uintptr types.
2240  // If it does not fit the uintptr size, we need to pass the data by reference
2241  // instead.
2242  if (!IsByRef &&
2243  (Ctx.getTypeSizeInChars(Ty) >
2244  Ctx.getTypeSizeInChars(Ctx.getUIntPtrType()) ||
2245  Ctx.getDeclAlign(D) > Ctx.getTypeAlignInChars(Ctx.getUIntPtrType()))) {
2246  IsByRef = true;
2247  }
2248 
2249  return IsByRef;
2250 }
2251 
2252 unsigned Sema::getOpenMPNestingLevel() const {
2253  assert(getLangOpts().OpenMP);
2254  return DSAStack->getNestingLevel();
2255 }
2256 
2257 bool Sema::isInOpenMPTaskUntiedContext() const {
2258  return isOpenMPTaskingDirective(DSAStack->getCurrentDirective()) &&
2259  DSAStack->isUntiedRegion();
2260 }
2261 
2262 bool Sema::isInOpenMPTargetExecutionDirective() const {
2263  return (isOpenMPTargetExecutionDirective(DSAStack->getCurrentDirective()) &&
2264  !DSAStack->isClauseParsingMode()) ||
2265  DSAStack->hasDirective(
2266  [](OpenMPDirectiveKind K, const DeclarationNameInfo &,
2267  SourceLocation) -> bool {
2268  return isOpenMPTargetExecutionDirective(K);
2269  },
2270  false);
2271 }
2272 
2273 bool Sema::isOpenMPRebuildMemberExpr(ValueDecl *D) const {
2274  // Only rebuild for Field.
2275  if (!dyn_cast<FieldDecl>(D))
2276  return false;
2277  DSAStackTy::DSAVarData DVarPrivate = DSAStack->hasDSA(
2278  D,
2279  [](OpenMPClauseKind C, bool AppliedToPointee,
2280  DefaultDataSharingAttributes DefaultAttr) {
2281  return isOpenMPPrivate(C) && !AppliedToPointee &&
2282  (DefaultAttr == DSA_firstprivate || DefaultAttr == DSA_private);
2283  },
2284  [](OpenMPDirectiveKind) { return true; },
2285  DSAStack->isClauseParsingMode());
2286  if (DVarPrivate.CKind != OMPC_unknown)
2287  return true;
2288  return false;
2289 }
2290 
2291 static OMPCapturedExprDecl *buildCaptureDecl(Sema &S, IdentifierInfo *Id,
2292  Expr *CaptureExpr, bool WithInit,
2293  DeclContext *CurContext,
2294  bool AsExpression);
2295 
2296 VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo,
2297  unsigned StopAt) {
2298  assert(LangOpts.OpenMP && "OpenMP is not allowed");
2299  D = getCanonicalDecl(D);
2300 
2301  auto *VD = dyn_cast<VarDecl>(D);
2302  // Do not capture constexpr variables.
2303  if (VD && VD->isConstexpr())
2304  return nullptr;
2305 
2306  // If we want to determine whether the variable should be captured from the
2307  // perspective of the current capturing scope, and we've already left all the
2308  // capturing scopes of the top directive on the stack, check from the
2309  // perspective of its parent directive (if any) instead.
2310  DSAStackTy::ParentDirectiveScope InParentDirectiveRAII(
2311  *DSAStack, CheckScopeInfo && DSAStack->isBodyComplete());
2312 
2313  // If we are attempting to capture a global variable in a directive with
2314  // 'target' we return true so that this global is also mapped to the device.
2315  //
2316  if (VD && !VD->hasLocalStorage() &&
2317  (getCurCapturedRegion() || getCurBlock() || getCurLambda())) {
2318  if (isInOpenMPTargetExecutionDirective()) {
2319  DSAStackTy::DSAVarData DVarTop =
2320  DSAStack->getTopDSA(D, DSAStack->isClauseParsingMode());
2321  if (DVarTop.CKind != OMPC_unknown && DVarTop.RefExpr)
2322  return VD;
2323  // If the declaration is enclosed in a 'declare target' directive,
2324  // then it should not be captured.
2325  //
2326  if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
2327  return nullptr;
2328  CapturedRegionScopeInfo *CSI = nullptr;
2329  for (FunctionScopeInfo *FSI : llvm::drop_begin(
2330  llvm::reverse(FunctionScopes),
2331  CheckScopeInfo ? (FunctionScopes.size() - (StopAt + 1)) : 0)) {
2332  if (!isa<CapturingScopeInfo>(FSI))
2333  return nullptr;
2334  if (auto *RSI = dyn_cast<CapturedRegionScopeInfo>(FSI))
2335  if (RSI->CapRegionKind == CR_OpenMP) {
2336  CSI = RSI;
2337  break;
2338  }
2339  }
2340  assert(CSI && "Failed to find CapturedRegionScopeInfo");
2341  SmallVector<OpenMPDirectiveKind, 4> Regions;
2342  getOpenMPCaptureRegions(Regions,
2343  DSAStack->getDirective(CSI->OpenMPLevel));
2344  if (Regions[CSI->OpenMPCaptureLevel] != OMPD_task)
2345  return VD;
2346  }
2347  if (isInOpenMPDeclareTargetContext()) {
2348  // Try to mark variable as declare target if it is used in capturing
2349  // regions.
2350  if (LangOpts.OpenMP <= 45 &&
2351  !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
2352  checkDeclIsAllowedInOpenMPTarget(nullptr, VD);
2353  return nullptr;
2354  }
2355  }
2356 
2357  if (CheckScopeInfo) {
2358  bool OpenMPFound = false;
2359  for (unsigned I = StopAt + 1; I > 0; --I) {
2360  FunctionScopeInfo *FSI = FunctionScopes[I - 1];
2361  if (!isa<CapturingScopeInfo>(FSI))
2362  return nullptr;
2363  if (auto *RSI = dyn_cast<CapturedRegionScopeInfo>(FSI))
2364  if (RSI->CapRegionKind == CR_OpenMP) {
2365  OpenMPFound = true;
2366  break;
2367  }
2368  }
2369  if (!OpenMPFound)
2370  return nullptr;
2371  }
2372 
2373  if (DSAStack->getCurrentDirective() != OMPD_unknown &&
2374  (!DSAStack->isClauseParsingMode() ||
2375  DSAStack->getParentDirective() != OMPD_unknown)) {
2376  auto &&Info = DSAStack->isLoopControlVariable(D);
2377  if (Info.first ||
2378  (VD && VD->hasLocalStorage() &&
2379  isImplicitOrExplicitTaskingRegion(DSAStack->getCurrentDirective())) ||
2380  (VD && DSAStack->isForceVarCapturing()))
2381  return VD ? VD : Info.second;
2382  DSAStackTy::DSAVarData DVarTop =
2383  DSAStack->getTopDSA(D, DSAStack->isClauseParsingMode());
2384  if (DVarTop.CKind != OMPC_unknown && isOpenMPPrivate(DVarTop.CKind) &&
2385  (!VD || VD->hasLocalStorage() || !DVarTop.AppliedToPointee))
2386  return VD ? VD : cast<VarDecl>(DVarTop.PrivateCopy->getDecl());
2387  // Threadprivate variables must not be captured.
2388  if (isOpenMPThreadPrivate(DVarTop.CKind))
2389  return nullptr;
2390  // The variable is not private or it is the variable in the directive with
2391  // default(none) clause and not used in any clause.
2392  DSAStackTy::DSAVarData DVarPrivate = DSAStack->hasDSA(
2393  D,
2394  [](OpenMPClauseKind C, bool AppliedToPointee, bool) {
2395  return isOpenMPPrivate(C) && !AppliedToPointee;
2396  },
2397  [](OpenMPDirectiveKind) { return true; },
2398  DSAStack->isClauseParsingMode());
2399  // Global shared must not be captured.
2400  if (VD && !VD->hasLocalStorage() && DVarPrivate.CKind == OMPC_unknown &&
2401  ((DSAStack->getDefaultDSA() != DSA_none &&
2402  DSAStack->getDefaultDSA() != DSA_private &&
2403  DSAStack->getDefaultDSA() != DSA_firstprivate) ||
2404  DVarTop.CKind == OMPC_shared))
2405  return nullptr;
2406  auto *FD = dyn_cast<FieldDecl>(D);
2407  if (DVarPrivate.CKind != OMPC_unknown && !VD && FD &&
2408  !DVarPrivate.PrivateCopy) {
2409  DSAStackTy::DSAVarData DVarPrivate = DSAStack->hasDSA(
2410  D,
2411  [](OpenMPClauseKind C, bool AppliedToPointee,
2412  DefaultDataSharingAttributes DefaultAttr) {
2413  return isOpenMPPrivate(C) && !AppliedToPointee &&
2414  (DefaultAttr == DSA_firstprivate ||
2415  DefaultAttr == DSA_private);
2416  },
2417  [](OpenMPDirectiveKind) { return true; },
2418  DSAStack->isClauseParsingMode());
2419  if (DVarPrivate.CKind == OMPC_unknown)
2420  return nullptr;
2421 
2422  VarDecl *VD = DSAStack->getImplicitFDCapExprDecl(FD);
2423  if (VD)
2424  return VD;
2425  if (getCurrentThisType().isNull())
2426  return nullptr;
2427  Expr *ThisExpr = BuildCXXThisExpr(SourceLocation(), getCurrentThisType(),
2428  /*IsImplicit=*/true);
2429  const CXXScopeSpec CS = CXXScopeSpec();
2430  Expr *ME = BuildMemberExpr(ThisExpr, /*IsArrow=*/true, SourceLocation(),
2431  NestedNameSpecifierLoc(), SourceLocation(), FD,
2432  DeclAccessPair::make(FD, FD->getAccess()),
2433  /*HadMultipleCandidates=*/false,
2434  DeclarationNameInfo(), FD->getType(),
2435  VK_LValue, OK_Ordinary);
2436  OMPCapturedExprDecl *CD = buildCaptureDecl(
2437  *this, FD->getIdentifier(), ME, DVarPrivate.CKind != OMPC_private,
2438  CurContext->getParent(), /*AsExpression=*/false);
2439  DeclRefExpr *VDPrivateRefExpr = buildDeclRefExpr(
2440  *this, CD, CD->getType().getNonReferenceType(), SourceLocation());
2441  VD = cast<VarDecl>(VDPrivateRefExpr->getDecl());
2442  DSAStack->addImplicitDefaultFirstprivateFD(FD, VD);
2443  return VD;
2444  }
2445  if (DVarPrivate.CKind != OMPC_unknown ||
2446  (VD && (DSAStack->getDefaultDSA() == DSA_none ||
2447  DSAStack->getDefaultDSA() == DSA_private ||
2448  DSAStack->getDefaultDSA() == DSA_firstprivate)))
2449  return VD ? VD : cast<VarDecl>(DVarPrivate.PrivateCopy->getDecl());
2450  }
2451  return nullptr;
2452 }
2453 
2454 void Sema::adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
2455  unsigned Level) const {
2456  FunctionScopesIndex -= getOpenMPCaptureLevels(DSAStack->getDirective(Level));
2457 }
2458 
2459 void Sema::startOpenMPLoop() {
2460  assert(LangOpts.OpenMP && "OpenMP must be enabled.");
2461  if (isOpenMPLoopDirective(DSAStack->getCurrentDirective()))
2462  DSAStack->loopInit();
2463 }
2464 
2465 void Sema::startOpenMPCXXRangeFor() {
2466  assert(LangOpts.OpenMP && "OpenMP must be enabled.");
2467  if (isOpenMPLoopDirective(DSAStack->getCurrentDirective())) {
2468  DSAStack->resetPossibleLoopCounter();
2469  DSAStack->loopStart();
2470  }
2471 }
2472 
2473 OpenMPClauseKind Sema::isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
2474  unsigned CapLevel) const {
2475  assert(LangOpts.OpenMP && "OpenMP is not allowed");
2476  if (DSAStack->getCurrentDirective() != OMPD_unknown &&
2477  (!DSAStack->isClauseParsingMode() ||
2478  DSAStack->getParentDirective() != OMPD_unknown)) {
2479  DSAStackTy::DSAVarData DVarPrivate = DSAStack->hasDSA(
2480  D,
2481  [](OpenMPClauseKind C, bool AppliedToPointee,
2482  DefaultDataSharingAttributes DefaultAttr) {
2483  return isOpenMPPrivate(C) && !AppliedToPointee &&
2484  DefaultAttr == DSA_private;
2485  },
2486  [](OpenMPDirectiveKind) { return true; },
2487  DSAStack->isClauseParsingMode());
2488  if (DVarPrivate.CKind == OMPC_private && isa<OMPCapturedExprDecl>(D) &&
2489  DSAStack->isImplicitDefaultFirstprivateFD(cast<VarDecl>(D)) &&
2490  !DSAStack->isLoopControlVariable(D).first)
2491  return OMPC_private;
2492  }
2493  if (DSAStack->hasExplicitDirective(isOpenMPTaskingDirective, Level)) {
2494  bool IsTriviallyCopyable =
2495  D->getType().getNonReferenceType().isTriviallyCopyableType(Context) &&
2496  !D->getType()
2497  .getNonReferenceType()
2498  .getCanonicalType()
2499  ->getAsCXXRecordDecl();
2500  OpenMPDirectiveKind DKind = DSAStack->getDirective(Level);
2501  SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
2502  getOpenMPCaptureRegions(CaptureRegions, DKind);
2503  if (isOpenMPTaskingDirective(CaptureRegions[CapLevel]) &&
2504  (IsTriviallyCopyable ||
2505  !isOpenMPTaskLoopDirective(CaptureRegions[CapLevel]))) {
2506  if (DSAStack->hasExplicitDSA(
2507  D,
2508  [](OpenMPClauseKind K, bool) { return K == OMPC_firstprivate; },
2509  Level, /*NotLastprivate=*/true))
2510  return OMPC_firstprivate;
2511  DSAStackTy::DSAVarData DVar = DSAStack->getImplicitDSA(D, Level);
2512  if (DVar.CKind != OMPC_shared &&
2513  !DSAStack->isLoopControlVariable(D, Level).first && !DVar.RefExpr) {
2514  DSAStack->addImplicitTaskFirstprivate(Level, D);
2515  return OMPC_firstprivate;
2516  }
2517  }
2518  }
2519  if (isOpenMPLoopDirective(DSAStack->getCurrentDirective())) {
2520  if (DSAStack->getAssociatedLoops() > 0 && !DSAStack->isLoopStarted()) {
2521  DSAStack->resetPossibleLoopCounter(D);
2522  DSAStack->loopStart();
2523  return OMPC_private;
2524  }
2525  if ((DSAStack->getPossiblyLoopCunter() == D->getCanonicalDecl() ||
2526  DSAStack->isLoopControlVariable(D).first) &&
2527  !DSAStack->hasExplicitDSA(
2528  D, [](OpenMPClauseKind K, bool) { return K != OMPC_private; },
2529  Level) &&
2530  !isOpenMPSimdDirective(DSAStack->getCurrentDirective()))
2531  return OMPC_private;
2532  }
2533  if (const auto *VD = dyn_cast<VarDecl>(D)) {
2534  if (DSAStack->isThreadPrivate(const_cast<VarDecl *>(VD)) &&
2535  DSAStack->isForceVarCapturing() &&
2536  !DSAStack->hasExplicitDSA(
2537  D, [](OpenMPClauseKind K, bool) { return K == OMPC_copyin; },
2538  Level))
2539  return OMPC_private;
2540  }
2541  // User-defined allocators are private since they must be defined in the
2542  // context of target region.
2543  if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective, Level) &&
2544  DSAStack->isUsesAllocatorsDecl(Level, D).value_or(
2545  DSAStackTy::UsesAllocatorsDeclKind::AllocatorTrait) ==
2546  DSAStackTy::UsesAllocatorsDeclKind::UserDefinedAllocator)
2547  return OMPC_private;
2548  return (DSAStack->hasExplicitDSA(
2549  D, [](OpenMPClauseKind K, bool) { return K == OMPC_private; },
2550  Level) ||
2551  (DSAStack->isClauseParsingMode() &&
2552  DSAStack->getClauseParsingMode() == OMPC_private) ||
2553  // Consider taskgroup reduction descriptor variable a private
2554  // to avoid possible capture in the region.
2555  (DSAStack->hasExplicitDirective(
2556  [](OpenMPDirectiveKind K) {
2557  return K == OMPD_taskgroup ||
2558  ((isOpenMPParallelDirective(K) ||
2559  isOpenMPWorksharingDirective(K)) &&
2560  !isOpenMPSimdDirective(K));
2561  },
2562  Level) &&
2563  DSAStack->isTaskgroupReductionRef(D, Level)))
2564  ? OMPC_private
2565  : OMPC_unknown;
2566 }
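// Sketch of the implicit task firstprivate rule applied above:
//
//   void f() {
//     int v = 1;
//     #pragma omp task // 'v' has no explicit DSA and is not shared in the
//     v += 1;          // enclosing context, so it becomes firstprivate here
//   }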
2567 
2568 void Sema::setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D,
2569  unsigned Level) {
2570  assert(LangOpts.OpenMP && "OpenMP is not allowed");
2571  D = getCanonicalDecl(D);
2572  OpenMPClauseKind OMPC = OMPC_unknown;
2573  for (unsigned I = DSAStack->getNestingLevel() + 1; I > Level; --I) {
2574  const unsigned NewLevel = I - 1;
2575  if (DSAStack->hasExplicitDSA(
2576  D,
2577  [&OMPC](const OpenMPClauseKind K, bool AppliedToPointee) {
2578  if (isOpenMPPrivate(K) && !AppliedToPointee) {
2579  OMPC = K;
2580  return true;
2581  }
2582  return false;
2583  },
2584  NewLevel))
2585  break;
2586  if (DSAStack->checkMappableExprComponentListsForDeclAtLevel(
2587  D, NewLevel,
2588  [](OMPClauseMappableExprCommon::MappableExprComponentListRef,
2589  OpenMPClauseKind) { return true; })) {
2590  OMPC = OMPC_map;
2591  break;
2592  }
2593  if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective,
2594  NewLevel)) {
2595  OMPC = OMPC_map;
2596  if (DSAStack->mustBeFirstprivateAtLevel(
2597  NewLevel, getVariableCategoryFromDecl(LangOpts, D)))
2598  OMPC = OMPC_firstprivate;
2599  break;
2600  }
2601  }
2602  if (OMPC != OMPC_unknown)
2603  FD->addAttr(OMPCaptureKindAttr::CreateImplicit(Context, unsigned(OMPC)));
2604 }
2605 
2606 bool Sema::isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level,
2607  unsigned CaptureLevel) const {
2608  assert(LangOpts.OpenMP && "OpenMP is not allowed");
2609  // Return true if the current level is no longer enclosed in a target region.
2610 
2611  SmallVector<OpenMPDirectiveKind, 4> Regions;
2612  getOpenMPCaptureRegions(Regions, DSAStack->getDirective(Level));
2613  const auto *VD = dyn_cast<VarDecl>(D);
2614  return VD && !VD->hasLocalStorage() &&
2615  DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective,
2616  Level) &&
2617  Regions[CaptureLevel] != OMPD_task;
2618 }
2619 
2620 bool Sema::isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level,
2621  unsigned CaptureLevel) const {
2622  assert(LangOpts.OpenMP && "OpenMP is not allowed");
2623  // Return true if the current level is no longer enclosed in a target region.
2624 
2625  if (const auto *VD = dyn_cast<VarDecl>(D)) {
2626  if (!VD->hasLocalStorage()) {
2627  if (isInOpenMPTargetExecutionDirective())
2628  return true;
2629  DSAStackTy::DSAVarData TopDVar =
2630  DSAStack->getTopDSA(D, /*FromParent=*/false);
2631  unsigned NumLevels =
2632  getOpenMPCaptureLevels(DSAStack->getDirective(Level));
2633  if (Level == 0)
2634  // A non-file-scope static variable with default(firstprivate)
2635  // should be globally captured.
2636  return (NumLevels == CaptureLevel + 1 &&
2637  (TopDVar.CKind != OMPC_shared ||
2638  DSAStack->getDefaultDSA() == DSA_firstprivate));
2639  do {
2640  --Level;
2641  DSAStackTy::DSAVarData DVar = DSAStack->getImplicitDSA(D, Level);
2642  if (DVar.CKind != OMPC_shared)
2643  return true;
2644  } while (Level > 0);
2645  }
2646  }
2647  return true;
2648 }
2649 
2650 void Sema::DestroyDataSharingAttributesStack() { delete DSAStack; }
2651 
2652 void Sema::ActOnOpenMPBeginDeclareVariant(SourceLocation Loc,
2653  OMPTraitInfo &TI) {
2654  OMPDeclareVariantScopes.push_back(OMPDeclareVariantScope(TI));
2655 }
2656 
2657 void Sema::ActOnOpenMPEndDeclareVariant() {
2658  assert(isInOpenMPDeclareVariantScope() &&
2659  "Not in OpenMP declare variant scope!");
2660 
2661  OMPDeclareVariantScopes.pop_back();
2662 }
2663 
2664 void Sema::finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
2665  const FunctionDecl *Callee,
2666  SourceLocation Loc) {
2667  assert(LangOpts.OpenMP && "Expected OpenMP compilation mode.");
2669  OMPDeclareTargetDeclAttr::getDeviceType(Caller->getMostRecentDecl());
2670  // Ignore host functions during device analysis.
2671  if (LangOpts.OpenMPIsDevice &&
2672  (!DevTy || *DevTy == OMPDeclareTargetDeclAttr::DT_Host))
2673  return;
2674  // Ignore nohost functions during host analysis.
2675  if (!LangOpts.OpenMPIsDevice && DevTy &&
2676  *DevTy == OMPDeclareTargetDeclAttr::DT_NoHost)
2677  return;
2678  const FunctionDecl *FD = Callee->getMostRecentDecl();
2679  DevTy = OMPDeclareTargetDeclAttr::getDeviceType(FD);
2680  if (LangOpts.OpenMPIsDevice && DevTy &&
2681  *DevTy == OMPDeclareTargetDeclAttr::DT_Host) {
2682  // Diagnose host function called during device codegen.
2683  StringRef HostDevTy =
2684  getOpenMPSimpleClauseTypeName(OMPC_device_type, OMPC_DEVICE_TYPE_host);
2685  Diag(Loc, diag::err_omp_wrong_device_function_call) << HostDevTy << 0;
2686  Diag(*OMPDeclareTargetDeclAttr::getLocation(FD),
2687  diag::note_omp_marked_device_type_here)
2688  << HostDevTy;
2689  return;
2690  }
2691  if (!LangOpts.OpenMPIsDevice && !LangOpts.OpenMPOffloadMandatory && DevTy &&
2692  *DevTy == OMPDeclareTargetDeclAttr::DT_NoHost) {
2693  // Diagnose nohost function called during host codegen.
2694  StringRef NoHostDevTy = getOpenMPSimpleClauseTypeName(
2695  OMPC_device_type, OMPC_DEVICE_TYPE_nohost);
2696  Diag(Loc, diag::err_omp_wrong_device_function_call) << NoHostDevTy << 1;
2697  Diag(*OMPDeclareTargetDeclAttr::getLocation(FD),
2698  diag::note_omp_marked_device_type_here)
2699  << NoHostDevTy;
2700  }
2701 }
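// The device_type mismatch diagnosed above can be reproduced with a sketch
// like this (when the same translation unit is compiled for the device):
//
//   #pragma omp declare target device_type(host)
//   void host_only();
//   #pragma omp end declare target
//
//   #pragma omp declare target
//   void on_device() { host_only(); } // error: host function called in code
//   #pragma omp end declare target    // that must be emitted for the device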
2702 
2703 void Sema::StartOpenMPDSABlock(OpenMPDirectiveKind DKind,
2704  const DeclarationNameInfo &DirName,
2705  Scope *CurScope, SourceLocation Loc) {
2706  DSAStack->push(DKind, DirName, CurScope, Loc);
2707  PushExpressionEvaluationContext(
2708  ExpressionEvaluationContext::PotentiallyEvaluated);
2709 }
2710 
2711 void Sema::StartOpenMPClause(OpenMPClauseKind K) {
2712  DSAStack->setClauseParsingMode(K);
2713 }
2714 
2715 void Sema::EndOpenMPClause() {
2716  DSAStack->setClauseParsingMode(/*K=*/OMPC_unknown);
2717  CleanupVarDeclMarking();
2718 }
2719 
2720 static std::pair<ValueDecl *, bool>
2721 getPrivateItem(Sema &S, Expr *&RefExpr, SourceLocation &ELoc,
2722  SourceRange &ERange, bool AllowArraySection = false,
2723  StringRef DiagType = "");
2724 
2725 /// Check consistency of the reduction clauses.
2726 static void checkReductionClauses(Sema &S, DSAStackTy *Stack,
2727  ArrayRef<OMPClause *> Clauses) {
2728  bool InscanFound = false;
2729  SourceLocation InscanLoc;
2730  // OpenMP 5.0, 2.19.5.4 reduction Clause, Restrictions.
2731  // A reduction clause without the inscan reduction-modifier may not appear on
2732  // a construct on which a reduction clause with the inscan reduction-modifier
2733  // appears.
2734  for (OMPClause *C : Clauses) {
2735  if (C->getClauseKind() != OMPC_reduction)
2736  continue;
2737  auto *RC = cast<OMPReductionClause>(C);
2738  if (RC->getModifier() == OMPC_REDUCTION_inscan) {
2739  InscanFound = true;
2740  InscanLoc = RC->getModifierLoc();
2741  continue;
2742  }
2743  if (RC->getModifier() == OMPC_REDUCTION_task) {
2744  // OpenMP 5.0, 2.19.5.4 reduction Clause.
2745  // A reduction clause with the task reduction-modifier may only appear on
2746  // a parallel construct, a worksharing construct or a combined or
2747  // composite construct for which any of the aforementioned constructs is a
2748  // constituent construct and simd or loop are not constituent constructs.
2749  OpenMPDirectiveKind CurDir = Stack->getCurrentDirective();
2750  if (!(isOpenMPParallelDirective(CurDir) ||
2751  isOpenMPWorksharingDirective(CurDir)) ||
2752  isOpenMPSimdDirective(CurDir))
2753  S.Diag(RC->getModifierLoc(),
2754  diag::err_omp_reduction_task_not_parallel_or_worksharing);
2755  continue;
2756  }
2757  }
2758  if (InscanFound) {
2759  for (OMPClause *C : Clauses) {
2760  if (C->getClauseKind() != OMPC_reduction)
2761  continue;
2762  auto *RC = cast<OMPReductionClause>(C);
2763  if (RC->getModifier() != OMPC_REDUCTION_inscan) {
2764  S.Diag(RC->getModifier() == OMPC_REDUCTION_unknown
2765  ? RC->getBeginLoc()
2766  : RC->getModifierLoc(),
2767  diag::err_omp_inscan_reduction_expected);
2768  S.Diag(InscanLoc, diag::note_omp_previous_inscan_reduction);
2769  continue;
2770  }
2771  for (Expr *Ref : RC->varlists()) {
2772  assert(Ref && "NULL expr in OpenMP reduction clause.");
2773  SourceLocation ELoc;
2774  SourceRange ERange;
2775  Expr *SimpleRefExpr = Ref;
2776  auto Res = getPrivateItem(S, SimpleRefExpr, ELoc, ERange,
2777  /*AllowArraySection=*/true);
2778  ValueDecl *D = Res.first;
2779  if (!D)
2780  continue;
2781  if (!Stack->isUsedInScanDirective(getCanonicalDecl(D))) {
2782  S.Diag(Ref->getExprLoc(),
2783  diag::err_omp_reduction_not_inclusive_exclusive)
2784  << Ref->getSourceRange();
2785  }
2786  }
2787  }
2788  }
2789 }
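// Sketch of the inscan restriction enforced above (OpenMP 5.0; n is a
// placeholder):
//
//   int x = 0, y = 0;
//   #pragma omp parallel for reduction(inscan, + : x) reduction(+ : y)
//   for (int i = 0; i < n; ++i) {   // error: once an inscan reduction appears,
//     x += i;                       // every reduction on the construct must
//     #pragma omp scan inclusive(x) // use the inscan modifier
//     y += i;
//   }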
2790 
2791 static void checkAllocateClauses(Sema &S, DSAStackTy *Stack,
2792  ArrayRef<OMPClause *> Clauses);
2793 static DeclRefExpr *buildCapture(Sema &S, ValueDecl *D, Expr *CaptureExpr,
2794  bool WithInit);
2795 
2796 static void reportOriginalDsa(Sema &SemaRef, const DSAStackTy *Stack,
2797  const ValueDecl *D,
2798  const DSAStackTy::DSAVarData &DVar,
2799  bool IsLoopIterVar = false);
2800 
2801 void Sema::EndOpenMPDSABlock(Stmt *CurDirective) {
2802  // OpenMP [2.14.3.5, Restrictions, C/C++, p.1]
2803  // A variable of class type (or array thereof) that appears in a lastprivate
2804  // clause requires an accessible, unambiguous default constructor for the
2805  // class type, unless the list item is also specified in a firstprivate
2806  // clause.
2807  if (const auto *D = dyn_cast_or_null<OMPExecutableDirective>(CurDirective)) {
2808  for (OMPClause *C : D->clauses()) {
2809  if (auto *Clause = dyn_cast<OMPLastprivateClause>(C)) {
2810  SmallVector<Expr *, 8> PrivateCopies;
2811  for (Expr *DE : Clause->varlists()) {
2812  if (DE->isValueDependent() || DE->isTypeDependent()) {
2813  PrivateCopies.push_back(nullptr);
2814  continue;
2815  }
2816  auto *DRE = cast<DeclRefExpr>(DE->IgnoreParens());
2817  auto *VD = cast<VarDecl>(DRE->getDecl());
2818  QualType Type = VD->getType().getNonReferenceType();
2819  const DSAStackTy::DSAVarData DVar =
2820  DSAStack->getTopDSA(VD, /*FromParent=*/false);
2821  if (DVar.CKind == OMPC_lastprivate) {
2822  // Generate helper private variable and initialize it with the
2823  // default value. The address of the original variable is replaced
2824  // by the address of the new private variable in CodeGen. This new
2825  // variable is not added to IdResolver, so the code in the OpenMP
2826  // region uses original variable for proper diagnostics.
2827  VarDecl *VDPrivate = buildVarDecl(
2828  *this, DE->getExprLoc(), Type.getUnqualifiedType(),
2829  VD->getName(), VD->hasAttrs() ? &VD->getAttrs() : nullptr, DRE);
2830  ActOnUninitializedDecl(VDPrivate);
2831  if (VDPrivate->isInvalidDecl()) {
2832  PrivateCopies.push_back(nullptr);
2833  continue;
2834  }
2835  PrivateCopies.push_back(buildDeclRefExpr(
2836  *this, VDPrivate, DE->getType(), DE->getExprLoc()));
2837  } else {
2838  // The variable is also a firstprivate, so initialization sequence
2839  // for private copy is generated already.
2840  PrivateCopies.push_back(nullptr);
2841  }
2842  }
2843  Clause->setPrivateCopies(PrivateCopies);
2844  continue;
2845  }
2846  // Finalize nontemporal clause by handling private copies, if any.
2847  if (auto *Clause = dyn_cast<OMPNontemporalClause>(C)) {
2848  SmallVector<Expr *, 8> PrivateRefs;
2849  for (Expr *RefExpr : Clause->varlists()) {
2850  assert(RefExpr && "NULL expr in OpenMP nontemporal clause.");
2851  SourceLocation ELoc;
2852  SourceRange ERange;
2853  Expr *SimpleRefExpr = RefExpr;
2854  auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange);
2855  if (Res.second)
2856  // It will be analyzed later.
2857  PrivateRefs.push_back(RefExpr);
2858  ValueDecl *D = Res.first;
2859  if (!D)
2860  continue;
2861 
2862  const DSAStackTy::DSAVarData DVar =
2863  DSAStack->getTopDSA(D, /*FromParent=*/false);
2864  PrivateRefs.push_back(DVar.PrivateCopy ? DVar.PrivateCopy
2865  : SimpleRefExpr);
2866  }
2867  Clause->setPrivateRefs(PrivateRefs);
2868  continue;
2869  }
2870  if (auto *Clause = dyn_cast<OMPUsesAllocatorsClause>(C)) {
2871  for (unsigned I = 0, E = Clause->getNumberOfAllocators(); I < E; ++I) {
2872  OMPUsesAllocatorsClause::Data D = Clause->getAllocatorData(I);
2873  auto *DRE = dyn_cast<DeclRefExpr>(D.Allocator->IgnoreParenImpCasts());
2874  if (!DRE)
2875  continue;
2876  ValueDecl *VD = DRE->getDecl();
2877  if (!VD || !isa<VarDecl>(VD))
2878  continue;
2879  DSAStackTy::DSAVarData DVar =
2880  DSAStack->getTopDSA(VD, /*FromParent=*/false);
2881  // OpenMP [2.12.5, target Construct]
2882  // Memory allocators that appear in a uses_allocators clause cannot
2883  // appear in other data-sharing attribute clauses or data-mapping
2884  // attribute clauses in the same construct.
2885  Expr *MapExpr = nullptr;
2886  if (DVar.RefExpr ||
2887  DSAStack->checkMappableExprComponentListsForDecl(
2888  VD, /*CurrentRegionOnly=*/true,
2889  [VD, &MapExpr](
2890  OMPClauseMappableExprCommon::MappableExprComponentListRef
2891  MapExprComponents,
2892  OpenMPClauseKind C) {
2893  auto MI = MapExprComponents.rbegin();
2894  auto ME = MapExprComponents.rend();
2895  if (MI != ME &&
2896  MI->getAssociatedDeclaration()->getCanonicalDecl() ==
2897  VD->getCanonicalDecl()) {
2898  MapExpr = MI->getAssociatedExpression();
2899  return true;
2900  }
2901  return false;
2902  })) {
2903  Diag(D.Allocator->getExprLoc(),
2904  diag::err_omp_allocator_used_in_clauses)
2905  << D.Allocator->getSourceRange();
2906  if (DVar.RefExpr)
2907  reportOriginalDsa(*this, DSAStack, VD, DVar);
2908  else
2909  Diag(MapExpr->getExprLoc(), diag::note_used_here)
2910  << MapExpr->getSourceRange();
2911  }
2912  }
2913  continue;
2914  }
2915  }
2916  // Check allocate clauses.
2917  if (!CurContext->isDependentContext())
2918  checkAllocateClauses(*this, DSAStack, D->clauses());
2919  checkReductionClauses(*this, DSAStack, D->clauses());
2920  }
2921 
2922  DSAStack->pop();
2923  DiscardCleanupsInEvaluationContext();
2924  PopExpressionEvaluationContext();
2925 }
2926 
2927 static bool FinishOpenMPLinearClause(OMPLinearClause &Clause, DeclRefExpr *IV,
2928  Expr *NumIterations, Sema &SemaRef,
2929  Scope *S, DSAStackTy *Stack);
2930 
2931 namespace {
2932 
2933 class VarDeclFilterCCC final : public CorrectionCandidateCallback {
2934 private:
2935  Sema &SemaRef;
2936 
2937 public:
2938  explicit VarDeclFilterCCC(Sema &S) : SemaRef(S) {}
2939  bool ValidateCandidate(const TypoCorrection &Candidate) override {
2940  NamedDecl *ND = Candidate.getCorrectionDecl();
2941  if (const auto *VD = dyn_cast_or_null<VarDecl>(ND)) {
2942  return VD->hasGlobalStorage() &&
2943  SemaRef.isDeclInScope(ND, SemaRef.getCurLexicalContext(),
2944  SemaRef.getCurScope());
2945  }
2946  return false;
2947  }
2948 
2949  std::unique_ptr<CorrectionCandidateCallback> clone() override {
2950  return std::make_unique<VarDeclFilterCCC>(*this);
2951  }
2952 };
2953 
2954 class VarOrFuncDeclFilterCCC final : public CorrectionCandidateCallback {
2955 private:
2956  Sema &SemaRef;
2957 
2958 public:
2959  explicit VarOrFuncDeclFilterCCC(Sema &S) : SemaRef(S) {}
2960  bool ValidateCandidate(const TypoCorrection &Candidate) override {
2961  NamedDecl *ND = Candidate.getCorrectionDecl();
2962  if (ND && ((isa<VarDecl>(ND) && ND->getKind() == Decl::Var) ||
2963  isa<FunctionDecl>(ND))) {
2964  return SemaRef.isDeclInScope(ND, SemaRef.getCurLexicalContext(),
2965  SemaRef.getCurScope());
2966  }
2967  return false;
2968  }
2969 
2970  std::unique_ptr<CorrectionCandidateCallback> clone() override {
2971  return std::make_unique<VarOrFuncDeclFilterCCC>(*this);
2972  }
2973 };
2974 
2975 } // namespace
2976 
2977 ExprResult Sema::ActOnOpenMPIdExpression(Scope *CurScope,
2978  CXXScopeSpec &ScopeSpec,
2979  const DeclarationNameInfo &Id,
2980  OpenMPDirectiveKind Kind) {
2981  LookupResult Lookup(*this, Id, LookupOrdinaryName);
2982  LookupParsedName(Lookup, CurScope, &ScopeSpec, true);
2983 
2984  if (Lookup.isAmbiguous())
2985  return ExprError();
2986 
2987  VarDecl *VD;
2988  if (!Lookup.isSingleResult()) {
2989  VarDeclFilterCCC CCC(*this);
2990  if (TypoCorrection Corrected =
2991  CorrectTypo(Id, LookupOrdinaryName, CurScope, nullptr, CCC,
2992  CTK_ErrorRecovery)) {
2993  diagnoseTypo(Corrected,
2994  PDiag(Lookup.empty()
2995  ? diag::err_undeclared_var_use_suggest
2996  : diag::err_omp_expected_var_arg_suggest)
2997  << Id.getName());
2998  VD = Corrected.getCorrectionDeclAs<VarDecl>();
2999  } else {
3000  Diag(Id.getLoc(), Lookup.empty() ? diag::err_undeclared_var_use
3001  : diag::err_omp_expected_var_arg)
3002  << Id.getName();
3003  return ExprError();
3004  }
3005  } else if (!(VD = Lookup.getAsSingle<VarDecl>())) {
3006  Diag(Id.getLoc(), diag::err_omp_expected_var_arg) << Id.getName();
3007  Diag(Lookup.getFoundDecl()->getLocation(), diag::note_declared_at);
3008  return ExprError();
3009  }
3010  Lookup.suppressDiagnostics();
3011 
3012  // OpenMP [2.9.2, Syntax, C/C++]
3013  // Variables must be file-scope, namespace-scope, or static block-scope.
3014  if (Kind == OMPD_threadprivate && !VD->hasGlobalStorage()) {
3015  Diag(Id.getLoc(), diag::err_omp_global_var_arg)
3016  << getOpenMPDirectiveName(Kind) << !VD->isStaticLocal();
3017  bool IsDecl =
3018  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
3019  Diag(VD->getLocation(),
3020  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
3021  << VD;
3022  return ExprError();
3023  }
3024 
3025  VarDecl *CanonicalVD = VD->getCanonicalDecl();
3026  NamedDecl *ND = CanonicalVD;
3027  // OpenMP [2.9.2, Restrictions, C/C++, p.2]
3028  // A threadprivate directive for file-scope variables must appear outside
3029  // any definition or declaration.
3030  if (CanonicalVD->getDeclContext()->isTranslationUnit() &&
3031  !getCurLexicalContext()->isTranslationUnit()) {
3032  Diag(Id.getLoc(), diag::err_omp_var_scope)
3033  << getOpenMPDirectiveName(Kind) << VD;
3034  bool IsDecl =
3035  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
3036  Diag(VD->getLocation(),
3037  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
3038  << VD;
3039  return ExprError();
3040  }
3041  // OpenMP [2.9.2, Restrictions, C/C++, p.3]
3042  // A threadprivate directive for static class member variables must appear
3043  // in the class definition, in the same scope in which the member
3044  // variables are declared.
3045  if (CanonicalVD->isStaticDataMember() &&
3046  !CanonicalVD->getDeclContext()->Equals(getCurLexicalContext())) {
3047  Diag(Id.getLoc(), diag::err_omp_var_scope)
3048  << getOpenMPDirectiveName(Kind) << VD;
3049  bool IsDecl =
3050  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
3051  Diag(VD->getLocation(),
3052  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
3053  << VD;
3054  return ExprError();
3055  }
3056  // OpenMP [2.9.2, Restrictions, C/C++, p.4]
3057  // A threadprivate directive for namespace-scope variables must appear
3058  // outside any definition or declaration other than the namespace
3059  // definition itself.
3060  if (CanonicalVD->getDeclContext()->isNamespace() &&
3061  (!getCurLexicalContext()->isFileContext() ||
3062  !getCurLexicalContext()->Encloses(CanonicalVD->getDeclContext()))) {
3063  Diag(Id.getLoc(), diag::err_omp_var_scope)
3064  << getOpenMPDirectiveName(Kind) << VD;
3065  bool IsDecl =
3066  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
3067  Diag(VD->getLocation(),
3068  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
3069  << VD;
3070  return ExprError();
3071  }
3072  // OpenMP [2.9.2, Restrictions, C/C++, p.6]
3073  // A threadprivate directive for static block-scope variables must appear
3074  // in the scope of the variable and not in a nested scope.
3075  if (CanonicalVD->isLocalVarDecl() && CurScope &&
3076  !isDeclInScope(ND, getCurLexicalContext(), CurScope)) {
3077  Diag(Id.getLoc(), diag::err_omp_var_scope)
3078  << getOpenMPDirectiveName(Kind) << VD;
3079  bool IsDecl =
3080  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
3081  Diag(VD->getLocation(),
3082  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
3083  << VD;
3084  return ExprError();
3085  }
3086 
3087  // OpenMP [2.9.2, Restrictions, C/C++, p.2-6]
3088  // A threadprivate directive must lexically precede all references to any
3089  // of the variables in its list.
3090  if (Kind == OMPD_threadprivate && VD->isUsed() &&
3091  !DSAStack->isThreadPrivate(VD)) {
3092  Diag(Id.getLoc(), diag::err_omp_var_used)
3093  << getOpenMPDirectiveName(Kind) << VD;
3094  return ExprError();
3095  }
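// Illustration (editor's sketch, not from the original source) of the
// "must lexically precede all references" restriction checked above:
//
//   int Counter;
//   #pragma omp threadprivate(Counter)   // OK: precedes every reference
//   void bump() { ++Counter; }
//
//   int Late;
//   void touch() { ++Late; }             // 'Late' is referenced here ...
//   #pragma omp threadprivate(Late)      // ... so this directive is diagnosed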
3096 
3097  QualType ExprType = VD->getType().getNonReferenceType();
3098  return DeclRefExpr::Create(Context, NestedNameSpecifierLoc(),
3099  SourceLocation(), VD,
3100  /*RefersToEnclosingVariableOrCapture=*/false,
3101  Id.getLoc(), ExprType, VK_LValue);
3102 }
3103 
3104 Sema::DeclGroupPtrTy
3105 Sema::ActOnOpenMPThreadprivateDirective(SourceLocation Loc,
3106  ArrayRef<Expr *> VarList) {
3107  if (OMPThreadPrivateDecl *D = CheckOMPThreadPrivateDecl(Loc, VarList)) {
3108  CurContext->addDecl(D);
3109  return DeclGroupPtrTy::make(DeclGroupRef(D));
3110  }
3111  return nullptr;
3112 }
3113 
3114 namespace {
3115 class LocalVarRefChecker final
3116  : public ConstStmtVisitor<LocalVarRefChecker, bool> {
3117  Sema &SemaRef;
3118 
3119 public:
3120  bool VisitDeclRefExpr(const DeclRefExpr *E) {
3121  if (const auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
3122  if (VD->hasLocalStorage()) {
3123  SemaRef.Diag(E->getBeginLoc(),
3124  diag::err_omp_local_var_in_threadprivate_init)
3125  << E->getSourceRange();
3126  SemaRef.Diag(VD->getLocation(), diag::note_defined_here)
3127  << VD << VD->getSourceRange();
3128  return true;
3129  }
3130  }
3131  return false;
3132  }
3133  bool VisitStmt(const Stmt *S) {
3134  for (const Stmt *Child : S->children()) {
3135  if (Child && Visit(Child))
3136  return true;
3137  }
3138  return false;
3139  }
3140  explicit LocalVarRefChecker(Sema &SemaRef) : SemaRef(SemaRef) {}
3141 };
3142 } // namespace
3143 
3144 OMPThreadPrivateDecl *
3145 Sema::CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList) {
3146  SmallVector<Expr *, 8> Vars;
3147  for (Expr *RefExpr : VarList) {
3148  auto *DE = cast<DeclRefExpr>(RefExpr);
3149  auto *VD = cast<VarDecl>(DE->getDecl());
3150  SourceLocation ILoc = DE->getExprLoc();
3151 
3152  // Mark variable as used.
3153  VD->setReferenced();
3154  VD->markUsed(Context);
3155 
3156  QualType QType = VD->getType();
3157  if (QType->isDependentType() || QType->isInstantiationDependentType()) {
3158  // It will be analyzed later.
3159  Vars.push_back(DE);
3160  continue;
3161  }
3162 
3163  // OpenMP [2.9.2, Restrictions, C/C++, p.10]
3164  // A threadprivate variable must not have an incomplete type.
3165  if (RequireCompleteType(ILoc, VD->getType(),
3166  diag::err_omp_threadprivate_incomplete_type)) {
3167  continue;
3168  }
3169 
3170  // OpenMP [2.9.2, Restrictions, C/C++, p.10]
3171  // A threadprivate variable must not have a reference type.
3172  if (VD->getType()->isReferenceType()) {
3173  Diag(ILoc, diag::err_omp_ref_type_arg)
3174  << getOpenMPDirectiveName(OMPD_threadprivate) << VD->getType();
3175  bool IsDecl =
3176  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
3177  Diag(VD->getLocation(),
3178  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
3179  << VD;
3180  continue;
3181  }
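// Illustration (editor's sketch, not from the original source) of the two
// type restrictions checked above:
//
//   struct Incomplete;
//   extern Incomplete Obj;
//   #pragma omp threadprivate(Obj)   // error: incomplete type
//
//   int Storage;
//   int &Ref = Storage;
//   #pragma omp threadprivate(Ref)   // error: reference type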
3182 
3183  // Check if this is a TLS variable. If TLS is not supported, produce the
3184  // corresponding diagnostic.
3185  if ((VD->getTLSKind() != VarDecl::TLS_None &&
3186  !(VD->hasAttr<OMPThreadPrivateDeclAttr>() &&
3187  getLangOpts().OpenMPUseTLS &&
3188  getASTContext().getTargetInfo().isTLSSupported())) ||
3189  (VD->getStorageClass() == SC_Register && VD->hasAttr<AsmLabelAttr>() &&
3190  !VD->isLocalVarDecl())) {
3191  Diag(ILoc, diag::err_omp_var_thread_local)
3192  << VD << ((VD->getTLSKind() != VarDecl::TLS_None) ? 0 : 1);
3193  bool IsDecl =
3194  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
3195  Diag(VD->getLocation(),
3196  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
3197  << VD;
3198  continue;
3199  }
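// Illustration (editor's sketch, not from the original source): when the
// variable is already thread-local, the check above rejects the directive
// instead of silently accepting it:
//
//   thread_local int Tls;
//   #pragma omp threadprivate(Tls)   // diagnosed: variable is already TLS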
3200 
3201  // Check if the initial value of the threadprivate variable references a
3202  // variable with local storage (this is not supported by the runtime).
3203  if (const Expr *Init = VD->getAnyInitializer()) {
3204  LocalVarRefChecker Checker(*this);
3205  if (Checker.Visit(Init))
3206  continue;
3207  }
3208 
3209  Vars.push_back(RefExpr);
3210  DSAStack->addDSA(VD, DE, OMPC_threadprivate);
3211  VD->addAttr(OMPThreadPrivateDeclAttr::CreateImplicit(
3212  Context, SourceRange(Loc, Loc)));
3213  if (ASTMutationListener *ML = Context.getASTMutationListener())
3214  ML->DeclarationMarkedOpenMPThreadPrivate(VD);
3215  }
3216  OMPThreadPrivateDecl *D = nullptr;
3217  if (!Vars.empty()) {
3218  D = OMPThreadPrivateDecl::Create(Context, getCurLexicalContext(), Loc,
3219  Vars);
3220  D->setAccess(AS_public);
3221  }
3222  return D;
3223 }
3224 
3225 static OMPAllocateDeclAttr::AllocatorTypeTy
3226 getAllocatorKind(Sema &S, DSAStackTy *Stack, Expr *Allocator) {
3227  if (!Allocator)
3228  return OMPAllocateDeclAttr::OMPNullMemAlloc;
3229  if (Allocator->isTypeDependent() || Allocator->isValueDependent() ||
3230  Allocator->isInstantiationDependent() ||
3231  Allocator->containsUnexpandedParameterPack())
3232  return OMPAllocateDeclAttr::OMPUserDefinedMemAlloc;
3233  auto AllocatorKindRes = OMPAllocateDeclAttr::OMPUserDefinedMemAlloc;
3234  const Expr *AE = Allocator->IgnoreParenImpCasts();
3235  for (int I = 0; I < OMPAllocateDeclAttr::OMPUserDefinedMemAlloc; ++I) {
3236  auto AllocatorKind = static_cast<OMPAllocateDeclAttr::AllocatorTypeTy>(I);
3237  const Expr *DefAllocator = Stack->getAllocator(AllocatorKind);
3238  llvm::FoldingSetNodeID AEId, DAEId;
3239  AE->Profile(AEId, S.getASTContext(), /*Canonical=*/true);
3240  DefAllocator->Profile(DAEId, S.getASTContext(), /*Canonical=*/true);
3241  if (AEId == DAEId) {
3242  AllocatorKindRes = AllocatorKind;
3243  break;
3244  }
3245  }
3246  return AllocatorKindRes;
3247 }
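// Editor's note (illustrative, not part of the original source): the profile
// comparison above classifies the allocator expression of an allocate
// directive, roughly as follows ('MyAlloc' is a hypothetical user-defined
// allocator handle):
//
//   int Buf[1024];
//   #pragma omp allocate(Buf) allocator(omp_high_bw_mem_alloc)
//     // recognized as the predefined high-bandwidth allocator kind
//   int Tmp[16];
//   #pragma omp allocate(Tmp) allocator(MyAlloc)
//     // anything else falls back to OMPUserDefinedMemAlloc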
3248 
3249 static bool checkPreviousOMPAllocateAttribute(
3250  Sema &S, DSAStackTy *Stack, Expr *RefExpr, VarDecl *VD,
3251  OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind, Expr *Allocator) {
3252  if (!VD->hasAttr<OMPAllocateDeclAttr>())
3253  return false;
3254  const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
3255  Expr *PrevAllocator = A->getAllocator();
3256  OMPAllocateDeclAttr::AllocatorTypeTy PrevAllocatorKind =
3257  getAllocatorKind(S, Stack, PrevAllocator);
3258  bool AllocatorsMatch = AllocatorKind == PrevAllocatorKind;
3259  if (AllocatorsMatch &&
3260  AllocatorKind == OMPAllocateDeclAttr::OMPUserDefinedMemAlloc &&
3261  Allocator && PrevAllocator) {
3262  const Expr *AE = Allocator->IgnoreParenImpCasts();
3263  const Expr *PAE = PrevAllocator->IgnoreParenImpCasts();
3264  llvm::FoldingSetNodeID AEId, PAEId;
3265  AE->Profile(AEId, S.Context, /*Canonical=*/true);
3266  PAE->Profile(PAEId, S.Context, /*Canonical=*/true);
3267  AllocatorsMatch = AEId == PAEId;
3268  }
3269  if (!AllocatorsMatch) {
3270  SmallString<256> AllocatorBuffer;
3271  llvm::raw_svector_ostream AllocatorStream(AllocatorBuffer);
3272  if (Allocator)
3273  Allocator->printPretty(AllocatorStream, nullptr, S.getPrintingPolicy());
3274  SmallString<256> PrevAllocatorBuffer;
3275  llvm::raw_svector_ostream PrevAllocatorStream(PrevAllocatorBuffer);
3276  if (PrevAllocator)
3277  PrevAllocator->printPretty(PrevAllocatorStream, nullptr,
3278  S.getPrintingPolicy());
3279 
3280  SourceLocation AllocatorLoc =
3281  Allocator ? Allocator->getExprLoc() : RefExpr->getExprLoc();
3282  SourceRange AllocatorRange =
3283  Allocator ? Allocator->getSourceRange() : RefExpr->getSourceRange();
3284  SourceLocation PrevAllocatorLoc =
3285  PrevAllocator ? PrevAllocator->getExprLoc() : A->getLocation();
3286  SourceRange PrevAllocatorRange =
3287  PrevAllocator ? PrevAllocator->getSourceRange() : A->getRange();
3288  S.Diag(AllocatorLoc, diag::warn_omp_used_different_allocator)
3289  << (Allocator ? 1 : 0) << AllocatorStream.str()
3290  << (PrevAllocator ? 1 : 0) << PrevAllocatorStream.str()
3291  << AllocatorRange;
3292  S.Diag(PrevAllocatorLoc, diag::note_omp_previous_allocator)
3293  << PrevAllocatorRange;
3294  return true;
3295  }
3296  return false;
3297 }
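// Illustration (editor's sketch, not from the original source): the helper
// above warns when the same variable is listed in allocate directives with
// different allocators, e.g.
//
//   int Cache[64];
//   #pragma omp allocate(Cache) allocator(omp_cgroup_mem_alloc)
//   #pragma omp allocate(Cache) allocator(omp_thread_mem_alloc)
//     // warning: allocator differs from the one previously used for 'Cache'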
3298 
3299 static void
3300 applyOMPAllocateAttribute(Sema &S, VarDecl *VD,
3301  OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind,
3302  Expr *Allocator, Expr *Alignment, SourceRange SR) {
3303  if (VD->hasAttr<OMPAllocateDeclAttr>())
3304  return;
3305  if (Alignment &&
3306  (Alignment->isTypeDependent() || Alignment->isValueDependent() ||
3307  Alignment->isInstantiationDependent() ||
3308  Alignment->containsUnexpandedParameterPack()))
3309  // Apply later when we have a usable value.
3310  return;
3311  if (Allocator &&
3312  (Allocator->isTypeDependent() || Allocator->isValueDependent() ||
3313  Allocator->isInstantiationDependent() ||
3314  Allocator->containsUnexpandedParameterPack()))
3315  return;
3316  auto *A = OMPAllocateDeclAttr::CreateImplicit(S.Context, AllocatorKind,
3317  Allocator, Alignment, SR);
3318  VD->addAttr(A);
3319  if (ASTMutationListener *ML = S.Context.getASTMutationListener())
3320  ML->DeclarationMarkedOpenMPAllocate(VD, A);
3321 }
3322 
3323 Sema::DeclGroupPtrTy
3324 Sema::ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList,
3325  ArrayRef<OMPClause *> Clauses,
3326  DeclContext *Owner) {
3327  assert(Clauses.size() <= 2 && "Expected at most two clauses.");
3328  Expr *Alignment = nullptr;
3329  Expr *Allocator = nullptr;
3330  if (Clauses.empty()) {
3331  // OpenMP 5.0, 2.11.3 allocate Directive, Restrictions.
3332  // allocate directives that appear in a target region must specify an
3333  // allocator clause unless a requires directive with the dynamic_allocators
3334  // clause is present in the same compilation unit.
3335  if (LangOpts.OpenMPIsDevice &&
3336  !DSAStack->hasRequiresDeclWithClause<OMPDynamicAllocatorsClause>())
3337  targetDiag(Loc, diag::err_expected_allocator_clause);
3338  } else {
3339  for (const OMPClause *C : Clauses)
3340  if (const auto *AC = dyn_cast<OMPAllocatorClause>(C))
3341  Allocator = AC->getAllocator();
3342  else if (const auto *AC = dyn_cast<OMPAlignClause>(C))
3343  Alignment = AC->getAlignment();
3344  else
3345  llvm_unreachable("Unexpected clause on allocate directive");
3346  }
3347  OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind =
3348  getAllocatorKind(*this, DSAStack, Allocator);
3349  SmallVector<Expr *, 8> Vars;
3350  for (Expr *RefExpr : VarList) {
3351  auto *DE = cast<DeclRefExpr>(RefExpr);
3352  auto *VD = cast<VarDecl>(DE->getDecl());
3353 
3354  // Check if this is a TLS variable or global register.
3355  if (VD->getTLSKind() != VarDecl::TLS_None ||
3356  VD->hasAttr<OMPThreadPrivateDeclAttr>() ||
3357  (VD->getStorageClass() == SC_Register && VD->hasAttr<AsmLabelAttr>() &&
3358  !VD->isLocalVarDecl()))
3359  continue;
3360 
3361  // If the variable is used several times in the allocate directive, the
3362  // same allocator must be used.
3363  if (checkPreviousOMPAllocateAttribute(*this, DSAStack, RefExpr, VD,
3364  AllocatorKind, Allocator))
3365  continue;
3366 
3367  // OpenMP, 2.11.3 allocate Directive, Restrictions, C / C++
3368  // If a list item has a static storage type, the allocator expression in the
3369  // allocator clause must be a constant expression that evaluates to one of
3370  // the predefined memory allocator values.
3371  if (Allocator && VD->hasGlobalStorage()) {
3372  if (AllocatorKind == OMPAllocateDeclAttr::OMPUserDefinedMemAlloc) {
3373  Diag(Allocator->getExprLoc(),
3374  diag::err_omp_expected_predefined_allocator)
3375  << Allocator->getSourceRange();
3376  bool IsDecl = VD->isThisDeclarationADefinition(Context) ==
3377  VarDecl::DeclarationOnly;
3378  Diag(VD->getLocation(),
3379  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
3380  << VD;
3381  continue;
3382  }
3383  }
3384 
3385  Vars.push_back(RefExpr);
3386  applyOMPAllocateAttribute(*this, VD, AllocatorKind, Allocator, Alignment,
3387  DE->getSourceRange());
3388  }
3389  if (Vars.empty())
3390  return nullptr;
3391  if (!Owner)
3392  Owner = getCurLexicalContext();
3393  auto *D = OMPAllocateDecl::Create(Context, Owner, Loc, Vars, Clauses);
3394  D->setAccess(AS_public);
3395  Owner->addDecl(D);
3396  return DeclGroupPtrTy::make(DeclGroupRef(D));
3397 }
3398 
3399 Sema::DeclGroupPtrTy
3400 Sema::ActOnOpenMPRequiresDirective(SourceLocation Loc,
3401  ArrayRef<OMPClause *> ClauseList) {
3402  OMPRequiresDecl *D = nullptr;
3403  if (!CurContext->isFileContext()) {
3404  Diag(Loc, diag::err_omp_invalid_scope) << "requires";
3405  } else {
3406  D = CheckOMPRequiresDecl(Loc, ClauseList);
3407  if (D) {
3408  CurContext->addDecl(D);
3409  DSAStack->addRequiresDecl(D);
3410  }
3411  }
3412  return DeclGroupPtrTy::make(DeclGroupRef(D));
3413 }
3414 
3415 void Sema::ActOnOpenMPAssumesDirective(SourceLocation Loc,
3416  OpenMPDirectiveKind DKind,
3417  ArrayRef<std::string> Assumptions,
3418  bool SkippedClauses) {
3419  if (!SkippedClauses && Assumptions.empty())
3420  Diag(Loc, diag::err_omp_no_clause_for_directive)
3421  << llvm::omp::getAllAssumeClauseOptions()
3422  << llvm::omp::getOpenMPDirectiveName(DKind);
3423 
3424  auto *AA = AssumptionAttr::Create(Context, llvm::join(Assumptions, ","), Loc);
3425  if (DKind == llvm::omp::Directive::OMPD_begin_assumes) {
3426  OMPAssumeScoped.push_back(AA);
3427  return;
3428  }
3429 
3430  // Global assumes without assumption clauses are ignored.
3431  if (Assumptions.empty())
3432  return;
3433 
3434  assert(DKind == llvm::omp::Directive::OMPD_assumes &&
3435  "Unexpected omp assumption directive!");
3436  OMPAssumeGlobal.push_back(AA);
3437 
3438  // The OMPAssumeGlobal scope above will take care of new declarations but
3439  // we also want to apply the assumption to existing ones, e.g., to
3440  // declarations in included headers. To this end, we traverse all existing
3441  // declaration contexts and annotate function declarations here.
3442  SmallVector<DeclContext *, 8> DeclContexts;
3443  auto *Ctx = CurContext;
3444  while (Ctx->getLexicalParent())
3445  Ctx = Ctx->getLexicalParent();
3446  DeclContexts.push_back(Ctx);
3447  while (!DeclContexts.empty()) {
3448  DeclContext *DC = DeclContexts.pop_back_val();
3449  for (auto *SubDC : DC->decls()) {
3450  if (SubDC->isInvalidDecl())
3451  continue;
3452  if (auto *CTD = dyn_cast<ClassTemplateDecl>(SubDC)) {
3453  DeclContexts.push_back(CTD->getTemplatedDecl());
3454  llvm::append_range(DeclContexts, CTD->specializations());
3455  continue;
3456  }
3457  if (auto *DC = dyn_cast<DeclContext>(SubDC))
3458  DeclContexts.push_back(DC);
3459  if (auto *F = dyn_cast<FunctionDecl>(SubDC)) {
3460  F->addAttr(AA);
3461  continue;
3462  }
3463  }
3464  }
3465 }
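// Editor's note (illustrative sketch, not part of the original source; clause
// spellings assume the OpenMP 5.1 assumption clauses): the scoped vs. global
// handling above corresponds to the two spellings of the directive, roughly
//
//   #pragma omp assumes no_openmp_routines      // global: annotates existing
//                                               // and future declarations
//   #pragma omp begin assumes no_parallelism    // scoped: applies only to
//   void kernel(float *A, int N);               // declarations up to ...
//   #pragma omp end assumes                     // ... the matching 'end assumes'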
3466 
3467 void Sema::ActOnOpenMPEndAssumesDirective() {
3468  assert(isInOpenMPAssumeScope() && "Not in OpenMP assumes scope!");
3469  OMPAssumeScoped.pop_back();
3470 }
3471 
3472 OMPRequiresDecl *Sema::CheckOMPRequiresDecl(SourceLocation Loc,
3473  ArrayRef<OMPClause *> ClauseList) {
3474  /// For target specific clauses, the requires directive cannot be
3475  /// specified after the handling of any of the target regions in the
3476  /// current compilation unit.
3477  ArrayRef<SourceLocation> TargetLocations =
3478  DSAStack->getEncounteredTargetLocs();
3479  SourceLocation AtomicLoc = DSAStack->getAtomicDirectiveLoc();
3480  if (!TargetLocations.empty() || !AtomicLoc.isInvalid()) {
3481  for (const OMPClause *CNew : ClauseList) {
3482  // Check if any of the requires clauses affect target regions.
3483  if (isa<OMPUnifiedSharedMemoryClause>(CNew) ||
3484  isa<OMPUnifiedAddressClause>(CNew) ||
3485  isa<OMPReverseOffloadClause>(CNew) ||
3486  isa<OMPDynamicAllocatorsClause>(CNew)) {
3487  Diag(Loc, diag::err_omp_directive_before_requires)
3488  << "target" << getOpenMPClauseName(CNew->getClauseKind());
3489  for (SourceLocation TargetLoc : TargetLocations) {
3490  Diag(TargetLoc, diag::note_omp_requires_encountered_directive)
3491  << "target";
3492  }
3493  } else if (!AtomicLoc.isInvalid() &&
3494  isa<OMPAtomicDefaultMemOrderClause>(CNew)) {
3495  Diag(Loc, diag::err_omp_directive_before_requires)
3496  << "atomic" << getOpenMPClauseName(CNew->getClauseKind());
3497  Diag(AtomicLoc, diag::note_omp_requires_encountered_directive)
3498  << "atomic";
3499  }
3500  }
3501  }
3502 
3503  if (!DSAStack->hasDuplicateRequiresClause(ClauseList))
3504  return OMPRequiresDecl::Create(Context, getCurLexicalContext(), Loc,
3505  ClauseList);
3506  return nullptr;
3507 }
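// Illustration (editor's sketch, not from the original source) of the
// ordering restriction checked above:
//
//   void offload() {
//   #pragma omp target
//     { }
//   }
//   #pragma omp requires unified_shared_memory
//     // error: this requires clause must appear before any target region
//     // in the compilation unit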
3508 
3509 static void reportOriginalDsa(Sema &SemaRef, const DSAStackTy *Stack,
3510  const ValueDecl *D,
3511  const DSAStackTy::DSAVarData &DVar,
3512  bool IsLoopIterVar) {
3513  if (DVar.RefExpr) {
3514  SemaRef.Diag(DVar.RefExpr->getExprLoc(), diag::note_omp_explicit_dsa)
3515  << getOpenMPClauseName(DVar.CKind);
3516  return;
3517  }
3518  enum {
3519  PDSA_StaticMemberShared,
3520  PDSA_StaticLocalVarShared,
3521  PDSA_LoopIterVarPrivate,
3522  PDSA_LoopIterVarLinear,
3523  PDSA_LoopIterVarLastprivate,
3524  PDSA_ConstVarShared,
3525  PDSA_GlobalVarShared,
3526  PDSA_TaskVarFirstprivate,
3527  PDSA_LocalVarPrivate,
3528  PDSA_Implicit
3529  } Reason = PDSA_Implicit;
3530  bool ReportHint = false;
3531  auto ReportLoc = D->getLocation();
3532  auto *VD = dyn_cast<VarDecl>(D);
3533  if (IsLoopIterVar) {
3534  if (DVar.CKind == OMPC_private)
3535  Reason = PDSA_LoopIterVarPrivate;
3536  else if (DVar.CKind == OMPC_lastprivate)
3537  Reason = PDSA_LoopIterVarLastprivate;
3538  else
3539  Reason = PDSA_LoopIterVarLinear;
3540  } else if (isOpenMPTaskingDirective(DVar.DKind) &&
3541  DVar.CKind == OMPC_firstprivate) {
3542  Reason = PDSA_TaskVarFirstprivate;
3543  ReportLoc = DVar.ImplicitDSALoc;
3544  } else if (VD && VD->isStaticLocal())
3545  Reason = PDSA_StaticLocalVarShared;
3546  else if (VD && VD->isStaticDataMember())
3547  Reason = PDSA_StaticMemberShared;
3548  else if (VD && VD->isFileVarDecl())
3549  Reason = PDSA_GlobalVarShared;
3550  else if (D->getType().isConstant(SemaRef.getASTContext()))
3551  Reason = PDSA_ConstVarShared;
3552  else if (VD && VD->isLocalVarDecl() && DVar.CKind == OMPC_private) {
3553  ReportHint = true;
3554  Reason = PDSA_LocalVarPrivate;
3555  }
3556  if (Reason != PDSA_Implicit) {
3557  SemaRef.Diag(ReportLoc, diag::note_omp_predetermined_dsa)
3558  << Reason << ReportHint
3559  << getOpenMPDirectiveName(Stack->getCurrentDirective());
3560  } else if (DVar.ImplicitDSALoc.isValid()) {
3561  SemaRef.Diag(DVar.ImplicitDSALoc, diag::note_omp_implicit_dsa)
3562  << getOpenMPClauseName(DVar.CKind);
3563  }
3564 }
3565 
3566 static OpenMPMapClauseKind
3567 getMapClauseKindFromModifier(OpenMPDefaultmapClauseModifier M,
3568  bool IsAggregateOrDeclareTarget) {
3569  OpenMPMapClauseKind Kind = OMPC_MAP_unknown;
3570  switch (M) {
3571  case OMPC_DEFAULTMAP_MODIFIER_alloc:
3572  Kind = OMPC_MAP_alloc;
3573  break;
3574  case OMPC_DEFAULTMAP_MODIFIER_to:
3575  Kind = OMPC_MAP_to;
3576  break;
3577  case OMPC_DEFAULTMAP_MODIFIER_from:
3578  Kind = OMPC_MAP_from;
3579  break;
3580  case OMPC_DEFAULTMAP_MODIFIER_tofrom:
3581  Kind = OMPC_MAP_tofrom;
3582  break;
3583  case OMPC_DEFAULTMAP_MODIFIER_present:
3584  // OpenMP 5.1 [2.21.7.3, defaultmap clause, Description]
3585  // If implicit-behavior is present, each variable referenced in the
3586  // construct in the category specified by variable-category is treated as if
3587  // it had been listed in a map clause with the map-type of alloc and
3588  // map-type-modifier of present.
3589  Kind = OMPC_MAP_alloc;
3590  break;
3591  case OMPC_DEFAULTMAP_MODIFIER_firstprivate:
3592  case OMPC_DEFAULTMAP_MODIFIER_last:
3593  llvm_unreachable("Unexpected defaultmap implicit behavior");
3594  case OMPC_DEFAULTMAP_MODIFIER_none:
3595  case OMPC_DEFAULTMAP_MODIFIER_default:
3596  case OMPC_DEFAULTMAP_MODIFIER_unknown:
3597  // IsAggregateOrDeclareTarget could be true if:
3598  // 1. the implicit behavior for aggregate is tofrom
3599  // 2. it's a declare target link
3600  if (IsAggregateOrDeclareTarget) {
3601  Kind = OMPC_MAP_tofrom;
3602  break;
3603  }
3604  llvm_unreachable("Unexpected defaultmap implicit behavior");
3605  }
3606  assert(Kind != OMPC_MAP_unknown && "Expect map kind to be known");
3607  return Kind;
3608 }
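// Editor's note (illustrative, not part of the original source): the mapping
// above mirrors how defaultmap implicit behaviors show up on a target
// construct, e.g.
//
//   double A[100]; double S;
//   #pragma omp target defaultmap(to: aggregate) defaultmap(tofrom: scalar)
//   { A[0] += S; }   // 'A' gets an implicit map(to: ...), 'S' map(tofrom: ...)
//   #pragma omp target defaultmap(present: aggregate)
//   { A[1] = 0; }    // treated as map(present, alloc: A)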
3609 
3610 namespace {
3611 class DSAAttrChecker final : public StmtVisitor<DSAAttrChecker, void> {
3612  DSAStackTy *Stack;
3613  Sema &SemaRef;
3614  bool ErrorFound = false;
3615  bool TryCaptureCXXThisMembers = false;
3616  CapturedStmt *CS = nullptr;
3617  const static unsigned DefaultmapKindNum = OMPC_DEFAULTMAP_pointer + 1;
3618  llvm::SmallVector<Expr *, 4> ImplicitFirstprivate;
3619  llvm::SmallVector<Expr *, 4> ImplicitPrivate;
3620  llvm::SmallVector<Expr *, 4> ImplicitMap[DefaultmapKindNum][OMPC_MAP_delete];
3621  llvm::SmallVector<OpenMPMapModifierKind, NumberOfOMPMapClauseModifiers>
3622  ImplicitMapModifier[DefaultmapKindNum];
3623  Sema::VarsWithInheritedDSAType VarsWithInheritedDSA;
3624  llvm::SmallDenseSet<const ValueDecl *, 4> ImplicitDeclarations;
3625 
3626  void VisitSubCaptures(OMPExecutableDirective *S) {
3627  // Check implicitly captured variables.
3628  if (!S->hasAssociatedStmt() || !S->getAssociatedStmt())
3629  return;
3630  if (S->getDirectiveKind() == OMPD_atomic ||
3631  S->getDirectiveKind() == OMPD_critical ||
3632  S->getDirectiveKind() == OMPD_section ||
3633  S->getDirectiveKind() == OMPD_master ||
3634  S->getDirectiveKind() == OMPD_masked ||
3635  isOpenMPLoopTransformationDirective(S->getDirectiveKind())) {
3636  Visit(S->getAssociatedStmt());
3637  return;
3638  }
3639  visitSubCaptures(S->getInnermostCapturedStmt());
3640  // Try to capture inner this->member references to generate correct mappings
3641  // and diagnostics.
3642  if (TryCaptureCXXThisMembers ||
3643  (isOpenMPTargetExecutionDirective(Stack->getCurrentDirective()) &&
3644  llvm::any_of(S->getInnermostCapturedStmt()->captures(),
3645  [](const CapturedStmt::Capture &C) {
3646  return C.capturesThis();
3647  }))) {
3648  bool SavedTryCaptureCXXThisMembers = TryCaptureCXXThisMembers;
3649  TryCaptureCXXThisMembers = true;
3650  Visit(S->getInnermostCapturedStmt()->getCapturedStmt());
3651  TryCaptureCXXThisMembers = SavedTryCaptureCXXThisMembers;
3652  }
3653  // In tasks, firstprivates are not captured anymore, so they need to be
3654  // analyzed explicitly.
3655  if (isOpenMPTaskingDirective(S->getDirectiveKind()) &&
3656  !isOpenMPTaskLoopDirective(S->getDirectiveKind())) {
3657  for (OMPClause *C : S->clauses())
3658  if (auto *FC = dyn_cast<OMPFirstprivateClause>(C)) {
3659  for (Expr *Ref : FC->varlists())
3660  Visit(Ref);
3661  }
3662  }
3663  }
3664 
3665 public:
3666  void VisitDeclRefExpr(DeclRefExpr *E) {
3667  if (TryCaptureCXXThisMembers || E->isTypeDependent() ||
3668  E->isValueDependent() || E->containsUnexpandedParameterPack() ||
3669  E->isInstantiationDependent())
3670  return;
3671  if (auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
3672  // Check the datasharing rules for the expressions in the clauses.
3673  if (!CS || (isa<OMPCapturedExprDecl>(VD) && !CS->capturesVariable(VD) &&
3674  !Stack->getTopDSA(VD, /*FromParent=*/false).RefExpr &&
3675  !Stack->isImplicitDefaultFirstprivateFD(VD))) {
3676  if (auto *CED = dyn_cast<OMPCapturedExprDecl>(VD))
3677  if (!CED->hasAttr<OMPCaptureNoInitAttr>()) {
3678  Visit(CED->getInit());
3679  return;
3680  }
3681  } else if (VD->isImplicit() || isa<OMPCapturedExprDecl>(VD))
3682  // Do not analyze internal variables and do not enclose them into
3683  // implicit clauses.
3684  if (!Stack->isImplicitDefaultFirstprivateFD(VD))
3685  return;
3686  VD = VD->getCanonicalDecl();
3687  // Skip internally declared variables.
3688  if (VD->hasLocalStorage() && CS && !CS->capturesVariable(VD) &&
3689  !Stack->isImplicitDefaultFirstprivateFD(VD) &&
3690  !Stack->isImplicitTaskFirstprivate(VD))
3691  return;
3692  // Skip allocators in uses_allocators clauses.
3693  if (Stack->isUsesAllocatorsDecl(VD))
3694  return;
3695 
3696  DSAStackTy::DSAVarData DVar = Stack->getTopDSA(VD, /*FromParent=*/false);
3697  // Check if the variable has an explicit DSA set and stop the analysis if so.
3698  if (DVar.RefExpr || !ImplicitDeclarations.insert(VD).second)
3699  return;
3700 
3701  // Skip internally declared static variables.
3703  OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
3704  if (VD->hasGlobalStorage() && CS && !CS->capturesVariable(VD) &&
3705  (Stack->hasRequiresDeclWithClause<OMPUnifiedSharedMemoryClause>() ||
3706  !Res || *Res != OMPDeclareTargetDeclAttr::MT_Link) &&
3707  !Stack->isImplicitDefaultFirstprivateFD(VD) &&
3708  !Stack->isImplicitTaskFirstprivate(VD))
3709  return;
3710 
3711  SourceLocation ELoc = E->getExprLoc();
3712  OpenMPDirectiveKind DKind = Stack->getCurrentDirective();
3713  // The default(none) clause requires that each variable that is referenced
3714  // in the construct, and does not have a predetermined data-sharing
3715  // attribute, must have its data-sharing attribute explicitly determined
3716  // by being listed in a data-sharing attribute clause.
3717  if (DVar.CKind == OMPC_unknown &&
3718  (Stack->getDefaultDSA() == DSA_none ||
3719  Stack->getDefaultDSA() == DSA_private ||
3720  Stack->getDefaultDSA() == DSA_firstprivate) &&
3721  isImplicitOrExplicitTaskingRegion(DKind) &&
3722  VarsWithInheritedDSA.count(VD) == 0) {
3723  bool InheritedDSA = Stack->getDefaultDSA() == DSA_none;
3724  if (!InheritedDSA && (Stack->getDefaultDSA() == DSA_firstprivate ||
3725  Stack->getDefaultDSA() == DSA_private)) {
3726  DSAStackTy::DSAVarData DVar =
3727  Stack->getImplicitDSA(VD, /*FromParent=*/false);
3728  InheritedDSA = DVar.CKind == OMPC_unknown;
3729  }
3730  if (InheritedDSA)
3731  VarsWithInheritedDSA[VD] = E;
3732  if (Stack->getDefaultDSA() == DSA_none)
3733  return;
3734  }
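// Illustration (editor's sketch, not from the original source) of the
// default(none)/default(private)/default(firstprivate) handling above:
//
//   int X = 0;
//   #pragma omp parallel default(none)
//   { X++; }                // error: 'X' must be listed in a data-sharing clause
//   #pragma omp parallel default(none) shared(X)
//   { X++; }                // OK
//   #pragma omp parallel default(firstprivate)
//   { X++; }                // OK: 'X' becomes implicitly firstprivate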
3735 
3736  // OpenMP 5.0 [2.19.7.2, defaultmap clause, Description]
3737  // If implicit-behavior is none, each variable referenced in the
3738  // construct that does not have a predetermined data-sharing attribute
3739  // and does not appear in a to or link clause on a declare target
3740  // directive must be listed in a data-mapping attribute clause, a
3741  // data-sharing attribute clause (including a data-sharing attribute
3742  // clause on a combined construct where target is one of the
3743  // constituent constructs), or an is_device_ptr clause.
3744  OpenMPDefaultmapClauseKind ClauseKind =
3745  getVariableCategoryFromDecl(SemaRef.getLangOpts(), VD);
3746  if (SemaRef.getLangOpts().OpenMP >= 50) {
3747  bool IsModifierNone = Stack->getDefaultmapModifier(ClauseKind) ==
3748  OMPC_DEFAULTMAP_MODIFIER_none;
3749  if (DVar.CKind == OMPC_unknown && IsModifierNone &&
3750  VarsWithInheritedDSA.count(VD) == 0 && !Res) {
3751  // Only check for a data-mapping attribute and is_device_ptr here,
3752  // since we have already made sure above that the declaration does
3753  // not have a data-sharing attribute.
3754  if (!Stack->checkMappableExprComponentListsForDecl(
3755  VD, /*CurrentRegionOnly=*/true,
3756  [VD](OMPClauseMappableExprCommon::MappableExprComponentListRef
3757  MapExprComponents,
3758  OpenMPClauseKind) {
3759  auto MI = MapExprComponents.rbegin();
3760  auto ME = MapExprComponents.rend();
3761  return MI != ME && MI->getAssociatedDeclaration() == VD;
3762  })) {
3763  VarsWithInheritedDSA[VD] = E;
3764  return;
3765  }
3766  }
3767  }
3768  if (SemaRef.getLangOpts().OpenMP > 50) {
3769  bool IsModifierPresent = Stack->getDefaultmapModifier(ClauseKind) ==
3770  OMPC_DEFAULTMAP_MODIFIER_present;
3771  if (IsModifierPresent) {
3772  if (!llvm::is_contained(ImplicitMapModifier[ClauseKind],
3773  OMPC_MAP_MODIFIER_present)) {
3774  ImplicitMapModifier[ClauseKind].push_back(
3775  OMPC_MAP_MODIFIER_present);
3776  }
3777  }
3778  }
3779 
3780  if (isOpenMPTargetExecutionDirective(DKind) &&
3781  !Stack->isLoopControlVariable(VD).first) {
3782  if (!Stack->checkMappableExprComponentListsForDecl(
3783  VD, /*CurrentRegionOnly=*/true,
3784  [this](OMPClauseMappableExprCommon::MappableExprComponentListRef
3785  StackComponents,
3786  OpenMPClauseKind) {
3787  if (SemaRef.LangOpts.OpenMP >= 50)
3788  return !StackComponents.empty();
3789  // The variable is used if it has been marked as an array, array
3790  // section, array shaping expression, or the variable itself.
3791  return StackComponents.size() == 1 ||
3792  llvm::all_of(
3793  llvm::drop_begin(llvm::reverse(StackComponents)),
3794  [](const OMPClauseMappableExprCommon::
3795  MappableComponent &MC) {
3796  return MC.getAssociatedDeclaration() ==
3797  nullptr &&
3798  (isa<OMPArraySectionExpr>(
3799  MC.getAssociatedExpression()) ||
3800  isa<OMPArrayShapingExpr>(
3801  MC.getAssociatedExpression()) ||
3802  isa<ArraySubscriptExpr>(
3803  MC.getAssociatedExpression()));
3804  });
3805  })) {
3806  bool IsFirstprivate = false;
3807  // By default lambdas are captured as firstprivates.
3808  if (const auto *RD =
3809  VD->getType().getNonReferenceType()->getAsCXXRecordDecl())
3810  IsFirstprivate = RD->isLambda();
3811  IsFirstprivate =
3812  IsFirstprivate || (Stack->mustBeFirstprivate(ClauseKind) && !Res);
3813  if (IsFirstprivate) {
3814  ImplicitFirstprivate.emplace_back(E);
3815  } else {
3816  OpenMPDefaultmapClauseModifier M =
3817  Stack->getDefaultmapModifier(ClauseKind);
3818  OpenMPMapClauseKind Kind = getMapClauseKindFromModifier(
3819  M, ClauseKind == OMPC_DEFAULTMAP_aggregate || Res);
3820  ImplicitMap[ClauseKind][Kind].emplace_back(E);
3821  }
3822  return;
3823  }
3824  }
3825 
3826  // OpenMP [2.9.3.6, Restrictions, p.2]
3827  // A list item that appears in a reduction clause of the innermost
3828  // enclosing worksharing or parallel construct may not be accessed in an
3829  // explicit task.
3830  DVar = Stack->hasInnermostDSA(
3831  VD,
3832  [](OpenMPClauseKind C, bool AppliedToPointee) {
3833  return C == OMPC_reduction && !AppliedToPointee;
3834  },
3835  [](OpenMPDirectiveKind K) {
3836  return isOpenMPParallelDirective(K) ||
3837  isOpenMPWorksharingDirective(K) || isOpenMPTeamsDirective(K);
3838  },
3839  /*FromParent=*/true);
3840  if (isOpenMPTaskingDirective(DKind) && DVar.CKind == OMPC_reduction) {
3841  ErrorFound = true;
3842  SemaRef.Diag(ELoc, diag::err_omp_reduction_in_task);
3843  reportOriginalDsa(SemaRef, Stack, VD, DVar);
3844  return;
3845  }
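// Illustration (editor's sketch, not from the original source) of the
// restriction checked above:
//
//   int Sum = 0;
//   #pragma omp parallel reduction(+ : Sum)
//   {
//   #pragma omp task
//     { Sum += 1; }   // error: reduction list item accessed in an explicit task
//   }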
3846 
3847  // Define implicit data-sharing attributes for task.
3848  DVar = Stack->getImplicitDSA(VD, /*FromParent=*/false);
3849  if (((isOpenMPTaskingDirective(DKind) && DVar.CKind != OMPC_shared) ||
3850  (((Stack->getDefaultDSA() == DSA_firstprivate &&
3851  DVar.CKind == OMPC_firstprivate) ||
3852  (Stack->getDefaultDSA() == DSA_private &&
3853  DVar.CKind == OMPC_private)) &&
3854  !DVar.RefExpr)) &&
3855  !Stack->isLoopControlVariable(VD).first) {
3856  if (Stack->getDefaultDSA() == DSA_private)
3857  ImplicitPrivate.push_back(E);
3858  else
3859  ImplicitFirstprivate.push_back(E);
3860  return;
3861  }
3862 
3863  // Store implicitly used globals with declare target link for parent
3864  // target.
3865  if (!isOpenMPTargetExecutionDirective(DKind) && Res &&
3866  *Res == OMPDeclareTargetDeclAttr::MT_Link) {
3867  Stack->addToParentTargetRegionLinkGlobals(E);
3868  return;
3869  }
3870  }
3871  }
3872  void VisitMemberExpr(MemberExpr *E) {
3873  if (E->isTypeDependent() || E->isValueDependent() ||
3875  return;
3876  auto *FD = dyn_cast<FieldDecl>(E->getMemberDecl());
3877  OpenMPDirectiveKind DKind = Stack->getCurrentDirective();
3878  if (auto *TE = dyn_cast<CXXThisExpr>(E->getBase()->IgnoreParenCasts())) {
3879  if (!FD)
3880  return;
3881  DSAStackTy::DSAVarData DVar = Stack->getTopDSA(FD, /*FromParent=*/false);
3882  // Check if the variable has an explicit DSA set and stop the analysis
3883  // if so.
3884  if (DVar.RefExpr || !ImplicitDeclarations.insert(FD).second)
3885  return;
3886 
3887  if (isOpenMPTargetExecutionDirective(DKind) &&
3888  !Stack->isLoopControlVariable(FD).first &&
3889  !Stack->checkMappableExprComponentListsForDecl(
3890  FD, /*CurrentRegionOnly=*/true,
3891  [](OMPClauseMappableExprCommon::MappableExprComponentListRef
3892  StackComponents,
3893  OpenMPClauseKind) {
3894  return isa<CXXThisExpr>(
3895  cast<MemberExpr>(
3896  StackComponents.back().getAssociatedExpression())
3897  ->getBase()
3898  ->IgnoreParens());
3899  })) {
3900  // OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, C/C++, p.3]
3901  // A bit-field cannot appear in a map clause.
3902  //
3903  if (FD->isBitField())
3904  return;
3905 
3906  // Check to see if the member expression is referencing a class that
3907  // has already been explicitly mapped
3908  if (Stack->isClassPreviouslyMapped(TE->getType()))
3909  return;
3910 
3911  OpenMPDefaultmapClauseModifier Modifier =
3912  Stack->getDefaultmapModifier(OMPC_DEFAULTMAP_aggregate);
3913  OpenMPDefaultmapClauseKind ClauseKind =
3914  getVariableCategoryFromDecl(SemaRef.getLangOpts(), FD);
3915  OpenMPMapClauseKind Kind = getMapClauseKindFromModifier(
3916  Modifier, /*IsAggregateOrDeclareTarget*/ true);
3917  ImplicitMap[ClauseKind][Kind].emplace_back(E);
3918  return;
3919  }
3920 
3921  SourceLocation ELoc = E->getExprLoc();
3922  // OpenMP [2.9.3.6, Restrictions, p.2]
3923  // A list item that appears in a reduction clause of the innermost
3924  // enclosing worksharing or parallel construct may not be accessed in
3925  // an explicit task.
3926  DVar = Stack->hasInnermostDSA(
3927  FD,
3928  [](OpenMPClauseKind C, bool AppliedToPointee) {
3929  return C == OMPC_reduction && !AppliedToPointee;
3930  },
3931  [](OpenMPDirectiveKind K) {
3932  return isOpenMPParallelDirective(K) ||
3933  isOpenMPWorksharingDirective(K) || isOpenMPTeamsDirective(K);
3934  },
3935  /*FromParent=*/true);
3936  if (isOpenMPTaskingDirective(DKind) && DVar.CKind == OMPC_reduction) {
3937  ErrorFound = true;
3938  SemaRef.Diag(ELoc, diag::err_omp_reduction_in_task);
3939  reportOriginalDsa(SemaRef, Stack, FD, DVar);
3940  return;
3941  }
3942 
3943  // Define implicit data-sharing attributes for task.
3944  DVar = Stack->getImplicitDSA(FD, /*FromParent=*/false);
3945  if (isOpenMPTaskingDirective(DKind) && DVar.CKind != OMPC_shared &&
3946  !Stack->isLoopControlVariable(FD).first) {
3947  // Check if there is a captured expression for the current field in the
3948  // region. Do not mark it as firstprivate unless there is no captured
3949  // expression.
3950  // TODO: try to make it firstprivate.
3951  if (DVar.CKind != OMPC_unknown)
3952  ImplicitFirstprivate.push_back(E);
3953  }
3954  return;
3955  }
3956  if (isOpenMPTargetExecutionDirective(DKind)) {
3957  OMPClauseMappableExprCommon::MappableExprComponentList CurComponents;
3958  if (!checkMapClauseExpressionBase(SemaRef, E, CurComponents, OMPC_map,
3959  Stack->getCurrentDirective(),
3960  /*NoDiagnose=*/true))
3961  return;
3962  const auto *VD = cast<ValueDecl>(
3963  CurComponents.back().getAssociatedDeclaration()->getCanonicalDecl());
3964  if (!Stack->checkMappableExprComponentListsForDecl(
3965  VD, /*CurrentRegionOnly=*/true,
3966  [&CurComponents](
3967  OMPClauseMappableExprCommon::MappableExprComponentListRef
3968  StackComponents,
3969  OpenMPClauseKind) {
3970  auto CCI = CurComponents.rbegin();
3971  auto CCE = CurComponents.rend();
3972  for (const auto &SC : llvm::reverse(StackComponents)) {
3973  // Do both expressions have the same kind?
3974  if (CCI->getAssociatedExpression()->getStmtClass() !=
3975  SC.getAssociatedExpression()->getStmtClass())
3976  if (!((isa<OMPArraySectionExpr>(
3977  SC.getAssociatedExpression()) ||
3978  isa<OMPArrayShapingExpr>(
3979  SC.getAssociatedExpression())) &&
3980  isa<ArraySubscriptExpr>(
3981  CCI->getAssociatedExpression())))
3982  return false;
3983 
3984  const Decl *CCD = CCI->getAssociatedDeclaration();
3985  const Decl *SCD = SC.getAssociatedDeclaration();
3986  CCD = CCD ? CCD->getCanonicalDecl() : nullptr;
3987  SCD = SCD ? SCD->getCanonicalDecl() : nullptr;
3988  if (SCD != CCD)
3989  return false;
3990  std::advance(CCI, 1);
3991  if (CCI == CCE)
3992  break;
3993  }
3994  return true;
3995  })) {
3996  Visit(E->getBase());
3997  }
3998  } else if (!TryCaptureCXXThisMembers) {
3999  Visit(E->getBase());
4000  }
4001  }
4002  void VisitOMPExecutableDirective(OMPExecutableDirective *S) {
4003  for (OMPClause *C : S->clauses()) {
4004  // Skip analysis of arguments of private clauses for task|target
4005  // directives.
4006  if (isa_and_nonnull<OMPPrivateClause>(C))
4007  continue;
4008  // Skip analysis of arguments of implicitly defined firstprivate clause
4009  // for task|target directives.
4010  // Skip analysis of arguments of implicitly defined map clause for target
4011  // directives.
4012  if (C && !((isa<OMPFirstprivateClause>(C) || isa<OMPMapClause>(C)) &&
4013  C->isImplicit() &&
4014  !isOpenMPTaskingDirective(Stack->getCurrentDirective()))) {
4015  for (Stmt *CC : C->children()) {
4016  if (CC)
4017  Visit(CC);
4018  }
4019  }
4020  }
4021  // Check implicitly captured variables.
4022  VisitSubCaptures(S);
4023  }
4024 
4025  void VisitOMPLoopTransformationDirective(OMPLoopTransformationDirective *S) {
4026  // Loop transformation directives do not introduce data sharing
4027  VisitStmt(S);
4028  }
4029 
4030  void VisitCallExpr(CallExpr *S) {
4031  for (Stmt *C : S->arguments()) {
4032  if (C) {
4033  // Check implicitly captured variables in the task-based directives to
4034  // check if they must be firstprivatized.
4035  Visit(C);
4036  }
4037  }
4038  if (Expr *Callee = S->getCallee())
4039  if (auto *CE = dyn_cast<MemberExpr>(Callee->IgnoreParenImpCasts()))
4040  Visit(CE->getBase());
4041  }
4042  void VisitStmt(Stmt *S) {
4043  for (Stmt *C : S->children()) {
4044  if (C) {
4045  // Check implicitly captured variables in the task-based directives to
4046  // check if they must be firstprivatized.
4047  Visit(C);
4048  }
4049  }
4050  }
4051 
4052  void visitSubCaptures(CapturedStmt *S) {
4053  for (const CapturedStmt::Capture &Cap : S->captures()) {
4054  if (!Cap.capturesVariable() && !Cap.capturesVariableByCopy())
4055  continue;
4056  VarDecl *VD = Cap.getCapturedVar();
4057  // Do not try to map the variable if it or its sub-component was mapped
4058  // already.
4059  if (isOpenMPTargetExecutionDirective(Stack->getCurrentDirective()) &&
4060  Stack->checkMappableExprComponentListsForDecl(
4061  VD, /*CurrentRegionOnly=*/true,
4062  [](OMPClauseMappableExprCommon::MappableExprComponentListRef,
4063  OpenMPClauseKind) { return true; }))
4064  continue;
4065  DeclRefExpr *DRE = buildDeclRefExpr(
4066  SemaRef, VD, VD->getType().getNonLValueExprType(SemaRef.Context),
4067  Cap.getLocation(), /*RefersToCapture=*/true);
4068  Visit(DRE);
4069  }
4070  }
4071  bool isErrorFound() const { return ErrorFound; }
4072  ArrayRef<Expr *> getImplicitFirstprivate() const {
4073  return ImplicitFirstprivate;
4074  }
4075  ArrayRef<Expr *> getImplicitPrivate() const { return ImplicitPrivate; }
4076  ArrayRef<Expr *> getImplicitMap(OpenMPDefaultmapClauseKind DK,
4077  OpenMPMapClauseKind MK) const {
4078  return ImplicitMap[DK][MK];
4079  }
4080  ArrayRef<OpenMPMapModifierKind>
4081  getImplicitMapModifier(OpenMPDefaultmapClauseKind Kind) const {
4082  return ImplicitMapModifier[Kind];
4083  }
4084  const Sema::VarsWithInheritedDSAType &getVarsWithInheritedDSA() const {
4085  return VarsWithInheritedDSA;
4086  }
4087 
4088  DSAAttrChecker(DSAStackTy *S, Sema &SemaRef, CapturedStmt *CS)
4089  : Stack(S), SemaRef(SemaRef), ErrorFound(false), CS(CS) {
4090  // Process declare target link variables for the target directives.
4091  if (isOpenMPTargetExecutionDirective(S->getCurrentDirective())) {
4092  for (DeclRefExpr *E : Stack->getLinkGlobals())
4093  Visit(E);
4094  }
4095  }
4096 };
4097 } // namespace
4098 
4099 static void handleDeclareVariantConstructTrait(DSAStackTy *Stack,
4100  OpenMPDirectiveKind DKind,
4101  bool ScopeEntry) {
4102  SmallVector<llvm::omp::TraitProperty, 8> Traits;
4103  if (isOpenMPTargetExecutionDirective(DKind))
4104  Traits.emplace_back(llvm::omp::TraitProperty::construct_target_target);
4105  if (isOpenMPTeamsDirective(DKind))
4106  Traits.emplace_back(llvm::omp::TraitProperty::construct_teams_teams);
4107  if (isOpenMPParallelDirective(DKind))
4108  Traits.emplace_back(llvm::omp::TraitProperty::construct_parallel_parallel);
4109  if (isOpenMPWorksharingDirective(DKind))
4110  Traits.emplace_back(llvm::omp::TraitProperty::construct_for_for);
4111  if (isOpenMPSimdDirective(DKind))
4112  Traits.emplace_back(llvm::omp::TraitProperty::construct_simd_simd);
4113  Stack->handleConstructTrait(Traits, ScopeEntry);
4114 }
4115 
4116 void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
4117  switch (DKind) {
4118  case OMPD_parallel:
4119  case OMPD_parallel_for:
4120  case OMPD_parallel_for_simd:
4121  case OMPD_parallel_sections:
4122  case OMPD_parallel_master:
4123  case OMPD_parallel_masked:
4124  case OMPD_parallel_loop:
4125  case OMPD_teams:
4126  case OMPD_teams_distribute:
4127  case OMPD_teams_distribute_simd: {
4128  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
4129  QualType KmpInt32PtrTy =
4130  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
4131  Sema::CapturedParamNameType Params[] = {
4132  std::make_pair(".global_tid.", KmpInt32PtrTy),
4133  std::make_pair(".bound_tid.", KmpInt32PtrTy),
4134  std::make_pair(StringRef(), QualType()) // __context with shared vars
4135  };
4136  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4137  Params);
4138  break;
4139  }
4140  case OMPD_target_teams:
4141  case OMPD_target_parallel:
4142  case OMPD_target_parallel_for:
4143  case OMPD_target_parallel_for_simd:
4144  case OMPD_target_teams_loop:
4145  case OMPD_target_parallel_loop:
4146  case OMPD_target_teams_distribute:
4147  case OMPD_target_teams_distribute_simd: {
4148  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
4149  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
4150  QualType KmpInt32PtrTy =
4151  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
4152  QualType Args[] = {VoidPtrTy};
4153  FunctionProtoType::ExtProtoInfo EPI;
4154  EPI.Variadic = true;
4155  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
4156  Sema::CapturedParamNameType Params[] = {
4157  std::make_pair(".global_tid.", KmpInt32Ty),
4158  std::make_pair(".part_id.", KmpInt32PtrTy),
4159  std::make_pair(".privates.", VoidPtrTy),
4160  std::make_pair(
4161  ".copy_fn.",
4162  Context.getPointerType(CopyFnType).withConst().withRestrict()),
4163  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
4164  std::make_pair(StringRef(), QualType()) // __context with shared vars
4165  };
4166  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4167  Params, /*OpenMPCaptureLevel=*/0);
4168  // Mark this captured region as inlined, because we don't use outlined
4169  // function directly.
4171  AlwaysInlineAttr::CreateImplicit(
4173  AlwaysInlineAttr::Keyword_forceinline));
4174  Sema::CapturedParamNameType ParamsTarget[] = {
4175  std::make_pair(StringRef(), QualType()) // __context with shared vars
4176  };
4177  // Start a captured region for 'target' with no implicit parameters.
4178  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4179  ParamsTarget, /*OpenMPCaptureLevel=*/1);
4180  Sema::CapturedParamNameType ParamsTeamsOrParallel[] = {
4181  std::make_pair(".global_tid.", KmpInt32PtrTy),
4182  std::make_pair(".bound_tid.", KmpInt32PtrTy),
4183  std::make_pair(StringRef(), QualType()) // __context with shared vars
4184  };
4185  // Start a captured region for 'teams' or 'parallel'. Both regions have
4186  // the same implicit parameters.
4187  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4188  ParamsTeamsOrParallel, /*OpenMPCaptureLevel=*/2);
4189  break;
4190  }
4191  case OMPD_target:
4192  case OMPD_target_simd: {
4193  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
4194  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
4195  QualType KmpInt32PtrTy =
4196  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
4197  QualType Args[] = {VoidPtrTy};
4198  FunctionProtoType::ExtProtoInfo EPI;
4199  EPI.Variadic = true;
4200  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
4201  Sema::CapturedParamNameType Params[] = {
4202  std::make_pair(".global_tid.", KmpInt32Ty),
4203  std::make_pair(".part_id.", KmpInt32PtrTy),
4204  std::make_pair(".privates.", VoidPtrTy),
4205  std::make_pair(
4206  ".copy_fn.",
4207  Context.getPointerType(CopyFnType).withConst().withRestrict()),
4208  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
4209  std::make_pair(StringRef(), QualType()) // __context with shared vars
4210  };
4211  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4212  Params, /*OpenMPCaptureLevel=*/0);
4213  // Mark this captured region as inlined, because we don't use outlined
4214  // function directly.
4216  AlwaysInlineAttr::CreateImplicit(
4218  AlwaysInlineAttr::Keyword_forceinline));
4219  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4220  std::make_pair(StringRef(), QualType()),
4221  /*OpenMPCaptureLevel=*/1);
4222  break;
4223  }
4224  case OMPD_atomic:
4225  case OMPD_critical:
4226  case OMPD_section:
4227  case OMPD_master:
4228  case OMPD_masked:
4229  case OMPD_tile:
4230  case OMPD_unroll:
4231  break;
4232  case OMPD_loop:
4233  // TODO: 'loop' may require additional parameters depending on the binding.
4234  // Treat similar to OMPD_simd/OMPD_for for now.
4235  case OMPD_simd:
4236  case OMPD_for:
4237  case OMPD_for_simd:
4238  case OMPD_sections:
4239  case OMPD_single:
4240  case OMPD_taskgroup:
4241  case OMPD_distribute:
4242  case OMPD_distribute_simd:
4243  case OMPD_ordered:
4244  case OMPD_target_data:
4245  case OMPD_dispatch: {
4246  Sema::CapturedParamNameType Params[] = {
4247  std::make_pair(StringRef(), QualType()) // __context with shared vars
4248  };
4249  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4250  Params);
4251  break;
4252  }
4253  case OMPD_task: {
4254  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
4255  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
4256  QualType KmpInt32PtrTy =
4257  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
4258  QualType Args[] = {VoidPtrTy};
4259  FunctionProtoType::ExtProtoInfo EPI;
4260  EPI.Variadic = true;
4261  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
4262  Sema::CapturedParamNameType Params[] = {
4263  std::make_pair(".global_tid.", KmpInt32Ty),
4264  std::make_pair(".part_id.", KmpInt32PtrTy),
4265  std::make_pair(".privates.", VoidPtrTy),
4266  std::make_pair(
4267  ".copy_fn.",
4268  Context.getPointerType(CopyFnType).withConst().withRestrict()),
4269  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
4270  std::make_pair(StringRef(), QualType()) // __context with shared vars
4271  };
4272  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4273  Params);
4274  // Mark this captured region as inlined, because we don't use outlined
4275  // function directly.
4277  AlwaysInlineAttr::CreateImplicit(
4279  AlwaysInlineAttr::Keyword_forceinline));
4280  break;
4281  }
4282  case OMPD_taskloop:
4283  case OMPD_taskloop_simd:
4284  case OMPD_master_taskloop:
4285  case OMPD_masked_taskloop:
4286  case OMPD_masked_taskloop_simd:
4287  case OMPD_master_taskloop_simd: {
4288  QualType KmpInt32Ty =
4289  Context.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1)
4290  .withConst();
4291  QualType KmpUInt64Ty =
4292  Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0)
4293  .withConst();
4294  QualType KmpInt64Ty =
4295  Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1)
4296  .withConst();
4297  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
4298  QualType KmpInt32PtrTy =
4299  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
4300  QualType Args[] = {VoidPtrTy};
4301  FunctionProtoType::ExtProtoInfo EPI;
4302  EPI.Variadic = true;
4303  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
4304  Sema::CapturedParamNameType Params[] = {
4305  std::make_pair(".global_tid.", KmpInt32Ty),
4306  std::make_pair(".part_id.", KmpInt32PtrTy),
4307  std::make_pair(".privates.", VoidPtrTy),
4308  std::make_pair(
4309  ".copy_fn.",
4310  Context.getPointerType(CopyFnType).withConst().withRestrict()),
4311  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
4312  std::make_pair(".lb.", KmpUInt64Ty),
4313  std::make_pair(".ub.", KmpUInt64Ty),
4314  std::make_pair(".st.", KmpInt64Ty),
4315  std::make_pair(".liter.", KmpInt32Ty),
4316  std::make_pair(".reductions.", VoidPtrTy),
4317  std::make_pair(StringRef(), QualType()) // __context with shared vars
4318  };
4319  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4320  Params);
4321  // Mark this captured region as inlined, because we don't use outlined
4322  // function directly.
4324  AlwaysInlineAttr::CreateImplicit(
4326  AlwaysInlineAttr::Keyword_forceinline));
4327  break;
4328  }
4329  case OMPD_parallel_masked_taskloop:
4330  case OMPD_parallel_masked_taskloop_simd:
4331  case OMPD_parallel_master_taskloop:
4332  case OMPD_parallel_master_taskloop_simd: {
4333  QualType KmpInt32Ty =
4334  Context.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1)
4335  .withConst();
4336  QualType KmpUInt64Ty =
4337  Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0)
4338  .withConst();
4339  QualType KmpInt64Ty =
4340  Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1)
4341  .withConst();
4342  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
4343  QualType KmpInt32PtrTy =
4344  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
4345  Sema::CapturedParamNameType ParamsParallel[] = {
4346  std::make_pair(".global_tid.", KmpInt32PtrTy),
4347  std::make_pair(".bound_tid.", KmpInt32PtrTy),
4348  std::make_pair(StringRef(), QualType()) // __context with shared vars
4349  };
4350  // Start a captured region for 'parallel'.
4351  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4352  ParamsParallel, /*OpenMPCaptureLevel=*/0);
4353  QualType Args[] = {VoidPtrTy};
4354  FunctionProtoType::ExtProtoInfo EPI;
4355  EPI.Variadic = true;
4356  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
4357  Sema::CapturedParamNameType Params[] = {
4358  std::make_pair(".global_tid.", KmpInt32Ty),
4359  std::make_pair(".part_id.", KmpInt32PtrTy),
4360  std::make_pair(".privates.", VoidPtrTy),
4361  std::make_pair(
4362  ".copy_fn.",
4363  Context.getPointerType(CopyFnType).withConst().withRestrict()),
4364  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
4365  std::make_pair(".lb.", KmpUInt64Ty),
4366  std::make_pair(".ub.", KmpUInt64Ty),
4367  std::make_pair(".st.", KmpInt64Ty),
4368  std::make_pair(".liter.", KmpInt32Ty),
4369  std::make_pair(".reductions.", VoidPtrTy),
4370  std::make_pair(StringRef(), QualType()) // __context with shared vars
4371  };
4372  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4373  Params, /*OpenMPCaptureLevel=*/1);
4374  // Mark this captured region as inlined, because we don't use outlined
4375  // function directly.
4377  AlwaysInlineAttr::CreateImplicit(
4379  AlwaysInlineAttr::Keyword_forceinline));
4380  break;
4381  }
4382  case OMPD_distribute_parallel_for_simd:
4383  case OMPD_distribute_parallel_for: {
4384  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
4385  QualType KmpInt32PtrTy =
4386  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
4387  Sema::CapturedParamNameType Params[] = {
4388  std::make_pair(".global_tid.", KmpInt32PtrTy),
4389  std::make_pair(".bound_tid.", KmpInt32PtrTy),
4390  std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
4391  std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
4392  std::make_pair(StringRef(), QualType()) // __context with shared vars
4393  };
4394  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4395  Params);
4396  break;
4397  }
4398  case OMPD_target_teams_distribute_parallel_for:
4399  case OMPD_target_teams_distribute_parallel_for_simd: {
4400  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
4401  QualType KmpInt32PtrTy =
4402  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
4403  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
4404 
4405  QualType Args[] = {VoidPtrTy};
4406  FunctionProtoType::ExtProtoInfo EPI;
4407  EPI.Variadic = true;
4408  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
4409  Sema::CapturedParamNameType Params[] = {
4410  std::make_pair(".global_tid.", KmpInt32Ty),
4411  std::make_pair(".part_id.", KmpInt32PtrTy),
4412  std::make_pair(".privates.", VoidPtrTy),
4413  std::make_pair(
4414  ".copy_fn.",
4415  Context.getPointerType(CopyFnType).withConst().withRestrict()),
4416  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
4417  std::make_pair(StringRef(), QualType()) // __context with shared vars
4418  };
4419  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4420  Params, /*OpenMPCaptureLevel=*/0);
4421  // Mark this captured region as inlined, because we don't use outlined
4422  // function directly.
4424  AlwaysInlineAttr::CreateImplicit(
4426  AlwaysInlineAttr::Keyword_forceinline));
4427  Sema::CapturedParamNameType ParamsTarget[] = {
4428  std::make_pair(StringRef(), QualType()) // __context with shared vars
4429  };
4430  // Start a captured region for 'target' with no implicit parameters.
4431  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4432  ParamsTarget, /*OpenMPCaptureLevel=*/1);
4433 
4434  Sema::CapturedParamNameType ParamsTeams[] = {
4435  std::make_pair(".global_tid.", KmpInt32PtrTy),
4436  std::make_pair(".bound_tid.", KmpInt32PtrTy),
4437  std::make_pair(StringRef(), QualType()) // __context with shared vars
4438  };
4439  // Start a captured region for 'teams'.
4440  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4441  ParamsTeams, /*OpenMPCaptureLevel=*/2);
4442 
4443  Sema::CapturedParamNameType ParamsParallel[] = {
4444  std::make_pair(".global_tid.", KmpInt32PtrTy),
4445  std::make_pair(".bound_tid.", KmpInt32PtrTy),
4446  std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
4447  std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
4448  std::make_pair(StringRef(), QualType()) // __context with shared vars
4449  };
4450  // Start a captured region for 'parallel', which also carries the previous
4451  // lower/upper bounds of the enclosing 'distribute'.
4452  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4453  ParamsParallel, /*OpenMPCaptureLevel=*/3);
4454  break;
4455  }
4456 
4457  case OMPD_teams_loop: {
4458  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
4459  QualType KmpInt32PtrTy =
4460  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
4461 
4462  Sema::CapturedParamNameType ParamsTeams[] = {
4463  std::make_pair(".global_tid.", KmpInt32PtrTy),
4464  std::make_pair(".bound_tid.", KmpInt32PtrTy),
4465  std::make_pair(StringRef(), QualType()) // __context with shared vars
4466  };
4467  // Start a captured region for 'teams'.
4468  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4469  ParamsTeams, /*OpenMPCaptureLevel=*/0);
4470  break;
4471  }
4472 
4473  case OMPD_teams_distribute_parallel_for:
4474  case OMPD_teams_distribute_parallel_for_simd: {
4475  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
4476  QualType KmpInt32PtrTy =
4477  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
4478 
4479  Sema::CapturedParamNameType ParamsTeams[] = {
4480  std::make_pair(".global_tid.", KmpInt32PtrTy),
4481  std::make_pair(".bound_tid.", KmpInt32PtrTy),
4482  std::make_pair(StringRef(), QualType()) // __context with shared vars
4483  };
4484  // Start a captured region for 'teams'.
4485  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4486  ParamsTeams, /*OpenMPCaptureLevel=*/0);
4487 
4488  Sema::CapturedParamNameType ParamsParallel[] = {
4489  std::make_pair(".global_tid.", KmpInt32PtrTy),
4490  std::make_pair(".bound_tid.", KmpInt32PtrTy),
4491  std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
4492  std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
4493  std::make_pair(StringRef(), QualType()) // __context with shared vars
4494  };
4495  // Start a captured region for 'teams' or 'parallel'. Both regions have
4496  // the same implicit parameters.
4497  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4498  ParamsParallel, /*OpenMPCaptureLevel=*/1);
4499  break;
4500  }
4501  case OMPD_target_update:
4502  case OMPD_target_enter_data:
4503  case OMPD_target_exit_data: {
4504  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
4505  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
4506  QualType KmpInt32PtrTy =
4507  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
4508  QualType Args[] = {VoidPtrTy};
4509  FunctionProtoType::ExtProtoInfo EPI;
4510  EPI.Variadic = true;
4511  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
4512  Sema::CapturedParamNameType Params[] = {
4513  std::make_pair(".global_tid.", KmpInt32Ty),
4514  std::make_pair(".part_id.", KmpInt32PtrTy),
4515  std::make_pair(".privates.", VoidPtrTy),
4516  std::make_pair(
4517  ".copy_fn.",
4518  Context.getPointerType(CopyFnType).withConst().withRestrict()),
4519  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
4520  std::make_pair(StringRef(), QualType()) // __context with shared vars
4521  };
4522  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4523  Params);
4524  // Mark this captured region as inlined, because we don't use the
4525  // outlined function directly.
4526  getCurCapturedRegion()->TheCapturedDecl->addAttr(
4527  AlwaysInlineAttr::CreateImplicit(
4528  Context, {}, AttributeCommonInfo::AS_Keyword,
4529  AlwaysInlineAttr::Keyword_forceinline));
4530  break;
4531  }
4532  case OMPD_threadprivate:
4533  case OMPD_allocate:
4534  case OMPD_taskyield:
4535  case OMPD_error:
4536  case OMPD_barrier:
4537  case OMPD_taskwait:
4538  case OMPD_cancellation_point:
4539  case OMPD_cancel:
4540  case OMPD_flush:
4541  case OMPD_depobj:
4542  case OMPD_scan:
4543  case OMPD_declare_reduction:
4544  case OMPD_declare_mapper:
4545  case OMPD_declare_simd:
4546  case OMPD_declare_target:
4547  case OMPD_end_declare_target:
4548  case OMPD_requires:
4549  case OMPD_declare_variant:
4550  case OMPD_begin_declare_variant:
4551  case OMPD_end_declare_variant:
4552  case OMPD_metadirective:
4553  llvm_unreachable("OpenMP Directive is not allowed");
4554  case OMPD_unknown:
4555  default:
4556  llvm_unreachable("Unknown OpenMP directive");
4557  }
4558  DSAStack->setContext(CurContext);
4559  handleDeclareVariantConstructTrait(DSAStack, DKind, /* ScopeEntry */ true);
4560 }
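// Illustrative user code (exposition only; the function name below is made
// up, not part of this file): a combined target construct such as this one
// makes ActOnOpenMPRegionStart above open the nested capture regions -- an
// outer task-like region for the offload (capture level 0), then 'target'
// (1), 'teams' (2), and 'parallel' (3).
void saxpy_example(float a, float *x, float *y, int n) {
#pragma omp target teams distribute parallel for map(to : x[0 : n]) map(tofrom : y[0 : n])
  for (int i = 0; i < n; ++i)
    y[i] += a * x[i];
}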
4561 
4562 int Sema::getNumberOfConstructScopes(unsigned Level) const {
4563  return getOpenMPCaptureLevels(DSAStack->getDirective(Level));
4564 }
4565 
4566 int Sema::getOpenMPCaptureLevels(OpenMPDirectiveKind DKind) {
4567  SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
4568  getOpenMPCaptureRegions(CaptureRegions, DKind);
4569  return CaptureRegions.size();
4570 }
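// A minimal sketch (assumed caller, not part of this file) of how the number
// of construct scopes relates to getOpenMPCaptureRegions; the concrete region
// list in the comment is an expectation based on the capture levels started
// in ActOnOpenMPRegionStart above, not a guarantee.
#include "clang/Basic/OpenMPKinds.h"
#include "llvm/ADT/SmallVector.h"
static unsigned countCaptureLevelsExample(clang::OpenMPDirectiveKind DKind) {
  llvm::SmallVector<clang::OpenMPDirectiveKind, 4> Regions;
  clang::getOpenMPCaptureRegions(Regions, DKind);
  // For OMPD_target_teams_distribute_parallel_for this is expected to yield
  // four regions (offload task, target, teams, parallel), i.e. levels 0-3.
  return Regions.size();
}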
4571 
4572 static OMPCapturedExprDecl *buildCaptureDecl(Sema &S, IdentifierInfo *Id,
4573  Expr *CaptureExpr, bool WithInit,
4574  DeclContext *CurContext,
4575  bool AsExpression) {
4576  assert(CaptureExpr);
4577  ASTContext &C = S.getASTContext();
4578  Expr *Init = AsExpression ? CaptureExpr : CaptureExpr->IgnoreImpCasts();
4579  QualType Ty = Init->getType();
4580  if (CaptureExpr->getObjectKind() == OK_Ordinary && CaptureExpr->isGLValue()) {
4581  if (S.getLangOpts().CPlusPlus) {
4582  Ty = C.getLValueReferenceType(Ty);
4583  } else {
4584  Ty = C.getPointerType(Ty);
4585  ExprResult Res =
4586  S.CreateBuiltinUnaryOp(CaptureExpr->getExprLoc(), UO_AddrOf, Init);
4587  if (!Res.isUsable())
4588  return nullptr;
4589  Init = Res.get();
4590  }
4591  WithInit = true;
4592  }
4593  auto *CED = OMPCapturedExprDecl::Create(C, CurContext, Id, Ty,
4594  CaptureExpr->getBeginLoc());
4595  if (!WithInit)
4596  CED->addAttr(OMPCaptureNoInitAttr::CreateImplicit(C));
4597  CurContext->addHiddenDecl(CED);
4599  S.AddInitializerToDecl(CED, Init, /*DirectInit=*/false);
4600  return CED;
4601 }
4602 
4603 static DeclRefExpr *buildCapture(Sema &S, ValueDecl *D, Expr *CaptureExpr,
4604  bool WithInit) {
4605  OMPCapturedExprDecl *CD;
4606  if (VarDecl *VD = S.isOpenMPCapturedDecl(D))
4607  CD = cast<OMPCapturedExprDecl>(VD);
4608  else
4609  CD = buildCaptureDecl(S, D->getIdentifier(), CaptureExpr, WithInit,
4610  S.CurContext,
4611  /*AsExpression=*/false);
4612  return buildDeclRefExpr(S, CD, CD->getType().getNonReferenceType(),
4613  CaptureExpr->getExprLoc());
4614 }
4615 
4616 static ExprResult buildCapture(Sema &S, Expr *CaptureExpr, DeclRefExpr *&Ref) {
4617  CaptureExpr = S.DefaultLvalueConversion(CaptureExpr).get();
4618  if (!Ref) {
4619  OMPCapturedExprDecl *CD = buildCaptureDecl(
4620  S, &S.getASTContext().Idents.get(".capture_expr."), CaptureExpr,
4621  /*WithInit=*/true, S.CurContext, /*AsExpression=*/true);
4622  Ref = buildDeclRefExpr(S, CD, CD->getType().getNonReferenceType(),
4623  CaptureExpr->getExprLoc());
4624  }
4625  ExprResult Res = Ref;
4626  if (!S.getLangOpts().CPlusPlus &&
4627  CaptureExpr->getObjectKind() == OK_Ordinary && CaptureExpr->isGLValue() &&
4628  Ref->getType()->isPointerType()) {
4629  Res = S.CreateBuiltinUnaryOp(CaptureExpr->getExprLoc(), UO_Deref, Ref);
4630  if (!Res.isUsable())
4631  return ExprError();
4632  }
4633  return S.DefaultLvalueConversion(Res.get());
4634 }
4635 
4636 namespace {
4637 // OpenMP directives parsed in this section are represented as a
4638 // CapturedStatement with an associated statement. If a syntax error
4639 // is detected during the parsing of the associated statement, the
4640 // compiler must abort processing and close the CapturedStatement.
4641 //
4642 // Combined directives such as 'target parallel' have more than one
4643 // nested CapturedStatement. This RAII ensures that we unwind out
4644 // of all the nested CapturedStatements when an error is found.
4645 class CaptureRegionUnwinderRAII {
4646 private:
4647  Sema &S;
4648  bool &ErrorFound;
4649  OpenMPDirectiveKind DKind = OMPD_unknown;
4650 
4651 public:
4652  CaptureRegionUnwinderRAII(Sema &S, bool &ErrorFound,
4653  OpenMPDirectiveKind DKind)
4654  : S(S), ErrorFound(ErrorFound), DKind(DKind) {}
4655  ~CaptureRegionUnwinderRAII() {
4656  if (ErrorFound) {
4657  int ThisCaptureLevel = S.getOpenMPCaptureLevels(DKind);
4658  while (--ThisCaptureLevel >= 0)
4659  S.ActOnCapturedRegionError();
4660  }
4661  }
4662 };
4663 } // namespace
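// Illustrative situation (exposition only): a parse error inside the
// associated statement of a combined directive is what the RAII above guards
// against; both capture regions opened for 'target parallel' have to be
// closed via ActOnCapturedRegionError() before bailing out.
//
//   #pragma omp target parallel
//   {
//     int x = ;   // syntax error: every nested CapturedStmt is abandoned
//   }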
4664 
4665 void Sema::tryCaptureOpenMPLambdas(ValueDecl *V) {
4666  // Capture variables captured by reference in lambdas for target-based
4667  // directives.
4668  if (!CurContext->isDependentContext() &&
4669  (isOpenMPTargetExecutionDirective(DSAStack->getCurrentDirective()) ||
4670  isOpenMPTargetDataManagementDirective(
4671  DSAStack->getCurrentDirective()))) {
4672  QualType Type = V->getType();
4673  if (const auto *RD = Type.getCanonicalType()
4674  .getNonReferenceType()
4675  ->getAsCXXRecordDecl()) {
4676  bool SavedForceCaptureByReferenceInTargetExecutable =
4677  DSAStack->isForceCaptureByReferenceInTargetExecutable();
4678  DSAStack->setForceCaptureByReferenceInTargetExecutable(
4679  /*V=*/true);
4680  if (RD->isLambda()) {
4681  llvm::DenseMap<const ValueDecl *, FieldDecl *> Captures;
4682  FieldDecl *ThisCapture;
4683  RD->getCaptureFields(Captures, ThisCapture);
4684  for (const LambdaCapture &LC : RD->captures()) {
4685  if (LC.getCaptureKind() == LCK_ByRef) {
4686  VarDecl *VD = cast<VarDecl>(LC.getCapturedVar());
4687  DeclContext *VDC = VD->getDeclContext();
4688  if (!VDC->Encloses(CurContext))
4689  continue;
4690  MarkVariableReferenced(LC.getLocation(), VD);
4691  } else if (LC.getCaptureKind() == LCK_This) {
4692  QualType ThisTy = getCurrentThisType();
4693  if (!ThisTy.isNull() &&
4694  Context.typesAreCompatible(ThisTy, ThisCapture->getType()))
4695  CheckCXXThisCapture(LC.getLocation());
4696  }
4697  }
4698  }
4699  DSAStack->setForceCaptureByReferenceInTargetExecutable(
4700  SavedForceCaptureByReferenceInTargetExecutable);
4701  }
4702  }
4703 }
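// Illustrative user code (exposition only; the function name is made up): for
// a target-based directive, variables that a lambda captures by reference are
// marked referenced here so they are mapped even though only the lambda
// object itself appears in the region.
void lambda_capture_example() {
  int acc = 0;
  auto addOne = [&acc]() { acc += 1; }; // 'acc' is captured by reference
#pragma omp target map(tofrom : acc)
  addOne(); // 'acc' must be treated as used inside the target region
}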
4704 
4705 static bool checkOrderedOrderSpecified(Sema &S,
4706  const ArrayRef<OMPClause *> Clauses) {
4707  const OMPOrderedClause *Ordered = nullptr;
4708  const OMPOrderClause *Order = nullptr;
4709 
4710  for (const OMPClause *Clause : Clauses) {
4711  if (Clause->getClauseKind() == OMPC_ordered)
4712  Ordered = cast<OMPOrderedClause>(Clause);
4713  else if (Clause->getClauseKind() == OMPC_order) {
4714  Order = cast<OMPOrderClause>(Clause);
4715  if (Order->getKind() != OMPC_ORDER_concurrent)
4716  Order = nullptr;
4717  }
4718  if (Ordered && Order)
4719  break;
4720  }
4721 
4722  if (Ordered && Order) {
4723  S.Diag(Order->getKindKwLoc(),
4724  diag::err_omp_simple_clause_incompatible_with_ordered)
4725  << getOpenMPClauseName(OMPC_order)
4726  << getOpenMPSimpleClauseTypeName(OMPC_order, OMPC_ORDER_concurrent)
4727  << SourceRange(Order->getBeginLoc(), Order->getEndLoc());
4728  S.Diag(Ordered->getBeginLoc(), diag::note_omp_ordered_param)
4729  << 0 << SourceRange(Ordered->getBeginLoc(), Ordered->getEndLoc());
4730  return true;
4731  }
4732  return false;
4733 }
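// Illustrative user code (exposition only; function name made up): the check
// above rejects combining 'ordered' with 'order(concurrent)' on the same
// worksharing-loop directive.
void order_clause_example(int *a, int n) {
#pragma omp parallel for order(concurrent) // OK on its own (OpenMP 5.0)
  for (int i = 0; i < n; ++i)
    a[i] = i;
  // #pragma omp parallel for ordered order(concurrent)   // error:
  // for (int i = 0; i < n; ++i) { ... }   // 'order(concurrent)' conflicts with 'ordered'
}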
4734 
4735 StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
4736  ArrayRef<OMPClause *> Clauses) {
4737  handleDeclareVariantConstructTrait(DSAStack, DSAStack->getCurrentDirective(),
4738  /* ScopeEntry */ false);
4739  if (DSAStack->getCurrentDirective() == OMPD_atomic ||
4740  DSAStack->getCurrentDirective() == OMPD_critical ||
4741  DSAStack->getCurrentDirective() == OMPD_section ||
4742  DSAStack->getCurrentDirective() == OMPD_master ||
4743  DSAStack->getCurrentDirective() == OMPD_masked)
4744  return S;
4745 
4746  bool ErrorFound = false;
4747  CaptureRegionUnwinderRAII CaptureRegionUnwinder(
4748  *this, ErrorFound, DSAStack->getCurrentDirective());
4749  if (!S.isUsable()) {
4750  ErrorFound = true;
4751  return StmtError();
4752  }
4753 
4754  SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
4755  getOpenMPCaptureRegions(CaptureRegions, DSAStack->getCurrentDirective());
4756  OMPOrderedClause *OC = nullptr;
4757  OMPScheduleClause *SC = nullptr;
4758  SmallVector<const OMPLinearClause *, 4> LCs;
4759  SmallVector<const OMPClauseWithPreInit *, 4> PICs;
4760  // This is required for proper codegen.
4761  for (OMPClause *Clause : Clauses) {
4762  if (!LangOpts.OpenMPSimd &&
4763  (isOpenMPTaskingDirective(DSAStack->getCurrentDirective()) ||
4764  DSAStack->getCurrentDirective() == OMPD_target) &&
4765  Clause->getClauseKind() == OMPC_in_reduction) {
4766  // Capture taskgroup task_reduction descriptors inside the tasking regions
4767  // with the corresponding in_reduction items.
4768  auto *IRC = cast<OMPInReductionClause>(Clause);
4769  for (Expr *E : IRC->taskgroup_descriptors())
4770  if (E)
4771  MarkDeclarationsReferencedInExpr(E);
4772  }
4773  if (isOpenMPPrivate(Clause->getClauseKind()) ||
4774  Clause->getClauseKind() == OMPC_copyprivate ||
4775  (getLangOpts().OpenMPUseTLS &&
4776  getASTContext().getTargetInfo().isTLSSupported() &&
4777  Clause->getClauseKind() == OMPC_copyin)) {
4778  DSAStack->setForceVarCapturing(Clause->getClauseKind() == OMPC_copyin);
4779  // Mark all variables in private list clauses as used in inner region.
4780  for (Stmt *VarRef : Clause->children()) {
4781  if (auto *E = cast_or_null<Expr>(VarRef)) {
4782  MarkDeclarationsReferencedInExpr(E);
4783  }
4784  }
4785  DSAStack->setForceVarCapturing(/*V=*/false);
4786  } else if (isOpenMPLoopTransformationDirective(
4787  DSAStack->getCurrentDirective())) {
4788  assert(CaptureRegions.empty() &&
4789  "No captured regions in loop transformation directives.");
4790  } else if (CaptureRegions.size() > 1 ||
4791  CaptureRegions.back() != OMPD_unknown) {
4792  if (auto *C = OMPClauseWithPreInit::get(Clause))
4793  PICs.push_back(C);
4794  if (auto *C = OMPClauseWithPostUpdate::get(Clause)) {
4795  if (Expr *E = C->getPostUpdateExpr())
4796  MarkDeclarationsReferencedInExpr(E);
4797  }
4798  }
4799  if (Clause->getClauseKind() == OMPC_schedule)
4800  SC = cast<OMPScheduleClause>(Clause);
4801  else if (Clause->getClauseKind() == OMPC_ordered)
4802  OC = cast<OMPOrderedClause>(Clause);
4803  else if (Clause->getClauseKind() == OMPC_linear)
4804  LCs.push_back(cast<OMPLinearClause>(Clause));
4805  }
4806  // Capture allocator expressions if used.
4807  for (Expr *E : DSAStack->getInnerAllocators())
4808  MarkDeclarationsReferencedInExpr(E);
4809  // OpenMP, 2.7.1 Loop Construct, Restrictions
4810  // The nonmonotonic modifier cannot be specified if an ordered clause is
4811  // specified.
4812  if (SC &&
4813  (SC->getFirstScheduleModifier() == OMPC_SCHEDULE_MODIFIER_nonmonotonic ||
4814  SC->getSecondScheduleModifier() ==
4815  OMPC_SCHEDULE_MODIFIER_nonmonotonic) &&
4816  OC) {
4817  Diag(SC->getFirstScheduleModifier() == OMPC_SCHEDULE_MODIFIER_nonmonotonic
4818  ? SC->getFirstScheduleModifierLoc()
4819  : SC->getSecondScheduleModifierLoc(),
4820  diag::err_omp_simple_clause_incompatible_with_ordered)
4821  << getOpenMPClauseName(OMPC_schedule)
4822  << getOpenMPSimpleClauseTypeName(OMPC_schedule,
4823  OMPC_SCHEDULE_MODIFIER_nonmonotonic)
4824  << SourceRange(OC->getBeginLoc(), OC->getEndLoc());
4825  ErrorFound = true;
4826  }
4827  // OpenMP 5.0, 2.9.2 Worksharing-Loop Construct, Restrictions.
4828  // If an order(concurrent) clause is present, an ordered clause may not appear
4829  // on the same directive.
4830  if (checkOrderedOrderSpecified(*this, Clauses))
4831  ErrorFound = true;
4832  if (!LCs.empty() && OC && OC->getNumForLoops()) {
4833  for (const OMPLinearClause *C : LCs) {
4834  Diag(C->getBeginLoc(), diag::err_omp_linear_ordered)
4835  << SourceRange(OC->getBeginLoc(), OC->getEndLoc());
4836  }
4837  ErrorFound = true;
4838  }
4839  if (isOpenMPWorksharingDirective(DSAStack->getCurrentDirective()) &&
4840  isOpenMPSimdDirective(DSAStack->getCurrentDirective()) && OC &&
4841  OC->getNumForLoops()) {
4842  Diag(OC->getBeginLoc(), diag::err_omp_ordered_simd)
4843  << getOpenMPDirectiveName(DSAStack->getCurrentDirective());
4844  ErrorFound = true;
4845  }
4846  if (ErrorFound) {
4847  return StmtError();
4848  }
4849  StmtResult SR = S;
4850  unsigned CompletedRegions = 0;
4851  for (OpenMPDirectiveKind ThisCaptureRegion : llvm::reverse(CaptureRegions)) {
4852  // Mark all variables in private list clauses as used in inner region.
4853  // Required for proper codegen of combined directives.
4854  // TODO: add processing for other clauses.
4855  if (ThisCaptureRegion != OMPD_unknown) {
4856  for (const clang::OMPClauseWithPreInit *C : PICs) {
4857  OpenMPDirectiveKind CaptureRegion = C->getCaptureRegion();
4858  // Find the particular capture region for the clause if the
4859  // directive is a combined one with multiple capture regions.
4860  // If the directive is not a combined one, the capture region
4861  // associated with the clause is OMPD_unknown and is generated
4862  // only once.
4863  if (CaptureRegion == ThisCaptureRegion ||
4864  CaptureRegion == OMPD_unknown) {
4865  if (auto *DS = cast_or_null<DeclStmt>(C->getPreInitStmt())) {
4866  for (Decl *D : DS->decls())
4867  MarkVariableReferenced(D->getLocation(), cast<VarDecl>(D));
4868  }
4869  }
4870  }
4871  }
4872  if (ThisCaptureRegion == OMPD_target) {
4873  // Capture allocator traits in the target region. They are used implicitly
4874  // and, thus, are not captured by default.
4875  for (OMPClause *C : Clauses) {
4876  if (const auto *UAC = dyn_cast<OMPUsesAllocatorsClause>(C)) {
4877  for (unsigned I = 0, End = UAC->getNumberOfAllocators(); I < End;
4878  ++I) {
4879  OMPUsesAllocatorsClause::Data D = UAC->getAllocatorData(I);
4880  if (Expr *E = D.AllocatorTraits)
4881  MarkDeclarationsReferencedInExpr(E);
4882  }
4883  continue;
4884  }
4885  }
4886  }
4887  if (ThisCaptureRegion == OMPD_parallel) {
4888  // Capture temp arrays for inscan reductions and locals in aligned
4889  // clauses.
4890  for (OMPClause *C : Clauses) {
4891  if (auto *RC = dyn_cast<OMPReductionClause>(C)) {
4892  if (RC->getModifier() != OMPC_REDUCTION_inscan)
4893  continue;
4894  for (Expr *E : RC->copy_array_temps())
4895  MarkDeclarationsReferencedInExpr(E);
4896  }
4897  if (auto *AC = dyn_cast<OMPAlignedClause>(C)) {
4898  for (Expr *E : AC->varlists())
4899  MarkDeclarationsReferencedInExpr(E);
4900  }
4901  }
4902  }
4903  if (++CompletedRegions == CaptureRegions.size())
4904  DSAStack->setBodyComplete();
4905  SR = ActOnCapturedRegionEnd(SR.get());
4906  }
4907  return SR;
4908 }
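// Illustrative user code (exposition only; function name made up) for two of
// the clause restrictions enforced above: the nonmonotonic schedule modifier
// and a 'linear' clause are both incompatible with 'ordered' on a
// worksharing-loop directive.
void region_end_checks_example(int *a, int n) {
#pragma omp parallel for schedule(dynamic) ordered // OK
  for (int i = 0; i < n; ++i) {
#pragma omp ordered
    a[i] = i;
  }
  // #pragma omp for schedule(nonmonotonic : dynamic) ordered // error:
  //   ...          // nonmonotonic modifier cannot be used with 'ordered'
  // #pragma omp for ordered(1) linear(j)                     // error:
  //   ...          // 'linear' cannot be used with 'ordered(n)'
}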
4909 
4910 static bool checkCancelRegion(Sema &SemaRef, OpenMPDirectiveKind CurrentRegion,
4911  OpenMPDirectiveKind CancelRegion,
4912  SourceLocation StartLoc) {
4913  // CancelRegion is only needed for cancel and cancellation_point.
4914  if (CurrentRegion != OMPD_cancel && CurrentRegion != OMPD_cancellation_point)
4915  return false;
4916 
4917  if (CancelRegion == OMPD_parallel || CancelRegion == OMPD_for ||
4918  CancelRegion == OMPD_sections || CancelRegion == OMPD_taskgroup)
4919  return false;
4920 
4921  SemaRef.Diag(StartLoc, diag::err_omp_wrong_cancel_region)
4922  << getOpenMPDirectiveName(CancelRegion);
4923  return true;
4924 }
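// Illustrative user code (exposition only; function name made up): the
// construct-type-clause on 'cancel' / 'cancellation point' must name
// parallel, for, sections, or taskgroup; any other directive kind is
// diagnosed here.
void cancel_example(int stop) {
#pragma omp parallel
  {
#pragma omp cancellation point parallel // OK: names the enclosing region kind
    if (stop) {
#pragma omp cancel parallel // OK
    }
    // A spelling such as '#pragma omp cancel simd' names a construct type
    // that is not cancellable and is rejected by the check above.
  }
}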
4925 
4926 static bool checkNestingOfRegions(Sema &SemaRef, const DSAStackTy *Stack,
4927  OpenMPDirectiveKind CurrentRegion,
4928  const DeclarationNameInfo &CurrentName,
4929  OpenMPDirectiveKind CancelRegion,
4930  OpenMPBindClauseKind BindKind,
4931  SourceLocation StartLoc) {
4932  if (Stack->getCurScope()) {
4933  OpenMPDirectiveKind ParentRegion = Stack->getParentDirective();
4934  OpenMPDirectiveKind OffendingRegion = ParentRegion;
4935  bool NestingProhibited = false;
4936  bool CloseNesting = true;
4937  bool OrphanSeen = false;
4938  enum {
4939  NoRecommend,
4940  ShouldBeInParallelRegion,
4941  ShouldBeInOrderedRegion,
4942  ShouldBeInTargetRegion,
4943  ShouldBeInTeamsRegion,
4944  ShouldBeInLoopSimdRegion,
4945  } Recommend = NoRecommend;
4946  if (isOpenMPSimdDirective(ParentRegion) &&
4947  ((SemaRef.LangOpts.OpenMP <= 45 && CurrentRegion != OMPD_ordered) ||
4948  (SemaRef.LangOpts.OpenMP >= 50 && CurrentRegion != OMPD_ordered &&
4949  CurrentRegion != OMPD_simd && CurrentRegion != OMPD_atomic &&
4950  CurrentRegion != OMPD_scan))) {
4951  // OpenMP [2.16, Nesting of Regions]
4952  // OpenMP constructs may not be nested inside a simd region.
4953  // OpenMP [2.8.1,simd Construct, Restrictions]
4954  // An ordered construct with the simd clause is the only OpenMP
4955  // construct that can appear in the simd region.
4956  // Allowing a SIMD construct nested in another SIMD construct is an
4957  // extension. The OpenMP 4.5 spec does not allow it. Issue a warning
4958  // message.
4959  // OpenMP 5.0 [2.9.3.1, simd Construct, Restrictions]
4960  // The only OpenMP constructs that can be encountered during execution of
4961  // a simd region are the atomic construct, the loop construct, the simd
4962  // construct and the ordered construct with the simd clause.
4963  SemaRef.Diag(StartLoc, (CurrentRegion != OMPD_simd)
4964  ? diag::err_omp_prohibited_region_simd
4965  : diag::warn_omp_nesting_simd)
4966  << (SemaRef.LangOpts.OpenMP >= 50 ? 1 : 0);
4967  return CurrentRegion != OMPD_simd;
4968  }
4969  if (ParentRegion == OMPD_atomic) {
4970  // OpenMP [2.16, Nesting of Regions]
4971  // OpenMP constructs may not be nested inside an atomic region.
4972  SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region_atomic);
4973  return true;
4974  }
4975  if (CurrentRegion == OMPD_section) {
4976  // OpenMP [2.7.2, sections Construct, Restrictions]
4977  // Orphaned section directives are prohibited. That is, the section
4978  // directives must appear within the sections construct and must not be
4979  // encountered elsewhere in the sections region.
4980  if (ParentRegion != OMPD_sections &&
4981  ParentRegion != OMPD_parallel_sections) {
4982  SemaRef.Diag(StartLoc, diag::err_omp_orphaned_section_directive)
4983  << (ParentRegion != OMPD_unknown)
4984  << getOpenMPDirectiveName(ParentRegion);
4985  return true;
4986  }
4987  return false;
4988  }
4989  // Allow some constructs (except teams and cancellation constructs) to be
4990  // orphaned (they could be used in functions, called from OpenMP regions
4991  // with the required preconditions).
4992  if (ParentRegion == OMPD_unknown &&
4993  !isOpenMPNestingTeamsDirective(CurrentRegion) &&
4994  CurrentRegion != OMPD_cancellation_point &&
4995  CurrentRegion != OMPD_cancel && CurrentRegion != OMPD_scan)
4996  return false;
4997  if (CurrentRegion == OMPD_cancellation_point ||
4998  CurrentRegion == OMPD_cancel) {
4999  // OpenMP [2.16, Nesting of Regions]
5000  // A cancellation point construct for which construct-type-clause is
5001  // taskgroup must be nested inside a task construct. A cancellation
5002  // point construct for which construct-type-clause is not taskgroup must
5003  // be closely nested inside an OpenMP construct that matches the type
5004  // specified in construct-type-clause.
5005  // A cancel construct for which construct-type-clause is taskgroup must be
5006  // nested inside a task construct. A cancel construct for which
5007  // construct-type-clause is not taskgroup must be closely nested inside an
5008  // OpenMP construct that matches the type specified in
5009  // construct-type-clause.
5010  NestingProhibited =
5011  !((CancelRegion == OMPD_parallel &&
5012  (ParentRegion == OMPD_parallel ||
5013  ParentRegion == OMPD_target_parallel)) ||
5014  (CancelRegion == OMPD_for &&
5015  (ParentRegion == OMPD_for || ParentRegion == OMPD_parallel_for ||
5016  ParentRegion == OMPD_target_parallel_for ||
5017  ParentRegion == OMPD_distribute_parallel_for ||
5018  ParentRegion == OMPD_teams_distribute_parallel_for ||
5019  ParentRegion == OMPD_target_teams_distribute_parallel_for)) ||
5020  (CancelRegion == OMPD_taskgroup &&
5021  (ParentRegion == OMPD_task ||
5022  (SemaRef.getLangOpts().OpenMP >= 50 &&
5023  (ParentRegion == OMPD_taskloop ||
5024  ParentRegion == OMPD_master_taskloop ||
5025  ParentRegion == OMPD_masked_taskloop ||
5026  ParentRegion == OMPD_parallel_masked_taskloop ||
5027  ParentRegion == OMPD_parallel_master_taskloop)))) ||
5028  (CancelRegion == OMPD_sections &&
5029  (ParentRegion == OMPD_section || ParentRegion == OMPD_sections ||
5030  ParentRegion == OMPD_parallel_sections)));
5031  OrphanSeen = ParentRegion == OMPD_unknown;
5032  } else if (CurrentRegion == OMPD_master || CurrentRegion == OMPD_masked) {
5033  // OpenMP 5.1 [2.22, Nesting of Regions]
5034  // A masked region may not be closely nested inside a worksharing, loop,
5035  // atomic, task, or taskloop region.
5036  NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
5037  isOpenMPGenericLoopDirective(ParentRegion) ||
5038  isOpenMPTaskingDirective(ParentRegion);
5039  } else if (CurrentRegion == OMPD_critical && CurrentName.getName()) {
5040  // OpenMP [2.16, Nesting of Regions]
5041  // A critical region may not be nested (closely or otherwise) inside a
5042  // critical region with the same name. Note that this restriction is not
5043  // sufficient to prevent deadlock.
5044  SourceLocation PreviousCriticalLoc;
5045  bool DeadLock = Stack->hasDirective(
5046  [CurrentName, &PreviousCriticalLoc](OpenMPDirectiveKind K,
5047  const DeclarationNameInfo &DNI,
5048  SourceLocation Loc) {
5049  if (K == OMPD_critical && DNI.getName() == CurrentName.getName()) {
5050  PreviousCriticalLoc = Loc;
5051  return true;
5052  }
5053  return false;
5054  },
5055  false /* skip top directive */);
5056  if (DeadLock) {
5057  SemaRef.Diag(StartLoc,
5058  diag::err_omp_prohibited_region_critical_same_name)
5059  << CurrentName.getName();
5060  if (PreviousCriticalLoc.isValid())
5061  SemaRef.Diag(PreviousCriticalLoc,
5062  diag::note_omp_previous_critical_region);
5063  return true;
5064  }
5065  } else if (CurrentRegion == OMPD_barrier) {
5066  // OpenMP 5.1 [2.22, Nesting of Regions]
5067  // A barrier region may not be closely nested inside a worksharing, loop,
5068  // task, taskloop, critical, ordered, atomic, or masked region.
5069  NestingProhibited =
5070  isOpenMPWorksharingDirective(ParentRegion) ||
5071  isOpenMPGenericLoopDirective(ParentRegion) ||
5072  isOpenMPTaskingDirective(ParentRegion) ||
5073  ParentRegion == OMPD_master || ParentRegion == OMPD_masked ||
5074  ParentRegion == OMPD_parallel_master ||
5075  ParentRegion == OMPD_parallel_masked ||
5076  ParentRegion == OMPD_critical || ParentRegion == OMPD_ordered;
5077  } else if (isOpenMPWorksharingDirective(CurrentRegion) &&
5078  !isOpenMPParallelDirective(CurrentRegion) &&
5079  !isOpenMPTeamsDirective(CurrentRegion)) {
5080  // OpenMP 5.1 [2.22, Nesting of Regions]
5081  // A loop region that binds to a parallel region or a worksharing region
5082  // may not be closely nested inside a worksharing, loop, task, taskloop,
5083  // critical, ordered, atomic, or masked region.
5084  NestingProhibited =
5085  isOpenMPWorksharingDirective(ParentRegion) ||
5086  isOpenMPGenericLoopDirective(ParentRegion) ||
5087  isOpenMPTaskingDirective(ParentRegion) ||
5088  ParentRegion == OMPD_master || ParentRegion == OMPD_masked ||
5089  ParentRegion == OMPD_parallel_master ||
5090  ParentRegion == OMPD_parallel_masked ||
5091  ParentRegion == OMPD_critical || ParentRegion == OMPD_ordered;
5092  Recommend = ShouldBeInParallelRegion;
5093  } else if (CurrentRegion == OMPD_ordered) {
5094  // OpenMP [2.16, Nesting of Regions]
5095  // An ordered region may not be closely nested inside a critical,
5096  // atomic, or explicit task region.
5097  // An ordered region must be closely nested inside a loop region (or
5098  // parallel loop region) with an ordered clause.
5099  // OpenMP [2.8.1,simd Construct, Restrictions]
5100  // An ordered construct with the simd clause is the only OpenMP construct
5101  // that can appear in the simd region.
5102  NestingProhibited = ParentRegion == OMPD_critical ||
5103  isOpenMPTaskingDirective(ParentRegion) ||
5104  !(isOpenMPSimdDirective(ParentRegion) ||
5105  Stack->isParentOrderedRegion());
5106  Recommend = ShouldBeInOrderedRegion;
5107  } else if (isOpenMPNestingTeamsDirective(CurrentRegion)) {
5108  // OpenMP [2.16, Nesting of Regions]
5109  // If specified, a teams construct must be contained within a target
5110  // construct.
5111  NestingProhibited =
5112  (SemaRef.LangOpts.OpenMP <= 45 && ParentRegion != OMPD_target) ||
5113  (SemaRef.LangOpts.OpenMP >= 50 && ParentRegion != OMPD_unknown &&
5114  ParentRegion != OMPD_target);
5115  OrphanSeen = ParentRegion == OMPD_unknown;
5116  Recommend = ShouldBeInTargetRegion;
5117  } else if (CurrentRegion == OMPD_scan) {
5118  // OpenMP 5.0 [scan Directive, Restrictions]
5119  // A scan region must be closely nested inside a simd, for, for simd,
5120  // parallel for, or parallel for simd region.
5121  NestingProhibited =
5122  SemaRef.LangOpts.OpenMP < 50 ||
5123  (ParentRegion != OMPD_simd && ParentRegion != OMPD_for &&
5124  ParentRegion != OMPD_for_simd && ParentRegion != OMPD_parallel_for &&
5125  ParentRegion != OMPD_parallel_for_simd);
5126  OrphanSeen = ParentRegion == OMPD_unknown;
5127  Recommend = ShouldBeInLoopSimdRegion;
5128  }
5129  if (!NestingProhibited &&
5130  !isOpenMPTargetExecutionDirective(CurrentRegion) &&
5131  !isOpenMPTargetDataManagementDirective(CurrentRegion) &&
5132  (ParentRegion == OMPD_teams || ParentRegion == OMPD_target_teams)) {
5133  // OpenMP [5.1, 2.22, Nesting of Regions]
5134  // distribute, distribute simd, distribute parallel worksharing-loop,
5135  // distribute parallel worksharing-loop SIMD, loop, parallel regions,
5136  // including any parallel regions arising from combined constructs,
5137  // omp_get_num_teams() regions, and omp_get_team_num() regions are the
5138  // only OpenMP regions that may be strictly nested inside the teams
5139  // region.
5140  //
5141  // As an extension, we permit atomic within teams as well.
5142  NestingProhibited = !isOpenMPParallelDirective(CurrentRegion) &&
5143  !isOpenMPDistributeDirective(CurrentRegion) &&
5144  CurrentRegion != OMPD_loop &&
5145  !(SemaRef.getLangOpts().OpenMPExtensions &&
5146  CurrentRegion == OMPD_atomic);
5147  Recommend = ShouldBeInParallelRegion;
5148  }
5149  if (!NestingProhibited && CurrentRegion == OMPD_loop) {
5150  // OpenMP [5.1, 2.11.7, loop Construct, Restrictions]
5151  // If the bind clause is present on the loop construct and binding is
5152  // teams then the corresponding loop region must be strictly nested inside
5153  // a teams region.
5154  NestingProhibited = BindKind == OMPC_BIND_teams &&
5155  ParentRegion != OMPD_teams &&
5156  ParentRegion != OMPD_target_teams;
5157  Recommend = ShouldBeInTeamsRegion;
5158  }
5159  if (!NestingProhibited &&
5160  isOpenMPNestingDistributeDirective(CurrentRegion)) {
5161  // OpenMP 4.5 [2.17 Nesting of Regions]
5162  // The region associated with the distribute construct must be strictly
5163  // nested inside a teams region
5164  NestingProhibited =
5165  (ParentRegion != OMPD_teams && ParentRegion != OMPD_target_teams);
5166  Recommend = ShouldBeInTeamsRegion;
5167  }
5168  if (!NestingProhibited &&
5169  (isOpenMPTargetExecutionDirective(CurrentRegion) ||
5170  isOpenMPTargetDataManagementDirective(CurrentRegion))) {
5171  // OpenMP 4.5 [2.17 Nesting of Regions]
5172  // If a target, target update, target data, target enter data, or
5173  // target exit data construct is encountered during execution of a
5174  // target region, the behavior is unspecified.
5175  NestingProhibited = Stack->hasDirective(
5176  [&OffendingRegion](OpenMPDirectiveKind K, const DeclarationNameInfo &,
5177  SourceLocation) {
5178  if (isOpenMPTargetExecutionDirective(K)) {
5179  OffendingRegion = K;
5180  return true;
5181  }
5182  return false;
5183  },
5184  false /* don't skip top directive */);
5185  CloseNesting = false;
5186  }
5187  if (NestingProhibited) {
5188  if (OrphanSeen) {
5189  SemaRef.Diag(StartLoc, diag::err_omp_orphaned_device_directive)
5190  << getOpenMPDirectiveName(CurrentRegion) << Recommend;
5191  } else {
5192  SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region)
5193  << CloseNesting << getOpenMPDirectiveName(OffendingRegion)
5194  << Recommend << getOpenMPDirectiveName(CurrentRegion);
5195  }
5196  return true;
5197  }
5198  }
5199  return false;
5200 }
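// Illustrative user code (exposition only; function name made up) for one of
// the nesting rules checked above: a 'teams' construct must be strictly
// nested inside a 'target' construct (with OpenMP >= 5.0 it may also appear
// outside of any enclosing OpenMP region, but never inside another construct).
void teams_nesting_example() {
#pragma omp target
#pragma omp teams // OK: 'teams' strictly nested inside 'target'
  {
  }
  // #pragma omp parallel
  // #pragma omp teams   // error: 'teams' may not be nested inside 'parallel'
  // { }
}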
5201 
5202 struct Kind2Unsigned {
5203  using argument_type = OpenMPDirectiveKind;
5204  unsigned operator()(argument_type DK) { return unsigned(DK); }
5205 };
5206 static bool checkIfClauses(Sema &S, OpenMPDirectiveKind Kind,
5207  ArrayRef<OMPClause *> Clauses,
5208  ArrayRef<OpenMPDirectiveKind> AllowedNameModifiers) {
5209  bool ErrorFound = false;
5210  unsigned NamedModifiersNumber = 0;
5211  llvm::IndexedMap<const OMPIfClause *, Kind2Unsigned> FoundNameModifiers;
5212  FoundNameModifiers.resize(llvm::omp::Directive_enumSize + 1);
5213  SmallVector<SourceLocation, 4> NameModifierLoc;
5214  for (const OMPClause *C : Clauses) {
5215  if (const auto *IC = dyn_cast_or_null<OMPIfClause>(C)) {
5216  // At most one if clause without a directive-name-modifier can appear on
5217  // the directive.
5218  OpenMPDirectiveKind CurNM = IC->getNameModifier();
5219  if (FoundNameModifiers[CurNM]) {
5220  S.Diag(C->getBeginLoc(), diag::err_omp_more_one_clause)
5221  << getOpenMPDirectiveName(Kind) << getOpenMPClauseName(OMPC_if)
5222  << (CurNM != OMPD_unknown) << getOpenMPDirectiveName(CurNM);
5223  ErrorFound = true;
5224  } else if (CurNM != OMPD_unknown) {
5225  NameModifierLoc.push_back(IC->getNameModifierLoc());
5226  ++NamedModifiersNumber;
5227  }
5228  FoundNameModifiers[CurNM] = IC;
5229  if (CurNM == OMPD_unknown)
5230  continue;
5231  // Check if the specified name modifier is allowed for the current
5232  // directive.
5233  // At most one if clause with the particular directive-name-modifier can
5234  // appear on the directive.
5235  if (!llvm::is_contained(AllowedNameModifiers, CurNM)) {
5236  S.Diag(IC->getNameModifierLoc(),
5237  diag::err_omp_wrong_if_directive_name_modifier)
5238  << getOpenMPDirectiveName(CurNM) << getOpenMPDirectiveName(Kind);
5239  ErrorFound = true;
5240  }
5241  }
5242  }
5243  // If any if clause on the directive includes a directive-name-modifier then
5244  // all if clauses on the directive must include a directive-name-modifier.
5245  if (FoundNameModifiers[OMPD_unknown] && NamedModifiersNumber > 0) {
5246  if (NamedModifiersNumber == AllowedNameModifiers.size()) {
5247  S.Diag(FoundNameModifiers[OMPD_unknown]->getBeginLoc(),
5248  diag::err_omp_no_more_if_clause);
5249  } else {
5250  std::string Values;
5251  std::string Sep(", ");
5252  unsigned AllowedCnt = 0;
5253  unsigned TotalAllowedNum =
5254  AllowedNameModifiers.size() - NamedModifiersNumber;
5255  for (unsigned Cnt = 0, End = AllowedNameModifiers.size(); Cnt < End;
5256  ++Cnt) {
5257  OpenMPDirectiveKind NM = AllowedNameModifiers[Cnt];
5258  if (!FoundNameModifiers[NM]) {
5259  Values += "'";
5260  Values += getOpenMPDirectiveName(NM);
5261  Values += "'";
5262  if (AllowedCnt + 2 == TotalAllowedNum)
5263  Values += " or ";
5264  else if (AllowedCnt + 1 != TotalAllowedNum)
5265  Values += Sep;
5266  ++AllowedCnt;
5267  }
5268  }
5269  S.Diag(FoundNameModifiers[OMPD_unknown]->getCondition()->getBeginLoc(),
5270  diag::err_omp_unnamed_if_clause)
5271  << (TotalAllowedNum > 1) << Values;
5272  }
5273  for (SourceLocation Loc : NameModifierLoc) {
5274  S.Diag(Loc, diag::note_omp_previous_named_if_clause);
5275  }
5276  ErrorFound = true;
5277  }
5278  return ErrorFound;
5279 }
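// Illustrative user code (exposition only; function name made up): when
// several 'if' clauses appear on a combined directive, either none or all of
// them must carry a directive-name-modifier, and each modifier may be used at
// most once.
void if_clause_example(int use_target, int use_parallel, int n, int *a) {
#pragma omp target parallel for if(target : use_target) if(parallel : use_parallel)
  for (int i = 0; i < n; ++i) // OK: every 'if' names the directive it applies to
    a[i] = i;
  // #pragma omp target parallel for if(use_target) if(parallel : use_parallel)
  //   ...   // error: the unnamed 'if' clause must also carry a name modifier
}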
5280 
5281 static std::pair<ValueDecl *, bool> getPrivateItem(Sema &S, Expr *&RefExpr,
5282  SourceLocation &ELoc,
5283  SourceRange &ERange,
5284  bool AllowArraySection,
5285  StringRef DiagType) {
5286  if (RefExpr->isTypeDependent() || RefExpr->isValueDependent() ||
5287  RefExpr->containsUnexpandedParameterPack())
5288  return std::make_pair(nullptr, true);
5289 
5290  // OpenMP [3.1, C/C++]
5291  // A list item is a variable name.
5292  // OpenMP [2.9.3.3, Restrictions, p.1]
5293  // A variable that is part of another variable (as an array or
5294  // structure element) cannot appear in a private clause.
5295  RefExpr = RefExpr->IgnoreParens();
5296  enum {
5297  NoArrayExpr = -1,
5298  ArraySubscript = 0,
5299  OMPArraySection = 1
5300  } IsArrayExpr = NoArrayExpr;
5301  if (AllowArraySection) {
5302  if (auto *ASE = dyn_cast_or_null<ArraySubscriptExpr>(RefExpr)) {
5303  Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
5304  while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
5305  Base = TempASE->getBase()->IgnoreParenImpCasts();
5306  RefExpr = Base;
5307  IsArrayExpr = ArraySubscript;
5308  } else if (auto *OASE = dyn_cast_or_null<OMPArraySectionExpr>(RefExpr)) {
5309  Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
5310  while (auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
5311  Base = TempOASE->getBase()->IgnoreParenImpCasts();
5312  while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
5313  Base = TempASE->getBase()->IgnoreParenImpCasts();
5314  RefExpr = Base;
5315  IsArrayExpr = OMPArraySection;
5316  }
5317  }
5318  ELoc = RefExpr->getExprLoc();
5319  ERange = RefExpr->getSourceRange();
5320  RefExpr = RefExpr->IgnoreParenImpCasts();
5321  auto *DE = dyn_cast_or_null<DeclRefExpr>(RefExpr);
5322  auto *ME = dyn_cast_or_null<MemberExpr>(RefExpr);
5323  if ((!DE || !isa<VarDecl>(DE->getDecl())) &&
5324  (S.getCurrentThisType().isNull() || !ME ||
5325  !isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts()) ||
5326  !isa<FieldDecl>(ME->getMemberDecl()))) {
5327  if (IsArrayExpr != NoArrayExpr) {
5328  S.Diag(ELoc, diag::err_omp_expected_base_var_name)
5329  << IsArrayExpr << ERange;
5330  } else if (!DiagType.empty()) {
5331  unsigned DiagSelect = S.getLangOpts().CPlusPlus
5332  ? (S.getCurrentThisType().isNull() ? 1 : 2)
5333  : 0;
5334  S.Diag(ELoc, diag::err_omp_expected_var_name_member_expr_with_type)
5335  << DiagSelect << DiagType << ERange;
5336  } else {
5337  S.Diag(ELoc,
5338  AllowArraySection
5339  ? diag::err_omp_expected_var_name_member_expr_or_array_item
5340  : diag::err_omp_expected_var_name_member_expr)
5341  << (S.getCurrentThisType().isNull() ? 0 : 1) << ERange;
5342  }
5343  return std::make_pair(nullptr, false);
5344  }
5345  return std::make_pair(
5346  getCanonicalDecl(DE ? DE->getDecl() : ME->getMemberDecl()), false);
5347 }
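// Illustrative user code (exposition only; names made up): a list item in a
// data-sharing clause must be a variable name (or, in a non-static member
// function, a data member of 'this'); a subobject of another variable is
// rejected here.
struct PointExample { int x, y; };
void private_item_example(int n) {
  int scalar = 0;
  PointExample p = {0, 0};
#pragma omp parallel for private(scalar) firstprivate(p) // OK: whole variables
  for (int i = 0; i < n; ++i)
    scalar = p.x + i;
  // #pragma omp parallel for private(p.x)   // error: 'p.x' is part of
  //   ...                                   // another variable
}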
5348 
5349 namespace {
5350 /// Checks if the allocator is used in uses_allocators clause to be allowed in
5351 /// target regions.
5352 class AllocatorChecker final : public ConstStmtVisitor<AllocatorChecker, bool> {
5353  DSAStackTy *S = nullptr;
5354 
5355 public:
5356  bool VisitDeclRefExpr(const DeclRefExpr *E) {
5357  return S->isUsesAllocatorsDecl(E->getDecl())
5358  .value_or(DSAStackTy::UsesAllocatorsDeclKind::AllocatorTrait) ==
5359  DSAStackTy::UsesAllocatorsDeclKind::AllocatorTrait;
5360  }
5361  bool VisitStmt(const Stmt *S) {
5362  for (const Stmt *Child : S->children()) {
5363  if (Child && Visit(Child))
5364  return true;
5365  }
5366  return false;
5367  }
5368  explicit AllocatorChecker(DSAStackTy *S) : S(S) {}
5369 };
5370 } // namespace
5371 
5372 static void checkAllocateClauses(Sema &S, DSAStackTy *Stack,
5373  ArrayRef<OMPClause *> Clauses) {
5374  assert(!S.CurContext->isDependentContext() &&
5375  "Expected non-dependent context.");
5376  auto AllocateRange =
5377  llvm::make_filter_range(Clauses, OMPAllocateClause::classof);
5378  llvm::DenseMap<CanonicalDeclPtr<Decl>, CanonicalDeclPtr<VarDecl>> DeclToCopy;
5379  auto PrivateRange = llvm::make_filter_range(Clauses, [](const OMPClause *C) {
5380  return isOpenMPPrivate(C->getClauseKind());
5381  });
5382  for (OMPClause *Cl : PrivateRange) {
5383  MutableArrayRef<Expr *>::iterator I, It, Et;
5384  if (Cl->getClauseKind() == OMPC_private) {
5385  auto *PC = cast<OMPPrivateClause>(Cl);
5386  I = PC->private_copies().begin();
5387  It = PC->varlist_begin();
5388  Et = PC->varlist_end();
5389  } else if (Cl->getClauseKind() == OMPC_firstprivate) {
5390  auto *PC = cast<OMPFirstprivateClause>(Cl);
5391  I = PC->private_copies().begin();
5392  It = PC->varlist_begin();
5393  Et = PC->varlist_end();
5394  } else if (Cl->getClauseKind() == OMPC_lastprivate) {
5395  auto *PC = cast<OMPLastprivateClause>(Cl);
5396  I = PC->private_copies().begin();
5397  It = PC->varlist_begin();
5398  Et = PC->varlist_end();
5399  } else if (Cl->getClauseKind() == OMPC_linear) {
5400  auto *PC = cast<OMPLinearClause>(Cl);
5401  I = PC->privates().begin();
5402  It = PC->varlist_begin();
5403  Et = PC->varlist_end();
5404  } else if (Cl->getClauseKind() == OMPC_reduction) {
5405  auto *PC = cast<OMPReductionClause>(Cl);
5406  I = PC->privates().begin();
5407  It = PC->varlist_begin();
5408  Et = PC->varlist_end();
5409  } else if (Cl->getClauseKind() == OMPC_task_reduction) {
5410  auto *PC = cast<OMPTaskReductionClause>(Cl);
5411  I = PC->privates().begin();
5412  It = PC->varlist_begin();
5413  Et = PC->varlist_end();
5414  } else if (Cl->getClauseKind() == OMPC_in_reduction) {
5415  auto *PC = cast<OMPInReductionClause>(Cl);
5416  I = PC->privates().begin();
5417  It = PC->varlist_begin();
5418  Et = PC->varlist_end();
5419  } else {
5420  llvm_unreachable("Expected private clause.");
5421  }
5422  for (Expr *E : llvm::make_range(It, Et)) {
5423  if (!*I) {
5424  ++I;
5425  continue;
5426  }
5427  SourceLocation ELoc;
5428  SourceRange ERange;
5429  Expr *SimpleRefExpr = E;
5430  auto Res = getPrivateItem(S, SimpleRefExpr, ELoc, ERange,
5431  /*AllowArraySection=*/true);
5432  DeclToCopy.try_emplace(Res.first,
5433  cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()));
5434  ++I;
5435  }
5436  }
5437  for (OMPClause *C : AllocateRange) {
5438  auto *AC = cast<OMPAllocateClause>(C);
5439  if (S.getLangOpts().OpenMP >= 50 &&
5440  !Stack->hasRequiresDeclWithClause<OMPDynamicAllocatorsClause>() &&
5441  isOpenMPTargetExecutionDirective(Stack->getCurrentDirective()) &&
5442  AC->getAllocator()) {
5443  Expr *Allocator = AC->getAllocator();
5444  // OpenMP, 2.12.5 target Construct
5445  // Memory allocators that do not appear in a uses_allocators clause cannot
5446  // appear as an allocator in an allocate clause or be used in the target
5447  // region unless a requires directive with the dynamic_allocators clause
5448  // is present in the same compilation unit.
5449  AllocatorChecker Checker(Stack);
5450  if (Checker.Visit(Allocator))
5451  S.Diag(Allocator->getExprLoc(),
5452  diag::err_omp_allocator_not_in_uses_allocators)
5453  << Allocator->getSourceRange();
5454  }
5455  OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind =
5456  getAllocatorKind(S, Stack, AC->getAllocator());
5457  // OpenMP, 2.11.4 allocate Clause, Restrictions.
5458  // For task, taskloop or target directives, allocation requests to memory
5459  // allocators with the trait access set to thread result in unspecified
5460  // behavior.
5461  if (AllocatorKind == OMPAllocateDeclAttr::OMPThreadMemAlloc &&
5462  (isOpenMPTaskingDirective(Stack->getCurrentDirective()) ||
5463  isOpenMPTargetExecutionDirective(Stack->getCurrentDirective()))) {
5464  S.Diag(AC->getAllocator()->getExprLoc(),
5465  diag::warn_omp_allocate_thread_on_task_target_directive)
5466  << getOpenMPDirectiveName(Stack->getCurrentDirective());
5467  }
5468  for (Expr *E : AC->varlists()) {
5469  SourceLocation ELoc;
5470  SourceRange ERange;
5471  Expr *SimpleRefExpr = E;
5472  auto Res = getPrivateItem(S, SimpleRefExpr, ELoc, ERange);
5473  ValueDecl *VD = Res.first;
5474  DSAStackTy::DSAVarData Data = Stack->getTopDSA(VD, /*FromParent=*/false);
5475  if (!isOpenMPPrivate(Data.CKind)) {
5476  S.Diag(E->getExprLoc(),
5477  diag::err_omp_expected_private_copy_for_allocate);
5478  continue;
5479  }
5480  VarDecl *PrivateVD = DeclToCopy[VD];
5481  if (checkPreviousOMPAllocateAttribute(S, Stack, E, PrivateVD,
5482  AllocatorKind, AC->getAllocator()))
5483  continue;
5484  // Placeholder until allocate clause supports align modifier.
5485  Expr *Alignment = nullptr;
5486  applyOMPAllocateAttribute(S, PrivateVD, AllocatorKind, AC->getAllocator(),
5487  Alignment, E->getSourceRange());
5488  }
5489  }
5490 }
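// Illustrative user code (exposition only; function name made up): inside a
// target region an allocator used in an 'allocate' clause must appear in a
// 'uses_allocators' clause, unless a 'requires dynamic_allocators' directive
// is visible; the allocated list item also needs a private copy on the
// directive.
#include <omp.h>
void allocate_clause_example(int n) {
  int tmp = 0;
#pragma omp target uses_allocators(omp_default_mem_alloc) firstprivate(tmp) allocate(omp_default_mem_alloc : tmp)
  { tmp += n; } // OK: the allocator is listed in 'uses_allocators'
  // Referring to an allocator here that is not listed in 'uses_allocators'
  // (and without 'requires dynamic_allocators') is diagnosed by the check
  // above.
}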
5491 
5492 namespace {
5493 /// Rewrite statements and expressions for Sema \p Actions CurContext.
5494 ///
5495 /// Used to wrap already parsed statements/expressions into a new CapturedStmt
5496 /// context. DeclRefExprs used inside the new context are changed to refer to
5497 /// the captured variable instead.
5498 class CaptureVars : public TreeTransform<CaptureVars> {
5499  using BaseTransform = TreeTransform<CaptureVars>;
5500 
5501 public:
5502  CaptureVars(Sema &Actions) : BaseTransform(Actions) {}
5503 
5504  bool AlwaysRebuild() { return true; }
5505 };
5506 } // namespace
5507 
5508 static VarDecl *precomputeExpr(Sema &Actions,
5509  SmallVectorImpl<Stmt *> &BodyStmts, Expr *E,
5510  StringRef Name) {
5511  Expr *NewE = AssertSuccess(CaptureVars(Actions).TransformExpr(E));
5512  VarDecl *NewVar = buildVarDecl(Actions, {}, NewE->getType(), Name, nullptr,
5513  dyn_cast<DeclRefExpr>(E->IgnoreImplicit()));
5514  auto *NewDeclStmt = cast<DeclStmt>(AssertSuccess(
5515  Actions.ActOnDeclStmt(Actions.ConvertDeclToDeclGroup(NewVar), {}, {})));
5516  Actions.AddInitializerToDecl(NewDeclStmt->getSingleDecl(), NewE, false);
5517  BodyStmts.push_back(NewDeclStmt);
5518  return NewVar;
5519 }
5520 
5521 /// Create a closure that computes the number of iterations of a loop.
5522 ///
5523 /// \param Actions The Sema object.
5524 /// \param LogicalTy Type for the logical iteration number.
5525 /// \param Rel Comparison operator of the loop condition.
5526 /// \param StartExpr Value of the loop counter at the first iteration.
5527 /// \param StopExpr Expression the loop counter is compared against in the loop condition.
5528 /// \param StepExpr Amount of increment after each iteration.
5529 ///
5530 /// \return Closure (CapturedStmt) of the distance calculation.
5531 static CapturedStmt *buildDistanceFunc(Sema &Actions, QualType LogicalTy,
5532  BinaryOperatorKind Rel,
5533  Expr *StartExpr, Expr *StopExpr,
5534  Expr *StepExpr) {
5535  ASTContext &Ctx = Actions.getASTContext();
5536  TypeSourceInfo *LogicalTSI = Ctx.getTrivialTypeSourceInfo(LogicalTy);
5537 
5538  // Captured regions currently don't support return values, we use an
5539  // out-parameter instead. All inputs are implicit captures.
5540  // TODO: Instead of capturing each DeclRefExpr occurring in
5541  // StartExpr/StopExpr/Step, these could also be passed as a value capture.
5542  QualType ResultTy = Ctx.getLValueReferenceType(LogicalTy);
5543  Sema::CapturedParamNameType Params[] = {{"Distance", ResultTy},
5544  {StringRef(), QualType()}};
5545  Actions.ActOnCapturedRegionStart({}, nullptr, CR_Default, Params);
5546 
5547  Stmt *Body;
5548  {
5549  Sema::CompoundScopeRAII CompoundScope(Actions);
5550  CapturedDecl *CS = cast<CapturedDecl>(Actions.CurContext);
5551 
5552  // Get the LValue expression for the result.
5553  ImplicitParamDecl *DistParam = CS->getParam(0);
5554  DeclRefExpr *DistRef = Actions.BuildDeclRefExpr(
5555  DistParam, LogicalTy, VK_LValue, {}, nullptr, nullptr, {}, nullptr);
5556 
5557  SmallVector<Stmt *, 4> BodyStmts;
5558 
5559  // Capture all referenced variable references.
5560  // TODO: Instead of computing NewStart/NewStop/NewStep inside the
5561  // CapturedStmt, we could compute them before and capture the result, to be
5562  // used jointly with the LoopVar function.
5563  VarDecl *NewStart = precomputeExpr(Actions, BodyStmts, StartExpr, ".start");
5564  VarDecl *NewStop = precomputeExpr(Actions, BodyStmts, StopExpr, ".stop");
5565  VarDecl *NewStep = precomputeExpr(Actions, BodyStmts, StepExpr, ".step");
5566  auto BuildVarRef = [&](VarDecl *VD) {
5567  return buildDeclRefExpr(Actions, VD, VD->getType(), {});
5568  };
5569 
5570  IntegerLiteral *Zero = IntegerLiteral::Create(
5571  Ctx, llvm::APInt(Ctx.getIntWidth(LogicalTy), 0), LogicalTy, {});
5572  IntegerLiteral *One = IntegerLiteral::Create(
5573  Ctx, llvm::APInt(Ctx.getIntWidth(LogicalTy), 1), LogicalTy, {});
5574  Expr *Dist;
5575  if (Rel == BO_NE) {
5576  // When using a != comparison, the increment can be +1 or -1. This can be
5577  // dynamic at runtime, so we need to check for the direction.
5578  Expr *IsNegStep = AssertSuccess(
5579  Actions.BuildBinOp(nullptr, {}, BO_LT, BuildVarRef(NewStep), Zero));
5580 
5581  // Positive increment.
5582  Expr *ForwardRange = AssertSuccess(Actions.BuildBinOp(
5583  nullptr, {}, BO_Sub, BuildVarRef(NewStop), BuildVarRef(NewStart)));
5584  ForwardRange = AssertSuccess(
5585  Actions.BuildCStyleCastExpr({}, LogicalTSI, {}, ForwardRange));
5586  Expr *ForwardDist = AssertSuccess(Actions.BuildBinOp(
5587  nullptr, {}, BO_Div, ForwardRange, BuildVarRef(NewStep)));
5588 
5589  // Negative increment.
5590  Expr *BackwardRange = AssertSuccess(Actions.BuildBinOp(
5591  nullptr, {}, BO_Sub, BuildVarRef(NewStart), BuildVarRef(NewStop)));
5592  BackwardRange = AssertSuccess(
5593  Actions.BuildCStyleCastExpr({}, LogicalTSI, {}, BackwardRange));
5594  Expr *NegIncAmount = AssertSuccess(
5595  Actions.BuildUnaryOp(nullptr, {}, UO_Minus, BuildVarRef(NewStep)));
5596  Expr *BackwardDist = AssertSuccess(
5597  Actions.BuildBinOp(nullptr, {}, BO_Div, BackwardRange, NegIncAmount));
5598 
5599  // Use the appropriate case.
5600  Dist = AssertSuccess(Actions.ActOnConditionalOp(
5601  {}, {}, IsNegStep, BackwardDist, ForwardDist));
5602  } else {
5603  assert((Rel == BO_LT || Rel == BO_LE || Rel == BO_GE || Rel == BO_GT) &&
5604  "Expected one of these relational operators");
5605 
5606  // We can derive the direction from any other comparison operator. It is
5607  // not well-formed OpenMP if Step increments/decrements in the other
5608  // direction. Whether at least the first iteration passes the loop