SemaOpenMP.cpp (clang 14.0.0git)
1 //===--- SemaOpenMP.cpp - Semantic Analysis for OpenMP constructs ---------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements semantic analysis for OpenMP directives and
10 /// clauses.
11 ///
12 //===----------------------------------------------------------------------===//
13 
14 #include "TreeTransform.h"
15 #include "clang/AST/ASTContext.h"
16 #include "clang/AST/ASTMutationListener.h"
17 #include "clang/AST/CXXInheritance.h"
18 #include "clang/AST/Decl.h"
19 #include "clang/AST/DeclCXX.h"
20 #include "clang/AST/DeclOpenMP.h"
21 #include "clang/AST/OpenMPClause.h"
22 #include "clang/AST/StmtCXX.h"
23 #include "clang/AST/StmtOpenMP.h"
24 #include "clang/AST/StmtVisitor.h"
25 #include "clang/AST/TypeOrdering.h"
26 #include "clang/Basic/DiagnosticSema.h"
27 #include "clang/Basic/OpenMPKinds.h"
28 #include "clang/Basic/PartialDiagnostic.h"
29 #include "clang/Basic/TargetInfo.h"
30 #include "clang/Sema/Initialization.h"
31 #include "clang/Sema/Lookup.h"
32 #include "clang/Sema/Scope.h"
33 #include "clang/Sema/ScopeInfo.h"
34 #include "clang/Sema/SemaInternal.h"
35 #include "llvm/ADT/IndexedMap.h"
36 #include "llvm/ADT/PointerEmbeddedInt.h"
37 #include "llvm/ADT/STLExtras.h"
38 #include "llvm/ADT/StringExtras.h"
39 #include "llvm/Frontend/OpenMP/OMPConstants.h"
40 #include <set>
41 
42 using namespace clang;
43 using namespace llvm::omp;
44 
45 //===----------------------------------------------------------------------===//
46 // Stack of data-sharing attributes for variables
47 //===----------------------------------------------------------------------===//
48 
49 static const Expr *checkMapClauseExpressionBase(
50     Sema &SemaRef, Expr *E,
51     OMPClauseMappableExprCommon::MappableExprComponentListRef &CurComponents,
52     OpenMPClauseKind CKind, OpenMPDirectiveKind DKind, bool NoDiagnose);
53 
54 namespace {
55 /// Default data sharing attributes, which can be applied to a directive.
56 enum DefaultDataSharingAttributes {
57  DSA_unspecified = 0, /// Data sharing attribute not specified.
58  DSA_none = 1 << 0, /// Default data sharing attribute 'none'.
59  DSA_shared = 1 << 1, /// Default data sharing attribute 'shared'.
60  DSA_firstprivate = 1 << 2, /// Default data sharing attribute 'firstprivate'.
61 };
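 // Illustrative mapping (not part of this file): these values mirror the
 // 'default' clause spellings in OpenMP source, e.g.
 //
 //   #pragma omp parallel default(none)          // -> DSA_none
 //   #pragma omp parallel default(shared)        // -> DSA_shared
 //   #pragma omp parallel default(firstprivate)  // -> DSA_firstprivate (5.1)
 //
 // A directive without a 'default' clause keeps DSA_unspecified.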
62 
63 /// Stack for tracking declarations used in OpenMP directives and
64 /// clauses and their data-sharing attributes.
65 class DSAStackTy {
66 public:
67  struct DSAVarData {
68  OpenMPDirectiveKind DKind = OMPD_unknown;
69  OpenMPClauseKind CKind = OMPC_unknown;
70  unsigned Modifier = 0;
71  const Expr *RefExpr = nullptr;
72  DeclRefExpr *PrivateCopy = nullptr;
73  SourceLocation ImplicitDSALoc;
74  bool AppliedToPointee = false;
75  DSAVarData() = default;
76  DSAVarData(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind,
77  const Expr *RefExpr, DeclRefExpr *PrivateCopy,
78  SourceLocation ImplicitDSALoc, unsigned Modifier,
79  bool AppliedToPointee)
80  : DKind(DKind), CKind(CKind), Modifier(Modifier), RefExpr(RefExpr),
81  PrivateCopy(PrivateCopy), ImplicitDSALoc(ImplicitDSALoc),
82  AppliedToPointee(AppliedToPointee) {}
83  };
84  using OperatorOffsetTy =
85      llvm::SmallVector<std::pair<Expr *, OverloadedOperatorKind>, 4>;
86  using DoacrossDependMapTy =
87      llvm::DenseMap<OMPDependClause *, OperatorOffsetTy>;
88  /// Kind of the declaration used in the uses_allocators clauses.
89  enum class UsesAllocatorsDeclKind {
90  /// Predefined allocator
91  PredefinedAllocator,
92  /// User-defined allocator
93  UserDefinedAllocator,
94  /// The declaration that represents an allocator trait.
95  AllocatorTrait,
96  };
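 // Illustrative example (not part of this file) of how these kinds map onto a
 // 'uses_allocators' clause, assuming 'my_traits' is an omp_alloctrait_t array:
 //
 //   #pragma omp target uses_allocators(omp_high_bw_mem_alloc, my_alloc(my_traits))
 //
 // omp_high_bw_mem_alloc -> PredefinedAllocator, my_alloc -> UserDefinedAllocator,
 // my_traits -> AllocatorTrait.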
97 
98 private:
99  struct DSAInfo {
100  OpenMPClauseKind Attributes = OMPC_unknown;
101  unsigned Modifier = 0;
102  /// Pointer to a reference expression and a flag which shows that the
103  /// variable is marked as lastprivate(true) or not (false).
104  llvm::PointerIntPair<const Expr *, 1, bool> RefExpr;
105  DeclRefExpr *PrivateCopy = nullptr;
106  /// true if the attribute is applied to the pointee, not the variable
107  /// itself.
108  bool AppliedToPointee = false;
109  };
110  using DeclSAMapTy = llvm::SmallDenseMap<const ValueDecl *, DSAInfo, 8>;
111  using UsedRefMapTy = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 8>;
112  using LCDeclInfo = std::pair<unsigned, VarDecl *>;
113  using LoopControlVariablesMapTy =
114  llvm::SmallDenseMap<const ValueDecl *, LCDeclInfo, 8>;
115  /// Struct that associates a component with the clause kind where they are
116  /// found.
117  struct MappedExprComponentTy {
118  OMPClauseMappableExprCommon::MappableExprComponentLists Components;
119  OpenMPClauseKind Kind = OMPC_unknown;
120  };
121  using MappedExprComponentsTy =
122  llvm::DenseMap<const ValueDecl *, MappedExprComponentTy>;
123  using CriticalsWithHintsTy =
124  llvm::StringMap<std::pair<const OMPCriticalDirective *, llvm::APSInt>>;
125  struct ReductionData {
126  using BOKPtrType = llvm::PointerEmbeddedInt<BinaryOperatorKind, 16>;
127  SourceRange ReductionRange;
128  llvm::PointerUnion<const Expr *, BOKPtrType> ReductionOp;
129  ReductionData() = default;
130  void set(BinaryOperatorKind BO, SourceRange RR) {
131  ReductionRange = RR;
132  ReductionOp = BO;
133  }
134  void set(const Expr *RefExpr, SourceRange RR) {
135  ReductionRange = RR;
136  ReductionOp = RefExpr;
137  }
138  };
139  using DeclReductionMapTy =
140  llvm::SmallDenseMap<const ValueDecl *, ReductionData, 4>;
141  struct DefaultmapInfo {
142  OpenMPDefaultmapClauseModifier ImplicitBehavior =
143      OMPC_DEFAULTMAP_MODIFIER_unknown;
144  SourceLocation SLoc;
145  DefaultmapInfo() = default;
146  DefaultmapInfo(OpenMPDefaultmapClauseModifier M, SourceLocation Loc)
147  : ImplicitBehavior(M), SLoc(Loc) {}
148  };
149 
150  struct SharingMapTy {
151  DeclSAMapTy SharingMap;
152  DeclReductionMapTy ReductionMap;
153  UsedRefMapTy AlignedMap;
154  UsedRefMapTy NontemporalMap;
155  MappedExprComponentsTy MappedExprComponents;
156  LoopControlVariablesMapTy LCVMap;
157  DefaultDataSharingAttributes DefaultAttr = DSA_unspecified;
158  SourceLocation DefaultAttrLoc;
159  DefaultmapInfo DefaultmapMap[OMPC_DEFAULTMAP_unknown];
160  OpenMPDirectiveKind Directive = OMPD_unknown;
161  DeclarationNameInfo DirectiveName;
162  Scope *CurScope = nullptr;
163  DeclContext *Context = nullptr;
164  SourceLocation ConstructLoc;
165  /// Set of 'depend' clauses with 'sink|source' dependence kind. Required to
166  /// get the data (loop counters etc.) about enclosing loop-based construct.
167  /// This data is required during codegen.
168  DoacrossDependMapTy DoacrossDepends;
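 // Illustrative doacross example (not part of this file): for
 //
 //   #pragma omp for ordered(2)
 //   for (int i = 0; i < n; ++i)
 //     for (int j = 0; j < m; ++j) {
 //       #pragma omp ordered depend(sink : i - 1, j)
 //       work(i, j);
 //       #pragma omp ordered depend(source)
 //     }
 //
 // each 'depend' clause is recorded here together with its operator/offset
 // pairs (e.g. '-' and 1 for 'i - 1') so codegen can emit the doacross calls.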
169  /// First member (Expr *) contains the optional argument of the 'ordered'
170  /// clause, the second one is the 'ordered' clause itself; None when the
171  /// region has no 'ordered' clause.
172  llvm::Optional<std::pair<const Expr *, OMPOrderedClause *>> OrderedRegion;
173  unsigned AssociatedLoops = 1;
174  bool HasMutipleLoops = false;
175  const Decl *PossiblyLoopCounter = nullptr;
176  bool NowaitRegion = false;
177  bool CancelRegion = false;
178  bool LoopStart = false;
179  bool BodyComplete = false;
180  SourceLocation PrevScanLocation;
181  SourceLocation PrevOrderedLocation;
182  SourceLocation InnerTeamsRegionLoc;
183  /// Reference to the taskgroup task_reduction reference expression.
184  Expr *TaskgroupReductionRef = nullptr;
185  llvm::DenseSet<QualType> MappedClassesQualTypes;
186  SmallVector<Expr *, 4> InnerUsedAllocators;
187  llvm::DenseSet<CanonicalDeclPtr<Decl>> ImplicitTaskFirstprivates;
188  /// List of globals marked as declare target link in this target region
189  /// (isOpenMPTargetExecutionDirective(Directive) == true).
190  llvm::SmallVector<DeclRefExpr *, 4> DeclareTargetLinkVarDecls;
191  /// List of decls used in inclusive/exclusive clauses of the scan directive.
192  llvm::DenseSet<CanonicalDeclPtr<Decl>> UsedInScanDirective;
193  llvm::DenseMap<CanonicalDeclPtr<const Decl>, UsesAllocatorsDeclKind>
194  UsesAllocatorsDecls;
195  Expr *DeclareMapperVar = nullptr;
196  SharingMapTy(OpenMPDirectiveKind DKind, DeclarationNameInfo Name,
197  Scope *CurScope, SourceLocation Loc)
198  : Directive(DKind), DirectiveName(Name), CurScope(CurScope),
199  ConstructLoc(Loc) {}
200  SharingMapTy() = default;
201  };
202 
203  using StackTy = SmallVector<SharingMapTy, 4>;
204 
205  /// Stack of used declaration and their data-sharing attributes.
206  DeclSAMapTy Threadprivates;
207  const FunctionScopeInfo *CurrentNonCapturingFunctionScope = nullptr;
208  SmallVector<std::pair<StackTy, const FunctionScopeInfo *>, 4> Stack;
209  /// true if the check for DSA must be performed on the parent directive,
210  /// false if on the current directive.
211  OpenMPClauseKind ClauseKindMode = OMPC_unknown;
212  Sema &SemaRef;
213  bool ForceCapturing = false;
214  /// true if all the variables in the target executable directives must be
215  /// captured by reference.
216  bool ForceCaptureByReferenceInTargetExecutable = false;
217  CriticalsWithHintsTy Criticals;
218  unsigned IgnoredStackElements = 0;
219 
220  /// Iterators over the stack iterate in order from innermost to outermost
221  /// directive.
222  using const_iterator = StackTy::const_reverse_iterator;
223  const_iterator begin() const {
224  return Stack.empty() ? const_iterator()
225  : Stack.back().first.rbegin() + IgnoredStackElements;
226  }
227  const_iterator end() const {
228  return Stack.empty() ? const_iterator() : Stack.back().first.rend();
229  }
230  using iterator = StackTy::reverse_iterator;
231  iterator begin() {
232  return Stack.empty() ? iterator()
233  : Stack.back().first.rbegin() + IgnoredStackElements;
234  }
235  iterator end() {
236  return Stack.empty() ? iterator() : Stack.back().first.rend();
237  }
238 
239  // Convenience operations to get at the elements of the stack.
240 
241  bool isStackEmpty() const {
242  return Stack.empty() ||
243  Stack.back().second != CurrentNonCapturingFunctionScope ||
244  Stack.back().first.size() <= IgnoredStackElements;
245  }
246  size_t getStackSize() const {
247  return isStackEmpty() ? 0
248  : Stack.back().first.size() - IgnoredStackElements;
249  }
250 
251  SharingMapTy *getTopOfStackOrNull() {
252  size_t Size = getStackSize();
253  if (Size == 0)
254  return nullptr;
255  return &Stack.back().first[Size - 1];
256  }
257  const SharingMapTy *getTopOfStackOrNull() const {
258  return const_cast<DSAStackTy&>(*this).getTopOfStackOrNull();
259  }
260  SharingMapTy &getTopOfStack() {
261  assert(!isStackEmpty() && "no current directive");
262  return *getTopOfStackOrNull();
263  }
264  const SharingMapTy &getTopOfStack() const {
265  return const_cast<DSAStackTy&>(*this).getTopOfStack();
266  }
267 
268  SharingMapTy *getSecondOnStackOrNull() {
269  size_t Size = getStackSize();
270  if (Size <= 1)
271  return nullptr;
272  return &Stack.back().first[Size - 2];
273  }
274  const SharingMapTy *getSecondOnStackOrNull() const {
275  return const_cast<DSAStackTy&>(*this).getSecondOnStackOrNull();
276  }
277 
278  /// Get the stack element at a certain level (previously returned by
279  /// \c getNestingLevel).
280  ///
281  /// Note that nesting levels count from outermost to innermost, and this is
282  /// the reverse of our iteration order where new inner levels are pushed at
283  /// the front of the stack.
284  SharingMapTy &getStackElemAtLevel(unsigned Level) {
285  assert(Level < getStackSize() && "no such stack element");
286  return Stack.back().first[Level];
287  }
288  const SharingMapTy &getStackElemAtLevel(unsigned Level) const {
289  return const_cast<DSAStackTy&>(*this).getStackElemAtLevel(Level);
290  }
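 // For orientation (illustrative, not part of this file): with
 //
 //   #pragma omp target      // nesting level 0 (outermost)
 //   #pragma omp parallel    // nesting level 1 (innermost)
 //
 // getStackElemAtLevel(0) is the 'target' entry, while begin() points at the
 // 'parallel' entry, since iteration runs from innermost to outermost.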
291 
292  DSAVarData getDSA(const_iterator &Iter, ValueDecl *D) const;
293 
294  /// Checks if the variable is a local for OpenMP region.
295  bool isOpenMPLocal(VarDecl *D, const_iterator Iter) const;
296 
297  /// Vector of previously declared requires directives.
298  SmallVector<const OMPRequiresDecl *, 2> RequiresDecls;
299  /// omp_allocator_handle_t type.
300  QualType OMPAllocatorHandleT;
301  /// omp_depend_t type.
302  QualType OMPDependT;
303  /// omp_event_handle_t type.
304  QualType OMPEventHandleT;
305  /// omp_alloctrait_t type.
306  QualType OMPAlloctraitT;
307  /// Expression for the predefined allocators.
308  Expr *OMPPredefinedAllocators[OMPAllocateDeclAttr::OMPUserDefinedMemAlloc] = {
309  nullptr};
310  /// Vector of previously encountered target directives
311  SmallVector<SourceLocation, 2> TargetLocations;
312  SourceLocation AtomicLocation;
313  /// Vector of declare variant construct traits.
314  SmallVector<llvm::omp::TraitProperty, 8> ConstructTraits;
315 
316 public:
317  explicit DSAStackTy(Sema &S) : SemaRef(S) {}
318 
319  /// Sets omp_allocator_handle_t type.
320  void setOMPAllocatorHandleT(QualType Ty) { OMPAllocatorHandleT = Ty; }
321  /// Gets omp_allocator_handle_t type.
322  QualType getOMPAllocatorHandleT() const { return OMPAllocatorHandleT; }
323  /// Sets omp_alloctrait_t type.
324  void setOMPAlloctraitT(QualType Ty) { OMPAlloctraitT = Ty; }
325  /// Gets omp_alloctrait_t type.
326  QualType getOMPAlloctraitT() const { return OMPAlloctraitT; }
327  /// Sets the given default allocator.
328  void setAllocator(OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind,
329  Expr *Allocator) {
330  OMPPredefinedAllocators[AllocatorKind] = Allocator;
331  }
332  /// Returns the specified default allocator.
333  Expr *getAllocator(OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind) const {
334  return OMPPredefinedAllocators[AllocatorKind];
335  }
336  /// Sets omp_depend_t type.
337  void setOMPDependT(QualType Ty) { OMPDependT = Ty; }
338  /// Gets omp_depend_t type.
339  QualType getOMPDependT() const { return OMPDependT; }
340 
341  /// Sets omp_event_handle_t type.
342  void setOMPEventHandleT(QualType Ty) { OMPEventHandleT = Ty; }
343  /// Gets omp_event_handle_t type.
344  QualType getOMPEventHandleT() const { return OMPEventHandleT; }
345 
346  bool isClauseParsingMode() const { return ClauseKindMode != OMPC_unknown; }
347  OpenMPClauseKind getClauseParsingMode() const {
348  assert(isClauseParsingMode() && "Must be in clause parsing mode.");
349  return ClauseKindMode;
350  }
351  void setClauseParsingMode(OpenMPClauseKind K) { ClauseKindMode = K; }
352 
353  bool isBodyComplete() const {
354  const SharingMapTy *Top = getTopOfStackOrNull();
355  return Top && Top->BodyComplete;
356  }
357  void setBodyComplete() {
358  getTopOfStack().BodyComplete = true;
359  }
360 
361  bool isForceVarCapturing() const { return ForceCapturing; }
362  void setForceVarCapturing(bool V) { ForceCapturing = V; }
363 
364  void setForceCaptureByReferenceInTargetExecutable(bool V) {
365  ForceCaptureByReferenceInTargetExecutable = V;
366  }
367  bool isForceCaptureByReferenceInTargetExecutable() const {
368  return ForceCaptureByReferenceInTargetExecutable;
369  }
370 
371  void push(OpenMPDirectiveKind DKind, const DeclarationNameInfo &DirName,
372  Scope *CurScope, SourceLocation Loc) {
373  assert(!IgnoredStackElements &&
374  "cannot change stack while ignoring elements");
375  if (Stack.empty() ||
376  Stack.back().second != CurrentNonCapturingFunctionScope)
377  Stack.emplace_back(StackTy(), CurrentNonCapturingFunctionScope);
378  Stack.back().first.emplace_back(DKind, DirName, CurScope, Loc);
379  Stack.back().first.back().DefaultAttrLoc = Loc;
380  }
381 
382  void pop() {
383  assert(!IgnoredStackElements &&
384  "cannot change stack while ignoring elements");
385  assert(!Stack.back().first.empty() &&
386  "Data-sharing attributes stack is empty!");
387  Stack.back().first.pop_back();
388  }
389 
390  /// RAII object to temporarily leave the scope of a directive when we want to
391  /// logically operate in its parent.
392  class ParentDirectiveScope {
393  DSAStackTy &Self;
394  bool Active;
395  public:
396  ParentDirectiveScope(DSAStackTy &Self, bool Activate)
397  : Self(Self), Active(false) {
398  if (Activate)
399  enable();
400  }
401  ~ParentDirectiveScope() { disable(); }
402  void disable() {
403  if (Active) {
404  --Self.IgnoredStackElements;
405  Active = false;
406  }
407  }
408  void enable() {
409  if (!Active) {
410  ++Self.IgnoredStackElements;
411  Active = true;
412  }
413  }
414  };
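 // Hypothetical usage sketch (names assumed, not from this file):
 //
 //   {
 //     DSAStackTy::ParentDirectiveScope ParentScope(*DSAStack, /*Activate=*/true);
 //     // While active, queries such as getCurrentDirective() and getTopDSA()
 //     // resolve against the parent of the current directive.
 //   } // destructor restores the original top of the stack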
415 
416  /// Marks that loop parsing has started.
417  void loopInit() {
418  assert(isOpenMPLoopDirective(getCurrentDirective()) &&
419  "Expected loop-based directive.");
420  getTopOfStack().LoopStart = true;
421  }
422  /// Start capturing of the variables in the loop context.
423  void loopStart() {
424  assert(isOpenMPLoopDirective(getCurrentDirective()) &&
425  "Expected loop-based directive.");
426  getTopOfStack().LoopStart = false;
427  }
428  /// true, if variables are captured, false otherwise.
429  bool isLoopStarted() const {
430  assert(isOpenMPLoopDirective(getCurrentDirective()) &&
431  "Expected loop-based directive.");
432  return !getTopOfStack().LoopStart;
433  }
434  /// Marks (or clears) declaration as possibly loop counter.
435  void resetPossibleLoopCounter(const Decl *D = nullptr) {
436  getTopOfStack().PossiblyLoopCounter =
437  D ? D->getCanonicalDecl() : D;
438  }
439  /// Gets the possible loop counter decl.
440  const Decl *getPossiblyLoopCunter() const {
441  return getTopOfStack().PossiblyLoopCounter;
442  }
443  /// Start new OpenMP region stack in new non-capturing function.
444  void pushFunction() {
445  assert(!IgnoredStackElements &&
446  "cannot change stack while ignoring elements");
447  const FunctionScopeInfo *CurFnScope = SemaRef.getCurFunction();
448  assert(!isa<CapturingScopeInfo>(CurFnScope));
449  CurrentNonCapturingFunctionScope = CurFnScope;
450  }
451  /// Pop region stack for non-capturing function.
452  void popFunction(const FunctionScopeInfo *OldFSI) {
453  assert(!IgnoredStackElements &&
454  "cannot change stack while ignoring elements");
455  if (!Stack.empty() && Stack.back().second == OldFSI) {
456  assert(Stack.back().first.empty());
457  Stack.pop_back();
458  }
459  CurrentNonCapturingFunctionScope = nullptr;
460  for (const FunctionScopeInfo *FSI : llvm::reverse(SemaRef.FunctionScopes)) {
461  if (!isa<CapturingScopeInfo>(FSI)) {
462  CurrentNonCapturingFunctionScope = FSI;
463  break;
464  }
465  }
466  }
467 
468  void addCriticalWithHint(const OMPCriticalDirective *D, llvm::APSInt Hint) {
469  Criticals.try_emplace(D->getDirectiveName().getAsString(), D, Hint);
470  }
471  const std::pair<const OMPCriticalDirective *, llvm::APSInt>
472  getCriticalWithHint(const DeclarationNameInfo &Name) const {
473  auto I = Criticals.find(Name.getAsString());
474  if (I != Criticals.end())
475  return I->second;
476  return std::make_pair(nullptr, llvm::APSInt());
477  }
478  /// If 'aligned' declaration for given variable \a D was not seen yet,
479  /// add it and return NULL; otherwise return previous occurrence's expression
480  /// for diagnostics.
481  const Expr *addUniqueAligned(const ValueDecl *D, const Expr *NewDE);
482  /// If 'nontemporal' declaration for given variable \a D was not seen yet,
483  /// add it and return NULL; otherwise return previous occurrence's expression
484  /// for diagnostics.
485  const Expr *addUniqueNontemporal(const ValueDecl *D, const Expr *NewDE);
486 
487  /// Register specified variable as loop control variable.
488  void addLoopControlVariable(const ValueDecl *D, VarDecl *Capture);
489  /// Check if the specified variable is a loop control variable for
490  /// current region.
491  /// \return The index of the loop control variable in the list of associated
492  /// for-loops (from outer to inner).
493  const LCDeclInfo isLoopControlVariable(const ValueDecl *D) const;
494  /// Check if the specified variable is a loop control variable for
495  /// parent region.
496  /// \return The index of the loop control variable in the list of associated
497  /// for-loops (from outer to inner).
498  const LCDeclInfo isParentLoopControlVariable(const ValueDecl *D) const;
499  /// Check if the specified variable is a loop control variable for
500  /// current region.
501  /// \return The index of the loop control variable in the list of associated
502  /// for-loops (from outer to inner).
503  const LCDeclInfo isLoopControlVariable(const ValueDecl *D,
504  unsigned Level) const;
505  /// Get the loop control variable for the I-th loop (or nullptr) in
506  /// parent directive.
507  const ValueDecl *getParentLoopControlVariable(unsigned I) const;
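 // Illustrative example (not part of this file): for
 //
 //   #pragma omp for collapse(2)
 //   for (int i = 0; i < n; ++i)
 //     for (int j = 0; j < m; ++j)
 //       body(i, j);
 //
 // 'i' and 'j' are registered as loop control variables with the 1-based
 // indices 1 and 2; isLoopControlVariable() returns {0, nullptr} for any
 // declaration that is not a loop control variable of the region.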
508 
509  /// Marks the specified decl \p D as used in scan directive.
510  void markDeclAsUsedInScanDirective(ValueDecl *D) {
511  if (SharingMapTy *Stack = getSecondOnStackOrNull())
512  Stack->UsedInScanDirective.insert(D);
513  }
514 
515  /// Checks if the specified declaration was used in the inner scan directive.
516  bool isUsedInScanDirective(ValueDecl *D) const {
517  if (const SharingMapTy *Stack = getTopOfStackOrNull())
518  return Stack->UsedInScanDirective.contains(D);
519  return false;
520  }
521 
522  /// Adds explicit data sharing attribute to the specified declaration.
523  void addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
524  DeclRefExpr *PrivateCopy = nullptr, unsigned Modifier = 0,
525  bool AppliedToPointee = false);
526 
527  /// Adds additional information for the reduction items with the reduction id
528  /// represented as an operator.
529  void addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
530  BinaryOperatorKind BOK);
531  /// Adds additional information for the reduction items with the reduction id
532  /// represented as reduction identifier.
533  void addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
534  const Expr *ReductionRef);
535  /// Returns the location and reduction operation from the innermost parent
536  /// region for the given \p D.
537  const DSAVarData
538  getTopMostTaskgroupReductionData(const ValueDecl *D, SourceRange &SR,
539  BinaryOperatorKind &BOK,
540  Expr *&TaskgroupDescriptor) const;
541  /// Returns the location and reduction operation from the innermost parent
542  /// region for the given \p D.
543  const DSAVarData
544  getTopMostTaskgroupReductionData(const ValueDecl *D, SourceRange &SR,
545  const Expr *&ReductionRef,
546  Expr *&TaskgroupDescriptor) const;
547  /// Return reduction reference expression for the current taskgroup or
548  /// parallel/worksharing directives with task reductions.
549  Expr *getTaskgroupReductionRef() const {
550  assert((getTopOfStack().Directive == OMPD_taskgroup ||
551  ((isOpenMPParallelDirective(getTopOfStack().Directive) ||
552  isOpenMPWorksharingDirective(getTopOfStack().Directive)) &&
553  !isOpenMPSimdDirective(getTopOfStack().Directive))) &&
554  "taskgroup reference expression requested for non taskgroup or "
555  "parallel/worksharing directive.");
556  return getTopOfStack().TaskgroupReductionRef;
557  }
558  /// Checks if the given \p VD declaration is actually a taskgroup reduction
559  /// descriptor variable at the \p Level of OpenMP regions.
560  bool isTaskgroupReductionRef(const ValueDecl *VD, unsigned Level) const {
561  return getStackElemAtLevel(Level).TaskgroupReductionRef &&
562  cast<DeclRefExpr>(getStackElemAtLevel(Level).TaskgroupReductionRef)
563  ->getDecl() == VD;
564  }
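 // Illustrative example (not part of this file): for
 //
 //   #pragma omp taskgroup task_reduction(+ : sum)
 //   {
 //     #pragma omp task in_reduction(+ : sum)
 //     sum += compute();
 //   }
 //
 // TaskgroupReductionRef refers to the internal '.task_red.' descriptor
 // variable that ties the task reductions of the region together.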
565 
566  /// Returns data sharing attributes from top of the stack for the
567  /// specified declaration.
568  const DSAVarData getTopDSA(ValueDecl *D, bool FromParent);
569  /// Returns data-sharing attributes for the specified declaration.
570  const DSAVarData getImplicitDSA(ValueDecl *D, bool FromParent) const;
571  /// Returns data-sharing attributes for the specified declaration.
572  const DSAVarData getImplicitDSA(ValueDecl *D, unsigned Level) const;
573  /// Checks if the specified variable has data-sharing attributes which
574  /// match the specified \a CPred predicate in any directive which matches
575  /// the \a DPred predicate.
576  const DSAVarData
577  hasDSA(ValueDecl *D,
578  const llvm::function_ref<bool(OpenMPClauseKind, bool)> CPred,
579  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
580  bool FromParent) const;
581  /// Checks if the specified variable has data-sharing attributes which
582  /// match the specified \a CPred predicate in any innermost directive which
583  /// matches the \a DPred predicate.
584  const DSAVarData
585  hasInnermostDSA(ValueDecl *D,
586  const llvm::function_ref<bool(OpenMPClauseKind, bool)> CPred,
587  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
588  bool FromParent) const;
589  /// Checks if the specified variable has explicit data-sharing
590  /// attributes which match the specified \a CPred predicate at the specified
591  /// OpenMP region.
592  bool
593  hasExplicitDSA(const ValueDecl *D,
594  const llvm::function_ref<bool(OpenMPClauseKind, bool)> CPred,
595  unsigned Level, bool NotLastprivate = false) const;
596 
597  /// Returns true if the directive at level \p Level matches the
598  /// specified \a DPred predicate.
599  bool hasExplicitDirective(
600  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
601  unsigned Level) const;
602 
603  /// Finds a directive which matches specified \a DPred predicate.
604  bool hasDirective(
605  const llvm::function_ref<bool(
606      OpenMPDirectiveKind, const DeclarationNameInfo &, SourceLocation)>
607  DPred,
608  bool FromParent) const;
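 // Hypothetical call sketch for the predicate-based queries above (assumed
 // surrounding context, not from this file): check whether 'VD' is private in
 // some enclosing parallel region:
 //
 //   DSAStackTy::DSAVarData DVar = Stack->hasDSA(
 //       VD,
 //       [](OpenMPClauseKind C, bool /*AppliedToPointee*/) {
 //         return C == OMPC_private;
 //       },
 //       [](OpenMPDirectiveKind K) { return isOpenMPParallelDirective(K); },
 //       /*FromParent=*/false);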
609 
610  /// Returns currently analyzed directive.
611  OpenMPDirectiveKind getCurrentDirective() const {
612  const SharingMapTy *Top = getTopOfStackOrNull();
613  return Top ? Top->Directive : OMPD_unknown;
614  }
615  /// Returns directive kind at specified level.
616  OpenMPDirectiveKind getDirective(unsigned Level) const {
617  assert(!isStackEmpty() && "No directive at specified level.");
618  return getStackElemAtLevel(Level).Directive;
619  }
620  /// Returns the capture region at the specified level.
621  OpenMPDirectiveKind getCaptureRegion(unsigned Level,
622  unsigned OpenMPCaptureLevel) const {
623  SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
624  getOpenMPCaptureRegions(CaptureRegions, getDirective(Level));
625  return CaptureRegions[OpenMPCaptureLevel];
626  }
627  /// Returns parent directive.
628  OpenMPDirectiveKind getParentDirective() const {
629  const SharingMapTy *Parent = getSecondOnStackOrNull();
630  return Parent ? Parent->Directive : OMPD_unknown;
631  }
632 
633  /// Add requires decl to internal vector
634  void addRequiresDecl(OMPRequiresDecl *RD) {
635  RequiresDecls.push_back(RD);
636  }
637 
638  /// Checks if the defined 'requires' directive has specified type of clause.
639  template <typename ClauseType>
640  bool hasRequiresDeclWithClause() const {
641  return llvm::any_of(RequiresDecls, [](const OMPRequiresDecl *D) {
642  return llvm::any_of(D->clauselists(), [](const OMPClause *C) {
643  return isa<ClauseType>(C);
644  });
645  });
646  }
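 // Illustrative example (not part of this file): after the declaration
 //
 //   #pragma omp requires unified_shared_memory
 //
 // hasRequiresDeclWithClause<OMPUnifiedSharedMemoryClause>() returns true, and
 // a second 'requires' directive repeating that clause is reported as a
 // duplicate by hasDuplicateRequiresClause() below.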
647 
648  /// Checks for a duplicate clause amongst previously declared requires
649  /// directives
650  bool hasDuplicateRequiresClause(ArrayRef<OMPClause *> ClauseList) const {
651  bool IsDuplicate = false;
652  for (OMPClause *CNew : ClauseList) {
653  for (const OMPRequiresDecl *D : RequiresDecls) {
654  for (const OMPClause *CPrev : D->clauselists()) {
655  if (CNew->getClauseKind() == CPrev->getClauseKind()) {
656  SemaRef.Diag(CNew->getBeginLoc(),
657  diag::err_omp_requires_clause_redeclaration)
658  << getOpenMPClauseName(CNew->getClauseKind());
659  SemaRef.Diag(CPrev->getBeginLoc(),
660  diag::note_omp_requires_previous_clause)
661  << getOpenMPClauseName(CPrev->getClauseKind());
662  IsDuplicate = true;
663  }
664  }
665  }
666  }
667  return IsDuplicate;
668  }
669 
670  /// Add location of previously encountered target to internal vector
671  void addTargetDirLocation(SourceLocation LocStart) {
672  TargetLocations.push_back(LocStart);
673  }
674 
675  /// Add location for the first encountered atomic directive.
676  void addAtomicDirectiveLoc(SourceLocation Loc) {
677  if (AtomicLocation.isInvalid())
678  AtomicLocation = Loc;
679  }
680 
681  /// Returns the location of the first encountered atomic directive in the
682  /// module.
683  SourceLocation getAtomicDirectiveLoc() const {
684  return AtomicLocation;
685  }
686 
687  // Return previously encountered target region locations.
688  ArrayRef<SourceLocation> getEncounteredTargetLocs() const {
689  return TargetLocations;
690  }
691 
692  /// Set default data sharing attribute to none.
693  void setDefaultDSANone(SourceLocation Loc) {
694  getTopOfStack().DefaultAttr = DSA_none;
695  getTopOfStack().DefaultAttrLoc = Loc;
696  }
697  /// Set default data sharing attribute to shared.
698  void setDefaultDSAShared(SourceLocation Loc) {
699  getTopOfStack().DefaultAttr = DSA_shared;
700  getTopOfStack().DefaultAttrLoc = Loc;
701  }
702  /// Set default data sharing attribute to firstprivate.
703  void setDefaultDSAFirstPrivate(SourceLocation Loc) {
704  getTopOfStack().DefaultAttr = DSA_firstprivate;
705  getTopOfStack().DefaultAttrLoc = Loc;
706  }
707  /// Set default data mapping attribute to Modifier:Kind.
708  void setDefaultDMAAttr(OpenMPDefaultmapClauseModifier M,
709      OpenMPDefaultmapClauseKind Kind,
710      SourceLocation Loc) {
711  DefaultmapInfo &DMI = getTopOfStack().DefaultmapMap[Kind];
712  DMI.ImplicitBehavior = M;
713  DMI.SLoc = Loc;
714  }
715  /// Check whether the implicit-behavior has been set in defaultmap
716  bool checkDefaultmapCategory(OpenMPDefaultmapClauseKind VariableCategory) {
717  if (VariableCategory == OMPC_DEFAULTMAP_unknown)
718  return getTopOfStack()
719  .DefaultmapMap[OMPC_DEFAULTMAP_aggregate]
720  .ImplicitBehavior != OMPC_DEFAULTMAP_MODIFIER_unknown ||
721  getTopOfStack()
722  .DefaultmapMap[OMPC_DEFAULTMAP_scalar]
723  .ImplicitBehavior != OMPC_DEFAULTMAP_MODIFIER_unknown ||
724  getTopOfStack()
725  .DefaultmapMap[OMPC_DEFAULTMAP_pointer]
726  .ImplicitBehavior != OMPC_DEFAULTMAP_MODIFIER_unknown;
727  return getTopOfStack().DefaultmapMap[VariableCategory].ImplicitBehavior !=
728      OMPC_DEFAULTMAP_MODIFIER_unknown;
729  }
730 
731  ArrayRef<llvm::omp::TraitProperty> getConstructTraits() {
732  return ConstructTraits;
733  }
734  void handleConstructTrait(ArrayRef<llvm::omp::TraitProperty> Traits,
735  bool ScopeEntry) {
736  if (ScopeEntry)
737  ConstructTraits.append(Traits.begin(), Traits.end());
738  else
739  for (llvm::omp::TraitProperty Trait : llvm::reverse(Traits)) {
740  llvm::omp::TraitProperty Top = ConstructTraits.pop_back_val();
741  assert(Top == Trait && "Something left a trait on the stack!");
742  (void)Trait;
743  (void)Top;
744  }
745  }
746 
747  DefaultDataSharingAttributes getDefaultDSA(unsigned Level) const {
748  return getStackSize() <= Level ? DSA_unspecified
749  : getStackElemAtLevel(Level).DefaultAttr;
750  }
751  DefaultDataSharingAttributes getDefaultDSA() const {
752  return isStackEmpty() ? DSA_unspecified
753  : getTopOfStack().DefaultAttr;
754  }
755  SourceLocation getDefaultDSALocation() const {
756  return isStackEmpty() ? SourceLocation()
757  : getTopOfStack().DefaultAttrLoc;
758  }
759  OpenMPDefaultmapClauseModifier
760  getDefaultmapModifier(OpenMPDefaultmapClauseKind Kind) const {
761  return isStackEmpty()
762      ? OMPC_DEFAULTMAP_MODIFIER_unknown
763      : getTopOfStack().DefaultmapMap[Kind].ImplicitBehavior;
764  }
765  OpenMPDefaultmapClauseModifier
766  getDefaultmapModifierAtLevel(unsigned Level,
767      OpenMPDefaultmapClauseKind Kind) const {
768  return getStackElemAtLevel(Level).DefaultmapMap[Kind].ImplicitBehavior;
769  }
770  bool isDefaultmapCapturedByRef(unsigned Level,
771      OpenMPDefaultmapClauseKind Kind) const {
772  OpenMPDefaultmapClauseModifier M =
773      getDefaultmapModifierAtLevel(Level, Kind);
774  if (Kind == OMPC_DEFAULTMAP_scalar || Kind == OMPC_DEFAULTMAP_pointer) {
775  return (M == OMPC_DEFAULTMAP_MODIFIER_alloc) ||
776  (M == OMPC_DEFAULTMAP_MODIFIER_to) ||
777  (M == OMPC_DEFAULTMAP_MODIFIER_from) ||
778  (M == OMPC_DEFAULTMAP_MODIFIER_tofrom);
779  }
780  return true;
781  }
782  static bool mustBeFirstprivateBase(OpenMPDefaultmapClauseModifier M,
783      OpenMPDefaultmapClauseKind Kind) {
784  switch (Kind) {
785  case OMPC_DEFAULTMAP_scalar:
786  case OMPC_DEFAULTMAP_pointer:
787  return (M == OMPC_DEFAULTMAP_MODIFIER_unknown) ||
788  (M == OMPC_DEFAULTMAP_MODIFIER_firstprivate) ||
789  (M == OMPC_DEFAULTMAP_MODIFIER_default);
790  case OMPC_DEFAULTMAP_aggregate:
791  return M == OMPC_DEFAULTMAP_MODIFIER_firstprivate;
792  default:
793  break;
794  }
795  llvm_unreachable("Unexpected OpenMPDefaultmapClauseKind enum");
796  }
797  bool mustBeFirstprivateAtLevel(unsigned Level,
798      OpenMPDefaultmapClauseKind Kind) const {
799  OpenMPDefaultmapClauseModifier M =
800      getDefaultmapModifierAtLevel(Level, Kind);
801  return mustBeFirstprivateBase(M, Kind);
802  }
803  bool mustBeFirstprivate(OpenMPDefaultmapClauseKind Kind) const {
804  OpenMPDefaultmapClauseModifier M = getDefaultmapModifier(Kind);
805  return mustBeFirstprivateBase(M, Kind);
806  }
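 // Illustrative defaultmap examples (not part of this file):
 //
 //   #pragma omp target defaultmap(firstprivate : scalar)  // scalars must be firstprivate
 //   #pragma omp target defaultmap(tofrom : scalar)        // scalars are captured by reference/mapped
 //
 // With no defaultmap clause (OMPC_DEFAULTMAP_MODIFIER_unknown above), scalars
 // and pointers default to firstprivate behavior.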
807 
808  /// Checks if the specified variable is a threadprivate.
809  bool isThreadPrivate(VarDecl *D) {
810  const DSAVarData DVar = getTopDSA(D, false);
811  return isOpenMPThreadPrivate(DVar.CKind);
812  }
813 
814  /// Marks current region as ordered (it has an 'ordered' clause).
815  void setOrderedRegion(bool IsOrdered, const Expr *Param,
816  OMPOrderedClause *Clause) {
817  if (IsOrdered)
818  getTopOfStack().OrderedRegion.emplace(Param, Clause);
819  else
820  getTopOfStack().OrderedRegion.reset();
821  }
822  /// Returns true, if region is ordered (has associated 'ordered' clause),
823  /// false - otherwise.
824  bool isOrderedRegion() const {
825  if (const SharingMapTy *Top = getTopOfStackOrNull())
826  return Top->OrderedRegion.hasValue();
827  return false;
828  }
829  /// Returns optional parameter for the ordered region.
830  std::pair<const Expr *, OMPOrderedClause *> getOrderedRegionParam() const {
831  if (const SharingMapTy *Top = getTopOfStackOrNull())
832  if (Top->OrderedRegion.hasValue())
833  return Top->OrderedRegion.getValue();
834  return std::make_pair(nullptr, nullptr);
835  }
836  /// Returns true, if parent region is ordered (has associated
837  /// 'ordered' clause), false - otherwise.
838  bool isParentOrderedRegion() const {
839  if (const SharingMapTy *Parent = getSecondOnStackOrNull())
840  return Parent->OrderedRegion.hasValue();
841  return false;
842  }
843  /// Returns optional parameter for the ordered region.
844  std::pair<const Expr *, OMPOrderedClause *>
845  getParentOrderedRegionParam() const {
846  if (const SharingMapTy *Parent = getSecondOnStackOrNull())
847  if (Parent->OrderedRegion.hasValue())
848  return Parent->OrderedRegion.getValue();
849  return std::make_pair(nullptr, nullptr);
850  }
851  /// Marks current region as nowait (it has a 'nowait' clause).
852  void setNowaitRegion(bool IsNowait = true) {
853  getTopOfStack().NowaitRegion = IsNowait;
854  }
855  /// Returns true, if parent region is nowait (has associated
856  /// 'nowait' clause), false - otherwise.
857  bool isParentNowaitRegion() const {
858  if (const SharingMapTy *Parent = getSecondOnStackOrNull())
859  return Parent->NowaitRegion;
860  return false;
861  }
862  /// Marks parent region as cancel region.
863  void setParentCancelRegion(bool Cancel = true) {
864  if (SharingMapTy *Parent = getSecondOnStackOrNull())
865  Parent->CancelRegion |= Cancel;
866  }
867  /// Return true if current region has inner cancel construct.
868  bool isCancelRegion() const {
869  const SharingMapTy *Top = getTopOfStackOrNull();
870  return Top ? Top->CancelRegion : false;
871  }
872 
873  /// Mark that parent region already has scan directive.
874  void setParentHasScanDirective(SourceLocation Loc) {
875  if (SharingMapTy *Parent = getSecondOnStackOrNull())
876  Parent->PrevScanLocation = Loc;
877  }
878  /// Return true if the parent region already has a scan directive.
879  bool doesParentHasScanDirective() const {
880  const SharingMapTy *Top = getSecondOnStackOrNull();
881  return Top ? Top->PrevScanLocation.isValid() : false;
882  }
883  /// Return the location of the scan directive seen in the parent region, if any.
884  SourceLocation getParentScanDirectiveLoc() const {
885  const SharingMapTy *Top = getSecondOnStackOrNull();
886  return Top ? Top->PrevScanLocation : SourceLocation();
887  }
888  /// Mark that parent region already has ordered directive.
889  void setParentHasOrderedDirective(SourceLocation Loc) {
890  if (SharingMapTy *Parent = getSecondOnStackOrNull())
891  Parent->PrevOrderedLocation = Loc;
892  }
893  /// Return true if the parent region already has an ordered directive.
894  bool doesParentHasOrderedDirective() const {
895  const SharingMapTy *Top = getSecondOnStackOrNull();
896  return Top ? Top->PrevOrderedLocation.isValid() : false;
897  }
898  /// Returns the location of the previously specified ordered directive.
899  SourceLocation getParentOrderedDirectiveLoc() const {
900  const SharingMapTy *Top = getSecondOnStackOrNull();
901  return Top ? Top->PrevOrderedLocation : SourceLocation();
902  }
903 
904  /// Set collapse value for the region.
905  void setAssociatedLoops(unsigned Val) {
906  getTopOfStack().AssociatedLoops = Val;
907  if (Val > 1)
908  getTopOfStack().HasMutipleLoops = true;
909  }
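 // Illustrative example (not part of this file):
 //
 //   #pragma omp parallel for collapse(2)
 //
 // calls setAssociatedLoops(2), so the region is associated with two nested
 // loops and HasMutipleLoops becomes true.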
910  /// Return collapse value for region.
911  unsigned getAssociatedLoops() const {
912  const SharingMapTy *Top = getTopOfStackOrNull();
913  return Top ? Top->AssociatedLoops : 0;
914  }
915  /// Returns true if the construct is associated with multiple loops.
916  bool hasMutipleLoops() const {
917  const SharingMapTy *Top = getTopOfStackOrNull();
918  return Top ? Top->HasMutipleLoops : false;
919  }
920 
921  /// Marks current target region as one with closely nested teams
922  /// region.
923  void setParentTeamsRegionLoc(SourceLocation TeamsRegionLoc) {
924  if (SharingMapTy *Parent = getSecondOnStackOrNull())
925  Parent->InnerTeamsRegionLoc = TeamsRegionLoc;
926  }
927  /// Returns true, if current region has closely nested teams region.
928  bool hasInnerTeamsRegion() const {
929  return getInnerTeamsRegionLoc().isValid();
930  }
931  /// Returns location of the nested teams region (if any).
932  SourceLocation getInnerTeamsRegionLoc() const {
933  const SharingMapTy *Top = getTopOfStackOrNull();
934  return Top ? Top->InnerTeamsRegionLoc : SourceLocation();
935  }
936 
937  Scope *getCurScope() const {
938  const SharingMapTy *Top = getTopOfStackOrNull();
939  return Top ? Top->CurScope : nullptr;
940  }
941  void setContext(DeclContext *DC) { getTopOfStack().Context = DC; }
942  SourceLocation getConstructLoc() const {
943  const SharingMapTy *Top = getTopOfStackOrNull();
944  return Top ? Top->ConstructLoc : SourceLocation();
945  }
946 
947  /// Do the check specified in \a Check to all component lists and return true
948  /// if any issue is found.
949  bool checkMappableExprComponentListsForDecl(
950  const ValueDecl *VD, bool CurrentRegionOnly,
951  const llvm::function_ref<
952      bool(OMPClauseMappableExprCommon::MappableExprComponentListRef,
953           OpenMPClauseKind)>
954  Check) const {
955  if (isStackEmpty())
956  return false;
957  auto SI = begin();
958  auto SE = end();
959 
960  if (SI == SE)
961  return false;
962 
963  if (CurrentRegionOnly)
964  SE = std::next(SI);
965  else
966  std::advance(SI, 1);
967 
968  for (; SI != SE; ++SI) {
969  auto MI = SI->MappedExprComponents.find(VD);
970  if (MI != SI->MappedExprComponents.end())
971  for (OMPClauseMappableExprCommon::MappableExprComponentListRef L :
972       MI->second.Components)
973  if (Check(L, MI->second.Kind))
974  return true;
975  }
976  return false;
977  }
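 // Illustrative example (not part of this file): for a clause such as
 //
 //   #pragma omp target map(tofrom : s.a[0 : n])
 //
 // the component list stored for 's' roughly corresponds to the chain
 // 's' -> member 'a' -> array section [0:n], and Kind records that it came
 // from a 'map' clause; the check above walks those lists for a declaration.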
978 
979  /// Do the check specified in \a Check to all component lists at a given level
980  /// and return true if any issue is found.
981  bool checkMappableExprComponentListsForDeclAtLevel(
982  const ValueDecl *VD, unsigned Level,
983  const llvm::function_ref<
984      bool(OMPClauseMappableExprCommon::MappableExprComponentListRef,
985           OpenMPClauseKind)>
986  Check) const {
987  if (getStackSize() <= Level)
988  return false;
989 
990  const SharingMapTy &StackElem = getStackElemAtLevel(Level);
991  auto MI = StackElem.MappedExprComponents.find(VD);
992  if (MI != StackElem.MappedExprComponents.end())
993  for (OMPClauseMappableExprCommon::MappableExprComponentListRef L :
994       MI->second.Components)
995  if (Check(L, MI->second.Kind))
996  return true;
997  return false;
998  }
999 
1000  /// Create a new mappable expression component list associated with a given
1001  /// declaration and initialize it with the provided list of components.
1002  void addMappableExpressionComponents(
1003  const ValueDecl *VD,
1004  OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
1005  OpenMPClauseKind WhereFoundClauseKind) {
1006  MappedExprComponentTy &MEC = getTopOfStack().MappedExprComponents[VD];
1007  // Create new entry and append the new components there.
1008  MEC.Components.resize(MEC.Components.size() + 1);
1009  MEC.Components.back().append(Components.begin(), Components.end());
1010  MEC.Kind = WhereFoundClauseKind;
1011  }
1012 
1013  unsigned getNestingLevel() const {
1014  assert(!isStackEmpty());
1015  return getStackSize() - 1;
1016  }
1017  void addDoacrossDependClause(OMPDependClause *C,
1018  const OperatorOffsetTy &OpsOffs) {
1019  SharingMapTy *Parent = getSecondOnStackOrNull();
1020  assert(Parent && isOpenMPWorksharingDirective(Parent->Directive));
1021  Parent->DoacrossDepends.try_emplace(C, OpsOffs);
1022  }
1023  llvm::iterator_range<DoacrossDependMapTy::const_iterator>
1024  getDoacrossDependClauses() const {
1025  const SharingMapTy &StackElem = getTopOfStack();
1026  if (isOpenMPWorksharingDirective(StackElem.Directive)) {
1027  const DoacrossDependMapTy &Ref = StackElem.DoacrossDepends;
1028  return llvm::make_range(Ref.begin(), Ref.end());
1029  }
1030  return llvm::make_range(StackElem.DoacrossDepends.end(),
1031  StackElem.DoacrossDepends.end());
1032  }
1033 
1034  // Store types of classes which have been explicitly mapped
1035  void addMappedClassesQualTypes(QualType QT) {
1036  SharingMapTy &StackElem = getTopOfStack();
1037  StackElem.MappedClassesQualTypes.insert(QT);
1038  }
1039 
1040  // Return set of mapped classes types
1041  bool isClassPreviouslyMapped(QualType QT) const {
1042  const SharingMapTy &StackElem = getTopOfStack();
1043  return StackElem.MappedClassesQualTypes.contains(QT);
1044  }
1045 
1046  /// Adds global declare target to the parent target region.
1047  void addToParentTargetRegionLinkGlobals(DeclRefExpr *E) {
1048  assert(*OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(
1049  E->getDecl()) == OMPDeclareTargetDeclAttr::MT_Link &&
1050  "Expected declare target link global.");
1051  for (auto &Elem : *this) {
1052  if (isOpenMPTargetExecutionDirective(Elem.Directive)) {
1053  Elem.DeclareTargetLinkVarDecls.push_back(E);
1054  return;
1055  }
1056  }
1057  }
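 // Illustrative example (not part of this file):
 //
 //   int G;
 //   #pragma omp declare target link(G)
 //   ...
 //   #pragma omp target
 //   { use(G); }  // the DeclRefExpr for 'G' is recorded in the enclosing
 //                // target region's DeclareTargetLinkVarDecls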
1058 
1059  /// Returns the list of globals with declare target link if current directive
1060  /// is target.
1061  ArrayRef<DeclRefExpr *> getLinkGlobals() const {
1062  assert(isOpenMPTargetExecutionDirective(getCurrentDirective()) &&
1063  "Expected target executable directive.");
1064  return getTopOfStack().DeclareTargetLinkVarDecls;
1065  }
1066 
1067  /// Adds list of allocators expressions.
1068  void addInnerAllocatorExpr(Expr *E) {
1069  getTopOfStack().InnerUsedAllocators.push_back(E);
1070  }
1071  /// Return list of used allocators.
1072  ArrayRef<Expr *> getInnerAllocators() const {
1073  return getTopOfStack().InnerUsedAllocators;
1074  }
1075  /// Marks the declaration as implicitly firstprivate in the task-based
1076  /// regions.
1077  void addImplicitTaskFirstprivate(unsigned Level, Decl *D) {
1078  getStackElemAtLevel(Level).ImplicitTaskFirstprivates.insert(D);
1079  }
1080  /// Checks if the decl is implicitly firstprivate in the task-based region.
1081  bool isImplicitTaskFirstprivate(Decl *D) const {
1082  return getTopOfStack().ImplicitTaskFirstprivates.contains(D);
1083  }
1084 
1085  /// Marks decl as used in uses_allocators clause as the allocator.
1086  void addUsesAllocatorsDecl(const Decl *D, UsesAllocatorsDeclKind Kind) {
1087  getTopOfStack().UsesAllocatorsDecls.try_emplace(D, Kind);
1088  }
1089  /// Checks if specified decl is used in uses allocator clause as the
1090  /// allocator.
1091  Optional<UsesAllocatorsDeclKind> isUsesAllocatorsDecl(unsigned Level,
1092  const Decl *D) const {
1093  const SharingMapTy &StackElem = getTopOfStack();
1094  auto I = StackElem.UsesAllocatorsDecls.find(D);
1095  if (I == StackElem.UsesAllocatorsDecls.end())
1096  return None;
1097  return I->getSecond();
1098  }
1099  Optional<UsesAllocatorsDeclKind> isUsesAllocatorsDecl(const Decl *D) const {
1100  const SharingMapTy &StackElem = getTopOfStack();
1101  auto I = StackElem.UsesAllocatorsDecls.find(D);
1102  if (I == StackElem.UsesAllocatorsDecls.end())
1103  return None;
1104  return I->getSecond();
1105  }
1106 
1107  void addDeclareMapperVarRef(Expr *Ref) {
1108  SharingMapTy &StackElem = getTopOfStack();
1109  StackElem.DeclareMapperVar = Ref;
1110  }
1111  const Expr *getDeclareMapperVarRef() const {
1112  const SharingMapTy *Top = getTopOfStackOrNull();
1113  return Top ? Top->DeclareMapperVar : nullptr;
1114  }
1115 };
1116 
1117 bool isImplicitTaskingRegion(OpenMPDirectiveKind DKind) {
1118  return isOpenMPParallelDirective(DKind) || isOpenMPTeamsDirective(DKind);
1119 }
1120 
1121 bool isImplicitOrExplicitTaskingRegion(OpenMPDirectiveKind DKind) {
1122  return isImplicitTaskingRegion(DKind) || isOpenMPTaskingDirective(DKind) ||
1123  DKind == OMPD_unknown;
1124 }
1125 
1126 } // namespace
1127 
1128 static const Expr *getExprAsWritten(const Expr *E) {
1129  if (const auto *FE = dyn_cast<FullExpr>(E))
1130  E = FE->getSubExpr();
1131 
1132  if (const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
1133  E = MTE->getSubExpr();
1134 
1135  while (const auto *Binder = dyn_cast<CXXBindTemporaryExpr>(E))
1136  E = Binder->getSubExpr();
1137 
1138  if (const auto *ICE = dyn_cast<ImplicitCastExpr>(E))
1139  E = ICE->getSubExprAsWritten();
1140  return E->IgnoreParens();
1141 }
1142 
1143 static Expr *getExprAsWritten(Expr *E) {
1144  return const_cast<Expr *>(getExprAsWritten(const_cast<const Expr *>(E)));
1145 }
1146 
1147 static const ValueDecl *getCanonicalDecl(const ValueDecl *D) {
1148  if (const auto *CED = dyn_cast<OMPCapturedExprDecl>(D))
1149  if (const auto *ME = dyn_cast<MemberExpr>(getExprAsWritten(CED->getInit())))
1150  D = ME->getMemberDecl();
1151  const auto *VD = dyn_cast<VarDecl>(D);
1152  const auto *FD = dyn_cast<FieldDecl>(D);
1153  if (VD != nullptr) {
1154  VD = VD->getCanonicalDecl();
1155  D = VD;
1156  } else {
1157  assert(FD);
1158  FD = FD->getCanonicalDecl();
1159  D = FD;
1160  }
1161  return D;
1162 }
1163 
1164 static ValueDecl *getCanonicalDecl(ValueDecl *D) {
1165  return const_cast<ValueDecl *>(
1166  getCanonicalDecl(const_cast<const ValueDecl *>(D)));
1167 }
1168 
1169 DSAStackTy::DSAVarData DSAStackTy::getDSA(const_iterator &Iter,
1170  ValueDecl *D) const {
1171  D = getCanonicalDecl(D);
1172  auto *VD = dyn_cast<VarDecl>(D);
1173  const auto *FD = dyn_cast<FieldDecl>(D);
1174  DSAVarData DVar;
1175  if (Iter == end()) {
1176  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1177  // in a region but not in construct]
1178  // File-scope or namespace-scope variables referenced in called routines
1179  // in the region are shared unless they appear in a threadprivate
1180  // directive.
1181  if (VD && !VD->isFunctionOrMethodVarDecl() && !isa<ParmVarDecl>(VD))
1182  DVar.CKind = OMPC_shared;
1183 
1184  // OpenMP [2.9.1.2, Data-sharing Attribute Rules for Variables Referenced
1185  // in a region but not in construct]
1186  // Variables with static storage duration that are declared in called
1187  // routines in the region are shared.
1188  if (VD && VD->hasGlobalStorage())
1189  DVar.CKind = OMPC_shared;
1190 
1191  // Non-static data members are shared by default.
1192  if (FD)
1193  DVar.CKind = OMPC_shared;
1194 
1195  return DVar;
1196  }
1197 
1198  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1199  // in a Construct, C/C++, predetermined, p.1]
1200  // Variables with automatic storage duration that are declared in a scope
1201  // inside the construct are private.
1202  if (VD && isOpenMPLocal(VD, Iter) && VD->isLocalVarDecl() &&
1203  (VD->getStorageClass() == SC_Auto || VD->getStorageClass() == SC_None)) {
1204  DVar.CKind = OMPC_private;
1205  return DVar;
1206  }
1207 
1208  DVar.DKind = Iter->Directive;
1209  // Explicitly specified attributes and local variables with predetermined
1210  // attributes.
1211  if (Iter->SharingMap.count(D)) {
1212  const DSAInfo &Data = Iter->SharingMap.lookup(D);
1213  DVar.RefExpr = Data.RefExpr.getPointer();
1214  DVar.PrivateCopy = Data.PrivateCopy;
1215  DVar.CKind = Data.Attributes;
1216  DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
1217  DVar.Modifier = Data.Modifier;
1218  DVar.AppliedToPointee = Data.AppliedToPointee;
1219  return DVar;
1220  }
1221 
1222  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1223  // in a Construct, C/C++, implicitly determined, p.1]
1224  // In a parallel or task construct, the data-sharing attributes of these
1225  // variables are determined by the default clause, if present.
1226  switch (Iter->DefaultAttr) {
1227  case DSA_shared:
1228  DVar.CKind = OMPC_shared;
1229  DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
1230  return DVar;
1231  case DSA_none:
1232  return DVar;
1233  case DSA_firstprivate:
1234  if (VD->getStorageDuration() == SD_Static &&
1235  VD->getDeclContext()->isFileContext()) {
1236  DVar.CKind = OMPC_unknown;
1237  } else {
1238  DVar.CKind = OMPC_firstprivate;
1239  }
1240  DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
1241  return DVar;
1242  case DSA_unspecified:
1243  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1244  // in a Construct, implicitly determined, p.2]
1245  // In a parallel construct, if no default clause is present, these
1246  // variables are shared.
1247  DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
1248  if ((isOpenMPParallelDirective(DVar.DKind) &&
1249  !isOpenMPTaskLoopDirective(DVar.DKind)) ||
1250  isOpenMPTeamsDirective(DVar.DKind)) {
1251  DVar.CKind = OMPC_shared;
1252  return DVar;
1253  }
1254 
1255  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1256  // in a Construct, implicitly determined, p.4]
1257  // In a task construct, if no default clause is present, a variable that in
1258  // the enclosing context is determined to be shared by all implicit tasks
1259  // bound to the current team is shared.
1260  if (isOpenMPTaskingDirective(DVar.DKind)) {
1261  DSAVarData DVarTemp;
1262  const_iterator I = Iter, E = end();
1263  do {
1264  ++I;
1265  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables
1266  // Referenced in a Construct, implicitly determined, p.6]
1267  // In a task construct, if no default clause is present, a variable
1268  // whose data-sharing attribute is not determined by the rules above is
1269  // firstprivate.
1270  DVarTemp = getDSA(I, D);
1271  if (DVarTemp.CKind != OMPC_shared) {
1272  DVar.RefExpr = nullptr;
1273  DVar.CKind = OMPC_firstprivate;
1274  return DVar;
1275  }
1276  } while (I != E && !isImplicitTaskingRegion(I->Directive));
1277  DVar.CKind =
1278  (DVarTemp.CKind == OMPC_unknown) ? OMPC_firstprivate : OMPC_shared;
1279  return DVar;
1280  }
1281  }
1282  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1283  // in a Construct, implicitly determined, p.3]
1284  // For constructs other than task, if no default clause is present, these
1285  // variables inherit their data-sharing attributes from the enclosing
1286  // context.
1287  return getDSA(++Iter, D);
1288 }
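// Illustrative example of the implicit rules above (not part of this file):
//
//   int A, B;
//   #pragma omp parallel shared(A) private(B)
//   {
//     #pragma omp task   // no 'default' clause on the task:
//     {                  //   A (shared on the enclosing parallel)  -> shared
//       A = B;           //   B (private on the enclosing parallel) -> firstprivate
//     }
//   }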
1289 
1290 const Expr *DSAStackTy::addUniqueAligned(const ValueDecl *D,
1291  const Expr *NewDE) {
1292  assert(!isStackEmpty() && "Data sharing attributes stack is empty");
1293  D = getCanonicalDecl(D);
1294  SharingMapTy &StackElem = getTopOfStack();
1295  auto It = StackElem.AlignedMap.find(D);
1296  if (It == StackElem.AlignedMap.end()) {
1297  assert(NewDE && "Unexpected nullptr expr to be added into aligned map");
1298  StackElem.AlignedMap[D] = NewDE;
1299  return nullptr;
1300  }
1301  assert(It->second && "Unexpected nullptr expr in the aligned map");
1302  return It->second;
1303 }
1304 
1305 const Expr *DSAStackTy::addUniqueNontemporal(const ValueDecl *D,
1306  const Expr *NewDE) {
1307  assert(!isStackEmpty() && "Data sharing attributes stack is empty");
1308  D = getCanonicalDecl(D);
1309  SharingMapTy &StackElem = getTopOfStack();
1310  auto It = StackElem.NontemporalMap.find(D);
1311  if (It == StackElem.NontemporalMap.end()) {
1312  assert(NewDE && "Unexpected nullptr expr to be added into aligned map");
1313  StackElem.NontemporalMap[D] = NewDE;
1314  return nullptr;
1315  }
1316  assert(It->second && "Unexpected nullptr expr in the aligned map");
1317  return It->second;
1318 }
1319 
1320 void DSAStackTy::addLoopControlVariable(const ValueDecl *D, VarDecl *Capture) {
1321  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
1322  D = getCanonicalDecl(D);
1323  SharingMapTy &StackElem = getTopOfStack();
1324  StackElem.LCVMap.try_emplace(
1325  D, LCDeclInfo(StackElem.LCVMap.size() + 1, Capture));
1326 }
1327 
1328 const DSAStackTy::LCDeclInfo
1329 DSAStackTy::isLoopControlVariable(const ValueDecl *D) const {
1330  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
1331  D = getCanonicalDecl(D);
1332  const SharingMapTy &StackElem = getTopOfStack();
1333  auto It = StackElem.LCVMap.find(D);
1334  if (It != StackElem.LCVMap.end())
1335  return It->second;
1336  return {0, nullptr};
1337 }
1338 
1339 const DSAStackTy::LCDeclInfo
1340 DSAStackTy::isLoopControlVariable(const ValueDecl *D, unsigned Level) const {
1341  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
1342  D = getCanonicalDecl(D);
1343  for (unsigned I = Level + 1; I > 0; --I) {
1344  const SharingMapTy &StackElem = getStackElemAtLevel(I - 1);
1345  auto It = StackElem.LCVMap.find(D);
1346  if (It != StackElem.LCVMap.end())
1347  return It->second;
1348  }
1349  return {0, nullptr};
1350 }
1351 
1352 const DSAStackTy::LCDeclInfo
1353 DSAStackTy::isParentLoopControlVariable(const ValueDecl *D) const {
1354  const SharingMapTy *Parent = getSecondOnStackOrNull();
1355  assert(Parent && "Data-sharing attributes stack is empty");
1356  D = getCanonicalDecl(D);
1357  auto It = Parent->LCVMap.find(D);
1358  if (It != Parent->LCVMap.end())
1359  return It->second;
1360  return {0, nullptr};
1361 }
1362 
1363 const ValueDecl *DSAStackTy::getParentLoopControlVariable(unsigned I) const {
1364  const SharingMapTy *Parent = getSecondOnStackOrNull();
1365  assert(Parent && "Data-sharing attributes stack is empty");
1366  if (Parent->LCVMap.size() < I)
1367  return nullptr;
1368  for (const auto &Pair : Parent->LCVMap)
1369  if (Pair.second.first == I)
1370  return Pair.first;
1371  return nullptr;
1372 }
1373 
1374 void DSAStackTy::addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
1375  DeclRefExpr *PrivateCopy, unsigned Modifier,
1376  bool AppliedToPointee) {
1377  D = getCanonicalDecl(D);
1378  if (A == OMPC_threadprivate) {
1379  DSAInfo &Data = Threadprivates[D];
1380  Data.Attributes = A;
1381  Data.RefExpr.setPointer(E);
1382  Data.PrivateCopy = nullptr;
1383  Data.Modifier = Modifier;
1384  } else {
1385  DSAInfo &Data = getTopOfStack().SharingMap[D];
1386  assert(Data.Attributes == OMPC_unknown || (A == Data.Attributes) ||
1387  (A == OMPC_firstprivate && Data.Attributes == OMPC_lastprivate) ||
1388  (A == OMPC_lastprivate && Data.Attributes == OMPC_firstprivate) ||
1389  (isLoopControlVariable(D).first && A == OMPC_private));
1390  Data.Modifier = Modifier;
1391  if (A == OMPC_lastprivate && Data.Attributes == OMPC_firstprivate) {
1392  Data.RefExpr.setInt(/*IntVal=*/true);
1393  return;
1394  }
1395  const bool IsLastprivate =
1396  A == OMPC_lastprivate || Data.Attributes == OMPC_lastprivate;
1397  Data.Attributes = A;
1398  Data.RefExpr.setPointerAndInt(E, IsLastprivate);
1399  Data.PrivateCopy = PrivateCopy;
1400  Data.AppliedToPointee = AppliedToPointee;
1401  if (PrivateCopy) {
1402  DSAInfo &Data = getTopOfStack().SharingMap[PrivateCopy->getDecl()];
1403  Data.Modifier = Modifier;
1404  Data.Attributes = A;
1405  Data.RefExpr.setPointerAndInt(PrivateCopy, IsLastprivate);
1406  Data.PrivateCopy = nullptr;
1407  Data.AppliedToPointee = AppliedToPointee;
1408  }
1409  }
1410 }
1411 
1412 /// Build a variable declaration for OpenMP loop iteration variable.
1413 static VarDecl *buildVarDecl(Sema &SemaRef, SourceLocation Loc, QualType Type,
1414     StringRef Name, const AttrVec *Attrs = nullptr,
1415  DeclRefExpr *OrigRef = nullptr) {
1416  DeclContext *DC = SemaRef.CurContext;
1417  IdentifierInfo *II = &SemaRef.PP.getIdentifierTable().get(Name);
1418  TypeSourceInfo *TInfo = SemaRef.Context.getTrivialTypeSourceInfo(Type, Loc);
1419  auto *Decl =
1420  VarDecl::Create(SemaRef.Context, DC, Loc, Loc, II, Type, TInfo, SC_None);
1421  if (Attrs) {
1422  for (specific_attr_iterator<AlignedAttr> I(Attrs->begin()), E(Attrs->end());
1423  I != E; ++I)
1424  Decl->addAttr(*I);
1425  }
1426  Decl->setImplicit();
1427  if (OrigRef) {
1428  Decl->addAttr(
1429  OMPReferencedVarAttr::CreateImplicit(SemaRef.Context, OrigRef));
1430  }
1431  return Decl;
1432 }
1433 
1434 static DeclRefExpr *buildDeclRefExpr(Sema &S, VarDecl *D, QualType Ty,
1435     SourceLocation Loc,
1436  bool RefersToCapture = false) {
1437  D->setReferenced();
1438  D->markUsed(S.Context);
1439  return DeclRefExpr::Create(S.getASTContext(), NestedNameSpecifierLoc(),
1440      SourceLocation(), D, RefersToCapture, Loc, Ty,
1441  VK_LValue);
1442 }
1443 
1444 void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
1445  BinaryOperatorKind BOK) {
1446  D = getCanonicalDecl(D);
1447  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
1448  assert(
1449  getTopOfStack().SharingMap[D].Attributes == OMPC_reduction &&
1450  "Additional reduction info may be specified only for reduction items.");
1451  ReductionData &ReductionData = getTopOfStack().ReductionMap[D];
1452  assert(ReductionData.ReductionRange.isInvalid() &&
1453  (getTopOfStack().Directive == OMPD_taskgroup ||
1454  ((isOpenMPParallelDirective(getTopOfStack().Directive) ||
1455  isOpenMPWorksharingDirective(getTopOfStack().Directive)) &&
1456  !isOpenMPSimdDirective(getTopOfStack().Directive))) &&
1457  "Additional reduction info may be specified only once for reduction "
1458  "items.");
1459  ReductionData.set(BOK, SR);
1460  Expr *&TaskgroupReductionRef =
1461  getTopOfStack().TaskgroupReductionRef;
1462  if (!TaskgroupReductionRef) {
1463  VarDecl *VD = buildVarDecl(SemaRef, SR.getBegin(),
1464  SemaRef.Context.VoidPtrTy, ".task_red.");
1465  TaskgroupReductionRef =
1466  buildDeclRefExpr(SemaRef, VD, SemaRef.Context.VoidPtrTy, SR.getBegin());
1467  }
1468 }
1469 
1470 void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
1471  const Expr *ReductionRef) {
1472  D = getCanonicalDecl(D);
1473  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
1474  assert(
1475  getTopOfStack().SharingMap[D].Attributes == OMPC_reduction &&
1476  "Additional reduction info may be specified only for reduction items.");
1477  ReductionData &ReductionData = getTopOfStack().ReductionMap[D];
1478  assert(ReductionData.ReductionRange.isInvalid() &&
1479  (getTopOfStack().Directive == OMPD_taskgroup ||
1480  ((isOpenMPParallelDirective(getTopOfStack().Directive) ||
1481  isOpenMPWorksharingDirective(getTopOfStack().Directive)) &&
1482  !isOpenMPSimdDirective(getTopOfStack().Directive))) &&
1483  "Additional reduction info may be specified only once for reduction "
1484  "items.");
1485  ReductionData.set(ReductionRef, SR);
1486  Expr *&TaskgroupReductionRef =
1487  getTopOfStack().TaskgroupReductionRef;
1488  if (!TaskgroupReductionRef) {
1489  VarDecl *VD = buildVarDecl(SemaRef, SR.getBegin(),
1490  SemaRef.Context.VoidPtrTy, ".task_red.");
1491  TaskgroupReductionRef =
1492  buildDeclRefExpr(SemaRef, VD, SemaRef.Context.VoidPtrTy, SR.getBegin());
1493  }
1494 }
1495 
1496 const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
1497  const ValueDecl *D, SourceRange &SR, BinaryOperatorKind &BOK,
1498  Expr *&TaskgroupDescriptor) const {
1499  D = getCanonicalDecl(D);
1500  assert(!isStackEmpty() && "Data-sharing attributes stack is empty.");
1501  for (const_iterator I = begin() + 1, E = end(); I != E; ++I) {
1502  const DSAInfo &Data = I->SharingMap.lookup(D);
1503  if (Data.Attributes != OMPC_reduction ||
1504  Data.Modifier != OMPC_REDUCTION_task)
1505  continue;
1506  const ReductionData &ReductionData = I->ReductionMap.lookup(D);
1507  if (!ReductionData.ReductionOp ||
1508  ReductionData.ReductionOp.is<const Expr *>())
1509  return DSAVarData();
1510  SR = ReductionData.ReductionRange;
1511  BOK = ReductionData.ReductionOp.get<ReductionData::BOKPtrType>();
1512  assert(I->TaskgroupReductionRef && "taskgroup reduction reference "
1513  "expression for the descriptor is not "
1514  "set.");
1515  TaskgroupDescriptor = I->TaskgroupReductionRef;
1516  return DSAVarData(I->Directive, OMPC_reduction, Data.RefExpr.getPointer(),
1517  Data.PrivateCopy, I->DefaultAttrLoc, OMPC_REDUCTION_task,
1518  /*AppliedToPointee=*/false);
1519  }
1520  return DSAVarData();
1521 }
1522 
1523 const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
1524  const ValueDecl *D, SourceRange &SR, const Expr *&ReductionRef,
1525  Expr *&TaskgroupDescriptor) const {
1526  D = getCanonicalDecl(D);
1527  assert(!isStackEmpty() && "Data-sharing attributes stack is empty.");
1528  for (const_iterator I = begin() + 1, E = end(); I != E; ++I) {
1529  const DSAInfo &Data = I->SharingMap.lookup(D);
1530  if (Data.Attributes != OMPC_reduction ||
1531  Data.Modifier != OMPC_REDUCTION_task)
1532  continue;
1533  const ReductionData &ReductionData = I->ReductionMap.lookup(D);
1534  if (!ReductionData.ReductionOp ||
1535  !ReductionData.ReductionOp.is<const Expr *>())
1536  return DSAVarData();
1537  SR = ReductionData.ReductionRange;
1538  ReductionRef = ReductionData.ReductionOp.get<const Expr *>();
1539  assert(I->TaskgroupReductionRef && "taskgroup reduction reference "
1540  "expression for the descriptor is not "
1541  "set.");
1542  TaskgroupDescriptor = I->TaskgroupReductionRef;
1543  return DSAVarData(I->Directive, OMPC_reduction, Data.RefExpr.getPointer(),
1544  Data.PrivateCopy, I->DefaultAttrLoc, OMPC_REDUCTION_task,
1545  /*AppliedToPointee=*/false);
1546  }
1547  return DSAVarData();
1548 }
1549 
1550 bool DSAStackTy::isOpenMPLocal(VarDecl *D, const_iterator I) const {
1551  D = D->getCanonicalDecl();
1552  for (const_iterator E = end(); I != E; ++I) {
1553  if (isImplicitOrExplicitTaskingRegion(I->Directive) ||
1554  isOpenMPTargetExecutionDirective(I->Directive)) {
1555  if (I->CurScope) {
1556  Scope *TopScope = I->CurScope->getParent();
1557  Scope *CurScope = getCurScope();
1558  while (CurScope && CurScope != TopScope && !CurScope->isDeclScope(D))
1559  CurScope = CurScope->getParent();
1560  return CurScope != TopScope;
1561  }
1562  for (DeclContext *DC = D->getDeclContext(); DC; DC = DC->getParent())
1563  if (I->Context == DC)
1564  return true;
1565  return false;
1566  }
1567  }
1568  return false;
1569 }
1570 
1571 static bool isConstNotMutableType(Sema &SemaRef, QualType Type,
1572  bool AcceptIfMutable = true,
1573  bool *IsClassType = nullptr) {
1574  ASTContext &Context = SemaRef.getASTContext();
1575  Type = Type.getNonReferenceType().getCanonicalType();
1576  bool IsConstant = Type.isConstant(Context);
1577  Type = Context.getBaseElementType(Type);
1578  const CXXRecordDecl *RD = AcceptIfMutable && SemaRef.getLangOpts().CPlusPlus
1579  ? Type->getAsCXXRecordDecl()
1580  : nullptr;
1581  if (const auto *CTSD = dyn_cast_or_null<ClassTemplateSpecializationDecl>(RD))
1582  if (const ClassTemplateDecl *CTD = CTSD->getSpecializedTemplate())
1583  RD = CTD->getTemplatedDecl();
1584  if (IsClassType)
1585  *IsClassType = RD;
1586  return IsConstant && !(SemaRef.getLangOpts().CPlusPlus && RD &&
1587  RD->hasDefinition() && RD->hasMutableFields());
1588 }
1589 
1590 static bool rejectConstNotMutableType(Sema &SemaRef, const ValueDecl *D,
1591  QualType Type, OpenMPClauseKind CKind,
1592  SourceLocation ELoc,
1593  bool AcceptIfMutable = true,
1594  bool ListItemNotVar = false) {
1595  ASTContext &Context = SemaRef.getASTContext();
1596  bool IsClassType;
1597  if (isConstNotMutableType(SemaRef, Type, AcceptIfMutable, &IsClassType)) {
1598  unsigned Diag = ListItemNotVar
1599  ? diag::err_omp_const_list_item
1600  : IsClassType ? diag::err_omp_const_not_mutable_variable
1601  : diag::err_omp_const_variable;
1602  SemaRef.Diag(ELoc, Diag) << getOpenMPClauseName(CKind);
1603  if (!ListItemNotVar && D) {
1604  const VarDecl *VD = dyn_cast<VarDecl>(D);
1605  bool IsDecl = !VD || VD->isThisDeclarationADefinition(Context) ==
1606  VarDecl::DeclarationOnly;
1607  SemaRef.Diag(D->getLocation(),
1608  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
1609  << D;
1610  }
1611  return true;
1612  }
1613  return false;
1614 }
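// Editorial sketch (not from the original file) of user code the check above
// rejects or accepts, assuming a clause that needs a modifiable list item:
//   const int N = 0;                              // diagnosed: const variable
//   struct S { mutable int Cache; }; const S Obj; // accepted if AcceptIfMutable
//   #pragma omp parallel private(N)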
1615 
1616 const DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D,
1617  bool FromParent) {
1618  D = getCanonicalDecl(D);
1619  DSAVarData DVar;
1620 
1621  auto *VD = dyn_cast<VarDecl>(D);
1622  auto TI = Threadprivates.find(D);
1623  if (TI != Threadprivates.end()) {
1624  DVar.RefExpr = TI->getSecond().RefExpr.getPointer();
1625  DVar.CKind = OMPC_threadprivate;
1626  DVar.Modifier = TI->getSecond().Modifier;
1627  return DVar;
1628  }
1629  if (VD && VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
1630  DVar.RefExpr = buildDeclRefExpr(
1631  SemaRef, VD, D->getType().getNonReferenceType(),
1632  VD->getAttr<OMPThreadPrivateDeclAttr>()->getLocation());
1633  DVar.CKind = OMPC_threadprivate;
1634  addDSA(D, DVar.RefExpr, OMPC_threadprivate);
1635  return DVar;
1636  }
1637  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1638  // in a Construct, C/C++, predetermined, p.1]
1639  // Variables appearing in threadprivate directives are threadprivate.
1640  if ((VD && VD->getTLSKind() != VarDecl::TLS_None &&
1641  !(VD->hasAttr<OMPThreadPrivateDeclAttr>() &&
1642  SemaRef.getLangOpts().OpenMPUseTLS &&
1643  SemaRef.getASTContext().getTargetInfo().isTLSSupported())) ||
1644  (VD && VD->getStorageClass() == SC_Register &&
1645  VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())) {
1646  DVar.RefExpr = buildDeclRefExpr(
1647  SemaRef, VD, D->getType().getNonReferenceType(), D->getLocation());
1648  DVar.CKind = OMPC_threadprivate;
1649  addDSA(D, DVar.RefExpr, OMPC_threadprivate);
1650  return DVar;
1651  }
1652  if (SemaRef.getLangOpts().OpenMPCUDAMode && VD &&
1653  VD->isLocalVarDeclOrParm() && !isStackEmpty() &&
1654  !isLoopControlVariable(D).first) {
1655  const_iterator IterTarget =
1656  std::find_if(begin(), end(), [](const SharingMapTy &Data) {
1657  return isOpenMPTargetExecutionDirective(Data.Directive);
1658  });
1659  if (IterTarget != end()) {
1660  const_iterator ParentIterTarget = IterTarget + 1;
1661  for (const_iterator Iter = begin();
1662  Iter != ParentIterTarget; ++Iter) {
1663  if (isOpenMPLocal(VD, Iter)) {
1664  DVar.RefExpr =
1665  buildDeclRefExpr(SemaRef, VD, D->getType().getNonReferenceType(),
1666  D->getLocation());
1667  DVar.CKind = OMPC_threadprivate;
1668  return DVar;
1669  }
1670  }
1671  if (!isClauseParsingMode() || IterTarget != begin()) {
1672  auto DSAIter = IterTarget->SharingMap.find(D);
1673  if (DSAIter != IterTarget->SharingMap.end() &&
1674  isOpenMPPrivate(DSAIter->getSecond().Attributes)) {
1675  DVar.RefExpr = DSAIter->getSecond().RefExpr.getPointer();
1676  DVar.CKind = OMPC_threadprivate;
1677  return DVar;
1678  }
1679  const_iterator End = end();
1680  if (!SemaRef.isOpenMPCapturedByRef(
1681  D, std::distance(ParentIterTarget, End),
1682  /*OpenMPCaptureLevel=*/0)) {
1683  DVar.RefExpr =
1684  buildDeclRefExpr(SemaRef, VD, D->getType().getNonReferenceType(),
1685  IterTarget->ConstructLoc);
1686  DVar.CKind = OMPC_threadprivate;
1687  return DVar;
1688  }
1689  }
1690  }
1691  }
1692 
1693  if (isStackEmpty())
1694  // Not in OpenMP execution region and top scope was already checked.
1695  return DVar;
1696 
1697  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1698  // in a Construct, C/C++, predetermined, p.4]
1699  // Static data members are shared.
1700  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1701  // in a Construct, C/C++, predetermined, p.7]
1702  // Variables with static storage duration that are declared in a scope
1703  // inside the construct are shared.
1704  if (VD && VD->isStaticDataMember()) {
1705  // Check for explicitly specified attributes.
1706  const_iterator I = begin();
1707  const_iterator EndI = end();
1708  if (FromParent && I != EndI)
1709  ++I;
1710  if (I != EndI) {
1711  auto It = I->SharingMap.find(D);
1712  if (It != I->SharingMap.end()) {
1713  const DSAInfo &Data = It->getSecond();
1714  DVar.RefExpr = Data.RefExpr.getPointer();
1715  DVar.PrivateCopy = Data.PrivateCopy;
1716  DVar.CKind = Data.Attributes;
1717  DVar.ImplicitDSALoc = I->DefaultAttrLoc;
1718  DVar.DKind = I->Directive;
1719  DVar.Modifier = Data.Modifier;
1720  DVar.AppliedToPointee = Data.AppliedToPointee;
1721  return DVar;
1722  }
1723  }
1724 
1725  DVar.CKind = OMPC_shared;
1726  return DVar;
1727  }
1728 
1729  auto &&MatchesAlways = [](OpenMPDirectiveKind) { return true; };
1730  // The predetermined shared attribute for const-qualified types having no
1731  // mutable members was removed after OpenMP 3.1.
1732  if (SemaRef.LangOpts.OpenMP <= 31) {
1733  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1734  // in a Construct, C/C++, predetermined, p.6]
1735  // Variables with const qualified type having no mutable member are
1736  // shared.
1737  if (isConstNotMutableType(SemaRef, D->getType())) {
1738  // Variables with const-qualified type having no mutable member may be
1739  // listed in a firstprivate clause, even if they are static data members.
1740  DSAVarData DVarTemp = hasInnermostDSA(
1741  D,
1742  [](OpenMPClauseKind C, bool) {
1743  return C == OMPC_firstprivate || C == OMPC_shared;
1744  },
1745  MatchesAlways, FromParent);
1746  if (DVarTemp.CKind != OMPC_unknown && DVarTemp.RefExpr)
1747  return DVarTemp;
1748 
1749  DVar.CKind = OMPC_shared;
1750  return DVar;
1751  }
1752  }
1753 
1754  // Explicitly specified attributes and local variables with predetermined
1755  // attributes.
1756  const_iterator I = begin();
1757  const_iterator EndI = end();
1758  if (FromParent && I != EndI)
1759  ++I;
1760  if (I == EndI)
1761  return DVar;
1762  auto It = I->SharingMap.find(D);
1763  if (It != I->SharingMap.end()) {
1764  const DSAInfo &Data = It->getSecond();
1765  DVar.RefExpr = Data.RefExpr.getPointer();
1766  DVar.PrivateCopy = Data.PrivateCopy;
1767  DVar.CKind = Data.Attributes;
1768  DVar.ImplicitDSALoc = I->DefaultAttrLoc;
1769  DVar.DKind = I->Directive;
1770  DVar.Modifier = Data.Modifier;
1771  DVar.AppliedToPointee = Data.AppliedToPointee;
1772  }
1773 
1774  return DVar;
1775 }
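// Editorial examples (not from the original file) of the predetermined rules
// handled above:
//   struct C { static int Member; };   // static data member -> shared
//   _Thread_local int T;               // TLS variable -> treated as threadprivate
//   const int K = 1;                   // OpenMP <= 3.1, no mutable members -> shared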
1776 
1777 const DSAStackTy::DSAVarData DSAStackTy::getImplicitDSA(ValueDecl *D,
1778  bool FromParent) const {
1779  if (isStackEmpty()) {
1780  const_iterator I;
1781  return getDSA(I, D);
1782  }
1783  D = getCanonicalDecl(D);
1784  const_iterator StartI = begin();
1785  const_iterator EndI = end();
1786  if (FromParent && StartI != EndI)
1787  ++StartI;
1788  return getDSA(StartI, D);
1789 }
1790 
1791 const DSAStackTy::DSAVarData DSAStackTy::getImplicitDSA(ValueDecl *D,
1792  unsigned Level) const {
1793  if (getStackSize() <= Level)
1794  return DSAVarData();
1795  D = getCanonicalDecl(D);
1796  const_iterator StartI = std::next(begin(), getStackSize() - 1 - Level);
1797  return getDSA(StartI, D);
1798 }
1799 
1800 const DSAStackTy::DSAVarData
1801 DSAStackTy::hasDSA(ValueDecl *D,
1802  const llvm::function_ref<bool(OpenMPClauseKind, bool)> CPred,
1803  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
1804  bool FromParent) const {
1805  if (isStackEmpty())
1806  return {};
1807  D = getCanonicalDecl(D);
1808  const_iterator I = begin();
1809  const_iterator EndI = end();
1810  if (FromParent && I != EndI)
1811  ++I;
1812  for (; I != EndI; ++I) {
1813  if (!DPred(I->Directive) &&
1814  !isImplicitOrExplicitTaskingRegion(I->Directive))
1815  continue;
1816  const_iterator NewI = I;
1817  DSAVarData DVar = getDSA(NewI, D);
1818  if (I == NewI && CPred(DVar.CKind, DVar.AppliedToPointee))
1819  return DVar;
1820  }
1821  return {};
1822 }
1823 
1824 const DSAStackTy::DSAVarData DSAStackTy::hasInnermostDSA(
1825  ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind, bool)> CPred,
1826  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
1827  bool FromParent) const {
1828  if (isStackEmpty())
1829  return {};
1830  D = getCanonicalDecl(D);
1831  const_iterator StartI = begin();
1832  const_iterator EndI = end();
1833  if (FromParent && StartI != EndI)
1834  ++StartI;
1835  if (StartI == EndI || !DPred(StartI->Directive))
1836  return {};
1837  const_iterator NewI = StartI;
1838  DSAVarData DVar = getDSA(NewI, D);
1839  return (NewI == StartI && CPred(DVar.CKind, DVar.AppliedToPointee))
1840  ? DVar
1841  : DSAVarData();
1842 }
1843 
1844 bool DSAStackTy::hasExplicitDSA(
1845  const ValueDecl *D,
1846  const llvm::function_ref<bool(OpenMPClauseKind, bool)> CPred,
1847  unsigned Level, bool NotLastprivate) const {
1848  if (getStackSize() <= Level)
1849  return false;
1850  D = getCanonicalDecl(D);
1851  const SharingMapTy &StackElem = getStackElemAtLevel(Level);
1852  auto I = StackElem.SharingMap.find(D);
1853  if (I != StackElem.SharingMap.end() && I->getSecond().RefExpr.getPointer() &&
1854  CPred(I->getSecond().Attributes, I->getSecond().AppliedToPointee) &&
1855  (!NotLastprivate || !I->getSecond().RefExpr.getInt()))
1856  return true;
1857  // Check predetermined rules for the loop control variables.
1858  auto LI = StackElem.LCVMap.find(D);
1859  if (LI != StackElem.LCVMap.end())
1860  return CPred(OMPC_private, /*AppliedToPointee=*/false);
1861  return false;
1862 }
1863 
1864 bool DSAStackTy::hasExplicitDirective(
1865  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
1866  unsigned Level) const {
1867  if (getStackSize() <= Level)
1868  return false;
1869  const SharingMapTy &StackElem = getStackElemAtLevel(Level);
1870  return DPred(StackElem.Directive);
1871 }
1872 
1873 bool DSAStackTy::hasDirective(
1874  const llvm::function_ref<bool(OpenMPDirectiveKind,
1875  const DeclarationNameInfo &, SourceLocation)>
1876  DPred,
1877  bool FromParent) const {
1878  // We look only in the enclosing region.
1879  size_t Skip = FromParent ? 2 : 1;
1880  for (const_iterator I = begin() + std::min(Skip, getStackSize()), E = end();
1881  I != E; ++I) {
1882  if (DPred(I->Directive, I->DirectiveName, I->ConstructLoc))
1883  return true;
1884  }
1885  return false;
1886 }
1887 
1888 void Sema::InitDataSharingAttributesStack() {
1889  VarDataSharingAttributesStack = new DSAStackTy(*this);
1890 }
1891 
1892 #define DSAStack static_cast<DSAStackTy *>(VarDataSharingAttributesStack)
1893 
1894 void Sema::pushOpenMPFunctionRegion() {
1895  DSAStack->pushFunction();
1896 }
1897 
1898 void Sema::popOpenMPFunctionRegion(const FunctionScopeInfo *OldFSI) {
1899  DSAStack->popFunction(OldFSI);
1900 }
1901 
1902 static bool isOpenMPDeviceDelayedContext(Sema &S) {
1903  assert(S.LangOpts.OpenMP && S.LangOpts.OpenMPIsDevice &&
1904  "Expected OpenMP device compilation.");
1905  return !S.isInOpenMPTargetExecutionDirective();
1906 }
1907 
1908 namespace {
1909 /// Status of the function emission on the host/device.
1910 enum class FunctionEmissionStatus {
1911  Emitted,
1912  Discarded,
1913  Unknown,
1914 };
1915 } // anonymous namespace
1916 
1917 Sema::SemaDiagnosticBuilder Sema::diagIfOpenMPDeviceCode(SourceLocation Loc,
1918  unsigned DiagID,
1919  FunctionDecl *FD) {
1920  assert(LangOpts.OpenMP && LangOpts.OpenMPIsDevice &&
1921  "Expected OpenMP device compilation.");
1922 
1923  SemaDiagnosticBuilder::Kind Kind = SemaDiagnosticBuilder::K_Nop;
1924  if (FD) {
1925  FunctionEmissionStatus FES = getEmissionStatus(FD);
1926  switch (FES) {
1927  case FunctionEmissionStatus::Emitted:
1928  Kind = SemaDiagnosticBuilder::K_Immediate;
1929  break;
1930  case FunctionEmissionStatus::Unknown:
1931  // TODO: We should always delay diagnostics here in case a target
1932  // region is in a function we do not emit. However, as the
1933  // current diagnostics are associated with the function containing
1934  // the target region and we do not emit that one, we would miss out
1935  // on diagnostics for the target region itself. We need to anchor
1936  // the diagnostics with the new generated function *or* ensure we
1937  // emit diagnostics associated with the surrounding function.
1938  Kind = isOpenMPDeviceDelayedContext(*this)
1939  ? SemaDiagnosticBuilder::K_Deferred
1940  : SemaDiagnosticBuilder::K_Immediate;
1941  break;
1942  case FunctionEmissionStatus::TemplateDiscarded:
1943  case FunctionEmissionStatus::OMPDiscarded:
1944  Kind = SemaDiagnosticBuilder::K_Nop;
1945  break;
1946  case FunctionEmissionStatus::CUDADiscarded:
1947  llvm_unreachable("CUDADiscarded unexpected in OpenMP device compilation");
1948  break;
1949  }
1950  }
1951 
1952  return SemaDiagnosticBuilder(Kind, Loc, DiagID, FD, *this);
1953 }
1954 
1955 Sema::SemaDiagnosticBuilder Sema::diagIfOpenMPHostCode(SourceLocation Loc,
1956  unsigned DiagID,
1957  FunctionDecl *FD) {
1958  assert(LangOpts.OpenMP && !LangOpts.OpenMPIsDevice &&
1959  "Expected OpenMP host compilation.");
1960 
1961  SemaDiagnosticBuilder::Kind Kind = SemaDiagnosticBuilder::K_Nop;
1962  if (FD) {
1963  FunctionEmissionStatus FES = getEmissionStatus(FD);
1964  switch (FES) {
1965  case FunctionEmissionStatus::Emitted:
1966  Kind = SemaDiagnosticBuilder::K_Immediate;
1967  break;
1968  case FunctionEmissionStatus::Unknown:
1969  Kind = SemaDiagnosticBuilder::K_Deferred;
1970  break;
1971  case FunctionEmissionStatus::TemplateDiscarded:
1972  case FunctionEmissionStatus::OMPDiscarded:
1973  case FunctionEmissionStatus::CUDADiscarded:
1974  Kind = SemaDiagnosticBuilder::K_Nop;
1975  break;
1976  }
1977  }
1978 
1979  return SemaDiagnosticBuilder(Kind, Loc, DiagID, FD, *this);
1980 }
1981 
1982 static OpenMPDefaultmapClauseKind
1983 getVariableCategoryFromDecl(const LangOptions &LO, const ValueDecl *VD) {
1984  if (LO.OpenMP <= 45) {
1985  if (VD->getType().getNonReferenceType()->isScalarType())
1986  return OMPC_DEFAULTMAP_scalar;
1987  return OMPC_DEFAULTMAP_aggregate;
1988  }
1989  if (VD->getType().getNonReferenceType()->isAnyPointerType())
1990  return OMPC_DEFAULTMAP_pointer;
1991  if (VD->getType().getNonReferenceType()->isScalarType())
1992  return OMPC_DEFAULTMAP_scalar;
1993  return OMPC_DEFAULTMAP_aggregate;
1994 }
1995 
1996 bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
1997  unsigned OpenMPCaptureLevel) const {
1998  assert(LangOpts.OpenMP && "OpenMP is not allowed");
1999 
2000  ASTContext &Ctx = getASTContext();
2001  bool IsByRef = true;
2002 
2003  // Find the directive that is associated with the provided scope.
2004  D = cast<ValueDecl>(D->getCanonicalDecl());
2005  QualType Ty = D->getType();
2006 
2007  bool IsVariableUsedInMapClause = false;
2008  if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective, Level)) {
2009  // This table summarizes how a given variable should be passed to the device
2010  // given its type and the clauses where it appears. This table is based on
2011  // the description in OpenMP 4.5 [2.10.4, target Construct] and
2012  // OpenMP 4.5 [2.15.5, Data-mapping Attribute Rules and Clauses].
2013  //
2014  // =========================================================================
2015  // | type | defaultmap | pvt | first | is_device_ptr | map | res. |
2016  // | |(tofrom:scalar)| | pvt | | | |
2017  // =========================================================================
2018  // | scl | | | | - | | bycopy|
2019  // | scl | | - | x | - | - | bycopy|
2020  // | scl | | x | - | - | - | null |
2021  // | scl | x | | | - | | byref |
2022  // | scl | x | - | x | - | - | bycopy|
2023  // | scl | x | x | - | - | - | null |
2024  // | scl | | - | - | - | x | byref |
2025  // | scl | x | - | - | - | x | byref |
2026  //
2027  // | agg | n.a. | | | - | | byref |
2028  // | agg | n.a. | - | x | - | - | byref |
2029  // | agg | n.a. | x | - | - | - | null |
2030  // | agg | n.a. | - | - | - | x | byref |
2031  // | agg | n.a. | - | - | - | x[] | byref |
2032  //
2033  // | ptr | n.a. | | | - | | bycopy|
2034  // | ptr | n.a. | - | x | - | - | bycopy|
2035  // | ptr | n.a. | x | - | - | - | null |
2036  // | ptr | n.a. | - | - | - | x | byref |
2037  // | ptr | n.a. | - | - | - | x[] | bycopy|
2038  // | ptr | n.a. | - | - | x | | bycopy|
2039  // | ptr | n.a. | - | - | x | x | bycopy|
2040  // | ptr | n.a. | - | - | x | x[] | bycopy|
2041  // =========================================================================
2042  // Legend:
2043  // scl - scalar
2044  // ptr - pointer
2045  // agg - aggregate
2046  // x - applies
2047  // - - invalid in this combination
2048  // [] - mapped with an array section
2049  // byref - should be mapped by reference
2050  // bycopy - should be mapped by copy (passed by value)
2051  // null - initialize a local variable to null on the device
2052  //
2053  // Observations:
2054  // - All scalar declarations that show up in a map clause have to be passed
2055  // by reference, because they may have been mapped in the enclosing data
2056  // environment.
2057  // - If the scalar value does not fit the size of uintptr, it has to be
2058  // passed by reference, regardless of the result in the table above.
2059  // - For pointers mapped by value that have either an implicit map or an
2060  // array section, the runtime library may pass the NULL value to the
2061  // device instead of the value passed to it by the compiler.
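  // Editorial illustration (not from the original file) of a few table rows:
  //   int s; int *p; int a[100];
  //   #pragma omp target map(tofrom: a) firstprivate(s) is_device_ptr(p)
  // 'a' is an aggregate that appears in a map clause -> byref; 's' is a
  // firstprivate scalar with no defaultmap(tofrom:scalar) -> bycopy; 'p' is an
  // is_device_ptr pointer -> bycopy.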
2062 
2063  if (Ty->isReferenceType())
2064  Ty = Ty->castAs<ReferenceType>()->getPointeeType();
2065 
2066  // Locate map clauses and see if the variable being captured is referred to
2067  // in any of those clauses. Here we only care about variables, not fields,
2068  // because fields are part of aggregates.
2069  bool IsVariableAssociatedWithSection = false;
2070 
2071  DSAStack->checkMappableExprComponentListsForDeclAtLevel(
2072  D, Level,
2073  [&IsVariableUsedInMapClause, &IsVariableAssociatedWithSection, D](
2074  OMPClauseMappableExprCommon::MappableExprComponentListRef
2075  MapExprComponents,
2076  OpenMPClauseKind WhereFoundClauseKind) {
2077  // Only the map clause information influences how a variable is
2078  // captured. E.g. is_device_ptr does not require changing the default
2079  // behavior.
2080  if (WhereFoundClauseKind != OMPC_map)
2081  return false;
2082 
2083  auto EI = MapExprComponents.rbegin();
2084  auto EE = MapExprComponents.rend();
2085 
2086  assert(EI != EE && "Invalid map expression!");
2087 
2088  if (isa<DeclRefExpr>(EI->getAssociatedExpression()))
2089  IsVariableUsedInMapClause |= EI->getAssociatedDeclaration() == D;
2090 
2091  ++EI;
2092  if (EI == EE)
2093  return false;
2094 
2095  if (isa<ArraySubscriptExpr>(EI->getAssociatedExpression()) ||
2096  isa<OMPArraySectionExpr>(EI->getAssociatedExpression()) ||
2097  isa<MemberExpr>(EI->getAssociatedExpression()) ||
2098  isa<OMPArrayShapingExpr>(EI->getAssociatedExpression())) {
2099  IsVariableAssociatedWithSection = true;
2100  // There is nothing more we need to know about this variable.
2101  return true;
2102  }
2103 
2104  // Keep looking for more map info.
2105  return false;
2106  });
2107 
2108  if (IsVariableUsedInMapClause) {
2109  // If the variable is identified in a map clause it is always captured by
2110  // reference except if it is a pointer that is dereferenced somehow.
2111  IsByRef = !(Ty->isPointerType() && IsVariableAssociatedWithSection);
2112  } else {
2113  // By default, all the data that has a scalar type is mapped by copy
2114  // (except for reduction variables).
2115  // Defaultmap scalar is mutually exclusive with defaultmap pointer
2116  IsByRef = (DSAStack->isForceCaptureByReferenceInTargetExecutable() &&
2117  !Ty->isAnyPointerType()) ||
2118  !Ty->isScalarType() ||
2119  DSAStack->isDefaultmapCapturedByRef(
2120  Level, getVariableCategoryFromDecl(LangOpts, D)) ||
2121  DSAStack->hasExplicitDSA(
2122  D,
2123  [](OpenMPClauseKind K, bool AppliedToPointee) {
2124  return K == OMPC_reduction && !AppliedToPointee;
2125  },
2126  Level);
2127  }
2128  }
2129 
2130  if (IsByRef && Ty.getNonReferenceType()->isScalarType()) {
2131  IsByRef =
2132  ((IsVariableUsedInMapClause &&
2133  DSAStack->getCaptureRegion(Level, OpenMPCaptureLevel) ==
2134  OMPD_target) ||
2135  !(DSAStack->hasExplicitDSA(
2136  D,
2137  [](OpenMPClauseKind K, bool AppliedToPointee) -> bool {
2138  return K == OMPC_firstprivate ||
2139  (K == OMPC_reduction && AppliedToPointee);
2140  },
2141  Level, /*NotLastprivate=*/true) ||
2142  DSAStack->isUsesAllocatorsDecl(Level, D))) &&
2143  // If the variable is artificial and must be captured by value - try to
2144  // capture by value.
2145  !(isa<OMPCapturedExprDecl>(D) && !D->hasAttr<OMPCaptureNoInitAttr>() &&
2146  !cast<OMPCapturedExprDecl>(D)->getInit()->isGLValue()) &&
2147  // If the variable is implicitly firstprivate and scalar - capture by
2148  // copy
2149  !(DSAStack->getDefaultDSA() == DSA_firstprivate &&
2150  !DSAStack->hasExplicitDSA(
2151  D, [](OpenMPClauseKind K, bool) { return K != OMPC_unknown; },
2152  Level) &&
2153  !DSAStack->isLoopControlVariable(D, Level).first);
2154  }
2155 
2156  // When passing data by copy, we need to make sure it fits the uintptr size
2157  // and alignment, because the runtime library only deals with uintptr types.
2158  // If it does not fit the uintptr size, we need to pass the data by reference
2159  // instead.
2160  if (!IsByRef &&
2161  (Ctx.getTypeSizeInChars(Ty) >
2162  Ctx.getTypeSizeInChars(Ctx.getUIntPtrType()) ||
2163  Ctx.getDeclAlign(D) > Ctx.getTypeAlignInChars(Ctx.getUIntPtrType()))) {
2164  IsByRef = true;
2165  }
2166 
2167  return IsByRef;
2168 }
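// Editorial example (not from the original file) of the final size check: a
// scalar wider than uintptr_t on the host ABI, e.g.
//   long double v;
//   #pragma omp target firstprivate(v)
// falls back to by-reference passing because it does not fit the runtime's
// uintptr-sized argument slot.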
2169 
2170 unsigned Sema::getOpenMPNestingLevel() const {
2171  assert(getLangOpts().OpenMP);
2172  return DSAStack->getNestingLevel();
2173 }
2174 
2175 bool Sema::isInOpenMPTargetExecutionDirective() const {
2176  return (isOpenMPTargetExecutionDirective(DSAStack->getCurrentDirective()) &&
2177  !DSAStack->isClauseParsingMode()) ||
2178  DSAStack->hasDirective(
2179  [](OpenMPDirectiveKind K, const DeclarationNameInfo &,
2180  SourceLocation) -> bool {
2181  return isOpenMPTargetExecutionDirective(K);
2182  },
2183  false);
2184 }
2185 
2186 VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo,
2187  unsigned StopAt) {
2188  assert(LangOpts.OpenMP && "OpenMP is not allowed");
2189  D = getCanonicalDecl(D);
2190 
2191  auto *VD = dyn_cast<VarDecl>(D);
2192  // Do not capture constexpr variables.
2193  if (VD && VD->isConstexpr())
2194  return nullptr;
2195 
2196  // If we want to determine whether the variable should be captured from the
2197  // perspective of the current capturing scope, and we've already left all the
2198  // capturing scopes of the top directive on the stack, check from the
2199  // perspective of its parent directive (if any) instead.
2200  DSAStackTy::ParentDirectiveScope InParentDirectiveRAII(
2201  *DSAStack, CheckScopeInfo && DSAStack->isBodyComplete());
2202 
2203  // If we are attempting to capture a global variable in a directive with
2204  // 'target', we return the declaration so that this global is also mapped to the device.
2205  //
2206  if (VD && !VD->hasLocalStorage() &&
2207  (getCurCapturedRegion() || getCurBlock() || getCurLambda())) {
2208  if (isInOpenMPTargetExecutionDirective()) {
2209  DSAStackTy::DSAVarData DVarTop =
2210  DSAStack->getTopDSA(D, DSAStack->isClauseParsingMode());
2211  if (DVarTop.CKind != OMPC_unknown && DVarTop.RefExpr)
2212  return VD;
2213  // If the declaration is enclosed in a 'declare target' directive,
2214  // then it should not be captured.
2215  //
2216  if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
2217  return nullptr;
2218  CapturedRegionScopeInfo *CSI = nullptr;
2219  for (FunctionScopeInfo *FSI : llvm::drop_begin(
2220  llvm::reverse(FunctionScopes),
2221  CheckScopeInfo ? (FunctionScopes.size() - (StopAt + 1)) : 0)) {
2222  if (!isa<CapturingScopeInfo>(FSI))
2223  return nullptr;
2224  if (auto *RSI = dyn_cast<CapturedRegionScopeInfo>(FSI))
2225  if (RSI->CapRegionKind == CR_OpenMP) {
2226  CSI = RSI;
2227  break;
2228  }
2229  }
2230  assert(CSI && "Failed to find CapturedRegionScopeInfo");
2231  SmallVector<OpenMPDirectiveKind, 4> Regions;
2232  getOpenMPCaptureRegions(Regions,
2233  DSAStack->getDirective(CSI->OpenMPLevel));
2234  if (Regions[CSI->OpenMPCaptureLevel] != OMPD_task)
2235  return VD;
2236  }
2237  if (isInOpenMPDeclareTargetContext()) {
2238  // Try to mark variable as declare target if it is used in capturing
2239  // regions.
2240  if (LangOpts.OpenMP <= 45 &&
2241  !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
2242  checkDeclIsAllowedInOpenMPTarget(nullptr, VD);
2243  return nullptr;
2244  }
2245  }
2246 
2247  if (CheckScopeInfo) {
2248  bool OpenMPFound = false;
2249  for (unsigned I = StopAt + 1; I > 0; --I) {
2250  FunctionScopeInfo *FSI = FunctionScopes[I - 1];
2251  if (!isa<CapturingScopeInfo>(FSI))
2252  return nullptr;
2253  if (auto *RSI = dyn_cast<CapturedRegionScopeInfo>(FSI))
2254  if (RSI->CapRegionKind == CR_OpenMP) {
2255  OpenMPFound = true;
2256  break;
2257  }
2258  }
2259  if (!OpenMPFound)
2260  return nullptr;
2261  }
2262 
2263  if (DSAStack->getCurrentDirective() != OMPD_unknown &&
2264  (!DSAStack->isClauseParsingMode() ||
2265  DSAStack->getParentDirective() != OMPD_unknown)) {
2266  auto &&Info = DSAStack->isLoopControlVariable(D);
2267  if (Info.first ||
2268  (VD && VD->hasLocalStorage() &&
2269  isImplicitOrExplicitTaskingRegion(DSAStack->getCurrentDirective())) ||
2270  (VD && DSAStack->isForceVarCapturing()))
2271  return VD ? VD : Info.second;
2272  DSAStackTy::DSAVarData DVarTop =
2273  DSAStack->getTopDSA(D, DSAStack->isClauseParsingMode());
2274  if (DVarTop.CKind != OMPC_unknown && isOpenMPPrivate(DVarTop.CKind) &&
2275  (!VD || VD->hasLocalStorage() || !DVarTop.AppliedToPointee))
2276  return VD ? VD : cast<VarDecl>(DVarTop.PrivateCopy->getDecl());
2277  // Threadprivate variables must not be captured.
2278  if (isOpenMPThreadPrivate(DVarTop.CKind))
2279  return nullptr;
2280  // The variable is not private or it is the variable in the directive with
2281  // default(none) clause and not used in any clause.
2282  DSAStackTy::DSAVarData DVarPrivate = DSAStack->hasDSA(
2283  D,
2284  [](OpenMPClauseKind C, bool AppliedToPointee) {
2285  return isOpenMPPrivate(C) && !AppliedToPointee;
2286  },
2287  [](OpenMPDirectiveKind) { return true; },
2288  DSAStack->isClauseParsingMode());
2289  // Global shared must not be captured.
2290  if (VD && !VD->hasLocalStorage() && DVarPrivate.CKind == OMPC_unknown &&
2291  ((DSAStack->getDefaultDSA() != DSA_none &&
2292  DSAStack->getDefaultDSA() != DSA_firstprivate) ||
2293  DVarTop.CKind == OMPC_shared))
2294  return nullptr;
2295  if (DVarPrivate.CKind != OMPC_unknown ||
2296  (VD && (DSAStack->getDefaultDSA() == DSA_none ||
2297  DSAStack->getDefaultDSA() == DSA_firstprivate)))
2298  return VD ? VD : cast<VarDecl>(DVarPrivate.PrivateCopy->getDecl());
2299  }
2300  return nullptr;
2301 }
2302 
2303 void Sema::adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
2304  unsigned Level) const {
2305  FunctionScopesIndex -= getOpenMPCaptureLevels(DSAStack->getDirective(Level));
2306 }
2307 
2308 void Sema::startOpenMPLoop() {
2309  assert(LangOpts.OpenMP && "OpenMP must be enabled.");
2310  if (isOpenMPLoopDirective(DSAStack->getCurrentDirective()))
2311  DSAStack->loopInit();
2312 }
2313 
2314 void Sema::startOpenMPCXXRangeFor() {
2315  assert(LangOpts.OpenMP && "OpenMP must be enabled.");
2316  if (isOpenMPLoopDirective(DSAStack->getCurrentDirective())) {
2317  DSAStack->resetPossibleLoopCounter();
2318  DSAStack->loopStart();
2319  }
2320 }
2321 
2322 OpenMPClauseKind Sema::isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
2323  unsigned CapLevel) const {
2324  assert(LangOpts.OpenMP && "OpenMP is not allowed");
2325  if (DSAStack->hasExplicitDirective(
2326  [](OpenMPDirectiveKind K) { return isOpenMPTaskingDirective(K); },
2327  Level)) {
2328  bool IsTriviallyCopyable =
2329  D->getType().getNonReferenceType().isTriviallyCopyableType(Context) &&
2330  !D->getType()
2331  .getNonReferenceType()
2332  .getCanonicalType()
2333  ->getAsCXXRecordDecl();
2334  OpenMPDirectiveKind DKind = DSAStack->getDirective(Level);
2335  SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
2336  getOpenMPCaptureRegions(CaptureRegions, DKind);
2337  if (isOpenMPTaskingDirective(CaptureRegions[CapLevel]) &&
2338  (IsTriviallyCopyable ||
2339  !isOpenMPTaskLoopDirective(CaptureRegions[CapLevel]))) {
2340  if (DSAStack->hasExplicitDSA(
2341  D,
2342  [](OpenMPClauseKind K, bool) { return K == OMPC_firstprivate; },
2343  Level, /*NotLastprivate=*/true))
2344  return OMPC_firstprivate;
2345  DSAStackTy::DSAVarData DVar = DSAStack->getImplicitDSA(D, Level);
2346  if (DVar.CKind != OMPC_shared &&
2347  !DSAStack->isLoopControlVariable(D, Level).first && !DVar.RefExpr) {
2348  DSAStack->addImplicitTaskFirstprivate(Level, D);
2349  return OMPC_firstprivate;
2350  }
2351  }
2352  }
2353  if (isOpenMPLoopDirective(DSAStack->getCurrentDirective())) {
2354  if (DSAStack->getAssociatedLoops() > 0 &&
2355  !DSAStack->isLoopStarted()) {
2356  DSAStack->resetPossibleLoopCounter(D);
2357  DSAStack->loopStart();
2358  return OMPC_private;
2359  }
2360  if ((DSAStack->getPossiblyLoopCunter() == D->getCanonicalDecl() ||
2361  DSAStack->isLoopControlVariable(D).first) &&
2362  !DSAStack->hasExplicitDSA(
2363  D, [](OpenMPClauseKind K, bool) { return K != OMPC_private; },
2364  Level) &&
2365  !isOpenMPSimdDirective(DSAStack->getCurrentDirective()))
2366  return OMPC_private;
2367  }
2368  if (const auto *VD = dyn_cast<VarDecl>(D)) {
2369  if (DSAStack->isThreadPrivate(const_cast<VarDecl *>(VD)) &&
2370  DSAStack->isForceVarCapturing() &&
2371  !DSAStack->hasExplicitDSA(
2372  D, [](OpenMPClauseKind K, bool) { return K == OMPC_copyin; },
2373  Level))
2374  return OMPC_private;
2375  }
2376  // User-defined allocators are private since they must be defined in the
2377  // context of the target region.
2378  if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective, Level) &&
2379  DSAStack->isUsesAllocatorsDecl(Level, D).getValueOr(
2380  DSAStackTy::UsesAllocatorsDeclKind::AllocatorTrait) ==
2381  DSAStackTy::UsesAllocatorsDeclKind::UserDefinedAllocator)
2382  return OMPC_private;
2383  return (DSAStack->hasExplicitDSA(
2384  D, [](OpenMPClauseKind K, bool) { return K == OMPC_private; },
2385  Level) ||
2386  (DSAStack->isClauseParsingMode() &&
2387  DSAStack->getClauseParsingMode() == OMPC_private) ||
2388  // Consider taskgroup reduction descriptor variable a private
2389  // to avoid possible capture in the region.
2390  (DSAStack->hasExplicitDirective(
2391  [](OpenMPDirectiveKind K) {
2392  return K == OMPD_taskgroup ||
2393  ((isOpenMPParallelDirective(K) ||
2394  isOpenMPWorksharingDirective(K)) &&
2395  !isOpenMPSimdDirective(K));
2396  },
2397  Level) &&
2398  DSAStack->isTaskgroupReductionRef(D, Level)))
2399  ? OMPC_private
2400  : OMPC_unknown;
2401 }
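// Editorial example (not from the original file) of the implicit task
// firstprivate path above:
//   int x = 0;
//   #pragma omp task
//   x++;
// 'x' has no explicit DSA and is not shared from an enclosing parallel region,
// so the query reports OMPC_firstprivate for the task's capture level.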
2402 
2403 void Sema::setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D,
2404  unsigned Level) {
2405  assert(LangOpts.OpenMP && "OpenMP is not allowed");
2406  D = getCanonicalDecl(D);
2407  OpenMPClauseKind OMPC = OMPC_unknown;
2408  for (unsigned I = DSAStack->getNestingLevel() + 1; I > Level; --I) {
2409  const unsigned NewLevel = I - 1;
2410  if (DSAStack->hasExplicitDSA(
2411  D,
2412  [&OMPC](const OpenMPClauseKind K, bool AppliedToPointee) {
2413  if (isOpenMPPrivate(K) && !AppliedToPointee) {
2414  OMPC = K;
2415  return true;
2416  }
2417  return false;
2418  },
2419  NewLevel))
2420  break;
2421  if (DSAStack->checkMappableExprComponentListsForDeclAtLevel(
2422  D, NewLevel,
2423  [](OMPClauseMappableExprCommon::MappableExprComponentListRef,
2424  OpenMPClauseKind) { return true; })) {
2425  OMPC = OMPC_map;
2426  break;
2427  }
2428  if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective,
2429  NewLevel)) {
2430  OMPC = OMPC_map;
2431  if (DSAStack->mustBeFirstprivateAtLevel(
2432  NewLevel, getVariableCategoryFromDecl(LangOpts, D)))
2433  OMPC = OMPC_firstprivate;
2434  break;
2435  }
2436  }
2437  if (OMPC != OMPC_unknown)
2438  FD->addAttr(OMPCaptureKindAttr::CreateImplicit(Context, unsigned(OMPC)));
2439 }
2440 
2441 bool Sema::isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level,
2442  unsigned CaptureLevel) const {
2443  assert(LangOpts.OpenMP && "OpenMP is not allowed");
2444  // Return true if the current level is no longer enclosed in a target region.
2445 
2446  SmallVector<OpenMPDirectiveKind, 4> Regions;
2447  getOpenMPCaptureRegions(Regions, DSAStack->getDirective(Level));
2448  const auto *VD = dyn_cast<VarDecl>(D);
2449  return VD && !VD->hasLocalStorage() &&
2450  DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective,
2451  Level) &&
2452  Regions[CaptureLevel] != OMPD_task;
2453 }
2454 
2455 bool Sema::isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level,
2456  unsigned CaptureLevel) const {
2457  assert(LangOpts.OpenMP && "OpenMP is not allowed");
2458  // Return true if the current level is no longer enclosed in a target region.
2459 
2460  if (const auto *VD = dyn_cast<VarDecl>(D)) {
2461  if (!VD->hasLocalStorage()) {
2462  if (isInOpenMPTargetExecutionDirective())
2463  return true;
2464  DSAStackTy::DSAVarData TopDVar =
2465  DSAStack->getTopDSA(D, /*FromParent=*/false);
2466  unsigned NumLevels =
2467  getOpenMPCaptureLevels(DSAStack->getDirective(Level));
2468  if (Level == 0)
2469  return (NumLevels == CaptureLevel + 1) && TopDVar.CKind != OMPC_shared;
2470  do {
2471  --Level;
2472  DSAStackTy::DSAVarData DVar = DSAStack->getImplicitDSA(D, Level);
2473  if (DVar.CKind != OMPC_shared)
2474  return true;
2475  } while (Level > 0);
2476  }
2477  }
2478  return true;
2479 }
2480 
2481 void Sema::DestroyDataSharingAttributesStack() { delete DSAStack; }
2482 
2483 void Sema::ActOnOpenMPBeginDeclareVariant(SourceLocation Loc,
2484  OMPTraitInfo &TI) {
2485  OMPDeclareVariantScopes.push_back(OMPDeclareVariantScope(TI));
2486 }
2487 
2488 void Sema::ActOnOpenMPEndDeclareVariant() {
2489  assert(isInOpenMPDeclareVariantScope() &&
2490  "Not in OpenMP declare variant scope!");
2491 
2492  OMPDeclareVariantScopes.pop_back();
2493 }
2494 
2495 void Sema::finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
2496  const FunctionDecl *Callee,
2497  SourceLocation Loc) {
2498  assert(LangOpts.OpenMP && "Expected OpenMP compilation mode.");
2499  Optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
2500  OMPDeclareTargetDeclAttr::getDeviceType(Caller->getMostRecentDecl());
2501  // Ignore host functions during device analysis.
2502  if (LangOpts.OpenMPIsDevice &&
2503  (!DevTy || *DevTy == OMPDeclareTargetDeclAttr::DT_Host))
2504  return;
2505  // Ignore nohost functions during host analysis.
2506  if (!LangOpts.OpenMPIsDevice && DevTy &&
2507  *DevTy == OMPDeclareTargetDeclAttr::DT_NoHost)
2508  return;
2509  const FunctionDecl *FD = Callee->getMostRecentDecl();
2510  DevTy = OMPDeclareTargetDeclAttr::getDeviceType(FD);
2511  if (LangOpts.OpenMPIsDevice && DevTy &&
2512  *DevTy == OMPDeclareTargetDeclAttr::DT_Host) {
2513  // Diagnose host function called during device codegen.
2514  StringRef HostDevTy =
2515  getOpenMPSimpleClauseTypeName(OMPC_device_type, OMPC_DEVICE_TYPE_host);
2516  Diag(Loc, diag::err_omp_wrong_device_function_call) << HostDevTy << 0;
2517  Diag(*OMPDeclareTargetDeclAttr::getLocation(FD),
2518  diag::note_omp_marked_device_type_here)
2519  << HostDevTy;
2520  return;
2521  }
2522  if (!LangOpts.OpenMPIsDevice && DevTy &&
2523  *DevTy == OMPDeclareTargetDeclAttr::DT_NoHost) {
2524  // Diagnose nohost function called during host codegen.
2525  StringRef NoHostDevTy = getOpenMPSimpleClauseTypeName(
2526  OMPC_device_type, OMPC_DEVICE_TYPE_nohost);
2527  Diag(Loc, diag::err_omp_wrong_device_function_call) << NoHostDevTy << 1;
2528  Diag(*OMPDeclareTargetDeclAttr::getLocation(FD),
2529  diag::note_omp_marked_device_type_here)
2530  << NoHostDevTy;
2531  }
2532 }
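// Editorial example (not from the original file) of the device_type check
// above, assuming device compilation (HostOnly/OnDevice are example names):
//   void HostOnly();
//   #pragma omp declare target to(HostOnly) device_type(host)
//   #pragma omp declare target
//   void OnDevice() { HostOnly(); }   // err_omp_wrong_device_function_call
//   #pragma omp end declare target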
2533 
2534 void Sema::StartOpenMPDSABlock(OpenMPDirectiveKind DKind,
2535  const DeclarationNameInfo &DirName,
2536  Scope *CurScope, SourceLocation Loc) {
2537  DSAStack->push(DKind, DirName, CurScope, Loc);
2538  PushExpressionEvaluationContext(
2539  ExpressionEvaluationContext::PotentiallyEvaluated);
2540 }
2541 
2542 void Sema::StartOpenMPClause(OpenMPClauseKind K) {
2543  DSAStack->setClauseParsingMode(K);
2544 }
2545 
2546 void Sema::EndOpenMPClause() {
2547  DSAStack->setClauseParsingMode(/*K=*/OMPC_unknown);
2548  CleanupVarDeclMarking();
2549 }
2550 
2551 static std::pair<ValueDecl *, bool>
2552 getPrivateItem(Sema &S, Expr *&RefExpr, SourceLocation &ELoc,
2553  SourceRange &ERange, bool AllowArraySection = false);
2554 
2555 /// Check consistency of the reduction clauses.
2556 static void checkReductionClauses(Sema &S, DSAStackTy *Stack,
2557  ArrayRef<OMPClause *> Clauses) {
2558  bool InscanFound = false;
2559  SourceLocation InscanLoc;
2560  // OpenMP 5.0, 2.19.5.4 reduction Clause, Restrictions.
2561  // A reduction clause without the inscan reduction-modifier may not appear on
2562  // a construct on which a reduction clause with the inscan reduction-modifier
2563  // appears.
2564  for (OMPClause *C : Clauses) {
2565  if (C->getClauseKind() != OMPC_reduction)
2566  continue;
2567  auto *RC = cast<OMPReductionClause>(C);
2568  if (RC->getModifier() == OMPC_REDUCTION_inscan) {
2569  InscanFound = true;
2570  InscanLoc = RC->getModifierLoc();
2571  continue;
2572  }
2573  if (RC->getModifier() == OMPC_REDUCTION_task) {
2574  // OpenMP 5.0, 2.19.5.4 reduction Clause.
2575  // A reduction clause with the task reduction-modifier may only appear on
2576  // a parallel construct, a worksharing construct or a combined or
2577  // composite construct for which any of the aforementioned constructs is a
2578  // constituent construct and simd or loop are not constituent constructs.
2579  OpenMPDirectiveKind CurDir = Stack->getCurrentDirective();
2580  if (!(isOpenMPParallelDirective(CurDir) ||
2581  isOpenMPWorksharingDirective(CurDir)) ||
2582  isOpenMPSimdDirective(CurDir))
2583  S.Diag(RC->getModifierLoc(),
2584  diag::err_omp_reduction_task_not_parallel_or_worksharing);
2585  continue;
2586  }
2587  }
2588  if (InscanFound) {
2589  for (OMPClause *C : Clauses) {
2590  if (C->getClauseKind() != OMPC_reduction)
2591  continue;
2592  auto *RC = cast<OMPReductionClause>(C);
2593  if (RC->getModifier() != OMPC_REDUCTION_inscan) {
2594  S.Diag(RC->getModifier() == OMPC_REDUCTION_unknown
2595  ? RC->getBeginLoc()
2596  : RC->getModifierLoc(),
2597  diag::err_omp_inscan_reduction_expected);
2598  S.Diag(InscanLoc, diag::note_omp_previous_inscan_reduction);
2599  continue;
2600  }
2601  for (Expr *Ref : RC->varlists()) {
2602  assert(Ref && "NULL expr in OpenMP reduction clause.");
2603  SourceLocation ELoc;
2604  SourceRange ERange;
2605  Expr *SimpleRefExpr = Ref;
2606  auto Res = getPrivateItem(S, SimpleRefExpr, ELoc, ERange,
2607  /*AllowArraySection=*/true);
2608  ValueDecl *D = Res.first;
2609  if (!D)
2610  continue;
2611  if (!Stack->isUsedInScanDirective(getCanonicalDecl(D))) {
2612  S.Diag(Ref->getExprLoc(),
2613  diag::err_omp_reduction_not_inclusive_exclusive)
2614  << Ref->getSourceRange();
2615  }
2616  }
2617  }
2618  }
2619 }
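// Editorial example (not from the original file) of the inscan restriction
// enforced above:
//   #pragma omp for reduction(inscan, +: a) reduction(+: b)   // diagnosed
// Every reduction on such a construct must also use the inscan modifier, and
// each inscan list item must appear in an inclusive/exclusive clause of an
// enclosed 'omp scan' directive.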
2620 
2621 static void checkAllocateClauses(Sema &S, DSAStackTy *Stack,
2622  ArrayRef<OMPClause *> Clauses);
2623 static DeclRefExpr *buildCapture(Sema &S, ValueDecl *D, Expr *CaptureExpr,
2624  bool WithInit);
2625 
2626 static void reportOriginalDsa(Sema &SemaRef, const DSAStackTy *Stack,
2627  const ValueDecl *D,
2628  const DSAStackTy::DSAVarData &DVar,
2629  bool IsLoopIterVar = false);
2630 
2631 void Sema::EndOpenMPDSABlock(Stmt *CurDirective) {
2632  // OpenMP [2.14.3.5, Restrictions, C/C++, p.1]
2633  // A variable of class type (or array thereof) that appears in a lastprivate
2634  // clause requires an accessible, unambiguous default constructor for the
2635  // class type, unless the list item is also specified in a firstprivate
2636  // clause.
2637  if (const auto *D = dyn_cast_or_null<OMPExecutableDirective>(CurDirective)) {
2638  for (OMPClause *C : D->clauses()) {
2639  if (auto *Clause = dyn_cast<OMPLastprivateClause>(C)) {
2640  SmallVector<Expr *, 8> PrivateCopies;
2641  for (Expr *DE : Clause->varlists()) {
2642  if (DE->isValueDependent() || DE->isTypeDependent()) {
2643  PrivateCopies.push_back(nullptr);
2644  continue;
2645  }
2646  auto *DRE = cast<DeclRefExpr>(DE->IgnoreParens());
2647  auto *VD = cast<VarDecl>(DRE->getDecl());
2649  const DSAStackTy::DSAVarData DVar =
2650  DSAStack->getTopDSA(VD, /*FromParent=*/false);
2651  if (DVar.CKind == OMPC_lastprivate) {
2652  // Generate helper private variable and initialize it with the
2653  // default value. The address of the original variable is replaced
2654  // by the address of the new private variable in CodeGen. This new
2655  // variable is not added to IdResolver, so the code in the OpenMP
2656  // region uses original variable for proper diagnostics.
2657  VarDecl *VDPrivate = buildVarDecl(
2658  *this, DE->getExprLoc(), Type.getUnqualifiedType(),
2659  VD->getName(), VD->hasAttrs() ? &VD->getAttrs() : nullptr, DRE);
2660  ActOnUninitializedDecl(VDPrivate);
2661  if (VDPrivate->isInvalidDecl()) {
2662  PrivateCopies.push_back(nullptr);
2663  continue;
2664  }
2665  PrivateCopies.push_back(buildDeclRefExpr(
2666  *this, VDPrivate, DE->getType(), DE->getExprLoc()));
2667  } else {
2668  // The variable is also a firstprivate, so initialization sequence
2669  // for private copy is generated already.
2670  PrivateCopies.push_back(nullptr);
2671  }
2672  }
2673  Clause->setPrivateCopies(PrivateCopies);
2674  continue;
2675  }
2676  // Finalize nontemporal clause by handling private copies, if any.
2677  if (auto *Clause = dyn_cast<OMPNontemporalClause>(C)) {
2678  SmallVector<Expr *, 8> PrivateRefs;
2679  for (Expr *RefExpr : Clause->varlists()) {
2680  assert(RefExpr && "NULL expr in OpenMP nontemporal clause.");
2681  SourceLocation ELoc;
2682  SourceRange ERange;
2683  Expr *SimpleRefExpr = RefExpr;
2684  auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange);
2685  if (Res.second)
2686  // It will be analyzed later.
2687  PrivateRefs.push_back(RefExpr);
2688  ValueDecl *D = Res.first;
2689  if (!D)
2690  continue;
2691 
2692  const DSAStackTy::DSAVarData DVar =
2693  DSAStack->getTopDSA(D, /*FromParent=*/false);
2694  PrivateRefs.push_back(DVar.PrivateCopy ? DVar.PrivateCopy
2695  : SimpleRefExpr);
2696  }
2697  Clause->setPrivateRefs(PrivateRefs);
2698  continue;
2699  }
2700  if (auto *Clause = dyn_cast<OMPUsesAllocatorsClause>(C)) {
2701  for (unsigned I = 0, E = Clause->getNumberOfAllocators(); I < E; ++I) {
2702  OMPUsesAllocatorsClause::Data D = Clause->getAllocatorData(I);
2703  auto *DRE = dyn_cast<DeclRefExpr>(D.Allocator->IgnoreParenImpCasts());
2704  if (!DRE)
2705  continue;
2706  ValueDecl *VD = DRE->getDecl();
2707  if (!VD || !isa<VarDecl>(VD))
2708  continue;
2709  DSAStackTy::DSAVarData DVar =
2710  DSAStack->getTopDSA(VD, /*FromParent=*/false);
2711  // OpenMP [2.12.5, target Construct]
2712  // Memory allocators that appear in a uses_allocators clause cannot
2713  // appear in other data-sharing attribute clauses or data-mapping
2714  // attribute clauses in the same construct.
2715  Expr *MapExpr = nullptr;
2716  if (DVar.RefExpr ||
2717  DSAStack->checkMappableExprComponentListsForDecl(
2718  VD, /*CurrentRegionOnly=*/true,
2719  [VD, &MapExpr](
2720  OMPClauseMappableExprCommon::MappableExprComponentListRef
2721  MapExprComponents,
2722  OpenMPClauseKind C) {
2723  auto MI = MapExprComponents.rbegin();
2724  auto ME = MapExprComponents.rend();
2725  if (MI != ME &&
2726  MI->getAssociatedDeclaration()->getCanonicalDecl() ==
2727  VD->getCanonicalDecl()) {
2728  MapExpr = MI->getAssociatedExpression();
2729  return true;
2730  }
2731  return false;
2732  })) {
2733  Diag(D.Allocator->getExprLoc(),
2734  diag::err_omp_allocator_used_in_clauses)
2735  << D.Allocator->getSourceRange();
2736  if (DVar.RefExpr)
2737  reportOriginalDsa(*this, DSAStack, VD, DVar);
2738  else
2739  Diag(MapExpr->getExprLoc(), diag::note_used_here)
2740  << MapExpr->getSourceRange();
2741  }
2742  }
2743  continue;
2744  }
2745  }
2746  // Check allocate clauses.
2747  if (!CurContext->isDependentContext())
2748  checkAllocateClauses(*this, DSAStack, D->clauses());
2749  checkReductionClauses(*this, DSAStack, D->clauses());
2750  }
2751 
2752  DSAStack->pop();
2753  DiscardCleanupsInEvaluationContext();
2754  PopExpressionEvaluationContext();
2755 }
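// Editorial example (not from the original file) of the uses_allocators
// restriction verified above (MyAlloc/MyTraits are assumed declarations):
//   #pragma omp target uses_allocators(MyAlloc(MyTraits)) firstprivate(MyAlloc)
// is rejected: an allocator listed in uses_allocators may not also appear in a
// data-sharing or data-mapping clause of the same construct.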
2756 
2757 static bool FinishOpenMPLinearClause(OMPLinearClause &Clause, DeclRefExpr *IV,
2758  Expr *NumIterations, Sema &SemaRef,
2759  Scope *S, DSAStackTy *Stack);
2760 
2761 namespace {
2762 
2763 class VarDeclFilterCCC final : public CorrectionCandidateCallback {
2764 private:
2765  Sema &SemaRef;
2766 
2767 public:
2768  explicit VarDeclFilterCCC(Sema &S) : SemaRef(S) {}
2769  bool ValidateCandidate(const TypoCorrection &Candidate) override {
2770  NamedDecl *ND = Candidate.getCorrectionDecl();
2771  if (const auto *VD = dyn_cast_or_null<VarDecl>(ND)) {
2772  return VD->hasGlobalStorage() &&
2773  SemaRef.isDeclInScope(ND, SemaRef.getCurLexicalContext(),
2774  SemaRef.getCurScope());
2775  }
2776  return false;
2777  }
2778 
2779  std::unique_ptr<CorrectionCandidateCallback> clone() override {
2780  return std::make_unique<VarDeclFilterCCC>(*this);
2781  }
2782 
2783 };
2784 
2785 class VarOrFuncDeclFilterCCC final : public CorrectionCandidateCallback {
2786 private:
2787  Sema &SemaRef;
2788 
2789 public:
2790  explicit VarOrFuncDeclFilterCCC(Sema &S) : SemaRef(S) {}
2791  bool ValidateCandidate(const TypoCorrection &Candidate) override {
2792  NamedDecl *ND = Candidate.getCorrectionDecl();
2793  if (ND && ((isa<VarDecl>(ND) && ND->getKind() == Decl::Var) ||
2794  isa<FunctionDecl>(ND))) {
2795  return SemaRef.isDeclInScope(ND, SemaRef.getCurLexicalContext(),
2796  SemaRef.getCurScope());
2797  }
2798  return false;
2799  }
2800 
2801  std::unique_ptr<CorrectionCandidateCallback> clone() override {
2802  return std::make_unique<VarOrFuncDeclFilterCCC>(*this);
2803  }
2804 };
2805 
2806 } // namespace
2807 
2808 ExprResult Sema::ActOnOpenMPIdExpression(Scope *CurScope,
2809  CXXScopeSpec &ScopeSpec,
2810  const DeclarationNameInfo &Id,
2811  OpenMPDirectiveKind Kind) {
2812  LookupResult Lookup(*this, Id, LookupOrdinaryName);
2813  LookupParsedName(Lookup, CurScope, &ScopeSpec, true);
2814 
2815  if (Lookup.isAmbiguous())
2816  return ExprError();
2817 
2818  VarDecl *VD;
2819  if (!Lookup.isSingleResult()) {
2820  VarDeclFilterCCC CCC(*this);
2821  if (TypoCorrection Corrected =
2822  CorrectTypo(Id, LookupOrdinaryName, CurScope, nullptr, CCC,
2823  CTK_ErrorRecovery)) {
2824  diagnoseTypo(Corrected,
2825  PDiag(Lookup.empty()
2826  ? diag::err_undeclared_var_use_suggest
2827  : diag::err_omp_expected_var_arg_suggest)
2828  << Id.getName());
2829  VD = Corrected.getCorrectionDeclAs<VarDecl>();
2830  } else {
2831  Diag(Id.getLoc(), Lookup.empty() ? diag::err_undeclared_var_use
2832  : diag::err_omp_expected_var_arg)
2833  << Id.getName();
2834  return ExprError();
2835  }
2836  } else if (!(VD = Lookup.getAsSingle<VarDecl>())) {
2837  Diag(Id.getLoc(), diag::err_omp_expected_var_arg) << Id.getName();
2838  Diag(Lookup.getFoundDecl()->getLocation(), diag::note_declared_at);
2839  return ExprError();
2840  }
2841  Lookup.suppressDiagnostics();
2842 
2843  // OpenMP [2.9.2, Syntax, C/C++]
2844  // Variables must be file-scope, namespace-scope, or static block-scope.
2845  if (Kind == OMPD_threadprivate && !VD->hasGlobalStorage()) {
2846  Diag(Id.getLoc(), diag::err_omp_global_var_arg)
2847  << getOpenMPDirectiveName(Kind) << !VD->isStaticLocal();
2848  bool IsDecl =
2849  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
2850  Diag(VD->getLocation(),
2851  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2852  << VD;
2853  return ExprError();
2854  }
2855 
2856  VarDecl *CanonicalVD = VD->getCanonicalDecl();
2857  NamedDecl *ND = CanonicalVD;
2858  // OpenMP [2.9.2, Restrictions, C/C++, p.2]
2859  // A threadprivate directive for file-scope variables must appear outside
2860  // any definition or declaration.
2861  if (CanonicalVD->getDeclContext()->isTranslationUnit() &&
2862  !getCurLexicalContext()->isTranslationUnit()) {
2863  Diag(Id.getLoc(), diag::err_omp_var_scope)
2864  << getOpenMPDirectiveName(Kind) << VD;
2865  bool IsDecl =
2866  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
2867  Diag(VD->getLocation(),
2868  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2869  << VD;
2870  return ExprError();
2871  }
2872  // OpenMP [2.9.2, Restrictions, C/C++, p.3]
2873  // A threadprivate directive for static class member variables must appear
2874  // in the class definition, in the same scope in which the member
2875  // variables are declared.
2876  if (CanonicalVD->isStaticDataMember() &&
2877  !CanonicalVD->getDeclContext()->Equals(getCurLexicalContext())) {
2878  Diag(Id.getLoc(), diag::err_omp_var_scope)
2879  << getOpenMPDirectiveName(Kind) << VD;
2880  bool IsDecl =
2881  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
2882  Diag(VD->getLocation(),
2883  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2884  << VD;
2885  return ExprError();
2886  }
2887  // OpenMP [2.9.2, Restrictions, C/C++, p.4]
2888  // A threadprivate directive for namespace-scope variables must appear
2889  // outside any definition or declaration other than the namespace
2890  // definition itself.
2891  if (CanonicalVD->getDeclContext()->isNamespace() &&
2892  (!getCurLexicalContext()->isFileContext() ||
2893  !getCurLexicalContext()->Encloses(CanonicalVD->getDeclContext()))) {
2894  Diag(Id.getLoc(), diag::err_omp_var_scope)
2895  << getOpenMPDirectiveName(Kind) << VD;
2896  bool IsDecl =
2897  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
2898  Diag(VD->getLocation(),
2899  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2900  << VD;
2901  return ExprError();
2902  }
2903  // OpenMP [2.9.2, Restrictions, C/C++, p.6]
2904  // A threadprivate directive for static block-scope variables must appear
2905  // in the scope of the variable and not in a nested scope.
2906  if (CanonicalVD->isLocalVarDecl() && CurScope &&
2907  !isDeclInScope(ND, getCurLexicalContext(), CurScope)) {
2908  Diag(Id.getLoc(), diag::err_omp_var_scope)
2909  << getOpenMPDirectiveName(Kind) << VD;
2910  bool IsDecl =
2911  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
2912  Diag(VD->getLocation(),
2913  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2914  << VD;
2915  return ExprError();
2916  }
2917 
2918  // OpenMP [2.9.2, Restrictions, C/C++, p.2-6]
2919  // A threadprivate directive must lexically precede all references to any
2920  // of the variables in its list.
2921  if (Kind == OMPD_threadprivate && VD->isUsed() &&
2922  !DSAStack->isThreadPrivate(VD)) {
2923  Diag(Id.getLoc(), diag::err_omp_var_used)
2924  << getOpenMPDirectiveName(Kind) << VD;
2925  return ExprError();
2926  }
2927 
2928  QualType ExprType = VD->getType().getNonReferenceType();
2929  return DeclRefExpr::Create(Context, NestedNameSpecifierLoc(),
2930  SourceLocation(), VD,
2931  /*RefersToEnclosingVariableOrCapture=*/false,
2932  Id.getLoc(), ExprType, VK_LValue);
2933 }
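// Editorial examples (not from the original file) of the scope rules checked
// above for '#pragma omp threadprivate':
//   int G;
//   #pragma omp threadprivate(G)        // OK: same file scope, precedes uses
//   void f() {
//     static int S;
//     #pragma omp threadprivate(S)      // OK: same block scope as S
//     #pragma omp threadprivate(G)      // error: file-scope variable, the
//   }                                   //   directive must be at file scope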
2934 
2935 Sema::DeclGroupPtrTy
2936 Sema::ActOnOpenMPThreadprivateDirective(SourceLocation Loc,
2937  ArrayRef<Expr *> VarList) {
2938  if (OMPThreadPrivateDecl *D = CheckOMPThreadPrivateDecl(Loc, VarList)) {
2939  CurContext->addDecl(D);
2940  return DeclGroupPtrTy::make(DeclGroupRef(D));
2941  }
2942  return nullptr;
2943 }
2944 
2945 namespace {
2946 class LocalVarRefChecker final
2947  : public ConstStmtVisitor<LocalVarRefChecker, bool> {
2948  Sema &SemaRef;
2949 
2950 public:
2951  bool VisitDeclRefExpr(const DeclRefExpr *E) {
2952  if (const auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
2953  if (VD->hasLocalStorage()) {
2954  SemaRef.Diag(E->getBeginLoc(),
2955  diag::err_omp_local_var_in_threadprivate_init)
2956  << E->getSourceRange();
2957  SemaRef.Diag(VD->getLocation(), diag::note_defined_here)
2958  << VD << VD->getSourceRange();
2959  return true;
2960  }
2961  }
2962  return false;
2963  }
2964  bool VisitStmt(const Stmt *S) {
2965  for (const Stmt *Child : S->children()) {
2966  if (Child && Visit(Child))
2967  return true;
2968  }
2969  return false;
2970  }
2971  explicit LocalVarRefChecker(Sema &SemaRef) : SemaRef(SemaRef) {}
2972 };
2973 } // namespace
2974 
2975 OMPThreadPrivateDecl *
2976 Sema::CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList) {
2977  SmallVector<Expr *, 8> Vars;
2978  for (Expr *RefExpr : VarList) {
2979  auto *DE = cast<DeclRefExpr>(RefExpr);
2980  auto *VD = cast<VarDecl>(DE->getDecl());
2981  SourceLocation ILoc = DE->getExprLoc();
2982 
2983  // Mark variable as used.
2984  VD->setReferenced();
2985  VD->markUsed(Context);
2986 
2987  QualType QType = VD->getType();
2988  if (QType->isDependentType() || QType->isInstantiationDependentType()) {
2989  // It will be analyzed later.
2990  Vars.push_back(DE);
2991  continue;
2992  }
2993 
2994  // OpenMP [2.9.2, Restrictions, C/C++, p.10]
2995  // A threadprivate variable must not have an incomplete type.
2996  if (RequireCompleteType(ILoc, VD->getType(),
2997  diag::err_omp_threadprivate_incomplete_type)) {
2998  continue;
2999  }
3000 
3001  // OpenMP [2.9.2, Restrictions, C/C++, p.10]
3002  // A threadprivate variable must not have a reference type.
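  // For example (illustrative names):
  //   int V;
  //   int &R = V;
  //   #pragma omp threadprivate(R)   // error: R has reference type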
3003  if (VD->getType()->isReferenceType()) {
3004  Diag(ILoc, diag::err_omp_ref_type_arg)
3005  << getOpenMPDirectiveName(OMPD_threadprivate) << VD->getType();
3006  bool IsDecl =
3007  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
3008  Diag(VD->getLocation(),
3009  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
3010  << VD;
3011  continue;
3012  }
3013 
3014  // Check if this is a TLS variable. If TLS is not supported, produce the
3015  // corresponding diagnostic.
3016  if ((VD->getTLSKind() != VarDecl::TLS_None &&
3017  !(VD->hasAttr<OMPThreadPrivateDeclAttr>() &&
3018  getLangOpts().OpenMPUseTLS &&
3019  getASTContext().getTargetInfo().isTLSSupported())) ||
3020  (VD->getStorageClass() == SC_Register && VD->hasAttr<AsmLabelAttr>() &&
3021  !VD->isLocalVarDecl())) {
3022  Diag(ILoc, diag::err_omp_var_thread_local)
3023  << VD << ((VD->getTLSKind() != VarDecl::TLS_None) ? 0 : 1);
3024  bool IsDecl =
3025  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
3026  Diag(VD->getLocation(),
3027  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
3028  << VD;
3029  continue;
3030  }
3031 
3032  // Check if the initial value of the threadprivate variable references a
3033  // variable with local storage (this is not supported by the runtime).
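  // For example (illustrative names):
  //   void f() {
  //     int Local = 0;
  //     static int TP = Local;        // initializer reads a local variable
  //   #pragma omp threadprivate(TP)   // rejected
  //   }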
3034  if (const Expr *Init = VD->getAnyInitializer()) {
3035  LocalVarRefChecker Checker(*this);
3036  if (Checker.Visit(Init))
3037  continue;
3038  }
3039 
3040  Vars.push_back(RefExpr);
3041  DSAStack->addDSA(VD, DE, OMPC_threadprivate);
3042  VD->addAttr(OMPThreadPrivateDeclAttr::CreateImplicit(
3043  Context, SourceRange(Loc, Loc)));
3045  ML->DeclarationMarkedOpenMPThreadPrivate(VD);
3046  }
3047  OMPThreadPrivateDecl *D = nullptr;
3048  if (!Vars.empty()) {
3050  Vars);
3051  D->setAccess(AS_public);
3052  }
3053  return D;
3054 }
3055 
3056 static OMPAllocateDeclAttr::AllocatorTypeTy
3057 getAllocatorKind(Sema &S, DSAStackTy *Stack, Expr *Allocator) {
3058  if (!Allocator)
3059  return OMPAllocateDeclAttr::OMPNullMemAlloc;
3060  if (Allocator->isTypeDependent() || Allocator->isValueDependent() ||
3061  Allocator->isInstantiationDependent() ||
3062  Allocator->containsUnexpandedParameterPack())
3063  return OMPAllocateDeclAttr::OMPUserDefinedMemAlloc;
3064  auto AllocatorKindRes = OMPAllocateDeclAttr::OMPUserDefinedMemAlloc;
3065  const Expr *AE = Allocator->IgnoreParenImpCasts();
3066  for (int I = 0; I < OMPAllocateDeclAttr::OMPUserDefinedMemAlloc; ++I) {
3067  auto AllocatorKind = static_cast<OMPAllocateDeclAttr::AllocatorTypeTy>(I);
3068  const Expr *DefAllocator = Stack->getAllocator(AllocatorKind);
3069  llvm::FoldingSetNodeID AEId, DAEId;
3070  AE->Profile(AEId, S.getASTContext(), /*Canonical=*/true);
3071  DefAllocator->Profile(DAEId, S.getASTContext(), /*Canonical=*/true);
3072  if (AEId == DAEId) {
3073  AllocatorKindRes = AllocatorKind;
3074  break;
3075  }
3076  }
3077  return AllocatorKindRes;
3078 }
3079 
3081  Sema &S, DSAStackTy *Stack, Expr *RefExpr, VarDecl *VD,
3082  OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind, Expr *Allocator) {
3083  if (!VD->hasAttr<OMPAllocateDeclAttr>())
3084  return false;
3085  const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
3086  Expr *PrevAllocator = A->getAllocator();
3087  OMPAllocateDeclAttr::AllocatorTypeTy PrevAllocatorKind =
3088  getAllocatorKind(S, Stack, PrevAllocator);
3089  bool AllocatorsMatch = AllocatorKind == PrevAllocatorKind;
3090  if (AllocatorsMatch &&
3091  AllocatorKind == OMPAllocateDeclAttr::OMPUserDefinedMemAlloc &&
3092  Allocator && PrevAllocator) {
3093  const Expr *AE = Allocator->IgnoreParenImpCasts();
3094  const Expr *PAE = PrevAllocator->IgnoreParenImpCasts();
3095  llvm::FoldingSetNodeID AEId, PAEId;
3096  AE->Profile(AEId, S.Context, /*Canonical=*/true);
3097  PAE->Profile(PAEId, S.Context, /*Canonical=*/true);
3098  AllocatorsMatch = AEId == PAEId;
3099  }
3100  if (!AllocatorsMatch) {
3101  SmallString<256> AllocatorBuffer;
3102  llvm::raw_svector_ostream AllocatorStream(AllocatorBuffer);
3103  if (Allocator)
3104  Allocator->printPretty(AllocatorStream, nullptr, S.getPrintingPolicy());
3105  SmallString<256> PrevAllocatorBuffer;
3106  llvm::raw_svector_ostream PrevAllocatorStream(PrevAllocatorBuffer);
3107  if (PrevAllocator)
3108  PrevAllocator->printPretty(PrevAllocatorStream, nullptr,
3109  S.getPrintingPolicy());
3110 
3111  SourceLocation AllocatorLoc =
3112  Allocator ? Allocator->getExprLoc() : RefExpr->getExprLoc();
3113  SourceRange AllocatorRange =
3114  Allocator ? Allocator->getSourceRange() : RefExpr->getSourceRange();
3115  SourceLocation PrevAllocatorLoc =
3116  PrevAllocator ? PrevAllocator->getExprLoc() : A->getLocation();
3117  SourceRange PrevAllocatorRange =
3118  PrevAllocator ? PrevAllocator->getSourceRange() : A->getRange();
3119  S.Diag(AllocatorLoc, diag::warn_omp_used_different_allocator)
3120  << (Allocator ? 1 : 0) << AllocatorStream.str()
3121  << (PrevAllocator ? 1 : 0) << PrevAllocatorStream.str()
3122  << AllocatorRange;
3123  S.Diag(PrevAllocatorLoc, diag::note_omp_previous_allocator)
3124  << PrevAllocatorRange;
3125  return true;
3126  }
3127  return false;
3128 }
3129 
3130 static void
3132  OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind,
3133  Expr *Allocator, Expr *Alignment, SourceRange SR) {
3134  if (VD->hasAttr<OMPAllocateDeclAttr>())
3135  return;
3136  if (Alignment &&
3137  (Alignment->isTypeDependent() || Alignment->isValueDependent() ||
3138  Alignment->isInstantiationDependent() ||
3139  Alignment->containsUnexpandedParameterPack()))
3140  // Apply later when we have a usable value.
3141  return;
3142  if (Allocator &&
3143  (Allocator->isTypeDependent() || Allocator->isValueDependent() ||
3144  Allocator->isInstantiationDependent() ||
3145  Allocator->containsUnexpandedParameterPack()))
3146  return;
3147  auto *A = OMPAllocateDeclAttr::CreateImplicit(S.Context, AllocatorKind,
3148  Allocator, Alignment, SR);
3149  VD->addAttr(A);
3151  ML->DeclarationMarkedOpenMPAllocate(VD, A);
3152 }
3153 
3155  SourceLocation Loc, ArrayRef<Expr *> VarList,
3156  ArrayRef<OMPClause *> Clauses, DeclContext *Owner) {
3157  assert(Clauses.size() <= 2 && "Expected at most two clauses.");
3158  Expr *Alignment = nullptr;
3159  Expr *Allocator = nullptr;
3160  if (Clauses.empty()) {
3161  // OpenMP 5.0, 2.11.3 allocate Directive, Restrictions.
3162  // allocate directives that appear in a target region must specify an
3163  // allocator clause unless a requires directive with the dynamic_allocators
3164  // clause is present in the same compilation unit.
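  // For example (illustrative), inside a target region:
  //   #pragma omp allocate(X) allocator(omp_default_mem_alloc)   // OK
  //   #pragma omp allocate(X)   // needs '#pragma omp requires dynamic_allocators'
  //                             // somewhere in the same compilation unit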
3165  if (LangOpts.OpenMPIsDevice &&
3166  !DSAStack->hasRequiresDeclWithClause<OMPDynamicAllocatorsClause>())
3167  targetDiag(Loc, diag::err_expected_allocator_clause);
3168  } else {
3169  for (const OMPClause *C : Clauses)
3170  if (const auto *AC = dyn_cast<OMPAllocatorClause>(C))
3171  Allocator = AC->getAllocator();
3172  else if (const auto *AC = dyn_cast<OMPAlignClause>(C))
3173  Alignment = AC->getAlignment();
3174  else
3175  llvm_unreachable("Unexpected clause on allocate directive");
3176  }
3177  OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind =
3178  getAllocatorKind(*this, DSAStack, Allocator);
3180  for (Expr *RefExpr : VarList) {
3181  auto *DE = cast<DeclRefExpr>(RefExpr);
3182  auto *VD = cast<VarDecl>(DE->getDecl());
3183 
3184  // Check if this is a TLS variable or global register.
3185  if (VD->getTLSKind() != VarDecl::TLS_None ||
3186  VD->hasAttr<OMPThreadPrivateDeclAttr>() ||
3187  (VD->getStorageClass() == SC_Register && VD->hasAttr<AsmLabelAttr>() &&
3188  !VD->isLocalVarDecl()))
3189  continue;
3190 
3191  // If the variable is used several times in the allocate directive, the same
3192  // allocator must be used.
3193  if (checkPreviousOMPAllocateAttribute(*this, DSAStack, RefExpr, VD,
3194  AllocatorKind, Allocator))
3195  continue;
3196 
3197  // OpenMP, 2.11.3 allocate Directive, Restrictions, C / C++
3198  // If a list item has static storage duration, the allocator expression in the
3199  // allocator clause must be a constant expression that evaluates to one of
3200  // the predefined memory allocator values.
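  // For example (illustrative names):
  //   int A;
  //   omp_allocator_handle_t MyAlloc = /* user-defined allocator */;
  //   #pragma omp allocate(A) allocator(MyAlloc)                // error
  //   #pragma omp allocate(A) allocator(omp_default_mem_alloc)  // OK: predefined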
3201  if (Allocator && VD->hasGlobalStorage()) {
3202  if (AllocatorKind == OMPAllocateDeclAttr::OMPUserDefinedMemAlloc) {
3203  Diag(Allocator->getExprLoc(),
3204  diag::err_omp_expected_predefined_allocator)
3205  << Allocator->getSourceRange();
3206  bool IsDecl = VD->isThisDeclarationADefinition(Context) ==
3207  VarDecl::DeclarationOnly;
3208  Diag(VD->getLocation(),
3209  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
3210  << VD;
3211  continue;
3212  }
3213  }
3214 
3215  Vars.push_back(RefExpr);
3216  applyOMPAllocateAttribute(*this, VD, AllocatorKind, Allocator, Alignment,
3217  DE->getSourceRange());
3218  }
3219  if (Vars.empty())
3220  return nullptr;
3221  if (!Owner)
3222  Owner = getCurLexicalContext();
3223  auto *D = OMPAllocateDecl::Create(Context, Owner, Loc, Vars, Clauses);
3224  D->setAccess(AS_public);
3225  Owner->addDecl(D);
3227 }
3228 
3231  ArrayRef<OMPClause *> ClauseList) {
3232  OMPRequiresDecl *D = nullptr;
3233  if (!CurContext->isFileContext()) {
3234  Diag(Loc, diag::err_omp_invalid_scope) << "requires";
3235  } else {
3236  D = CheckOMPRequiresDecl(Loc, ClauseList);
3237  if (D) {
3238  CurContext->addDecl(D);
3239  DSAStack->addRequiresDecl(D);
3240  }
3241  }
3243 }
3244 
3246  OpenMPDirectiveKind DKind,
3247  ArrayRef<std::string> Assumptions,
3248  bool SkippedClauses) {
3249  if (!SkippedClauses && Assumptions.empty())
3250  Diag(Loc, diag::err_omp_no_clause_for_directive)
3251  << llvm::omp::getAllAssumeClauseOptions()
3252  << llvm::omp::getOpenMPDirectiveName(DKind);
3253 
3254  auto *AA = AssumptionAttr::Create(Context, llvm::join(Assumptions, ","), Loc);
3255  if (DKind == llvm::omp::Directive::OMPD_begin_assumes) {
3256  OMPAssumeScoped.push_back(AA);
3257  return;
3258  }
3259 
3260  // Global assumes without assumption clauses are ignored.
3261  if (Assumptions.empty())
3262  return;
3263 
3264  assert(DKind == llvm::omp::Directive::OMPD_assumes &&
3265  "Unexpected omp assumption directive!");
3266  OMPAssumeGlobal.push_back(AA);
3267 
3268  // The OMPAssumeGlobal scope above will take care of new declarations but
3269  // we also want to apply the assumption to existing ones, e.g., to
3270  // declarations in included headers. To this end, we traverse all existing
3271  // declaration contexts and annotate function declarations here.
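  // For instance (illustrative), a file-scope
  //   #pragma omp assumes no_openmp_routines
  // also annotates functions that were already declared in previously
  // included headers.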
3272  SmallVector<DeclContext *, 8> DeclContexts;
3273  auto *Ctx = CurContext;
3274  while (Ctx->getLexicalParent())
3275  Ctx = Ctx->getLexicalParent();
3276  DeclContexts.push_back(Ctx);
3277  while (!DeclContexts.empty()) {
3278  DeclContext *DC = DeclContexts.pop_back_val();
3279  for (auto *SubDC : DC->decls()) {
3280  if (SubDC->isInvalidDecl())
3281  continue;
3282  if (auto *CTD = dyn_cast<ClassTemplateDecl>(SubDC)) {
3283  DeclContexts.push_back(CTD->getTemplatedDecl());
3284  for (auto *S : CTD->specializations())
3285  DeclContexts.push_back(S);
3286  continue;
3287  }
3288  if (auto *DC = dyn_cast<DeclContext>(SubDC))
3289  DeclContexts.push_back(DC);
3290  if (auto *F = dyn_cast<FunctionDecl>(SubDC)) {
3291  F->addAttr(AA);
3292  continue;
3293  }
3294  }
3295  }
3296 }
3297 
3299  assert(isInOpenMPAssumeScope() && "Not in OpenMP assumes scope!");
3300  OMPAssumeScoped.pop_back();
3301 }
3302 
3304  ArrayRef<OMPClause *> ClauseList) {
3305  /// For target specific clauses, the requires directive cannot be
3306  /// specified after the handling of any of the target regions in the
3307  /// current compilation unit.
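  // For example (illustrative):
  //   void f() {
  //   #pragma omp target
  //     { }
  //   }
  //   #pragma omp requires unified_shared_memory   // error: a target region
  //                                                // was already encountered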
3308  ArrayRef<SourceLocation> TargetLocations =
3309  DSAStack->getEncounteredTargetLocs();
3310  SourceLocation AtomicLoc = DSAStack->getAtomicDirectiveLoc();
3311  if (!TargetLocations.empty() || !AtomicLoc.isInvalid()) {
3312  for (const OMPClause *CNew : ClauseList) {
3313  // Check if any of the requires clauses affect target regions.
3314  if (isa<OMPUnifiedSharedMemoryClause>(CNew) ||
3315  isa<OMPUnifiedAddressClause>(CNew) ||
3316  isa<OMPReverseOffloadClause>(CNew) ||
3317  isa<OMPDynamicAllocatorsClause>(CNew)) {
3318  Diag(Loc, diag::err_omp_directive_before_requires)
3319  << "target" << getOpenMPClauseName(CNew->getClauseKind());
3320  for (SourceLocation TargetLoc : TargetLocations) {
3321  Diag(TargetLoc, diag::note_omp_requires_encountered_directive)
3322  << "target";
3323  }
3324  } else if (!AtomicLoc.isInvalid() &&
3325  isa<OMPAtomicDefaultMemOrderClause>(CNew)) {
3326  Diag(Loc, diag::err_omp_directive_before_requires)
3327  << "atomic" << getOpenMPClauseName(CNew->getClauseKind());
3328  Diag(AtomicLoc, diag::note_omp_requires_encountered_directive)
3329  << "atomic";
3330  }
3331  }
3332  }
3333 
3334  if (!DSAStack->hasDuplicateRequiresClause(ClauseList))
3336  ClauseList);
3337  return nullptr;
3338 }
3339 
3340 static void reportOriginalDsa(Sema &SemaRef, const DSAStackTy *Stack,
3341  const ValueDecl *D,
3342  const DSAStackTy::DSAVarData &DVar,
3343  bool IsLoopIterVar) {
3344  if (DVar.RefExpr) {
3345  SemaRef.Diag(DVar.RefExpr->getExprLoc(), diag::note_omp_explicit_dsa)
3346  << getOpenMPClauseName(DVar.CKind);
3347  return;
3348  }
3349  enum {
3350  PDSA_StaticMemberShared,
3351  PDSA_StaticLocalVarShared,
3352  PDSA_LoopIterVarPrivate,
3353  PDSA_LoopIterVarLinear,
3354  PDSA_LoopIterVarLastprivate,
3355  PDSA_ConstVarShared,
3356  PDSA_GlobalVarShared,
3357  PDSA_TaskVarFirstprivate,
3358  PDSA_LocalVarPrivate,
3359  PDSA_Implicit
3360  } Reason = PDSA_Implicit;
3361  bool ReportHint = false;
3362  auto ReportLoc = D->getLocation();
3363  auto *VD = dyn_cast<VarDecl>(D);
3364  if (IsLoopIterVar) {
3365  if (DVar.CKind == OMPC_private)
3366  Reason = PDSA_LoopIterVarPrivate;
3367  else if (DVar.CKind == OMPC_lastprivate)
3368  Reason = PDSA_LoopIterVarLastprivate;
3369  else
3370  Reason = PDSA_LoopIterVarLinear;
3371  } else if (isOpenMPTaskingDirective(DVar.DKind) &&
3372  DVar.CKind == OMPC_firstprivate) {
3373  Reason = PDSA_TaskVarFirstprivate;
3374  ReportLoc = DVar.ImplicitDSALoc;
3375  } else if (VD && VD->isStaticLocal())
3376  Reason = PDSA_StaticLocalVarShared;
3377  else if (VD && VD->isStaticDataMember())
3378  Reason = PDSA_StaticMemberShared;
3379  else if (VD && VD->isFileVarDecl())
3380  Reason = PDSA_GlobalVarShared;
3381  else if (D->getType().isConstant(SemaRef.getASTContext()))
3382  Reason = PDSA_ConstVarShared;
3383  else if (VD && VD->isLocalVarDecl() && DVar.CKind == OMPC_private) {
3384  ReportHint = true;
3385  Reason = PDSA_LocalVarPrivate;
3386  }
3387  if (Reason != PDSA_Implicit) {
3388  SemaRef.Diag(ReportLoc, diag::note_omp_predetermined_dsa)
3389  << Reason << ReportHint
3390  << getOpenMPDirectiveName(Stack->getCurrentDirective());
3391  } else if (DVar.ImplicitDSALoc.isValid()) {
3392  SemaRef.Diag(DVar.ImplicitDSALoc, diag::note_omp_implicit_dsa)
3393  << getOpenMPClauseName(DVar.CKind);
3394  }
3395 }
3396 
3397 static OpenMPMapClauseKind
3399  bool IsAggregateOrDeclareTarget) {
3400  OpenMPMapClauseKind Kind = OMPC_MAP_unknown;
3401  switch (M) {
3402  case OMPC_DEFAULTMAP_MODIFIER_alloc:
3403  Kind = OMPC_MAP_alloc;
3404  break;
3405  case OMPC_DEFAULTMAP_MODIFIER_to:
3406  Kind = OMPC_MAP_to;
3407  break;
3408  case OMPC_DEFAULTMAP_MODIFIER_from:
3409  Kind = OMPC_MAP_from;
3410  break;
3411  case OMPC_DEFAULTMAP_MODIFIER_tofrom:
3412  Kind = OMPC_MAP_tofrom;
3413  break;
3414  case OMPC_DEFAULTMAP_MODIFIER_present:
3415  // OpenMP 5.1 [2.21.7.3, defaultmap clause, Description]
3416  // If implicit-behavior is present, each variable referenced in the
3417  // construct in the category specified by variable-category is treated as if
3418  // it had been listed in a map clause with the map-type of alloc and
3419  // map-type-modifier of present.
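  // For example (illustrative names), under
  //   #pragma omp target defaultmap(present: aggregate)
  // a referenced aggregate S is mapped as if 'map(present, alloc: S)' had
  // been written.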
3420  Kind = OMPC_MAP_alloc;
3421  break;
3422  case OMPC_DEFAULTMAP_MODIFIER_firstprivate:
3424  llvm_unreachable("Unexpected defaultmap implicit behavior");
3425  case OMPC_DEFAULTMAP_MODIFIER_none:
3426  case OMPC_DEFAULTMAP_MODIFIER_default:
3428  // IsAggregateOrDeclareTarget could be true if:
3429  // 1. the implicit behavior for aggregate is tofrom
3430  // 2. it's a declare target link
3431  if (IsAggregateOrDeclareTarget) {
3432  Kind = OMPC_MAP_tofrom;
3433  break;
3434  }
3435  llvm_unreachable("Unexpected defaultmap implicit behavior");
3436  }
3437  assert(Kind != OMPC_MAP_unknown && "Expect map kind to be known");
3438  return Kind;
3439 }
3440 
3441 namespace {
3442 class DSAAttrChecker final : public StmtVisitor<DSAAttrChecker, void> {
3443  DSAStackTy *Stack;
3444  Sema &SemaRef;
3445  bool ErrorFound = false;
3446  bool TryCaptureCXXThisMembers = false;
3447  CapturedStmt *CS = nullptr;
3448  const static unsigned DefaultmapKindNum = OMPC_DEFAULTMAP_pointer + 1;
3449  llvm::SmallVector<Expr *, 4> ImplicitFirstprivate;
3450  llvm::SmallVector<Expr *, 4> ImplicitMap[DefaultmapKindNum][OMPC_MAP_delete];
3452  ImplicitMapModifier[DefaultmapKindNum];
3453  Sema::VarsWithInheritedDSAType VarsWithInheritedDSA;
3454  llvm::SmallDenseSet<const ValueDecl *, 4> ImplicitDeclarations;
3455 
3456  void VisitSubCaptures(OMPExecutableDirective *S) {
3457  // Check implicitly captured variables.
3458  if (!S->hasAssociatedStmt() || !S->getAssociatedStmt())
3459  return;
3460  if (S->getDirectiveKind() == OMPD_atomic ||
3461  S->getDirectiveKind() == OMPD_critical ||
3462  S->getDirectiveKind() == OMPD_section ||
3463  S->getDirectiveKind() == OMPD_master ||
3464  S->getDirectiveKind() == OMPD_masked ||
3465  isOpenMPLoopTransformationDirective(S->getDirectiveKind())) {
3466  Visit(S->getAssociatedStmt());
3467  return;
3468  }
3469  visitSubCaptures(S->getInnermostCapturedStmt());
3470  // Try to capture inner this->member references to generate correct mappings
3471  // and diagnostics.
3472  if (TryCaptureCXXThisMembers ||
3473  (isOpenMPTargetExecutionDirective(Stack->getCurrentDirective()) &&
3474  llvm::any_of(S->getInnermostCapturedStmt()->captures(),
3475  [](const CapturedStmt::Capture &C) {
3476  return C.capturesThis();
3477  }))) {
3478  bool SavedTryCaptureCXXThisMembers = TryCaptureCXXThisMembers;
3479  TryCaptureCXXThisMembers = true;
3480  Visit(S->getInnermostCapturedStmt()->getCapturedStmt());
3481  TryCaptureCXXThisMembers = SavedTryCaptureCXXThisMembers;
3482  }
3483  // In tasks, firstprivates are no longer captured, so they need to be
3484  // analyzed explicitly.
3485  if (isOpenMPTaskingDirective(S->getDirectiveKind()) &&
3486  !isOpenMPTaskLoopDirective(S->getDirectiveKind())) {
3487  for (OMPClause *C : S->clauses())
3488  if (auto *FC = dyn_cast<OMPFirstprivateClause>(C)) {
3489  for (Expr *Ref : FC->varlists())
3490  Visit(Ref);
3491  }
3492  }
3493  }
3494 
3495 public:
3496  void VisitDeclRefExpr(DeclRefExpr *E) {
3497  if (TryCaptureCXXThisMembers || E->isTypeDependent() ||
3500  return;
3501  if (auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
3502  // Check the data-sharing rules for the expressions in the clauses.
3503  if (!CS || (isa<OMPCapturedExprDecl>(VD) && !CS->capturesVariable(VD) &&
3504  !Stack->getTopDSA(VD, /*FromParent=*/false).RefExpr)) {
3505  if (auto *CED = dyn_cast<OMPCapturedExprDecl>(VD))
3506  if (!CED->hasAttr<OMPCaptureNoInitAttr>()) {
3507  Visit(CED->getInit());
3508  return;
3509  }
3510  } else if (VD->isImplicit() || isa<OMPCapturedExprDecl>(VD))
3511  // Do not analyze internal variables and do not include them in
3512  // implicit clauses.
3513  return;
3514  VD = VD->getCanonicalDecl();
3515  // Skip internally declared variables.
3516  if (VD->hasLocalStorage() && CS && !CS->capturesVariable(VD) &&
3517  !Stack->isImplicitTaskFirstprivate(VD))
3518  return;
3519  // Skip allocators in uses_allocators clauses.
3520  if (Stack->isUsesAllocatorsDecl(VD).hasValue())
3521  return;
3522 
3523  DSAStackTy::DSAVarData DVar = Stack->getTopDSA(VD, /*FromParent=*/false);
3524  // Check if the variable has an explicit DSA set and stop the analysis if so.
3525  if (DVar.RefExpr || !ImplicitDeclarations.insert(VD).second)
3526  return;
3527 
3528  // Skip internally declared static variables.
3530  OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
3531  if (VD->hasGlobalStorage() && CS && !CS->capturesVariable(VD) &&
3532  (Stack->hasRequiresDeclWithClause<OMPUnifiedSharedMemoryClause>() ||
3533  !Res || *Res != OMPDeclareTargetDeclAttr::MT_Link) &&
3534  !Stack->isImplicitTaskFirstprivate(VD))
3535  return;
3536 
3537  SourceLocation ELoc = E->getExprLoc();
3538  OpenMPDirectiveKind DKind = Stack->getCurrentDirective();
3539  // The default(none) clause requires that each variable referenced in the
3540  // construct that does not have a predetermined data-sharing attribute
3541  // have its data-sharing attribute explicitly determined by being listed
3542  // in a data-sharing attribute clause.
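  // For example (illustrative names):
  //   int X = 0;
  //   #pragma omp parallel default(none)            // error: X not listed
  //     X++;
  //   #pragma omp parallel default(none) shared(X)  // OK
  //     X++;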
3543  if (DVar.CKind == OMPC_unknown &&
3544  (Stack->getDefaultDSA() == DSA_none ||
3545  Stack->getDefaultDSA() == DSA_firstprivate) &&
3546  isImplicitOrExplicitTaskingRegion(DKind) &&
3547  VarsWithInheritedDSA.count(VD) == 0) {
3548  bool InheritedDSA = Stack->getDefaultDSA() == DSA_none;
3549  if (!InheritedDSA && Stack->getDefaultDSA() == DSA_firstprivate) {
3550  DSAStackTy::DSAVarData DVar =
3551  Stack->getImplicitDSA(VD, /*FromParent=*/false);
3552  InheritedDSA = DVar.CKind == OMPC_unknown;
3553  }
3554  if (InheritedDSA)
3555  VarsWithInheritedDSA[VD] = E;
3556  return;
3557  }
3558 
3559  // OpenMP 5.0 [2.19.7.2, defaultmap clause, Description]
3560  // If implicit-behavior is none, each variable referenced in the
3561  // construct that does not have a predetermined data-sharing attribute
3562  // and does not appear in a to or link clause on a declare target
3563  // directive must be listed in a data-mapping attribute clause, a
3564  // data-sharing attribute clause (including a data-sharing attribute
3565  // clause on a combined construct where target is one of the
3566  // constituent constructs), or an is_device_ptr clause.
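  // For example (illustrative names):
  //   int X;
  //   #pragma omp target defaultmap(none: scalar) map(tofrom: X)  // OK
  //     X = 1;
  //   #pragma omp target defaultmap(none: scalar)   // error: X needs an
  //     X = 1;                                      // explicit mapping or DSA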
3567  OpenMPDefaultmapClauseKind ClauseKind =
3569  if (SemaRef.getLangOpts().OpenMP >= 50) {
3570  bool IsModifierNone = Stack->getDefaultmapModifier(ClauseKind) ==
3571  OMPC_DEFAULTMAP_MODIFIER_none;
3572  if (DVar.CKind == OMPC_unknown && IsModifierNone &&
3573  VarsWithInheritedDSA.count(VD) == 0 && !Res) {
3574  // Only check for data-mapping attribute and is_device_ptr here
3575  // since we have already made sure that the declaration does not
3576  // have a data-sharing attribute above.
3577  if (!Stack->checkMappableExprComponentListsForDecl(
3578  VD, /*CurrentRegionOnly=*/true,
3580  MapExprComponents,
3581  OpenMPClauseKind) {
3582  auto MI = MapExprComponents.rbegin();
3583  auto ME = MapExprComponents.rend();
3584  return MI != ME && MI->getAssociatedDeclaration() == VD;
3585  })) {
3586  VarsWithInheritedDSA[VD] = E;
3587  return;
3588  }
3589  }
3590  }
3591  if (SemaRef.getLangOpts().OpenMP > 50) {
3592  bool IsModifierPresent = Stack->getDefaultmapModifier(ClauseKind) ==
3593  OMPC_DEFAULTMAP_MODIFIER_present;
3594  if (IsModifierPresent) {
3595  if (llvm::find(ImplicitMapModifier[ClauseKind],
3596  OMPC_MAP_MODIFIER_present) ==
3597  std::end(ImplicitMapModifier[ClauseKind])) {
3598  ImplicitMapModifier[ClauseKind].push_back(
3599  OMPC_MAP_MODIFIER_present);
3600  }
3601  }
3602  }
3603 
3604  if (isOpenMPTargetExecutionDirective(DKind) &&
3605  !Stack->isLoopControlVariable(VD).first) {
3606  if (!Stack->checkMappableExprComponentListsForDecl(
3607  VD, /*CurrentRegionOnly=*/true,
3609  StackComponents,
3610  OpenMPClauseKind) {
3611  if (SemaRef.LangOpts.OpenMP >= 50)
3612  return !StackComponents.empty();
3613  // The variable is used if it has been marked as an array, array
3614  // section, array shaping expression, or the variable itself.
3615  return StackComponents.size() == 1 ||
3616  std::all_of(
3617  std::next(StackComponents.rbegin()),
3618  StackComponents.rend(),
3619  [](const OMPClauseMappableExprCommon::
3620  MappableComponent &MC) {
3621  return MC.getAssociatedDeclaration() ==
3622  nullptr &&
3623  (isa<OMPArraySectionExpr>(
3624  MC.getAssociatedExpression()) ||
3625  isa<OMPArrayShapingExpr>(
3626  MC.getAssociatedExpression()) ||
3627  isa<ArraySubscriptExpr>(
3628  MC.getAssociatedExpression()));
3629  });
3630  })) {
3631  bool IsFirstprivate = false;
3632  // By default, lambdas are captured as firstprivate.
3633  if (const auto *RD =
3635  IsFirstprivate = RD->isLambda();
3636  IsFirstprivate =
3637  IsFirstprivate || (Stack->mustBeFirstprivate(ClauseKind) && !Res);
3638  if (IsFirstprivate) {
3639  ImplicitFirstprivate.emplace_back(E);
3640  } else {
3642  Stack->getDefaultmapModifier(ClauseKind);
3644  M, ClauseKind == OMPC_DEFAULTMAP_aggregate || Res);
3645  ImplicitMap[ClauseKind][Kind].emplace_back(E);
3646  }
3647  return;
3648  }
3649  }
3650 
3651  // OpenMP [2.9.3.6, Restrictions, p.2]
3652  // A list item that appears in a reduction clause of the innermost
3653  // enclosing worksharing or parallel construct may not be accessed in an
3654  // explicit task.
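  // For example (illustrative names):
  //   int Sum = 0;
  //   #pragma omp parallel reduction(+ : Sum)
  //   {
  //   #pragma omp task
  //     Sum++;   // error: Sum is a reduction list item of the enclosing parallel
  //   }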
3655  DVar = Stack->hasInnermostDSA(
3656  VD,
3657  [](OpenMPClauseKind C, bool AppliedToPointee) {
3658  return C == OMPC_reduction && !AppliedToPointee;
3659  },
3660  [](OpenMPDirectiveKind K) {
3661  return isOpenMPParallelDirective(K) ||
3663  },
3664  /*FromParent=*/true);
3665  if (isOpenMPTaskingDirective(DKind) && DVar.CKind == OMPC_reduction) {
3666  ErrorFound = true;
3667  SemaRef.Diag(ELoc, diag::err_omp_reduction_in_task);
3668  reportOriginalDsa(SemaRef, Stack, VD, DVar);
3669  return;
3670  }
3671 
3672  // Define implicit data-sharing attributes for task.
3673  DVar = Stack->getImplicitDSA(VD, /*FromParent=*/false);
3674  if (((isOpenMPTaskingDirective(DKind) && DVar.CKind != OMPC_shared) ||
3675  (Stack->getDefaultDSA() == DSA_firstprivate &&
3676  DVar.CKind == OMPC_firstprivate && !DVar.RefExpr)) &&
3677  !Stack->isLoopControlVariable(VD).first) {
3678  ImplicitFirstprivate.push_back(E);
3679  return;
3680  }
3681 
3682  // Store implicitly used globals with declare target link for parent
3683  // target.
3684  if (!isOpenMPTargetExecutionDirective(DKind) && Res &&
3685  *Res == OMPDeclareTargetDeclAttr::MT_Link) {
3686  Stack->addToParentTargetRegionLinkGlobals(E);
3687  return;
3688  }
3689  }
3690  }
3691  void VisitMemberExpr(MemberExpr *E) {
3692  if (E->isTypeDependent() || E->isValueDependent() ||
3694  return;
3695  auto *FD = dyn_cast<FieldDecl>(E->getMemberDecl());
3696  OpenMPDirectiveKind DKind = Stack->getCurrentDirective();
3697  if (auto *TE = dyn_cast<CXXThisExpr>(E->getBase()->IgnoreParenCasts())) {
3698  if (!FD)
3699  return;
3700  DSAStackTy::DSAVarData DVar = Stack->getTopDSA(FD, /*FromParent=*/false);
3701  // Check if the variable has an explicit DSA set and stop the analysis
3702  // if so.
3703  if (DVar.RefExpr || !ImplicitDeclarations.insert(FD).second)
3704  return;
3705 
3706  if (isOpenMPTargetExecutionDirective(DKind) &&
3707  !Stack->isLoopControlVariable(FD).first &&
3708  !Stack->checkMappableExprComponentListsForDecl(
3709  FD, /*CurrentRegionOnly=*/true,
3711  StackComponents,
3712  OpenMPClauseKind) {
3713  return isa<CXXThisExpr>(
3714  cast<MemberExpr>(
3715  StackComponents.back().getAssociatedExpression())
3716  ->getBase()
3717  ->IgnoreParens());
3718  })) {
3719  // OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, C/C++, p.3]
3720  // A bit-field cannot appear in a map clause.
3721  //
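  // For example (illustrative names):
  //   struct S { int B : 4; } V;
  //   #pragma omp target map(V.B)   // rejected: B is a bit-field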
3722  if (FD->isBitField())
3723  return;
3724 
3725  // Check to see if the member expression is referencing a class that
3726  // has already been explicitly mapped
3727  if (Stack->isClassPreviouslyMapped(TE->getType()))
3728  return;
3729 
3731  Stack->getDefaultmapModifier(OMPC_DEFAULTMAP_aggregate);
3732  OpenMPDefaultmapClauseKind ClauseKind =
3735  Modifier, /*IsAggregateOrDeclareTarget*/ true);
3736  ImplicitMap[ClauseKind][Kind].emplace_back(E);
3737  return;
3738  }
3739 
3740  SourceLocation ELoc = E->getExprLoc();
3741  // OpenMP [2.9.3.6, Restrictions, p.2]
3742  // A list item that appears in a reduction clause of the innermost
3743  // enclosing worksharing or parallel construct may not be accessed in
3744  // an explicit task.
3745  DVar = Stack->hasInnermostDSA(
3746  FD,
3747  [](OpenMPClauseKind C, bool AppliedToPointee) {
3748  return C == OMPC_reduction && !AppliedToPointee;
3749  },
3750  [](OpenMPDirectiveKind K) {
3751  return isOpenMPParallelDirective(K) ||
3753  },
3754  /*FromParent=*/true);
3755  if (isOpenMPTaskingDirective(DKind) && DVar.CKind == OMPC_reduction) {
3756  ErrorFound = true;
3757  SemaRef.Diag(ELoc, diag::err_omp_reduction_in_task);
3758  reportOriginalDsa(SemaRef, Stack, FD, DVar);
3759  return;
3760  }
3761 
3762  // Define implicit data-sharing attributes for task.
3763  DVar = Stack->getImplicitDSA(FD, /*FromParent=*/false);
3764  if (isOpenMPTaskingDirective(DKind) && DVar.CKind != OMPC_shared &&
3765  !Stack->isLoopControlVariable(FD).first) {
3766  // Check if there is a captured expression for the current field in the
3767  // region. Do not mark it as firstprivate unless there is no captured
3768  // expression.
3769  // TODO: try to make it firstprivate.
3770  if (DVar.CKind != OMPC_unknown)
3771  ImplicitFirstprivate.push_back(E);
3772  }
3773  return;
3774  }
3775  if (isOpenMPTargetExecutionDirective(DKind)) {
3777  if (!checkMapClauseExpressionBase(SemaRef, E, CurComponents, OMPC_map,
3778  Stack->getCurrentDirective(),
3779  /*NoDiagnose=*/true))
3780  return;
3781  const auto *VD = cast<ValueDecl>(
3782  CurComponents.back().getAssociatedDeclaration()->getCanonicalDecl());
3783  if (!Stack->checkMappableExprComponentListsForDecl(
3784  VD, /*CurrentRegionOnly=*/true,
3785  [&CurComponents](
3787  StackComponents,
3788  OpenMPClauseKind) {
3789  auto CCI = CurComponents.rbegin();
3790  auto CCE = CurComponents.rend();
3791  for (const auto &SC : llvm::reverse(StackComponents)) {
3792  // Do both expressions have the same kind?
3793  if (CCI->getAssociatedExpression()->getStmtClass() !=
3794  SC.getAssociatedExpression()->getStmtClass())
3795  if (!((isa<OMPArraySectionExpr>(
3796  SC.getAssociatedExpression()) ||
3797  isa<OMPArrayShapingExpr>(
3798  SC.getAssociatedExpression())) &&
3799  isa<ArraySubscriptExpr>(
3800  CCI->getAssociatedExpression())))
3801  return false;
3802 
3803  const Decl *CCD = CCI->getAssociatedDeclaration();
3804  const Decl *SCD = SC.getAssociatedDeclaration();
3805  CCD = CCD ? CCD->getCanonicalDecl() : nullptr;
3806  SCD = SCD ? SCD->getCanonicalDecl() : nullptr;
3807  if (SCD != CCD)
3808  return false;
3809  std::advance(CCI, 1);
3810  if (CCI == CCE)
3811  break;
3812  }
3813  return true;
3814  })) {
3815  Visit(E->getBase());
3816  }
3817  } else if (!TryCaptureCXXThisMembers) {
3818  Visit(E->getBase());
3819  }
3820  }
3821  void VisitOMPExecutableDirective(OMPExecutableDirective *S) {
3822  for (OMPClause *C : S->clauses()) {
3823  // Skip analysis of arguments of private clauses for task|target
3824  // directives.
3825  if (isa_and_nonnull<OMPPrivateClause>(C))
3826  continue;
3827  // Skip analysis of arguments of implicitly defined firstprivate clause
3828  // for task|target directives.
3829  // Skip analysis of arguments of implicitly defined map clause for target
3830  // directives.
3831  if (C && !((isa<OMPFirstprivateClause>(C) || isa<OMPMapClause>(C)) &&
3832  C->isImplicit() &&
3833  !isOpenMPTaskingDirective(Stack->getCurrentDirective()))) {
3834  for (Stmt *CC : C->children()) {
3835  if (CC)
3836  Visit(CC);
3837  }
3838  }
3839  }
3840  // Check implicitly captured variables.
3841  VisitSubCaptures(S);
3842  }
3843 
3844  void VisitOMPLoopTransformationDirective(OMPLoopTransformationDirective *S) {
3845  // Loop transformation directives do not introduce data sharing
3846  VisitStmt(S);
3847  }
3848 
3849  void VisitCallExpr(CallExpr *S) {
3850  for (Stmt *C : S->arguments()) {
3851  if (C) {
3852  // Check implicitly captured variables in task-based directives to see
3853  // if they must be firstprivatized.
3854  Visit(C);
3855  }
3856  }
3857  }
3858  void VisitStmt(Stmt *S) {
3859  for (Stmt *C : S->children()) {
3860  if (C) {
3861  // Check implicitly captured variables in task-based directives to see
3862  // if they must be firstprivatized.
3863  Visit(C);
3864  }
3865  }
3866  }
3867 
3868  void visitSubCaptures(CapturedStmt *S) {
3869  for (const CapturedStmt::Capture &Cap : S->captures()) {
3870  if (!Cap.capturesVariable() && !Cap.capturesVariableByCopy())
3871  continue;
3872  VarDecl *VD = Cap.getCapturedVar();
3873  // Do not try to map the variable if it or its sub-component was mapped
3874  // already.
3875  if (isOpenMPTargetExecutionDirective(Stack->getCurrentDirective()) &&
3876  Stack->checkMappableExprComponentListsForDecl(
3877  VD, /*CurrentRegionOnly=*/true,
3879  OpenMPClauseKind) { return true; }))
3880  continue;
3881  DeclRefExpr *DRE = buildDeclRefExpr(
3882  SemaRef, VD, VD->getType().getNonLValueExprType(SemaRef.Context),
3883  Cap.getLocation(), /*RefersToCapture=*/true);
3884  Visit(DRE);
3885  }
3886  }
3887  bool isErrorFound() const { return ErrorFound; }
3888  ArrayRef<Expr *> getImplicitFirstprivate() const {
3889  return ImplicitFirstprivate;
3890  }
3891  ArrayRef<Expr *> getImplicitMap(OpenMPDefaultmapClauseKind DK,
3892  OpenMPMapClauseKind MK) const {
3893  return ImplicitMap[DK][MK];
3894  }
3896  getImplicitMapModifier(OpenMPDefaultmapClauseKind Kind) const {
3897  return ImplicitMapModifier[Kind];
3898  }
3899  const Sema::VarsWithInheritedDSAType &getVarsWithInheritedDSA() const {
3900  return VarsWithInheritedDSA;
3901  }
3902 
3903  DSAAttrChecker(DSAStackTy *S, Sema &SemaRef, CapturedStmt *CS)
3904  : Stack(S), SemaRef(SemaRef), ErrorFound(false), CS(CS) {
3905  // Process declare target link variables for the target directives.
3906  if (isOpenMPTargetExecutionDirective(S->getCurrentDirective())) {
3907  for (DeclRefExpr *E : Stack->getLinkGlobals())
3908  Visit(E);
3909  }
3910  }
3911 };
3912 } // namespace
3913 
3914 static void handleDeclareVariantConstructTrait(DSAStackTy *Stack,
3915  OpenMPDirectiveKind DKind,
3916  bool ScopeEntry) {
3919  Traits.emplace_back(llvm::omp::TraitProperty::construct_target_target);
3920  if (isOpenMPTeamsDirective(DKind))
3921  Traits.emplace_back(llvm::omp::TraitProperty::construct_teams_teams);
3922  if (isOpenMPParallelDirective(DKind))
3923  Traits.emplace_back(llvm::omp::TraitProperty::construct_parallel_parallel);
3924  if (isOpenMPWorksharingDirective(DKind))
3925  Traits.emplace_back(llvm::omp::TraitProperty::construct_for_for);
3926  if (isOpenMPSimdDirective(DKind))
3927  Traits.emplace_back(llvm::omp::TraitProperty::construct_simd_simd);
3928  Stack->handleConstructTrait(Traits, ScopeEntry);
3929 }
3930 
3932  switch (DKind) {
3933  case OMPD_parallel:
3934  case OMPD_parallel_for:
3935  case OMPD_parallel_for_simd:
3936  case OMPD_parallel_sections:
3937  case OMPD_parallel_master:
3938  case OMPD_teams:
3939  case OMPD_teams_distribute:
3940  case OMPD_teams_distribute_simd: {
3941  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
3942  QualType KmpInt32PtrTy =
3943  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
3944  Sema::CapturedParamNameType Params[] = {
3945  std::make_pair(".global_tid.", KmpInt32PtrTy),
3946  std::make_pair(".bound_tid.", KmpInt32PtrTy),
3947  std::make_pair(StringRef(), QualType()) // __context with shared vars
3948  };
3949  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3950  Params);
3951  break;
3952  }
3953  case OMPD_target_teams:
3954  case OMPD_target_parallel:
3955  case OMPD_target_parallel_for:
3956  case OMPD_target_parallel_for_simd:
3957  case OMPD_target_teams_distribute:
3958  case OMPD_target_teams_distribute_simd: {
3959  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
3961  QualType KmpInt32PtrTy =
3962  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
3963  QualType Args[] = {VoidPtrTy};
3965  EPI.Variadic = true;
3966  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
3967  Sema::CapturedParamNameType Params[] = {
3968  std::make_pair(".global_tid.", KmpInt32Ty),
3969  std::make_pair(".part_id.", KmpInt32PtrTy),
3970  std::make_pair(".privates.", VoidPtrTy),
3971  std::make_pair(
3972  ".copy_fn.",
3973  Context.getPointerType(CopyFnType).withConst().withRestrict()),
3974  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
3975  std::make_pair(StringRef(), QualType()) // __context with shared vars
3976  };
3977  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3978  Params, /*OpenMPCaptureLevel=*/0);
3979  // Mark this captured region as inlined, because we don't use outlined
3980  // function directly.
3982  AlwaysInlineAttr::CreateImplicit(
3984  AlwaysInlineAttr::Keyword_forceinline));
3985  Sema::CapturedParamNameType ParamsTarget[] = {
3986  std::make_pair(StringRef(), QualType()) // __context with shared vars
3987  };
3988  // Start a captured region for 'target' with no implicit parameters.
3989  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3990  ParamsTarget, /*OpenMPCaptureLevel=*/1);
3991  Sema::CapturedParamNameType ParamsTeamsOrParallel[] = {
3992  std::make_pair(".global_tid.", KmpInt32PtrTy),
3993  std::make_pair(".bound_tid.", KmpInt32PtrTy),
3994  std::make_pair(StringRef(), QualType()) // __context with shared vars
3995  };
3996  // Start a captured region for 'teams' or 'parallel'. Both regions have
3997  // the same implicit parameters.
3998  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3999  ParamsTeamsOrParallel, /*OpenMPCaptureLevel=*/2);
4000  break;
4001  }
4002  case OMPD_target:
4003  case OMPD_target_simd: {
4004  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
4006  QualType KmpInt32PtrTy =
4007  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
4008  QualType Args[] = {VoidPtrTy};
4010  EPI.Variadic = true;
4011  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
4012  Sema::CapturedParamNameType Params[] = {
4013  std::make_pair(".global_tid.", KmpInt32Ty),
4014  std::make_pair(".part_id.", KmpInt32PtrTy),
4015  std::make_pair(".privates.", VoidPtrTy),
4016  std::make_pair(
4017  ".copy_fn.",
4018  Context.getPointerType(CopyFnType).withConst().withRestrict()),
4019  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
4020  std::make_pair(StringRef(), QualType()) // __context with shared vars
4021  };
4022  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4023  Params, /*OpenMPCaptureLevel=*/0);
4024  // Mark this captured region as inlined, because we don't use outlined
4025  // function directly.
4027  AlwaysInlineAttr::CreateImplicit(
4029  AlwaysInlineAttr::Keyword_forceinline));
4030  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4031  std::make_pair(StringRef(), QualType()),
4032  /*OpenMPCaptureLevel=*/1);
4033  break;
4034  }
4035  case OMPD_atomic:
4036  case OMPD_critical:
4037  case OMPD_section:
4038  case OMPD_master:
4039  case OMPD_masked:
4040  case OMPD_tile:
4041  case OMPD_unroll:
4042  break;
4043  case OMPD_loop:
4044  // TODO: 'loop' may require additional parameters depending on the binding.
4045  // Treat similarly to OMPD_simd/OMPD_for for now.
4046  case OMPD_simd:
4047  case OMPD_for:
4048  case OMPD_for_simd:
4049  case OMPD_sections:
4050  case OMPD_single:
4051  case OMPD_taskgroup:
4052  case OMPD_distribute:
4053  case OMPD_distribute_simd:
4054  case OMPD_ordered:
4055  case OMPD_target_data:
4056  case OMPD_dispatch: {
4057  Sema::CapturedParamNameType Params[] = {
4058  std::make_pair(StringRef(), QualType()) // __context with shared vars
4059  };
4060  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4061  Params);
4062  break;
4063  }
4064  case OMPD_task: {
4065  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
4067  QualType KmpInt32PtrTy =
4068  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
4069  QualType Args[] = {VoidPtrTy};
4071  EPI.Variadic = true;
4072  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
4073  Sema::CapturedParamNameType Params[] = {
4074  std::make_pair(".global_tid.", KmpInt32Ty),
4075  std::make_pair(".part_id.", KmpInt32PtrTy),
4076  std::make_pair(".privates.", VoidPtrTy),
4077  std::make_pair(
4078  ".copy_fn.",
4079  Context.getPointerType(CopyFnType).withConst().withRestrict()),
4080  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
4081  std::make_pair(StringRef(), QualType()) // __context with shared vars
4082  };
4083  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4084  Params);
4085  // Mark this captured region as inlined, because we don't use outlined
4086  // function directly.
4088  AlwaysInlineAttr::CreateImplicit(
4090  AlwaysInlineAttr::Keyword_forceinline));
4091  break;
4092  }
4093  case OMPD_taskloop:
4094  case OMPD_taskloop_simd:
4095  case OMPD_master_taskloop:
4096  case OMPD_master_taskloop_simd: {
4097  QualType KmpInt32Ty =
4098  Context.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1)
4099  .withConst();
4100  QualType KmpUInt64Ty =
4101  Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0)
4102  .withConst();
4103  QualType KmpInt64Ty =
4104  Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1)
4105  .withConst();
4107  QualType KmpInt32PtrTy =
4108  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
4109  QualType Args[] = {VoidPtrTy};
4111  EPI.Variadic = true;
4112  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
4113  Sema::CapturedParamNameType Params[] = {
4114  std::make_pair(".global_tid.", KmpInt32Ty),
4115  std::make_pair(".part_id.", KmpInt32PtrTy),
4116  std::make_pair(".privates.", VoidPtrTy),
4117  std::make_pair(
4118  ".copy_fn.",
4119  Context.getPointerType(CopyFnType).withConst().withRestrict()),
4120  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
4121  std::make_pair(".lb.", KmpUInt64Ty),
4122  std::make_pair(".ub.", KmpUInt64Ty),
4123  std::make_pair(".st.", KmpInt64Ty),
4124  std::make_pair(".liter.", KmpInt32Ty),
4125  std::make_pair(".reductions.", VoidPtrTy),
4126  std::make_pair(StringRef(), QualType()) // __context with shared vars
4127  };
4128  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4129  Params);
4130  // Mark this captured region as inlined, because we don't use outlined
4131  // function directly.
4133  AlwaysInlineAttr::CreateImplicit(
4135  AlwaysInlineAttr::Keyword_forceinline));
4136  break;
4137  }
4138  case OMPD_parallel_master_taskloop:
4139  case OMPD_parallel_master_taskloop_simd: {
4140  QualType KmpInt32Ty =
4141  Context.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1)
4142  .withConst();
4143  QualType KmpUInt64Ty =
4144  Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0)
4145  .withConst();
4146  QualType KmpInt64Ty =
4147  Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1)
4148  .withConst();
4150  QualType KmpInt32PtrTy =
4151  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
4152  Sema::CapturedParamNameType ParamsParallel[] = {
4153  std::make_pair(".global_tid.", KmpInt32PtrTy),
4154  std::make_pair(".bound_tid.", KmpInt32PtrTy),
4155  std::make_pair(StringRef(), QualType()) // __context with shared vars
4156  };
4157  // Start a captured region for 'parallel'.
4158  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4159  ParamsParallel, /*OpenMPCaptureLevel=*/0);
4160  QualType Args[] = {VoidPtrTy};
4162  EPI.Variadic = true;
4163  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
4164  Sema::CapturedParamNameType Params[] = {
4165  std::make_pair(".global_tid.", KmpInt32Ty),
4166  std::make_pair(".part_id.", KmpInt32PtrTy),
4167  std::make_pair(".privates.", VoidPtrTy),
4168  std::make_pair(
4169  ".copy_fn.",
4170  Context.getPointerType(CopyFnType).withConst().withRestrict()),
4171  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
4172  std::make_pair(".lb.", KmpUInt64Ty),
4173  std::make_pair(".ub.", KmpUInt64Ty),
4174  std::make_pair(".st.", KmpInt64Ty),
4175  std::make_pair(".liter.", KmpInt32Ty),
4176  std::make_pair(".reductions.", VoidPtrTy),
4177  std::make_pair(StringRef(), QualType()) // __context with shared vars
4178  };
4179  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4180  Params, /*OpenMPCaptureLevel=*/1);
4181  // Mark this captured region as inlined, because we don't use outlined
4182  // function directly.
4184  AlwaysInlineAttr::CreateImplicit(
4186  AlwaysInlineAttr::Keyword_forceinline));
4187  break;
4188  }
4189  case OMPD_distribute_parallel_for_simd:
4190  case OMPD_distribute_parallel_for: {
4191  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
4192  QualType KmpInt32PtrTy =
4193  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
4194  Sema::CapturedParamNameType Params[] = {
4195  std::make_pair(".global_tid.", KmpInt32PtrTy),
4196  std::make_pair(".bound_tid.", KmpInt32PtrTy),
4197  std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
4198  std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
4199  std::make_pair(StringRef(), QualType()) // __context with shared vars
4200  };
4201  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4202  Params);
4203  break;
4204  }
4205  case OMPD_target_teams_distribute_parallel_for:
4206  case OMPD_target_teams_distribute_parallel_for_simd: {
4207  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
4208  QualType KmpInt32PtrTy =
4209  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
4211 
4212  QualType Args[] = {VoidPtrTy};
4214  EPI.Variadic = true;
4215  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
4216  Sema::CapturedParamNameType Params[] = {
4217  std::make_pair(".global_tid.", KmpInt32Ty),
4218  std::make_pair(".part_id.", KmpInt32PtrTy),
4219  std::make_pair(".privates.", VoidPtrTy),
4220  std::make_pair(
4221  ".copy_fn.",
4222  Context.getPointerType(CopyFnType).withConst().withRestrict()),
4223  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
4224  std::make_pair(StringRef(), QualType()) // __context with shared vars
4225  };
4226  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4227  Params, /*OpenMPCaptureLevel=*/0);
4228  // Mark this captured region as inlined, because we don't use outlined
4229  // function directly.
4231  AlwaysInlineAttr::CreateImplicit(
4233  AlwaysInlineAttr::Keyword_forceinline));
4234  Sema::CapturedParamNameType ParamsTarget[] = {
4235  std::make_pair(StringRef(), QualType()) // __context with shared vars
4236  };
4237  // Start a captured region for 'target' with no implicit parameters.
4238  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4239  ParamsTarget, /*OpenMPCaptureLevel=*/1);
4240 
4241  Sema::CapturedParamNameType ParamsTeams[] = {
4242  std::make_pair(".global_tid.", KmpInt32PtrTy),
4243  std::make_pair(".bound_tid.", KmpInt32PtrTy),
4244  std::make_pair(StringRef(), QualType()) // __context with shared vars
4245  };
4246  // Start a captured region for 'teams'.
4247  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4248  ParamsTeams, /*OpenMPCaptureLevel=*/2);
4249 
4250  Sema::CapturedParamNameType ParamsParallel[] = {
4251  std::make_pair(".global_tid.", KmpInt32PtrTy),
4252  std::make_pair(".bound_tid.", KmpInt32PtrTy),
4253  std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
4254  std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
4255  std::make_pair(StringRef(), QualType()) // __context with shared vars
4256  };
4257  // Start a captured region for 'parallel'.
4259  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4260  ParamsParallel, /*OpenMPCaptureLevel=*/3);
4261  break;
4262  }
4263 
4264  case OMPD_teams_distribute_parallel_for:
4265  case OMPD_teams_distribute_parallel_for_simd: {
4266  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
4267  QualType KmpInt32PtrTy =
4268  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
4269 
4270  Sema::CapturedParamNameType ParamsTeams[] = {
4271  std::make_pair(".global_tid.", KmpInt32PtrTy),
4272  std::make_pair(".bound_tid.", KmpInt32PtrTy),
4273  std::make_pair(StringRef(), QualType()) // __context with shared vars
4274  };
4275  // Start a captured region for 'teams'.
4276  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4277  ParamsTeams, /*OpenMPCaptureLevel=*/0);
4278 
4279  Sema::CapturedParamNameType ParamsParallel[] = {
4280  std::make_pair(".global_tid.", KmpInt32PtrTy),
4281  std::make_pair(".bound_tid.", KmpInt32PtrTy),
4282  std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
4283  std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
4284  std::make_pair(StringRef(), QualType()) // __context with shared vars
4285  };
4286  // Start a captured region for 'parallel'.
4288  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4289  ParamsParallel, /*OpenMPCaptureLevel=*/1);
4290  break;
4291  }
4292  case OMPD_target_update:
4293  case OMPD_target_enter_data:
4294  case OMPD_target_exit_data: {
4295  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
4297  QualType KmpInt32PtrTy =
4298  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
4299  QualType Args[] = {VoidPtrTy};
4301  EPI.Variadic = true;
4302  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
4303  Sema::CapturedParamNameType Params[] = {
4304  std::make_pair(".global_tid.", KmpInt32Ty),
4305  std::make_pair(".part_id.", KmpInt32PtrTy),
4306  std::make_pair(".privates.", VoidPtrTy),
4307  std::make_pair(
4308  ".copy_fn.",
4309  Context.getPointerType(CopyFnType).withConst().withRestrict()),
4310  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
4311  std::make_pair(StringRef(), QualType()) // __context with shared vars
4312  };
4313  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
4314  Params);
4315  // Mark this captured region as inlined, because we don't use outlined
4316  // function directly.
4318  AlwaysInlineAttr::CreateImplicit(
4320  AlwaysInlineAttr::Keyword_forceinline));
4321  break;
4322  }
4323  case OMPD_threadprivate:
4324  case OMPD_allocate:
4325  case OMPD_taskyield:
4326  case OMPD_barrier:
4327  case OMPD_taskwait:
4328  case OMPD_cancellation_point:
4329  case OMPD_cancel:
4330  case OMPD_flush:
4331  case OMPD_depobj:
4332  case OMPD_scan:
4333  case OMPD_declare_reduction:
4334  case OMPD_declare_mapper:
4335  case OMPD_declare_simd:
4336  case OMPD_declare_target:
4337  case OMPD_end_declare_target:
4338  case OMPD_requires:
4339  case OMPD_declare_variant:
4340  case OMPD_begin_declare_variant:
4341  case OMPD_end_declare_variant:
4342  case OMPD_metadirective:
4343  llvm_unreachable("OpenMP Directive is not allowed");
4344  case OMPD_unknown:
4345  default:
4346  llvm_unreachable("Unknown OpenMP directive");
4347  }
4348  DSAStack->setContext(CurContext);
4349  handleDeclareVariantConstructTrait(DSAStack, DKind, /* ScopeEntry */ true);
4350 }
4351 
4352 int Sema::getNumberOfConstructScopes(unsigned Level) const {
4353  return getOpenMPCaptureLevels(DSAStack->getDirective(Level));
4354 }
4355 
4357  SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
4358  getOpenMPCaptureRegions(CaptureRegions, DKind);
4359  return CaptureRegions.size();
4360 }
4361 
4363  Expr *CaptureExpr, bool WithInit,
4364  bool AsExpression) {
4365  assert(CaptureExpr);
4366  ASTContext &C = S.getASTContext();
4367  Expr *Init = AsExpression ? CaptureExpr : CaptureExpr->IgnoreImpCasts();
4368  QualType Ty = Init->getType();
4369  if (CaptureExpr->getObjectKind() == OK_Ordinary && CaptureExpr->isGLValue()) {
4370  if (S.getLangOpts().CPlusPlus) {
4371  Ty = C.getLValueReferenceType(Ty);
4372  } else {
4373  Ty = C.getPointerType(Ty);
4374  ExprResult Res =
4375  S.CreateBuiltinUnaryOp(CaptureExpr->getExprLoc(), UO_AddrOf, Init);
4376  if (!Res.isUsable())
4377  return nullptr;
4378  Init = Res.get();
4379  }
4380  WithInit = true;
4381  }
4382  auto *CED = OMPCapturedExprDecl::Create(C, S.CurContext, Id, Ty,
4383  CaptureExpr->getBeginLoc());
4384  if (!WithInit)
4385  CED->addAttr(OMPCaptureNoInitAttr::CreateImplicit(C));
4386  S.CurContext->addHiddenDecl(CED);
4388  S.AddInitializerToDecl(CED, Init, /*DirectInit=*/false);
4389  return CED;
4390 }
4391 
4392 static DeclRefExpr *buildCapture(Sema &S, ValueDecl *D, Expr *CaptureExpr,
4393  bool WithInit) {
4394  OMPCapturedExprDecl *CD;
4395  if (VarDecl *VD = S.isOpenMPCapturedDecl(D))
4396  CD = cast<OMPCapturedExprDecl>(VD);
4397  else
4398  CD = buildCaptureDecl(S, D->getIdentifier(), CaptureExpr, WithInit,
4399  /*AsExpression=*/false);
4400  return buildDeclRefExpr(S, CD, CD->getType().getNonReferenceType(),
4401  CaptureExpr->getExprLoc());
4402 }
4403 
4404 static ExprResult buildCapture(Sema &S, Expr *CaptureExpr, DeclRefExpr *&Ref) {
4405  CaptureExpr = S.DefaultLvalueConversion(CaptureExpr).get();
4406  if (!Ref) {
4408  S, &S.getASTContext().Idents.get(".capture_expr."), CaptureExpr,
4409  /*WithInit=*/true, /*AsExpression=*/true);
4410  Ref = buildDeclRefExpr(S, CD, CD->getType().getNonReferenceType(),
4411  CaptureExpr->getExprLoc());
4412  }
4413  ExprResult Res = Ref;
4414  if (!S.getLangOpts().CPlusPlus &&
4415  CaptureExpr->getObjectKind() == OK_Ordinary && CaptureExpr->isGLValue() &&
4416  Ref->getType()->isPointerType()) {
4417  Res = S.CreateBuiltinUnaryOp(CaptureExpr->getExprLoc(), UO_Deref, Ref);
4418  if (!Res.isUsable())
4419  return ExprError();
4420  }
4421  return S.DefaultLvalueConversion(Res.get());
4422 }
4423 
4424 namespace {
4425 // OpenMP directives parsed in this section are represented as a
4426 // CapturedStatement with an associated statement. If a syntax error
4427 // is detected during the parsing of the associated statement, the
4428 // compiler must abort processing and close the CapturedStatement.
4429 //
4430 // Combined directives such as 'target parallel' have more than one
4431  // nested CapturedStatement. This RAII ensures that we unwind out
4432 // of all the nested CapturedStatements when an error is found.
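// For example (illustrative):
//   #pragma omp target parallel
//   { /* body */ }
// is represented as a CapturedStmt for 'target' that itself contains a
// CapturedStmt for 'parallel'; if the body fails to parse, both captured
// regions must be closed.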
4433 class CaptureRegionUnwinderRAII {
4434 private:
4435  Sema &S;
4436  bool &ErrorFound;
4437  OpenMPDirectiveKind DKind = OMPD_unknown;
4438 
4439 public:
4440  CaptureRegionUnwinderRAII(Sema &S, bool &ErrorFound,
4441  OpenMPDirectiveKind DKind)
4442  : S(S), ErrorFound(ErrorFound), DKind(DKind) {}
4443  ~CaptureRegionUnwinderRAII() {
4444  if (ErrorFound) {
4445  int ThisCaptureLevel = S.getOpenMPCaptureLevels(DKind);
4446  while (--ThisCaptureLevel >= 0)
4448  }
4449  }
4450 };
4451 } // namespace
4452 
4454  // Capture variables captured by reference in lambdas for target-based
4455  // directives.
4456  if (!CurContext->isDependentContext() &&
4457  (isOpenMPTargetExecutionDirective(DSAStack->getCurrentDirective()) ||
4459  DSAStack->getCurrentDirective()))) {
4460  QualType Type = V->getType();
4461  if (const auto *RD = Type.getCanonicalType()
4462  .getNonReferenceType()
4463  ->getAsCXXRecordDecl()) {
4464  bool SavedForceCaptureByReferenceInTargetExecutable =
4465  DSAStack->isForceCaptureByReferenceInTargetExecutable();
4466  DSAStack->setForceCaptureByReferenceInTargetExecutable(
4467  /*V=*/true);
4468  if (RD->isLambda()) {
4469  llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
4470  FieldDecl *ThisCapture;
4471  RD->getCaptureFields(Captures, ThisCapture);
4472  for (const LambdaCapture &LC : RD->captures()) {
4473  if (LC.getCaptureKind() == LCK_ByRef) {
4474  VarDecl *VD = LC.getCapturedVar();
4475  DeclContext *VDC = VD->getDeclContext();
4476  if (!VDC->Encloses(CurContext))
4477  continue;
4478  MarkVariableReferenced(LC.getLocation(), VD);
4479  } else if (LC.getCaptureKind() == LCK_This) {
4480  QualType ThisTy = getCurrentThisType();
4481  if (!ThisTy.isNull() &&
4482  Context.typesAreCompatible(ThisTy, ThisCapture->getType()))
4483  CheckCXXThisCapture(LC.getLocation());
4484  }
4485  }
4486  }
4487  DSAStack->setForceCaptureByReferenceInTargetExecutable(
4488  SavedForceCaptureByReferenceInTargetExecutable);
4489  }
4490  }
4491 }
4492 
4493 static bool checkOrderedOrderSpecified(Sema &S,
4494  const ArrayRef<OMPClause *> Clauses) {
4495  const OMPOrderedClause *Ordered = nullptr;
4496  const OMPOrderClause *Order = nullptr;
4497 
4498  for (const OMPClause *Clause : Clauses) {
4499  if (Clause->getClauseKind() == OMPC_ordered)
4500  Ordered = cast<OMPOrderedClause>(Clause);
4501  else if (Clause->getClauseKind() == OMPC_order) {
4502  Order = cast<OMPOrderClause>(Clause);
4503  if (Order->getKind() != OMPC_ORDER_concurrent)
4504  Order = nullptr;
4505  }
4506  if (Ordered && Order)
4507  break;
4508  }
4509 
4510  if (Ordered && Order) {
4511  S.Diag(Order->getKindKwLoc(),
4512  diag::err_omp_simple_clause_incompatible_with_ordered)
4513  << getOpenMPClauseName(OMPC_order)
4514  << getOpenMPSimpleClauseTypeName(OMPC_order, OMPC_ORDER_concurrent)
4515  << SourceRange(Order->getBeginLoc(), Order->getEndLoc());
4516  S.Diag(Ordered->getBeginLoc(), diag::note_omp_ordered_param)
4517  << 0 << SourceRange(Ordered->getBeginLoc(), Ordered->getEndLoc());
4518  return true;
4519  }
4520  return false;
4521 }
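// Illustrative example (hypothetical user code, not from this file): the
// check above rejects combining 'ordered' with 'order(concurrent)' on the
// same directive, e.g.
//
//   #pragma omp for ordered order(concurrent)   // diagnosed: order(concurrent)
//   for (int i = 0; i < n; ++i)                 // is incompatible with ordered
//     ;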
4522 
4523 StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
4524  ArrayRef<OMPClause *> Clauses) {
4525  handleDeclareVariantConstructTrait(DSAStack, DSAStack->getCurrentDirective(),
4526  /* ScopeEntry */ false);
4527  if (DSAStack->getCurrentDirective() == OMPD_atomic ||
4528  DSAStack->getCurrentDirective() == OMPD_critical ||
4529  DSAStack->getCurrentDirective() == OMPD_section ||
4530  DSAStack->getCurrentDirective() == OMPD_master ||
4531  DSAStack->getCurrentDirective() == OMPD_masked)
4532  return S;
4533 
4534  bool ErrorFound = false;
4535  CaptureRegionUnwinderRAII CaptureRegionUnwinder(
4536  *this, ErrorFound, DSAStack->getCurrentDirective());
4537  if (!S.isUsable()) {
4538  ErrorFound = true;
4539  return StmtError();
4540  }
4541 
4542  SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
4543  getOpenMPCaptureRegions(CaptureRegions, DSAStack->getCurrentDirective());
4544  OMPOrderedClause *OC = nullptr;
4545  OMPScheduleClause *SC = nullptr;
4546  SmallVector<const OMPLinearClause *, 4> LCs;
4547  SmallVector<const OMPClauseWithPreInit *, 4> PICs;
4548  // This is required for proper codegen.
4549  for (OMPClause *Clause : Clauses) {
4550  if (!LangOpts.OpenMPSimd &&
4551  isOpenMPTaskingDirective(DSAStack->getCurrentDirective()) &&
4552  Clause->getClauseKind() == OMPC_in_reduction) {
4553  // Capture taskgroup task_reduction descriptors inside the tasking regions
4554  // with the corresponding in_reduction items.
4555  auto *IRC = cast<OMPInReductionClause>(Clause);
4556  for (Expr *E : IRC->taskgroup_descriptors())
4557  if (E)
4558  MarkDeclarationsReferencedInExpr(E);
4559  }
4560  if (isOpenMPPrivate(Clause->getClauseKind()) ||
4561  Clause->getClauseKind() == OMPC_copyprivate ||
4562  (getLangOpts().OpenMPUseTLS &&
4563  getASTContext().getTargetInfo().isTLSSupported() &&
4564  Clause->getClauseKind() == OMPC_copyin)) {
4565  DSAStack->setForceVarCapturing(Clause->getClauseKind() == OMPC_copyin);
4566  // Mark all variables in private list clauses as used in inner region.
4567  for (Stmt *VarRef : Clause->children()) {
4568  if (auto *E = cast_or_null<Expr>(VarRef)) {
4569  MarkDeclarationsReferencedInExpr(E);
4570  }
4571  }
4572  DSAStack->setForceVarCapturing(/*V=*/false);
4573  } else if (isOpenMPLoopTransformationDirective(
4574  DSAStack->getCurrentDirective())) {
4575  assert(CaptureRegions.empty() &&
4576  "No captured regions in loop transformation directives.");
4577  } else if (CaptureRegions.size() > 1 ||
4578  CaptureRegions.back() != OMPD_unknown) {
4579  if (auto *C = OMPClauseWithPreInit::get(Clause))
4580  PICs.push_back(C);
4581  if (auto *C = OMPClauseWithPostUpdate::get(Clause)) {
4582  if (Expr *E = C->getPostUpdateExpr())
4583  MarkDeclarationsReferencedInExpr(E);
4584  }
4585  }
4586  if (Clause->getClauseKind() == OMPC_schedule)
4587  SC = cast<OMPScheduleClause>(Clause);
4588  else if (Clause->getClauseKind() == OMPC_ordered)
4589  OC = cast<OMPOrderedClause>(Clause);
4590  else if (Clause->getClauseKind() == OMPC_linear)
4591  LCs.push_back(cast<OMPLinearClause>(Clause));
4592  }
4593  // Capture allocator expressions if used.
4594  for (Expr *E : DSAStack->getInnerAllocators())
4595  MarkDeclarationsReferencedInExpr(E);
4596  // OpenMP, 2.7.1 Loop Construct, Restrictions
4597  // The nonmonotonic modifier cannot be specified if an ordered clause is
4598  // specified.
4599  if (SC &&
4600  (SC->getFirstScheduleModifier() == OMPC_SCHEDULE_MODIFIER_nonmonotonic ||
4601  SC->getSecondScheduleModifier() ==
4602  OMPC_SCHEDULE_MODIFIER_nonmonotonic) &&
4603  OC) {
4604  Diag(SC->getFirstScheduleModifier() == OMPC_SCHEDULE_MODIFIER_nonmonotonic
4605  ? SC->getFirstScheduleModifierLoc()
4606  : SC->getSecondScheduleModifierLoc(),
4607  diag::err_omp_simple_clause_incompatible_with_ordered)
4608  << getOpenMPClauseName(OMPC_schedule)
4609  << getOpenMPSimpleClauseTypeName(OMPC_schedule,
4610  OMPC_SCHEDULE_MODIFIER_nonmonotonic)
4611  << SourceRange(OC->getBeginLoc(), OC->getEndLoc());
4612  ErrorFound = true;
4613  }
4614  // OpenMP 5.0, 2.9.2 Worksharing-Loop Construct, Restrictions.
4615  // If an order(concurrent) clause is present, an ordered clause may not appear
4616  // on the same directive.
4617  if (checkOrderedOrderSpecified(*this, Clauses))
4618  ErrorFound = true;
4619  if (!LCs.empty() && OC && OC->getNumForLoops()) {
4620  for (const OMPLinearClause *C : LCs) {
4621  Diag(C->getBeginLoc(), diag::err_omp_linear_ordered)
4622  << SourceRange(OC->getBeginLoc(), OC->getEndLoc());
4623  }
4624  ErrorFound = true;
4625  }
4626  if (isOpenMPWorksharingDirective(DSAStack->getCurrentDirective()) &&
4627  isOpenMPSimdDirective(DSAStack->getCurrentDirective()) && OC &&
4628  OC->getNumForLoops()) {
4629  Diag(OC->getBeginLoc(), diag::err_omp_ordered_simd)
4630  << getOpenMPDirectiveName(DSAStack->getCurrentDirective());
4631  ErrorFound = true;
4632  }
4633  if (ErrorFound) {
4634  return StmtError();
4635  }
4636  StmtResult SR = S;
4637  unsigned CompletedRegions = 0;
4638  for (OpenMPDirectiveKind ThisCaptureRegion : llvm::reverse(CaptureRegions)) {
4639  // Mark all variables in private list clauses as used in inner region.
4640  // Required for proper codegen of combined directives.
4641  // TODO: add processing for other clauses.
4642  if (ThisCaptureRegion != OMPD_unknown) {
4643  for (const clang::OMPClauseWithPreInit *C : PICs) {
4644  OpenMPDirectiveKind CaptureRegion = C->getCaptureRegion();
4645  // Find the particular capture region for the clause if the
4646  // directive is a combined one with multiple capture regions.
4647  // If the directive is not a combined one, the capture region
4648  // associated with the clause is OMPD_unknown and is generated
4649  // only once.
4650  if (CaptureRegion == ThisCaptureRegion ||
4651  CaptureRegion == OMPD_unknown) {
4652  if (auto *DS = cast_or_null<DeclStmt>(C->getPreInitStmt())) {
4653  for (Decl *D : DS->decls())
4654  MarkVariableReferenced(D->getLocation(), cast<VarDecl>(D));
4655  }
4656  }
4657  }
4658  }
4659  if (ThisCaptureRegion == OMPD_target) {
4660  // Capture allocator traits in the target region. They are used implicitly
4661  // and, thus, are not captured by default.
4662  for (OMPClause *C : Clauses) {
4663  if (const auto *UAC = dyn_cast<OMPUsesAllocatorsClause>(C)) {
4664  for (unsigned I = 0, End = UAC->getNumberOfAllocators(); I < End;
4665  ++I) {
4666  OMPUsesAllocatorsClause::Data D = UAC->getAllocatorData(I);
4667  if (Expr *E = D.AllocatorTraits)
4668  MarkDeclarationsReferencedInExpr(E);
4669  }
4670  continue;
4671  }
4672  }
4673  }
4674  if (ThisCaptureRegion == OMPD_parallel) {
4675  // Capture temp arrays for inscan reductions and locals in aligned
4676  // clauses.
4677  for (OMPClause *C : Clauses) {
4678  if (auto *RC = dyn_cast<OMPReductionClause>(C)) {
4679  if (RC->getModifier() != OMPC_REDUCTION_inscan)
4680  continue;
4681  for (Expr *E : RC->copy_array_temps())
4682  MarkDeclarationsReferencedInExpr(E);
4683  }
4684  if (auto *AC = dyn_cast<OMPAlignedClause>(C)) {
4685  for (Expr *E : AC->varlists())
4686  MarkDeclarationsReferencedInExpr(E);
4687  }
4688  }
4689  }
4690  if (++CompletedRegions == CaptureRegions.size())
4691  DSAStack->setBodyComplete();
4692  SR = ActOnCapturedRegionEnd(SR.get());
4693  }
4694  return SR;
4695 }
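// Illustrative example (hypothetical user code, not from this file): among
// the restrictions enforced above, a nonmonotonic schedule modifier cannot be
// combined with an ordered clause on the same worksharing loop, e.g.
//
//   #pragma omp for schedule(nonmonotonic : dynamic) ordered   // diagnosed
//   for (int i = 0; i < n; ++i)
//     ;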
4696 
4697 static bool checkCancelRegion(Sema &SemaRef, OpenMPDirectiveKind CurrentRegion,
4698  OpenMPDirectiveKind CancelRegion,
4699  SourceLocation StartLoc) {
4700  // CancelRegion is only needed for cancel and cancellation_point.
4701  if (CurrentRegion != OMPD_cancel && CurrentRegion != OMPD_cancellation_point)
4702  return false;
4703 
4704  if (CancelRegion == OMPD_parallel || CancelRegion == OMPD_for ||
4705  CancelRegion == OMPD_sections || CancelRegion == OMPD_taskgroup)
4706  return false;
4707 
4708  SemaRef.Diag(StartLoc, diag::err_omp_wrong_cancel_region)
4709  << getOpenMPDirectiveName(CancelRegion);
4710  return true;
4711 }
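// Illustrative example (hypothetical user code, not from this file): the
// construct-type-clause of 'cancel' / 'cancellation point' must name
// parallel, for, sections, or taskgroup, which is what the check above
// enforces, e.g.
//
//   #pragma omp parallel
//   {
//   #pragma omp cancel parallel   // accepted: names the binding region kind
//   }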
4712 
4713 static bool checkNestingOfRegions(Sema &SemaRef, const DSAStackTy *Stack,
4714  OpenMPDirectiveKind CurrentRegion,
4715  const DeclarationNameInfo &CurrentName,
4716  OpenMPDirectiveKind CancelRegion,
4717  OpenMPBindClauseKind BindKind,
4718  SourceLocation StartLoc) {
4719  if (Stack->getCurScope()) {
4720  OpenMPDirectiveKind ParentRegion = Stack->getParentDirective();
4721  OpenMPDirectiveKind OffendingRegion = ParentRegion;
4722  bool NestingProhibited = false;
4723  bool CloseNesting = true;
4724  bool OrphanSeen = false;
4725  enum {
4726  NoRecommend,
4727  ShouldBeInParallelRegion,
4728  ShouldBeInOrderedRegion,
4729  ShouldBeInTargetRegion,
4730  ShouldBeInTeamsRegion,
4731  ShouldBeInLoopSimdRegion,
4732  } Recommend = NoRecommend;
4733  if (isOpenMPSimdDirective(ParentRegion) &&
4734  ((SemaRef.LangOpts.OpenMP <= 45 && CurrentRegion != OMPD_ordered) ||
4735  (SemaRef.LangOpts.OpenMP >= 50 && CurrentRegion != OMPD_ordered &&
4736  CurrentRegion != OMPD_simd && CurrentRegion != OMPD_atomic &&
4737  CurrentRegion != OMPD_scan))) {
4738  // OpenMP [2.16, Nesting of Regions]
4739  // OpenMP constructs may not be nested inside a simd region.
4740  // OpenMP [2.8.1,simd Construct, Restrictions]
4741  // An ordered construct with the simd clause is the only OpenMP
4742  // construct that can appear in the simd region.
4743  // Allowing a SIMD construct nested in another SIMD construct is an
4744  // extension. The OpenMP 4.5 spec does not allow it. Issue a warning
4745  // message.
4746  // OpenMP 5.0 [2.9.3.1, simd Construct, Restrictions]
4747  // The only OpenMP constructs that can be encountered during execution of
4748  // a simd region are the atomic construct, the loop construct, the simd
4749  // construct and the ordered construct with the simd clause.
4750  SemaRef.Diag(StartLoc, (CurrentRegion != OMPD_simd)
4751  ? diag::err_omp_prohibited_region_simd
4752  : diag::warn_omp_nesting_simd)
4753  << (SemaRef.LangOpts.OpenMP >= 50 ? 1 : 0);
4754  return CurrentRegion != OMPD_simd;
4755  }
4756  if (ParentRegion == OMPD_atomic) {
4757  // OpenMP [2.16, Nesting of Regions]
4758  // OpenMP constructs may not be nested inside an atomic region.
4759  SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region_atomic);
4760  return true;
4761  }
4762  if (CurrentRegion == OMPD_section) {
4763  // OpenMP [2.7.2, sections Construct, Restrictions]
4764  // Orphaned section directives are prohibited. That is, the section
4765  // directives must appear within the sections construct and must not be
4766  // encountered elsewhere in the sections region.
4767  if (ParentRegion != OMPD_sections &&
4768  ParentRegion != OMPD_parallel_sections) {
4769  SemaRef.Diag(StartLoc, diag::err_omp_orphaned_section_directive)
4770  << (ParentRegion != OMPD_unknown)
4771  << getOpenMPDirectiveName(ParentRegion);
4772  return true;
4773  }
4774  return false;
4775  }
4776  // Allow some constructs (except teams and cancellation constructs) to be
4777  // orphaned (they could be used in functions, called from OpenMP regions
4778  // with the required preconditions).
4779  if (ParentRegion == OMPD_unknown &&
4780  !isOpenMPNestingTeamsDirective(CurrentRegion) &&
4781  CurrentRegion != OMPD_cancellation_point &&
4782  CurrentRegion != OMPD_cancel && CurrentRegion != OMPD_scan)
4783  return false;
4784  if (CurrentRegion == OMPD_cancellation_point ||
4785  CurrentRegion == OMPD_cancel) {
4786  // OpenMP [2.16, Nesting of Regions]
4787  // A cancellation point construct for which construct-type-clause is
4788  // taskgroup must be nested inside a task construct. A cancellation
4789  // point construct for which construct-type-clause is not taskgroup must
4790  // be closely nested inside an OpenMP construct that matches the type
4791  // specified in construct-type-clause.
4792  // A cancel construct for which construct-type-clause is taskgroup must be
4793  // nested inside a task construct. A cancel construct for which
4794  // construct-type-clause is not taskgroup must be closely nested inside an
4795  // OpenMP construct that matches the type specified in
4796  // construct-type-clause.
4797  NestingProhibited =
4798  !((CancelRegion == OMPD_parallel &&
4799  (ParentRegion == OMPD_parallel ||
4800  ParentRegion == OMPD_target_parallel)) ||
4801  (CancelRegion == OMPD_for &&
4802  (ParentRegion == OMPD_for || ParentRegion == OMPD_parallel_for ||
4803  ParentRegion == OMPD_target_parallel_for ||
4804  ParentRegion == OMPD_distribute_parallel_for ||
4805  ParentRegion == OMPD_teams_distribute_parallel_for ||
4806  ParentRegion == OMPD_target_teams_distribute_parallel_for)) ||
4807  (CancelRegion == OMPD_taskgroup &&
4808  (ParentRegion == OMPD_task ||
4809  (SemaRef.getLangOpts().OpenMP >= 50 &&
4810  (ParentRegion == OMPD_taskloop ||
4811  ParentRegion == OMPD_master_taskloop ||
4812  ParentRegion == OMPD_parallel_master_taskloop)))) ||
4813  (CancelRegion == OMPD_sections &&
4814  (ParentRegion == OMPD_section || ParentRegion == OMPD_sections ||
4815  ParentRegion == OMPD_parallel_sections)));
4816  OrphanSeen = ParentRegion == OMPD_unknown;
4817  } else if (CurrentRegion == OMPD_master || CurrentRegion == OMPD_masked) {
4818  // OpenMP 5.1 [2.22, Nesting of Regions]
4819  // A masked region may not be closely nested inside a worksharing, loop,
4820  // atomic, task, or taskloop region.
4821  NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
4822  isOpenMPGenericLoopDirective(ParentRegion) ||
4823  isOpenMPTaskingDirective(ParentRegion);
4824  } else if (CurrentRegion == OMPD_critical && CurrentName.getName()) {
4825  // OpenMP [2.16, Nesting of Regions]
4826  // A critical region may not be nested (closely or otherwise) inside a
4827  // critical region with the same name. Note that this restriction is not
4828  // sufficient to prevent deadlock.
4829  SourceLocation PreviousCriticalLoc;
4830  bool DeadLock = Stack->hasDirective(
4831  [CurrentName, &PreviousCriticalLoc](OpenMPDirectiveKind K,
4832  const DeclarationNameInfo &DNI,
4833  SourceLocation Loc) {
4834  if (K == OMPD_critical && DNI.getName() == CurrentName.getName()) {
4835  PreviousCriticalLoc = Loc;
4836  return true;
4837  }
4838  return false;
4839  },
4840  false /* skip top directive */);
4841  if (DeadLock) {
4842  SemaRef.Diag(StartLoc,
4843  diag::err_omp_prohibited_region_critical_same_name)
4844  << CurrentName.getName();
4845  if (PreviousCriticalLoc.isValid())
4846  SemaRef.Diag(PreviousCriticalLoc,
4847  diag::note_omp_previous_critical_region);
4848  return true;
4849  }
4850  } else if (CurrentRegion == OMPD_barrier) {
4851  // OpenMP 5.1 [2.22, Nesting of Regions]
4852  // A barrier region may not be closely nested inside a worksharing, loop,
4853  // task, taskloop, critical, ordered, atomic, or masked region.
4854  NestingProhibited =
4855  isOpenMPWorksharingDirective(ParentRegion) ||
4856  isOpenMPGenericLoopDirective(ParentRegion) ||
4857  isOpenMPTaskingDirective(ParentRegion) ||
4858  ParentRegion == OMPD_master || ParentRegion == OMPD_masked ||
4859  ParentRegion == OMPD_parallel_master ||
4860  ParentRegion == OMPD_critical || ParentRegion == OMPD_ordered;
4861  } else if (isOpenMPWorksharingDirective(CurrentRegion) &&
4862  !isOpenMPParallelDirective(CurrentRegion) &&
4863  !isOpenMPTeamsDirective(CurrentRegion)) {
4864  // OpenMP 5.1 [2.22, Nesting of Regions]
4865  // A loop region that binds to a parallel region or a worksharing region
4866  // may not be closely nested inside a worksharing, loop, task, taskloop,
4867  // critical, ordered, atomic, or masked region.
4868  NestingProhibited =
4869  isOpenMPWorksharingDirective(ParentRegion) ||
4870  isOpenMPGenericLoopDirective(ParentRegion) ||
4871  isOpenMPTaskingDirective(ParentRegion) ||
4872  ParentRegion == OMPD_master || ParentRegion == OMPD_masked ||
4873  ParentRegion == OMPD_parallel_master ||
4874  ParentRegion == OMPD_critical || ParentRegion == OMPD_ordered;
4875  Recommend = ShouldBeInParallelRegion;
4876  } else if (CurrentRegion == OMPD_ordered) {
4877  // OpenMP [2.16, Nesting of Regions]
4878  // An ordered region may not be closely nested inside a critical,
4879  // atomic, or explicit task region.
4880  // An ordered region must be closely nested inside a loop region (or
4881  // parallel loop region) with an ordered clause.
4882  // OpenMP [2.8.1,simd Construct, Restrictions]
4883  // An ordered construct with the simd clause is the only OpenMP construct
4884  // that can appear in the simd region.
4885  NestingProhibited = ParentRegion == OMPD_critical ||
4886  isOpenMPTaskingDirective(ParentRegion) ||
4887  !(isOpenMPSimdDirective(ParentRegion) ||
4888  Stack->isParentOrderedRegion());
4889  Recommend = ShouldBeInOrderedRegion;
4890  } else if (isOpenMPNestingTeamsDirective(CurrentRegion)) {
4891  // OpenMP [2.16, Nesting of Regions]
4892  // If specified, a teams construct must be contained within a target
4893  // construct.
4894  NestingProhibited =
4895  (SemaRef.LangOpts.OpenMP <= 45 && ParentRegion != OMPD_target) ||
4896  (SemaRef.LangOpts.OpenMP >= 50 && ParentRegion != OMPD_unknown &&
4897  ParentRegion != OMPD_target);
4898  OrphanSeen = ParentRegion == OMPD_unknown;
4899  Recommend = ShouldBeInTargetRegion;
4900  } else if (CurrentRegion == OMPD_scan) {
4901  // OpenMP 5.0 [scan Directive, Restrictions]
4902  // A scan region must be closely nested inside a simd, for, for simd,
4903  // parallel for, or parallel for simd region, and requires OpenMP 5.0.
4904  NestingProhibited =
4905  SemaRef.LangOpts.OpenMP < 50 ||
4906  (ParentRegion != OMPD_simd && ParentRegion != OMPD_for &&
4907  ParentRegion != OMPD_for_simd && ParentRegion != OMPD_parallel_for &&
4908  ParentRegion != OMPD_parallel_for_simd);
4909  OrphanSeen = ParentRegion == OMPD_unknown;
4910  Recommend = ShouldBeInLoopSimdRegion;
4911  }
4912  if (!NestingProhibited &&
4913  !isOpenMPTargetExecutionDirective(CurrentRegion) &&
4914  !isOpenMPTargetDataManagementDirective(CurrentRegion) &&
4915  (ParentRegion == OMPD_teams || ParentRegion == OMPD_target_teams)) {
4916  // OpenMP [5.1, 2.22, Nesting of Regions]
4917  // distribute, distribute simd, distribute parallel worksharing-loop,
4918  // distribute parallel worksharing-loop SIMD, loop, parallel regions,
4919  // including any parallel regions arising from combined constructs,
4920  // omp_get_num_teams() regions, and omp_get_team_num() regions are the
4921  // only OpenMP regions that may be strictly nested inside the teams
4922  // region.
4923  NestingProhibited = !isOpenMPParallelDirective(CurrentRegion) &&
4924  !isOpenMPDistributeDirective(CurrentRegion) &&
4925  CurrentRegion != OMPD_loop;
4926  Recommend = ShouldBeInParallelRegion;
4927  }
4928  if (!NestingProhibited && CurrentRegion == OMPD_loop) {
4929  // OpenMP [5.1, 2.11.7, loop Construct, Restrictions]
4930  // If the bind clause is present on the loop construct and binding is
4931  // teams then the corresponding loop region must be strictly nested inside
4932  // a teams region.
4933  NestingProhibited = BindKind == OMPC_BIND_teams &&
4934  ParentRegion != OMPD_teams &&
4935  ParentRegion != OMPD_target_teams;
4936  Recommend = ShouldBeInTeamsRegion;
4937  }
4938  if (!NestingProhibited &&
4939  isOpenMPNestingDistributeDirective(CurrentRegion)) {
4940  // OpenMP 4.5 [2.17 Nesting of Regions]
4941  // The region associated with the distribute construct must be strictly
4942  // nested inside a teams region
4943  NestingProhibited =
4944  (ParentRegion != OMPD_teams && ParentRegion != OMPD_target_teams);
4945  Recommend = ShouldBeInTeamsRegion;
4946  }
4947  if (!NestingProhibited &&
4948  (isOpenMPTargetExecutionDirective(CurrentRegion) ||
4949  isOpenMPTargetDataManagementDirective(CurrentRegion))) {
4950  // OpenMP 4.5 [2.17 Nesting of Regions]
4951  // If a target, target update, target data, target enter data, or
4952  // target exit data construct is encountered during execution of a
4953  // target region, the behavior is unspecified.
4954  NestingProhibited = Stack->hasDirective(
4955  [&OffendingRegion](OpenMPDirectiveKind K, const DeclarationNameInfo &,
4956  SourceLocation) {
4957  if (isOpenMPTargetExecutionDirective(K)) {
4958  OffendingRegion = K;
4959  return true;
4960  }
4961  return false;
4962  },
4963  false /* don't skip top directive */);
4964  CloseNesting = false;
4965  }
4966  if (NestingProhibited) {
4967  if (OrphanSeen) {
4968  SemaRef.Diag(StartLoc, diag::err_omp_orphaned_device_directive)
4969  << getOpenMPDirectiveName(CurrentRegion) << Recommend;
4970  } else {
4971  SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region)
4972  << CloseNesting << getOpenMPDirectiveName(OffendingRegion)
4973  << Recommend << getOpenMPDirectiveName(CurrentRegion);
4974  }
4975  return true;
4976  }
4977  }
4978  return false;
4979 }
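// Illustrative example (hypothetical user code, not from this file): one of
// the nesting rules enforced above is that a worksharing region may not be
// closely nested inside another worksharing region, e.g.
//
//   #pragma omp for
//   for (int i = 0; i < n; ++i) {
//   #pragma omp for                 // diagnosed; should be in a separate
//     for (int j = 0; j < n; ++j)   // parallel region instead
//       ;
//   }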
4980 
4981 struct Kind2Unsigned {
4982  using argument_type = OpenMPDirectiveKind;
4983  unsigned operator()(argument_type DK) { return unsigned(DK); }
4984 };
4985 static bool checkIfClauses(Sema &S, OpenMPDirectiveKind Kind,
4986  ArrayRef<OMPClause *> Clauses,
4987  ArrayRef<OpenMPDirectiveKind> AllowedNameModifiers) {
4988  bool ErrorFound = false;
4989  unsigned NamedModifiersNumber = 0;
4990  llvm::IndexedMap<const OMPIfClause *, Kind2Unsigned> FoundNameModifiers;
4991  FoundNameModifiers.resize(llvm::omp::Directive_enumSize + 1);
4992  SmallVector<SourceLocation, 4> NameModifierLoc;
4993  for (const OMPClause *C : Clauses) {
4994  if (const auto *IC = dyn_cast_or_null<OMPIfClause>(C)) {
4995  // At most one if clause without a directive-name-modifier can appear on
4996  // the directive.
4997  OpenMPDirectiveKind CurNM = IC->getNameModifier();
4998  if (FoundNameModifiers[CurNM]) {
4999  S.Diag(C->getBeginLoc(), diag::err_omp_more_one_clause)
5000  << getOpenMPDirectiveName(Kind) << getOpenMPClauseName(OMPC_if)
5001  << (CurNM != OMPD_unknown) << getOpenMPDirectiveName(CurNM);
5002  ErrorFound = true;
5003  } else if (CurNM != OMPD_unknown) {
5004  NameModifierLoc.push_back(IC->getNameModifierLoc());
5005  ++NamedModifiersNumber;
5006  }
5007  FoundNameModifiers[CurNM] = IC;
5008  if (CurNM == OMPD_unknown)
5009  continue;
5010  // Check if the specified name modifier is allowed for the current
5011  // directive.
5012  // At most one if clause with the particular directive-name-modifier can
5013  // appear on the directive.
5014  if (!llvm::is_contained(AllowedNameModifiers, CurNM)) {
5015  S.Diag(IC->getNameModifierLoc(),
5016  diag::err_omp_wrong_if_directive_name_modifier)
5017  << getOpenMPDirectiveName(CurNM) << getOpenMPDirectiveName(Kind);
5018  ErrorFound = true;
5019  }
5020  }
5021  }
5022  // If any if clause on the directive includes a directive-name-modifier then
5023  // all if clauses on the directive must include a directive-name-modifier.
5024  if (FoundNameModifiers[OMPD_unknown] && NamedModifiersNumber > 0) {
5025  if (NamedModifiersNumber == AllowedNameModifiers.size()) {
5026  S.Diag(FoundNameModifiers[OMPD_unknown]->getBeginLoc(),
5027  diag::err_omp_no_more_if_clause);
5028  } else {
5029  std::string Values;
5030  std::string Sep(", ");
5031  unsigned AllowedCnt = 0;
5032  unsigned TotalAllowedNum =
5033  AllowedNameModifiers.size() - NamedModifiersNumber;
5034  for (unsigned Cnt = 0, End = AllowedNameModifiers.size(); Cnt < End;
5035  ++Cnt) {
5036  OpenMPDirectiveKind NM = AllowedNameModifiers[Cnt];
5037  if (!FoundNameModifiers[NM]) {
5038  Values += "'";
5039  Values += getOpenMPDirectiveName(NM);
5040  Values += "'";
5041  if (AllowedCnt + 2 == TotalAllowedNum)
5042  Values += " or ";
5043  else if (AllowedCnt + 1 != TotalAllowedNum)
5044  Values += Sep;
5045  ++AllowedCnt;
5046  }
5047  }
5048  S.Diag(FoundNameModifiers[OMPD_unknown]->getCondition()->getBeginLoc(),
5049  diag::err_omp_unnamed_if_clause)
5050  << (TotalAllowedNum > 1) << Values;
5051  }
5052  for (SourceLocation Loc : NameModifierLoc) {
5053  S.Diag(Loc, diag::note_omp_previous_named_if_clause);
5054  }
5055  ErrorFound = true;
5056  }
5057  return ErrorFound;
5058 }
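// Illustrative example (hypothetical user code, not from this file): on a
// combined directive, 'if' clauses may carry directive-name-modifiers, and
// once any 'if' clause is named, all of them must be, as checked above, e.g.
//
//   #pragma omp target parallel if(target : c1) if(parallel : c2)   // OK
//   #pragma omp target parallel if(c1) if(parallel : c2)            // diagnosed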
5059 
5060 static std::pair<ValueDecl *, bool> getPrivateItem(Sema &S, Expr *&RefExpr,
5061  SourceLocation &ELoc,
5062  SourceRange &ERange,
5063  bool AllowArraySection) {
5064  if (RefExpr->isTypeDependent() || RefExpr->isValueDependent() ||
5065  RefExpr->containsUnexpandedParameterPack())
5066  return std::make_pair(nullptr, true);
5067 
5068  // OpenMP [3.1, C/C++]
5069  // A list item is a variable name.
5070  // OpenMP [2.9.3.3, Restrictions, p.1]
5071  // A variable that is part of another variable (as an array or
5072  // structure element) cannot appear in a private clause.
5073  RefExpr = RefExpr->IgnoreParens();
5074  enum {
5075  NoArrayExpr = -1,
5076  ArraySubscript = 0,
5077  OMPArraySection = 1
5078  } IsArrayExpr = NoArrayExpr;
5079  if (AllowArraySection) {
5080  if (auto *ASE = dyn_cast_or_null<ArraySubscriptExpr>(RefExpr)) {
5081  Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
5082  while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
5083  Base = TempASE->getBase()->IgnoreParenImpCasts();
5084  RefExpr = Base;
5085  IsArrayExpr = ArraySubscript;
5086  } else if (auto *OASE = dyn_cast_or_null<OMPArraySectionExpr>(RefExpr)) {
5087  Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
5088  while (auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
5089  Base = TempOASE->getBase()->IgnoreParenImpCasts();
5090  while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
5091  Base = TempASE->getBase()->IgnoreParenImpCasts();
5092  RefExpr = Base;
5093  IsArrayExpr = OMPArraySection;
5094  }
5095  }
5096  ELoc = RefExpr->getExprLoc();
5097  ERange = RefExpr->getSourceRange();
5098  RefExpr = RefExpr->IgnoreParenImpCasts();
5099  auto *DE = dyn_cast_or_null<DeclRefExpr>(RefExpr);
5100  auto *ME = dyn_cast_or_null<MemberExpr>(RefExpr);
5101  if ((!DE || !isa<VarDecl>(DE->getDecl())) &&
5102  (S.getCurrentThisType().isNull() || !ME ||
5103  !isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts()) ||
5104  !isa<FieldDecl>(ME->getMemberDecl()))) {
5105  if (IsArrayExpr != NoArrayExpr) {
5106  S.Diag(ELoc, diag::err_omp_expected_base_var_name) << IsArrayExpr
5107  << ERange;
5108  } else {
5109  S.Diag(ELoc,
5110  AllowArraySection
5111  ? diag::err_omp_expected_var_name_member_expr_or_array_item
5112  : diag::err_omp_expected_var_name_member_expr)
5113  << (S.getCurrentThisType().isNull() ? 0 : 1) << ERange;
5114  }
5115  return std::make_pair(nullptr, false);
5116  }
5117  return std::make_pair(
5118  getCanonicalDecl(DE ? DE->getDecl() : ME->getMemberDecl()), false);
5119 }
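// Illustrative example (hypothetical user code, not from this file): list
// items accepted by the helper above are variable names (or, where allowed,
// the base of an array subscript/section), not parts of other variables, e.g.
//
//   #pragma omp parallel private(x)        // OK: x is a variable name
//   #pragma omp parallel private(s.field)  // diagnosed: member of another
//                                          // variable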
5120 
5121 namespace {
5122 /// Checks if the allocator is used in uses_allocators clause to be allowed in
5123 /// target regions.
5124 class AllocatorChecker final : public ConstStmtVisitor<AllocatorChecker, bool> {
5125  DSAStackTy *S = nullptr;
5126 
5127 public:
5128  bool VisitDeclRefExpr(const DeclRefExpr *E) {
5129  return S->isUsesAllocatorsDecl(E->getDecl())
5130  .getValueOr(
5131  DSAStackTy::UsesAllocatorsDeclKind::AllocatorTrait) ==
5132  DSAStackTy::UsesAllocatorsDeclKind::AllocatorTrait;
5133  }
5134  bool VisitStmt(const Stmt *S) {
5135  for (const Stmt *Child : S->children()) {
5136  if (Child && Visit(Child))
5137  return true;
5138  }
5139  return false;
5140  }
5141  explicit AllocatorChecker(DSAStackTy *S) : S(S) {}
5142 };
5143 } // namespace
5144 
5145 static void checkAllocateClauses(Sema &S, DSAStackTy *Stack,
5146  ArrayRef<OMPClause *> Clauses) {
5147  assert(!S.CurContext->isDependentContext() &&
5148  "Expected non-dependent context.");
5149  auto AllocateRange =
5150  llvm::make_filter_range(Clauses, OMPAllocateClause::classof);
5151  llvm::DenseMap<CanonicalDeclPtr<Decl>, CanonicalDeclPtr<VarDecl>>
5152  DeclToCopy;
5153  auto PrivateRange = llvm::make_filter_range(Clauses, [](const OMPClause *C) {
5154  return isOpenMPPrivate(C->getClauseKind());
5155  });
5156  for (OMPClause *Cl : PrivateRange) {
5157  MutableArrayRef<Expr *>::iterator I, It, Et;
5158  if (Cl->getClauseKind() == OMPC_private) {
5159  auto *PC = cast<OMPPrivateClause>(Cl);
5160  I = PC->private_copies().begin();
5161  It = PC->varlist_begin();
5162  Et = PC->varlist_end();
5163  } else if (Cl->getClauseKind() == OMPC_firstprivate) {
5164  auto *PC = cast<OMPFirstprivateClause>(Cl);
5165  I = PC->private_copies().begin();
5166  It = PC->varlist_begin();
5167  Et = PC->varlist_end();
5168  } else if (Cl->getClauseKind() == OMPC_lastprivate) {
5169  auto *PC = cast<OMPLastprivateClause>(Cl);
5170  I = PC->private_copies().begin();
5171  It = PC->varlist_begin();
5172  Et = PC->varlist_end();
5173  } else if (Cl->getClauseKind() == OMPC_linear) {
5174  auto *PC = cast<OMPLinearClause>(Cl);
5175  I = PC->privates().begin();
5176  It = PC->varlist_begin();
5177  Et = PC->varlist_end();
5178  } else if (Cl->getClauseKind() == OMPC_reduction) {
5179  auto *PC = cast<OMPReductionClause>(Cl);
5180  I = PC->privates().begin();
5181  It = PC->varlist_begin();
5182  Et = PC->varlist_end();
5183  } else if (Cl->getClauseKind() == OMPC_task_reduction) {
5184  auto *PC = cast<OMPTaskReductionClause>(Cl);
5185  I = PC->privates().begin();
5186  It = PC->varlist_begin();
5187  Et = PC->varlist_end();
5188  } else if (Cl->getClauseKind() == OMPC_in_reduction) {
5189  auto *PC = cast<OMPInReductionClause>(Cl);
5190  I = PC->privates().begin();
5191  It = PC->varlist_begin();
5192  Et = PC->varlist_end();
5193  } else {
5194  llvm_unreachable("Expected private clause.");
5195  }
5196  for (Expr *E : llvm::make_range(It, Et)) {
5197  if (!*I) {
5198  ++I;
5199  continue;
5200  }
5201  SourceLocation ELoc;
5202  SourceRange ERange;
5203  Expr *SimpleRefExpr = E;
5204  auto Res = getPrivateItem(S, SimpleRefExpr, ELoc, ERange,
5205  /*AllowArraySection=*/true);
5206  DeclToCopy.try_emplace(Res.first,
5207  cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()));
5208  ++I;
5209  }
5210  }
5211  for (OMPClause *C : AllocateRange) {
5212  auto *AC = cast<OMPAllocateClause>(C);
5213  if (S.getLangOpts().OpenMP >= 50 &&
5214  !Stack->hasRequiresDeclWithClause<OMPDynamicAllocatorsClause>() &&
5215  isOpenMPTargetExecutionDirective(Stack->getCurrentDirective()) &&
5216  AC->getAllocator()) {
5217  Expr *Allocator = AC->getAllocator();
5218  // OpenMP, 2.12.5 target Construct
5219  // Memory allocators that do not appear in a uses_allocators clause cannot
5220  // appear as an allocator in an allocate clause or be used in the target
5221  // region unless a requires directive with the dynamic_allocators clause
5222  // is present in the same compilation unit.
5223  AllocatorChecker Checker(Stack);
5224  if (Checker.Visit(Allocator))
5225  S.Diag(Allocator->getExprLoc(),
5226  diag::err_omp_allocator_not_in_uses_allocators)
5227  << Allocator->getSourceRange();
5228  }
5229  OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind =
5230  getAllocatorKind(S, Stack, AC->getAllocator());
5231  // OpenMP, 2.11.4 allocate Clause, Restrictions.
5232  // For task, taskloop or target directives, allocation requests to memory
5233  // allocators with the trait access set to thread result in unspecified
5234  // behavior.
5235  if (AllocatorKind == OMPAllocateDeclAttr::OMPThreadMemAlloc &&
5236  (isOpenMPTaskingDirective(Stack->getCurrentDirective()) ||
5237  isOpenMPTargetExecutionDirective(Stack->getCurrentDirective()))) {
5238  S.Diag(AC->getAllocator()->getExprLoc(),
5239  diag::warn_omp_allocate_thread_on_task_target_directive)
5240  << getOpenMPDirectiveName(Stack->getCurrentDirective());
5241  }
5242  for (Expr *E : AC->varlists()) {
5243  SourceLocation ELoc;
5244  SourceRange ERange;
5245  Expr *SimpleRefExpr = E;
5246  auto Res = getPrivateItem(S, SimpleRefExpr, ELoc, ERange);
5247  ValueDecl *VD = Res.first;
5248  DSAStackTy::DSAVarData Data = Stack->getTopDSA(VD, /*FromParent=*/false);
5249  if (!isOpenMPPrivate(Data.CKind)) {
5250  S.Diag(E->getExprLoc(),
5251  diag::err_omp_expected_private_copy_for_allocate);
5252  continue;
5253  }
5254  VarDecl *PrivateVD = DeclToCopy[VD];
5255  if (checkPreviousOMPAllocateAttribute(S, Stack, E, PrivateVD,
5256  AllocatorKind, AC->getAllocator()))
5257  continue;
5258  // Placeholder until allocate clause supports align modifier.
5259  Expr *Alignment = nullptr;
5260  applyOMPAllocateAttribute(S, PrivateVD, AllocatorKind, AC->getAllocator(),
5261  Alignment, E->getSourceRange());
5262  }
5263  }
5264 }
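// Illustrative example (hypothetical user code, not from this file): in a
// target region without 'requires dynamic_allocators', an allocator used in
// an 'allocate' clause must also appear in 'uses_allocators', and each list
// item must have a private data-sharing attribute, e.g.
//
//   #pragma omp target uses_allocators(omp_cgroup_mem_alloc) \
//                      private(x) allocate(omp_cgroup_mem_alloc : x)   // OK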
5265 
5266 namespace {
5267 /// Rewrite statements and expressions for Sema \p Actions CurContext.
5268 ///
5269 /// Used to wrap already parsed statements/expressions into a new CapturedStmt
5270 /// context. DeclRefExpr used inside the new context are changed to refer to the
5271 /// captured variable instead.
5272 class CaptureVars : public TreeTransform<CaptureVars> {
5273  using BaseTransform = TreeTransform<CaptureVars>;
5274 
5275 public:
5276  CaptureVars(Sema &Actions) : BaseTransform(Actions) {}
5277 
5278  bool AlwaysRebuild() { return true; }
5279 };
5280 } // namespace
5281 
5282 static VarDecl *precomputeExpr(Sema &Actions,
5283  SmallVectorImpl<Stmt *> &BodyStmts, Expr *E,
5284  StringRef Name) {
5285  Expr *NewE = AssertSuccess(CaptureVars(Actions).TransformExpr(E));
5286  VarDecl *NewVar = buildVarDecl(Actions, {}, NewE->getType(), Name, nullptr,
5287  dyn_cast<DeclRefExpr>(E->IgnoreImplicit()));
5288  auto *NewDeclStmt = cast<DeclStmt>(AssertSuccess(
5289  Actions.ActOnDeclStmt(Actions.ConvertDeclToDeclGroup(NewVar), {}, {})));
5290  Actions.AddInitializerToDecl(NewDeclStmt->getSingleDecl(), NewE, false);
5291  BodyStmts.push_back(NewDeclStmt);
5292  return NewVar;
5293 }
5294 
5295 /// Create a closure that computes the number of iterations of a loop.
5296 ///
5297 /// \param Actions The Sema object.
5298 /// \param LogicalTy Type for the logical iteration number.
5299 /// \param Rel Comparison operator of the loop condition.
5300 /// \param StartExpr Value of the loop counter at the first iteration.
5301 /// \param StopExpr Expression the loop counter is compared against in the loop
5302 /// condition. \param StepExpr Amount of increment after each iteration.
5303 ///
5304 /// \return Closure (CapturedStmt) of the distance calculation.
5305 static CapturedStmt *buildDistanceFunc(Sema &Actions, QualType LogicalTy,
5306  BinaryOperator::Opcode Rel,
5307  Expr *StartExpr, Expr *StopExpr,
5308  Expr *StepExpr) {
5309  ASTContext &Ctx = Actions.getASTContext();
5310  TypeSourceInfo *LogicalTSI = Ctx.getTrivialTypeSourceInfo(LogicalTy);
5311 
5312  // Captured regions currently don't support return values, so we use an
5313  // out-parameter instead. All inputs are implicit captures.
5314  // TODO: Instead of capturing each DeclRefExpr occurring in
5315  // StartExpr/StopExpr/Step, these could also be passed as a value capture.
5316  QualType ResultTy = Ctx.getLValueReferenceType(LogicalTy);
5317  Sema::CapturedParamNameType Params[] = {{"Distance", ResultTy},
5318  {StringRef(), QualType()}};
5319  Actions.ActOnCapturedRegionStart({}, nullptr, CR_Default, Params);
5320 
5321  Stmt *Body;
5322  {
5323  Sema::CompoundScopeRAII CompoundScope(Actions);
5324  CapturedDecl *CS = cast<CapturedDecl>(Actions.CurContext);
5325 
5326  // Get the LValue expression for the result.
5327  ImplicitParamDecl *DistParam = CS->getParam(0);
5328  DeclRefExpr *DistRef = Actions.BuildDeclRefExpr(
5329  DistParam, LogicalTy, VK_LValue, {}, nullptr, nullptr, {}, nullptr);
5330 
5331  SmallVector<Stmt *, 4> BodyStmts;
5332 
5333  // Capture all referenced variable references.
5334  // TODO: Instead of computing NewStart/NewStop/NewStep inside the
5335  // CapturedStmt, we could compute them before and capture the result, to be
5336  // used jointly with the LoopVar function.
5337  VarDecl *NewStart = precomputeExpr(Actions, BodyStmts, StartExpr, ".start");
5338  VarDecl *NewStop = precomputeExpr(Actions, BodyStmts, StopExpr, ".stop");
5339  VarDecl *NewStep = precomputeExpr(Actions, BodyStmts, StepExpr, ".step");
5340  auto BuildVarRef = [&](VarDecl *VD) {
5341  return buildDeclRefExpr(Actions, VD, VD->getType(), {});
5342  };
5343 
5345  Ctx, llvm::APInt(Ctx.getIntWidth(LogicalTy), 0), LogicalTy, {});
5346  Expr *Dist;
5347  if (Rel == BO_NE) {
5348  // When using a != comparison, the increment can be +1 or -1. This can be
5349  // dynamic at runtime, so we need to check for the direction.
5350  Expr *IsNegStep = AssertSuccess(
5351  Actions.BuildBinOp(nullptr, {}, BO_LT, BuildVarRef(NewStep), Zero));
5352 
5353  // Positive increment.
5354  Expr *ForwardRange = AssertSuccess(Actions.BuildBinOp(
5355  nullptr, {}, BO_Sub, BuildVarRef(NewStop), BuildVarRef(NewStart)));
5356  ForwardRange = AssertSuccess(
5357  Actions.BuildCStyleCastExpr({}, LogicalTSI, {}, ForwardRange));
5358  Expr *ForwardDist = AssertSuccess(Actions.BuildBinOp(
5359  nullptr, {}, BO_Div, ForwardRange, BuildVarRef(NewStep)));
5360 
5361  // Negative increment.
5362  Expr *BackwardRange = AssertSuccess(Actions.BuildBinOp(
5363  nullptr, {}, BO_Sub, BuildVarRef(NewStart), BuildVarRef(NewStop)));
5364  BackwardRange = AssertSuccess(
5365  Actions.BuildCStyleCastExpr({}, LogicalTSI, {}, BackwardRange));
5366  Expr *NegIncAmount = AssertSuccess(
5367  Actions.BuildUnaryOp(nullptr, {}, UO_Minus, BuildVarRef(NewStep)));
5368  Expr *BackwardDist = AssertSuccess(
5369  Actions.BuildBinOp(nullptr, {}, BO_Div, BackwardRange, NegIncAmount));
5370 
5371  // Use the appropriate case.
5372  Dist = AssertSuccess(Actions.ActOnConditionalOp(
5373  {}, {}, IsNegStep, BackwardDist, ForwardDist));
5374  } else {
5375  assert((Rel == BO_LT || Rel == BO_LE || Rel == BO_GE || Rel == BO_GT) &&
5376  "Expected one of these relational operators");
5377 
5378  // We can derive the direction from any other comparison operator. It is
5379  // not well-formed OpenMP if Step increments/decrements in the other
5380  // direction. Also check whether at least the first iteration passes the
5381  // loop condition.
5382  Expr *HasAnyIteration = AssertSuccess(Actions.BuildBinOp(
5383  nullptr, {}, Rel, BuildVarRef(NewStart), BuildVarRef(NewStop)));
5384 
5385  // Compute the range between first and last counter value.
5386  Expr *Range;
5387  if (Rel == BO_GE || Rel == BO_GT)
5388  Range = AssertSuccess(Actions.BuildBinOp(
5389  nullptr, {}, BO_Sub, BuildVarRef(NewStart), BuildVarRef(NewStop)));
5390  else
5391  Range = AssertSuccess(Actions.BuildBinOp(
5392  nullptr, {}, BO_Sub, BuildVarRef(NewStop), BuildVarRef(NewStart)));
5393 
5394  // Ensure unsigned range space.
5395  Range =
5396  AssertSuccess(Actions.BuildCStyleCastExpr({}, LogicalTSI, {}, Range));
5397 
5398  if (Rel == BO_LE || Rel == BO_GE) {
5399  // Add one to the range if the relational operator is inclusive.
5400  Range = AssertSuccess(Actions.BuildBinOp(
5401  nullptr, {}, BO_Add, Range,
5402  Actions.ActOnIntegerConstant(SourceLocation(), 1).get()));
5403  }
5404 
5405  // Divide by the absolute step amount.
5406  Expr *Divisor = BuildVarRef(NewStep);
5407  if (Rel == BO_GE || Rel == BO_GT)
5408  Divisor =
5409  AssertSuccess(Actions.BuildUnaryOp(nullptr, {}, UO_Minus, Divisor));
5410  Dist = AssertSuccess(
5411  Actions.BuildBinOp(nullptr, {}, BO_Div, Range, Divisor));
5412 
5413  // If there is not at least one iteration, the range contains garbage. Fix
5414  // to zero in this case.
5415  Dist = AssertSuccess(
5416  Actions.ActOnConditionalOp({}, {}, HasAnyIteration, Dist, Zero));
5417  }
5418 
5419  // Assign the result to the out-parameter.
5420  Stmt *ResultAssign = AssertSuccess(Actions.BuildBinOp(
5421  Actions.getCurScope(), {}, BO_Assign, DistRef, Dist));
5422  BodyStmts.push_back(ResultAssign);
5423 
5424  Body = AssertSuccess(Actions.ActOnCompoundStmt({}, {}, BodyStmts, false));
5425  }
5426 
5427  return cast<CapturedStmt>(
5428  AssertSuccess(Actions.ActOnCapturedRegionEnd(Body)));
5429 }
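// Worked example (illustrative sketch, not from this file): for a user loop
// written as
//   for (int i = 7; i < 43; i += 3)
// the closure built above receives Start=7, Stop=43, Step=3 and Rel=BO_LT and
// computes Distance = (unsigned)(43 - 7) / 3 = 12, guarded by 7 < 43 so that
// an empty range yields 0. For Rel == BO_NE the sign of Step selects between
// the forward and backward distance at run time.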
5430 
5431 /// Create a closure that computes the loop variable from the logical iteration
5432 /// number.
5433 ///
5434 /// \param Actions The Sema object.
5435 /// \param LoopVarTy Type for the loop variable used for result value.
5436 /// \param LogicalTy Type for the logical iteration number.
5437 /// \param StartExpr Value of the loop counter at the first iteration.
5438 /// \param Step Amount of increment after each iteration.
5439 /// \param Deref Whether the loop variable is a dereference of the loop
5440 /// counter variable.
5441 ///
5442 /// \return Closure (CapturedStmt) of the loop value calculation.
5443 static CapturedStmt *buildLoopVarFunc(Sema &Actions, QualType LoopVarTy,
5444  QualType LogicalTy,
5445  DeclRefExpr *StartExpr, Expr *Step,
5446  bool Deref) {
5447  ASTContext &Ctx = Actions.getASTContext();
5448 
5449  // Pass the result as an out-parameter. Passing as return value would require
5450  // the OpenMPIRBuilder to know additional C/C++ semantics, such as how to
5451  // invoke a copy constructor.
5452  QualType TargetParamTy = Ctx.getLValueReferenceType(LoopVarTy);
5453  Sema::CapturedParamNameType Params[] = {{"LoopVar", TargetParamTy},
5454  {"Logical", LogicalTy},
5455  {StringRef(), QualType()}};
5456  Actions.ActOnCapturedRegionStart({}, nullptr, CR_Default, Params);
5457 
5458  // Capture the initial iterator, which represents the LoopVar value at
5459  // logical iteration zero. Since the original ForStmt/CXXForRangeStmt updates
5460  // it in every iteration, capture it by value before it is modified.
5461  VarDecl *StartVar = cast<VarDecl>(StartExpr->getDecl());
5462  bool Invalid = Actions.tryCaptureVariable(StartVar, {},
5463  Sema::TryCapture_ExplicitByVal, {});
5464  (void)Invalid;
5465  assert(!Invalid && "Expecting capture-by-value to work.");
5466 
5467  Expr *Body;
5468  {
5469  Sema::CompoundScopeRAII CompoundScope(Actions);
5470  auto *CS = cast<CapturedDecl>(Actions.CurContext);
5471 
5472  ImplicitParamDecl *TargetParam = CS->getParam(0);
5473  DeclRefExpr *TargetRef = Actions.BuildDeclRefExpr(
5474  TargetParam, LoopVarTy, VK_LValue, {}, nullptr, nullptr, {}, nullptr);
5475  ImplicitParamDecl *IndvarParam = CS->getParam(1);
5476  DeclRefExpr *LogicalRef = Actions.BuildDeclRefExpr(
5477  IndvarParam, LogicalTy, VK_LValue, {}, nullptr, nullptr, {}, nullptr);
5478 
5479  // Capture the Start expression.
5480  CaptureVars Recap(Actions);
5481  Expr *NewStart = AssertSuccess(Recap.TransformExpr(StartExpr));
5482  Expr *NewStep = AssertSuccess(Recap.TransformExpr(Step));
5483 
5484  Expr *Skip = AssertSuccess(
5485  Actions.BuildBinOp(nullptr, {}, BO_Mul, NewStep, LogicalRef));
5486  // TODO: Explicitly cast to the iterator's difference_type instead of
5487  // relying on implicit conversion.
5488  Expr *Advanced =
5489  AssertSuccess(Actions.BuildBinOp(nullptr, {}, BO_Add, NewStart, Skip));
5490 
5491  if (Deref) {
5492  // For range-based for-loops convert the loop counter value to a concrete
5493  // loop variable value by dereferencing the iterator.
5494  Advanced =
5495  AssertSuccess(Actions.BuildUnaryOp(nullptr, {}, UO_Deref, Advanced));
5496  }
5497 
5498  // Assign the result to the output parameter.
5499  Body = AssertSuccess(Actions.BuildBinOp(Actions.getCurScope(), {},
5500  BO_Assign, TargetRef, Advanced));
5501  }
5502  return cast<CapturedStmt>(
5503  AssertSuccess(Actions.ActOnCapturedRegionEnd(Body)));
5504 }
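// Worked example (illustrative sketch, not from this file): continuing the
// loop
//   for (int i = 7; i < 43; i += 3)
// the closure built above maps a logical iteration number L back to the
// user's loop variable as LoopVar = Start + L * Step, e.g. L = 4 yields
// 7 + 4 * 3 = 19; for range-based for loops the advanced iterator is
// additionally dereferenced.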
5505 
5506 StmtResult Sema::ActOnOpenMPCanonicalLoop(Stmt *AStmt) {
5507  ASTContext &Ctx = getASTContext();
5508 
5509  // Extract the common elements of ForStmt and CXXForRangeStmt:
5510  // Loop variable, repeat condition, increment
5511  Expr *Cond, *Inc;
5512  VarDecl *LIVDecl, *LUVDecl;
5513  if (auto *For = dyn_cast<ForStmt>(AStmt)) {
5514  Stmt *Init = For->getInit();
5515  if (auto *LCVarDeclStmt = dyn_cast<DeclStmt>(Init)) {
5516  // For statement declares loop variable.
5517  LIVDecl = cast<VarDecl>(LCVarDeclStmt->getSingleDecl());
5518  } else if (auto *LCAssign = dyn_cast<BinaryOperator>(Init)) {
5519  // For statement reuses variable.
5520  assert(LCAssign->getOpcode() == BO_Assign &&
5521  "init part must be a loop variable assignment");
5522  auto *CounterRef = cast<DeclRefExpr>(LCAssign->getLHS());
5523  LIVDecl = cast<VarDecl>(CounterRef->getDecl());
5524  } else
5525  llvm_unreachable("Cannot determine loop variable");
5526  LUVDecl = LIVDecl;
5527 
5528  Cond = For->getCond();
5529  Inc = For->getInc();
5530  } else if (auto *RangeFor = dyn_cast<CXXForRangeStmt>(AStmt)) {
5531  DeclStmt *BeginStmt = RangeFor->getBeginStmt();
5532  LIVDecl = cast<VarDecl>(BeginStmt->getSingleDecl());
5533  LUVDecl = RangeFor->getLoopVariable();
5534 
5535  Cond = RangeFor->getCond();
5536  Inc = RangeFor->getInc();
5537  } else
5538  llvm_unreachable("unhandled kind of loop");
5539 
5540  QualType CounterTy = LIVDecl->getType();
5541  QualType LVTy = LUVDecl->getType();
5542 
5543  // Analyze the loop condition.
5544  Expr *LHS, *RHS;
5545  BinaryOperator::Opcode CondRel;
5546  Cond = Cond->IgnoreImplicit();
5547  if (auto *CondBinExpr = dyn_cast<BinaryOperator>(Cond)) {
5548  LHS = CondBinExpr->getLHS();
5549  RHS = CondBinExpr->getRHS();
5550  CondRel = CondBinExpr->getOpcode();
5551  } else if (auto *CondCXXOp = dyn_cast<CXXOperatorCallExpr>(Cond)) {
5552  assert(CondCXXOp->getNumArgs() == 2 && "Comparison should have 2 operands");
5553  LHS = CondCXXOp->getArg(0);
5554  RHS = CondCXXOp->getArg(1);
5555  switch (CondCXXOp->getOperator()) {
5556  case OO_ExclaimEqual:
5557  CondRel = BO_NE;
5558  break;
5559  case OO_Less:
5560  CondRel = BO_LT;
5561  break;
5562  case OO_LessEqual:
5563  CondRel = BO_LE;
5564  break;
5565  case OO_Greater:
5566  CondRel = BO_GT;
5567  break;
5568  case OO_GreaterEqual:
5569  CondRel = BO_GE;
5570  break;
5571  default:
5572  llvm_unreachable("unexpected iterator operator");
5573  }
5574  } else
5575  llvm_unreachable("unexpected loop condition");
5576 
5577  // Normalize such that the loop counter is on the LHS.
5578  if (!isa<DeclRefExpr>(LHS->IgnoreImplicit()) ||
5579  cast<DeclRefExpr>(LHS->IgnoreImplicit())->getDecl() != LIVDecl) {
5580  std::swap(LHS, RHS);
5581  CondRel = BinaryOperator::reverseComparisonOp(CondRel);
5582  }
5583  auto *CounterRef = cast<DeclRefExpr>(LHS->IgnoreImplicit());
5584 
5585  // Decide the bit width for the logical iteration counter. By default use the
5586  // unsigned ptrdiff_t integer size (for iterators and pointers).
5587  // TODO: For iterators, use iterator::difference_type,
5588  // std::iterator_traits<>::difference_type or decltype(it - end).
5589  QualType LogicalTy = Ctx.getUnsignedPointerDiffType();
5590  if (CounterTy->isIntegerType()) {
5591  unsigned BitWidth = Ctx.getIntWidth(CounterTy);
5592  LogicalTy = Ctx.getIntTypeForBitwidth(BitWidth, false);
5593  }
5594 
5595  // Analyze the loop increment.
5596  Expr *Step;
5597  if (auto *IncUn = dyn_cast<UnaryOperator>(Inc)) {
5598  int Direction;
5599  switch (IncUn->getOpcode()) {
5600  case UO_PreInc:
5601  case UO_PostInc:
5602  Direction = 1;
5603  break;
5604  case UO_PreDec:
5605  case UO_PostDec:
5606  Direction = -1;
5607  break;
5608  default:
5609  llvm_unreachable("unhandled unary increment operator");
5610  }
5611  Step = IntegerLiteral::Create(
5612  Ctx, llvm::APInt(Ctx.getIntWidth(LogicalTy), Direction), LogicalTy, {});
5613  } else if (auto *IncBin = dyn_cast<BinaryOperator>(Inc)) {
5614  if (IncBin->getOpcode() == BO_AddAssign) {
5615  Step = IncBin->getRHS();
5616  } else if (IncBin->getOpcode() == BO_SubAssign) {
5617  Step =
5618  AssertSuccess(BuildUnaryOp(nullptr, {}, UO_Minus, IncBin->getRHS()));
5619  } else
5620  llvm_unreachable("unhandled binary increment operator");
5621  } else if (auto *CondCXXOp = dyn_cast<CXXOperatorCallExpr>(Inc)) {
5622  switch (CondCXXOp->getOperator()) {
5623  case OO_PlusPlus:
5624  Step = IntegerLiteral::Create(
5625  Ctx, llvm::APInt(Ctx.getIntWidth(LogicalTy), 1), LogicalTy, {});
5626  break;
5627  case OO_MinusMinus:
5628  Step = IntegerLiteral::Create(
5629  Ctx, llvm::APInt(Ctx.getIntWidth(LogicalTy), -1), LogicalTy, {});
5630  break;
5631  case OO_PlusEqual:
5632  Step = CondCXXOp->getArg(1);
5633  break;
5634  case OO_MinusEqual:
5635  Step = AssertSuccess(
5636  BuildUnaryOp(nullptr, {}, UO_Minus, CondCXXOp->getArg(1)));
5637  break;
5638  default:
5639  llvm_unreachable("unhandled overloaded increment operator");
5640  }
5641  } else
5642  llvm_unreachable("unknown increment expression");
5643 
5644  CapturedStmt *DistanceFunc =
5645  buildDistanceFunc(*this, LogicalTy, CondRel, LHS, RHS, Step);
5646  CapturedStmt *LoopVarFunc = buildLoopVarFunc(
5647  *this, LVTy, LogicalTy, CounterRef, Step, isa<CXXForRangeStmt>(AStmt));
5648  DeclRefExpr *LVRef = BuildDeclRefExpr(LUVDecl, LUVDecl->getType(), VK_LValue,