1 //===--- SemaOpenMP.cpp - Semantic Analysis for OpenMP constructs ---------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements semantic analysis for OpenMP directives and
10 /// clauses.
11 ///
12 //===----------------------------------------------------------------------===//
13 
14 #include "TreeTransform.h"
15 #include "clang/AST/ASTContext.h"
18 #include "clang/AST/Decl.h"
19 #include "clang/AST/DeclCXX.h"
20 #include "clang/AST/DeclOpenMP.h"
21 #include "clang/AST/StmtCXX.h"
22 #include "clang/AST/StmtOpenMP.h"
23 #include "clang/AST/StmtVisitor.h"
24 #include "clang/AST/TypeOrdering.h"
27 #include "clang/Sema/Lookup.h"
28 #include "clang/Sema/Scope.h"
29 #include "clang/Sema/ScopeInfo.h"
31 #include "llvm/ADT/PointerEmbeddedInt.h"
32 using namespace clang;
33 
34 //===----------------------------------------------------------------------===//
35 // Stack of data-sharing attributes for variables
36 //===----------------------------------------------------------------------===//
37 
38 static const Expr *checkMapClauseExpressionBase(
39  Sema &SemaRef, Expr *E,
40  OMPClauseMappableExprCommon::MappableExprComponentList &CurComponents,
41  OpenMPClauseKind CKind, bool NoDiagnose);
42 
43 namespace {
44 /// Default data sharing attributes, which can be applied to a directive.
45 enum DefaultDataSharingAttributes {
46  DSA_unspecified = 0, /// Data sharing attribute not specified.
47  DSA_none = 1 << 0, /// Default data sharing attribute 'none'.
48  DSA_shared = 1 << 1, /// Default data sharing attribute 'shared'.
49 };
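// Illustration: these values model the OpenMP 'default' clause, e.g.
//   #pragma omp parallel default(none) shared(a)
// With default(none), every variable referenced in the region must be given
// an explicit data-sharing attribute, otherwise an error is diagnosed.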
50 
51 /// Attributes of the defaultmap clause.
52 enum DefaultMapAttributes {
53  DMA_unspecified, /// Default mapping is not specified.
54  DMA_tofrom_scalar, /// Default mapping is 'tofrom:scalar'.
55 };
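// Illustration: the OpenMP 4.5 'defaultmap' clause, e.g.
//   #pragma omp target defaultmap(tofrom:scalar)
// switches the implicit handling of scalar variables in the target region
// from firstprivate to a tofrom map.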
56 
57 /// Stack for tracking declarations used in OpenMP directives and
58 /// clauses and their data-sharing attributes.
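/// Roughly, one entry is pushed for each directive being analyzed, so for
///   #pragma omp parallel
///   #pragma omp for
/// the stack holds an entry for 'parallel' and a nested entry for 'for';
/// queries walk the entries from innermost to outermost.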
59 class DSAStackTy {
60 public:
61  struct DSAVarData {
62  OpenMPDirectiveKind DKind = OMPD_unknown;
63  OpenMPClauseKind CKind = OMPC_unknown;
64  const Expr *RefExpr = nullptr;
65  DeclRefExpr *PrivateCopy = nullptr;
66  SourceLocation ImplicitDSALoc;
67  DSAVarData() = default;
68  DSAVarData(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind,
69  const Expr *RefExpr, DeclRefExpr *PrivateCopy,
70  SourceLocation ImplicitDSALoc)
71  : DKind(DKind), CKind(CKind), RefExpr(RefExpr),
72  PrivateCopy(PrivateCopy), ImplicitDSALoc(ImplicitDSALoc) {}
73  };
74  using OperatorOffsetTy =
75  llvm::SmallVector<std::pair<Expr *, OverloadedOperatorKind>, 4>;
76  using DoacrossDependMapTy =
77  llvm::DenseMap<OMPDependClause *, OperatorOffsetTy>;
78 
79 private:
80  struct DSAInfo {
81  OpenMPClauseKind Attributes = OMPC_unknown;
82  /// Pointer to a reference expression and a flag which shows that the
83  /// variable is marked as lastprivate(true) or not (false).
84  llvm::PointerIntPair<const Expr *, 1, bool> RefExpr;
85  DeclRefExpr *PrivateCopy = nullptr;
86  };
87  using DeclSAMapTy = llvm::SmallDenseMap<const ValueDecl *, DSAInfo, 8>;
88  using AlignedMapTy = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 8>;
89  using LCDeclInfo = std::pair<unsigned, VarDecl *>;
90  using LoopControlVariablesMapTy =
91  llvm::SmallDenseMap<const ValueDecl *, LCDeclInfo, 8>;
92  /// Struct that associates a component with the clause kind where they are
93  /// found.
94  struct MappedExprComponentTy {
95  OMPClauseMappableExprCommon::MappableExprComponentLists Components;
96  OpenMPClauseKind Kind = OMPC_unknown;
97  };
98  using MappedExprComponentsTy =
99  llvm::DenseMap<const ValueDecl *, MappedExprComponentTy>;
100  using CriticalsWithHintsTy =
101  llvm::StringMap<std::pair<const OMPCriticalDirective *, llvm::APSInt>>;
102  struct ReductionData {
103  using BOKPtrType = llvm::PointerEmbeddedInt<BinaryOperatorKind, 16>;
104  SourceRange ReductionRange;
105  llvm::PointerUnion<const Expr *, BOKPtrType> ReductionOp;
106  ReductionData() = default;
107  void set(BinaryOperatorKind BO, SourceRange RR) {
108  ReductionRange = RR;
109  ReductionOp = BO;
110  }
111  void set(const Expr *RefExpr, SourceRange RR) {
112  ReductionRange = RR;
113  ReductionOp = RefExpr;
114  }
115  };
116  using DeclReductionMapTy =
117  llvm::SmallDenseMap<const ValueDecl *, ReductionData, 4>;
118 
119  struct SharingMapTy {
120  DeclSAMapTy SharingMap;
121  DeclReductionMapTy ReductionMap;
122  AlignedMapTy AlignedMap;
123  MappedExprComponentsTy MappedExprComponents;
124  LoopControlVariablesMapTy LCVMap;
125  DefaultDataSharingAttributes DefaultAttr = DSA_unspecified;
126  SourceLocation DefaultAttrLoc;
127  DefaultMapAttributes DefaultMapAttr = DMA_unspecified;
128  SourceLocation DefaultMapAttrLoc;
129  OpenMPDirectiveKind Directive = OMPD_unknown;
130  DeclarationNameInfo DirectiveName;
131  Scope *CurScope = nullptr;
132  SourceLocation ConstructLoc;
133  /// Set of 'depend' clauses with 'sink|source' dependence kind. Required to
134  /// get the data (loop counters etc.) about enclosing loop-based construct.
135  /// This data is required during codegen.
136  DoacrossDependMapTy DoacrossDepends;
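  // Illustration: for a doacross loop such as
  //   #pragma omp for ordered(1)
  //   for (int i = 1; i < n; ++i) {
  //   #pragma omp ordered depend(sink: i - 1)
  //     ...
  //   #pragma omp ordered depend(source)
  //   }
  // the 'depend' clauses (and offsets such as 'i - 1') are recorded here.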
137  /// The 'ordered' clause of the region, if any: the first element is the
138  /// optional argument of the clause, the second is the clause itself. The
139  /// optional is unset if the region has no 'ordered' clause.
140  llvm::Optional<std::pair<const Expr *, OMPOrderedClause *>> OrderedRegion;
141  unsigned AssociatedLoops = 1;
142  const Decl *PossiblyLoopCounter = nullptr;
143  bool NowaitRegion = false;
144  bool CancelRegion = false;
145  bool LoopStart = false;
146  bool BodyComplete = false;
147  SourceLocation InnerTeamsRegionLoc;
148  /// Reference to the taskgroup task_reduction reference expression.
149  Expr *TaskgroupReductionRef = nullptr;
150  llvm::DenseSet<QualType> MappedClassesQualTypes;
151  /// List of globals marked as declare target link in this target region
152  /// (isOpenMPTargetExecutionDirective(Directive) == true).
153  llvm::SmallVector<DeclRefExpr *, 4> DeclareTargetLinkVarDecls;
154  SharingMapTy(OpenMPDirectiveKind DKind, DeclarationNameInfo Name,
155  Scope *CurScope, SourceLocation Loc)
156  : Directive(DKind), DirectiveName(Name), CurScope(CurScope),
157  ConstructLoc(Loc) {}
158  SharingMapTy() = default;
159  };
160 
161  using StackTy = SmallVector<SharingMapTy, 4>;
162 
163  /// Stack of used declarations and their data-sharing attributes.
164  DeclSAMapTy Threadprivates;
165  const FunctionScopeInfo *CurrentNonCapturingFunctionScope = nullptr;
166  SmallVector<std::pair<StackTy, const FunctionScopeInfo *>, 4> Stack;
167  /// true, if the check for DSA must be performed from the parent directive;
168  /// false, if from the current directive.
169  OpenMPClauseKind ClauseKindMode = OMPC_unknown;
170  Sema &SemaRef;
171  bool ForceCapturing = false;
172  /// true if all the variables in the target executable directives must be
173  /// captured by reference.
174  bool ForceCaptureByReferenceInTargetExecutable = false;
175  CriticalsWithHintsTy Criticals;
176  unsigned IgnoredStackElements = 0;
177 
178  /// Iterators over the stack iterate in order from innermost to outermost
179  /// directive.
180  using const_iterator = StackTy::const_reverse_iterator;
181  const_iterator begin() const {
182  return Stack.empty() ? const_iterator()
183  : Stack.back().first.rbegin() + IgnoredStackElements;
184  }
185  const_iterator end() const {
186  return Stack.empty() ? const_iterator() : Stack.back().first.rend();
187  }
188  using iterator = StackTy::reverse_iterator;
189  iterator begin() {
190  return Stack.empty() ? iterator()
191  : Stack.back().first.rbegin() + IgnoredStackElements;
192  }
193  iterator end() {
194  return Stack.empty() ? iterator() : Stack.back().first.rend();
195  }
196 
197  // Convenience operations to get at the elements of the stack.
198 
199  bool isStackEmpty() const {
200  return Stack.empty() ||
201  Stack.back().second != CurrentNonCapturingFunctionScope ||
202  Stack.back().first.size() <= IgnoredStackElements;
203  }
204  size_t getStackSize() const {
205  return isStackEmpty() ? 0
206  : Stack.back().first.size() - IgnoredStackElements;
207  }
208 
209  SharingMapTy *getTopOfStackOrNull() {
210  size_t Size = getStackSize();
211  if (Size == 0)
212  return nullptr;
213  return &Stack.back().first[Size - 1];
214  }
215  const SharingMapTy *getTopOfStackOrNull() const {
216  return const_cast<DSAStackTy&>(*this).getTopOfStackOrNull();
217  }
218  SharingMapTy &getTopOfStack() {
219  assert(!isStackEmpty() && "no current directive");
220  return *getTopOfStackOrNull();
221  }
222  const SharingMapTy &getTopOfStack() const {
223  return const_cast<DSAStackTy&>(*this).getTopOfStack();
224  }
225 
226  SharingMapTy *getSecondOnStackOrNull() {
227  size_t Size = getStackSize();
228  if (Size <= 1)
229  return nullptr;
230  return &Stack.back().first[Size - 2];
231  }
232  const SharingMapTy *getSecondOnStackOrNull() const {
233  return const_cast<DSAStackTy&>(*this).getSecondOnStackOrNull();
234  }
235 
236  /// Get the stack element at a certain level (previously returned by
237  /// \c getNestingLevel).
238  ///
239  /// Note that nesting levels count from outermost to innermost, and this is
240  /// the reverse of our iteration order where new inner levels are pushed at
241  /// the front of the stack.
242  SharingMapTy &getStackElemAtLevel(unsigned Level) {
243  assert(Level < getStackSize() && "no such stack element");
244  return Stack.back().first[Level];
245  }
246  const SharingMapTy &getStackElemAtLevel(unsigned Level) const {
247  return const_cast<DSAStackTy&>(*this).getStackElemAtLevel(Level);
248  }
249 
250  DSAVarData getDSA(const_iterator &Iter, ValueDecl *D) const;
251 
252  /// Checks if the variable is a local for OpenMP region.
253  bool isOpenMPLocal(VarDecl *D, const_iterator Iter) const;
254 
255  /// Vector of previously declared requires directives
256  SmallVector<const OMPRequiresDecl *, 2> RequiresDecls;
257  /// omp_allocator_handle_t type.
258  QualType OMPAllocatorHandleT;
259  /// Expression for the predefined allocators.
260  Expr *OMPPredefinedAllocators[OMPAllocateDeclAttr::OMPUserDefinedMemAlloc] = {
261  nullptr};
262  /// Vector of previously encountered target directives
263  SmallVector<SourceLocation, 2> TargetLocations;
264 
265 public:
266  explicit DSAStackTy(Sema &S) : SemaRef(S) {}
267 
268  /// Sets omp_allocator_handle_t type.
269  void setOMPAllocatorHandleT(QualType Ty) { OMPAllocatorHandleT = Ty; }
270  /// Gets omp_allocator_handle_t type.
271  QualType getOMPAllocatorHandleT() const { return OMPAllocatorHandleT; }
272  /// Sets the given default allocator.
273  void setAllocator(OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind,
274  Expr *Allocator) {
275  OMPPredefinedAllocators[AllocatorKind] = Allocator;
276  }
277  /// Returns the specified default allocator.
278  Expr *getAllocator(OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind) const {
279  return OMPPredefinedAllocators[AllocatorKind];
280  }
281 
282  bool isClauseParsingMode() const { return ClauseKindMode != OMPC_unknown; }
283  OpenMPClauseKind getClauseParsingMode() const {
284  assert(isClauseParsingMode() && "Must be in clause parsing mode.");
285  return ClauseKindMode;
286  }
287  void setClauseParsingMode(OpenMPClauseKind K) { ClauseKindMode = K; }
288 
289  bool isBodyComplete() const {
290  const SharingMapTy *Top = getTopOfStackOrNull();
291  return Top && Top->BodyComplete;
292  }
293  void setBodyComplete() {
294  getTopOfStack().BodyComplete = true;
295  }
296 
297  bool isForceVarCapturing() const { return ForceCapturing; }
298  void setForceVarCapturing(bool V) { ForceCapturing = V; }
299 
300  void setForceCaptureByReferenceInTargetExecutable(bool V) {
301  ForceCaptureByReferenceInTargetExecutable = V;
302  }
303  bool isForceCaptureByReferenceInTargetExecutable() const {
304  return ForceCaptureByReferenceInTargetExecutable;
305  }
306 
307  void push(OpenMPDirectiveKind DKind, const DeclarationNameInfo &DirName,
308  Scope *CurScope, SourceLocation Loc) {
309  assert(!IgnoredStackElements &&
310  "cannot change stack while ignoring elements");
311  if (Stack.empty() ||
312  Stack.back().second != CurrentNonCapturingFunctionScope)
313  Stack.emplace_back(StackTy(), CurrentNonCapturingFunctionScope);
314  Stack.back().first.emplace_back(DKind, DirName, CurScope, Loc);
315  Stack.back().first.back().DefaultAttrLoc = Loc;
316  }
317 
318  void pop() {
319  assert(!IgnoredStackElements &&
320  "cannot change stack while ignoring elements");
321  assert(!Stack.back().first.empty() &&
322  "Data-sharing attributes stack is empty!");
323  Stack.back().first.pop_back();
324  }
325 
326  /// RAII object to temporarily leave the scope of a directive when we want to
327  /// logically operate in its parent.
328  class ParentDirectiveScope {
329  DSAStackTy &Self;
330  bool Active;
331  public:
332  ParentDirectiveScope(DSAStackTy &Self, bool Activate)
333  : Self(Self), Active(false) {
334  if (Activate)
335  enable();
336  }
337  ~ParentDirectiveScope() { disable(); }
338  void disable() {
339  if (Active) {
340  --Self.IgnoredStackElements;
341  Active = false;
342  }
343  }
344  void enable() {
345  if (!Active) {
346  ++Self.IgnoredStackElements;
347  Active = true;
348  }
349  }
350  };
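  // Hypothetical usage sketch: while analyzing a clause of the innermost
  // directive, code can create
  //   ParentDirectiveScope PDS(*this, /*Activate=*/true);
  // so that stack queries temporarily resolve against the enclosing (parent)
  // directive instead.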
351 
352  /// Marks that we've started loop parsing.
353  void loopInit() {
354  assert(isOpenMPLoopDirective(getCurrentDirective()) &&
355  "Expected loop-based directive.");
356  getTopOfStack().LoopStart = true;
357  }
358  /// Start capturing of the variables in the loop context.
359  void loopStart() {
360  assert(isOpenMPLoopDirective(getCurrentDirective()) &&
361  "Expected loop-based directive.");
362  getTopOfStack().LoopStart = false;
363  }
364  /// true, if variables are captured, false otherwise.
365  bool isLoopStarted() const {
366  assert(isOpenMPLoopDirective(getCurrentDirective()) &&
367  "Expected loop-based directive.");
368  return !getTopOfStack().LoopStart;
369  }
370  /// Marks (or clears) declaration as possibly loop counter.
371  void resetPossibleLoopCounter(const Decl *D = nullptr) {
372  getTopOfStack().PossiblyLoopCounter =
373  D ? D->getCanonicalDecl() : D;
374  }
375  /// Gets the possible loop counter decl.
376  const Decl *getPossiblyLoopCunter() const {
377  return getTopOfStack().PossiblyLoopCounter;
378  }
379  /// Start new OpenMP region stack in new non-capturing function.
380  void pushFunction() {
381  assert(!IgnoredStackElements &&
382  "cannot change stack while ignoring elements");
383  const FunctionScopeInfo *CurFnScope = SemaRef.getCurFunction();
384  assert(!isa<CapturingScopeInfo>(CurFnScope));
385  CurrentNonCapturingFunctionScope = CurFnScope;
386  }
387  /// Pop region stack for non-capturing function.
388  void popFunction(const FunctionScopeInfo *OldFSI) {
389  assert(!IgnoredStackElements &&
390  "cannot change stack while ignoring elements");
391  if (!Stack.empty() && Stack.back().second == OldFSI) {
392  assert(Stack.back().first.empty());
393  Stack.pop_back();
394  }
395  CurrentNonCapturingFunctionScope = nullptr;
396  for (const FunctionScopeInfo *FSI : llvm::reverse(SemaRef.FunctionScopes)) {
397  if (!isa<CapturingScopeInfo>(FSI)) {
398  CurrentNonCapturingFunctionScope = FSI;
399  break;
400  }
401  }
402  }
403 
404  void addCriticalWithHint(const OMPCriticalDirective *D, llvm::APSInt Hint) {
405  Criticals.try_emplace(D->getDirectiveName().getAsString(), D, Hint);
406  }
407  const std::pair<const OMPCriticalDirective *, llvm::APSInt>
408  getCriticalWithHint(const DeclarationNameInfo &Name) const {
409  auto I = Criticals.find(Name.getAsString());
410  if (I != Criticals.end())
411  return I->second;
412  return std::make_pair(nullptr, llvm::APSInt());
413  }
414  /// If 'aligned' declaration for given variable \a D was not seen yet,
415  /// add it and return NULL; otherwise return previous occurrence's expression
416  /// for diagnostics.
417  const Expr *addUniqueAligned(const ValueDecl *D, const Expr *NewDE);
418 
419  /// Register specified variable as loop control variable.
420  void addLoopControlVariable(const ValueDecl *D, VarDecl *Capture);
421  /// Check if the specified variable is a loop control variable for
422  /// current region.
423  /// \return The index of the loop control variable in the list of associated
424  /// for-loops (from outer to inner).
425  const LCDeclInfo isLoopControlVariable(const ValueDecl *D) const;
426  /// Check if the specified variable is a loop control variable for
427  /// parent region.
428  /// \return The index of the loop control variable in the list of associated
429  /// for-loops (from outer to inner).
430  const LCDeclInfo isParentLoopControlVariable(const ValueDecl *D) const;
431  /// Get the loop control variable for the I-th loop (or nullptr) in
432  /// parent directive.
433  const ValueDecl *getParentLoopControlVariable(unsigned I) const;
434 
435  /// Adds explicit data sharing attribute to the specified declaration.
436  void addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
437  DeclRefExpr *PrivateCopy = nullptr);
438 
439  /// Adds additional information for the reduction items with the reduction id
440  /// represented as an operator.
441  void addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
442  BinaryOperatorKind BOK);
443  /// Adds additional information for the reduction items with the reduction id
444  /// represented as reduction identifier.
445  void addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
446  const Expr *ReductionRef);
447  /// Returns the location and reduction operation from the innermost parent
448  /// region for the given \p D.
449  const DSAVarData
450  getTopMostTaskgroupReductionData(const ValueDecl *D, SourceRange &SR,
451  BinaryOperatorKind &BOK,
452  Expr *&TaskgroupDescriptor) const;
453  /// Returns the location and reduction operation from the innermost parent
454  /// region for the given \p D.
455  const DSAVarData
456  getTopMostTaskgroupReductionData(const ValueDecl *D, SourceRange &SR,
457  const Expr *&ReductionRef,
458  Expr *&TaskgroupDescriptor) const;
459  /// Return reduction reference expression for the current taskgroup.
460  Expr *getTaskgroupReductionRef() const {
461  assert(getTopOfStack().Directive == OMPD_taskgroup &&
462  "taskgroup reference expression requested for non taskgroup "
463  "directive.");
464  return getTopOfStack().TaskgroupReductionRef;
465  }
466  /// Checks if the given \p VD declaration is actually a taskgroup reduction
467  /// descriptor variable at the \p Level of OpenMP regions.
468  bool isTaskgroupReductionRef(const ValueDecl *VD, unsigned Level) const {
469  return getStackElemAtLevel(Level).TaskgroupReductionRef &&
470  cast<DeclRefExpr>(getStackElemAtLevel(Level).TaskgroupReductionRef)
471  ->getDecl() == VD;
472  }
473 
474  /// Returns data sharing attributes from top of the stack for the
475  /// specified declaration.
476  const DSAVarData getTopDSA(ValueDecl *D, bool FromParent);
477  /// Returns data-sharing attributes for the specified declaration.
478  const DSAVarData getImplicitDSA(ValueDecl *D, bool FromParent) const;
479  /// Checks if the specified variable has data-sharing attributes which
480  /// match the specified \a CPred predicate in any directive which matches
481  /// the \a DPred predicate.
482  const DSAVarData
483  hasDSA(ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
484  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
485  bool FromParent) const;
486  /// Checks if the specified variable has data-sharing attributes which
487  /// match the specified \a CPred predicate in any innermost directive which
488  /// matches the \a DPred predicate.
489  const DSAVarData
490  hasInnermostDSA(ValueDecl *D,
491  const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
492  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
493  bool FromParent) const;
494  /// Checks if the specified variable has explicit data-sharing
495  /// attributes which match the specified \a CPred predicate at the specified
496  /// OpenMP region.
497  bool hasExplicitDSA(const ValueDecl *D,
498  const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
499  unsigned Level, bool NotLastprivate = false) const;
500 
501  /// Returns true if the directive at level \a Level matches the
502  /// specified \a DPred predicate.
503  bool hasExplicitDirective(
504  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
505  unsigned Level) const;
506 
507  /// Finds a directive which matches specified \a DPred predicate.
508  bool hasDirective(
509  const llvm::function_ref<bool(
510  OpenMPDirectiveKind, const DeclarationNameInfo &, SourceLocation)>
511  DPred,
512  bool FromParent) const;
513 
514  /// Returns currently analyzed directive.
515  OpenMPDirectiveKind getCurrentDirective() const {
516  const SharingMapTy *Top = getTopOfStackOrNull();
517  return Top ? Top->Directive : OMPD_unknown;
518  }
519  /// Returns directive kind at specified level.
520  OpenMPDirectiveKind getDirective(unsigned Level) const {
521  assert(!isStackEmpty() && "No directive at specified level.");
522  return getStackElemAtLevel(Level).Directive;
523  }
524  /// Returns parent directive.
525  OpenMPDirectiveKind getParentDirective() const {
526  const SharingMapTy *Parent = getSecondOnStackOrNull();
527  return Parent ? Parent->Directive : OMPD_unknown;
528  }
529 
530  /// Add requires decl to internal vector
531  void addRequiresDecl(OMPRequiresDecl *RD) {
532  RequiresDecls.push_back(RD);
533  }
534 
535  /// Checks if the defined 'requires' directive has specified type of clause.
536  template <typename ClauseType>
537  bool hasRequiresDeclWithClause() {
538  return llvm::any_of(RequiresDecls, [](const OMPRequiresDecl *D) {
539  return llvm::any_of(D->clauselists(), [](const OMPClause *C) {
540  return isa<ClauseType>(C);
541  });
542  });
543  }
544 
545  /// Checks for a duplicate clause amongst previously declared requires
546  /// directives
547  bool hasDuplicateRequiresClause(ArrayRef<OMPClause *> ClauseList) const {
548  bool IsDuplicate = false;
549  for (OMPClause *CNew : ClauseList) {
550  for (const OMPRequiresDecl *D : RequiresDecls) {
551  for (const OMPClause *CPrev : D->clauselists()) {
552  if (CNew->getClauseKind() == CPrev->getClauseKind()) {
553  SemaRef.Diag(CNew->getBeginLoc(),
554  diag::err_omp_requires_clause_redeclaration)
555  << getOpenMPClauseName(CNew->getClauseKind());
556  SemaRef.Diag(CPrev->getBeginLoc(),
557  diag::note_omp_requires_previous_clause)
558  << getOpenMPClauseName(CPrev->getClauseKind());
559  IsDuplicate = true;
560  }
561  }
562  }
563  }
564  return IsDuplicate;
565  }
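  // Illustration: a translation unit containing two directives such as
  //   #pragma omp requires unified_shared_memory
  //   #pragma omp requires unified_shared_memory
  // is diagnosed here with err_omp_requires_clause_redeclaration, plus a note
  // pointing at the earlier clause.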
566 
567  /// Add location of previously encountered target to internal vector
568  void addTargetDirLocation(SourceLocation LocStart) {
569  TargetLocations.push_back(LocStart);
570  }
571 
572  // Return previously encountered target region locations.
573  ArrayRef<SourceLocation> getEncounteredTargetLocs() const {
574  return TargetLocations;
575  }
576 
577  /// Set default data sharing attribute to none.
578  void setDefaultDSANone(SourceLocation Loc) {
579  getTopOfStack().DefaultAttr = DSA_none;
580  getTopOfStack().DefaultAttrLoc = Loc;
581  }
582  /// Set default data sharing attribute to shared.
583  void setDefaultDSAShared(SourceLocation Loc) {
584  getTopOfStack().DefaultAttr = DSA_shared;
585  getTopOfStack().DefaultAttrLoc = Loc;
586  }
587  /// Set default data mapping attribute to 'tofrom:scalar'.
588  void setDefaultDMAToFromScalar(SourceLocation Loc) {
589  getTopOfStack().DefaultMapAttr = DMA_tofrom_scalar;
590  getTopOfStack().DefaultMapAttrLoc = Loc;
591  }
592 
593  DefaultDataSharingAttributes getDefaultDSA() const {
594  return isStackEmpty() ? DSA_unspecified
595  : getTopOfStack().DefaultAttr;
596  }
597  SourceLocation getDefaultDSALocation() const {
598  return isStackEmpty() ? SourceLocation()
599  : getTopOfStack().DefaultAttrLoc;
600  }
601  DefaultMapAttributes getDefaultDMA() const {
602  return isStackEmpty() ? DMA_unspecified
603  : getTopOfStack().DefaultMapAttr;
604  }
605  DefaultMapAttributes getDefaultDMAAtLevel(unsigned Level) const {
606  return getStackElemAtLevel(Level).DefaultMapAttr;
607  }
608  SourceLocation getDefaultDMALocation() const {
609  return isStackEmpty() ? SourceLocation()
610  : getTopOfStack().DefaultMapAttrLoc;
611  }
612 
613  /// Checks if the specified variable is a threadprivate.
614  bool isThreadPrivate(VarDecl *D) {
615  const DSAVarData DVar = getTopDSA(D, false);
616  return isOpenMPThreadPrivate(DVar.CKind);
617  }
618 
619  /// Marks current region as ordered (it has an 'ordered' clause).
620  void setOrderedRegion(bool IsOrdered, const Expr *Param,
621  OMPOrderedClause *Clause) {
622  if (IsOrdered)
623  getTopOfStack().OrderedRegion.emplace(Param, Clause);
624  else
625  getTopOfStack().OrderedRegion.reset();
626  }
627  /// Returns true if the region is ordered (has an associated 'ordered'
628  /// clause), false otherwise.
629  bool isOrderedRegion() const {
630  if (const SharingMapTy *Top = getTopOfStackOrNull())
631  return Top->OrderedRegion.hasValue();
632  return false;
633  }
634  /// Returns optional parameter for the ordered region.
635  std::pair<const Expr *, OMPOrderedClause *> getOrderedRegionParam() const {
636  if (const SharingMapTy *Top = getTopOfStackOrNull())
637  if (Top->OrderedRegion.hasValue())
638  return Top->OrderedRegion.getValue();
639  return std::make_pair(nullptr, nullptr);
640  }
641  /// Returns true if the parent region is ordered (has an associated
642  /// 'ordered' clause), false otherwise.
643  bool isParentOrderedRegion() const {
644  if (const SharingMapTy *Parent = getSecondOnStackOrNull())
645  return Parent->OrderedRegion.hasValue();
646  return false;
647  }
648  /// Returns optional parameter for the ordered region.
649  std::pair<const Expr *, OMPOrderedClause *>
650  getParentOrderedRegionParam() const {
651  if (const SharingMapTy *Parent = getSecondOnStackOrNull())
652  if (Parent->OrderedRegion.hasValue())
653  return Parent->OrderedRegion.getValue();
654  return std::make_pair(nullptr, nullptr);
655  }
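  // Illustration: for '#pragma omp for ordered(2)' the stored parameter is
  // the expression '2' (the number of loops carrying doacross dependences);
  // for a plain 'ordered' clause the parameter expression is null.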
656  /// Marks current region as nowait (it has a 'nowait' clause).
657  void setNowaitRegion(bool IsNowait = true) {
658  getTopOfStack().NowaitRegion = IsNowait;
659  }
660  /// Returns true if the parent region is nowait (has an associated
661  /// 'nowait' clause), false otherwise.
662  bool isParentNowaitRegion() const {
663  if (const SharingMapTy *Parent = getSecondOnStackOrNull())
664  return Parent->NowaitRegion;
665  return false;
666  }
667  /// Marks parent region as cancel region.
668  void setParentCancelRegion(bool Cancel = true) {
669  if (SharingMapTy *Parent = getSecondOnStackOrNull())
670  Parent->CancelRegion |= Cancel;
671  }
672  /// Return true if current region has inner cancel construct.
673  bool isCancelRegion() const {
674  const SharingMapTy *Top = getTopOfStackOrNull();
675  return Top ? Top->CancelRegion : false;
676  }
677 
678  /// Set collapse value for the region.
679  void setAssociatedLoops(unsigned Val) {
680  getTopOfStack().AssociatedLoops = Val;
681  }
682  /// Return collapse value for region.
683  unsigned getAssociatedLoops() const {
684  const SharingMapTy *Top = getTopOfStackOrNull();
685  return Top ? Top->AssociatedLoops : 0;
686  }
687 
688  /// Marks current target region as one with closely nested teams
689  /// region.
690  void setParentTeamsRegionLoc(SourceLocation TeamsRegionLoc) {
691  if (SharingMapTy *Parent = getSecondOnStackOrNull())
692  Parent->InnerTeamsRegionLoc = TeamsRegionLoc;
693  }
694  /// Returns true, if current region has closely nested teams region.
695  bool hasInnerTeamsRegion() const {
696  return getInnerTeamsRegionLoc().isValid();
697  }
698  /// Returns location of the nested teams region (if any).
699  SourceLocation getInnerTeamsRegionLoc() const {
700  const SharingMapTy *Top = getTopOfStackOrNull();
701  return Top ? Top->InnerTeamsRegionLoc : SourceLocation();
702  }
703 
704  Scope *getCurScope() const {
705  const SharingMapTy *Top = getTopOfStackOrNull();
706  return Top ? Top->CurScope : nullptr;
707  }
708  SourceLocation getConstructLoc() const {
709  const SharingMapTy *Top = getTopOfStackOrNull();
710  return Top ? Top->ConstructLoc : SourceLocation();
711  }
712 
713  /// Do the check specified in \a Check to all component lists and return true
714  /// if any issue is found.
715  bool checkMappableExprComponentListsForDecl(
716  const ValueDecl *VD, bool CurrentRegionOnly,
717  const llvm::function_ref<
718  bool(OMPClauseMappableExprCommon::MappableExprComponentListRef,
719  OpenMPClauseKind)>
720  Check) const {
721  if (isStackEmpty())
722  return false;
723  auto SI = begin();
724  auto SE = end();
725 
726  if (SI == SE)
727  return false;
728 
729  if (CurrentRegionOnly)
730  SE = std::next(SI);
731  else
732  std::advance(SI, 1);
733 
734  for (; SI != SE; ++SI) {
735  auto MI = SI->MappedExprComponents.find(VD);
736  if (MI != SI->MappedExprComponents.end())
737  for (OMPClauseMappableExprCommon::MappableExprComponentListRef L :
738  MI->second.Components)
739  if (Check(L, MI->second.Kind))
740  return true;
741  }
742  return false;
743  }
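  // Illustration: a clause such as 'map(tofrom: s.ptr[0:n])' is recorded as a
  // list of mappable components (roughly: base 's', member 'ptr', array
  // section '[0:n]'), and \a Check is invoked on every component list stored
  // for the base declaration.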
744 
745  /// Do the check specified in \a Check to all component lists at a given level
746  /// and return true if any issue is found.
747  bool checkMappableExprComponentListsForDeclAtLevel(
748  const ValueDecl *VD, unsigned Level,
749  const llvm::function_ref<
750  bool(OMPClauseMappableExprCommon::MappableExprComponentListRef,
751  OpenMPClauseKind)>
752  Check) const {
753  if (getStackSize() <= Level)
754  return false;
755 
756  const SharingMapTy &StackElem = getStackElemAtLevel(Level);
757  auto MI = StackElem.MappedExprComponents.find(VD);
758  if (MI != StackElem.MappedExprComponents.end())
759  for (OMPClauseMappableExprCommon::MappableExprComponentListRef L :
760  MI->second.Components)
761  if (Check(L, MI->second.Kind))
762  return true;
763  return false;
764  }
765 
766  /// Create a new mappable expression component list associated with a given
767  /// declaration and initialize it with the provided list of components.
768  void addMappableExpressionComponents(
769  const ValueDecl *VD,
770  OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
771  OpenMPClauseKind WhereFoundClauseKind) {
772  MappedExprComponentTy &MEC = getTopOfStack().MappedExprComponents[VD];
773  // Create new entry and append the new components there.
774  MEC.Components.resize(MEC.Components.size() + 1);
775  MEC.Components.back().append(Components.begin(), Components.end());
776  MEC.Kind = WhereFoundClauseKind;
777  }
778 
779  unsigned getNestingLevel() const {
780  assert(!isStackEmpty());
781  return getStackSize() - 1;
782  }
783  void addDoacrossDependClause(OMPDependClause *C,
784  const OperatorOffsetTy &OpsOffs) {
785  SharingMapTy *Parent = getSecondOnStackOrNull();
786  assert(Parent && isOpenMPWorksharingDirective(Parent->Directive));
787  Parent->DoacrossDepends.try_emplace(C, OpsOffs);
788  }
789  llvm::iterator_range<DoacrossDependMapTy::const_iterator>
790  getDoacrossDependClauses() const {
791  const SharingMapTy &StackElem = getTopOfStack();
792  if (isOpenMPWorksharingDirective(StackElem.Directive)) {
793  const DoacrossDependMapTy &Ref = StackElem.DoacrossDepends;
794  return llvm::make_range(Ref.begin(), Ref.end());
795  }
796  return llvm::make_range(StackElem.DoacrossDepends.end(),
797  StackElem.DoacrossDepends.end());
798  }
799 
800  // Store types of classes which have been explicitly mapped
801  void addMappedClassesQualTypes(QualType QT) {
802  SharingMapTy &StackElem = getTopOfStack();
803  StackElem.MappedClassesQualTypes.insert(QT);
804  }
805 
806  // Check if the given class type was explicitly mapped before.
807  bool isClassPreviouslyMapped(QualType QT) const {
808  const SharingMapTy &StackElem = getTopOfStack();
809  return StackElem.MappedClassesQualTypes.count(QT) != 0;
810  }
811 
812  /// Adds global declare target to the parent target region.
813  void addToParentTargetRegionLinkGlobals(DeclRefExpr *E) {
814  assert(*OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(
815  E->getDecl()) == OMPDeclareTargetDeclAttr::MT_Link &&
816  "Expected declare target link global.");
817  for (auto &Elem : *this) {
818  if (isOpenMPTargetExecutionDirective(Elem.Directive)) {
819  Elem.DeclareTargetLinkVarDecls.push_back(E);
820  return;
821  }
822  }
823  }
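  // Illustration: given '#pragma omp declare target link(gvar)', each
  // reference to 'gvar' that appears inside an enclosing target region is
  // appended to that region's DeclareTargetLinkVarDecls list by the function
  // above.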
824 
825  /// Returns the list of globals with declare target link if current directive
826  /// is target.
827  ArrayRef<DeclRefExpr *> getLinkGlobals() const {
828  assert(isOpenMPTargetExecutionDirective(getCurrentDirective()) &&
829  "Expected target executable directive.");
830  return getTopOfStack().DeclareTargetLinkVarDecls;
831  }
832 };
833 
834 bool isImplicitTaskingRegion(OpenMPDirectiveKind DKind) {
835  return isOpenMPParallelDirective(DKind) || isOpenMPTeamsDirective(DKind);
836 }
837 
838 bool isImplicitOrExplicitTaskingRegion(OpenMPDirectiveKind DKind) {
839  return isImplicitTaskingRegion(DKind) || isOpenMPTaskingDirective(DKind) ||
840  DKind == OMPD_unknown;
841 }
842 
843 } // namespace
844 
845 static const Expr *getExprAsWritten(const Expr *E) {
846  if (const auto *FE = dyn_cast<FullExpr>(E))
847  E = FE->getSubExpr();
848 
849  if (const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
850  E = MTE->GetTemporaryExpr();
851 
852  while (const auto *Binder = dyn_cast<CXXBindTemporaryExpr>(E))
853  E = Binder->getSubExpr();
854 
855  if (const auto *ICE = dyn_cast<ImplicitCastExpr>(E))
856  E = ICE->getSubExprAsWritten();
857  return E->IgnoreParens();
858 }
859 
860 static Expr *getExprAsWritten(Expr *E) {
861  return const_cast<Expr *>(getExprAsWritten(const_cast<const Expr *>(E)));
862 }
863 
864 static const ValueDecl *getCanonicalDecl(const ValueDecl *D) {
865  if (const auto *CED = dyn_cast<OMPCapturedExprDecl>(D))
866  if (const auto *ME = dyn_cast<MemberExpr>(getExprAsWritten(CED->getInit())))
867  D = ME->getMemberDecl();
868  const auto *VD = dyn_cast<VarDecl>(D);
869  const auto *FD = dyn_cast<FieldDecl>(D);
870  if (VD != nullptr) {
871  VD = VD->getCanonicalDecl();
872  D = VD;
873  } else {
874  assert(FD);
875  FD = FD->getCanonicalDecl();
876  D = FD;
877  }
878  return D;
879 }
880 
881 static ValueDecl *getCanonicalDecl(ValueDecl *D) {
882  return const_cast<ValueDecl *>(
883  getCanonicalDecl(const_cast<const ValueDecl *>(D)));
884 }
885 
886 DSAStackTy::DSAVarData DSAStackTy::getDSA(const_iterator &Iter,
887  ValueDecl *D) const {
888  D = getCanonicalDecl(D);
889  auto *VD = dyn_cast<VarDecl>(D);
890  const auto *FD = dyn_cast<FieldDecl>(D);
891  DSAVarData DVar;
892  if (Iter == end()) {
893  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
894  // in a region but not in construct]
895  // File-scope or namespace-scope variables referenced in called routines
896  // in the region are shared unless they appear in a threadprivate
897  // directive.
898  if (VD && !VD->isFunctionOrMethodVarDecl() && !isa<ParmVarDecl>(VD))
899  DVar.CKind = OMPC_shared;
900 
901  // OpenMP [2.9.1.2, Data-sharing Attribute Rules for Variables Referenced
902  // in a region but not in construct]
903  // Variables with static storage duration that are declared in called
904  // routines in the region are shared.
905  if (VD && VD->hasGlobalStorage())
906  DVar.CKind = OMPC_shared;
907 
908  // Non-static data members are shared by default.
909  if (FD)
910  DVar.CKind = OMPC_shared;
911 
912  return DVar;
913  }
914 
915  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
916  // in a Construct, C/C++, predetermined, p.1]
917  // Variables with automatic storage duration that are declared in a scope
918  // inside the construct are private.
919  if (VD && isOpenMPLocal(VD, Iter) && VD->isLocalVarDecl() &&
920  (VD->getStorageClass() == SC_Auto || VD->getStorageClass() == SC_None)) {
921  DVar.CKind = OMPC_private;
922  return DVar;
923  }
924 
925  DVar.DKind = Iter->Directive;
926  // Explicitly specified attributes and local variables with predetermined
927  // attributes.
928  if (Iter->SharingMap.count(D)) {
929  const DSAInfo &Data = Iter->SharingMap.lookup(D);
930  DVar.RefExpr = Data.RefExpr.getPointer();
931  DVar.PrivateCopy = Data.PrivateCopy;
932  DVar.CKind = Data.Attributes;
933  DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
934  return DVar;
935  }
936 
937  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
938  // in a Construct, C/C++, implicitly determined, p.1]
939  // In a parallel or task construct, the data-sharing attributes of these
940  // variables are determined by the default clause, if present.
941  switch (Iter->DefaultAttr) {
942  case DSA_shared:
943  DVar.CKind = OMPC_shared;
944  DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
945  return DVar;
946  case DSA_none:
947  return DVar;
948  case DSA_unspecified:
949  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
950  // in a Construct, implicitly determined, p.2]
951  // In a parallel construct, if no default clause is present, these
952  // variables are shared.
953  DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
954  if (isOpenMPParallelDirective(DVar.DKind) ||
955  isOpenMPTeamsDirective(DVar.DKind)) {
956  DVar.CKind = OMPC_shared;
957  return DVar;
958  }
959 
960  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
961  // in a Construct, implicitly determined, p.4]
962  // In a task construct, if no default clause is present, a variable that in
963  // the enclosing context is determined to be shared by all implicit tasks
964  // bound to the current team is shared.
965  if (isOpenMPTaskingDirective(DVar.DKind)) {
966  DSAVarData DVarTemp;
967  const_iterator I = Iter, E = end();
968  do {
969  ++I;
970  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables
971  // Referenced in a Construct, implicitly determined, p.6]
972  // In a task construct, if no default clause is present, a variable
973  // whose data-sharing attribute is not determined by the rules above is
974  // firstprivate.
975  DVarTemp = getDSA(I, D);
976  if (DVarTemp.CKind != OMPC_shared) {
977  DVar.RefExpr = nullptr;
978  DVar.CKind = OMPC_firstprivate;
979  return DVar;
980  }
981  } while (I != E && !isImplicitTaskingRegion(I->Directive));
982  DVar.CKind =
983  (DVarTemp.CKind == OMPC_unknown) ? OMPC_firstprivate : OMPC_shared;
984  return DVar;
985  }
986  }
987  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
988  // in a Construct, implicitly determined, p.3]
989  // For constructs other than task, if no default clause is present, these
990  // variables inherit their data-sharing attributes from the enclosing
991  // context.
992  return getDSA(++Iter, D);
993 }
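// Illustration of the implicitly-determined rules above: with
//   int g;
//   #pragma omp task
//   g = 1;
// and no 'default' clause, 'g' is shared in the enclosing context and is
// therefore shared in the task; a variable that is not shared in the
// enclosing context would instead be implicitly firstprivate.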
994 
995 const Expr *DSAStackTy::addUniqueAligned(const ValueDecl *D,
996  const Expr *NewDE) {
997  assert(!isStackEmpty() && "Data sharing attributes stack is empty");
998  D = getCanonicalDecl(D);
999  SharingMapTy &StackElem = getTopOfStack();
1000  auto It = StackElem.AlignedMap.find(D);
1001  if (It == StackElem.AlignedMap.end()) {
1002  assert(NewDE && "Unexpected nullptr expr to be added into aligned map");
1003  StackElem.AlignedMap[D] = NewDE;
1004  return nullptr;
1005  }
1006  assert(It->second && "Unexpected nullptr expr in the aligned map");
1007  return It->second;
1008 }
1009 
1010 void DSAStackTy::addLoopControlVariable(const ValueDecl *D, VarDecl *Capture) {
1011  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
1012  D = getCanonicalDecl(D);
1013  SharingMapTy &StackElem = getTopOfStack();
1014  StackElem.LCVMap.try_emplace(
1015  D, LCDeclInfo(StackElem.LCVMap.size() + 1, Capture));
1016 }
1017 
1018 const DSAStackTy::LCDeclInfo
1019 DSAStackTy::isLoopControlVariable(const ValueDecl *D) const {
1020  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
1021  D = getCanonicalDecl(D);
1022  const SharingMapTy &StackElem = getTopOfStack();
1023  auto It = StackElem.LCVMap.find(D);
1024  if (It != StackElem.LCVMap.end())
1025  return It->second;
1026  return {0, nullptr};
1027 }
1028 
1029 const DSAStackTy::LCDeclInfo
1030 DSAStackTy::isParentLoopControlVariable(const ValueDecl *D) const {
1031  const SharingMapTy *Parent = getSecondOnStackOrNull();
1032  assert(Parent && "Data-sharing attributes stack is empty");
1033  D = getCanonicalDecl(D);
1034  auto It = Parent->LCVMap.find(D);
1035  if (It != Parent->LCVMap.end())
1036  return It->second;
1037  return {0, nullptr};
1038 }
1039 
1040 const ValueDecl *DSAStackTy::getParentLoopControlVariable(unsigned I) const {
1041  const SharingMapTy *Parent = getSecondOnStackOrNull();
1042  assert(Parent && "Data-sharing attributes stack is empty");
1043  if (Parent->LCVMap.size() < I)
1044  return nullptr;
1045  for (const auto &Pair : Parent->LCVMap)
1046  if (Pair.second.first == I)
1047  return Pair.first;
1048  return nullptr;
1049 }
1050 
1051 void DSAStackTy::addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
1052  DeclRefExpr *PrivateCopy) {
1053  D = getCanonicalDecl(D);
1054  if (A == OMPC_threadprivate) {
1055  DSAInfo &Data = Threadprivates[D];
1056  Data.Attributes = A;
1057  Data.RefExpr.setPointer(E);
1058  Data.PrivateCopy = nullptr;
1059  } else {
1060  DSAInfo &Data = getTopOfStack().SharingMap[D];
1061  assert(Data.Attributes == OMPC_unknown || (A == Data.Attributes) ||
1062  (A == OMPC_firstprivate && Data.Attributes == OMPC_lastprivate) ||
1063  (A == OMPC_lastprivate && Data.Attributes == OMPC_firstprivate) ||
1064  (isLoopControlVariable(D).first && A == OMPC_private));
1065  if (A == OMPC_lastprivate && Data.Attributes == OMPC_firstprivate) {
1066  Data.RefExpr.setInt(/*IntVal=*/true);
1067  return;
1068  }
1069  const bool IsLastprivate =
1070  A == OMPC_lastprivate || Data.Attributes == OMPC_lastprivate;
1071  Data.Attributes = A;
1072  Data.RefExpr.setPointerAndInt(E, IsLastprivate);
1073  Data.PrivateCopy = PrivateCopy;
1074  if (PrivateCopy) {
1075  DSAInfo &Data = getTopOfStack().SharingMap[PrivateCopy->getDecl()];
1076  Data.Attributes = A;
1077  Data.RefExpr.setPointerAndInt(PrivateCopy, IsLastprivate);
1078  Data.PrivateCopy = nullptr;
1079  }
1080  }
1081 }
1082 
1083 /// Build a variable declaration for OpenMP loop iteration variable.
1084 static VarDecl *buildVarDecl(Sema &SemaRef, SourceLocation Loc, QualType Type,
1085  StringRef Name, const AttrVec *Attrs = nullptr,
1086  DeclRefExpr *OrigRef = nullptr) {
1087  DeclContext *DC = SemaRef.CurContext;
1088  IdentifierInfo *II = &SemaRef.PP.getIdentifierTable().get(Name);
1089  TypeSourceInfo *TInfo = SemaRef.Context.getTrivialTypeSourceInfo(Type, Loc);
1090  auto *Decl =
1091  VarDecl::Create(SemaRef.Context, DC, Loc, Loc, II, Type, TInfo, SC_None);
1092  if (Attrs) {
1093  for (specific_attr_iterator<AlignedAttr> I(Attrs->begin()), E(Attrs->end());
1094  I != E; ++I)
1095  Decl->addAttr(*I);
1096  }
1097  Decl->setImplicit();
1098  if (OrigRef) {
1099  Decl->addAttr(
1100  OMPReferencedVarAttr::CreateImplicit(SemaRef.Context, OrigRef));
1101  }
1102  return Decl;
1103 }
1104 
1105 static DeclRefExpr *buildDeclRefExpr(Sema &S, VarDecl *D, QualType Ty,
1106  SourceLocation Loc,
1107  bool RefersToCapture = false) {
1108  D->setReferenced();
1109  D->markUsed(S.Context);
1110  return DeclRefExpr::Create(S.getASTContext(), NestedNameSpecifierLoc(),
1111  SourceLocation(), D, RefersToCapture, Loc, Ty,
1112  VK_LValue);
1113 }
1114 
1115 void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
1116  BinaryOperatorKind BOK) {
1117  D = getCanonicalDecl(D);
1118  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
1119  assert(
1120  getTopOfStack().SharingMap[D].Attributes == OMPC_reduction &&
1121  "Additional reduction info may be specified only for reduction items.");
1122  ReductionData &ReductionData = getTopOfStack().ReductionMap[D];
1123  assert(ReductionData.ReductionRange.isInvalid() &&
1124  getTopOfStack().Directive == OMPD_taskgroup &&
1125  "Additional reduction info may be specified only once for reduction "
1126  "items.");
1127  ReductionData.set(BOK, SR);
1128  Expr *&TaskgroupReductionRef =
1129  getTopOfStack().TaskgroupReductionRef;
1130  if (!TaskgroupReductionRef) {
1131  VarDecl *VD = buildVarDecl(SemaRef, SR.getBegin(),
1132  SemaRef.Context.VoidPtrTy, ".task_red.");
1133  TaskgroupReductionRef =
1134  buildDeclRefExpr(SemaRef, VD, SemaRef.Context.VoidPtrTy, SR.getBegin());
1135  }
1136 }
1137 
1138 void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
1139  const Expr *ReductionRef) {
1140  D = getCanonicalDecl(D);
1141  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
1142  assert(
1143  getTopOfStack().SharingMap[D].Attributes == OMPC_reduction &&
1144  "Additional reduction info may be specified only for reduction items.");
1145  ReductionData &ReductionData = getTopOfStack().ReductionMap[D];
1146  assert(ReductionData.ReductionRange.isInvalid() &&
1147  getTopOfStack().Directive == OMPD_taskgroup &&
1148  "Additional reduction info may be specified only once for reduction "
1149  "items.");
1150  ReductionData.set(ReductionRef, SR);
1151  Expr *&TaskgroupReductionRef =
1152  getTopOfStack().TaskgroupReductionRef;
1153  if (!TaskgroupReductionRef) {
1154  VarDecl *VD = buildVarDecl(SemaRef, SR.getBegin(),
1155  SemaRef.Context.VoidPtrTy, ".task_red.");
1156  TaskgroupReductionRef =
1157  buildDeclRefExpr(SemaRef, VD, SemaRef.Context.VoidPtrTy, SR.getBegin());
1158  }
1159 }
1160 
1161 const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
1162  const ValueDecl *D, SourceRange &SR, BinaryOperatorKind &BOK,
1163  Expr *&TaskgroupDescriptor) const {
1164  D = getCanonicalDecl(D);
1165  assert(!isStackEmpty() && "Data-sharing attributes stack is empty.");
1166  for (const_iterator I = begin() + 1, E = end(); I != E; ++I) {
1167  const DSAInfo &Data = I->SharingMap.lookup(D);
1168  if (Data.Attributes != OMPC_reduction || I->Directive != OMPD_taskgroup)
1169  continue;
1170  const ReductionData &ReductionData = I->ReductionMap.lookup(D);
1171  if (!ReductionData.ReductionOp ||
1172  ReductionData.ReductionOp.is<const Expr *>())
1173  return DSAVarData();
1174  SR = ReductionData.ReductionRange;
1175  BOK = ReductionData.ReductionOp.get<ReductionData::BOKPtrType>();
1176  assert(I->TaskgroupReductionRef && "taskgroup reduction reference "
1177  "expression for the descriptor is not "
1178  "set.");
1179  TaskgroupDescriptor = I->TaskgroupReductionRef;
1180  return DSAVarData(OMPD_taskgroup, OMPC_reduction, Data.RefExpr.getPointer(),
1181  Data.PrivateCopy, I->DefaultAttrLoc);
1182  }
1183  return DSAVarData();
1184 }
1185 
1186 const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
1187  const ValueDecl *D, SourceRange &SR, const Expr *&ReductionRef,
1188  Expr *&TaskgroupDescriptor) const {
1189  D = getCanonicalDecl(D);
1190  assert(!isStackEmpty() && "Data-sharing attributes stack is empty.");
1191  for (const_iterator I = begin() + 1, E = end(); I != E; ++I) {
1192  const DSAInfo &Data = I->SharingMap.lookup(D);
1193  if (Data.Attributes != OMPC_reduction || I->Directive != OMPD_taskgroup)
1194  continue;
1195  const ReductionData &ReductionData = I->ReductionMap.lookup(D);
1196  if (!ReductionData.ReductionOp ||
1197  !ReductionData.ReductionOp.is<const Expr *>())
1198  return DSAVarData();
1199  SR = ReductionData.ReductionRange;
1200  ReductionRef = ReductionData.ReductionOp.get<const Expr *>();
1201  assert(I->TaskgroupReductionRef && "taskgroup reduction reference "
1202  "expression for the descriptor is not "
1203  "set.");
1204  TaskgroupDescriptor = I->TaskgroupReductionRef;
1205  return DSAVarData(OMPD_taskgroup, OMPC_reduction, Data.RefExpr.getPointer(),
1206  Data.PrivateCopy, I->DefaultAttrLoc);
1207  }
1208  return DSAVarData();
1209 }
1210 
1211 bool DSAStackTy::isOpenMPLocal(VarDecl *D, const_iterator I) const {
1212  D = D->getCanonicalDecl();
1213  for (const_iterator E = end(); I != E; ++I) {
1214  if (isImplicitOrExplicitTaskingRegion(I->Directive) ||
1215  isOpenMPTargetExecutionDirective(I->Directive)) {
1216  Scope *TopScope = I->CurScope ? I->CurScope->getParent() : nullptr;
1217  Scope *CurScope = getCurScope();
1218  while (CurScope && CurScope != TopScope && !CurScope->isDeclScope(D))
1219  CurScope = CurScope->getParent();
1220  return CurScope != TopScope;
1221  }
1222  }
1223  return false;
1224 }
1225 
1226 static bool isConstNotMutableType(Sema &SemaRef, QualType Type,
1227  bool AcceptIfMutable = true,
1228  bool *IsClassType = nullptr) {
1229  ASTContext &Context = SemaRef.getASTContext();
1230  Type = Type.getNonReferenceType().getCanonicalType();
1231  bool IsConstant = Type.isConstant(Context);
1232  Type = Context.getBaseElementType(Type);
1233  const CXXRecordDecl *RD = AcceptIfMutable && SemaRef.getLangOpts().CPlusPlus
1234  ? Type->getAsCXXRecordDecl()
1235  : nullptr;
1236  if (const auto *CTSD = dyn_cast_or_null<ClassTemplateSpecializationDecl>(RD))
1237  if (const ClassTemplateDecl *CTD = CTSD->getSpecializedTemplate())
1238  RD = CTD->getTemplatedDecl();
1239  if (IsClassType)
1240  *IsClassType = RD;
1241  return IsConstant && !(SemaRef.getLangOpts().CPlusPlus && RD &&
1242  RD->hasDefinition() && RD->hasMutableFields());
1243 }
1244 
1245 static bool rejectConstNotMutableType(Sema &SemaRef, const ValueDecl *D,
1246  QualType Type, OpenMPClauseKind CKind,
1247  SourceLocation ELoc,
1248  bool AcceptIfMutable = true,
1249  bool ListItemNotVar = false) {
1250  ASTContext &Context = SemaRef.getASTContext();
1251  bool IsClassType;
1252  if (isConstNotMutableType(SemaRef, Type, AcceptIfMutable, &IsClassType)) {
1253  unsigned Diag = ListItemNotVar
1254  ? diag::err_omp_const_list_item
1255  : IsClassType ? diag::err_omp_const_not_mutable_variable
1256  : diag::err_omp_const_variable;
1257  SemaRef.Diag(ELoc, Diag) << getOpenMPClauseName(CKind);
1258  if (!ListItemNotVar && D) {
1259  const VarDecl *VD = dyn_cast<VarDecl>(D);
1260  bool IsDecl = !VD || VD->isThisDeclarationADefinition(Context) ==
1261  VarDecl::DeclarationOnly;
1262  SemaRef.Diag(D->getLocation(),
1263  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
1264  << D;
1265  }
1266  return true;
1267  }
1268  return false;
1269 }
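// Illustration: a const-qualified list item in a clause that needs to modify
// it, for instance
//   const int c = 0;
//   #pragma omp parallel for lastprivate(c)
// can be rejected through checks like this one, emitting err_omp_const_variable
// (or one of the related diagnostics selected above).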
1270 
1271 const DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D,
1272  bool FromParent) {
1273  D = getCanonicalDecl(D);
1274  DSAVarData DVar;
1275 
1276  auto *VD = dyn_cast<VarDecl>(D);
1277  auto TI = Threadprivates.find(D);
1278  if (TI != Threadprivates.end()) {
1279  DVar.RefExpr = TI->getSecond().RefExpr.getPointer();
1280  DVar.CKind = OMPC_threadprivate;
1281  return DVar;
1282  }
1283  if (VD && VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
1284  DVar.RefExpr = buildDeclRefExpr(
1285  SemaRef, VD, D->getType().getNonReferenceType(),
1286  VD->getAttr<OMPThreadPrivateDeclAttr>()->getLocation());
1287  DVar.CKind = OMPC_threadprivate;
1288  addDSA(D, DVar.RefExpr, OMPC_threadprivate);
1289  return DVar;
1290  }
1291  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1292  // in a Construct, C/C++, predetermined, p.1]
1293  // Variables appearing in threadprivate directives are threadprivate.
1294  if ((VD && VD->getTLSKind() != VarDecl::TLS_None &&
1295  !(VD->hasAttr<OMPThreadPrivateDeclAttr>() &&
1296  SemaRef.getLangOpts().OpenMPUseTLS &&
1297  SemaRef.getASTContext().getTargetInfo().isTLSSupported())) ||
1298  (VD && VD->getStorageClass() == SC_Register &&
1299  VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())) {
1300  DVar.RefExpr = buildDeclRefExpr(
1301  SemaRef, VD, D->getType().getNonReferenceType(), D->getLocation());
1302  DVar.CKind = OMPC_threadprivate;
1303  addDSA(D, DVar.RefExpr, OMPC_threadprivate);
1304  return DVar;
1305  }
1306  if (SemaRef.getLangOpts().OpenMPCUDAMode && VD &&
1307  VD->isLocalVarDeclOrParm() && !isStackEmpty() &&
1308  !isLoopControlVariable(D).first) {
1309  const_iterator IterTarget =
1310  std::find_if(begin(), end(), [](const SharingMapTy &Data) {
1311  return isOpenMPTargetExecutionDirective(Data.Directive);
1312  });
1313  if (IterTarget != end()) {
1314  const_iterator ParentIterTarget = IterTarget + 1;
1315  for (const_iterator Iter = begin();
1316  Iter != ParentIterTarget; ++Iter) {
1317  if (isOpenMPLocal(VD, Iter)) {
1318  DVar.RefExpr =
1319  buildDeclRefExpr(SemaRef, VD, D->getType().getNonReferenceType(),
1320  D->getLocation());
1321  DVar.CKind = OMPC_threadprivate;
1322  return DVar;
1323  }
1324  }
1325  if (!isClauseParsingMode() || IterTarget != begin()) {
1326  auto DSAIter = IterTarget->SharingMap.find(D);
1327  if (DSAIter != IterTarget->SharingMap.end() &&
1328  isOpenMPPrivate(DSAIter->getSecond().Attributes)) {
1329  DVar.RefExpr = DSAIter->getSecond().RefExpr.getPointer();
1330  DVar.CKind = OMPC_threadprivate;
1331  return DVar;
1332  }
1333  const_iterator End = end();
1334  if (!SemaRef.isOpenMPCapturedByRef(
1335  D, std::distance(ParentIterTarget, End))) {
1336  DVar.RefExpr =
1337  buildDeclRefExpr(SemaRef, VD, D->getType().getNonReferenceType(),
1338  IterTarget->ConstructLoc);
1339  DVar.CKind = OMPC_threadprivate;
1340  return DVar;
1341  }
1342  }
1343  }
1344  }
1345 
1346  if (isStackEmpty())
1347  // Not in OpenMP execution region and top scope was already checked.
1348  return DVar;
1349 
1350  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1351  // in a Construct, C/C++, predetermined, p.4]
1352  // Static data members are shared.
1353  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1354  // in a Construct, C/C++, predetermined, p.7]
1355  // Variables with static storage duration that are declared in a scope
1356  // inside the construct are shared.
1357  auto &&MatchesAlways = [](OpenMPDirectiveKind) { return true; };
1358  if (VD && VD->isStaticDataMember()) {
1359  DSAVarData DVarTemp = hasDSA(D, isOpenMPPrivate, MatchesAlways, FromParent);
1360  if (DVarTemp.CKind != OMPC_unknown && DVarTemp.RefExpr)
1361  return DVar;
1362 
1363  DVar.CKind = OMPC_shared;
1364  return DVar;
1365  }
1366 
1367  // The predetermined shared attribute for const-qualified types having no
1368  // mutable members was removed after OpenMP 3.1.
1369  if (SemaRef.LangOpts.OpenMP <= 31) {
1370  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1371  // in a Construct, C/C++, predetermined, p.6]
1372  // Variables with const qualified type having no mutable member are
1373  // shared.
1374  if (isConstNotMutableType(SemaRef, D->getType())) {
1375  // Variables with const-qualified type having no mutable member may be
1376  // listed in a firstprivate clause, even if they are static data members.
1377  DSAVarData DVarTemp = hasInnermostDSA(
1378  D,
1379  [](OpenMPClauseKind C) {
1380  return C == OMPC_firstprivate || C == OMPC_shared;
1381  },
1382  MatchesAlways, FromParent);
1383  if (DVarTemp.CKind != OMPC_unknown && DVarTemp.RefExpr)
1384  return DVarTemp;
1385 
1386  DVar.CKind = OMPC_shared;
1387  return DVar;
1388  }
1389  }
1390 
1391  // Explicitly specified attributes and local variables with predetermined
1392  // attributes.
1393  const_iterator I = begin();
1394  const_iterator EndI = end();
1395  if (FromParent && I != EndI)
1396  ++I;
1397  auto It = I->SharingMap.find(D);
1398  if (It != I->SharingMap.end()) {
1399  const DSAInfo &Data = It->getSecond();
1400  DVar.RefExpr = Data.RefExpr.getPointer();
1401  DVar.PrivateCopy = Data.PrivateCopy;
1402  DVar.CKind = Data.Attributes;
1403  DVar.ImplicitDSALoc = I->DefaultAttrLoc;
1404  DVar.DKind = I->Directive;
1405  }
1406 
1407  return DVar;
1408 }
1409 
1410 const DSAStackTy::DSAVarData DSAStackTy::getImplicitDSA(ValueDecl *D,
1411  bool FromParent) const {
1412  if (isStackEmpty()) {
1413  const_iterator I;
1414  return getDSA(I, D);
1415  }
1416  D = getCanonicalDecl(D);
1417  const_iterator StartI = begin();
1418  const_iterator EndI = end();
1419  if (FromParent && StartI != EndI)
1420  ++StartI;
1421  return getDSA(StartI, D);
1422 }
1423 
1424 const DSAStackTy::DSAVarData
1425 DSAStackTy::hasDSA(ValueDecl *D,
1426  const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
1427  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
1428  bool FromParent) const {
1429  if (isStackEmpty())
1430  return {};
1431  D = getCanonicalDecl(D);
1432  const_iterator I = begin();
1433  const_iterator EndI = end();
1434  if (FromParent && I != EndI)
1435  ++I;
1436  for (; I != EndI; ++I) {
1437  if (!DPred(I->Directive) &&
1438  !isImplicitOrExplicitTaskingRegion(I->Directive))
1439  continue;
1440  const_iterator NewI = I;
1441  DSAVarData DVar = getDSA(NewI, D);
1442  if (I == NewI && CPred(DVar.CKind))
1443  return DVar;
1444  }
1445  return {};
1446 }
1447 
1448 const DSAStackTy::DSAVarData DSAStackTy::hasInnermostDSA(
1449  ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
1450  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
1451  bool FromParent) const {
1452  if (isStackEmpty())
1453  return {};
1454  D = getCanonicalDecl(D);
1455  const_iterator StartI = begin();
1456  const_iterator EndI = end();
1457  if (FromParent && StartI != EndI)
1458  ++StartI;
1459  if (StartI == EndI || !DPred(StartI->Directive))
1460  return {};
1461  const_iterator NewI = StartI;
1462  DSAVarData DVar = getDSA(NewI, D);
1463  return (NewI == StartI && CPred(DVar.CKind)) ? DVar : DSAVarData();
1464 }
1465 
1466 bool DSAStackTy::hasExplicitDSA(
1467  const ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
1468  unsigned Level, bool NotLastprivate) const {
1469  if (getStackSize() <= Level)
1470  return false;
1471  D = getCanonicalDecl(D);
1472  const SharingMapTy &StackElem = getStackElemAtLevel(Level);
1473  auto I = StackElem.SharingMap.find(D);
1474  if (I != StackElem.SharingMap.end() &&
1475  I->getSecond().RefExpr.getPointer() &&
1476  CPred(I->getSecond().Attributes) &&
1477  (!NotLastprivate || !I->getSecond().RefExpr.getInt()))
1478  return true;
1479  // Check predetermined rules for the loop control variables.
1480  auto LI = StackElem.LCVMap.find(D);
1481  if (LI != StackElem.LCVMap.end())
1482  return CPred(OMPC_private);
1483  return false;
1484 }
1485 
1486 bool DSAStackTy::hasExplicitDirective(
1487  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
1488  unsigned Level) const {
1489  if (getStackSize() <= Level)
1490  return false;
1491  const SharingMapTy &StackElem = getStackElemAtLevel(Level);
1492  return DPred(StackElem.Directive);
1493 }
1494 
1495 bool DSAStackTy::hasDirective(
1496  const llvm::function_ref<bool(OpenMPDirectiveKind,
1497  const DeclarationNameInfo &, SourceLocation)>
1498  DPred,
1499  bool FromParent) const {
1500  // We look only in the enclosing region.
1501  size_t Skip = FromParent ? 2 : 1;
1502  for (const_iterator I = begin() + std::min(Skip, getStackSize()), E = end();
1503  I != E; ++I) {
1504  if (DPred(I->Directive, I->DirectiveName, I->ConstructLoc))
1505  return true;
1506  }
1507  return false;
1508 }
1509 
1510 void Sema::InitDataSharingAttributesStack() {
1511  VarDataSharingAttributesStack = new DSAStackTy(*this);
1512 }
1513 
1514 #define DSAStack static_cast<DSAStackTy *>(VarDataSharingAttributesStack)
1515 
1516 void Sema::pushOpenMPFunctionRegion() {
1517  DSAStack->pushFunction();
1518 }
1519 
1520 void Sema::popOpenMPFunctionRegion(const FunctionScopeInfo *OldFSI) {
1521  DSAStack->popFunction(OldFSI);
1522 }
1523 
1524 static bool isOpenMPDeviceDelayedContext(Sema &S) {
1525  assert(S.LangOpts.OpenMP && S.LangOpts.OpenMPIsDevice &&
1526  "Expected OpenMP device compilation.");
1527  return !S.isInOpenMPTargetExecutionDirective() &&
1528  !S.isInOpenMPDeclareTargetContext();
1529 }
1530 
1531 /// Do we know that we will eventually codegen the given function?
1532 static bool isKnownEmitted(Sema &S, FunctionDecl *FD) {
1533  assert(S.LangOpts.OpenMP && S.LangOpts.OpenMPIsDevice &&
1534  "Expected OpenMP device compilation.");
1535  // Templates are emitted when they're instantiated.
1536  if (FD->isDependentContext())
1537  return false;
1538 
1539  if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(
1540  FD->getCanonicalDecl()))
1541  return true;
1542 
1543  // Otherwise, the function is known-emitted if it's in our set of
1544  // known-emitted functions.
1545  return S.DeviceKnownEmittedFns.count(FD) > 0;
1546 }
1547 
1548 Sema::DeviceDiagBuilder Sema::diagIfOpenMPDeviceCode(SourceLocation Loc,
1549  unsigned DiagID) {
1550  assert(LangOpts.OpenMP && LangOpts.OpenMPIsDevice &&
1551  "Expected OpenMP device compilation.");
1552  return DeviceDiagBuilder((isOpenMPDeviceDelayedContext(*this) &&
1553  !isKnownEmitted(*this, getCurFunctionDecl()))
1554  ? DeviceDiagBuilder::K_Deferred
1555  : DeviceDiagBuilder::K_Immediate,
1556  Loc, DiagID, getCurFunctionDecl(), *this);
1557 }
1558 
1559 void Sema::checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee) {
1560  assert(LangOpts.OpenMP && LangOpts.OpenMPIsDevice &&
1561  "Expected OpenMP device compilation.");
1562  assert(Callee && "Callee may not be null.");
1563  FunctionDecl *Caller = getCurFunctionDecl();
1564 
1565  // If the caller is known-emitted, mark the callee as known-emitted.
1566  // Otherwise, mark the call in our call graph so we can traverse it later.
1567  if (!isOpenMPDeviceDelayedContext(*this) ||
1568  (Caller && isKnownEmitted(*this, Caller)))
1569  markKnownEmitted(*this, Caller, Callee, Loc, isKnownEmitted);
1570  else if (Caller)
1571  DeviceCallGraph[Caller].insert({Callee, Loc});
1572 }
1573 
1574 void Sema::checkOpenMPDeviceExpr(const Expr *E) {
1575  assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
1576  "OpenMP device compilation mode is expected.");
1577  QualType Ty = E->getType();
1578  if ((Ty->isFloat16Type() && !Context.getTargetInfo().hasFloat16Type()) ||
1579  (Ty->isFloat128Type() && !Context.getTargetInfo().hasFloat128Type()) ||
1580  (Ty->isIntegerType() && Context.getTypeSize(Ty) == 128 &&
1581  !Context.getTargetInfo().hasInt128Type()))
1582  targetDiag(E->getExprLoc(), diag::err_type_unsupported)
1583  << Ty << E->getSourceRange();
1584 }
1585 
1586 bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const {
1587  assert(LangOpts.OpenMP && "OpenMP is not allowed");
1588 
1589  ASTContext &Ctx = getASTContext();
1590  bool IsByRef = true;
1591 
1592  // Find the directive that is associated with the provided scope.
1593  D = cast<ValueDecl>(D->getCanonicalDecl());
1594  QualType Ty = D->getType();
1595 
1596  if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective, Level)) {
1597  // This table summarizes how a given variable should be passed to the device
1598  // given its type and the clauses where it appears. This table is based on
1599  // the description in OpenMP 4.5 [2.10.4, target Construct] and
1600  // OpenMP 4.5 [2.15.5, Data-mapping Attribute Rules and Clauses].
1601  //
1602  // =========================================================================
1603  // | type | defaultmap | pvt | first | is_device_ptr | map | res. |
1604  // | |(tofrom:scalar)| | pvt | | | |
1605  // =========================================================================
1606  // | scl | | | | - | | bycopy|
1607  // | scl | | - | x | - | - | bycopy|
1608  // | scl | | x | - | - | - | null |
1609  // | scl | x | | | - | | byref |
1610  // | scl | x | - | x | - | - | bycopy|
1611  // | scl | x | x | - | - | - | null |
1612  // | scl | | - | - | - | x | byref |
1613  // | scl | x | - | - | - | x | byref |
1614  //
1615  // | agg | n.a. | | | - | | byref |
1616  // | agg | n.a. | - | x | - | - | byref |
1617  // | agg | n.a. | x | - | - | - | null |
1618  // | agg | n.a. | - | - | - | x | byref |
1619  // | agg | n.a. | - | - | - | x[] | byref |
1620  //
1621  // | ptr | n.a. | | | - | | bycopy|
1622  // | ptr | n.a. | - | x | - | - | bycopy|
1623  // | ptr | n.a. | x | - | - | - | null |
1624  // | ptr | n.a. | - | - | - | x | byref |
1625  // | ptr | n.a. | - | - | - | x[] | bycopy|
1626  // | ptr | n.a. | - | - | x | | bycopy|
1627  // | ptr | n.a. | - | - | x | x | bycopy|
1628  // | ptr | n.a. | - | - | x | x[] | bycopy|
1629  // =========================================================================
1630  // Legend:
1631  // scl - scalar
1632  // ptr - pointer
1633  // agg - aggregate
1634  // x - applies
1635  // - - invalid in this combination
1636  // [] - mapped with an array section
1637  // byref - should be mapped by reference
1638  // bycopy - should be mapped by value (passed by copy)
1639  // null - initialize a local variable to null on the device
1640  //
1641  // Observations:
1642  // - All scalar declarations that show up in a map clause have to be passed
1643  // by reference, because they may have been mapped in the enclosing data
1644  // environment.
1645  // - If the scalar value does not fit the size of uintptr, it has to be
1646  // passed by reference, regardless of the result in the table above.
1647  // - For pointers mapped by value that have either an implicit map or an
1648  // array section, the runtime library may pass the NULL value to the
1649  // device instead of the value passed to it by the compiler.
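  // For illustration (hypothetical user code, following the table above):
  //   int x; int *p;
  //   #pragma omp target map(tofrom: x)        // scalar in a map clause     -> byref
  //   #pragma omp target firstprivate(x)       // scalar firstprivate        -> bycopy
  //   #pragma omp target map(tofrom: p[0:10])  // pointer with array section -> bycopy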
1650 
1651  if (Ty->isReferenceType())
1652  Ty = Ty->castAs<ReferenceType>()->getPointeeType();
1653 
1654  // Locate map clauses and see if the variable being captured is referred to
1655  // in any of those clauses. Here we only care about variables, not fields,
1656  // because fields are part of aggregates.
1657  bool IsVariableUsedInMapClause = false;
1658  bool IsVariableAssociatedWithSection = false;
1659 
1660  DSAStack->checkMappableExprComponentListsForDeclAtLevel(
1661  D, Level,
1662  [&IsVariableUsedInMapClause, &IsVariableAssociatedWithSection, D](
1663  OMPClauseMappableExprCommon::MappableExprComponentListRef
1664  MapExprComponents,
1665  OpenMPClauseKind WhereFoundClauseKind) {
1666  // Only the map clause information influences how a variable is
1667  // captured. E.g. is_device_ptr does not require changing the default
1668  // behavior.
1669  if (WhereFoundClauseKind != OMPC_map)
1670  return false;
1671 
1672  auto EI = MapExprComponents.rbegin();
1673  auto EE = MapExprComponents.rend();
1674 
1675  assert(EI != EE && "Invalid map expression!");
1676 
1677  if (isa<DeclRefExpr>(EI->getAssociatedExpression()))
1678  IsVariableUsedInMapClause |= EI->getAssociatedDeclaration() == D;
1679 
1680  ++EI;
1681  if (EI == EE)
1682  return false;
1683 
1684  if (isa<ArraySubscriptExpr>(EI->getAssociatedExpression()) ||
1685  isa<OMPArraySectionExpr>(EI->getAssociatedExpression()) ||
1686  isa<MemberExpr>(EI->getAssociatedExpression())) {
1687  IsVariableAssociatedWithSection = true;
1688  // There is nothing more we need to know about this variable.
1689  return true;
1690  }
1691 
1692  // Keep looking for more map info.
1693  return false;
1694  });
1695 
1696  if (IsVariableUsedInMapClause) {
1697  // If variable is identified in a map clause it is always captured by
1698  // reference except if it is a pointer that is dereferenced somehow.
1699  IsByRef = !(Ty->isPointerType() && IsVariableAssociatedWithSection);
1700  } else {
1701  // By default, all the data that has a scalar type is mapped by copy
1702  // (except for reduction variables).
1703  IsByRef =
1704  (DSAStack->isForceCaptureByReferenceInTargetExecutable() &&
1705  !Ty->isAnyPointerType()) ||
1706  !Ty->isScalarType() ||
1707  DSAStack->getDefaultDMAAtLevel(Level) == DMA_tofrom_scalar ||
1708  DSAStack->hasExplicitDSA(
1709  D, [](OpenMPClauseKind K) { return K == OMPC_reduction; }, Level);
1710  }
1711  }
1712 
1713  if (IsByRef && Ty.getNonReferenceType()->isScalarType()) {
1714  IsByRef =
1715  ((DSAStack->isForceCaptureByReferenceInTargetExecutable() &&
1716  !Ty->isAnyPointerType()) ||
1717  !DSAStack->hasExplicitDSA(
1718  D,
1719  [](OpenMPClauseKind K) -> bool { return K == OMPC_firstprivate; },
1720  Level, /*NotLastprivate=*/true)) &&
1721  // If the variable is artificial and must be captured by value - try to
1722  // capture by value.
1723  !(isa<OMPCapturedExprDecl>(D) && !D->hasAttr<OMPCaptureNoInitAttr>() &&
1724  !cast<OMPCapturedExprDecl>(D)->getInit()->isGLValue());
1725  }
1726 
1727  // When passing data by copy, we need to make sure it fits the uintptr size
1728  // and alignment, because the runtime library only deals with uintptr types.
1729  // If it does not fit the uintptr size, we need to pass the data by reference
1730  // instead.
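  // For illustration (hypothetical, target-dependent): on a device where uintptr is
  // 64 bits, a 16-byte scalar such as '__int128' cannot be passed by copy and is
  // passed by reference instead, even when the table above selects bycopy.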
1731  if (!IsByRef &&
1732  (Ctx.getTypeSizeInChars(Ty) >
1733  Ctx.getTypeSizeInChars(Ctx.getUIntPtrType()) ||
1734  Ctx.getDeclAlign(D) > Ctx.getTypeAlignInChars(Ctx.getUIntPtrType()))) {
1735  IsByRef = true;
1736  }
1737 
1738  return IsByRef;
1739 }
1740 
1741 unsigned Sema::getOpenMPNestingLevel() const {
1742  assert(getLangOpts().OpenMP);
1743  return DSAStack->getNestingLevel();
1744 }
1745 
1746 bool Sema::isInOpenMPTargetExecutionDirective() const {
1747  return (isOpenMPTargetExecutionDirective(DSAStack->getCurrentDirective()) &&
1748  !DSAStack->isClauseParsingMode()) ||
1749  DSAStack->hasDirective(
1750  [](OpenMPDirectiveKind K, const DeclarationNameInfo &,
1751  SourceLocation) -> bool {
1752  return isOpenMPTargetExecutionDirective(K);
1753  },
1754  false);
1755 }
1756 
1757 VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo,
1758  unsigned StopAt) {
1759  assert(LangOpts.OpenMP && "OpenMP is not allowed");
1760  D = getCanonicalDecl(D);
1761 
1762  // If we want to determine whether the variable should be captured from the
1763  // perspective of the current capturing scope, and we've already left all the
1764  // capturing scopes of the top directive on the stack, check from the
1765  // perspective of its parent directive (if any) instead.
1766  DSAStackTy::ParentDirectiveScope InParentDirectiveRAII(
1767  *DSAStack, CheckScopeInfo && DSAStack->isBodyComplete());
1768 
1769  // If we are attempting to capture a global variable in a directive with
1770  // 'target' we return true so that this global is also mapped to the device.
1771  //
1772  auto *VD = dyn_cast<VarDecl>(D);
1773  if (VD && !VD->hasLocalStorage() &&
1774  (getCurCapturedRegion() || getCurBlock() || getCurLambda())) {
1775  if (isInOpenMPDeclareTargetContext()) {
1776  // Try to mark variable as declare target if it is used in capturing
1777  // regions.
1778  if (!OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
1779  checkDeclIsAllowedInOpenMPTarget(nullptr, VD);
1780  return nullptr;
1781  } else if (isInOpenMPTargetExecutionDirective()) {
1782  // If the declaration is enclosed in a 'declare target' directive,
1783  // then it should not be captured.
1784  //
1785  if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
1786  return nullptr;
1787  return VD;
1788  }
1789  }
1790  // Capture variables captured by reference in lambdas for target-based
1791  // directives.
1792  // FIXME: Triggering capture from here is completely inappropriate.
1793  if (VD && !DSAStack->isClauseParsingMode()) {
1794  if (const auto *RD = VD->getType()
1795  .getCanonicalType()
1796  .getNonReferenceType()
1797  ->getAsCXXRecordDecl()) {
1798  bool SavedForceCaptureByReferenceInTargetExecutable =
1799  DSAStack->isForceCaptureByReferenceInTargetExecutable();
1800  DSAStack->setForceCaptureByReferenceInTargetExecutable(/*V=*/true);
1801  InParentDirectiveRAII.disable();
1802  if (RD->isLambda()) {
1803  llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
1804  FieldDecl *ThisCapture;
1805  RD->getCaptureFields(Captures, ThisCapture);
1806  for (const LambdaCapture &LC : RD->captures()) {
1807  if (LC.getCaptureKind() == LCK_ByRef) {
1808  VarDecl *VD = LC.getCapturedVar();
1809  DeclContext *VDC = VD->getDeclContext();
1810  if (!VDC->Encloses(CurContext))
1811  continue;
1812  DSAStackTy::DSAVarData DVarPrivate =
1813  DSAStack->getTopDSA(VD, /*FromParent=*/false);
1814  // Do not capture already captured variables.
1815  if (!OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD) &&
1816  DVarPrivate.CKind == OMPC_unknown &&
1817  !DSAStack->checkMappableExprComponentListsForDecl(
1818  D, /*CurrentRegionOnly=*/true,
1819  [](OMPClauseMappableExprCommon::
1820  MappableExprComponentListRef,
1821  OpenMPClauseKind) { return true; }))
1822  MarkVariableReferenced(LC.getLocation(), LC.getCapturedVar());
1823  } else if (LC.getCaptureKind() == LCK_This) {
1824  QualType ThisTy = getCurrentThisType();
1825  if (!ThisTy.isNull() &&
1826  Context.typesAreCompatible(ThisTy, ThisCapture->getType()))
1827  CheckCXXThisCapture(LC.getLocation());
1828  }
1829  }
1830  }
1831  if (CheckScopeInfo && DSAStack->isBodyComplete())
1832  InParentDirectiveRAII.enable();
1833  DSAStack->setForceCaptureByReferenceInTargetExecutable(
1834  SavedForceCaptureByReferenceInTargetExecutable);
1835  }
1836  }
1837 
1838  if (CheckScopeInfo) {
1839  bool OpenMPFound = false;
1840  for (unsigned I = StopAt + 1; I > 0; --I) {
1841  FunctionScopeInfo *FSI = FunctionScopes[I - 1];
1842  if (!isa<CapturingScopeInfo>(FSI))
1843  return nullptr;
1844  if (auto *RSI = dyn_cast<CapturedRegionScopeInfo>(FSI))
1845  if (RSI->CapRegionKind == CR_OpenMP) {
1846  OpenMPFound = true;
1847  break;
1848  }
1849  }
1850  if (!OpenMPFound)
1851  return nullptr;
1852  }
1853 
1854  if (DSAStack->getCurrentDirective() != OMPD_unknown &&
1855  (!DSAStack->isClauseParsingMode() ||
1856  DSAStack->getParentDirective() != OMPD_unknown)) {
1857  auto &&Info = DSAStack->isLoopControlVariable(D);
1858  if (Info.first ||
1859  (VD && VD->hasLocalStorage() &&
1860  isImplicitOrExplicitTaskingRegion(DSAStack->getCurrentDirective())) ||
1861  (VD && DSAStack->isForceVarCapturing()))
1862  return VD ? VD : Info.second;
1863  DSAStackTy::DSAVarData DVarPrivate =
1864  DSAStack->getTopDSA(D, DSAStack->isClauseParsingMode());
1865  if (DVarPrivate.CKind != OMPC_unknown && isOpenMPPrivate(DVarPrivate.CKind))
1866  return VD ? VD : cast<VarDecl>(DVarPrivate.PrivateCopy->getDecl());
1867  DVarPrivate = DSAStack->hasDSA(D, isOpenMPPrivate,
1868  [](OpenMPDirectiveKind) { return true; },
1869  DSAStack->isClauseParsingMode());
1870  // The variable is not private or it is the variable in the directive with
1871  // default(none) clause and not used in any clause.
1872  if (DVarPrivate.CKind != OMPC_unknown ||
1873  (VD && DSAStack->getDefaultDSA() == DSA_none))
1874  return VD ? VD : cast<VarDecl>(DVarPrivate.PrivateCopy->getDecl());
1875  }
1876  return nullptr;
1877 }
1878 
1879 void Sema::adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
1880  unsigned Level) const {
1881  SmallVector<OpenMPDirectiveKind, 4> Regions;
1882  getOpenMPCaptureRegions(Regions, DSAStack->getDirective(Level));
1883  FunctionScopesIndex -= Regions.size();
1884 }
1885 
1886 void Sema::startOpenMPLoop() {
1887  assert(LangOpts.OpenMP && "OpenMP must be enabled.");
1888  if (isOpenMPLoopDirective(DSAStack->getCurrentDirective()))
1889  DSAStack->loopInit();
1890 }
1891 
1892 bool Sema::isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const {
1893  assert(LangOpts.OpenMP && "OpenMP is not allowed");
1894  if (isOpenMPLoopDirective(DSAStack->getCurrentDirective())) {
1895  if (DSAStack->getAssociatedLoops() > 0 &&
1896  !DSAStack->isLoopStarted()) {
1897  DSAStack->resetPossibleLoopCounter(D);
1898  DSAStack->loopStart();
1899  return true;
1900  }
1901  if ((DSAStack->getPossiblyLoopCunter() == D->getCanonicalDecl() ||
1902  DSAStack->isLoopControlVariable(D).first) &&
1903  !DSAStack->hasExplicitDSA(
1904  D, [](OpenMPClauseKind K) { return K != OMPC_private; }, Level) &&
1905  !isOpenMPSimdDirective(DSAStack->getCurrentDirective()))
1906  return true;
1907  }
1908  return DSAStack->hasExplicitDSA(
1909  D, [](OpenMPClauseKind K) { return K == OMPC_private; }, Level) ||
1910  (DSAStack->isClauseParsingMode() &&
1911  DSAStack->getClauseParsingMode() == OMPC_private) ||
1912  // Consider the taskgroup reduction descriptor variable private to avoid
1913  // possible capture in the region.
1914  (DSAStack->hasExplicitDirective(
1915  [](OpenMPDirectiveKind K) { return K == OMPD_taskgroup; },
1916  Level) &&
1917  DSAStack->isTaskgroupReductionRef(D, Level));
1918 }
1919 
1920 void Sema::setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D,
1921  unsigned Level) {
1922  assert(LangOpts.OpenMP && "OpenMP is not allowed");
1923  D = getCanonicalDecl(D);
1924  OpenMPClauseKind OMPC = OMPC_unknown;
1925  for (unsigned I = DSAStack->getNestingLevel() + 1; I > Level; --I) {
1926  const unsigned NewLevel = I - 1;
1927  if (DSAStack->hasExplicitDSA(D,
1928  [&OMPC](const OpenMPClauseKind K) {
1929  if (isOpenMPPrivate(K)) {
1930  OMPC = K;
1931  return true;
1932  }
1933  return false;
1934  },
1935  NewLevel))
1936  break;
1937  if (DSAStack->checkMappableExprComponentListsForDeclAtLevel(
1938  D, NewLevel,
1939  [](OMPClauseMappableExprCommon::MappableExprComponentListRef,
1940  OpenMPClauseKind) { return true; })) {
1941  OMPC = OMPC_map;
1942  break;
1943  }
1944  if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective,
1945  NewLevel)) {
1946  OMPC = OMPC_map;
1947  if (D->getType()->isScalarType() &&
1948  DSAStack->getDefaultDMAAtLevel(NewLevel) !=
1949  DefaultMapAttributes::DMA_tofrom_scalar)
1950  OMPC = OMPC_firstprivate;
1951  break;
1952  }
1953  }
1954  if (OMPC != OMPC_unknown)
1955  FD->addAttr(OMPCaptureKindAttr::CreateImplicit(Context, OMPC));
1956 }
1957 
1958 bool Sema::isOpenMPTargetCapturedDecl(const ValueDecl *D,
1959  unsigned Level) const {
1960  assert(LangOpts.OpenMP && "OpenMP is not allowed");
1961  // Return true if the current level is no longer enclosed in a target region.
1962 
1963  const auto *VD = dyn_cast<VarDecl>(D);
1964  return VD && !VD->hasLocalStorage() &&
1965  DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective,
1966  Level);
1967 }
1968 
1969 void Sema::DestroyDataSharingAttributesStack() { delete DSAStack; }
1970 
1971 void Sema::StartOpenMPDSABlock(OpenMPDirectiveKind DKind,
1972  const DeclarationNameInfo &DirName,
1973  Scope *CurScope, SourceLocation Loc) {
1974  DSAStack->push(DKind, DirName, CurScope, Loc);
1975  PushExpressionEvaluationContext(
1976  ExpressionEvaluationContext::PotentiallyEvaluated);
1977 }
1978 
1979 void Sema::StartOpenMPClause(OpenMPClauseKind K) {
1980  DSAStack->setClauseParsingMode(K);
1981 }
1982 
1983 void Sema::EndOpenMPClause() {
1984  DSAStack->setClauseParsingMode(/*K=*/OMPC_unknown);
1985 }
1986 
1987 static void checkAllocateClauses(Sema &S, DSAStackTy *Stack,
1988  ArrayRef<OMPClause *> Clauses);
1989 
1990 void Sema::EndOpenMPDSABlock(Stmt *CurDirective) {
1991  // OpenMP [2.14.3.5, Restrictions, C/C++, p.1]
1992  // A variable of class type (or array thereof) that appears in a lastprivate
1993  // clause requires an accessible, unambiguous default constructor for the
1994  // class type, unless the list item is also specified in a firstprivate
1995  // clause.
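  // For illustration, hypothetical user code this restriction covers:
  //   struct S { S(int); };                    // no default constructor
  //   S s(0);
  //   #pragma omp parallel for lastprivate(s)  // requires firstprivate(s) as well,
  //   for (int i = 0; i < 10; ++i) ;           // otherwise a default ctor is needed.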
1996  if (const auto *D = dyn_cast_or_null<OMPExecutableDirective>(CurDirective)) {
1997  for (OMPClause *C : D->clauses()) {
1998  if (auto *Clause = dyn_cast<OMPLastprivateClause>(C)) {
1999  SmallVector<Expr *, 8> PrivateCopies;
2000  for (Expr *DE : Clause->varlists()) {
2001  if (DE->isValueDependent() || DE->isTypeDependent()) {
2002  PrivateCopies.push_back(nullptr);
2003  continue;
2004  }
2005  auto *DRE = cast<DeclRefExpr>(DE->IgnoreParens());
2006  auto *VD = cast<VarDecl>(DRE->getDecl());
2007  QualType Type = VD->getType().getNonReferenceType();
2008  const DSAStackTy::DSAVarData DVar =
2009  DSAStack->getTopDSA(VD, /*FromParent=*/false);
2010  if (DVar.CKind == OMPC_lastprivate) {
2011  // Generate helper private variable and initialize it with the
2012  // default value. The address of the original variable is replaced
2013  // by the address of the new private variable in CodeGen. This new
2014  // variable is not added to IdResolver, so the code in the OpenMP
2015  // region uses original variable for proper diagnostics.
2016  VarDecl *VDPrivate = buildVarDecl(
2017  *this, DE->getExprLoc(), Type.getUnqualifiedType(),
2018  VD->getName(), VD->hasAttrs() ? &VD->getAttrs() : nullptr, DRE);
2019  ActOnUninitializedDecl(VDPrivate);
2020  if (VDPrivate->isInvalidDecl()) {
2021  PrivateCopies.push_back(nullptr);
2022  continue;
2023  }
2024  PrivateCopies.push_back(buildDeclRefExpr(
2025  *this, VDPrivate, DE->getType(), DE->getExprLoc()));
2026  } else {
2027  // The variable is also a firstprivate, so initialization sequence
2028  // for private copy is generated already.
2029  PrivateCopies.push_back(nullptr);
2030  }
2031  }
2032  Clause->setPrivateCopies(PrivateCopies);
2033  }
2034  }
2035  // Check allocate clauses.
2036  if (!CurContext->isDependentContext())
2037  checkAllocateClauses(*this, DSAStack, D->clauses());
2038  }
2039 
2040  DSAStack->pop();
2041  DiscardCleanupsInEvaluationContext();
2042  PopExpressionEvaluationContext();
2043 }
2044 
2045 static bool FinishOpenMPLinearClause(OMPLinearClause &Clause, DeclRefExpr *IV,
2046  Expr *NumIterations, Sema &SemaRef,
2047  Scope *S, DSAStackTy *Stack);
2048 
2049 namespace {
2050 
2051 class VarDeclFilterCCC final : public CorrectionCandidateCallback {
2052 private:
2053  Sema &SemaRef;
2054 
2055 public:
2056  explicit VarDeclFilterCCC(Sema &S) : SemaRef(S) {}
2057  bool ValidateCandidate(const TypoCorrection &Candidate) override {
2058  NamedDecl *ND = Candidate.getCorrectionDecl();
2059  if (const auto *VD = dyn_cast_or_null<VarDecl>(ND)) {
2060  return VD->hasGlobalStorage() &&
2061  SemaRef.isDeclInScope(ND, SemaRef.getCurLexicalContext(),
2062  SemaRef.getCurScope());
2063  }
2064  return false;
2065  }
2066 
2067  std::unique_ptr<CorrectionCandidateCallback> clone() override {
2068  return llvm::make_unique<VarDeclFilterCCC>(*this);
2069  }
2070 
2071 };
2072 
2073 class VarOrFuncDeclFilterCCC final : public CorrectionCandidateCallback {
2074 private:
2075  Sema &SemaRef;
2076 
2077 public:
2078  explicit VarOrFuncDeclFilterCCC(Sema &S) : SemaRef(S) {}
2079  bool ValidateCandidate(const TypoCorrection &Candidate) override {
2080  NamedDecl *ND = Candidate.getCorrectionDecl();
2081  if (ND && ((isa<VarDecl>(ND) && ND->getKind() == Decl::Var) ||
2082  isa<FunctionDecl>(ND))) {
2083  return SemaRef.isDeclInScope(ND, SemaRef.getCurLexicalContext(),
2084  SemaRef.getCurScope());
2085  }
2086  return false;
2087  }
2088 
2089  std::unique_ptr<CorrectionCandidateCallback> clone() override {
2090  return llvm::make_unique<VarOrFuncDeclFilterCCC>(*this);
2091  }
2092 };
2093 
2094 } // namespace
2095 
2096 ExprResult Sema::ActOnOpenMPIdExpression(Scope *CurScope,
2097  CXXScopeSpec &ScopeSpec,
2098  const DeclarationNameInfo &Id,
2099  OpenMPDirectiveKind Kind) {
2100  LookupResult Lookup(*this, Id, LookupOrdinaryName);
2101  LookupParsedName(Lookup, CurScope, &ScopeSpec, true);
2102 
2103  if (Lookup.isAmbiguous())
2104  return ExprError();
2105 
2106  VarDecl *VD;
2107  if (!Lookup.isSingleResult()) {
2108  VarDeclFilterCCC CCC(*this);
2109  if (TypoCorrection Corrected =
2110  CorrectTypo(Id, LookupOrdinaryName, CurScope, nullptr, CCC,
2111  CTK_ErrorRecovery)) {
2112  diagnoseTypo(Corrected,
2113  PDiag(Lookup.empty()
2114  ? diag::err_undeclared_var_use_suggest
2115  : diag::err_omp_expected_var_arg_suggest)
2116  << Id.getName());
2117  VD = Corrected.getCorrectionDeclAs<VarDecl>();
2118  } else {
2119  Diag(Id.getLoc(), Lookup.empty() ? diag::err_undeclared_var_use
2120  : diag::err_omp_expected_var_arg)
2121  << Id.getName();
2122  return ExprError();
2123  }
2124  } else if (!(VD = Lookup.getAsSingle<VarDecl>())) {
2125  Diag(Id.getLoc(), diag::err_omp_expected_var_arg) << Id.getName();
2126  Diag(Lookup.getFoundDecl()->getLocation(), diag::note_declared_at);
2127  return ExprError();
2128  }
2129  Lookup.suppressDiagnostics();
2130 
2131  // OpenMP [2.9.2, Syntax, C/C++]
2132  // Variables must be file-scope, namespace-scope, or static block-scope.
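 // For illustration (hypothetical user code): '#pragma omp threadprivate(g)' for a
 // file-scope 'int g;' is accepted, while naming a non-static block-scope variable
 // is rejected by the check below.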
2133  if (Kind == OMPD_threadprivate && !VD->hasGlobalStorage()) {
2134  Diag(Id.getLoc(), diag::err_omp_global_var_arg)
2135  << getOpenMPDirectiveName(Kind) << !VD->isStaticLocal();
2136  bool IsDecl =
2137  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
2138  Diag(VD->getLocation(),
2139  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2140  << VD;
2141  return ExprError();
2142  }
2143 
2144  VarDecl *CanonicalVD = VD->getCanonicalDecl();
2145  NamedDecl *ND = CanonicalVD;
2146  // OpenMP [2.9.2, Restrictions, C/C++, p.2]
2147  // A threadprivate directive for file-scope variables must appear outside
2148  // any definition or declaration.
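 // For illustration (hypothetical user code): declaring 'int g;' at file scope and
 // then writing '#pragma omp threadprivate(g)' inside a function body triggers this
 // diagnostic; the directive must also appear at file scope.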
2149  if (CanonicalVD->getDeclContext()->isTranslationUnit() &&
2150  !getCurLexicalContext()->isTranslationUnit()) {
2151  Diag(Id.getLoc(), diag::err_omp_var_scope)
2152  << getOpenMPDirectiveName(Kind) << VD;
2153  bool IsDecl =
2154  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
2155  Diag(VD->getLocation(),
2156  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2157  << VD;
2158  return ExprError();
2159  }
2160  // OpenMP [2.9.2, Restrictions, C/C++, p.3]
2161  // A threadprivate directive for static class member variables must appear
2162  // in the class definition, in the same scope in which the member
2163  // variables are declared.
2164  if (CanonicalVD->isStaticDataMember() &&
2165  !CanonicalVD->getDeclContext()->Equals(getCurLexicalContext())) {
2166  Diag(Id.getLoc(), diag::err_omp_var_scope)
2167  << getOpenMPDirectiveName(Kind) << VD;
2168  bool IsDecl =
2169  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
2170  Diag(VD->getLocation(),
2171  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2172  << VD;
2173  return ExprError();
2174  }
2175  // OpenMP [2.9.2, Restrictions, C/C++, p.4]
2176  // A threadprivate directive for namespace-scope variables must appear
2177  // outside any definition or declaration other than the namespace
2178  // definition itself.
2179  if (CanonicalVD->getDeclContext()->isNamespace() &&
2180  (!getCurLexicalContext()->isFileContext() ||
2181  !getCurLexicalContext()->Encloses(CanonicalVD->getDeclContext()))) {
2182  Diag(Id.getLoc(), diag::err_omp_var_scope)
2183  << getOpenMPDirectiveName(Kind) << VD;
2184  bool IsDecl =
2185  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
2186  Diag(VD->getLocation(),
2187  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2188  << VD;
2189  return ExprError();
2190  }
2191  // OpenMP [2.9.2, Restrictions, C/C++, p.6]
2192  // A threadprivate directive for static block-scope variables must appear
2193  // in the scope of the variable and not in a nested scope.
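 // For illustration (hypothetical user code): for 'static int s;' declared in a
 // function body, '#pragma omp threadprivate(s)' must appear in that same block;
 // placing it in a nested compound statement is diagnosed below.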
2194  if (CanonicalVD->isLocalVarDecl() && CurScope &&
2195  !isDeclInScope(ND, getCurLexicalContext(), CurScope)) {
2196  Diag(Id.getLoc(), diag::err_omp_var_scope)
2197  << getOpenMPDirectiveName(Kind) << VD;
2198  bool IsDecl =
2199  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
2200  Diag(VD->getLocation(),
2201  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2202  << VD;
2203  return ExprError();
2204  }
2205 
2206  // OpenMP [2.9.2, Restrictions, C/C++, p.2-6]
2207  // A threadprivate directive must lexically precede all references to any
2208  // of the variables in its list.
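 // For illustration (hypothetical user code): referencing 'g' first and only later
 // writing '#pragma omp threadprivate(g)' in the translation unit is rejected here.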
2209  if (Kind == OMPD_threadprivate && VD->isUsed() &&
2210  !DSAStack->isThreadPrivate(VD)) {
2211  Diag(Id.getLoc(), diag::err_omp_var_used)
2212  << getOpenMPDirectiveName(Kind) << VD;
2213  return ExprError();
2214  }
2215 
2216  QualType ExprType = VD->getType().getNonReferenceType();
2217  return DeclRefExpr::Create(Context, NestedNameSpecifierLoc(),
2218  SourceLocation(), VD,
2219  /*RefersToEnclosingVariableOrCapture=*/false,
2220  Id.getLoc(), ExprType, VK_LValue);
2221 }
2222 
2223 Sema::DeclGroupPtrTy
2224 Sema::ActOnOpenMPThreadprivateDirective(SourceLocation Loc,
2225  ArrayRef<Expr *> VarList) {
2226  if (OMPThreadPrivateDecl *D = CheckOMPThreadPrivateDecl(Loc, VarList)) {
2227  CurContext->addDecl(D);
2228  return DeclGroupPtrTy::make(DeclGroupRef(D));
2229  }
2230  return nullptr;
2231 }
2232 
2233 namespace {
2234 class LocalVarRefChecker final
2235  : public ConstStmtVisitor<LocalVarRefChecker, bool> {
2236  Sema &SemaRef;
2237 
2238 public:
2239  bool VisitDeclRefExpr(const DeclRefExpr *E) {
2240  if (const auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
2241  if (VD->hasLocalStorage()) {
2242  SemaRef.Diag(E->getBeginLoc(),
2243  diag::err_omp_local_var_in_threadprivate_init)
2244  << E->getSourceRange();
2245  SemaRef.Diag(VD->getLocation(), diag::note_defined_here)
2246  << VD << VD->getSourceRange();
2247  return true;
2248  }
2249  }
2250  return false;
2251  }
2252  bool VisitStmt(const Stmt *S) {
2253  for (const Stmt *Child : S->children()) {
2254  if (Child && Visit(Child))
2255  return true;
2256  }
2257  return false;
2258  }
2259  explicit LocalVarRefChecker(Sema &SemaRef) : SemaRef(SemaRef) {}
2260 };
2261 } // namespace
2262 
2263 OMPThreadPrivateDecl *
2264 Sema::CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList) {
2265  SmallVector<Expr *, 8> Vars;
2266  for (Expr *RefExpr : VarList) {
2267  auto *DE = cast<DeclRefExpr>(RefExpr);
2268  auto *VD = cast<VarDecl>(DE->getDecl());
2269  SourceLocation ILoc = DE->getExprLoc();
2270 
2271  // Mark variable as used.
2272  VD->setReferenced();
2273  VD->markUsed(Context);
2274 
2275  QualType QType = VD->getType();
2276  if (QType->isDependentType() || QType->isInstantiationDependentType()) {
2277  // It will be analyzed later.
2278  Vars.push_back(DE);
2279  continue;
2280  }
2281 
2282  // OpenMP [2.9.2, Restrictions, C/C++, p.10]
2283  // A threadprivate variable must not have an incomplete type.
2284  if (RequireCompleteType(ILoc, VD->getType(),
2285  diag::err_omp_threadprivate_incomplete_type)) {
2286  continue;
2287  }
2288 
2289  // OpenMP [2.9.2, Restrictions, C/C++, p.10]
2290  // A threadprivate variable must not have a reference type.
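 // For illustration (hypothetical user code): given 'int x; int &r = x;', the
 // directive '#pragma omp threadprivate(r)' is rejected by the check below.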
2291  if (VD->getType()->isReferenceType()) {
2292  Diag(ILoc, diag::err_omp_ref_type_arg)
2293  << getOpenMPDirectiveName(OMPD_threadprivate) << VD->getType();
2294  bool IsDecl =
2295  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
2296  Diag(VD->getLocation(),
2297  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2298  << VD;
2299  continue;
2300  }
2301 
2302  // Check if this is a TLS variable. If TLS is not being supported, produce
2303  // the corresponding diagnostic.
2304  if ((VD->getTLSKind() != VarDecl::TLS_None &&
2305  !(VD->hasAttr<OMPThreadPrivateDeclAttr>() &&
2306  getLangOpts().OpenMPUseTLS &&
2307  getASTContext().getTargetInfo().isTLSSupported())) ||
2308  (VD->getStorageClass() == SC_Register && VD->hasAttr<AsmLabelAttr>() &&
2309  !VD->isLocalVarDecl())) {
2310  Diag(ILoc, diag::err_omp_var_thread_local)
2311  << VD << ((VD->getTLSKind() != VarDecl::TLS_None) ? 0 : 1);
2312  bool IsDecl =
2313  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
2314  Diag(VD->getLocation(),
2315  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2316  << VD;
2317  continue;
2318  }
2319 
2320  // Check if the initial value of the threadprivate variable references a
2321  // variable with local storage (this is not supported by the runtime).
2322  if (const Expr *Init = VD->getAnyInitializer()) {
2323  LocalVarRefChecker Checker(*this);
2324  if (Checker.Visit(Init))
2325  continue;
2326  }
2327 
2328  Vars.push_back(RefExpr);
2329  DSAStack->addDSA(VD, DE, OMPC_threadprivate);
2330  VD->addAttr(OMPThreadPrivateDeclAttr::CreateImplicit(
2331  Context, SourceRange(Loc, Loc)));
2332  if (ASTMutationListener *ML = Context.getASTMutationListener())
2333  ML->DeclarationMarkedOpenMPThreadPrivate(VD);
2334  }
2335  OMPThreadPrivateDecl *D = nullptr;
2336  if (!Vars.empty()) {
2337  D = OMPThreadPrivateDecl::Create(Context, getCurLexicalContext(), Loc,
2338  Vars);
2339  D->setAccess(AS_public);
2340  }
2341  return D;
2342 }
2343 
2344 static OMPAllocateDeclAttr::AllocatorTypeTy
2345 getAllocatorKind(Sema &S, DSAStackTy *Stack, Expr *Allocator) {
2346  if (!Allocator)
2347  return OMPAllocateDeclAttr::OMPDefaultMemAlloc;
2348  if (Allocator->isTypeDependent() || Allocator->isValueDependent() ||
2349  Allocator->isInstantiationDependent() ||
2350  Allocator->containsUnexpandedParameterPack())
2351  return OMPAllocateDeclAttr::OMPUserDefinedMemAlloc;
2352  auto AllocatorKindRes = OMPAllocateDeclAttr::OMPUserDefinedMemAlloc;
2353  const Expr *AE = Allocator->IgnoreParenImpCasts();
2354  for (int I = OMPAllocateDeclAttr::OMPDefaultMemAlloc;
2355  I < OMPAllocateDeclAttr::OMPUserDefinedMemAlloc; ++I) {
2356  auto AllocatorKind = static_cast<OMPAllocateDeclAttr::AllocatorTypeTy>(I);
2357  const Expr *DefAllocator = Stack->getAllocator(AllocatorKind);
2358  llvm::FoldingSetNodeID AEId, DAEId;
2359  AE->Profile(AEId, S.getASTContext(), /*Canonical=*/true);
2360  DefAllocator->Profile(DAEId, S.getASTContext(), /*Canonical=*/true);
2361  if (AEId == DAEId) {
2362  AllocatorKindRes = AllocatorKind;
2363  break;
2364  }
2365  }
2366  return AllocatorKindRes;
2367 }
2368 
2369 static bool checkPreviousOMPAllocateAttribute(
2370  Sema &S, DSAStackTy *Stack, Expr *RefExpr, VarDecl *VD,
2371  OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind, Expr *Allocator) {
2372  if (!VD->hasAttr<OMPAllocateDeclAttr>())
2373  return false;
2374  const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
2375  Expr *PrevAllocator = A->getAllocator();
2376  OMPAllocateDeclAttr::AllocatorTypeTy PrevAllocatorKind =
2377  getAllocatorKind(S, Stack, PrevAllocator);
2378  bool AllocatorsMatch = AllocatorKind == PrevAllocatorKind;
2379  if (AllocatorsMatch &&
2380  AllocatorKind == OMPAllocateDeclAttr::OMPUserDefinedMemAlloc &&
2381  Allocator && PrevAllocator) {
2382  const Expr *AE = Allocator->IgnoreParenImpCasts();
2383  const Expr *PAE = PrevAllocator->IgnoreParenImpCasts();
2384  llvm::FoldingSetNodeID AEId, PAEId;
2385  AE->Profile(AEId, S.Context, /*Canonical=*/true);
2386  PAE->Profile(PAEId, S.Context, /*Canonical=*/true);
2387  AllocatorsMatch = AEId == PAEId;
2388  }
2389  if (!AllocatorsMatch) {
2390  SmallString<256> AllocatorBuffer;
2391  llvm::raw_svector_ostream AllocatorStream(AllocatorBuffer);
2392  if (Allocator)
2393  Allocator->printPretty(AllocatorStream, nullptr, S.getPrintingPolicy());
2394  SmallString<256> PrevAllocatorBuffer;
2395  llvm::raw_svector_ostream PrevAllocatorStream(PrevAllocatorBuffer);
2396  if (PrevAllocator)
2397  PrevAllocator->printPretty(PrevAllocatorStream, nullptr,
2398  S.getPrintingPolicy());
2399 
2400  SourceLocation AllocatorLoc =
2401  Allocator ? Allocator->getExprLoc() : RefExpr->getExprLoc();
2402  SourceRange AllocatorRange =
2403  Allocator ? Allocator->getSourceRange() : RefExpr->getSourceRange();
2404  SourceLocation PrevAllocatorLoc =
2405  PrevAllocator ? PrevAllocator->getExprLoc() : A->getLocation();
2406  SourceRange PrevAllocatorRange =
2407  PrevAllocator ? PrevAllocator->getSourceRange() : A->getRange();
2408  S.Diag(AllocatorLoc, diag::warn_omp_used_different_allocator)
2409  << (Allocator ? 1 : 0) << AllocatorStream.str()
2410  << (PrevAllocator ? 1 : 0) << PrevAllocatorStream.str()
2411  << AllocatorRange;
2412  S.Diag(PrevAllocatorLoc, diag::note_omp_previous_allocator)
2413  << PrevAllocatorRange;
2414  return true;
2415  }
2416  return false;
2417 }
2418 
2419 static void
2420 applyOMPAllocateAttribute(Sema &S, VarDecl *VD,
2421  OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind,
2422  Expr *Allocator, SourceRange SR) {
2423  if (VD->hasAttr<OMPAllocateDeclAttr>())
2424  return;
2425  if (Allocator &&
2426  (Allocator->isTypeDependent() || Allocator->isValueDependent() ||
2427  Allocator->isInstantiationDependent() ||
2428  Allocator->containsUnexpandedParameterPack()))
2429  return;
2430  auto *A = OMPAllocateDeclAttr::CreateImplicit(S.Context, AllocatorKind,
2431  Allocator, SR);
2432  VD->addAttr(A);
2433  if (ASTMutationListener *ML = S.Context.getASTMutationListener())
2434  ML->DeclarationMarkedOpenMPAllocate(VD, A);
2435 }
2436 
2437 Sema::DeclGroupPtrTy Sema::ActOnOpenMPAllocateDirective(
2438  SourceLocation Loc, ArrayRef<Expr *> VarList,
2439  ArrayRef<OMPClause *> Clauses, DeclContext *Owner) {
2440  assert(Clauses.size() <= 1 && "Expected at most one clause.");
2441  Expr *Allocator = nullptr;
2442  if (Clauses.empty()) {
2443  // OpenMP 5.0, 2.11.3 allocate Directive, Restrictions.
2444  // allocate directives that appear in a target region must specify an
2445  // allocator clause unless a requires directive with the dynamic_allocators
2446  // clause is present in the same compilation unit.
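 // For illustration (hypothetical user code): in device compilation,
 // '#pragma omp allocate(x)' with no allocator clause is diagnosed here unless
 // '#pragma omp requires dynamic_allocators' appears in the same compilation unit.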
2447  if (LangOpts.OpenMPIsDevice &&
2448  !DSAStack->hasRequiresDeclWithClause<OMPDynamicAllocatorsClause>())
2449  targetDiag(Loc, diag::err_expected_allocator_clause);
2450  } else {
2451  Allocator = cast<OMPAllocatorClause>(Clauses.back())->getAllocator();
2452  }
2453  OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind =
2454  getAllocatorKind(*this, DSAStack, Allocator);
2455  SmallVector<Expr *, 8> Vars;
2456  for (Expr *RefExpr : VarList) {
2457  auto *DE = cast<DeclRefExpr>(RefExpr);
2458  auto *VD = cast<VarDecl>(DE->getDecl());
2459 
2460  // Check if this is a TLS variable or global register.
2461  if (VD->getTLSKind() != VarDecl::TLS_None ||
2462  VD->hasAttr<OMPThreadPrivateDeclAttr>() ||
2463  (VD->getStorageClass() == SC_Register && VD->hasAttr<AsmLabelAttr>() &&
2464  !VD->isLocalVarDecl()))
2465  continue;
2466 
2467  // If the variable is used several times in the allocate directive, the same
2468  // allocator must be used.
2469  if (checkPreviousOMPAllocateAttribute(*this, DSAStack, RefExpr, VD,
2470  AllocatorKind, Allocator))
2471  continue;
2472 
2473  // OpenMP, 2.11.3 allocate Directive, Restrictions, C / C++
2474  // If a list item has static storage duration, the allocator expression in the
2475  // allocator clause must be a constant expression that evaluates to one of
2476  // the predefined memory allocator values.
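 // For illustration (hypothetical user code): for 'static int x;',
 // '#pragma omp allocate(x) allocator(omp_default_mem_alloc)' is accepted because
 // the allocator is predefined, while an allocator stored in a user-defined
 // 'omp_allocator_handle_t' variable is diagnosed below.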
2477  if (Allocator && VD->hasGlobalStorage()) {
2478  if (AllocatorKind == OMPAllocateDeclAttr::OMPUserDefinedMemAlloc) {
2479  Diag(Allocator->getExprLoc(),
2480  diag::err_omp_expected_predefined_allocator)
2481  << Allocator->getSourceRange();
2482  bool IsDecl = VD->isThisDeclarationADefinition(Context) ==
2483  VarDecl::DeclarationOnly;
2484  Diag(VD->getLocation(),
2485  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2486  << VD;
2487  continue;
2488  }
2489  }
2490 
2491  Vars.push_back(RefExpr);
2492  applyOMPAllocateAttribute(*this, VD, AllocatorKind, Allocator,
2493  DE->getSourceRange());
2494  }
2495  if (Vars.empty())
2496  return nullptr;
2497  if (!Owner)
2498  Owner = getCurLexicalContext();
2499  auto *D = OMPAllocateDecl::Create(Context, Owner, Loc, Vars, Clauses);
2500  D->setAccess(AS_public);
2501  Owner->addDecl(D);
2502  return DeclGroupPtrTy::make(DeclGroupRef(D));
2503 }
2504 
2505 Sema::DeclGroupPtrTy
2506 Sema::ActOnOpenMPRequiresDirective(SourceLocation Loc,
2507  ArrayRef<OMPClause *> ClauseList) {
2508  OMPRequiresDecl *D = nullptr;
2509  if (!CurContext->isFileContext()) {
2510  Diag(Loc, diag::err_omp_invalid_scope) << "requires";
2511  } else {
2512  D = CheckOMPRequiresDecl(Loc, ClauseList);
2513  if (D) {
2514  CurContext->addDecl(D);
2515  DSAStack->addRequiresDecl(D);
2516  }
2517  }
2518  return DeclGroupPtrTy::make(DeclGroupRef(D));
2519 }
2520 
2521 OMPRequiresDecl *Sema::CheckOMPRequiresDecl(SourceLocation Loc,
2522  ArrayRef<OMPClause *> ClauseList) {
2523  /// For target specific clauses, the requires directive cannot be
2524  /// specified after the handling of any of the target regions in the
2525  /// current compilation unit.
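 /// For illustration (hypothetical user code): a '#pragma omp target' region seen
 /// earlier in the file followed by '#pragma omp requires unified_shared_memory'
 /// is diagnosed, because the requirement would affect the already-seen region.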
2526  ArrayRef<SourceLocation> TargetLocations =
2527  DSAStack->getEncounteredTargetLocs();
2528  if (!TargetLocations.empty()) {
2529  for (const OMPClause *CNew : ClauseList) {
2530  // Check if any of the requires clauses affect target regions.
2531  if (isa<OMPUnifiedSharedMemoryClause>(CNew) ||
2532  isa<OMPUnifiedAddressClause>(CNew) ||
2533  isa<OMPReverseOffloadClause>(CNew) ||
2534  isa<OMPDynamicAllocatorsClause>(CNew)) {
2535  Diag(Loc, diag::err_omp_target_before_requires)
2536  << getOpenMPClauseName(CNew->getClauseKind());
2537  for (SourceLocation TargetLoc : TargetLocations) {
2538  Diag(TargetLoc, diag::note_omp_requires_encountered_target);
2539  }
2540  }
2541  }
2542  }
2543 
2544  if (!DSAStack->hasDuplicateRequiresClause(ClauseList))
2545  return OMPRequiresDecl::Create(Context, getCurLexicalContext(), Loc,
2546  ClauseList);
2547  return nullptr;
2548 }
2549 
2550 static void reportOriginalDsa(Sema &SemaRef, const DSAStackTy *Stack,
2551  const ValueDecl *D,
2552  const DSAStackTy::DSAVarData &DVar,
2553  bool IsLoopIterVar = false) {
2554  if (DVar.RefExpr) {
2555  SemaRef.Diag(DVar.RefExpr->getExprLoc(), diag::note_omp_explicit_dsa)
2556  << getOpenMPClauseName(DVar.CKind);
2557  return;
2558  }
2559  enum {
2560  PDSA_StaticMemberShared,
2561  PDSA_StaticLocalVarShared,
2562  PDSA_LoopIterVarPrivate,
2563  PDSA_LoopIterVarLinear,
2564  PDSA_LoopIterVarLastprivate,
2565  PDSA_ConstVarShared,
2566  PDSA_GlobalVarShared,
2567  PDSA_TaskVarFirstprivate,
2568  PDSA_LocalVarPrivate,
2569  PDSA_Implicit
2570  } Reason = PDSA_Implicit;
2571  bool ReportHint = false;
2572  auto ReportLoc = D->getLocation();
2573  auto *VD = dyn_cast<VarDecl>(D);
2574  if (IsLoopIterVar) {
2575  if (DVar.CKind == OMPC_private)
2576  Reason = PDSA_LoopIterVarPrivate;
2577  else if (DVar.CKind == OMPC_lastprivate)
2578  Reason = PDSA_LoopIterVarLastprivate;
2579  else
2580  Reason = PDSA_LoopIterVarLinear;
2581  } else if (isOpenMPTaskingDirective(DVar.DKind) &&
2582  DVar.CKind == OMPC_firstprivate) {
2583  Reason = PDSA_TaskVarFirstprivate;
2584  ReportLoc = DVar.ImplicitDSALoc;
2585  } else if (VD && VD->isStaticLocal())
2586  Reason = PDSA_StaticLocalVarShared;
2587  else if (VD && VD->isStaticDataMember())
2588  Reason = PDSA_StaticMemberShared;
2589  else if (VD && VD->isFileVarDecl())
2590  Reason = PDSA_GlobalVarShared;
2591  else if (D->getType().isConstant(SemaRef.getASTContext()))
2592  Reason = PDSA_ConstVarShared;
2593  else if (VD && VD->isLocalVarDecl() && DVar.CKind == OMPC_private) {
2594  ReportHint = true;
2595  Reason = PDSA_LocalVarPrivate;
2596  }
2597  if (Reason != PDSA_Implicit) {
2598  SemaRef.Diag(ReportLoc, diag::note_omp_predetermined_dsa)
2599  << Reason << ReportHint
2600  << getOpenMPDirectiveName(Stack->getCurrentDirective());
2601  } else if (DVar.ImplicitDSALoc.isValid()) {
2602  SemaRef.Diag(DVar.ImplicitDSALoc, diag::note_omp_implicit_dsa)
2603  << getOpenMPClauseName(DVar.CKind);
2604  }
2605 }
2606 
2607 namespace {
2608 class DSAAttrChecker final : public StmtVisitor<DSAAttrChecker, void> {
2609  DSAStackTy *Stack;
2610  Sema &SemaRef;
2611  bool ErrorFound = false;
2612  CapturedStmt *CS = nullptr;
2613  llvm::SmallVector<Expr *, 4> ImplicitFirstprivate;
2614  llvm::SmallVector<Expr *, 4> ImplicitMap;
2615  Sema::VarsWithInheritedDSAType VarsWithInheritedDSA;
2616  llvm::SmallDenseSet<const ValueDecl *, 4> ImplicitDeclarations;
2617 
2618  void VisitSubCaptures(OMPExecutableDirective *S) {
2619  // Check implicitly captured variables.
2620  if (!S->hasAssociatedStmt() || !S->getAssociatedStmt())
2621  return;
2622  for (const CapturedStmt::Capture &Cap :
2623  S->getInnermostCapturedStmt()->captures()) {
2624  if (!Cap.capturesVariable())
2625  continue;
2626  VarDecl *VD = Cap.getCapturedVar();
2627  // Do not try to map the variable if it or its sub-component was mapped
2628  // already.
2629  if (isOpenMPTargetExecutionDirective(Stack->getCurrentDirective()) &&
2630  Stack->checkMappableExprComponentListsForDecl(
2631  VD, /*CurrentRegionOnly=*/true,
2632  [](OMPClauseMappableExprCommon::MappableExprComponentListRef,
2633  OpenMPClauseKind) { return true; }))
2634  continue;
2635  DeclRefExpr *DRE = buildDeclRefExpr(
2636  SemaRef, VD, VD->getType().getNonLValueExprType(SemaRef.Context),
2637  Cap.getLocation(), /*RefersToCapture=*/true);
2638  Visit(DRE);
2639  }
2640  }
2641 
2642 public:
2643  void VisitDeclRefExpr(DeclRefExpr *E) {
2644  if (E->isTypeDependent() || E->isValueDependent() ||
2645  E->containsUnexpandedParameterPack() || E->isInstantiationDependent())
2646  return;
2647  if (auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
2648  // Check the datasharing rules for the expressions in the clauses.
2649  if (!CS) {
2650  if (auto *CED = dyn_cast<OMPCapturedExprDecl>(VD))
2651  if (!CED->hasAttr<OMPCaptureNoInitAttr>()) {
2652  Visit(CED->getInit());
2653  return;
2654  }
2655  }
2656  VD = VD->getCanonicalDecl();
2657  // Skip internally declared variables.
2658  if (VD->hasLocalStorage() && CS && !CS->capturesVariable(VD))
2659  return;
2660 
2661  DSAStackTy::DSAVarData DVar = Stack->getTopDSA(VD, /*FromParent=*/false);
2662  // Check if the variable has an explicit DSA set and stop the analysis if so.
2663  if (DVar.RefExpr || !ImplicitDeclarations.insert(VD).second)
2664  return;
2665 
2666  // Skip internally declared static variables.
2667  llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
2668  OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
2669  if (VD->hasGlobalStorage() && CS && !CS->capturesVariable(VD) &&
2670  (Stack->hasRequiresDeclWithClause<OMPUnifiedSharedMemoryClause>() ||
2671  !Res || *Res != OMPDeclareTargetDeclAttr::MT_Link))
2672  return;
2673 
2674  SourceLocation ELoc = E->getExprLoc();
2675  OpenMPDirectiveKind DKind = Stack->getCurrentDirective();
2676  // The default(none) clause requires that each variable that is referenced
2677  // in the construct, and does not have a predetermined data-sharing
2678  // attribute, must have its data-sharing attribute explicitly determined
2679  // by being listed in a data-sharing attribute clause.
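  // For illustration (hypothetical user code): under
  // '#pragma omp parallel default(none)', a reference to 'x' without an explicit
  // data-sharing clause is recorded in VarsWithInheritedDSA and diagnosed later.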
2680  if (DVar.CKind == OMPC_unknown && Stack->getDefaultDSA() == DSA_none &&
2681  isImplicitOrExplicitTaskingRegion(DKind) &&
2682  VarsWithInheritedDSA.count(VD) == 0) {
2683  VarsWithInheritedDSA[VD] = E;
2684  return;
2685  }
2686 
2687  if (isOpenMPTargetExecutionDirective(DKind) &&
2688  !Stack->isLoopControlVariable(VD).first) {
2689  if (!Stack->checkMappableExprComponentListsForDecl(
2690  VD, /*CurrentRegionOnly=*/true,
2691  [](OMPClauseMappableExprCommon::MappableExprComponentListRef
2692  StackComponents,
2693  OpenMPClauseKind) {
2694  // The variable is used if it has been marked as an array, array
2695  // section, or the variable itself.
2696  return StackComponents.size() == 1 ||
2697  std::all_of(
2698  std::next(StackComponents.rbegin()),
2699  StackComponents.rend(),
2700  [](const OMPClauseMappableExprCommon::
2701  MappableComponent &MC) {
2702  return MC.getAssociatedDeclaration() ==
2703  nullptr &&
2704  (isa<OMPArraySectionExpr>(
2705  MC.getAssociatedExpression()) ||
2706  isa<ArraySubscriptExpr>(
2707  MC.getAssociatedExpression()));
2708  });
2709  })) {
2710  bool IsFirstprivate = false;
2711  // By default lambdas are captured as firstprivates.
2712  if (const auto *RD =
2713  VD->getType().getNonReferenceType()->getAsCXXRecordDecl())
2714  IsFirstprivate = RD->isLambda();
2715  IsFirstprivate =
2716  IsFirstprivate ||
2717  (VD->getType().getNonReferenceType()->isScalarType() &&
2718  Stack->getDefaultDMA() != DMA_tofrom_scalar && !Res);
2719  if (IsFirstprivate)
2720  ImplicitFirstprivate.emplace_back(E);
2721  else
2722  ImplicitMap.emplace_back(E);
2723  return;
2724  }
2725  }
2726 
2727  // OpenMP [2.9.3.6, Restrictions, p.2]
2728  // A list item that appears in a reduction clause of the innermost
2729  // enclosing worksharing or parallel construct may not be accessed in an
2730  // explicit task.
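  // For illustration (hypothetical user code): inside
  // '#pragma omp parallel reduction(+: s)', accessing 's' from a nested
  // '#pragma omp task' region is diagnosed by the check below.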
2731  DVar = Stack->hasInnermostDSA(
2732  VD, [](OpenMPClauseKind C) { return C == OMPC_reduction; },
2733  [](OpenMPDirectiveKind K) {
2734  return isOpenMPParallelDirective(K) ||
2735  isOpenMPWorksharingDirective(K) || isOpenMPTeamsDirective(K);
2736  },
2737  /*FromParent=*/true);
2738  if (isOpenMPTaskingDirective(DKind) && DVar.CKind == OMPC_reduction) {
2739  ErrorFound = true;
2740  SemaRef.Diag(ELoc, diag::err_omp_reduction_in_task);
2741  reportOriginalDsa(SemaRef, Stack, VD, DVar);
2742  return;
2743  }
2744 
2745  // Define implicit data-sharing attributes for task.
2746  DVar = Stack->getImplicitDSA(VD, /*FromParent=*/false);
2747  if (isOpenMPTaskingDirective(DKind) && DVar.CKind != OMPC_shared &&
2748  !Stack->isLoopControlVariable(VD).first) {
2749  ImplicitFirstprivate.push_back(E);
2750  return;
2751  }
2752 
2753  // Store implicitly used globals with declare target link for parent
2754  // target.
2755  if (!isOpenMPTargetExecutionDirective(DKind) && Res &&
2756  *Res == OMPDeclareTargetDeclAttr::MT_Link) {
2757  Stack->addToParentTargetRegionLinkGlobals(E);
2758  return;
2759  }
2760  }
2761  }
2762  void VisitMemberExpr(MemberExpr *E) {
2763  if (E->isTypeDependent() || E->isValueDependent() ||
2764  E->containsUnexpandedParameterPack() || E->isInstantiationDependent())
2765  return;
2766  auto *FD = dyn_cast<FieldDecl>(E->getMemberDecl());
2767  OpenMPDirectiveKind DKind = Stack->getCurrentDirective();
2768  if (auto *TE = dyn_cast<CXXThisExpr>(E->getBase()->IgnoreParens())) {
2769  if (!FD)
2770  return;
2771  DSAStackTy::DSAVarData DVar = Stack->getTopDSA(FD, /*FromParent=*/false);
2772  // Check if the variable has an explicit DSA set and stop the analysis
2773  // if so.
2774  if (DVar.RefExpr || !ImplicitDeclarations.insert(FD).second)
2775  return;
2776 
2777  if (isOpenMPTargetExecutionDirective(DKind) &&
2778  !Stack->isLoopControlVariable(FD).first &&
2779  !Stack->checkMappableExprComponentListsForDecl(
2780  FD, /*CurrentRegionOnly=*/true,
2781  [](OMPClauseMappableExprCommon::MappableExprComponentListRef
2782  StackComponents,
2783  OpenMPClauseKind) {
2784  return isa<CXXThisExpr>(
2785  cast<MemberExpr>(
2786  StackComponents.back().getAssociatedExpression())
2787  ->getBase()
2788  ->IgnoreParens());
2789  })) {
2790  // OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, C/C++, p.3]
2791  // A bit-field cannot appear in a map clause.
2792  //
2793  if (FD->isBitField())
2794  return;
2795 
2796  // Check to see if the member expression is referencing a class that
2797  // has already been explicitly mapped
2798  if (Stack->isClassPreviouslyMapped(TE->getType()))
2799  return;
2800 
2801  ImplicitMap.emplace_back(E);
2802  return;
2803  }
2804 
2805  SourceLocation ELoc = E->getExprLoc();
2806  // OpenMP [2.9.3.6, Restrictions, p.2]
2807  // A list item that appears in a reduction clause of the innermost
2808  // enclosing worksharing or parallel construct may not be accessed in
2809  // an explicit task.
2810  DVar = Stack->hasInnermostDSA(
2811  FD, [](OpenMPClauseKind C) { return C == OMPC_reduction; },
2812  [](OpenMPDirectiveKind K) {
2813  return isOpenMPParallelDirective(K) ||
2814  isOpenMPWorksharingDirective(K) || isOpenMPTeamsDirective(K);
2815  },
2816  /*FromParent=*/true);
2817  if (isOpenMPTaskingDirective(DKind) && DVar.CKind == OMPC_reduction) {
2818  ErrorFound = true;
2819  SemaRef.Diag(ELoc, diag::err_omp_reduction_in_task);
2820  reportOriginalDsa(SemaRef, Stack, FD, DVar);
2821  return;
2822  }
2823 
2824  // Define implicit data-sharing attributes for task.
2825  DVar = Stack->getImplicitDSA(FD, /*FromParent=*/false);
2826  if (isOpenMPTaskingDirective(DKind) && DVar.CKind != OMPC_shared &&
2827  !Stack->isLoopControlVariable(FD).first) {
2828  // Check if there is a captured expression for the current field in the
2829  // region. Do not mark it as firstprivate unless there is no captured
2830  // expression.
2831  // TODO: try to make it firstprivate.
2832  if (DVar.CKind != OMPC_unknown)
2833  ImplicitFirstprivate.push_back(E);
2834  }
2835  return;
2836  }
2837  if (isOpenMPTargetExecutionDirective(DKind)) {
2838  OMPClauseMappableExprCommon::MappableExprComponentList CurComponents;
2839  if (!checkMapClauseExpressionBase(SemaRef, E, CurComponents, OMPC_map,
2840  /*NoDiagnose=*/true))
2841  return;
2842  const auto *VD = cast<ValueDecl>(
2843  CurComponents.back().getAssociatedDeclaration()->getCanonicalDecl());
2844  if (!Stack->checkMappableExprComponentListsForDecl(
2845  VD, /*CurrentRegionOnly=*/true,
2846  [&CurComponents](
2847  OMPClauseMappableExprCommon::MappableExprComponentListRef
2848  StackComponents,
2849  OpenMPClauseKind) {
2850  auto CCI = CurComponents.rbegin();
2851  auto CCE = CurComponents.rend();
2852  for (const auto &SC : llvm::reverse(StackComponents)) {
2853  // Do both expressions have the same kind?
2854  if (CCI->getAssociatedExpression()->getStmtClass() !=
2855  SC.getAssociatedExpression()->getStmtClass())
2856  if (!(isa<OMPArraySectionExpr>(
2857  SC.getAssociatedExpression()) &&
2858  isa<ArraySubscriptExpr>(
2859  CCI->getAssociatedExpression())))
2860  return false;
2861 
2862  const Decl *CCD = CCI->getAssociatedDeclaration();
2863  const Decl *SCD = SC.getAssociatedDeclaration();
2864  CCD = CCD ? CCD->getCanonicalDecl() : nullptr;
2865  SCD = SCD ? SCD->getCanonicalDecl() : nullptr;
2866  if (SCD != CCD)
2867  return false;
2868  std::advance(CCI, 1);
2869  if (CCI == CCE)
2870  break;
2871  }
2872  return true;
2873  })) {
2874  Visit(E->getBase());
2875  }
2876  } else {
2877  Visit(E->getBase());
2878  }
2879  }
2880  void VisitOMPExecutableDirective(OMPExecutableDirective *S) {
2881  for (OMPClause *C : S->clauses()) {
2882  // Skip analysis of arguments of implicitly defined firstprivate clause
2883  // for task|target directives.
2884  // Skip analysis of arguments of implicitly defined map clause for target
2885  // directives.
2886  if (C && !((isa<OMPFirstprivateClause>(C) || isa<OMPMapClause>(C)) &&
2887  C->isImplicit())) {
2888  for (Stmt *CC : C->children()) {
2889  if (CC)
2890  Visit(CC);
2891  }
2892  }
2893  }
2894  // Check implicitly captured variables.
2895  VisitSubCaptures(S);
2896  }
2897  void VisitStmt(Stmt *S) {
2898  for (Stmt *C : S->children()) {
2899  if (C) {
2900  // Check implicitly captured variables in the task-based directives to
2901  // check if they must be firstprivatized.
2902  Visit(C);
2903  }
2904  }
2905  }
2906 
2907  bool isErrorFound() const { return ErrorFound; }
2908  ArrayRef<Expr *> getImplicitFirstprivate() const {
2909  return ImplicitFirstprivate;
2910  }
2911  ArrayRef<Expr *> getImplicitMap() const { return ImplicitMap; }
2912  const Sema::VarsWithInheritedDSAType &getVarsWithInheritedDSA() const {
2913  return VarsWithInheritedDSA;
2914  }
2915 
2916  DSAAttrChecker(DSAStackTy *S, Sema &SemaRef, CapturedStmt *CS)
2917  : Stack(S), SemaRef(SemaRef), ErrorFound(false), CS(CS) {
2918  // Process declare target link variables for the target directives.
2919  if (isOpenMPTargetExecutionDirective(S->getCurrentDirective())) {
2920  for (DeclRefExpr *E : Stack->getLinkGlobals())
2921  Visit(E);
2922  }
2923  }
2924 };
2925 } // namespace
2926 
2927 void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
2928  switch (DKind) {
2929  case OMPD_parallel:
2930  case OMPD_parallel_for:
2931  case OMPD_parallel_for_simd:
2932  case OMPD_parallel_sections:
2933  case OMPD_teams:
2934  case OMPD_teams_distribute:
2935  case OMPD_teams_distribute_simd: {
2936  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
2937  QualType KmpInt32PtrTy =
2938  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
2939  Sema::CapturedParamNameType Params[] = {
2940  std::make_pair(".global_tid.", KmpInt32PtrTy),
2941  std::make_pair(".bound_tid.", KmpInt32PtrTy),
2942  std::make_pair(StringRef(), QualType()) // __context with shared vars
2943  };
2944  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2945  Params);
2946  break;
2947  }
2948  case OMPD_target_teams:
2949  case OMPD_target_parallel:
2950  case OMPD_target_parallel_for:
2951  case OMPD_target_parallel_for_simd:
2952  case OMPD_target_teams_distribute:
2953  case OMPD_target_teams_distribute_simd: {
2954  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
2955  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
2956  QualType KmpInt32PtrTy =
2957  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
2958  QualType Args[] = {VoidPtrTy};
2959  FunctionProtoType::ExtProtoInfo EPI;
2960  EPI.Variadic = true;
2961  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
2962  Sema::CapturedParamNameType Params[] = {
2963  std::make_pair(".global_tid.", KmpInt32Ty),
2964  std::make_pair(".part_id.", KmpInt32PtrTy),
2965  std::make_pair(".privates.", VoidPtrTy),
2966  std::make_pair(
2967  ".copy_fn.",
2968  Context.getPointerType(CopyFnType).withConst().withRestrict()),
2969  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
2970  std::make_pair(StringRef(), QualType()) // __context with shared vars
2971  };
2972  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2973  Params);
2974  // Mark this captured region as inlined, because we don't use outlined
2975  // function directly.
2976  getCurCapturedRegion()->TheCapturedDecl->addAttr(
2977  AlwaysInlineAttr::CreateImplicit(
2978  Context, AlwaysInlineAttr::Keyword_forceinline));
2979  Sema::CapturedParamNameType ParamsTarget[] = {
2980  std::make_pair(StringRef(), QualType()) // __context with shared vars
2981  };
2982  // Start a captured region for 'target' with no implicit parameters.
2983  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2984  ParamsTarget);
2985  Sema::CapturedParamNameType ParamsTeamsOrParallel[] = {
2986  std::make_pair(".global_tid.", KmpInt32PtrTy),
2987  std::make_pair(".bound_tid.", KmpInt32PtrTy),
2988  std::make_pair(StringRef(), QualType()) // __context with shared vars
2989  };
2990  // Start a captured region for 'teams' or 'parallel'. Both regions have
2991  // the same implicit parameters.
2992  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2993  ParamsTeamsOrParallel);
2994  break;
2995  }
2996  case OMPD_target:
2997  case OMPD_target_simd: {
2998  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
2999  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
3000  QualType KmpInt32PtrTy =
3001  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
3002  QualType Args[] = {VoidPtrTy};
3003  FunctionProtoType::ExtProtoInfo EPI;
3004  EPI.Variadic = true;
3005  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
3006  Sema::CapturedParamNameType Params[] = {
3007  std::make_pair(".global_tid.", KmpInt32Ty),
3008  std::make_pair(".part_id.", KmpInt32PtrTy),
3009  std::make_pair(".privates.", VoidPtrTy),
3010  std::make_pair(
3011  ".copy_fn.",
3012  Context.getPointerType(CopyFnType).withConst().withRestrict()),
3013  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
3014  std::make_pair(StringRef(), QualType()) // __context with shared vars
3015  };
3016  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3017  Params);
3018  // Mark this captured region as inlined, because we don't use the outlined
3019  // function directly.
3020  getCurCapturedRegion()->TheCapturedDecl->addAttr(
3021  AlwaysInlineAttr::CreateImplicit(
3022  Context, AlwaysInlineAttr::Keyword_forceinline));
3023  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3024  std::make_pair(StringRef(), QualType()));
3025  break;
3026  }
3027  case OMPD_simd:
3028  case OMPD_for:
3029  case OMPD_for_simd:
3030  case OMPD_sections:
3031  case OMPD_section:
3032  case OMPD_single:
3033  case OMPD_master:
3034  case OMPD_critical:
3035  case OMPD_taskgroup:
3036  case OMPD_distribute:
3037  case OMPD_distribute_simd:
3038  case OMPD_ordered:
3039  case OMPD_atomic:
3040  case OMPD_target_data: {
3041  Sema::CapturedParamNameType Params[] = {
3042  std::make_pair(StringRef(), QualType()) // __context with shared vars
3043  };
3044  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3045  Params);
3046  break;
3047  }
3048  case OMPD_task: {
3049  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
3050  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
3051  QualType KmpInt32PtrTy =
3052  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
3053  QualType Args[] = {VoidPtrTy};
3054  FunctionProtoType::ExtProtoInfo EPI;
3055  EPI.Variadic = true;
3056  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
3057  Sema::CapturedParamNameType Params[] = {
3058  std::make_pair(".global_tid.", KmpInt32Ty),
3059  std::make_pair(".part_id.", KmpInt32PtrTy),
3060  std::make_pair(".privates.", VoidPtrTy),
3061  std::make_pair(
3062  ".copy_fn.",
3063  Context.getPointerType(CopyFnType).withConst().withRestrict()),
3064  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
3065  std::make_pair(StringRef(), QualType()) // __context with shared vars
3066  };
3067  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3068  Params);
3069  // Mark this captured region as inlined, because we don't use the outlined
3070  // function directly.
3071  getCurCapturedRegion()->TheCapturedDecl->addAttr(
3072  AlwaysInlineAttr::CreateImplicit(
3073  Context, AlwaysInlineAttr::Keyword_forceinline));
3074  break;
3075  }
3076  case OMPD_taskloop:
3077  case OMPD_taskloop_simd: {
3078  QualType KmpInt32Ty =
3079  Context.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1)
3080  .withConst();
3081  QualType KmpUInt64Ty =
3082  Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0)
3083  .withConst();
3084  QualType KmpInt64Ty =
3085  Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1)
3086  .withConst();
3087  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
3088  QualType KmpInt32PtrTy =
3089  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
3090  QualType Args[] = {VoidPtrTy};
3091  FunctionProtoType::ExtProtoInfo EPI;
3092  EPI.Variadic = true;
3093  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
3094  Sema::CapturedParamNameType Params[] = {
3095  std::make_pair(".global_tid.", KmpInt32Ty),
3096  std::make_pair(".part_id.", KmpInt32PtrTy),
3097  std::make_pair(".privates.", VoidPtrTy),
3098  std::make_pair(
3099  ".copy_fn.",
3100  Context.getPointerType(CopyFnType).withConst().withRestrict()),
3101  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
3102  std::make_pair(".lb.", KmpUInt64Ty),
3103  std::make_pair(".ub.", KmpUInt64Ty),
3104  std::make_pair(".st.", KmpInt64Ty),
3105  std::make_pair(".liter.", KmpInt32Ty),
3106  std::make_pair(".reductions.", VoidPtrTy),
3107  std::make_pair(StringRef(), QualType()) // __context with shared vars
3108  };
3109  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3110  Params);
3111  // Mark this captured region as inlined, because we don't use the outlined
3112  // function directly.
3113  getCurCapturedRegion()->TheCapturedDecl->addAttr(
3114  AlwaysInlineAttr::CreateImplicit(
3115  Context, AlwaysInlineAttr::Keyword_forceinline));
3116  break;
3117  }
3118  case OMPD_distribute_parallel_for_simd:
3119  case OMPD_distribute_parallel_for: {
3120  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
3121  QualType KmpInt32PtrTy =
3122  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
3123  Sema::CapturedParamNameType Params[] = {
3124  std::make_pair(".global_tid.", KmpInt32PtrTy),
3125  std::make_pair(".bound_tid.", KmpInt32PtrTy),
3126  std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
3127  std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
3128  std::make_pair(StringRef(), QualType()) // __context with shared vars
3129  };
3130  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3131  Params);
3132  break;
3133  }
3134  case OMPD_target_teams_distribute_parallel_for:
3135  case OMPD_target_teams_distribute_parallel_for_simd: {
3136  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
3137  QualType KmpInt32PtrTy =
3138  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
3139  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
3140 
3141  QualType Args[] = {VoidPtrTy};
3142  FunctionProtoType::ExtProtoInfo EPI;
3143  EPI.Variadic = true;
3144  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
3145  Sema::CapturedParamNameType Params[] = {
3146  std::make_pair(".global_tid.", KmpInt32Ty),
3147  std::make_pair(".part_id.", KmpInt32PtrTy),
3148  std::make_pair(".privates.", VoidPtrTy),
3149  std::make_pair(
3150  ".copy_fn.",
3151  Context.getPointerType(CopyFnType).withConst().withRestrict()),
3152  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
3153  std::make_pair(StringRef(), QualType()) // __context with shared vars
3154  };
3155  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3156  Params);
3157  // Mark this captured region as inlined, because we don't use the outlined
3158  // function directly.
3159  getCurCapturedRegion()->TheCapturedDecl->addAttr(
3160  AlwaysInlineAttr::CreateImplicit(
3161  Context, AlwaysInlineAttr::Keyword_forceinline));
3162  Sema::CapturedParamNameType ParamsTarget[] = {
3163  std::make_pair(StringRef(), QualType()) // __context with shared vars
3164  };
3165  // Start a captured region for 'target' with no implicit parameters.
3166  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3167  ParamsTarget);
3168 
3169  Sema::CapturedParamNameType ParamsTeams[] = {
3170  std::make_pair(".global_tid.", KmpInt32PtrTy),
3171  std::make_pair(".bound_tid.", KmpInt32PtrTy),
3172  std::make_pair(StringRef(), QualType()) // __context with shared vars
3173  };
3174  // Start a captured region for 'teams'.
3175  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3176  ParamsTeams);
3177 
3178  Sema::CapturedParamNameType ParamsParallel[] = {
3179  std::make_pair(".global_tid.", KmpInt32PtrTy),
3180  std::make_pair(".bound_tid.", KmpInt32PtrTy),
3181  std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
3182  std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
3183  std::make_pair(StringRef(), QualType()) // __context with shared vars
3184  };
3185  // Start a captured region for 'parallel', which additionally carries the
3186  // previous lower/upper bound parameters from the enclosing distribute loop.
3187  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3188  ParamsParallel);
3189  break;
3190  }
3191 
3192  case OMPD_teams_distribute_parallel_for:
3193  case OMPD_teams_distribute_parallel_for_simd: {
3194  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
3195  QualType KmpInt32PtrTy =
3196  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
3197 
3198  Sema::CapturedParamNameType ParamsTeams[] = {
3199  std::make_pair(".global_tid.", KmpInt32PtrTy),
3200  std::make_pair(".bound_tid.", KmpInt32PtrTy),
3201  std::make_pair(StringRef(), QualType()) // __context with shared vars
3202  };
3203  // Start a captured region for 'teams'.
3204  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3205  ParamsTeams);
3206 
3207  Sema::CapturedParamNameType ParamsParallel[] = {
3208  std::make_pair(".global_tid.", KmpInt32PtrTy),
3209  std::make_pair(".bound_tid.", KmpInt32PtrTy),
3210  std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
3211  std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
3212  std::make_pair(StringRef(), QualType()) // __context with shared vars
3213  };
3214  // Start a captured region for 'parallel', which additionally carries the
3215  // previous lower/upper bound parameters from the enclosing distribute loop.
3216  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3217  ParamsParallel);
3218  break;
3219  }
3220  case OMPD_target_update:
3221  case OMPD_target_enter_data:
3222  case OMPD_target_exit_data: {
3223  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
3224  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
3225  QualType KmpInt32PtrTy =
3226  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
3227  QualType Args[] = {VoidPtrTy};
3228  FunctionProtoType::ExtProtoInfo EPI;
3229  EPI.Variadic = true;
3230  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
3231  Sema::CapturedParamNameType Params[] = {
3232  std::make_pair(".global_tid.", KmpInt32Ty),
3233  std::make_pair(".part_id.", KmpInt32PtrTy),
3234  std::make_pair(".privates.", VoidPtrTy),
3235  std::make_pair(
3236  ".copy_fn.",
3237  Context.getPointerType(CopyFnType).withConst().withRestrict()),
3238  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
3239  std::make_pair(StringRef(), QualType()) // __context with shared vars
3240  };
3241  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3242  Params);
3243  // Mark this captured region as inlined, because we don't use the outlined
3244  // function directly.
3245  getCurCapturedRegion()->TheCapturedDecl->addAttr(
3246  AlwaysInlineAttr::CreateImplicit(
3247  Context, AlwaysInlineAttr::Keyword_forceinline));
3248  break;
3249  }
3250  case OMPD_threadprivate:
3251  case OMPD_allocate:
3252  case OMPD_taskyield:
3253  case OMPD_barrier:
3254  case OMPD_taskwait:
3255  case OMPD_cancellation_point:
3256  case OMPD_cancel:
3257  case OMPD_flush:
3258  case OMPD_declare_reduction:
3259  case OMPD_declare_mapper:
3260  case OMPD_declare_simd:
3261  case OMPD_declare_target:
3262  case OMPD_end_declare_target:
3263  case OMPD_requires:
3264  llvm_unreachable("OpenMP Directive is not allowed");
3265  case OMPD_unknown:
3266  llvm_unreachable("Unknown OpenMP directive");
3267  }
3268 }
3269 
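// Illustration: for a combined directive such as '#pragma omp target parallel',
// getOpenMPCaptureRegions reports one capture region per constituent construct,
// so the level returned below matches the number of captured regions started
// for that directive above (three in that case).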
3270 int Sema::getOpenMPCaptureLevels(OpenMPDirectiveKind DKind) {
3271  SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
3272  getOpenMPCaptureRegions(CaptureRegions, DKind);
3273  return CaptureRegions.size();
3274 }
3275 
3276 static OMPCapturedExprDecl *buildCaptureDecl(Sema &S, IdentifierInfo *Id,
3277  Expr *CaptureExpr, bool WithInit,
3278  bool AsExpression) {
3279  assert(CaptureExpr);
3280  ASTContext &C = S.getASTContext();
3281  Expr *Init = AsExpression ? CaptureExpr : CaptureExpr->IgnoreImpCasts();
3282  QualType Ty = Init->getType();
3283  if (CaptureExpr->getObjectKind() == OK_Ordinary && CaptureExpr->isGLValue()) {
3284  if (S.getLangOpts().CPlusPlus) {
3285  Ty = C.getLValueReferenceType(Ty);
3286  } else {
3287  Ty = C.getPointerType(Ty);
3288  ExprResult Res =
3289  S.CreateBuiltinUnaryOp(CaptureExpr->getExprLoc(), UO_AddrOf, Init);
3290  if (!Res.isUsable())
3291  return nullptr;
3292  Init = Res.get();
3293  }
3294  WithInit = true;
3295  }
3296  auto *CED = OMPCapturedExprDecl::Create(C, S.CurContext, Id, Ty,
3297  CaptureExpr->getBeginLoc());
3298  if (!WithInit)
3299  CED->addAttr(OMPCaptureNoInitAttr::CreateImplicit(C));
3300  S.CurContext->addHiddenDecl(CED);
3301  S.AddInitializerToDecl(CED, Init, /*DirectInit=*/false);
3302  return CED;
3303 }
3304 
3305 static DeclRefExpr *buildCapture(Sema &S, ValueDecl *D, Expr *CaptureExpr,
3306  bool WithInit) {
3307  OMPCapturedExprDecl *CD;
3308  if (VarDecl *VD = S.isOpenMPCapturedDecl(D))
3309  CD = cast<OMPCapturedExprDecl>(VD);
3310  else
3311  CD = buildCaptureDecl(S, D->getIdentifier(), CaptureExpr, WithInit,
3312  /*AsExpression=*/false);
3313  return buildDeclRefExpr(S, CD, CD->getType().getNonReferenceType(),
3314  CaptureExpr->getExprLoc());
3315 }
3316 
3317 static ExprResult buildCapture(Sema &S, Expr *CaptureExpr, DeclRefExpr *&Ref) {
3318  CaptureExpr = S.DefaultLvalueConversion(CaptureExpr).get();
3319  if (!Ref) {
3320  OMPCapturedExprDecl *CD = buildCaptureDecl(
3321  S, &S.getASTContext().Idents.get(".capture_expr."), CaptureExpr,
3322  /*WithInit=*/true, /*AsExpression=*/true);
3323  Ref = buildDeclRefExpr(S, CD, CD->getType().getNonReferenceType(),
3324  CaptureExpr->getExprLoc());
3325  }
3326  ExprResult Res = Ref;
3327  if (!S.getLangOpts().CPlusPlus &&
3328  CaptureExpr->getObjectKind() == OK_Ordinary && CaptureExpr->isGLValue() &&
3329  Ref->getType()->isPointerType()) {
3330  Res = S.CreateBuiltinUnaryOp(CaptureExpr->getExprLoc(), UO_Deref, Ref);
3331  if (!Res.isUsable())
3332  return ExprError();
3333  }
3334  return S.DefaultLvalueConversion(Res.get());
3335 }
3336 
3337 namespace {
3338 // OpenMP directives parsed in this section are represented as a
3339 // CapturedStatement with an associated statement. If a syntax error
3340 // is detected during the parsing of the associated statement, the
3341 // compiler must abort processing and close the CapturedStatement.
3342 //
3343 // Combined directives such as 'target parallel' have more than one
3344 // nested CapturedStatements. This RAII ensures that we unwind out
3345 // of all the nested CapturedStatements when an error is found.
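// For example, '#pragma omp target parallel for' is built from three nested
// CapturedStatements (the task-based wrapper for 'target', 'target' itself,
// and 'parallel'); on an error this RAII unwinds one captured region per
// capture level.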
3346 class CaptureRegionUnwinderRAII {
3347 private:
3348  Sema &S;
3349  bool &ErrorFound;
3350  OpenMPDirectiveKind DKind = OMPD_unknown;
3351 
3352 public:
3353  CaptureRegionUnwinderRAII(Sema &S, bool &ErrorFound,
3354  OpenMPDirectiveKind DKind)
3355  : S(S), ErrorFound(ErrorFound), DKind(DKind) {}
3356  ~CaptureRegionUnwinderRAII() {
3357  if (ErrorFound) {
3358  int ThisCaptureLevel = S.getOpenMPCaptureLevels(DKind);
3359  while (--ThisCaptureLevel >= 0)
3360  S.ActOnCapturedRegionError();
3361  }
3362  }
3363 };
3364 } // namespace
3365 
3366 StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
3367  ArrayRef<OMPClause *> Clauses) {
3368  bool ErrorFound = false;
3369  CaptureRegionUnwinderRAII CaptureRegionUnwinder(
3370  *this, ErrorFound, DSAStack->getCurrentDirective());
3371  if (!S.isUsable()) {
3372  ErrorFound = true;
3373  return StmtError();
3374  }
3375 
3376  SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
3377  getOpenMPCaptureRegions(CaptureRegions, DSAStack->getCurrentDirective());
3378  OMPOrderedClause *OC = nullptr;
3379  OMPScheduleClause *SC = nullptr;
3380  SmallVector<const OMPLinearClause *, 4> LCs;
3381  SmallVector<const OMPClauseWithPreInit *, 4> PICs;
3382  // This is required for proper codegen.
3383  for (OMPClause *Clause : Clauses) {
3384  if (isOpenMPTaskingDirective(DSAStack->getCurrentDirective()) &&
3385  Clause->getClauseKind() == OMPC_in_reduction) {
3386  // Capture taskgroup task_reduction descriptors inside the tasking regions
3387  // with the corresponding in_reduction items.
3388  auto *IRC = cast<OMPInReductionClause>(Clause);
3389  for (Expr *E : IRC->taskgroup_descriptors())
3390  if (E)
3391  MarkDeclarationsReferencedInExpr(E);
3392  }
3393  if (isOpenMPPrivate(Clause->getClauseKind()) ||
3394  Clause->getClauseKind() == OMPC_copyprivate ||
3395  (getLangOpts().OpenMPUseTLS &&
3396  getASTContext().getTargetInfo().isTLSSupported() &&
3397  Clause->getClauseKind() == OMPC_copyin)) {
3398  DSAStack->setForceVarCapturing(Clause->getClauseKind() == OMPC_copyin);
3399  // Mark all variables in private list clauses as used in inner region.
3400  for (Stmt *VarRef : Clause->children()) {
3401  if (auto *E = cast_or_null<Expr>(VarRef)) {
3402  MarkDeclarationsReferencedInExpr(E);
3403  }
3404  }
3405  DSAStack->setForceVarCapturing(/*V=*/false);
3406  } else if (CaptureRegions.size() > 1 ||
3407  CaptureRegions.back() != OMPD_unknown) {
3408  if (auto *C = OMPClauseWithPreInit::get(Clause))
3409  PICs.push_back(C);
3410  if (auto *C = OMPClauseWithPostUpdate::get(Clause)) {
3411  if (Expr *E = C->getPostUpdateExpr())
3412  MarkDeclarationsReferencedInExpr(E);
3413  }
3414  }
3415  if (Clause->getClauseKind() == OMPC_schedule)
3416  SC = cast<OMPScheduleClause>(Clause);
3417  else if (Clause->getClauseKind() == OMPC_ordered)
3418  OC = cast<OMPOrderedClause>(Clause);
3419  else if (Clause->getClauseKind() == OMPC_linear)
3420  LCs.push_back(cast<OMPLinearClause>(Clause));
3421  }
3422  // OpenMP, 2.7.1 Loop Construct, Restrictions
3423  // The nonmonotonic modifier cannot be specified if an ordered clause is
3424  // specified.
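  // For example, '#pragma omp for schedule(nonmonotonic : dynamic) ordered'
  // is rejected by the check below.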
3425  if (SC &&
3426  (SC->getFirstScheduleModifier() == OMPC_SCHEDULE_MODIFIER_nonmonotonic ||
3427  SC->getSecondScheduleModifier() ==
3428  OMPC_SCHEDULE_MODIFIER_nonmonotonic) &&
3429  OC) {
3430  Diag(SC->getFirstScheduleModifier() == OMPC_SCHEDULE_MODIFIER_nonmonotonic
3431  ? SC->getFirstScheduleModifierLoc()
3432  : SC->getSecondScheduleModifierLoc(),
3433  diag::err_omp_schedule_nonmonotonic_ordered)
3434  << SourceRange(OC->getBeginLoc(), OC->getEndLoc());
3435  ErrorFound = true;
3436  }
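  // OpenMP, 2.7.1 Loop Construct, Restrictions
  // A linear clause cannot be specified together with an ordered clause that
  // has a parameter; e.g. '#pragma omp for ordered(2) linear(i)' is rejected
  // by the next check.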
3437  if (!LCs.empty() && OC && OC->getNumForLoops()) {
3438  for (const OMPLinearClause *C : LCs) {
3439  Diag(C->getBeginLoc(), diag::err_omp_linear_ordered)
3440  << SourceRange(OC->getBeginLoc(), OC->getEndLoc());
3441  }
3442  ErrorFound = true;
3443  }
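  // Similarly, an 'ordered' clause with a parameter is not allowed on a
  // worksharing-simd construct; e.g. '#pragma omp for simd ordered(1)' is
  // rejected below.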
3444  if (isOpenMPWorksharingDirective(DSAStack->getCurrentDirective()) &&
3445  isOpenMPSimdDirective(DSAStack->getCurrentDirective()) && OC &&
3446  OC->getNumForLoops()) {
3447  Diag(OC->getBeginLoc(), diag::err_omp_ordered_simd)
3448  << getOpenMPDirectiveName(DSAStack->getCurrentDirective());
3449  ErrorFound = true;
3450  }
3451  if (ErrorFound) {
3452  return StmtError();
3453  }
3454  StmtResult SR = S;
3455  unsigned CompletedRegions = 0;
3456  for (OpenMPDirectiveKind ThisCaptureRegion : llvm::reverse(CaptureRegions)) {
3457  // Mark all variables in private list clauses as used in inner region.
3458  // Required for proper codegen of combined directives.
3459  // TODO: add processing for other clauses.
3460  if (ThisCaptureRegion != OMPD_unknown) {
3461  for (const clang::OMPClauseWithPreInit *C : PICs) {
3462  OpenMPDirectiveKind CaptureRegion = C->getCaptureRegion();
3463  // Find the particular capture region for the clause if the
3464  // directive is a combined one with multiple capture regions.
3465  // If the directive is not a combined one, the capture region
3466  // associated with the clause is OMPD_unknown and is generated
3467  // only once.
3468  if (CaptureRegion == ThisCaptureRegion ||
3469  CaptureRegion == OMPD_unknown) {
3470  if (auto *DS = cast_or_null<DeclStmt>(C->getPreInitStmt())) {
3471  for (Decl *D : DS->decls())
3472  MarkVariableReferenced(D->getLocation(), cast<VarDecl>(D));
3473  }
3474  }
3475  }
3476  }
3477  if (++CompletedRegions == CaptureRegions.size())
3478  DSAStack->setBodyComplete();
3479  SR = ActOnCapturedRegionEnd(SR.get());
3480  }
3481  return SR;
3482 }
3483 
3484 static bool checkCancelRegion(Sema &SemaRef, OpenMPDirectiveKind CurrentRegion,
3485  OpenMPDirectiveKind CancelRegion,
3486  SourceLocation StartLoc) {
3487  // CancelRegion is only needed for cancel and cancellation_point.
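  // For example, for '#pragma omp cancel for' the CancelRegion parameter is
  // OMPD_for; values other than parallel, for, sections, or taskgroup are
  // rejected below.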
3488  if (CurrentRegion != OMPD_cancel && CurrentRegion != OMPD_cancellation_point)
3489  return false;
3490 
3491  if (CancelRegion == OMPD_parallel || CancelRegion == OMPD_for ||
3492  CancelRegion == OMPD_sections || CancelRegion == OMPD_taskgroup)
3493  return false;
3494 
3495  SemaRef.Diag(StartLoc, diag::err_omp_wrong_cancel_region)
3496  << getOpenMPDirectiveName(CancelRegion);
3497  return true;
3498 }
3499 
3500 static bool checkNestingOfRegions(Sema &SemaRef, const DSAStackTy *Stack,
3501  OpenMPDirectiveKind CurrentRegion,
3502  const DeclarationNameInfo &CurrentName,
3503  OpenMPDirectiveKind CancelRegion,
3504  SourceLocation StartLoc) {
3505  if (Stack->getCurScope()) {
3506  OpenMPDirectiveKind ParentRegion = Stack->getParentDirective();
3507  OpenMPDirectiveKind OffendingRegion = ParentRegion;
3508  bool NestingProhibited = false;
3509  bool CloseNesting = true;
3510  bool OrphanSeen = false;
3511  enum {
3512  NoRecommend,
3513  ShouldBeInParallelRegion,
3514  ShouldBeInOrderedRegion,
3515  ShouldBeInTargetRegion,
3516  ShouldBeInTeamsRegion
3517  } Recommend = NoRecommend;
3518  if (isOpenMPSimdDirective(ParentRegion) && CurrentRegion != OMPD_ordered) {
3519  // OpenMP [2.16, Nesting of Regions]
3520  // OpenMP constructs may not be nested inside a simd region.
3521  // OpenMP [2.8.1,simd Construct, Restrictions]
3522  // An ordered construct with the simd clause is the only OpenMP
3523  // construct that can appear in the simd region.
3524  // Allowing a SIMD construct nested in another SIMD construct is an
3525  // extension. The OpenMP 4.5 spec does not allow it. Issue a warning
3526  // message.
3527  SemaRef.Diag(StartLoc, (CurrentRegion != OMPD_simd)
3528  ? diag::err_omp_prohibited_region_simd
3529  : diag::warn_omp_nesting_simd);
3530  return CurrentRegion != OMPD_simd;
3531  }
3532  if (ParentRegion == OMPD_atomic) {
3533  // OpenMP [2.16, Nesting of Regions]
3534  // OpenMP constructs may not be nested inside an atomic region.
3535  SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region_atomic);
3536  return true;
3537  }
3538  if (CurrentRegion == OMPD_section) {
3539  // OpenMP [2.7.2, sections Construct, Restrictions]
3540  // Orphaned section directives are prohibited. That is, the section
3541  // directives must appear within the sections construct and must not be
3542  // encountered elsewhere in the sections region.
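  // For example, a '#pragma omp section' that is not directly enclosed in a
  // 'sections' or 'parallel sections' construct is diagnosed here.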
3543  if (ParentRegion != OMPD_sections &&
3544  ParentRegion != OMPD_parallel_sections) {
3545  SemaRef.Diag(StartLoc, diag::err_omp_orphaned_section_directive)
3546  << (ParentRegion != OMPD_unknown)
3547  << getOpenMPDirectiveName(ParentRegion);
3548  return true;
3549  }
3550  return false;
3551  }
3552  // Allow some constructs (except teams and cancellation constructs) to be
3553  // orphaned (they could be used in functions, called from OpenMP regions
3554  // with the required preconditions).
3555  if (ParentRegion == OMPD_unknown &&
3556  !isOpenMPNestingTeamsDirective(CurrentRegion) &&
3557  CurrentRegion != OMPD_cancellation_point &&
3558  CurrentRegion != OMPD_cancel)
3559  return false;
3560  if (CurrentRegion == OMPD_cancellation_point ||
3561  CurrentRegion == OMPD_cancel) {
3562  // OpenMP [2.16, Nesting of Regions]
3563  // A cancellation point construct for which construct-type-clause is
3564  // taskgroup must be nested inside a task construct. A cancellation
3565  // point construct for which construct-type-clause is not taskgroup must
3566  // be closely nested inside an OpenMP construct that matches the type
3567  // specified in construct-type-clause.
3568  // A cancel construct for which construct-type-clause is taskgroup must be
3569  // nested inside a task construct. A cancel construct for which
3570  // construct-type-clause is not taskgroup must be closely nested inside an
3571  // OpenMP construct that matches the type specified in
3572  // construct-type-clause.
3573  NestingProhibited =
3574  !((CancelRegion == OMPD_parallel &&
3575  (ParentRegion == OMPD_parallel ||
3576  ParentRegion == OMPD_target_parallel)) ||
3577  (CancelRegion == OMPD_for &&
3578  (ParentRegion == OMPD_for || ParentRegion == OMPD_parallel_for ||
3579  ParentRegion == OMPD_target_parallel_for ||
3580  ParentRegion == OMPD_distribute_parallel_for ||
3581  ParentRegion == OMPD_teams_distribute_parallel_for ||
3582  ParentRegion == OMPD_target_teams_distribute_parallel_for)) ||
3583  (CancelRegion == OMPD_taskgroup && ParentRegion == OMPD_task) ||
3584  (CancelRegion == OMPD_sections &&
3585  (ParentRegion == OMPD_section || ParentRegion == OMPD_sections ||
3586  ParentRegion == OMPD_parallel_sections)));
3587  OrphanSeen = ParentRegion == OMPD_unknown;
3588  } else if (CurrentRegion == OMPD_master) {
3589  // OpenMP [2.16, Nesting of Regions]
3590  // A master region may not be closely nested inside a worksharing,
3591  // atomic, or explicit task region.
3592  NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
3593  isOpenMPTaskingDirective(ParentRegion);
3594  } else if (CurrentRegion == OMPD_critical && CurrentName.getName()) {
3595  // OpenMP [2.16, Nesting of Regions]
3596  // A critical region may not be nested (closely or otherwise) inside a
3597  // critical region with the same name. Note that this restriction is not
3598  // sufficient to prevent deadlock.
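  // For example, '#pragma omp critical (A)' nested (closely or not) inside
  // another critical region named 'A' is rejected by the stack walk below.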
3599  SourceLocation PreviousCriticalLoc;
3600  bool DeadLock = Stack->hasDirective(
3601  [CurrentName, &PreviousCriticalLoc](OpenMPDirectiveKind K,
3602  const DeclarationNameInfo &DNI,
3603  SourceLocation Loc) {
3604  if (K == OMPD_critical && DNI.getName() == CurrentName.getName()) {
3605  PreviousCriticalLoc = Loc;
3606  return true;
3607  }
3608  return false;
3609  },
3610  false /* skip top directive */);
3611  if (DeadLock) {
3612  SemaRef.Diag(StartLoc,
3613  diag::err_omp_prohibited_region_critical_same_name)
3614  << CurrentName.getName();
3615  if (PreviousCriticalLoc.isValid())
3616  SemaRef.Diag(PreviousCriticalLoc,
3617  diag::note_omp_previous_critical_region);
3618  return true;
3619  }
3620  } else if (CurrentRegion == OMPD_barrier) {
3621  // OpenMP [2.16, Nesting of Regions]
3622  // A barrier region may not be closely nested inside a worksharing,
3623  // explicit task, critical, ordered, atomic, or master region.
3624  NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
3625  isOpenMPTaskingDirective(ParentRegion) ||
3626  ParentRegion == OMPD_master ||
3627  ParentRegion == OMPD_critical ||
3628  ParentRegion == OMPD_ordered;
3629  } else if (isOpenMPWorksharingDirective(CurrentRegion) &&
3630  !isOpenMPParallelDirective(CurrentRegion) &&
3631  !isOpenMPTeamsDirective(CurrentRegion)) {
3632  // OpenMP [2.16, Nesting of Regions]
3633  // A worksharing region may not be closely nested inside a worksharing,
3634  // explicit task, critical, ordered, atomic, or master region.
3635  NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
3636  isOpenMPTaskingDirective(ParentRegion) ||
3637  ParentRegion == OMPD_master ||
3638  ParentRegion == OMPD_critical ||
3639  ParentRegion == OMPD_ordered;
3640  Recommend = ShouldBeInParallelRegion;
3641  } else if (CurrentRegion == OMPD_ordered) {
3642  // OpenMP [2.16, Nesting of Regions]
3643  // An ordered region may not be closely nested inside a critical,
3644  // atomic, or explicit task region.
3645  // An ordered region must be closely nested inside a loop region (or
3646  // parallel loop region) with an ordered clause.
3647  // OpenMP [2.8.1,simd Construct, Restrictions]
3648  // An ordered construct with the simd clause is the only OpenMP construct
3649  // that can appear in the simd region.
3650  NestingProhibited = ParentRegion == OMPD_critical ||
3651  isOpenMPTaskingDirective(ParentRegion) ||
3652  !(isOpenMPSimdDirective(ParentRegion) ||
3653  Stack->isParentOrderedRegion());
3654  Recommend = ShouldBeInOrderedRegion;
3655  } else if (isOpenMPNestingTeamsDirective(CurrentRegion)) {
3656  // OpenMP [2.16, Nesting of Regions]
3657  // If specified, a teams construct must be contained within a target
3658  // construct.
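  // For example, '#pragma omp teams' whose closest enclosing explicit region
  // is not a 'target' region (or that is orphaned) is diagnosed below.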
3659  NestingProhibited = ParentRegion != OMPD_target;
3660  OrphanSeen = ParentRegion == OMPD_unknown;
3661  Recommend = ShouldBeInTargetRegion;
3662  }
3663  if (!NestingProhibited &&
3664  !isOpenMPTargetExecutionDirective(CurrentRegion) &&
3665  !isOpenMPTargetDataManagementDirective(CurrentRegion) &&
3666  (ParentRegion == OMPD_teams || ParentRegion == OMPD_target_teams)) {
3667  // OpenMP [2.16, Nesting of Regions]
3668  // distribute, parallel, parallel sections, parallel workshare, and the
3669  // parallel loop and parallel loop SIMD constructs are the only OpenMP
3670  // constructs that can be closely nested in the teams region.
3671  NestingProhibited = !isOpenMPParallelDirective(CurrentRegion) &&
3672  !isOpenMPDistributeDirective(CurrentRegion);
3673  Recommend = ShouldBeInParallelRegion;
3674  }
3675  if (!NestingProhibited &&
3676  isOpenMPNestingDistributeDirective(CurrentRegion)) {
3677  // OpenMP 4.5 [2.17 Nesting of Regions]
3678  // The region associated with the distribute construct must be strictly
3679  // nested inside a teams region
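  // For example, '#pragma omp distribute' placed directly inside a 'target'
  // region without an intervening 'teams' region is diagnosed below.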
3680  NestingProhibited =
3681  (ParentRegion != OMPD_teams && ParentRegion != OMPD_target_teams);
3682  Recommend = ShouldBeInTeamsRegion;
3683  }
3684  if (!NestingProhibited &&
3685  (isOpenMPTargetExecutionDirective(CurrentRegion) ||
3686  isOpenMPTargetDataManagementDirective(CurrentRegion))) {
3687  // OpenMP 4.5 [2.17 Nesting of Regions]
3688  // If a target, target update, target data, target enter data, or
3689  // target exit data construct is encountered during execution of a
3690  // target region, the behavior is unspecified.
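  // For example, a '#pragma omp target' (or 'target update'/'target data')
  // construct encountered inside an enclosing 'target' region is diagnosed
  // below.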
3691  NestingProhibited = Stack->hasDirective(
3692  [&OffendingRegion](OpenMPDirectiveKind K, const DeclarationNameInfo &,
3693  SourceLocation) {
3694  if (isOpenMPTargetExecutionDirective(K)) {
3695  OffendingRegion = K;
3696  return true;
3697  }
3698  return false;
3699  },
3700  false /* don't skip top directive */);
3701  CloseNesting = false;
3702  }
3703  if (NestingProhibited) {
3704  if (OrphanSeen) {
3705  SemaRef.Diag(StartLoc, diag::err_omp_orphaned_device_directive)
3706  << getOpenMPDirectiveName(CurrentRegion) << Recommend;
3707  } else {
3708  SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region)
3709  << CloseNesting << getOpenMPDirectiveName(OffendingRegion)
3710  << Recommend << getOpenMPDirectiveName(CurrentRegion);
3711  }
3712  return true;
3713  }
3714  }
3715  return false;
3716 }
3717 
3718 static bool checkIfClauses(Sema &S, OpenMPDirectiveKind Kind,
3719  ArrayRef<OMPClause *> Clauses,
3720  ArrayRef<OpenMPDirectiveKind> AllowedNameModifiers) {
3721  bool ErrorFound = false;
3722  unsigned NamedModifiersNumber = 0;
3723  SmallVector<const OMPIfClause *, OMPD_unknown + 1> FoundNameModifiers(
3724  OMPD_unknown + 1);
3725  SmallVector<SourceLocation, 4> NameModifierLoc;
3726  for (const OMPClause *C : Clauses) {
3727  if (const auto *IC = dyn_cast_or_null<OMPIfClause>(C)) {
3728  // At most one if clause without a directive-name-modifier can appear on
3729  // the directive.
3730  OpenMPDirectiveKind CurNM = IC->getNameModifier();
3731  if (FoundNameModifiers[CurNM]) {
3732  S.Diag(C->getBeginLoc(), diag::err_omp_more_one_clause)
3733  << getOpenMPDirectiveName(Kind) << getOpenMPClauseName(OMPC_if)
3734  << (CurNM != OMPD_unknown) << getOpenMPDirectiveName(CurNM);
3735  ErrorFound = true;
3736  } else if (CurNM != OMPD_unknown) {
3737  NameModifierLoc.push_back(IC->getNameModifierLoc());
3738  ++NamedModifiersNumber;
3739  }
3740  FoundNameModifiers[CurNM] = IC;
3741  if (CurNM == OMPD_unknown)
3742  continue;
3743  // Check if the specified name modifier is allowed for the current
3744  // directive.
3745  // At most one if clause with the particular directive-name-modifier can
3746  // appear on the directive.
3747  bool MatchFound = false;
3748  for (auto NM : AllowedNameModifiers) {
3749  if (CurNM == NM) {
3750  MatchFound = true;
3751  break;
3752  }
3753  }
3754  if (!MatchFound) {
3755  S.Diag(IC->getNameModifierLoc(),
3756  diag::err_omp_wrong_if_directive_name_modifier)
3758  ErrorFound = true;
3759  }
3760  }
3761  }
3762  // If any if clause on the directive includes a directive-name-modifier then
3763  // all if clauses on the directive must include a directive-name-modifier.
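  // For example, on '#pragma omp target parallel':
  //   if(target : c1) if(c2)               - diagnosed below
  //   if(target : c1) if(parallel : c2)    - accepted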
3764  if (FoundNameModifiers[OMPD_unknown] && NamedModifiersNumber > 0) {
3765  if (NamedModifiersNumber == AllowedNameModifiers.size()) {
3766  S.Diag(FoundNameModifiers[OMPD_unknown]->getBeginLoc(),
3767  diag::err_omp_no_more_if_clause);
3768  } else {
3769  std::string Values;
3770  std::string Sep(", ");
3771  unsigned AllowedCnt = 0;
3772  unsigned TotalAllowedNum =
3773  AllowedNameModifiers.size() - NamedModifiersNumber;
3774  for (unsigned Cnt = 0, End = AllowedNameModifiers.size(); Cnt < End;
3775  ++Cnt) {
3776  OpenMPDirectiveKind NM = AllowedNameModifiers[Cnt];
3777  if (!FoundNameModifiers[NM]) {
3778  Values += "'";
3779  Values += getOpenMPDirectiveName(NM);
3780  Values += "'";
3781  if (AllowedCnt + 2 == TotalAllowedNum)
3782  Values += " or ";
3783  else if (AllowedCnt + 1 != TotalAllowedNum)
3784  Values += Sep;
3785  ++AllowedCnt;
3786  }
3787  }
3788  S.Diag(FoundNameModifiers[OMPD_unknown]->getCondition()->getBeginLoc(),
3789  diag::err_omp_unnamed_if_clause)
3790  << (TotalAllowedNum > 1) << Values;
3791  }
3792  for (SourceLocation Loc : NameModifierLoc) {
3793  S.Diag(Loc, diag::note_omp_previous_named_if_clause);
3794  }
3795  ErrorFound = true;
3796  }
3797  return ErrorFound;
3798 }
3799 
3800 static std::pair<ValueDecl *, bool>
3801 getPrivateItem(Sema &S, Expr *&RefExpr, SourceLocation &ELoc,
3802  SourceRange &ERange, bool AllowArraySection = false) {
3803  if (RefExpr->isTypeDependent() || RefExpr->isValueDependent() ||
3804  RefExpr->containsUnexpandedParameterPack())
3805  return std::make_pair(nullptr, true);
3806 
3807  // OpenMP [3.1, C/C++]
3808  // A list item is a variable name.
3809  // OpenMP [2.9.3.3, Restrictions, p.1]
3810  // A variable that is part of another variable (as an array or
3811  // structure element) cannot appear in a private clause.
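  // For example, 'private(s.field)' where 's' is a local struct is rejected
  // below; when AllowArraySection is set, the base variable of an array
  // subscript or array section is extracted and checked instead.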
3812  RefExpr = RefExpr->IgnoreParens();
3813  enum {
3814  NoArrayExpr = -1,
3815  ArraySubscript = 0,
3816  OMPArraySection = 1
3817  } IsArrayExpr = NoArrayExpr;
3818  if (AllowArraySection) {
3819  if (auto *ASE = dyn_cast_or_null<ArraySubscriptExpr>(RefExpr)) {
3820  Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
3821  while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
3822  Base = TempASE->getBase()->IgnoreParenImpCasts();
3823  RefExpr = Base;
3824  IsArrayExpr = ArraySubscript;
3825  } else if (auto *OASE = dyn_cast_or_null<OMPArraySectionExpr>(RefExpr)) {
3826  Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
3827  while (auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
3828  Base = TempOASE->getBase()->IgnoreParenImpCasts();
3829  while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
3830  Base = TempASE->getBase()->IgnoreParenImpCasts();
3831  RefExpr = Base;
3832  IsArrayExpr = OMPArraySection;
3833  }
3834  }
3835  ELoc = RefExpr->getExprLoc();
3836  ERange = RefExpr->getSourceRange();
3837  RefExpr = RefExpr->IgnoreParenImpCasts();
3838  auto *DE = dyn_cast_or_null<DeclRefExpr>(RefExpr);
3839  auto *ME = dyn_cast_or_null<MemberExpr>(RefExpr);
3840  if ((!DE || !isa<VarDecl>(DE->getDecl())) &&
3841  (S.getCurrentThisType().isNull() || !ME ||
3842  !isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts()) ||
3843  !isa<FieldDecl>(ME->getMemberDecl()))) {
3844  if (IsArrayExpr != NoArrayExpr) {
3845  S.Diag(ELoc, diag::err_omp_expected_base_var_name) << IsArrayExpr
3846  << ERange;
3847  } else {
3848  S.Diag(ELoc,
3849  AllowArraySection
3850  ? diag::err_omp_expected_var_name_member_expr_or_array_item
3851  : diag::err_omp_expected_var_name_member_expr)
3852  << (S.getCurrentThisType().isNull() ? 0 : 1) << ERange;
3853  }
3854  return std::make_pair(nullptr, false);
3855  }
3856  return std::make_pair(
3857  getCanonicalDecl(DE ? DE->getDecl() : ME->getMemberDecl()), false);
3858 }
3859 
3860 static void checkAllocateClauses(Sema &S, DSAStackTy *Stack,
3861  ArrayRef<OMPClause *> Clauses) {
3862  assert(!S.CurContext->isDependentContext() &&
3863  "Expected non-dependent context.");
3864  auto AllocateRange =
3865  llvm::make_filter_range(Clauses, OMPAllocateClause::classof);
3866  llvm::DenseMap<CanonicalDeclPtr<Decl>, CanonicalDeclPtr<VarDecl>>
3867  DeclToCopy;
3868  auto PrivateRange = llvm::make_filter_range(Clauses, [](const OMPClause *C) {
3869  return isOpenMPPrivate(C->getClauseKind());
3870  });
3871  for (OMPClause *Cl : PrivateRange) {
3872  MutableArrayRef<Expr *>::iterator I, It, Et;
3873  if (Cl->getClauseKind() == OMPC_private) {
3874  auto *PC = cast<OMPPrivateClause>(Cl);
3875  I = PC->private_copies().begin();
3876  It = PC->varlist_begin();
3877  Et = PC->varlist_end();
3878  } else if (Cl->getClauseKind() == OMPC_firstprivate) {
3879  auto *PC = cast<OMPFirstprivateClause>(Cl);
3880  I = PC->private_copies().begin();
3881  It = PC->varlist_begin();
3882  Et = PC->varlist_end();
3883  } else if (Cl->getClauseKind() == OMPC_lastprivate) {
3884  auto *PC = cast<OMPLastprivateClause>(Cl);
3885  I = PC->private_copies().begin();
3886  It = PC->varlist_begin();
3887  Et = PC->varlist_end();
3888  } else if (Cl->getClauseKind() == OMPC_linear) {
3889  auto *PC = cast<OMPLinearClause>(Cl);
3890  I = PC->privates().begin();
3891  It = PC->varlist_begin();
3892  Et = PC->varlist_end();
3893  } else if (Cl->getClauseKind() == OMPC_reduction) {
3894  auto *PC = cast<OMPReductionClause>(Cl);
3895  I = PC->privates().begin();
3896  It = PC->varlist_begin();
3897  Et = PC->varlist_end();
3898  } else if (Cl->getClauseKind() == OMPC_task_reduction) {
3899  auto *PC = cast<OMPTaskReductionClause>(Cl);
3900  I = PC->privates().begin();
3901  It = PC->varlist_begin();
3902  Et = PC->varlist_end();
3903  } else if (Cl->getClauseKind() == OMPC_in_reduction) {
3904  auto *PC = cast<OMPInReductionClause>(Cl);
3905  I = PC->privates().begin();
3906  It = PC->varlist_begin();
3907  Et = PC->varlist_end();
3908  } else {
3909  llvm_unreachable("Expected private clause.");
3910  }
3911  for (Expr *E : llvm::make_range(It, Et)) {
3912  if (!*I) {
3913  ++I;
3914  continue;
3915  }
3916  SourceLocation ELoc;
3917  SourceRange ERange;
3918  Expr *SimpleRefExpr = E;
3919  auto Res = getPrivateItem(S, SimpleRefExpr, ELoc, ERange,
3920  /*AllowArraySection=*/true);
3921  DeclToCopy.try_emplace(Res.first,
3922  cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()));
3923  ++I;
3924  }
3925  }
3926  for (OMPClause *C : AllocateRange) {
3927  auto *AC = cast<OMPAllocateClause>(C);
3928  OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind =
3929  getAllocatorKind(S, Stack, AC->getAllocator());
3930  // OpenMP, 2.11.4 allocate Clause, Restrictions.
3931  // For task, taskloop or target directives, allocation requests to memory
3932  // allocators with the trait access set to thread result in unspecified
3933  // behavior.
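  // For example, 'allocate(omp_thread_mem_alloc : x)' combined with a
  // privatizing clause for 'x' on a '#pragma omp task' triggers this warning.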
3934  if (AllocatorKind == OMPAllocateDeclAttr::OMPThreadMemAlloc &&
3935  (isOpenMPTaskingDirective(Stack->getCurrentDirective()) ||
3936  isOpenMPTargetExecutionDirective(Stack->getCurrentDirective()))) {
3937  S.Diag(AC->getAllocator()->getExprLoc(),
3938  diag::warn_omp_allocate_thread_on_task_target_directive)
3939  << getOpenMPDirectiveName(Stack->getCurrentDirective());
3940  }
3941  for (Expr *E : AC->varlists()) {
3942  SourceLocation ELoc;
3943  SourceRange ERange;
3944  Expr *SimpleRefExpr = E;
3945  auto Res = getPrivateItem(S, SimpleRefExpr, ELoc, ERange);
3946  ValueDecl *VD = Res.first;
3947  DSAStackTy::DSAVarData Data = Stack->getTopDSA(VD, /*FromParent=*/false);
3948  if (!isOpenMPPrivate(Data.CKind)) {
3949  S.Diag(E->getExprLoc(),
3950  diag::err_omp_expected_private_copy_for_allocate);
3951  continue;
3952  }
3953  VarDecl *PrivateVD = DeclToCopy[VD];
3954  if (checkPreviousOMPAllocateAttribute(S, Stack, E, PrivateVD,
3955  AllocatorKind, AC->getAllocator()))
3956  continue;
3957  applyOMPAllocateAttribute(S, PrivateVD, AllocatorKind, AC->getAllocator(),
3958  E->getSourceRange());
3959  }
3960  }
3961 }
3962 
3963 StmtResult Sema::ActOnOpenMPExecutableDirective(
3964  OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
3965  OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
3966  Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc) {
3967  StmtResult Res = StmtError();
3968  // First check CancelRegion which is then used in checkNestingOfRegions.
3969  if (checkCancelRegion(*this, Kind, CancelRegion, StartLoc) ||
3970  checkNestingOfRegions(*this, DSAStack, Kind, DirName, CancelRegion,
3971  StartLoc))
3972  return StmtError();
3973 
3974  llvm::SmallVector<OMPClause *, 8> ClausesWithImplicit;
3975  VarsWithInheritedDSAType VarsWithInheritedDSA;
3976  bool ErrorFound = false;
3977  ClausesWithImplicit.append(Clauses.begin(), Clauses.end());
3978  if (AStmt && !CurContext->isDependentContext()) {
3979  assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
3980 
3981  // Check default data sharing attributes for referenced variables.
3982  DSAAttrChecker DSAChecker(DSAStack, *this, cast<CapturedStmt>(AStmt));
3983  int ThisCaptureLevel = getOpenMPCaptureLevels(Kind);
3984  Stmt *S = AStmt;
3985  while (--ThisCaptureLevel >= 0)
3986  S = cast<CapturedStmt>(S)->getCapturedStmt();
3987  DSAChecker.Visit(S);
3988  if (DSAChecker.isErrorFound())
3989  return StmtError();
3990  // Generate list of implicitly defined firstprivate variables.
3991  VarsWithInheritedDSA = DSAChecker.getVarsWithInheritedDSA();
3992 
3993  SmallVector<Expr *, 4> ImplicitFirstprivates(
3994  DSAChecker.getImplicitFirstprivate().begin(),
3995  DSAChecker.getImplicitFirstprivate().end());
3996  SmallVector<Expr *, 4> ImplicitMaps(DSAChecker.getImplicitMap().begin(),
3997  DSAChecker.getImplicitMap().end());
3998  // Mark taskgroup task_reduction descriptors as implicitly firstprivate.
3999  for (OMPClause *C : Clauses) {
4000  if (auto *IRC = dyn_cast<OMPInReductionClause>(C)) {
4001  for (Expr *E : IRC->taskgroup_descriptors())
4002  if (E)
4003  ImplicitFirstprivates.emplace_back(E);
4004  }
4005  }
4006  if (!ImplicitFirstprivates.empty()) {
4007  if (OMPClause *Implicit = ActOnOpenMPFirstprivateClause(
4008  ImplicitFirstprivates, SourceLocation(), SourceLocation(),
4009  SourceLocation())) {
4010  ClausesWithImplicit.push_back(Implicit);
4011  ErrorFound = cast<OMPFirstprivateClause>(Implicit)->varlist_size() !=
4012  ImplicitFirstprivates.size();
4013  } else {
4014  ErrorFound = true;
4015  }
4016  }
4017  if (!ImplicitMaps.empty()) {
4018  CXXScopeSpec MapperIdScopeSpec;
4019  DeclarationNameInfo MapperId;
4020  if (OMPClause *Implicit = ActOnOpenMPMapClause(
4021  llvm::None, llvm::None, MapperIdScopeSpec, MapperId,
4022  OMPC_MAP_tofrom, /*IsMapTypeImplicit=*/true, SourceLocation(),
4023  SourceLocation(), ImplicitMaps, OMPVarListLocTy())) {
4024  ClausesWithImplicit.emplace_back(Implicit);
4025  ErrorFound |=
4026  cast<OMPMapClause>(Implicit)->varlist_size() != ImplicitMaps.size();
4027  } else {
4028  ErrorFound = true;
4029  }
4030  }
4031  }
4032 
4033  llvm::SmallVector<OpenMPDirectiveKind, 4> AllowedNameModifiers;
4034  switch (Kind) {
4035  case OMPD_parallel:
4036  Res = ActOnOpenMPParallelDirective(ClausesWithImplicit, AStmt, StartLoc,
4037  EndLoc);
4038  AllowedNameModifiers.push_back(OMPD_parallel);
4039  break;
4040  case OMPD_simd:
4041  Res = ActOnOpenMPSimdDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc,
4042  VarsWithInheritedDSA);
4043  break;
4044  case OMPD_for:
4045  Res = ActOnOpenMPForDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc,
4046  VarsWithInheritedDSA);
4047  break;
4048  case OMPD_for_simd:
4049  Res = ActOnOpenMPForSimdDirective(ClausesWithImplicit, AStmt, StartLoc,
4050  EndLoc, VarsWithInheritedDSA);
4051  break;
4052  case OMPD_sections:
4053  Res = ActOnOpenMPSectionsDirective(ClausesWithImplicit, AStmt, StartLoc,
4054  EndLoc);
4055  break;
4056  case OMPD_section:
4057  assert(ClausesWithImplicit.empty() &&
4058  "No clauses are allowed for 'omp section' directive");
4059  Res = ActOnOpenMPSectionDirective(AStmt, StartLoc, EndLoc);
4060  break;
4061  case OMPD_single:
4062  Res = ActOnOpenMPSingleDirective(ClausesWithImplicit, AStmt, StartLoc,
4063  EndLoc);
4064  break;
4065  case OMPD_master:
4066  assert(ClausesWithImplicit.empty() &&
4067  "No clauses are allowed for 'omp master' directive");
4068  Res = ActOnOpenMPMasterDirective(AStmt, StartLoc, EndLoc);
4069  break;
4070  case OMPD_critical:
4071  Res = ActOnOpenMPCriticalDirective(DirName, ClausesWithImplicit, AStmt,
4072  StartLoc, EndLoc);
4073  break;
4074  case OMPD_parallel_for:
4075  Res = ActOnOpenMPParallelForDirective(ClausesWithImplicit, AStmt, StartLoc,
4076  EndLoc, VarsWithInheritedDSA);
4077  AllowedNameModifiers.push_back(OMPD_parallel);
4078  break;
4079  case OMPD_parallel_for_simd:
4080  Res = ActOnOpenMPParallelForSimdDirective(
4081  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4082  AllowedNameModifiers.push_back(OMPD_parallel);
4083  break;
4084  case OMPD_parallel_sections:
4085  Res = ActOnOpenMPParallelSectionsDirective(ClausesWithImplicit, AStmt,
4086  StartLoc, EndLoc);
4087  AllowedNameModifiers.push_back(OMPD_parallel);
4088  break;
4089  case OMPD_task:
4090  Res =
4091  ActOnOpenMPTaskDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc);
4092  AllowedNameModifiers.push_back(OMPD_task);
4093  break;
4094  case OMPD_taskyield:
4095  assert(ClausesWithImplicit.empty() &&
4096  "No clauses are allowed for 'omp taskyield' directive");
4097  assert(AStmt == nullptr &&
4098  "No associated statement allowed for 'omp taskyield' directive");
4099  Res = ActOnOpenMPTaskyieldDirective(StartLoc, EndLoc);
4100  break;
4101  case OMPD_barrier:
4102  assert(ClausesWithImplicit.empty() &&
4103  "No clauses are allowed for 'omp barrier' directive");
4104  assert(AStmt == nullptr &&
4105  "No associated statement allowed for 'omp barrier' directive");
4106  Res = ActOnOpenMPBarrierDirective(StartLoc, EndLoc);
4107  break;
4108  case OMPD_taskwait:
4109  assert(ClausesWithImplicit.empty() &&
4110  "No clauses are allowed for 'omp taskwait' directive");
4111  assert(AStmt == nullptr &&
4112  "No associated statement allowed for 'omp taskwait' directive");
4113  Res = ActOnOpenMPTaskwaitDirective(StartLoc, EndLoc);
4114  break;
4115  case OMPD_taskgroup:
4116  Res = ActOnOpenMPTaskgroupDirective(ClausesWithImplicit, AStmt, StartLoc,
4117  EndLoc);
4118  break;
4119  case OMPD_flush:
4120  assert(AStmt == nullptr &&
4121  "No associated statement allowed for 'omp flush' directive");
4122  Res = ActOnOpenMPFlushDirective(ClausesWithImplicit, StartLoc, EndLoc);
4123  break;
4124  case OMPD_ordered:
4125  Res = ActOnOpenMPOrderedDirective(ClausesWithImplicit, AStmt, StartLoc,
4126  EndLoc);
4127  break;
4128  case OMPD_atomic:
4129  Res = ActOnOpenMPAtomicDirective(ClausesWithImplicit, AStmt, StartLoc,
4130  EndLoc);
4131  break;
4132  case OMPD_teams:
4133  Res =
4134  ActOnOpenMPTeamsDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc);
4135  break;
4136  case OMPD_target:
4137  Res = ActOnOpenMPTargetDirective(ClausesWithImplicit, AStmt, StartLoc,
4138  EndLoc);
4139  AllowedNameModifiers.push_back(OMPD_target);
4140  break;
4141  case OMPD_target_parallel:
4142  Res = ActOnOpenMPTargetParallelDirective(ClausesWithImplicit, AStmt,
4143  StartLoc, EndLoc);
4144  AllowedNameModifiers.push_back(OMPD_target);
4145  AllowedNameModifiers.push_back(OMPD_parallel);
4146  break;
4147  case OMPD_target_parallel_for:
4148  Res = ActOnOpenMPTargetParallelForDirective(
4149  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4150  AllowedNameModifiers.push_back(OMPD_target);
4151  AllowedNameModifiers.push_back(OMPD_parallel);
4152  break;
4153  case OMPD_cancellation_point:
4154  assert(ClausesWithImplicit.empty() &&
4155  "No clauses are allowed for 'omp cancellation point' directive");
4156  assert(AStmt == nullptr && "No associated statement allowed for 'omp "
4157  "cancellation point' directive");
4158  Res = ActOnOpenMPCancellationPointDirective(StartLoc, EndLoc, CancelRegion);
4159  break;
4160  case OMPD_cancel:
4161  assert(AStmt == nullptr &&
4162  "No associated statement allowed for 'omp cancel' directive");
4163  Res = ActOnOpenMPCancelDirective(ClausesWithImplicit, StartLoc, EndLoc,
4164  CancelRegion);
4165  AllowedNameModifiers.push_back(OMPD_cancel);
4166  break;
4167  case OMPD_target_data:
4168  Res = ActOnOpenMPTargetDataDirective(ClausesWithImplicit, AStmt, StartLoc,
4169  EndLoc);
4170  AllowedNameModifiers.push_back(OMPD_target_data);
4171  break;
4172  case OMPD_target_enter_data:
4173  Res = ActOnOpenMPTargetEnterDataDirective(ClausesWithImplicit, StartLoc,
4174  EndLoc, AStmt);
4175  AllowedNameModifiers.push_back(OMPD_target_enter_data);
4176  break;
4177  case OMPD_target_exit_data:
4178  Res = ActOnOpenMPTargetExitDataDirective(ClausesWithImplicit, StartLoc,
4179  EndLoc, AStmt);
4180  AllowedNameModifiers.push_back(OMPD_target_exit_data);
4181  break;
4182  case OMPD_taskloop:
4183  Res = ActOnOpenMPTaskLoopDirective(ClausesWithImplicit, AStmt, StartLoc,
4184  EndLoc, VarsWithInheritedDSA);
4185  AllowedNameModifiers.push_back(OMPD_taskloop);
4186  break;
4187  case OMPD_taskloop_simd:
4188  Res = ActOnOpenMPTaskLoopSimdDirective(ClausesWithImplicit, AStmt, StartLoc,
4189  EndLoc, VarsWithInheritedDSA);
4190  AllowedNameModifiers.push_back(OMPD_taskloop);
4191  break;
4192  case OMPD_distribute:
4193  Res = ActOnOpenMPDistributeDirective(ClausesWithImplicit, AStmt, StartLoc,
4194  EndLoc, VarsWithInheritedDSA);
4195  break;
4196  case OMPD_target_update:
4197  Res = ActOnOpenMPTargetUpdateDirective(ClausesWithImplicit, StartLoc,
4198  EndLoc, AStmt);
4199  AllowedNameModifiers.push_back(OMPD_target_update);
4200  break;
4201  case OMPD_distribute_parallel_for:
4202  Res = ActOnOpenMPDistributeParallelForDirective(
4203  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4204  AllowedNameModifiers.push_back(OMPD_parallel);
4205  break;
4206  case OMPD_distribute_parallel_for_simd:
4207  Res = ActOnOpenMPDistributeParallelForSimdDirective(
4208  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4209  AllowedNameModifiers.push_back(OMPD_parallel);
4210  break;
4211  case OMPD_distribute_simd:
4212  Res = ActOnOpenMPDistributeSimdDirective(
4213  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4214  break;
4215  case OMPD_target_parallel_for_simd:
4216  Res = ActOnOpenMPTargetParallelForSimdDirective(
4217  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4218  AllowedNameModifiers.push_back(OMPD_target);
4219  AllowedNameModifiers.push_back(OMPD_parallel);
4220  break;
4221  case OMPD_target_simd:
4222  Res = ActOnOpenMPTargetSimdDirective(ClausesWithImplicit, AStmt, StartLoc,
4223  EndLoc, VarsWithInheritedDSA);
4224  AllowedNameModifiers.push_back(OMPD_target);
4225  break;
4226  case OMPD_teams_distribute:
4227  Res = ActOnOpenMPTeamsDistributeDirective(
4228  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4229  break;
4230  case OMPD_teams_distribute_simd:
4231  Res = ActOnOpenMPTeamsDistributeSimdDirective(
4232  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4233  break;
4234  case OMPD_teams_distribute_parallel_for_simd:
4235  Res = ActOnOpenMPTeamsDistributeParallelForSimdDirective(
4236  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4237  AllowedNameModifiers.push_back(OMPD_parallel);
4238  break;
4239  case OMPD_teams_distribute_parallel_for:
4240  Res = ActOnOpenMPTeamsDistributeParallelForDirective(
4241  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4242  AllowedNameModifiers.push_back(OMPD_parallel);
4243  break;
4244  case OMPD_target_teams:
4245  Res = ActOnOpenMPTargetTeamsDirective(ClausesWithImplicit, AStmt, StartLoc,
4246  EndLoc);
4247  AllowedNameModifiers.push_back(OMPD_target);
4248  break;
4249  case OMPD_target_teams_distribute:
4250  Res = ActOnOpenMPTargetTeamsDistributeDirective(
4251  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4252  AllowedNameModifiers.push_back(OMPD_target);
4253  break;
4254  case OMPD_target_teams_distribute_parallel_for:
4255  Res = ActOnOpenMPTargetTeamsDistributeParallelForDirective(
4256  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4257  AllowedNameModifiers.push_back(OMPD_target);
4258  AllowedNameModifiers.push_back(OMPD_parallel);
4259  break;
4260  case OMPD_target_teams_distribute_parallel_for_simd:
4261  Res = ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
4262  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4263  AllowedNameModifiers.push_back(OMPD_target);
4264  AllowedNameModifiers.push_back(OMPD_parallel);
4265  break;
4266  case OMPD_target_teams_distribute_simd:
4267  Res = ActOnOpenMPTargetTeamsDistributeSimdDirective(
4268  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4269  AllowedNameModifiers.push_back(OMPD_target);
4270  break;
4271  case OMPD_declare_target:
4272  case OMPD_end_declare_target:
4273  case OMPD_threadprivate:
4274  case OMPD_allocate:
4275  case OMPD_declare_reduction:
4276  case OMPD_declare_mapper:
4277  case OMPD_declare_simd:
4278  case OMPD_requires:
4279  llvm_unreachable("OpenMP Directive is not allowed");
4280  case OMPD_unknown:
4281  llvm_unreachable("Unknown OpenMP directive");
4282  }
4283 
4284  ErrorFound = Res.isInvalid() || ErrorFound;
4285 
4286  // Check variables in the clauses if default(none) was specified.
4287  if (DSAStack->getDefaultDSA() == DSA_none) {
4288  DSAAttrChecker DSAChecker(DSAStack, *this, nullptr);
4289  for (OMPClause *C : Clauses) {
4290  switch (C->getClauseKind()) {
4291  case OMPC_num_threads:
4292  case OMPC_dist_schedule:
4293  // Do not analyse if no parent teams directive.
4294  if (isOpenMPTeamsDirective(DSAStack->getCurrentDirective()))
4295  break;
4296  continue;
4297  case OMPC_if:
4298  if (isOpenMPTeamsDirective(DSAStack->getCurrentDirective()) &&
4299  cast<OMPIfClause>(C)->getNameModifier() != OMPD_target)
4300  break;
4301  continue;
4302  case OMPC_schedule:
4303  break;
4304  case OMPC_ordered:
4305  case OMPC_device:
4306  case OMPC_num_teams:
4307  case OMPC_thread_limit:
4308  case OMPC_priority:
4309  case OMPC_grainsize:
4310  case OMPC_num_tasks:
4311  case OMPC_hint:
4312  case OMPC_collapse:
4313  case OMPC_safelen:
4314  case OMPC_simdlen:
4315  case OMPC_final:
4316  case OMPC_default:
4317  case OMPC_proc_bind:
4318  case OMPC_private:
4319  case OMPC_firstprivate:
4320  case OMPC_lastprivate:
4321  case OMPC_shared:
4322  case OMPC_reduction:
4323  case OMPC_task_reduction:
4324  case OMPC_in_reduction:
4325  case OMPC_linear:
4326  case OMPC_aligned:
4327  case OMPC_copyin:
4328  case OMPC_copyprivate:
4329  case OMPC_nowait:
4330  case OMPC_untied:
4331  case OMPC_mergeable:
4332  case OMPC_allocate:
4333  case OMPC_read:
4334  case OMPC_write:
4335  case OMPC_update:
4336  case OMPC_capture:
4337  case OMPC_seq_cst:
4338  case OMPC_depend:
4339  case OMPC_threads:
4340  case OMPC_simd:
4341  case OMPC_map:
4342  case OMPC_nogroup:
4343  case OMPC_defaultmap:
4344  case OMPC_to:
4345  case OMPC_from:
4346  case OMPC_use_device_ptr:
4347  case OMPC_is_device_ptr:
4348  continue;
4349  case OMPC_allocator:
4350  case OMPC_flush:
4351  case OMPC_threadprivate:
4352  case OMPC_uniform:
4353  case OMPC_unknown:
4354  case OMPC_unified_address:
4355  case OMPC_unified_shared_memory:
4356  case OMPC_reverse_offload:
4357  case OMPC_dynamic_allocators:
4358  case OMPC_atomic_default_mem_order:
4359  llvm_unreachable("Unexpected clause");
4360  }
4361  for (Stmt *CC : C->children()) {
4362  if (CC)
4363  DSAChecker.Visit(CC);
4364  }
4365  }
4366  for (auto &P : DSAChecker.getVarsWithInheritedDSA())
4367  VarsWithInheritedDSA[P.getFirst()] = P.getSecond();
4368  }
4369  for (const auto &P : VarsWithInheritedDSA) {
4370  Diag(P.second->getExprLoc(), diag::err_omp_no_dsa_for_variable)
4371  << P.first << P.second->getSourceRange();
4372  Diag(DSAStack->getDefaultDSALocation(), diag::note_omp_default_dsa_none);
4373  }
4374  ErrorFound = !VarsWithInheritedDSA.empty() || ErrorFound;
4375 
4376  if (!AllowedNameModifiers.empty())
4377  ErrorFound = checkIfClauses(*this, Kind, Clauses, AllowedNameModifiers) ||
4378  ErrorFound;
4379 
4380  if (ErrorFound)
4381  return StmtError();
4382 
4383   if (!(Res.getAs<OMPExecutableDirective>()->isStandaloneDirective())) {
4384     Res.getAs<OMPExecutableDirective>()
4385         ->getStructuredBlock()
4386  ->setIsOMPStructuredBlock(true);
4387  }
4388 
4389  if (!CurContext->isDependentContext() &&
4390       isOpenMPTargetExecutionDirective(Kind) &&
4391       !(DSAStack->hasRequiresDeclWithClause<OMPUnifiedSharedMemoryClause>() ||
4392  DSAStack->hasRequiresDeclWithClause<OMPUnifiedAddressClause>() ||
4393  DSAStack->hasRequiresDeclWithClause<OMPReverseOffloadClause>() ||
4394  DSAStack->hasRequiresDeclWithClause<OMPDynamicAllocatorsClause>())) {
4395  // Register target to DSA Stack.
4396  DSAStack->addTargetDirLocation(StartLoc);
4397  }
4398 
4399  return Res;
4400 }
4401 
4402 Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareSimdDirective(
4403     DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen,
4404  ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
4405  ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
4406  ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR) {
4407  assert(Aligneds.size() == Alignments.size());
4408  assert(Linears.size() == LinModifiers.size());
4409  assert(Linears.size() == Steps.size());
4410  if (!DG || DG.get().isNull())
4411  return DeclGroupPtrTy();
4412 
4413  if (!DG.get().isSingleDecl()) {
4414  Diag(SR.getBegin(), diag::err_omp_single_decl_in_declare_simd);
4415  return DG;
4416  }
4417  Decl *ADecl = DG.get().getSingleDecl();
4418  if (auto *FTD = dyn_cast<FunctionTemplateDecl>(ADecl))
4419  ADecl = FTD->getTemplatedDecl();
4420 
4421  auto *FD = dyn_cast<FunctionDecl>(ADecl);
4422  if (!FD) {
4423  Diag(ADecl->getLocation(), diag::err_omp_function_expected);
4424  return DeclGroupPtrTy();
4425  }
4426 
4427  // OpenMP [2.8.2, declare simd construct, Description]
4428  // The parameter of the simdlen clause must be a constant positive integer
4429  // expression.
4430  ExprResult SL;
4431  if (Simdlen)
4432  SL = VerifyPositiveIntegerConstantInClause(Simdlen, OMPC_simdlen);
4433  // OpenMP [2.8.2, declare simd construct, Description]
4434  // The special this pointer can be used as if it were one of the arguments to
4435  // the function in any of the linear, aligned, or uniform clauses.
4436  // The uniform clause declares one or more arguments to have an invariant
4437  // value for all concurrent invocations of the function in the execution of a
4438  // single SIMD loop.
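  // Illustrative sketch (hypothetical user code): a 'uniform' parameter has the
  // same value across all concurrent SIMD invocations, e.g.:
  //
  //   #pragma omp declare simd uniform(scale)
  //   float mul(float x, float scale) { return x * scale; }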
4439  llvm::DenseMap<const Decl *, const Expr *> UniformedArgs;
4440  const Expr *UniformedLinearThis = nullptr;
4441  for (const Expr *E : Uniforms) {
4442  E = E->IgnoreParenImpCasts();
4443  if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
4444  if (const auto *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl()))
4445  if (FD->getNumParams() > PVD->getFunctionScopeIndex() &&
4446  FD->getParamDecl(PVD->getFunctionScopeIndex())
4447  ->getCanonicalDecl() == PVD->getCanonicalDecl()) {
4448  UniformedArgs.try_emplace(PVD->getCanonicalDecl(), E);
4449  continue;
4450  }
4451  if (isa<CXXThisExpr>(E)) {
4452  UniformedLinearThis = E;
4453  continue;
4454  }
4455  Diag(E->getExprLoc(), diag::err_omp_param_or_this_in_clause)
4456  << FD->getDeclName() << (isa<CXXMethodDecl>(ADecl) ? 1 : 0);
4457  }
4458  // OpenMP [2.8.2, declare simd construct, Description]
4459  // The aligned clause declares that the object to which each list item points
4460  // is aligned to the number of bytes expressed in the optional parameter of
4461  // the aligned clause.
4462  // The special this pointer can be used as if it were one of the arguments to
4463  // the function in any of the linear, aligned, or uniform clauses.
4464  // The type of list items appearing in the aligned clause must be array,
4465  // pointer, reference to array, or reference to pointer.
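  // Illustrative sketch (hypothetical user code): 'aligned' list items must be
  // pointers, arrays, or references to those, optionally with an alignment:
  //
  //   #pragma omp declare simd aligned(p : 32)
  //   float load(const float *p, int i) { return p[i]; }
  //
  // Naming a plain 'int' parameter in 'aligned' would be rejected with
  // err_omp_aligned_expected_array_or_ptr, as checked below.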
4466  llvm::DenseMap<const Decl *, const Expr *> AlignedArgs;
4467  const Expr *AlignedThis = nullptr;
4468  for (const Expr *E : Aligneds) {
4469  E = E->IgnoreParenImpCasts();
4470  if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
4471  if (const auto *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl())) {
4472  const VarDecl *CanonPVD = PVD->getCanonicalDecl();
4473  if (FD->getNumParams() > PVD->getFunctionScopeIndex() &&
4474  FD->getParamDecl(PVD->getFunctionScopeIndex())
4475  ->getCanonicalDecl() == CanonPVD) {
4476  // OpenMP [2.8.1, simd construct, Restrictions]
4477  // A list-item cannot appear in more than one aligned clause.
4478  if (AlignedArgs.count(CanonPVD) > 0) {
4479  Diag(E->getExprLoc(), diag::err_omp_aligned_twice)
4480  << 1 << E->getSourceRange();
4481  Diag(AlignedArgs[CanonPVD]->getExprLoc(),
4482  diag::note_omp_explicit_dsa)
4483  << getOpenMPClauseName(OMPC_aligned);
4484  continue;
4485  }
4486  AlignedArgs[CanonPVD] = E;
4487  QualType QTy = PVD->getType()
4488  .getNonReferenceType()
4489  .getUnqualifiedType()
4490  .getCanonicalType();
4491  const Type *Ty = QTy.getTypePtrOrNull();
4492  if (!Ty || (!Ty->isArrayType() && !Ty->isPointerType())) {
4493  Diag(E->getExprLoc(), diag::err_omp_aligned_expected_array_or_ptr)
4494  << QTy << getLangOpts().CPlusPlus << E->getSourceRange();
4495  Diag(PVD->getLocation(), diag::note_previous_decl) << PVD;
4496  }
4497  continue;
4498  }
4499  }
4500  if (isa<CXXThisExpr>(E)) {
4501  if (AlignedThis) {
4502  Diag(E->getExprLoc(), diag::err_omp_aligned_twice)
4503  << 2 << E->getSourceRange();
4504  Diag(AlignedThis->getExprLoc(), diag::note_omp_explicit_dsa)
4505  << getOpenMPClauseName(OMPC_aligned);
4506  }
4507  AlignedThis = E;
4508  continue;
4509  }
4510  Diag(E->getExprLoc(), diag::err_omp_param_or_this_in_clause)
4511  << FD->getDeclName() << (isa<CXXMethodDecl>(ADecl) ? 1 : 0);
4512  }
4513  // The optional parameter of the aligned clause, alignment, must be a constant
4514  // positive integer expression. If no optional parameter is specified,
4515  // implementation-defined default alignments for SIMD instructions on the
4516  // target platforms are assumed.
4517  SmallVector<const Expr *, 4> NewAligns;
4518  for (Expr *E : Alignments) {
4519  ExprResult Align;
4520  if (E)
4521  Align = VerifyPositiveIntegerConstantInClause(E, OMPC_aligned);
4522  NewAligns.push_back(Align.get());
4523  }
4524  // OpenMP [2.8.2, declare simd construct, Description]
4525  // The linear clause declares one or more list items to be private to a SIMD
4526  // lane and to have a linear relationship with respect to the iteration space
4527  // of a loop.
4528  // The special this pointer can be used as if it were one of the arguments to
4529  // the function in any of the linear, aligned, or uniform clauses.
4530  // When a linear-step expression is specified in a linear clause it must be
4531  // either a constant integer expression or an integer-typed parameter that is
4532  // specified in a uniform clause on the directive.
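  // Illustrative sketch (hypothetical user code): a 'linear' parameter varies by
  // a fixed step from one SIMD lane to the next, and the step may name an
  // integer parameter that is also listed in 'uniform':
  //
  //   #pragma omp declare simd linear(i : step) uniform(step)
  //   float at(const float *p, int i, int step) { return p[i]; }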
4533  llvm::DenseMap<const Decl *, const Expr *> LinearArgs;
4534  const bool IsUniformedThis = UniformedLinearThis != nullptr;
4535  auto MI = LinModifiers.begin();
4536  for (const Expr *E : Linears) {
4537  auto LinKind = static_cast<OpenMPLinearClauseKind>(*MI);
4538  ++MI;
4539  E = E->IgnoreParenImpCasts();
4540  if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
4541  if (const auto *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl())) {
4542  const VarDecl *CanonPVD = PVD->getCanonicalDecl();
4543  if (FD->getNumParams() > PVD->getFunctionScopeIndex() &&
4544  FD->getParamDecl(PVD->getFunctionScopeIndex())
4545  ->getCanonicalDecl() == CanonPVD) {
4546  // OpenMP [2.15.3.7, linear Clause, Restrictions]
4547  // A list-item cannot appear in more than one linear clause.
4548  if (LinearArgs.count(CanonPVD) > 0) {
4549  Diag(E->getExprLoc(), diag::err_omp_wrong_dsa)
4550  << getOpenMPClauseName(OMPC_linear)
4551  << getOpenMPClauseName(OMPC_linear) << E->getSourceRange();
4552  Diag(LinearArgs[CanonPVD]->getExprLoc(),
4553  diag::note_omp_explicit_dsa)
4554  << getOpenMPClauseName(OMPC_linear);
4555  continue;
4556  }
4557  // Each argument can appear in at most one uniform or linear clause.
4558  if (UniformedArgs.count(CanonPVD) > 0) {
4559  Diag(E->getExprLoc(), diag::err_omp_wrong_dsa)
4560  << getOpenMPClauseName(OMPC_linear)
4561                 << getOpenMPClauseName(OMPC_uniform) << E->getSourceRange();
4562             Diag(UniformedArgs[CanonPVD]->getExprLoc(),
4563  diag::note_omp_explicit_dsa)
4564                 << getOpenMPClauseName(OMPC_uniform);
4565             continue;
4566  }
4567  LinearArgs[CanonPVD] = E;
4568  if (E->isValueDependent() || E->isTypeDependent() ||
4569  E->isInstantiationDependent() ||
4570               E->containsUnexpandedParameterPack())
4571             continue;
4572  (void)CheckOpenMPLinearDecl(CanonPVD, E->getExprLoc(), LinKind,
4573  PVD->getOriginalType());
4574  continue;
4575  }
4576  }
4577  if (isa<CXXThisExpr>(E)) {
4578  if (UniformedLinearThis) {
4579  Diag(E->getExprLoc(), diag::err_omp_wrong_dsa)
4580  << getOpenMPClauseName(OMPC_linear)
4581  << getOpenMPClauseName(IsUniformedThis ? OMPC_uniform : OMPC_linear)
4582  << E->getSourceRange();
4583  Diag(UniformedLinearThis->getExprLoc(), diag::note_omp_explicit_dsa)
4584  << getOpenMPClauseName(IsUniformedThis ? OMPC_uniform
4585  : OMPC_linear);
4586  continue;
4587  }
4588  UniformedLinearThis = E;
4589  if (E->isValueDependent() || E->isTypeDependent() ||
4590         E->isInstantiationDependent() || E->containsUnexpandedParameterPack())
4591       continue;
4592  (void)CheckOpenMPLinearDecl(/*D=*/nullptr, E->getExprLoc(), LinKind,
4593  E->getType());
4594  continue;
4595  }
4596  Diag(E->getExprLoc(), diag::err_omp_param_or_this_in_clause)
4597  << FD->getDeclName() << (isa<CXXMethodDecl>(ADecl) ? 1 : 0);
4598  }
4599  Expr *Step = nullptr;
4600  Expr *NewStep = nullptr;
4601  SmallVector<Expr *, 4> NewSteps;
4602  for (Expr *E : Steps) {
4603  // Skip the same step expression; it was checked already.
4604  if (Step == E || !E) {
4605  NewSteps.push_back(E ? NewStep : nullptr);
4606  continue;
4607  }
4608  Step = E;
4609  if (const auto *DRE = dyn_cast<DeclRefExpr>(Step))
4610  if (const auto *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl())) {
4611  const VarDecl *CanonPVD = PVD->getCanonicalDecl();
4612  if (UniformedArgs.count(CanonPVD) == 0) {
4613  Diag(Step->getExprLoc(), diag::err_omp_expected_uniform_param)
4614  << Step->getSourceRange();
4615  } else if (E->isValueDependent() || E->isTypeDependent() ||
4616  E->isInstantiationDependent() ||
4617                  E->containsUnexpandedParameterPack() ||
4618                  CanonPVD->getType()->hasIntegerRepresentation()) {
4619  NewSteps.push_back(Step);
4620  } else {
4621  Diag(Step->getExprLoc(), diag::err_omp_expected_int_param)
4622  << Step->getSourceRange();
4623  }
4624  continue;
4625  }
4626  NewStep = Step;
4627  if (Step && !Step->isValueDependent() && !Step->isTypeDependent() &&
4628  !Step->isInstantiationDependent() &&
4629       !Step->containsUnexpandedParameterPack()) {
4630     NewStep = PerformOpenMPImplicitIntegerConversion(Step->getExprLoc(), Step)
4631  .get();
4632  if (NewStep)
4633  NewStep = VerifyIntegerConstantExpression(NewStep).get();
4634  }
4635  NewSteps.push_back(NewStep);
4636  }
4637  auto *NewAttr = OMPDeclareSimdDeclAttr::CreateImplicit(
4638  Context, BS, SL.get(), const_cast<Expr **>(Uniforms.data()),
4639  Uniforms.size(), const_cast<Expr **>(Aligneds.data()), Aligneds.size(),
4640  const_cast<Expr **>(NewAligns.data()), NewAligns.size(),
4641  const_cast<Expr **>(Linears.data()), Linears.size(),
4642  const_cast<unsigned *>(LinModifiers.data()), LinModifiers.size(),
4643  NewSteps.data(), NewSteps.size(), SR);
4644  ADecl->addAttr(NewAttr);
4645  return ConvertDeclToDeclGroup(ADecl);
4646 }
4647 
4648 StmtResult Sema::ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
4649                                               Stmt *AStmt,
4650  SourceLocation StartLoc,
4651  SourceLocation EndLoc) {
4652  if (!AStmt)
4653  return StmtError();
4654 
4655  auto *CS = cast<CapturedStmt>(AStmt);
4656  // 1.2.2 OpenMP Language Terminology
4657  // Structured block - An executable statement with a single entry at the
4658  // top and a single exit at the bottom.
4659  // The point of exit cannot be a branch out of the structured block.
4660  // longjmp() and throw() must not violate the entry/exit criteria.
4661  CS->getCapturedDecl()->setNothrow();
4662 
4663  setFunctionHasBranchProtectedScope();
4664 
4665  return OMPParallelDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt,
4666  DSAStack->isCancelRegion());
4667 }
4668 
4669 namespace {
4670 /// Helper class for checking the canonical form of OpenMP loops and
4671 /// extracting the iteration space of each loop in the loop nest, which will be
4672 /// used for IR generation.
4673 class OpenMPIterationSpaceChecker {
4674  /// Reference to Sema.
4675  Sema &SemaRef;
4676  /// Data-sharing stack.
4677  DSAStackTy &Stack;
4678  /// A location for diagnostics (when there is no better location).
4679  SourceLocation DefaultLoc;
4680  /// A location for diagnostics (when increment is not compatible).
4681  SourceLocation ConditionLoc;
4682  /// A source location for referring to loop init later.
4683  SourceRange InitSrcRange;
4684  /// A source location for referring to condition later.
4685  SourceRange ConditionSrcRange;
4686  /// A source location for referring to increment later.
4687  SourceRange IncrementSrcRange;
4688  /// Loop variable.
4689  ValueDecl *LCDecl = nullptr;
4690  /// Reference to loop variable.
4691  Expr *LCRef = nullptr;
4692  /// Lower bound (initializer for the var).
4693  Expr *LB = nullptr;
4694  /// Upper bound.
4695  Expr *UB = nullptr;
4696  /// Loop step (increment).
4697  Expr *Step = nullptr;
4698  /// This flag is true when condition is one of:
4699  /// Var < UB
4700  /// Var <= UB
4701  /// UB > Var
4702  /// UB >= Var
4703  /// This will have no value when the condition is !=
4704  llvm::Optional<bool> TestIsLessOp;
4705  /// This flag is true when condition is strict ( < or > ).
4706  bool TestIsStrictOp = false;
4707  /// This flag is true when step is subtracted on each iteration.
4708  bool SubtractStep = false;
4709  /// The outer loop counter this loop depends on (if any).
4710  const ValueDecl *DepDecl = nullptr;
4711  /// Contains the number of the loop (counting from 1) on which the loop
4712  /// counter init expression of this loop depends.
4713  Optional<unsigned> InitDependOnLC;
4714  /// Contains the number of the loop (counting from 1) on which the loop
4715  /// counter condition expression of this loop depends.
4716  Optional<unsigned> CondDependOnLC;
4717  /// Checks if the provided statement depends on the loop counter.
4718  Optional<unsigned> doesDependOnLoopCounter(const Stmt *S, bool IsInitializer);
4719 
4720 public:
4721  OpenMPIterationSpaceChecker(Sema &SemaRef, DSAStackTy &Stack,
4722  SourceLocation DefaultLoc)
4723  : SemaRef(SemaRef), Stack(Stack), DefaultLoc(DefaultLoc),
4724  ConditionLoc(DefaultLoc) {}
4725  /// Check init-expr for canonical loop form and save loop counter
4726  /// variable - #Var and its initialization value - #LB.
4727  bool checkAndSetInit(Stmt *S, bool EmitDiags = true);
4728  /// Check test-expr for canonical form, save upper-bound (#UB), flags
4729  /// for less/greater and for strict/non-strict comparison.
4730  bool checkAndSetCond(Expr *S);
4731  /// Check incr-expr for canonical loop form and return true if it
4732  /// does not conform, otherwise save loop step (#Step).
4733  bool checkAndSetInc(Expr *S);
4734  /// Return the loop counter variable.
4735  ValueDecl *getLoopDecl() const { return LCDecl; }
4736  /// Return the reference expression to loop counter variable.
4737  Expr *getLoopDeclRefExpr() const { return LCRef; }
4738  /// Source range of the loop init.
4739  SourceRange getInitSrcRange() const { return InitSrcRange; }
4740  /// Source range of the loop condition.
4741  SourceRange getConditionSrcRange() const { return ConditionSrcRange; }
4742  /// Source range of the loop increment.
4743  SourceRange getIncrementSrcRange() const { return IncrementSrcRange; }
4744  /// True if the step should be subtracted.
4745  bool shouldSubtractStep() const { return SubtractStep; }
4746  /// True if the compare operator is strict (<, > or !=).
4747  bool isStrictTestOp() const { return TestIsStrictOp; }
4748  /// Build the expression to calculate the number of iterations.
4749  Expr *buildNumIterations(
4750  Scope *S, const bool LimitedType,
4751  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) const;
4752  /// Build the precondition expression for the loops.
4753  Expr *
4754  buildPreCond(Scope *S, Expr *Cond,
4755  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) const;
4756  /// Build reference expression to the counter to be used for codegen.
4757  DeclRefExpr *
4758  buildCounterVar(llvm::MapVector<const Expr *, DeclRefExpr *> &Captures,
4759  DSAStackTy &DSA) const;
4760  /// Build reference expression to the private counter to be used for
4761  /// codegen.
4762  Expr *buildPrivateCounterVar() const;
4763  /// Build initialization of the counter to be used for codegen.
4764  Expr *buildCounterInit() const;
4765  /// Build step of the counter to be used for codegen.
4766  Expr *buildCounterStep() const;
4767  /// Build loop data with counter value for depend clauses in ordered
4768  /// directives.
4769  Expr *
4770  buildOrderedLoopData(Scope *S, Expr *Counter,
4771  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures,
4772  SourceLocation Loc, Expr *Inc = nullptr,
4773  OverloadedOperatorKind OOK = OO_Amp);
4774  /// Return true if any expression is dependent.
4775  bool dependent() const;
4776 
4777 private:
4778  /// Check the right-hand side of an assignment in the increment
4779  /// expression.
4780  bool checkAndSetIncRHS(Expr *RHS);
4781  /// Helper to set loop counter variable and its initializer.
4782  bool setLCDeclAndLB(ValueDecl *NewLCDecl, Expr *NewDeclRefExpr, Expr *NewLB,
4783  bool EmitDiags);
4784  /// Helper to set upper bound.
4785  bool setUB(Expr *NewUB, llvm::Optional<bool> LessOp, bool StrictOp,
4786  SourceRange SR, SourceLocation SL);
4787  /// Helper to set loop increment.
4788  bool setStep(Expr *NewStep, bool Subtract);
4789 };
4790 
4791 bool OpenMPIterationSpaceChecker::dependent() const {
4792  if (!LCDecl) {
4793  assert(!LB && !UB && !Step);
4794  return false;
4795  }
4796  return LCDecl->getType()->isDependentType() ||
4797  (LB && LB->isValueDependent()) || (UB && UB->isValueDependent()) ||
4798  (Step && Step->isValueDependent());
4799 }
4800 
4801 bool OpenMPIterationSpaceChecker::setLCDeclAndLB(ValueDecl *NewLCDecl,
4802  Expr *NewLCRefExpr,
4803  Expr *NewLB, bool EmitDiags) {
4804  // State consistency checking to ensure correct usage.
4805  assert(LCDecl == nullptr && LB == nullptr && LCRef == nullptr &&
4806  UB == nullptr && Step == nullptr && !TestIsLessOp && !TestIsStrictOp);
4807  if (!NewLCDecl || !NewLB)
4808  return true;
4809  LCDecl = getCanonicalDecl(NewLCDecl);
4810  LCRef = NewLCRefExpr;
4811  if (auto *CE = dyn_cast_or_null<CXXConstructExpr>(NewLB))
4812  if (const CXXConstructorDecl *Ctor = CE->getConstructor())
4813  if ((Ctor->isCopyOrMoveConstructor() ||
4814  Ctor->isConvertingConstructor(/*AllowExplicit=*/false)) &&
4815  CE->getNumArgs() > 0 && CE->getArg(0) != nullptr)
4816  NewLB = CE->getArg(0)->IgnoreParenImpCasts();
4817  LB = NewLB;
4818  if (EmitDiags)
4819  InitDependOnLC = doesDependOnLoopCounter(LB, /*IsInitializer=*/true);
4820  return false;
4821 }
4822 
4823 bool OpenMPIterationSpaceChecker::setUB(Expr *NewUB,
4824  llvm::Optional<bool> LessOp,
4825  bool StrictOp, SourceRange SR,
4826  SourceLocation SL) {
4827  // State consistency checking to ensure correct usage.
4828  assert(LCDecl != nullptr && LB != nullptr && UB == nullptr &&
4829  Step == nullptr && !TestIsLessOp && !TestIsStrictOp);
4830  if (!NewUB)
4831  return true;
4832  UB = NewUB;
4833  if (LessOp)
4834  TestIsLessOp = LessOp;
4835  TestIsStrictOp = StrictOp;
4836  ConditionSrcRange = SR;
4837  ConditionLoc = SL;
4838  CondDependOnLC = doesDependOnLoopCounter(UB, /*IsInitializer=*/false);
4839  return false;
4840 }
4841 
4842 bool OpenMPIterationSpaceChecker::setStep(Expr *NewStep, bool Subtract) {
4843  // State consistency checking to ensure correct usage.
4844  assert(LCDecl != nullptr && LB != nullptr && Step == nullptr);
4845  if (!NewStep)
4846  return true;
4847  if (!NewStep->isValueDependent()) {
4848  // Check that the step is integer expression.
4849  SourceLocation StepLoc = NewStep->getBeginLoc();
4850     ExprResult Val = SemaRef.PerformOpenMPImplicitIntegerConversion(
4851         StepLoc, getExprAsWritten(NewStep));
4852  if (Val.isInvalid())
4853  return true;
4854  NewStep = Val.get();
4855 
4856  // OpenMP [2.6, Canonical Loop Form, Restrictions]
4857  // If test-expr is of form var relational-op b and relational-op is < or
4858  // <= then incr-expr must cause var to increase on each iteration of the
4859  // loop. If test-expr is of form var relational-op b and relational-op is
4860  // > or >= then incr-expr must cause var to decrease on each iteration of
4861  // the loop.
4862  // If test-expr is of form b relational-op var and relational-op is < or
4863  // <= then incr-expr must cause var to decrease on each iteration of the
4864  // loop. If test-expr is of form b relational-op var and relational-op is
4865  // > or >= then incr-expr must cause var to increase on each iteration of
4866  // the loop.
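  // Illustrative sketch (hypothetical user code): the check below rejects steps
  // that cannot make progress toward the loop bound, e.g.:
  //
  //   #pragma omp simd
  //   for (int i = 0; i < N; i -= 1)   // err_omp_loop_incr_not_compatible:
  //     ...                            // 'i < N' requires an increasing 'i'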
4867  llvm::APSInt Result;
4868  bool IsConstant = NewStep->isIntegerConstantExpr(Result, SemaRef.Context);
4869  bool IsUnsigned = !NewStep->getType()->hasSignedIntegerRepresentation();
4870  bool IsConstNeg =
4871  IsConstant && Result.isSigned() && (Subtract != Result.isNegative());
4872  bool IsConstPos =
4873  IsConstant && Result.isSigned() && (Subtract == Result.isNegative());
4874  bool IsConstZero = IsConstant && !Result.getBoolValue();
4875 
4876  // != with increment is treated as <; != with decrement is treated as >
4877  if (!TestIsLessOp.hasValue())
4878  TestIsLessOp = IsConstPos || (IsUnsigned && !Subtract);
4879  if (UB && (IsConstZero ||
4880  (TestIsLessOp.getValue() ?
4881  (IsConstNeg || (IsUnsigned && Subtract)) :
4882  (IsConstPos || (IsUnsigned && !Subtract))))) {
4883  SemaRef.Diag(NewStep->getExprLoc(),
4884  diag::err_omp_loop_incr_not_compatible)
4885  << LCDecl << TestIsLessOp.getValue() << NewStep->getSourceRange();
4886  SemaRef.Diag(ConditionLoc,
4887  diag::note_omp_loop_cond_requres_compatible_incr)
4888  << TestIsLessOp.getValue() << ConditionSrcRange;
4889  return true;
4890  }
4891  if (TestIsLessOp.getValue() == Subtract) {
4892  NewStep =
4893  SemaRef.CreateBuiltinUnaryOp(NewStep->getExprLoc(), UO_Minus, NewStep)
4894  .get();
4895  Subtract = !Subtract;
4896  }
4897  }
4898 
4899  Step = NewStep;
4900  SubtractStep = Subtract;
4901  return false;
4902 }
4903 
4904 namespace {
4905 /// Checker for non-rectangular loops. Checks if the initializer or
4906 /// condition expression references the loop counter variable.
4907 class LoopCounterRefChecker final
4908  : public ConstStmtVisitor<LoopCounterRefChecker, bool> {
4909  Sema &SemaRef;
4910  DSAStackTy &Stack;
4911  const ValueDecl *CurLCDecl = nullptr;
4912  const ValueDecl *DepDecl = nullptr;
4913  const ValueDecl *PrevDepDecl = nullptr;
4914  bool IsInitializer = true;
4915  unsigned BaseLoopId = 0;
4916  bool checkDecl(const Expr *E, const ValueDecl *VD) {
4917  if (getCanonicalDecl(VD) == getCanonicalDecl(CurLCDecl)) {
4918  SemaRef.Diag(E->getExprLoc(), diag::err_omp_stmt_depends_on_loop_counter)
4919  << (IsInitializer ? 0 : 1);
4920  return false;
4921  }
4922  const auto &&Data = Stack.isLoopControlVariable(VD);
4923  // OpenMP, 2.9.1 Canonical Loop Form, Restrictions.
4924  // The loop iterator on which we depend may not have a random access
4925  // iterator type.
4926  if (Data.first && VD->getType()->isRecordType()) {
4927  SmallString<128> Name;
4928  llvm::raw_svector_ostream OS(Name);
4929  VD->getNameForDiagnostic(OS, SemaRef.getPrintingPolicy(),
4930  /*Qualified=*/true);
4931  SemaRef.Diag(E->getExprLoc(),
4932  diag::err_omp_wrong_dependency_iterator_type)
4933  << OS.str();
4934  SemaRef.Diag(VD->getLocation(), diag::note_previous_decl) << VD;
4935  return false;
4936  }
4937  if (Data.first &&
4938  (DepDecl || (PrevDepDecl &&
4939  getCanonicalDecl(VD) != getCanonicalDecl(PrevDepDecl)))) {
4940  if (!DepDecl && PrevDepDecl)
4941  DepDecl = PrevDepDecl;
4942  SmallString<128> Name;
4943  llvm::raw_svector_ostream OS(Name);
4944  DepDecl->getNameForDiagnostic(OS, SemaRef.getPrintingPolicy(),
4945  /*Qualified=*/true);
4946  SemaRef.Diag(E->getExprLoc(),
4947  diag::err_omp_invariant_or_linear_dependency)
4948  << OS.str();
4949  return false;
4950  }
4951  if (Data.first) {
4952  DepDecl = VD;
4953  BaseLoopId = Data.first;
4954  }
4955  return Data.first;
4956  }
4957 
4958 public:
4959  bool VisitDeclRefExpr(const DeclRefExpr *E) {
4960  const ValueDecl *VD = E->getDecl();
4961  if (isa<VarDecl>(VD))
4962  return checkDecl(E, VD);
4963  return false;
4964  }
4965  bool VisitMemberExpr(const MemberExpr *E) {
4966  if (isa<CXXThisExpr>(E->getBase()->IgnoreParens())) {
4967  const ValueDecl *VD = E->getMemberDecl();
4968  return checkDecl(E, VD);
4969  }
4970  return false;
4971  }
4972  bool VisitStmt(const Stmt *S) {
4973  bool Res = true;
4974  for (const Stmt *Child : S->children())
4975  Res = Child && Visit(Child) && Res;
4976  return Res;
4977  }
4978  explicit LoopCounterRefChecker(Sema &SemaRef, DSAStackTy &Stack,
4979  const ValueDecl *CurLCDecl, bool IsInitializer,
4980  const ValueDecl *PrevDepDecl = nullptr)
4981  : SemaRef(SemaRef), Stack(Stack), CurLCDecl(CurLCDecl),
4982  PrevDepDecl(PrevDepDecl), IsInitializer(IsInitializer) {}
4983  unsigned getBaseLoopId() const {
4984  assert(CurLCDecl && "Expected loop dependency.");
4985  return BaseLoopId;
4986  }
4987  const ValueDecl *getDepDecl() const {
4988  assert(CurLCDecl && "Expected loop dependency.");
4989  return DepDecl;
4990  }
4991 };
4992 } // namespace
4993 
4994 Optional<unsigned>
4995 OpenMPIterationSpaceChecker::doesDependOnLoopCounter(const Stmt *S,
4996  bool IsInitializer) {
4997  // Check for the non-rectangular loops.
4998  LoopCounterRefChecker LoopStmtChecker(SemaRef, Stack, LCDecl, IsInitializer,
4999  DepDecl);
5000  if (LoopStmtChecker.Visit(S)) {
5001  DepDecl = LoopStmtChecker.getDepDecl();
5002  return LoopStmtChecker.getBaseLoopId();
5003  }
5004  return llvm::None;
5005 }
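// Illustrative sketch (hypothetical user code): a non-rectangular loop nest,
// where an inner loop's bounds depend on an outer loop counter, is the
// situation this checker detects:
//
//   #pragma omp for collapse(2)
//   for (int i = 0; i < N; ++i)
//     for (int j = i; j < N; ++j)   // init of 'j' depends on loop counter 'i'
//       ...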
5006 
5007 bool OpenMPIterationSpaceChecker::checkAndSetInit(Stmt *S, bool EmitDiags) {
5008  // Check init-expr for canonical loop form and save loop counter
5009  // variable - #Var and its initialization value - #LB.
5010  // OpenMP [2.6] Canonical loop form. init-expr may be one of the following:
5011  // var = lb
5012  // integer-type var = lb
5013  // random-access-iterator-type var = lb
5014  // pointer-type var = lb
5015  //
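  // For example (hypothetical user code), all of the following inits are
  // accepted: 'i = 0', 'int i = 0', 'It i = v.begin()', 'T *p = base';
  // 'float f = 0.0f' is rejected below as a non-canonical loop variable type.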
5016  if (!S) {
5017  if (EmitDiags) {
5018  SemaRef.Diag(DefaultLoc, diag::err_omp_loop_not_canonical_init);
5019  }
5020  return true;
5021  }
5022  if (auto *ExprTemp = dyn_cast<ExprWithCleanups>(S))
5023  if (!ExprTemp->cleanupsHaveSideEffects())
5024  S = ExprTemp->getSubExpr();
5025 
5026  InitSrcRange = S->getSourceRange();
5027  if (Expr *E = dyn_cast<Expr>(S))
5028  S = E->IgnoreParens();
5029  if (auto *BO = dyn_cast<BinaryOperator>(S)) {
5030  if (BO->getOpcode() == BO_Assign) {
5031  Expr *LHS = BO->getLHS()->IgnoreParens();
5032  if (auto *DRE = dyn_cast<DeclRefExpr>(LHS)) {
5033  if (auto *CED = dyn_cast<OMPCapturedExprDecl>(DRE->getDecl()))
5034  if (auto *ME = dyn_cast<MemberExpr>(getExprAsWritten(CED->getInit())))
5035  return setLCDeclAndLB(ME->getMemberDecl(), ME, BO->getRHS(),
5036  EmitDiags);
5037  return setLCDeclAndLB(DRE->getDecl(), DRE, BO->getRHS(), EmitDiags);
5038  }
5039  if (auto *ME = dyn_cast<MemberExpr>(LHS)) {
5040  if (ME->isArrow() &&
5041  isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts()))
5042  return setLCDeclAndLB(ME->getMemberDecl(), ME, BO->getRHS(),
5043  EmitDiags);
5044  }
5045  }
5046  } else if (auto *DS = dyn_cast<DeclStmt>(S)) {
5047  if (DS->isSingleDecl()) {
5048  if (auto *Var = dyn_cast_or_null<VarDecl>(DS->getSingleDecl())) {
5049  if (Var->hasInit() && !Var->getType()->isReferenceType()) {
5050  // Accept non-canonical init form here but emit ext. warning.
5051  if (Var->getInitStyle() != VarDecl::CInit && EmitDiags)
5052  SemaRef.Diag(S->getBeginLoc(),
5053  diag::ext_omp_loop_not_canonical_init)
5054  << S->getSourceRange();
5055  return setLCDeclAndLB(
5056  Var,
5057  buildDeclRefExpr(SemaRef, Var,
5058  Var->getType().getNonReferenceType(),
5059  DS->getBeginLoc()),
5060  Var->getInit(), EmitDiags);
5061  }
5062  }
5063  }
5064  } else if (auto *CE = dyn_cast<CXXOperatorCallExpr>(S)) {
5065  if (CE->getOperator() == OO_Equal) {
5066  Expr *LHS = CE->getArg(0);
5067  if (auto *DRE = dyn_cast<DeclRefExpr>(LHS)) {
5068  if (auto *CED = dyn_cast<OMPCapturedExprDecl>(DRE->getDecl()))
5069  if (auto *ME = dyn_cast<MemberExpr>(getExprAsWritten(CED->getInit())))
5070  return setLCDeclAndLB(ME->getMemberDecl(), ME, BO->getRHS(),
5071  EmitDiags);
5072  return setLCDeclAndLB(DRE->getDecl(), DRE, CE->getArg(1), EmitDiags);
5073  }
5074  if (auto *ME = dyn_cast<MemberExpr>(LHS)) {
5075  if (ME->isArrow() &&
5076  isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts()))
5077  return setLCDeclAndLB(ME->getMemberDecl(), ME, BO->getRHS(),
5078  EmitDiags);
5079  }
5080  }
5081  }
5082 
5083  if (dependent() || SemaRef.CurContext->isDependentContext())
5084  return false;
5085  if (EmitDiags) {
5086  SemaRef.Diag(S->getBeginLoc(), diag::err_omp_loop_not_canonical_init)
5087  << S->getSourceRange();
5088  }
5089  return true;
5090 }
5091 
5092 /// Ignore parentheses, implicit casts, and copy constructors, and return the
5093 /// variable (which may be the loop variable) if possible.
5094 static const ValueDecl *getInitLCDecl(const Expr *E) {
5095  if (!E)
5096  return nullptr;
5097  E = getExprAsWritten(E);
5098  if (const auto *CE = dyn_cast_or_null<CXXConstructExpr>(E))
5099  if (const CXXConstructorDecl *Ctor = CE->getConstructor())
5100  if ((Ctor->isCopyOrMoveConstructor() ||
5101  Ctor->isConvertingConstructor(/*AllowExplicit=*/false)) &&
5102  CE->getNumArgs() > 0 && CE->getArg(0) != nullptr)
5103  E = CE->getArg(0)->IgnoreParenImpCasts();
5104  if (const auto *DRE = dyn_cast_or_null<DeclRefExpr>(E)) {
5105  if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()))
5106  return getCanonicalDecl(VD);
5107  }
5108  if (const auto *ME = dyn_cast_or_null<MemberExpr>(E))
5109  if (ME->isArrow() && isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts()))
5110  return getCanonicalDecl(ME->getMemberDecl());
5111  return nullptr;
5112 }
5113 
5114 bool OpenMPIterationSpaceChecker::checkAndSetCond(Expr *S) {
5115  // Check test-expr for canonical form, save upper-bound UB, flags for
5116  // less/greater and for strict/non-strict comparison.
5117  // OpenMP [2.6] Canonical loop form. Test-expr may be one of the following:
5118  // var relational-op b
5119  // b relational-op var
5120  //
5121  if (!S) {
5122  SemaRef.Diag(DefaultLoc, diag::err_omp_loop_not_canonical_cond) << LCDecl;
5123  return true;
5124  }
5125  S = getExprAsWritten(S);
5126  SourceLocation CondLoc = S->getBeginLoc();
5127  if (auto *BO = dyn_cast<BinaryOperator>(S)) {
5128  if (BO->isRelationalOp()) {
5129  if (getInitLCDecl(BO->getLHS()) == LCDecl)
5130  return setUB(BO->getRHS(),
5131  (BO->getOpcode() == BO_LT || BO->getOpcode() == BO_LE),
5132  (BO->getOpcode() == BO_LT || BO->getOpcode() == BO_GT),
5133  BO->getSourceRange(), BO->getOperatorLoc());
5134  if (getInitLCDecl(BO->getRHS()) == LCDecl)
5135  return setUB(BO->getLHS(),
5136  (BO->getOpcode() == BO_GT || BO->getOpcode() == BO_GE),
5137  (BO->getOpcode() == BO_LT || BO->getOpcode() == BO_GT),
5138  BO->getSourceRange(), BO->getOperatorLoc());
5139  } else if (BO->getOpcode() == BO_NE)
5140  return setUB(getInitLCDecl(BO->getLHS()) == LCDecl ?
5141  BO->getRHS() : BO->getLHS(),
5142  /*LessOp=*/llvm::None,
5143  /*StrictOp=*/true,
5144  BO->getSourceRange(), BO->getOperatorLoc());
5145  } else if (auto *CE = dyn_cast<CXXOperatorCallExpr>(S)) {
5146  if (CE->getNumArgs() == 2) {
5147  auto Op = CE->getOperator();
5148  switch (Op) {
5149  case OO_Greater:
5150  case OO_GreaterEqual:
5151  case OO_Less:
5152  case OO_LessEqual:
5153  if (getInitLCDecl(CE->getArg(0)) == LCDecl)
5154  return setUB(CE->getArg(1), Op == OO_Less || Op == OO_LessEqual,
5155  Op == OO_Less || Op == OO_Greater, CE->getSourceRange(),
5156  CE->getOperatorLoc());
5157  if (getInitLCDecl(CE->getArg(1)) == LCDecl)
5158  return setUB(CE->getArg(0), Op == OO_Greater || Op == OO_GreaterEqual,
5159  Op == OO_Less || Op == OO_Greater, CE->getSourceRange(),
5160  CE->getOperatorLoc());
5161  break;
5162  case OO_ExclaimEqual:
5163  return setUB(getInitLCDecl(CE->getArg(0)) == LCDecl ?
5164  CE->getArg(1) : CE->getArg(0),
5165  /*LessOp=*/llvm::None,
5166  /*StrictOp=*/true,
5167  CE->getSourceRange(),
5168  CE->getOperatorLoc());
5169  break;
5170  default:
5171  break;
5172  }
5173  }
5174  }
5175  if (dependent() || SemaRef.CurContext->isDependentContext())
5176  return false;
5177  SemaRef.Diag(CondLoc, diag::err_omp_loop_not_canonical_cond)
5178  << S->getSourceRange() << LCDecl;
5179  return true;
5180 }
5181 
5182 bool OpenMPIterationSpaceChecker::checkAndSetIncRHS(Expr *RHS) {
5183  // RHS of canonical loop form increment can be:
5184  // var + incr
5185  // incr + var
5186  // var - incr
5187  //
5188  RHS = RHS->IgnoreParenImpCasts();
5189  if (auto *BO = dyn_cast<BinaryOperator>(RHS)) {
5190  if (BO->isAdditiveOp()) {
5191  bool IsAdd = BO->getOpcode() == BO_Add;
5192  if (getInitLCDecl(BO->getLHS()) == LCDecl)
5193  return setStep(BO->getRHS(), !IsAdd);
5194  if (IsAdd && getInitLCDecl(BO->getRHS()) == LCDecl)
5195  return setStep(BO->getLHS(), /*Subtract=*/false);
5196  }
5197  } else if (auto *CE = dyn_cast<CXXOperatorCallExpr>(RHS)) {
5198  bool IsAdd = CE->getOperator() == OO_Plus;
5199  if ((IsAdd || CE->getOperator() == OO_Minus) && CE->getNumArgs() == 2) {
5200  if (getInitLCDecl(CE->getArg(0)) == LCDecl)
5201  return setStep(CE->getArg(1), !IsAdd);
5202  if (IsAdd && getInitLCDecl(CE->getArg(1)) == LCDecl)
5203  return setStep(CE->getArg(0), /*Subtract=*/false);
5204  }
5205  }
5206  if (dependent() || SemaRef.CurContext->isDependentContext())
5207  return false;
5208  SemaRef.Diag(RHS->getBeginLoc(), diag::err_omp_loop_not_canonical_incr)
5209  << RHS->getSourceRange() << LCDecl;
5210  return true;
5211 }
5212 
5213 bool OpenMPIterationSpaceChecker::checkAndSetInc(Expr *S) {
5214  // Check incr-expr for canonical loop form and return true if it
5215  // does not conform.
5216  // OpenMP [2.6] Canonical loop form. incr-expr may be one of the following:
5217  // ++var
5218  // var++
5219  // --var
5220  // var--
5221  // var += incr
5222  // var -= incr
5223  // var = var + incr
5224  // var = incr + var
5225  // var = var - incr
5226  //
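  // For example (hypothetical user code), 'for (i = 0; i < N; i = i + 2)' and
  // 'for (i = N; i > 0; i -= k)' are accepted, while 'i *= 2' is not, since it
  // matches none of the forms above.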
5227  if (!S) {
5228  SemaRef.Diag(DefaultLoc, diag::err_omp_loop_not_canonical_incr) << LCDecl;
5229  return true;
5230  }
5231  if (auto *ExprTemp = dyn_cast<ExprWithCleanups>(S))
5232  if (!ExprTemp->cleanupsHaveSideEffects())
5233  S = ExprTemp->getSubExpr();
5234 
5235  IncrementSrcRange = S->getSourceRange();
5236  S = S->IgnoreParens();
5237  if (auto *UO = dyn_cast<UnaryOperator>(S)) {
5238  if (UO->isIncrementDecrementOp() &&
5239  getInitLCDecl(UO->getSubExpr()) == LCDecl)
5240  return setStep(SemaRef
5241  .ActOnIntegerConstant(UO->getBeginLoc(),
5242  (UO->isDecrementOp() ? -1 : 1))
5243  .get(),
5244  /*Subtract=*/false);
5245  } else if (auto *BO = dyn_cast<BinaryOperator>(S)) {
5246  switch (BO->getOpcode()) {
5247  case BO_AddAssign:
5248  case BO_SubAssign:
5249  if (getInitLCDecl(BO->getLHS()) == LCDecl)
5250  return setStep(BO->getRHS(), BO->getOpcode() == BO_SubAssign);
5251  break;
5252  case BO_Assign:
5253  if (getInitLCDecl(BO->getLHS()) == LCDecl)
5254  return checkAndSetIncRHS(BO->getRHS());
5255  break;
5256  default:
5257  break;
5258  }
5259  } else if (auto *CE = dyn_cast<CXXOperatorCallExpr>(S)) {
5260  switch (CE->getOperator()) {
5261  case OO_PlusPlus:
5262  case OO_MinusMinus:
5263  if (getInitLCDecl(CE->getArg(0)) == LCDecl)
5264  return setStep(SemaRef
5265  .ActOnIntegerConstant(
5266  CE->getBeginLoc(),
5267  ((CE->getOperator() == OO_MinusMinus) ? -1 : 1))
5268  .get(),
5269  /*Subtract=*/false);
5270  break;
5271  case OO_PlusEqual:
5272  case OO_MinusEqual:
5273  if (getInitLCDecl(CE->getArg(0)) == LCDecl)
5274  return setStep(CE->getArg(1), CE->getOperator() == OO_MinusEqual);
5275  break;
5276  case OO_Equal:
5277  if (getInitLCDecl(CE->getArg(0)) == LCDecl)
5278  return checkAndSetIncRHS(CE->getArg(1));
5279  break;
5280  default:
5281  break;
5282  }
5283  }
5284  if (dependent() || SemaRef.CurContext->isDependentContext())
5285  return false;
5286  SemaRef.Diag(S->getBeginLoc(), diag::err_omp_loop_not_canonical_incr)
5287  << S->getSourceRange() << LCDecl;
5288  return true;
5289 }
5290 
5291 static ExprResult
5292 tryBuildCapture(Sema &SemaRef, Expr *Capture,
5293  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) {
5294  if (SemaRef.CurContext->isDependentContext())
5295  return ExprResult(Capture);
5296  if (Capture->isEvaluatable(SemaRef.Context, Expr::SE_AllowSideEffects))
5297  return SemaRef.PerformImplicitConversion(
5298  Capture->IgnoreImpCasts(), Capture->getType(), Sema::AA_Converting,
5299  /*AllowExplicit=*/true);
5300  auto I = Captures.find(Capture);
5301  if (I != Captures.end())
5302  return buildCapture(SemaRef, Capture, I->second);
5303  DeclRefExpr *Ref = nullptr;
5304  ExprResult Res = buildCapture(SemaRef, Capture, Ref);
5305  Captures[Capture] = Ref;
5306  return Res;
5307 }
5308 
5309 /// Build the expression to calculate the number of iterations.
5310 Expr *OpenMPIterationSpaceChecker::buildNumIterations(
5311  Scope *S, const bool LimitedType,
5312  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) const {
5313  ExprResult Diff;
5314  QualType VarType = LCDecl->getType().getNonReferenceType();
5315  if (VarType->isIntegerType() || VarType->isPointerType() ||
5316  SemaRef.getLangOpts().CPlusPlus) {
5317  // Upper - Lower
5318  Expr *UBExpr = TestIsLessOp.getValue() ? UB : LB;
5319  Expr *LBExpr = TestIsLessOp.getValue() ? LB : UB;
5320  Expr *Upper = tryBuildCapture(SemaRef, UBExpr, Captures).get();
5321  Expr *Lower = tryBuildCapture(SemaRef, LBExpr, Captures).get();
5322  if (!Upper || !Lower)
5323  return nullptr;
5324 
5325  Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Sub, Upper, Lower);
5326 
5327  if (!Diff.isUsable() && VarType->getAsCXXRecordDecl()) {
5328  // BuildBinOp already emitted an error; this one points the user to the
5329  // upper and lower bounds and tells what is passed to 'operator-'.
5330  SemaRef.Diag(Upper->getBeginLoc(), diag::err_omp_loop_diff_cxx)
5331  << Upper->getSourceRange() << Lower->getSourceRange();
5332  return nullptr;
5333  }
5334  }
5335 
5336  if (!Diff.isUsable())
5337  return nullptr;
5338 
5339  // Upper - Lower [- 1]
5340  if (TestIsStrictOp)
5341  Diff = SemaRef.BuildBinOp(
5342  S, DefaultLoc, BO_Sub, Diff.get(),
5343  SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get());
5344  if (!Diff.isUsable())
5345  return nullptr;
5346 
5347  // Upper - Lower [- 1] + Step
5348  ExprResult NewStep = tryBuildCapture(SemaRef, Step, Captures);
5349  if (!NewStep.isUsable())
5350  return nullptr;
5351  Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Add, Diff.get(), NewStep.get());
5352  if (!Diff.isUsable())
5353  return nullptr;
5354 
5355  // Parentheses (for dumping/debugging purposes only).
5356  Diff = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, Diff.get());
5357  if (!Diff.isUsable())
5358  return nullptr;
5359 
5360  // (Upper - Lower [- 1] + Step) / Step
5361  Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Div, Diff.get(), NewStep.get());
5362  if (!Diff.isUsable())
5363  return nullptr;
5364 
5365  // OpenMP runtime requires 32-bit or 64-bit loop variables.
5366  QualType Type = Diff.get()->getType();
5367  ASTContext &C = SemaRef.Context;
5368  bool UseVarType = VarType->hasIntegerRepresentation() &&
5369  C.getTypeSize(Type) > C.getTypeSize(VarType);
5370  if (!Type->isIntegerType() || UseVarType) {
5371  unsigned NewSize =
5372  UseVarType ? C.getTypeSize(VarType) : C.getTypeSize(Type);
5373  bool IsSigned = UseVarType ? VarType->hasSignedIntegerRepresentation()
5374                                : Type->hasSignedIntegerRepresentation();
5375     Type = C.getIntTypeForBitwidth(NewSize, IsSigned);
5376  if (!SemaRef.Context.hasSameType(Diff.get()->getType(), Type)) {
5377  Diff = SemaRef.PerformImplicitConversion(
5378  Diff.get(), Type, Sema::AA_Converting, /*AllowExplicit=*/true);
5379  if (!Diff.isUsable())
5380  return nullptr;
5381  }
5382  }
5383  if (LimitedType) {
5384  unsigned NewSize = (C.getTypeSize(Type) > 32) ? 64 : 32;
5385  if (NewSize != C.getTypeSize(Type)) {
5386  if (NewSize < C.getTypeSize(Type)) {
5387  assert(NewSize == 64 && "incorrect loop var size");
5388  SemaRef.Diag(DefaultLoc, diag::warn_omp_loop_64_bit_var)
5389  << InitSrcRange << ConditionSrcRange;
5390  }
5391  QualType NewType = C.getIntTypeForBitwidth(
5392  NewSize, Type->hasSignedIntegerRepresentation() ||
5393  C.getTypeSize(Type) < NewSize);
5394  if (!SemaRef.Context.hasSameType(Diff.get()->getType(), NewType)) {
5395  Diff = SemaRef.PerformImplicitConversion(Diff.get(), NewType,
5396  Sema::AA_Converting, true);
5397  if (!Diff.isUsable())
5398  return nullptr;
5399  }
5400  }
5401  }
5402 
5403  return Diff.get();
5404 }
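// Worked example (illustrative, not from this file): for
// 'for (int i = 0; i < 10; i += 3)' the computation above yields
// (Upper - Lower - 1 + Step) / Step = (10 - 0 - 1 + 3) / 3 = 4 iterations,
// i.e. i = 0, 3, 6, 9.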
5405 
5406 Expr *OpenMPIterationSpaceChecker::buildPreCond(
5407  Scope *S, Expr *Cond,
5408  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) const {
5409  // Try to build LB <op> UB, where <op> is <, >, <=, or >=.
5410  bool Suppress = SemaRef.getDiagnostics().getSuppressAllDiagnostics();
5411  SemaRef.getDiagnostics().setSuppressAllDiagnostics(/*Val=*/true);
5412 
5413  ExprResult NewLB = tryBuildCapture(SemaRef, LB, Captures);
5414  ExprResult NewUB = tryBuildCapture(SemaRef, UB, Captures);
5415  if (!NewLB.isUsable() || !NewUB.isUsable())
5416  return nullptr;
5417 
5418  ExprResult CondExpr =
5419  SemaRef.BuildBinOp(S, DefaultLoc,
5420  TestIsLessOp.getValue() ?
5421  (TestIsStrictOp ? BO_LT : BO_LE) :
5422  (TestIsStrictOp ? BO_GT : BO_GE),
5423  NewLB.get(), NewUB.get());
5424  if (CondExpr.isUsable()) {
5425  if (!SemaRef.Context.hasSameUnqualifiedType(CondExpr.get()->getType(),
5426  SemaRef.Context.BoolTy))
5427  CondExpr = SemaRef.PerformImplicitConversion(
5428  CondExpr.get(), SemaRef.Context.BoolTy, /*Action=*/Sema::AA_Casting,
5429  /*AllowExplicit=*/true);
5430  }
5431  SemaRef.getDiagnostics().setSuppressAllDiagnostics(Suppress);
5432  // Otherwise use the original loop condition and evaluate it at runtime.
5433  return CondExpr.isUsable() ? CondExpr.get() : Cond;
5434 }
5435 
5436 /// Build reference expression to the counter to be used for codegen.
5437 DeclRefExpr *OpenMPIterationSpaceChecker::buildCounterVar(
5438  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures,
5439  DSAStackTy &DSA) const {
5440  auto *VD = dyn_cast<VarDecl>(LCDecl);
5441  if (!VD) {
5442  VD = SemaRef.isOpenMPCapturedDecl(LCDecl);
5443     DeclRefExpr *Ref = buildDeclRefExpr(
5444         SemaRef, VD, VD->getType().getNonReferenceType(), DefaultLoc);
5445  const DSAStackTy::DSAVarData Data =
5446  DSA.getTopDSA(LCDecl, /*FromParent=*/false);
5447  // If the loop control decl is explicitly marked as private, do not mark it
5448  // as captured again.
5449  if (!isOpenMPPrivate(Data.CKind) || !Data.RefExpr)
5450  Captures.insert(std::make_pair(LCRef, Ref));
5451  return Ref;
5452  }
5453  return cast<DeclRefExpr>(LCRef);
5454 }
5455 
5456 Expr *OpenMPIterationSpaceChecker::buildPrivateCounterVar() const {
5457  if (LCDecl && !LCDecl->isInvalidDecl()) {
5458  QualType Type = LCDecl->getType().getNonReferenceType();
5459  VarDecl *PrivateVar = buildVarDecl(
5460  SemaRef, DefaultLoc, Type, LCDecl->getName(),
5461  LCDecl->hasAttrs() ? &LCDecl->getAttrs() : nullptr,
5462  isa<VarDecl>(LCDecl)
5463  ? buildDeclRefExpr(SemaRef, cast<VarDecl>(LCDecl), Type, DefaultLoc)
5464  : nullptr);
5465  if (PrivateVar->isInvalidDecl())
5466  return nullptr;
5467  return buildDeclRefExpr(SemaRef, PrivateVar, Type, DefaultLoc);
5468  }
5469  return nullptr;
5470 }
5471 
5472 /// Build initialization of the counter to be used for codegen.
5473 Expr *OpenMPIterationSpaceChecker::buildCounterInit() const { return LB; }
5474 
5475 /// Build step of the counter to be used for codegen.
5476 Expr *OpenMPIterationSpaceChecker::buildCounterStep() const { return Step; }
5477 
5478 Expr *OpenMPIterationSpaceChecker::buildOrderedLoopData(
5479  Scope *S, Expr *Counter,
5480  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures, SourceLocation Loc,
5481  Expr *Inc, OverloadedOperatorKind OOK) {
5482  Expr *Cnt = SemaRef.DefaultLvalueConversion(Counter).get();
5483  if (!Cnt)
5484  return nullptr;
5485  if (Inc) {
5486  assert((OOK == OO_Plus || OOK == OO_Minus) &&
5487  "Expected only + or - operations for depend clauses.");
5488  BinaryOperatorKind BOK = (OOK == OO_Plus) ? BO_Add : BO_Sub;
5489  Cnt = SemaRef.BuildBinOp(S, Loc, BOK, Cnt, Inc).get();
5490  if (!Cnt)
5491  return nullptr;
5492  }
5493  ExprResult Diff;
5494  QualType VarType = LCDecl->getType().getNonReferenceType();
5495  if (VarType->isIntegerType() || VarType->isPointerType() ||
5496  SemaRef.getLangOpts().CPlusPlus) {
5497  // Upper - Lower
5498  Expr *Upper = TestIsLessOp.getValue()
5499  ? Cnt
5500  : tryBuildCapture(SemaRef, UB, Captures).get();
5501  Expr *Lower = TestIsLessOp.getValue()
5502  ? tryBuildCapture(SemaRef, LB, Captures).get()
5503  : Cnt;
5504  if (!Upper || !Lower)
5505  return nullptr;
5506 
5507  Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Sub, Upper, Lower);
5508 
5509  if (!Diff.isUsable() && VarType->getAsCXXRecordDecl()) {
5510  // BuildBinOp already emitted an error; this one points the user to the
5511  // upper and lower bounds and tells what is passed to 'operator-'.
5512  SemaRef.Diag(Upper->getBeginLoc(), diag::err_omp_loop_diff_cxx)
5513  << Upper->getSourceRange() << Lower->getSourceRange();
5514  return nullptr;
5515  }
5516  }
5517 
5518  if (!Diff.isUsable())
5519  return nullptr;
5520 
5521  // Parentheses (for dumping/debugging purposes only).
5522  Diff = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, Diff.get());
5523  if (!Diff.isUsable())
5524  return nullptr;
5525 
5526  ExprResult NewStep = tryBuildCapture(SemaRef, Step, Captures);
5527  if (!NewStep.isUsable())
5528  return nullptr;
5529  // (Upper - Lower) / Step
5530  Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Div, Diff.get(), NewStep.get());
5531  if (!Diff.isUsable())
5532  return nullptr;
5533 
5534  return Diff.get();
5535 }
5536 
5537 /// Iteration space of a single for loop.
5538 struct LoopIterationSpace final {
5539  /// True if the condition operator is the strict compare operator (<, > or
5540  /// !=).
5541  bool IsStrictCompare = false;
5542  /// Condition of the loop.
5543  Expr *PreCond = nullptr;
5544  /// This expression calculates the number of iterations in the loop.
5545  /// It is always possible to calculate it before starting the loop.
5546  Expr *NumIterations = nullptr;
5547  /// The loop counter variable.
5548  Expr *CounterVar = nullptr;
5549  /// Private loop counter variable.
5550  Expr *PrivateCounterVar = nullptr;
5551  /// This is the initializer for the initial value of #CounterVar.
5552  Expr *CounterInit = nullptr;
5553  /// This is the step for #CounterVar used to generate its update:
5554  /// #CounterVar = #CounterInit + #CounterStep * CurrentIteration.
5555  Expr *CounterStep = nullptr;
5556  /// Should step be subtracted?
5557  bool Subtract = false;
5558  /// Source range of the loop init.
5559  SourceRange InitSrcRange;
5560  /// Source range of the loop condition.
5561  SourceRange CondSrcRange;
5562  /// Source range of the loop increment.
5563  SourceRange IncSrcRange;
5564 };
5565 
5566 } // namespace
5567 
5568 void Sema::ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init) {
5569   assert(getLangOpts().OpenMP && "OpenMP is not active.");
5570  assert(Init && "Expected loop in canonical form.");
5571  unsigned AssociatedLoops = DSAStack->getAssociatedLoops();
5572  if (AssociatedLoops > 0 &&
5573  isOpenMPLoopDirective(DSAStack->getCurrentDirective())) {
5574  DSAStack->loopStart();
5575  OpenMPIterationSpaceChecker ISC(*this, *DSAStack, ForLoc);
5576  if (!ISC.checkAndSetInit(Init, /*EmitDiags=*/false)) {
5577  if (ValueDecl *D = ISC.getLoopDecl()) {
5578  auto *VD = dyn_cast<VarDecl>(D);
5579  if (!VD) {
5580  if (VarDecl *Private = isOpenMPCapturedDecl(D)) {
5581  VD = Private;
5582  } else {
5583  DeclRefExpr *Ref = buildCapture(*this, D, ISC.getLoopDeclRefExpr(),
5584  /*WithInit=*/false);
5585  VD = cast<VarDecl>(Ref->getDecl());
5586  }
5587  }
5588  DSAStack->addLoopControlVariable(D, VD);
5589  const Decl *LD = DSAStack->getPossiblyLoopCunter();
5590  if (LD != D->getCanonicalDecl()) {
5591  DSAStack->resetPossibleLoopCounter();
5592  if (auto *Var = dyn_cast_or_null<VarDecl>(LD))
5593  MarkDeclarationsReferencedInExpr(
5594  buildDeclRefExpr(*this, const_cast<VarDecl *>(Var),
5595  Var->getType().getNonLValueExprType(Context),
5596  ForLoc, /*RefersToCapture=*/true));
5597  }
5598  }
5599  }
5600  DSAStack->setAssociatedLoops(AssociatedLoops - 1);
5601  }
5602 }
5603 
5604 /// Called on a for stmt to check and extract its iteration space
5605 /// for further processing (such as collapsing).
5606 static bool checkOpenMPIterationSpace(
5607     OpenMPDirectiveKind DKind, Stmt *S, Sema &SemaRef, DSAStackTy &DSA,
5608  unsigned CurrentNestedLoopCount, unsigned NestedLoopCount,
5609  unsigned TotalNestedLoopCount, Expr *CollapseLoopCountExpr,
5610  Expr *OrderedLoopCountExpr,
5611  Sema::VarsWithInheritedDSAType &VarsWithImplicitDSA,
5612  LoopIterationSpace &ResultIterSpace,
5613  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) {
5614  // OpenMP [2.6, Canonical Loop Form]
5615  // for (init-expr; test-expr; incr-expr) structured-block
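  // Illustrative sketch (hypothetical user code): with collapse(2), each of the
  // two associated statements must itself be a canonical for loop:
  //
  //   #pragma omp for collapse(2)
  //   for (int i = 0; i < N; ++i)
  //     for (int j = 0; j < M; ++j)   // OK
  //       ...
  //
  //   #pragma omp for collapse(2)
  //   for (int i = 0; i < N; ++i)
  //     while (cond())                // err_omp_not_for: not a for statement
  //       ...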
5616  auto *For = dyn_cast_or_null<ForStmt>(S);
5617  if (!For) {
5618  SemaRef.Diag(S->getBeginLoc(), diag::err_omp_not_for)
5619  << (CollapseLoopCountExpr != nullptr || OrderedLoopCountExpr != nullptr)
5620  << getOpenMPDirectiveName(DKind) << TotalNestedLoopCount
5621  << (CurrentNestedLoopCount > 0) << CurrentNestedLoopCount;
5622  if (TotalNestedLoopCount > 1) {
5623  if (CollapseLoopCountExpr && OrderedLoopCountExpr)
5624  SemaRef.Diag(DSA.getConstructLoc(),
5625  diag::note_omp_collapse_ordered_expr)
5626  << 2 << CollapseLoopCountExpr->getSourceRange()
5627  << OrderedLoopCountExpr->getSourceRange();
5628  else if (CollapseLoopCountExpr)
5629  SemaRef.Diag(CollapseLoopCountExpr->getExprLoc(),
5630  diag::note_omp_collapse_ordered_expr)
5631  << 0 << CollapseLoopCountExpr->getSourceRange();
5632  else
5633  SemaRef.Diag(OrderedLoopCountExpr->getExprLoc(),
5634  diag::note_omp_collapse_ordered_expr)
5635  << 1 << OrderedLoopCountExpr->getSourceRange();
5636  }
5637  return true;
5638  }
5639  assert(For->getBody());
5640 
5641  OpenMPIterationSpaceChecker ISC(SemaRef, DSA, For->getForLoc());
5642 
5643  // Check init.
5644  Stmt *Init = For->getInit();
5645  if (ISC.checkAndSetInit(Init))
5646  return true;
5647 
5648  bool HasErrors = false;
5649 
5650  // Check loop variable's type.
5651  if (ValueDecl *LCDecl = ISC.getLoopDecl()) {
5652  Expr *LoopDeclRefExpr = ISC.getLoopDeclRefExpr();
5653 
5654  // OpenMP [2.6, Canonical Loop Form]
5655  // Var is one of the following:
5656  // A variable of signed or unsigned integer type.
5657  // For C++, a variable of a random access iterator type.
5658  // For C, a variable of a pointer type.
5659  QualType VarType = LCDecl->getType().getNonReferenceType();
5660  if (!VarType->isDependentType() && !VarType->isIntegerType() &&
5661  !VarType->isPointerType() &&
5662  !(SemaRef.getLangOpts().CPlusPlus && VarType->isOverloadableType())) {
5663  SemaRef.Diag(Init->getBeginLoc(), diag::err_omp_loop_variable_type)
5664  << SemaRef.getLangOpts().CPlusPlus;
5665  HasErrors = true;
5666  }
5667 
5668  // OpenMP, 2.14.1.1 Data-sharing Attribute Rules for Variables Referenced in
5669  // a Construct
5670  // The loop iteration variable(s) in the associated for-loop(s) of a for or
5671  // parallel for construct is (are) private.
5672  // The loop iteration variable in the associated for-loop of a simd
5673  // construct with just one associated for-loop is linear with a
5674  // constant-linear-step that is the increment of the associated for-loop.
5675  // Exclude loop var from the list of variables with implicitly defined data
5676  // sharing attributes.
5677  VarsWithImplicitDSA.erase(LCDecl);
5678 
5679  // OpenMP [2.14.1.1, Data-sharing Attribute Rules for Variables Referenced
5680  // in a Construct, C/C++].
5681  // The loop iteration variable in the associated for-loop of a simd
5682  // construct with just one associated for-loop may be listed in a linear
5683  // clause with a constant-linear-step that is the increment of the
5684  // associated for-loop.
5685  // The loop iteration variable(s) in the associated for-loop(s) of a for or
5686  // parallel for construct may be listed in a private or lastprivate clause.
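  // Illustrative sketch (hypothetical user code): 'i' below is predetermined
  // private for a worksharing loop, so listing it in a conflicting clause is
  // diagnosed further down (err_omp_loop_var_dsa):
  //
  //   #pragma omp parallel for shared(i)   // error: loop variable must be private
  //   for (i = 0; i < N; ++i)
  //     ...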
5687  DSAStackTy::DSAVarData DVar = DSA.getTopDSA(LCDecl, false);
5688  // If LoopDeclRefExpr is nullptr, it means the corresponding loop variable
5689  // is declared in the loop and is predetermined as private.
5690  OpenMPClauseKind PredeterminedCKind =
5691  isOpenMPSimdDirective(DKind)
5692  ? ((NestedLoopCount == 1) ? OMPC_linear : OMPC_lastprivate)
5693  : OMPC_private;
5694  if (((isOpenMPSimdDirective(DKind) && DVar.CKind != OMPC_unknown &&
5695  DVar.CKind != PredeterminedCKind) ||
5696  ((isOpenMPWorksharingDirective(DKind) || DKind == OMPD_taskloop ||
5697  isOpenMPDistributeDirective(DKind)) &&
5698  !isOpenMPSimdDirective(DKind) && DVar.CKind != OMPC_unknown &&
5699  DVar.CKind != OMPC_private && DVar.CKind != OMPC_lastprivate)) &&
5700  (DVar.CKind != OMPC_private || DVar.RefExpr != nullptr)) {
5701  SemaRef.Diag(Init->getBeginLoc(), diag::err_omp_loop_var_dsa)
5702  << getOpenMPClauseName(DVar.CKind) << getOpenMPDirectiveName(DKind)
5703  << getOpenMPClauseName(PredeterminedCKind);
5704  if (DVar.RefExpr == nullptr)
5705  DVar.CKind = PredeterminedCKind;
5706  reportOriginalDsa(SemaRef, &DSA, LCDecl, DVar, /*IsLoopIterVar=*/true);
5707  HasErrors = true;
5708  } else if (LoopDeclRefExpr != nullptr) {
5709  // Make the loop iteration variable private (for worksharing constructs),
5710  // linear (for simd directives with only one associated loop) or
5711  // lastprivate (for simd directives with several collapsed or ordered
5712  // loops).
5713  if (DVar.CKind == OMPC_unknown)
5714  DSA.addDSA(LCDecl, LoopDeclRefExpr, PredeterminedCKind);
5715  }
5716 
5717  assert(isOpenMPLoopDirective(DKind) && "DSA for non-loop vars");
5718 
5719  // Check test-expr.
5720  HasErrors |= ISC.checkAndSetCond(For->getCond());
5721 
5722  // Check incr-expr.
5723  HasErrors |= ISC.checkAndSetInc(For->getInc());
5724  }
5725 
5726  if (ISC.dependent() || SemaRef.CurContext->isDependentContext() || HasErrors)
5727  return HasErrors;
5728 
5729  // Build the loop's iteration space representation.
5730  ResultIterSpace.PreCond =
5731  ISC.buildPreCond(DSA.getCurScope(), For->getCond(), Captures);
5732  ResultIterSpace.NumIterations = ISC.buildNumIterations(
5733  DSA.getCurScope(),
5734  (isOpenMPWorksharingDirective(DKind) ||
5735        isOpenMPTaskLoopDirective(DKind) || isOpenMPDistributeDirective(DKind)),
5736       Captures);
5737  ResultIterSpace.CounterVar = ISC.buildCounterVar(Captures, DSA);
5738  ResultIterSpace.PrivateCounterVar = ISC.buildPrivateCounterVar();
5739  ResultIterSpace.CounterInit = ISC.buildCounterInit();
5740  ResultIterSpace.CounterStep = ISC.buildCounterStep();
5741  ResultIterSpace.InitSrcRange = ISC.getInitSrcRange();
5742  ResultIterSpace.CondSrcRange = ISC.getConditionSrcRange();
5743  ResultIterSpace.IncSrcRange = ISC.getIncrementSrcRange();
5744  ResultIterSpace.Subtract = ISC.shouldSubtractStep();
5745  ResultIterSpace.IsStrictCompare = ISC.isStrictTestOp();
5746 
5747  HasErrors |= (ResultIterSpace.PreCond == nullptr ||
5748  ResultIterSpace.NumIterations == nullptr ||
5749  ResultIterSpace.CounterVar == nullptr ||
5750  ResultIterSpace.PrivateCounterVar == nullptr ||
5751  ResultIterSpace.CounterInit == nullptr ||
5752  ResultIterSpace.CounterStep == nullptr);
5753  if (!HasErrors && DSA.isOrderedRegion()) {
5754  if (DSA.getOrderedRegionParam().second->getNumForLoops()) {
5755  if (CurrentNestedLoopCount <
5756  DSA.getOrderedRegionParam().second->getLoopNumIterations().size()) {
5757  DSA.getOrderedRegionParam().second->setLoopNumIterations(
5758  CurrentNestedLoopCount, ResultIterSpace.NumIterations);
5759  DSA.getOrderedRegionParam().second->setLoopCounter(
5760  CurrentNestedLoopCount, ResultIterSpace.CounterVar);
5761  }
5762  }
5763  for (auto &Pair : DSA.getDoacrossDependClauses()) {
5764  if (CurrentNestedLoopCount >= Pair.first->getNumLoops()) {
5765  // Erroneous case - clause has some problems.
5766  continue;
5767  }
5768  if (Pair.first->getDependencyKind() == OMPC_DEPEND_sink &&
5769  Pair.second.size() <= CurrentNestedLoopCount) {
5770  // Erroneous case - clause has some problems.
5771  Pair.first->setLoopData(CurrentNestedLoopCount, nullptr);
5772  continue;
5773  }
5774  Expr *CntValue;
5775  if (Pair.first->getDependencyKind() == OMPC_DEPEND_source)
5776  CntValue = ISC.buildOrderedLoopData(
5777  DSA.getCurScope(), ResultIterSpace.CounterVar, Captures,
5778  Pair.first->getDependencyLoc());
5779  else
5780  CntValue = ISC.buildOrderedLoopData(
5781  DSA.getCurScope(), ResultIterSpace.CounterVar, Captures,
5782  Pair.first->getDependencyLoc(),
5783  Pair.second[CurrentNestedLoopCount].first,
5784  Pair.second[CurrentNestedLoopCount].second);
5785  Pair.first->setLoopData(CurrentNestedLoopCount, CntValue);
5786  }
5787  }
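  // Illustrative sketch (hypothetical user code): the doacross handling above
  // records, per loop level, the counter values used by 'depend(source)' and
  // 'depend(sink : vec)' inside an 'ordered(n)' loop:
  //
  //   #pragma omp for ordered(1)
  //   for (int i = 1; i < N; ++i) {
  //     #pragma omp ordered depend(sink : i - 1)
  //     a[i] = a[i - 1] + 1;
  //     #pragma omp ordered depend(source)
  //   }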
5788 
5789  return HasErrors;
5790 }
5791 
5792 /// Build 'VarRef = Start'.
5793 static ExprResult
5794 buildCounterInit(Sema &SemaRef, Scope *S, SourceLocation Loc, ExprResult VarRef,
5795                  ExprResult Start,
5796  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) {
5797  // Build 'VarRef = Start'.
5798  ExprResult NewStart = tryBuildCapture(SemaRef, Start.get(), Captures);
5799  if (!NewStart.isUsable())
5800  return ExprError();
5801  if (!SemaRef.Context.hasSameType(NewStart.get()->getType(),
5802  VarRef.get()->getType())) {
5803  NewStart = SemaRef.PerformImplicitConversion(
5804  NewStart.get(), VarRef.get()->getType(), Sema::AA_Converting,
5805  /*AllowExplicit=*/true);
5806  if (!NewStart.isUsable())
5807  return ExprError();
5808  }
5809 
5810  ExprResult Init =
5811  SemaRef.BuildBinOp(S, Loc, BO_Assign, VarRef.get(), NewStart.get());
5812  return Init;
5813 }
5814