clang  8.0.0svn
SemaOpenMP.cpp
Go to the documentation of this file.
1 //===--- SemaOpenMP.cpp - Semantic Analysis for OpenMP constructs ---------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 /// \file
10 /// This file implements semantic analysis for OpenMP directives and
11 /// clauses.
12 ///
13 //===----------------------------------------------------------------------===//
14 
15 #include "TreeTransform.h"
16 #include "clang/AST/ASTContext.h"
19 #include "clang/AST/Decl.h"
20 #include "clang/AST/DeclCXX.h"
21 #include "clang/AST/DeclOpenMP.h"
22 #include "clang/AST/StmtCXX.h"
23 #include "clang/AST/StmtOpenMP.h"
24 #include "clang/AST/StmtVisitor.h"
27 #include "clang/Sema/Lookup.h"
28 #include "clang/Sema/Scope.h"
29 #include "clang/Sema/ScopeInfo.h"
31 #include "llvm/ADT/PointerEmbeddedInt.h"
32 using namespace clang;
33 
34 //===----------------------------------------------------------------------===//
35 // Stack of data-sharing attributes for variables
36 //===----------------------------------------------------------------------===//
37 
39  Sema &SemaRef, Expr *E,
41  OpenMPClauseKind CKind, bool NoDiagnose);
42 
43 namespace {
44 /// Default data sharing attributes, which can be applied to directive.
46  DSA_unspecified = 0, /// Data sharing attribute not specified.
47  DSA_none = 1 << 0, /// Default data sharing attribute 'none'.
48  DSA_shared = 1 << 1, /// Default data sharing attribute 'shared'.
49 };
50 
51 /// Attributes of the defaultmap clause.
53  DMA_unspecified, /// Default mapping is not specified.
54  DMA_tofrom_scalar, /// Default mapping is 'tofrom:scalar'.
55 };
56 
57 /// Stack for tracking declarations used in OpenMP directives and
58 /// clauses and their data-sharing attributes.
59 class DSAStackTy {
60 public:
61  struct DSAVarData {
64  const Expr *RefExpr = nullptr;
65  DeclRefExpr *PrivateCopy = nullptr;
66  SourceLocation ImplicitDSALoc;
67  DSAVarData() = default;
68  DSAVarData(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind,
69  const Expr *RefExpr, DeclRefExpr *PrivateCopy,
70  SourceLocation ImplicitDSALoc)
71  : DKind(DKind), CKind(CKind), RefExpr(RefExpr),
72  PrivateCopy(PrivateCopy), ImplicitDSALoc(ImplicitDSALoc) {}
73  };
74  using OperatorOffsetTy =
76  using DoacrossDependMapTy =
77  llvm::DenseMap<OMPDependClause *, OperatorOffsetTy>;
78 
79 private:
80  struct DSAInfo {
81  OpenMPClauseKind Attributes = OMPC_unknown;
82  /// Pointer to a reference expression and a flag which shows that the
83  /// variable is marked as lastprivate(true) or not (false).
84  llvm::PointerIntPair<const Expr *, 1, bool> RefExpr;
85  DeclRefExpr *PrivateCopy = nullptr;
86  };
87  using DeclSAMapTy = llvm::SmallDenseMap<const ValueDecl *, DSAInfo, 8>;
88  using AlignedMapTy = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 8>;
89  using LCDeclInfo = std::pair<unsigned, VarDecl *>;
90  using LoopControlVariablesMapTy =
91  llvm::SmallDenseMap<const ValueDecl *, LCDeclInfo, 8>;
92  /// Struct that associates a component with the clause kind where they are
93  /// found.
94  struct MappedExprComponentTy {
97  };
98  using MappedExprComponentsTy =
99  llvm::DenseMap<const ValueDecl *, MappedExprComponentTy>;
100  using CriticalsWithHintsTy =
101  llvm::StringMap<std::pair<const OMPCriticalDirective *, llvm::APSInt>>;
102  struct ReductionData {
103  using BOKPtrType = llvm::PointerEmbeddedInt<BinaryOperatorKind, 16>;
104  SourceRange ReductionRange;
105  llvm::PointerUnion<const Expr *, BOKPtrType> ReductionOp;
106  ReductionData() = default;
107  void set(BinaryOperatorKind BO, SourceRange RR) {
108  ReductionRange = RR;
109  ReductionOp = BO;
110  }
111  void set(const Expr *RefExpr, SourceRange RR) {
112  ReductionRange = RR;
113  ReductionOp = RefExpr;
114  }
115  };
116  using DeclReductionMapTy =
117  llvm::SmallDenseMap<const ValueDecl *, ReductionData, 4>;
118 
119  struct SharingMapTy {
120  DeclSAMapTy SharingMap;
121  DeclReductionMapTy ReductionMap;
122  AlignedMapTy AlignedMap;
123  MappedExprComponentsTy MappedExprComponents;
124  LoopControlVariablesMapTy LCVMap;
125  DefaultDataSharingAttributes DefaultAttr = DSA_unspecified;
126  SourceLocation DefaultAttrLoc;
127  DefaultMapAttributes DefaultMapAttr = DMA_unspecified;
128  SourceLocation DefaultMapAttrLoc;
130  DeclarationNameInfo DirectiveName;
131  Scope *CurScope = nullptr;
132  SourceLocation ConstructLoc;
133  /// Set of 'depend' clauses with 'sink|source' dependence kind. Required to
134  /// get the data (loop counters etc.) about enclosing loop-based construct.
135  /// This data is required during codegen.
136  DoacrossDependMapTy DoacrossDepends;
137  /// first argument (Expr *) contains optional argument of the
138  /// 'ordered' clause, the second one is true if the regions has 'ordered'
139  /// clause, false otherwise.
141  bool NowaitRegion = false;
142  bool CancelRegion = false;
143  unsigned AssociatedLoops = 1;
144  SourceLocation InnerTeamsRegionLoc;
145  /// Reference to the taskgroup task_reduction reference expression.
146  Expr *TaskgroupReductionRef = nullptr;
147  SharingMapTy(OpenMPDirectiveKind DKind, DeclarationNameInfo Name,
148  Scope *CurScope, SourceLocation Loc)
149  : Directive(DKind), DirectiveName(Name), CurScope(CurScope),
150  ConstructLoc(Loc) {}
151  SharingMapTy() = default;
152  };
153 
154  using StackTy = SmallVector<SharingMapTy, 4>;
155 
156  /// Stack of used declaration and their data-sharing attributes.
157  DeclSAMapTy Threadprivates;
158  const FunctionScopeInfo *CurrentNonCapturingFunctionScope = nullptr;
160  /// true, if check for DSA must be from parent directive, false, if
161  /// from current directive.
162  OpenMPClauseKind ClauseKindMode = OMPC_unknown;
163  Sema &SemaRef;
164  bool ForceCapturing = false;
165  CriticalsWithHintsTy Criticals;
166 
167  using iterator = StackTy::const_reverse_iterator;
168 
169  DSAVarData getDSA(iterator &Iter, ValueDecl *D) const;
170 
171  /// Checks if the variable is a local for OpenMP region.
172  bool isOpenMPLocal(VarDecl *D, iterator Iter) const;
173 
174  bool isStackEmpty() const {
175  return Stack.empty() ||
176  Stack.back().second != CurrentNonCapturingFunctionScope ||
177  Stack.back().first.empty();
178  }
179 
180  /// Vector of previously declared requires directives
182 
183 public:
184  explicit DSAStackTy(Sema &S) : SemaRef(S) {}
185 
186  bool isClauseParsingMode() const { return ClauseKindMode != OMPC_unknown; }
187  OpenMPClauseKind getClauseParsingMode() const {
188  assert(isClauseParsingMode() && "Must be in clause parsing mode.");
189  return ClauseKindMode;
190  }
191  void setClauseParsingMode(OpenMPClauseKind K) { ClauseKindMode = K; }
192 
193  bool isForceVarCapturing() const { return ForceCapturing; }
194  void setForceVarCapturing(bool V) { ForceCapturing = V; }
195 
196  void push(OpenMPDirectiveKind DKind, const DeclarationNameInfo &DirName,
197  Scope *CurScope, SourceLocation Loc) {
198  if (Stack.empty() ||
199  Stack.back().second != CurrentNonCapturingFunctionScope)
200  Stack.emplace_back(StackTy(), CurrentNonCapturingFunctionScope);
201  Stack.back().first.emplace_back(DKind, DirName, CurScope, Loc);
202  Stack.back().first.back().DefaultAttrLoc = Loc;
203  }
204 
205  void pop() {
206  assert(!Stack.back().first.empty() &&
207  "Data-sharing attributes stack is empty!");
208  Stack.back().first.pop_back();
209  }
210 
211  /// Start new OpenMP region stack in new non-capturing function.
212  void pushFunction() {
213  const FunctionScopeInfo *CurFnScope = SemaRef.getCurFunction();
214  assert(!isa<CapturingScopeInfo>(CurFnScope));
215  CurrentNonCapturingFunctionScope = CurFnScope;
216  }
217  /// Pop region stack for non-capturing function.
218  void popFunction(const FunctionScopeInfo *OldFSI) {
219  if (!Stack.empty() && Stack.back().second == OldFSI) {
220  assert(Stack.back().first.empty());
221  Stack.pop_back();
222  }
223  CurrentNonCapturingFunctionScope = nullptr;
224  for (const FunctionScopeInfo *FSI : llvm::reverse(SemaRef.FunctionScopes)) {
225  if (!isa<CapturingScopeInfo>(FSI)) {
226  CurrentNonCapturingFunctionScope = FSI;
227  break;
228  }
229  }
230  }
231 
232  void addCriticalWithHint(const OMPCriticalDirective *D, llvm::APSInt Hint) {
233  Criticals.try_emplace(D->getDirectiveName().getAsString(), D, Hint);
234  }
235  const std::pair<const OMPCriticalDirective *, llvm::APSInt>
236  getCriticalWithHint(const DeclarationNameInfo &Name) const {
237  auto I = Criticals.find(Name.getAsString());
238  if (I != Criticals.end())
239  return I->second;
240  return std::make_pair(nullptr, llvm::APSInt());
241  }
242  /// If 'aligned' declaration for given variable \a D was not seen yet,
243  /// add it and return NULL; otherwise return previous occurrence's expression
244  /// for diagnostics.
245  const Expr *addUniqueAligned(const ValueDecl *D, const Expr *NewDE);
246 
247  /// Register specified variable as loop control variable.
248  void addLoopControlVariable(const ValueDecl *D, VarDecl *Capture);
249  /// Check if the specified variable is a loop control variable for
250  /// current region.
251  /// \return The index of the loop control variable in the list of associated
252  /// for-loops (from outer to inner).
253  const LCDeclInfo isLoopControlVariable(const ValueDecl *D) const;
254  /// Check if the specified variable is a loop control variable for
255  /// parent region.
256  /// \return The index of the loop control variable in the list of associated
257  /// for-loops (from outer to inner).
258  const LCDeclInfo isParentLoopControlVariable(const ValueDecl *D) const;
259  /// Get the loop control variable for the I-th loop (or nullptr) in
260  /// parent directive.
261  const ValueDecl *getParentLoopControlVariable(unsigned I) const;
262 
263  /// Adds explicit data sharing attribute to the specified declaration.
264  void addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
265  DeclRefExpr *PrivateCopy = nullptr);
266 
267  /// Adds additional information for the reduction items with the reduction id
268  /// represented as an operator.
269  void addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
270  BinaryOperatorKind BOK);
271  /// Adds additional information for the reduction items with the reduction id
272  /// represented as reduction identifier.
273  void addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
274  const Expr *ReductionRef);
275  /// Returns the location and reduction operation from the innermost parent
276  /// region for the given \p D.
277  const DSAVarData
278  getTopMostTaskgroupReductionData(const ValueDecl *D, SourceRange &SR,
279  BinaryOperatorKind &BOK,
280  Expr *&TaskgroupDescriptor) const;
281  /// Returns the location and reduction operation from the innermost parent
282  /// region for the given \p D.
283  const DSAVarData
284  getTopMostTaskgroupReductionData(const ValueDecl *D, SourceRange &SR,
285  const Expr *&ReductionRef,
286  Expr *&TaskgroupDescriptor) const;
287  /// Return reduction reference expression for the current taskgroup.
288  Expr *getTaskgroupReductionRef() const {
289  assert(Stack.back().first.back().Directive == OMPD_taskgroup &&
290  "taskgroup reference expression requested for non taskgroup "
291  "directive.");
292  return Stack.back().first.back().TaskgroupReductionRef;
293  }
294  /// Checks if the given \p VD declaration is actually a taskgroup reduction
295  /// descriptor variable at the \p Level of OpenMP regions.
296  bool isTaskgroupReductionRef(const ValueDecl *VD, unsigned Level) const {
297  return Stack.back().first[Level].TaskgroupReductionRef &&
298  cast<DeclRefExpr>(Stack.back().first[Level].TaskgroupReductionRef)
299  ->getDecl() == VD;
300  }
301 
302  /// Returns data sharing attributes from top of the stack for the
303  /// specified declaration.
304  const DSAVarData getTopDSA(ValueDecl *D, bool FromParent);
305  /// Returns data-sharing attributes for the specified declaration.
306  const DSAVarData getImplicitDSA(ValueDecl *D, bool FromParent) const;
307  /// Checks if the specified variables has data-sharing attributes which
308  /// match specified \a CPred predicate in any directive which matches \a DPred
309  /// predicate.
310  const DSAVarData
311  hasDSA(ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
312  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
313  bool FromParent) const;
314  /// Checks if the specified variables has data-sharing attributes which
315  /// match specified \a CPred predicate in any innermost directive which
316  /// matches \a DPred predicate.
317  const DSAVarData
318  hasInnermostDSA(ValueDecl *D,
319  const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
320  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
321  bool FromParent) const;
322  /// Checks if the specified variables has explicit data-sharing
323  /// attributes which match specified \a CPred predicate at the specified
324  /// OpenMP region.
325  bool hasExplicitDSA(const ValueDecl *D,
326  const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
327  unsigned Level, bool NotLastprivate = false) const;
328 
329  /// Returns true if the directive at level \Level matches in the
330  /// specified \a DPred predicate.
331  bool hasExplicitDirective(
332  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
333  unsigned Level) const;
334 
335  /// Finds a directive which matches specified \a DPred predicate.
336  bool hasDirective(
337  const llvm::function_ref<bool(
339  DPred,
340  bool FromParent) const;
341 
342  /// Returns currently analyzed directive.
343  OpenMPDirectiveKind getCurrentDirective() const {
344  return isStackEmpty() ? OMPD_unknown : Stack.back().first.back().Directive;
345  }
346  /// Returns directive kind at specified level.
347  OpenMPDirectiveKind getDirective(unsigned Level) const {
348  assert(!isStackEmpty() && "No directive at specified level.");
349  return Stack.back().first[Level].Directive;
350  }
351  /// Returns parent directive.
352  OpenMPDirectiveKind getParentDirective() const {
353  if (isStackEmpty() || Stack.back().first.size() == 1)
354  return OMPD_unknown;
355  return std::next(Stack.back().first.rbegin())->Directive;
356  }
357 
358  /// Add requires decl to internal vector
359  void addRequiresDecl(OMPRequiresDecl *RD) {
360  RequiresDecls.push_back(RD);
361  }
362 
363  /// Checks for a duplicate clause amongst previously declared requires
364  /// directives
365  bool hasDuplicateRequiresClause(ArrayRef<OMPClause *> ClauseList) const {
366  bool IsDuplicate = false;
367  for (OMPClause *CNew : ClauseList) {
368  for (const OMPRequiresDecl *D : RequiresDecls) {
369  for (const OMPClause *CPrev : D->clauselists()) {
370  if (CNew->getClauseKind() == CPrev->getClauseKind()) {
371  SemaRef.Diag(CNew->getBeginLoc(),
372  diag::err_omp_requires_clause_redeclaration)
373  << getOpenMPClauseName(CNew->getClauseKind());
374  SemaRef.Diag(CPrev->getBeginLoc(),
375  diag::note_omp_requires_previous_clause)
376  << getOpenMPClauseName(CPrev->getClauseKind());
377  IsDuplicate = true;
378  }
379  }
380  }
381  }
382  return IsDuplicate;
383  }
384 
385  /// Set default data sharing attribute to none.
386  void setDefaultDSANone(SourceLocation Loc) {
387  assert(!isStackEmpty());
388  Stack.back().first.back().DefaultAttr = DSA_none;
389  Stack.back().first.back().DefaultAttrLoc = Loc;
390  }
391  /// Set default data sharing attribute to shared.
392  void setDefaultDSAShared(SourceLocation Loc) {
393  assert(!isStackEmpty());
394  Stack.back().first.back().DefaultAttr = DSA_shared;
395  Stack.back().first.back().DefaultAttrLoc = Loc;
396  }
397  /// Set default data mapping attribute to 'tofrom:scalar'.
398  void setDefaultDMAToFromScalar(SourceLocation Loc) {
399  assert(!isStackEmpty());
400  Stack.back().first.back().DefaultMapAttr = DMA_tofrom_scalar;
401  Stack.back().first.back().DefaultMapAttrLoc = Loc;
402  }
403 
404  DefaultDataSharingAttributes getDefaultDSA() const {
405  return isStackEmpty() ? DSA_unspecified
406  : Stack.back().first.back().DefaultAttr;
407  }
408  SourceLocation getDefaultDSALocation() const {
409  return isStackEmpty() ? SourceLocation()
410  : Stack.back().first.back().DefaultAttrLoc;
411  }
412  DefaultMapAttributes getDefaultDMA() const {
413  return isStackEmpty() ? DMA_unspecified
414  : Stack.back().first.back().DefaultMapAttr;
415  }
416  DefaultMapAttributes getDefaultDMAAtLevel(unsigned Level) const {
417  return Stack.back().first[Level].DefaultMapAttr;
418  }
419  SourceLocation getDefaultDMALocation() const {
420  return isStackEmpty() ? SourceLocation()
421  : Stack.back().first.back().DefaultMapAttrLoc;
422  }
423 
424  /// Checks if the specified variable is a threadprivate.
425  bool isThreadPrivate(VarDecl *D) {
426  const DSAVarData DVar = getTopDSA(D, false);
427  return isOpenMPThreadPrivate(DVar.CKind);
428  }
429 
430  /// Marks current region as ordered (it has an 'ordered' clause).
431  void setOrderedRegion(bool IsOrdered, const Expr *Param,
432  OMPOrderedClause *Clause) {
433  assert(!isStackEmpty());
434  if (IsOrdered)
435  Stack.back().first.back().OrderedRegion.emplace(Param, Clause);
436  else
437  Stack.back().first.back().OrderedRegion.reset();
438  }
439  /// Returns true, if region is ordered (has associated 'ordered' clause),
440  /// false - otherwise.
441  bool isOrderedRegion() const {
442  if (isStackEmpty())
443  return false;
444  return Stack.back().first.rbegin()->OrderedRegion.hasValue();
445  }
446  /// Returns optional parameter for the ordered region.
447  std::pair<const Expr *, OMPOrderedClause *> getOrderedRegionParam() const {
448  if (isStackEmpty() ||
449  !Stack.back().first.rbegin()->OrderedRegion.hasValue())
450  return std::make_pair(nullptr, nullptr);
451  return Stack.back().first.rbegin()->OrderedRegion.getValue();
452  }
453  /// Returns true, if parent region is ordered (has associated
454  /// 'ordered' clause), false - otherwise.
455  bool isParentOrderedRegion() const {
456  if (isStackEmpty() || Stack.back().first.size() == 1)
457  return false;
458  return std::next(Stack.back().first.rbegin())->OrderedRegion.hasValue();
459  }
460  /// Returns optional parameter for the ordered region.
461  std::pair<const Expr *, OMPOrderedClause *>
462  getParentOrderedRegionParam() const {
463  if (isStackEmpty() || Stack.back().first.size() == 1 ||
464  !std::next(Stack.back().first.rbegin())->OrderedRegion.hasValue())
465  return std::make_pair(nullptr, nullptr);
466  return std::next(Stack.back().first.rbegin())->OrderedRegion.getValue();
467  }
468  /// Marks current region as nowait (it has a 'nowait' clause).
469  void setNowaitRegion(bool IsNowait = true) {
470  assert(!isStackEmpty());
471  Stack.back().first.back().NowaitRegion = IsNowait;
472  }
473  /// Returns true, if parent region is nowait (has associated
474  /// 'nowait' clause), false - otherwise.
475  bool isParentNowaitRegion() const {
476  if (isStackEmpty() || Stack.back().first.size() == 1)
477  return false;
478  return std::next(Stack.back().first.rbegin())->NowaitRegion;
479  }
480  /// Marks parent region as cancel region.
481  void setParentCancelRegion(bool Cancel = true) {
482  if (!isStackEmpty() && Stack.back().first.size() > 1) {
483  auto &StackElemRef = *std::next(Stack.back().first.rbegin());
484  StackElemRef.CancelRegion |= StackElemRef.CancelRegion || Cancel;
485  }
486  }
487  /// Return true if current region has inner cancel construct.
488  bool isCancelRegion() const {
489  return isStackEmpty() ? false : Stack.back().first.back().CancelRegion;
490  }
491 
492  /// Set collapse value for the region.
493  void setAssociatedLoops(unsigned Val) {
494  assert(!isStackEmpty());
495  Stack.back().first.back().AssociatedLoops = Val;
496  }
497  /// Return collapse value for region.
498  unsigned getAssociatedLoops() const {
499  return isStackEmpty() ? 0 : Stack.back().first.back().AssociatedLoops;
500  }
501 
502  /// Marks current target region as one with closely nested teams
503  /// region.
504  void setParentTeamsRegionLoc(SourceLocation TeamsRegionLoc) {
505  if (!isStackEmpty() && Stack.back().first.size() > 1) {
506  std::next(Stack.back().first.rbegin())->InnerTeamsRegionLoc =
507  TeamsRegionLoc;
508  }
509  }
510  /// Returns true, if current region has closely nested teams region.
511  bool hasInnerTeamsRegion() const {
512  return getInnerTeamsRegionLoc().isValid();
513  }
514  /// Returns location of the nested teams region (if any).
515  SourceLocation getInnerTeamsRegionLoc() const {
516  return isStackEmpty() ? SourceLocation()
517  : Stack.back().first.back().InnerTeamsRegionLoc;
518  }
519 
520  Scope *getCurScope() const {
521  return isStackEmpty() ? nullptr : Stack.back().first.back().CurScope;
522  }
523  SourceLocation getConstructLoc() const {
524  return isStackEmpty() ? SourceLocation()
525  : Stack.back().first.back().ConstructLoc;
526  }
527 
528  /// Do the check specified in \a Check to all component lists and return true
529  /// if any issue is found.
530  bool checkMappableExprComponentListsForDecl(
531  const ValueDecl *VD, bool CurrentRegionOnly,
532  const llvm::function_ref<
535  Check) const {
536  if (isStackEmpty())
537  return false;
538  auto SI = Stack.back().first.rbegin();
539  auto SE = Stack.back().first.rend();
540 
541  if (SI == SE)
542  return false;
543 
544  if (CurrentRegionOnly)
545  SE = std::next(SI);
546  else
547  std::advance(SI, 1);
548 
549  for (; SI != SE; ++SI) {
550  auto MI = SI->MappedExprComponents.find(VD);
551  if (MI != SI->MappedExprComponents.end())
553  MI->second.Components)
554  if (Check(L, MI->second.Kind))
555  return true;
556  }
557  return false;
558  }
559 
560  /// Do the check specified in \a Check to all component lists at a given level
561  /// and return true if any issue is found.
562  bool checkMappableExprComponentListsForDeclAtLevel(
563  const ValueDecl *VD, unsigned Level,
564  const llvm::function_ref<
567  Check) const {
568  if (isStackEmpty())
569  return false;
570 
571  auto StartI = Stack.back().first.begin();
572  auto EndI = Stack.back().first.end();
573  if (std::distance(StartI, EndI) <= (int)Level)
574  return false;
575  std::advance(StartI, Level);
576 
577  auto MI = StartI->MappedExprComponents.find(VD);
578  if (MI != StartI->MappedExprComponents.end())
580  MI->second.Components)
581  if (Check(L, MI->second.Kind))
582  return true;
583  return false;
584  }
585 
586  /// Create a new mappable expression component list associated with a given
587  /// declaration and initialize it with the provided list of components.
588  void addMappableExpressionComponents(
589  const ValueDecl *VD,
591  OpenMPClauseKind WhereFoundClauseKind) {
592  assert(!isStackEmpty() &&
593  "Not expecting to retrieve components from a empty stack!");
594  MappedExprComponentTy &MEC =
595  Stack.back().first.back().MappedExprComponents[VD];
596  // Create new entry and append the new components there.
597  MEC.Components.resize(MEC.Components.size() + 1);
598  MEC.Components.back().append(Components.begin(), Components.end());
599  MEC.Kind = WhereFoundClauseKind;
600  }
601 
602  unsigned getNestingLevel() const {
603  assert(!isStackEmpty());
604  return Stack.back().first.size() - 1;
605  }
606  void addDoacrossDependClause(OMPDependClause *C,
607  const OperatorOffsetTy &OpsOffs) {
608  assert(!isStackEmpty() && Stack.back().first.size() > 1);
609  SharingMapTy &StackElem = *std::next(Stack.back().first.rbegin());
610  assert(isOpenMPWorksharingDirective(StackElem.Directive));
611  StackElem.DoacrossDepends.try_emplace(C, OpsOffs);
612  }
613  llvm::iterator_range<DoacrossDependMapTy::const_iterator>
614  getDoacrossDependClauses() const {
615  assert(!isStackEmpty());
616  const SharingMapTy &StackElem = Stack.back().first.back();
617  if (isOpenMPWorksharingDirective(StackElem.Directive)) {
618  const DoacrossDependMapTy &Ref = StackElem.DoacrossDepends;
619  return llvm::make_range(Ref.begin(), Ref.end());
620  }
621  return llvm::make_range(StackElem.DoacrossDepends.end(),
622  StackElem.DoacrossDepends.end());
623  }
624 };
625 bool isParallelOrTaskRegion(OpenMPDirectiveKind DKind) {
626  return isOpenMPParallelDirective(DKind) || isOpenMPTaskingDirective(DKind) ||
627  isOpenMPTeamsDirective(DKind) || DKind == OMPD_unknown;
628 }
629 
630 } // namespace
631 
632 static const Expr *getExprAsWritten(const Expr *E) {
633  if (const auto *ExprTemp = dyn_cast<ExprWithCleanups>(E))
634  E = ExprTemp->getSubExpr();
635 
636  if (const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
637  E = MTE->GetTemporaryExpr();
638 
639  while (const auto *Binder = dyn_cast<CXXBindTemporaryExpr>(E))
640  E = Binder->getSubExpr();
641 
642  if (const auto *ICE = dyn_cast<ImplicitCastExpr>(E))
643  E = ICE->getSubExprAsWritten();
644  return E->IgnoreParens();
645 }
646 
648  return const_cast<Expr *>(getExprAsWritten(const_cast<const Expr *>(E)));
649 }
650 
651 static const ValueDecl *getCanonicalDecl(const ValueDecl *D) {
652  if (const auto *CED = dyn_cast<OMPCapturedExprDecl>(D))
653  if (const auto *ME = dyn_cast<MemberExpr>(getExprAsWritten(CED->getInit())))
654  D = ME->getMemberDecl();
655  const auto *VD = dyn_cast<VarDecl>(D);
656  const auto *FD = dyn_cast<FieldDecl>(D);
657  if (VD != nullptr) {
658  VD = VD->getCanonicalDecl();
659  D = VD;
660  } else {
661  assert(FD);
662  FD = FD->getCanonicalDecl();
663  D = FD;
664  }
665  return D;
666 }
667 
669  return const_cast<ValueDecl *>(
670  getCanonicalDecl(const_cast<const ValueDecl *>(D)));
671 }
672 
673 DSAStackTy::DSAVarData DSAStackTy::getDSA(iterator &Iter,
674  ValueDecl *D) const {
675  D = getCanonicalDecl(D);
676  auto *VD = dyn_cast<VarDecl>(D);
677  const auto *FD = dyn_cast<FieldDecl>(D);
678  DSAVarData DVar;
679  if (isStackEmpty() || Iter == Stack.back().first.rend()) {
680  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
681  // in a region but not in construct]
682  // File-scope or namespace-scope variables referenced in called routines
683  // in the region are shared unless they appear in a threadprivate
684  // directive.
685  if (VD && !VD->isFunctionOrMethodVarDecl() && !isa<ParmVarDecl>(VD))
686  DVar.CKind = OMPC_shared;
687 
688  // OpenMP [2.9.1.2, Data-sharing Attribute Rules for Variables Referenced
689  // in a region but not in construct]
690  // Variables with static storage duration that are declared in called
691  // routines in the region are shared.
692  if (VD && VD->hasGlobalStorage())
693  DVar.CKind = OMPC_shared;
694 
695  // Non-static data members are shared by default.
696  if (FD)
697  DVar.CKind = OMPC_shared;
698 
699  return DVar;
700  }
701 
702  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
703  // in a Construct, C/C++, predetermined, p.1]
704  // Variables with automatic storage duration that are declared in a scope
705  // inside the construct are private.
706  if (VD && isOpenMPLocal(VD, Iter) && VD->isLocalVarDecl() &&
707  (VD->getStorageClass() == SC_Auto || VD->getStorageClass() == SC_None)) {
708  DVar.CKind = OMPC_private;
709  return DVar;
710  }
711 
712  DVar.DKind = Iter->Directive;
713  // Explicitly specified attributes and local variables with predetermined
714  // attributes.
715  if (Iter->SharingMap.count(D)) {
716  const DSAInfo &Data = Iter->SharingMap.lookup(D);
717  DVar.RefExpr = Data.RefExpr.getPointer();
718  DVar.PrivateCopy = Data.PrivateCopy;
719  DVar.CKind = Data.Attributes;
720  DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
721  return DVar;
722  }
723 
724  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
725  // in a Construct, C/C++, implicitly determined, p.1]
726  // In a parallel or task construct, the data-sharing attributes of these
727  // variables are determined by the default clause, if present.
728  switch (Iter->DefaultAttr) {
729  case DSA_shared:
730  DVar.CKind = OMPC_shared;
731  DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
732  return DVar;
733  case DSA_none:
734  return DVar;
735  case DSA_unspecified:
736  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
737  // in a Construct, implicitly determined, p.2]
738  // In a parallel construct, if no default clause is present, these
739  // variables are shared.
740  DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
741  if (isOpenMPParallelDirective(DVar.DKind) ||
742  isOpenMPTeamsDirective(DVar.DKind)) {
743  DVar.CKind = OMPC_shared;
744  return DVar;
745  }
746 
747  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
748  // in a Construct, implicitly determined, p.4]
749  // In a task construct, if no default clause is present, a variable that in
750  // the enclosing context is determined to be shared by all implicit tasks
751  // bound to the current team is shared.
752  if (isOpenMPTaskingDirective(DVar.DKind)) {
753  DSAVarData DVarTemp;
754  iterator I = Iter, E = Stack.back().first.rend();
755  do {
756  ++I;
757  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables
758  // Referenced in a Construct, implicitly determined, p.6]
759  // In a task construct, if no default clause is present, a variable
760  // whose data-sharing attribute is not determined by the rules above is
761  // firstprivate.
762  DVarTemp = getDSA(I, D);
763  if (DVarTemp.CKind != OMPC_shared) {
764  DVar.RefExpr = nullptr;
765  DVar.CKind = OMPC_firstprivate;
766  return DVar;
767  }
768  } while (I != E && !isParallelOrTaskRegion(I->Directive));
769  DVar.CKind =
770  (DVarTemp.CKind == OMPC_unknown) ? OMPC_firstprivate : OMPC_shared;
771  return DVar;
772  }
773  }
774  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
775  // in a Construct, implicitly determined, p.3]
776  // For constructs other than task, if no default clause is present, these
777  // variables inherit their data-sharing attributes from the enclosing
778  // context.
779  return getDSA(++Iter, D);
780 }
781 
782 const Expr *DSAStackTy::addUniqueAligned(const ValueDecl *D,
783  const Expr *NewDE) {
784  assert(!isStackEmpty() && "Data sharing attributes stack is empty");
785  D = getCanonicalDecl(D);
786  SharingMapTy &StackElem = Stack.back().first.back();
787  auto It = StackElem.AlignedMap.find(D);
788  if (It == StackElem.AlignedMap.end()) {
789  assert(NewDE && "Unexpected nullptr expr to be added into aligned map");
790  StackElem.AlignedMap[D] = NewDE;
791  return nullptr;
792  }
793  assert(It->second && "Unexpected nullptr expr in the aligned map");
794  return It->second;
795 }
796 
797 void DSAStackTy::addLoopControlVariable(const ValueDecl *D, VarDecl *Capture) {
798  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
799  D = getCanonicalDecl(D);
800  SharingMapTy &StackElem = Stack.back().first.back();
801  StackElem.LCVMap.try_emplace(
802  D, LCDeclInfo(StackElem.LCVMap.size() + 1, Capture));
803 }
804 
805 const DSAStackTy::LCDeclInfo
806 DSAStackTy::isLoopControlVariable(const ValueDecl *D) const {
807  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
808  D = getCanonicalDecl(D);
809  const SharingMapTy &StackElem = Stack.back().first.back();
810  auto It = StackElem.LCVMap.find(D);
811  if (It != StackElem.LCVMap.end())
812  return It->second;
813  return {0, nullptr};
814 }
815 
816 const DSAStackTy::LCDeclInfo
817 DSAStackTy::isParentLoopControlVariable(const ValueDecl *D) const {
818  assert(!isStackEmpty() && Stack.back().first.size() > 1 &&
819  "Data-sharing attributes stack is empty");
820  D = getCanonicalDecl(D);
821  const SharingMapTy &StackElem = *std::next(Stack.back().first.rbegin());
822  auto It = StackElem.LCVMap.find(D);
823  if (It != StackElem.LCVMap.end())
824  return It->second;
825  return {0, nullptr};
826 }
827 
828 const ValueDecl *DSAStackTy::getParentLoopControlVariable(unsigned I) const {
829  assert(!isStackEmpty() && Stack.back().first.size() > 1 &&
830  "Data-sharing attributes stack is empty");
831  const SharingMapTy &StackElem = *std::next(Stack.back().first.rbegin());
832  if (StackElem.LCVMap.size() < I)
833  return nullptr;
834  for (const auto &Pair : StackElem.LCVMap)
835  if (Pair.second.first == I)
836  return Pair.first;
837  return nullptr;
838 }
839 
/// Record the data-sharing attribute \p A for declaration \p D, referenced by
/// expression \p E. Threadprivate attributes go to the global Threadprivates
/// map; every other attribute is recorded on the innermost region. When
/// \p PrivateCopy is given, the private copy's declaration is registered with
/// the same attribute as well.
void DSAStackTy::addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
                        DeclRefExpr *PrivateCopy) {
  D = getCanonicalDecl(D);
  if (A == OMPC_threadprivate) {
    DSAInfo &Data = Threadprivates[D];
    Data.Attributes = A;
    Data.RefExpr.setPointer(E);
    Data.PrivateCopy = nullptr;
  } else {
    assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
    DSAInfo &Data = Stack.back().first.back().SharingMap[D];
    // Only identical re-registration, the firstprivate/lastprivate
    // combination, or privatization of a loop control variable may overwrite
    // an existing entry.
    assert(Data.Attributes == OMPC_unknown || (A == Data.Attributes) ||
           (A == OMPC_firstprivate && Data.Attributes == OMPC_lastprivate) ||
           (A == OMPC_lastprivate && Data.Attributes == OMPC_firstprivate) ||
           (isLoopControlVariable(D).first && A == OMPC_private));
    if (A == OMPC_lastprivate && Data.Attributes == OMPC_firstprivate) {
      // Keep the firstprivate entry; the pointer-int flag marks the item as
      // also being lastprivate.
      Data.RefExpr.setInt(/*IntVal=*/true);
      return;
    }
    const bool IsLastprivate =
        A == OMPC_lastprivate || Data.Attributes == OMPC_lastprivate;
    Data.Attributes = A;
    Data.RefExpr.setPointerAndInt(E, IsLastprivate);
    Data.PrivateCopy = PrivateCopy;
    if (PrivateCopy) {
      // Mirror the attribute onto the private copy's own declaration (note:
      // this inner 'Data' intentionally shadows the outer one).
      DSAInfo &Data =
          Stack.back().first.back().SharingMap[PrivateCopy->getDecl()];
      Data.Attributes = A;
      Data.RefExpr.setPointerAndInt(PrivateCopy, IsLastprivate);
      Data.PrivateCopy = nullptr;
    }
  }
}
873 
874 /// Build a variable declaration for OpenMP loop iteration variable.
876  StringRef Name, const AttrVec *Attrs = nullptr,
877  DeclRefExpr *OrigRef = nullptr) {
878  DeclContext *DC = SemaRef.CurContext;
879  IdentifierInfo *II = &SemaRef.PP.getIdentifierTable().get(Name);
880  TypeSourceInfo *TInfo = SemaRef.Context.getTrivialTypeSourceInfo(Type, Loc);
881  auto *Decl =
882  VarDecl::Create(SemaRef.Context, DC, Loc, Loc, II, Type, TInfo, SC_None);
883  if (Attrs) {
884  for (specific_attr_iterator<AlignedAttr> I(Attrs->begin()), E(Attrs->end());
885  I != E; ++I)
886  Decl->addAttr(*I);
887  }
888  Decl->setImplicit();
889  if (OrigRef) {
890  Decl->addAttr(
891  OMPReferencedVarAttr::CreateImplicit(SemaRef.Context, OrigRef));
892  }
893  return Decl;
894 }
895 
897  SourceLocation Loc,
898  bool RefersToCapture = false) {
899  D->setReferenced();
900  D->markUsed(S.Context);
902  SourceLocation(), D, RefersToCapture, Loc, Ty,
903  VK_LValue);
904 }
905 
906 void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
907  BinaryOperatorKind BOK) {
908  D = getCanonicalDecl(D);
909  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
910  assert(
911  Stack.back().first.back().SharingMap[D].Attributes == OMPC_reduction &&
912  "Additional reduction info may be specified only for reduction items.");
913  ReductionData &ReductionData = Stack.back().first.back().ReductionMap[D];
914  assert(ReductionData.ReductionRange.isInvalid() &&
915  Stack.back().first.back().Directive == OMPD_taskgroup &&
916  "Additional reduction info may be specified only once for reduction "
917  "items.");
918  ReductionData.set(BOK, SR);
919  Expr *&TaskgroupReductionRef =
920  Stack.back().first.back().TaskgroupReductionRef;
921  if (!TaskgroupReductionRef) {
922  VarDecl *VD = buildVarDecl(SemaRef, SR.getBegin(),
923  SemaRef.Context.VoidPtrTy, ".task_red.");
924  TaskgroupReductionRef =
925  buildDeclRefExpr(SemaRef, VD, SemaRef.Context.VoidPtrTy, SR.getBegin());
926  }
927 }
928 
929 void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
930  const Expr *ReductionRef) {
931  D = getCanonicalDecl(D);
932  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
933  assert(
934  Stack.back().first.back().SharingMap[D].Attributes == OMPC_reduction &&
935  "Additional reduction info may be specified only for reduction items.");
936  ReductionData &ReductionData = Stack.back().first.back().ReductionMap[D];
937  assert(ReductionData.ReductionRange.isInvalid() &&
938  Stack.back().first.back().Directive == OMPD_taskgroup &&
939  "Additional reduction info may be specified only once for reduction "
940  "items.");
941  ReductionData.set(ReductionRef, SR);
942  Expr *&TaskgroupReductionRef =
943  Stack.back().first.back().TaskgroupReductionRef;
944  if (!TaskgroupReductionRef) {
945  VarDecl *VD = buildVarDecl(SemaRef, SR.getBegin(),
946  SemaRef.Context.VoidPtrTy, ".task_red.");
947  TaskgroupReductionRef =
948  buildDeclRefExpr(SemaRef, VD, SemaRef.Context.VoidPtrTy, SR.getBegin());
949  }
950 }
951 
/// Starting from the parent region, find the topmost taskgroup region where
/// \p D is a reduction item recorded with a builtin operator; on success
/// fills \p SR, \p BOK and \p TaskgroupDescriptor and returns the DSA entry.
const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
    const ValueDecl *D, SourceRange &SR, BinaryOperatorKind &BOK,
    Expr *&TaskgroupDescriptor) const {
  D = getCanonicalDecl(D);
  assert(!isStackEmpty() && "Data-sharing attributes stack is empty.");
  if (Stack.back().first.empty())
    return DSAVarData();
  // Walk outwards, skipping the current region itself.
  for (iterator I = std::next(Stack.back().first.rbegin(), 1),
                E = Stack.back().first.rend();
       I != E; std::advance(I, 1)) {
    const DSAInfo &Data = I->SharingMap.lookup(D);
    if (Data.Attributes != OMPC_reduction || I->Directive != OMPD_taskgroup)
      continue;
    const ReductionData &ReductionData = I->ReductionMap.lookup(D);
    // Entries holding an Expr* carry a user-defined reduction identifier and
    // are handled by the other overload; give up on those.
    if (!ReductionData.ReductionOp ||
        ReductionData.ReductionOp.is<const Expr *>())
      return DSAVarData();
    SR = ReductionData.ReductionRange;
    BOK = ReductionData.ReductionOp.get<ReductionData::BOKPtrType>();
    assert(I->TaskgroupReductionRef && "taskgroup reduction reference "
                                       "expression for the descriptor is not "
                                       "set.");
    TaskgroupDescriptor = I->TaskgroupReductionRef;
    return DSAVarData(OMPD_taskgroup, OMPC_reduction, Data.RefExpr.getPointer(),
                      Data.PrivateCopy, I->DefaultAttrLoc);
  }
  return DSAVarData();
}
980 
/// Starting from the parent region, find the topmost taskgroup region where
/// \p D is a reduction item recorded with a user-defined reduction
/// identifier; on success fills \p SR, \p ReductionRef and
/// \p TaskgroupDescriptor and returns the DSA entry.
const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
    const ValueDecl *D, SourceRange &SR, const Expr *&ReductionRef,
    Expr *&TaskgroupDescriptor) const {
  D = getCanonicalDecl(D);
  assert(!isStackEmpty() && "Data-sharing attributes stack is empty.");
  if (Stack.back().first.empty())
    return DSAVarData();
  // Walk outwards, skipping the current region itself.
  for (iterator I = std::next(Stack.back().first.rbegin(), 1),
                E = Stack.back().first.rend();
       I != E; std::advance(I, 1)) {
    const DSAInfo &Data = I->SharingMap.lookup(D);
    if (Data.Attributes != OMPC_reduction || I->Directive != OMPD_taskgroup)
      continue;
    const ReductionData &ReductionData = I->ReductionMap.lookup(D);
    // Entries not holding an Expr* carry a builtin operator kind and are
    // handled by the other overload; give up on those.
    if (!ReductionData.ReductionOp ||
        !ReductionData.ReductionOp.is<const Expr *>())
      return DSAVarData();
    SR = ReductionData.ReductionRange;
    ReductionRef = ReductionData.ReductionOp.get<const Expr *>();
    assert(I->TaskgroupReductionRef && "taskgroup reduction reference "
                                       "expression for the descriptor is not "
                                       "set.");
    TaskgroupDescriptor = I->TaskgroupReductionRef;
    return DSAVarData(OMPD_taskgroup, OMPC_reduction, Data.RefExpr.getPointer(),
                      Data.PrivateCopy, I->DefaultAttrLoc);
  }
  return DSAVarData();
}
1009 
/// Returns true if \p D is declared in a scope local to the region \p Iter,
/// i.e. between the scope of the innermost enclosing parallel/task/target
/// region (searched outwards from \p Iter) and the current scope.
bool DSAStackTy::isOpenMPLocal(VarDecl *D, iterator Iter) const {
  D = D->getCanonicalDecl();
  if (!isStackEmpty()) {
    iterator I = Iter, E = Stack.back().first.rend();
    Scope *TopScope = nullptr;
    // Find the innermost region that starts a new data environment.
    while (I != E && !isParallelOrTaskRegion(I->Directive) &&
           !isOpenMPTargetExecutionDirective(I->Directive))
      ++I;
    if (I == E)
      return false;
    TopScope = I->CurScope ? I->CurScope->getParent() : nullptr;
    Scope *CurScope = getCurScope();
    // Walk outwards from the current scope looking for D's declaring scope.
    // NOTE(review): this loop assumes D's scope (or TopScope) is reached
    // before running off the scope chain — confirm callers guarantee that,
    // otherwise CurScope->getParent() could be called on null.
    while (CurScope != TopScope && !CurScope->isDeclScope(D))
      CurScope = CurScope->getParent();
    return CurScope != TopScope;
  }
  return false;
}
1028 
1029 const DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D,
1030  bool FromParent) {
1031  D = getCanonicalDecl(D);
1032  DSAVarData DVar;
1033 
1034  auto *VD = dyn_cast<VarDecl>(D);
1035  auto TI = Threadprivates.find(D);
1036  if (TI != Threadprivates.end()) {
1037  DVar.RefExpr = TI->getSecond().RefExpr.getPointer();
1038  DVar.CKind = OMPC_threadprivate;
1039  return DVar;
1040  }
1041  if (VD && VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
1042  DVar.RefExpr = buildDeclRefExpr(
1043  SemaRef, VD, D->getType().getNonReferenceType(),
1044  VD->getAttr<OMPThreadPrivateDeclAttr>()->getLocation());
1045  DVar.CKind = OMPC_threadprivate;
1046  addDSA(D, DVar.RefExpr, OMPC_threadprivate);
1047  return DVar;
1048  }
1049  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1050  // in a Construct, C/C++, predetermined, p.1]
1051  // Variables appearing in threadprivate directives are threadprivate.
1052  if ((VD && VD->getTLSKind() != VarDecl::TLS_None &&
1053  !(VD->hasAttr<OMPThreadPrivateDeclAttr>() &&
1054  SemaRef.getLangOpts().OpenMPUseTLS &&
1055  SemaRef.getASTContext().getTargetInfo().isTLSSupported())) ||
1056  (VD && VD->getStorageClass() == SC_Register &&
1057  VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())) {
1058  DVar.RefExpr = buildDeclRefExpr(
1059  SemaRef, VD, D->getType().getNonReferenceType(), D->getLocation());
1060  DVar.CKind = OMPC_threadprivate;
1061  addDSA(D, DVar.RefExpr, OMPC_threadprivate);
1062  return DVar;
1063  }
1064  if (SemaRef.getLangOpts().OpenMPCUDAMode && VD &&
1065  VD->isLocalVarDeclOrParm() && !isStackEmpty() &&
1066  !isLoopControlVariable(D).first) {
1067  iterator IterTarget =
1068  std::find_if(Stack.back().first.rbegin(), Stack.back().first.rend(),
1069  [](const SharingMapTy &Data) {
1070  return isOpenMPTargetExecutionDirective(Data.Directive);
1071  });
1072  if (IterTarget != Stack.back().first.rend()) {
1073  iterator ParentIterTarget = std::next(IterTarget, 1);
1074  for (iterator Iter = Stack.back().first.rbegin();
1075  Iter != ParentIterTarget; std::advance(Iter, 1)) {
1076  if (isOpenMPLocal(VD, Iter)) {
1077  DVar.RefExpr =
1078  buildDeclRefExpr(SemaRef, VD, D->getType().getNonReferenceType(),
1079  D->getLocation());
1080  DVar.CKind = OMPC_threadprivate;
1081  return DVar;
1082  }
1083  }
1084  if (!isClauseParsingMode() || IterTarget != Stack.back().first.rbegin()) {
1085  auto DSAIter = IterTarget->SharingMap.find(D);
1086  if (DSAIter != IterTarget->SharingMap.end() &&
1087  isOpenMPPrivate(DSAIter->getSecond().Attributes)) {
1088  DVar.RefExpr = DSAIter->getSecond().RefExpr.getPointer();
1089  DVar.CKind = OMPC_threadprivate;
1090  return DVar;
1091  }
1092  iterator End = Stack.back().first.rend();
1093  if (!SemaRef.isOpenMPCapturedByRef(
1094  D, std::distance(ParentIterTarget, End))) {
1095  DVar.RefExpr =
1096  buildDeclRefExpr(SemaRef, VD, D->getType().getNonReferenceType(),
1097  IterTarget->ConstructLoc);
1098  DVar.CKind = OMPC_threadprivate;
1099  return DVar;
1100  }
1101  }
1102  }
1103  }
1104 
1105  if (isStackEmpty())
1106  // Not in OpenMP execution region and top scope was already checked.
1107  return DVar;
1108 
1109  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1110  // in a Construct, C/C++, predetermined, p.4]
1111  // Static data members are shared.
1112  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1113  // in a Construct, C/C++, predetermined, p.7]
1114  // Variables with static storage duration that are declared in a scope
1115  // inside the construct are shared.
1116  auto &&MatchesAlways = [](OpenMPDirectiveKind) { return true; };
1117  if (VD && VD->isStaticDataMember()) {
1118  DSAVarData DVarTemp = hasDSA(D, isOpenMPPrivate, MatchesAlways, FromParent);
1119  if (DVarTemp.CKind != OMPC_unknown && DVarTemp.RefExpr)
1120  return DVar;
1121 
1122  DVar.CKind = OMPC_shared;
1123  return DVar;
1124  }
1125 
1127  bool IsConstant = Type.isConstant(SemaRef.getASTContext());
1128  Type = SemaRef.getASTContext().getBaseElementType(Type);
1129  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1130  // in a Construct, C/C++, predetermined, p.6]
1131  // Variables with const qualified type having no mutable member are
1132  // shared.
1133  const CXXRecordDecl *RD =
1134  SemaRef.getLangOpts().CPlusPlus ? Type->getAsCXXRecordDecl() : nullptr;
1135  if (const auto *CTSD = dyn_cast_or_null<ClassTemplateSpecializationDecl>(RD))
1136  if (const ClassTemplateDecl *CTD = CTSD->getSpecializedTemplate())
1137  RD = CTD->getTemplatedDecl();
1138  if (IsConstant &&
1139  !(SemaRef.getLangOpts().CPlusPlus && RD && RD->hasDefinition() &&
1140  RD->hasMutableFields())) {
1141  // Variables with const-qualified type having no mutable member may be
1142  // listed in a firstprivate clause, even if they are static data members.
1143  DSAVarData DVarTemp =
1144  hasDSA(D, [](OpenMPClauseKind C) { return C == OMPC_firstprivate; },
1145  MatchesAlways, FromParent);
1146  if (DVarTemp.CKind == OMPC_firstprivate && DVarTemp.RefExpr)
1147  return DVarTemp;
1148 
1149  DVar.CKind = OMPC_shared;
1150  return DVar;
1151  }
1152 
1153  // Explicitly specified attributes and local variables with predetermined
1154  // attributes.
1155  iterator I = Stack.back().first.rbegin();
1156  iterator EndI = Stack.back().first.rend();
1157  if (FromParent && I != EndI)
1158  std::advance(I, 1);
1159  auto It = I->SharingMap.find(D);
1160  if (It != I->SharingMap.end()) {
1161  const DSAInfo &Data = It->getSecond();
1162  DVar.RefExpr = Data.RefExpr.getPointer();
1163  DVar.PrivateCopy = Data.PrivateCopy;
1164  DVar.CKind = Data.Attributes;
1165  DVar.ImplicitDSALoc = I->DefaultAttrLoc;
1166  DVar.DKind = I->Directive;
1167  }
1168 
1169  return DVar;
1170 }
1171 
1172 const DSAStackTy::DSAVarData DSAStackTy::getImplicitDSA(ValueDecl *D,
1173  bool FromParent) const {
1174  if (isStackEmpty()) {
1175  iterator I;
1176  return getDSA(I, D);
1177  }
1178  D = getCanonicalDecl(D);
1179  iterator StartI = Stack.back().first.rbegin();
1180  iterator EndI = Stack.back().first.rend();
1181  if (FromParent && StartI != EndI)
1182  std::advance(StartI, 1);
1183  return getDSA(StartI, D);
1184 }
1185 
1186 const DSAStackTy::DSAVarData
1187 DSAStackTy::hasDSA(ValueDecl *D,
1188  const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
1189  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
1190  bool FromParent) const {
1191  if (isStackEmpty())
1192  return {};
1193  D = getCanonicalDecl(D);
1194  iterator I = Stack.back().first.rbegin();
1195  iterator EndI = Stack.back().first.rend();
1196  if (FromParent && I != EndI)
1197  std::advance(I, 1);
1198  for (; I != EndI; std::advance(I, 1)) {
1199  if (!DPred(I->Directive) && !isParallelOrTaskRegion(I->Directive))
1200  continue;
1201  iterator NewI = I;
1202  DSAVarData DVar = getDSA(NewI, D);
1203  if (I == NewI && CPred(DVar.CKind))
1204  return DVar;
1205  }
1206  return {};
1207 }
1208 
1209 const DSAStackTy::DSAVarData DSAStackTy::hasInnermostDSA(
1210  ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
1211  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
1212  bool FromParent) const {
1213  if (isStackEmpty())
1214  return {};
1215  D = getCanonicalDecl(D);
1216  iterator StartI = Stack.back().first.rbegin();
1217  iterator EndI = Stack.back().first.rend();
1218  if (FromParent && StartI != EndI)
1219  std::advance(StartI, 1);
1220  if (StartI == EndI || !DPred(StartI->Directive))
1221  return {};
1222  iterator NewI = StartI;
1223  DSAVarData DVar = getDSA(NewI, D);
1224  return (NewI == StartI && CPred(DVar.CKind)) ? DVar : DSAVarData();
1225 }
1226 
1227 bool DSAStackTy::hasExplicitDSA(
1228  const ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
1229  unsigned Level, bool NotLastprivate) const {
1230  if (isStackEmpty())
1231  return false;
1232  D = getCanonicalDecl(D);
1233  auto StartI = Stack.back().first.begin();
1234  auto EndI = Stack.back().first.end();
1235  if (std::distance(StartI, EndI) <= (int)Level)
1236  return false;
1237  std::advance(StartI, Level);
1238  auto I = StartI->SharingMap.find(D);
1239  return (I != StartI->SharingMap.end()) &&
1240  I->getSecond().RefExpr.getPointer() &&
1241  CPred(I->getSecond().Attributes) &&
1242  (!NotLastprivate || !I->getSecond().RefExpr.getInt());
1243 }
1244 
1245 bool DSAStackTy::hasExplicitDirective(
1246  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
1247  unsigned Level) const {
1248  if (isStackEmpty())
1249  return false;
1250  auto StartI = Stack.back().first.begin();
1251  auto EndI = Stack.back().first.end();
1252  if (std::distance(StartI, EndI) <= (int)Level)
1253  return false;
1254  std::advance(StartI, Level);
1255  return DPred(StartI->Directive);
1256 }
1257 
1258 bool DSAStackTy::hasDirective(
1259  const llvm::function_ref<bool(OpenMPDirectiveKind,
1261  DPred,
1262  bool FromParent) const {
1263  // We look only in the enclosing region.
1264  if (isStackEmpty())
1265  return false;
1266  auto StartI = std::next(Stack.back().first.rbegin());
1267  auto EndI = Stack.back().first.rend();
1268  if (FromParent && StartI != EndI)
1269  StartI = std::next(StartI);
1270  for (auto I = StartI, EE = EndI; I != EE; ++I) {
1271  if (DPred(I->Directive, I->DirectiveName, I->ConstructLoc))
1272  return true;
1273  }
1274  return false;
1275 }
1276 
/// Allocate the OpenMP data-sharing attributes stack owned by this Sema.
void Sema::InitDataSharingAttributesStack() {
  VarDataSharingAttributesStack = new DSAStackTy(*this);
}

// The stack is stored in Sema as an opaque pointer; this accessor casts it
// back to its concrete type.
#define DSAStack static_cast<DSAStackTy *>(VarDataSharingAttributesStack)

/// Push a new per-function scope onto the data-sharing stack.
void Sema::pushOpenMPFunctionRegion() {
  DSAStack->pushFunction();
}

/// Pop the per-function scope pushed by pushOpenMPFunctionRegion().
void Sema::popOpenMPFunctionRegion(const FunctionScopeInfo *OldFSI) {
  DSAStack->popFunction(OldFSI);
}
1290 
1291 bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const {
1292  assert(LangOpts.OpenMP && "OpenMP is not allowed");
1293 
1294  ASTContext &Ctx = getASTContext();
1295  bool IsByRef = true;
1296 
1297  // Find the directive that is associated with the provided scope.
1298  D = cast<ValueDecl>(D->getCanonicalDecl());
1299  QualType Ty = D->getType();
1300 
1301  if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective, Level)) {
1302  // This table summarizes how a given variable should be passed to the device
1303  // given its type and the clauses where it appears. This table is based on
1304  // the description in OpenMP 4.5 [2.10.4, target Construct] and
1305  // OpenMP 4.5 [2.15.5, Data-mapping Attribute Rules and Clauses].
1306  //
1307  // =========================================================================
1308  // | type | defaultmap | pvt | first | is_device_ptr | map | res. |
1309  // | |(tofrom:scalar)| | pvt | | | |
1310  // =========================================================================
1311  // | scl | | | | - | | bycopy|
1312  // | scl | | - | x | - | - | bycopy|
1313  // | scl | | x | - | - | - | null |
1314  // | scl | x | | | - | | byref |
1315  // | scl | x | - | x | - | - | bycopy|
1316  // | scl | x | x | - | - | - | null |
1317  // | scl | | - | - | - | x | byref |
1318  // | scl | x | - | - | - | x | byref |
1319  //
1320  // | agg | n.a. | | | - | | byref |
1321  // | agg | n.a. | - | x | - | - | byref |
1322  // | agg | n.a. | x | - | - | - | null |
1323  // | agg | n.a. | - | - | - | x | byref |
1324  // | agg | n.a. | - | - | - | x[] | byref |
1325  //
1326  // | ptr | n.a. | | | - | | bycopy|
1327  // | ptr | n.a. | - | x | - | - | bycopy|
1328  // | ptr | n.a. | x | - | - | - | null |
1329  // | ptr | n.a. | - | - | - | x | byref |
1330  // | ptr | n.a. | - | - | - | x[] | bycopy|
1331  // | ptr | n.a. | - | - | x | | bycopy|
1332  // | ptr | n.a. | - | - | x | x | bycopy|
1333  // | ptr | n.a. | - | - | x | x[] | bycopy|
1334  // =========================================================================
1335  // Legend:
1336  // scl - scalar
1337  // ptr - pointer
1338  // agg - aggregate
1339  // x - applies
1340  // - - invalid in this combination
1341  // [] - mapped with an array section
1342  // byref - should be mapped by reference
1343  // byval - should be mapped by value
1344  // null - initialize a local variable to null on the device
1345  //
1346  // Observations:
1347  // - All scalar declarations that show up in a map clause have to be passed
1348  // by reference, because they may have been mapped in the enclosing data
1349  // environment.
1350  // - If the scalar value does not fit the size of uintptr, it has to be
1351  // passed by reference, regardless the result in the table above.
1352  // - For pointers mapped by value that have either an implicit map or an
1353  // array section, the runtime library may pass the NULL value to the
1354  // device instead of the value passed to it by the compiler.
1355 
1356  if (Ty->isReferenceType())
1357  Ty = Ty->castAs<ReferenceType>()->getPointeeType();
1358 
1359  // Locate map clauses and see if the variable being captured is referred to
1360  // in any of those clauses. Here we only care about variables, not fields,
1361  // because fields are part of aggregates.
1362  bool IsVariableUsedInMapClause = false;
1363  bool IsVariableAssociatedWithSection = false;
1364 
1365  DSAStack->checkMappableExprComponentListsForDeclAtLevel(
1366  D, Level,
1367  [&IsVariableUsedInMapClause, &IsVariableAssociatedWithSection, D](
1369  MapExprComponents,
1370  OpenMPClauseKind WhereFoundClauseKind) {
1371  // Only the map clause information influences how a variable is
1372  // captured. E.g. is_device_ptr does not require changing the default
1373  // behavior.
1374  if (WhereFoundClauseKind != OMPC_map)
1375  return false;
1376 
1377  auto EI = MapExprComponents.rbegin();
1378  auto EE = MapExprComponents.rend();
1379 
1380  assert(EI != EE && "Invalid map expression!");
1381 
1382  if (isa<DeclRefExpr>(EI->getAssociatedExpression()))
1383  IsVariableUsedInMapClause |= EI->getAssociatedDeclaration() == D;
1384 
1385  ++EI;
1386  if (EI == EE)
1387  return false;
1388 
1389  if (isa<ArraySubscriptExpr>(EI->getAssociatedExpression()) ||
1390  isa<OMPArraySectionExpr>(EI->getAssociatedExpression()) ||
1391  isa<MemberExpr>(EI->getAssociatedExpression())) {
1392  IsVariableAssociatedWithSection = true;
1393  // There is nothing more we need to know about this variable.
1394  return true;
1395  }
1396 
1397  // Keep looking for more map info.
1398  return false;
1399  });
1400 
1401  if (IsVariableUsedInMapClause) {
1402  // If variable is identified in a map clause it is always captured by
1403  // reference except if it is a pointer that is dereferenced somehow.
1404  IsByRef = !(Ty->isPointerType() && IsVariableAssociatedWithSection);
1405  } else {
1406  // By default, all the data that has a scalar type is mapped by copy
1407  // (except for reduction variables).
1408  IsByRef =
1409  !Ty->isScalarType() ||
1410  DSAStack->getDefaultDMAAtLevel(Level) == DMA_tofrom_scalar ||
1411  DSAStack->hasExplicitDSA(
1412  D, [](OpenMPClauseKind K) { return K == OMPC_reduction; }, Level);
1413  }
1414  }
1415 
1416  if (IsByRef && Ty.getNonReferenceType()->isScalarType()) {
1417  IsByRef =
1418  !DSAStack->hasExplicitDSA(
1419  D,
1420  [](OpenMPClauseKind K) -> bool { return K == OMPC_firstprivate; },
1421  Level, /*NotLastprivate=*/true) &&
1422  // If the variable is artificial and must be captured by value - try to
1423  // capture by value.
1424  !(isa<OMPCapturedExprDecl>(D) && !D->hasAttr<OMPCaptureNoInitAttr>() &&
1425  !cast<OMPCapturedExprDecl>(D)->getInit()->isGLValue());
1426  }
1427 
1428  // When passing data by copy, we need to make sure it fits the uintptr size
1429  // and alignment, because the runtime library only deals with uintptr types.
1430  // If it does not fit the uintptr size, we need to pass the data by reference
1431  // instead.
1432  if (!IsByRef &&
1433  (Ctx.getTypeSizeInChars(Ty) >
1434  Ctx.getTypeSizeInChars(Ctx.getUIntPtrType()) ||
1435  Ctx.getDeclAlign(D) > Ctx.getTypeAlignInChars(Ctx.getUIntPtrType()))) {
1436  IsByRef = true;
1437  }
1438 
1439  return IsByRef;
1440 }
1441 
1442 unsigned Sema::getOpenMPNestingLevel() const {
1443  assert(getLangOpts().OpenMP);
1444  return DSAStack->getNestingLevel();
1445 }
1446 
1448  return (isOpenMPTargetExecutionDirective(DSAStack->getCurrentDirective()) &&
1449  !DSAStack->isClauseParsingMode()) ||
1450  DSAStack->hasDirective(
1452  SourceLocation) -> bool {
1453  return isOpenMPTargetExecutionDirective(K);
1454  },
1455  false);
1456 }
1457 
1459  assert(LangOpts.OpenMP && "OpenMP is not allowed");
1460  D = getCanonicalDecl(D);
1461 
1462  // If we are attempting to capture a global variable in a directive with
1463  // 'target' we return true so that this global is also mapped to the device.
1464  //
1465  auto *VD = dyn_cast<VarDecl>(D);
1466  if (VD && !VD->hasLocalStorage()) {
1467  if (isInOpenMPDeclareTargetContext() &&
1468  (getCurCapturedRegion() || getCurBlock() || getCurLambda())) {
1469  // Try to mark variable as declare target if it is used in capturing
1470  // regions.
1471  if (!OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
1472  checkDeclIsAllowedInOpenMPTarget(nullptr, VD);
1473  return nullptr;
1474  } else if (isInOpenMPTargetExecutionDirective()) {
1475  // If the declaration is enclosed in a 'declare target' directive,
1476  // then it should not be captured.
1477  //
1478  if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
1479  return nullptr;
1480  return VD;
1481  }
1482  }
1483 
1484  if (DSAStack->getCurrentDirective() != OMPD_unknown &&
1485  (!DSAStack->isClauseParsingMode() ||
1486  DSAStack->getParentDirective() != OMPD_unknown)) {
1487  auto &&Info = DSAStack->isLoopControlVariable(D);
1488  if (Info.first ||
1489  (VD && VD->hasLocalStorage() &&
1490  isParallelOrTaskRegion(DSAStack->getCurrentDirective())) ||
1491  (VD && DSAStack->isForceVarCapturing()))
1492  return VD ? VD : Info.second;
1493  DSAStackTy::DSAVarData DVarPrivate =
1494  DSAStack->getTopDSA(D, DSAStack->isClauseParsingMode());
1495  if (DVarPrivate.CKind != OMPC_unknown && isOpenMPPrivate(DVarPrivate.CKind))
1496  return VD ? VD : cast<VarDecl>(DVarPrivate.PrivateCopy->getDecl());
1497  DVarPrivate = DSAStack->hasDSA(D, isOpenMPPrivate,
1498  [](OpenMPDirectiveKind) { return true; },
1499  DSAStack->isClauseParsingMode());
1500  if (DVarPrivate.CKind != OMPC_unknown)
1501  return VD ? VD : cast<VarDecl>(DVarPrivate.PrivateCopy->getDecl());
1502  }
1503  return nullptr;
1504 }
1505 
1506 void Sema::adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
1507  unsigned Level) const {
1509  getOpenMPCaptureRegions(Regions, DSAStack->getDirective(Level));
1510  FunctionScopesIndex -= Regions.size();
1511 }
1512 
1513 bool Sema::isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const {
1514  assert(LangOpts.OpenMP && "OpenMP is not allowed");
1515  return DSAStack->hasExplicitDSA(
1516  D, [](OpenMPClauseKind K) { return K == OMPC_private; }, Level) ||
1517  (DSAStack->isClauseParsingMode() &&
1518  DSAStack->getClauseParsingMode() == OMPC_private) ||
1519  // Consider taskgroup reduction descriptor variable a private to avoid
1520  // possible capture in the region.
1521  (DSAStack->hasExplicitDirective(
1522  [](OpenMPDirectiveKind K) { return K == OMPD_taskgroup; },
1523  Level) &&
1524  DSAStack->isTaskgroupReductionRef(D, Level));
1525 }
1526 
1528  unsigned Level) {
1529  assert(LangOpts.OpenMP && "OpenMP is not allowed");
1530  D = getCanonicalDecl(D);
1532  for (unsigned I = DSAStack->getNestingLevel() + 1; I > Level; --I) {
1533  const unsigned NewLevel = I - 1;
1534  if (DSAStack->hasExplicitDSA(D,
1535  [&OMPC](const OpenMPClauseKind K) {
1536  if (isOpenMPPrivate(K)) {
1537  OMPC = K;
1538  return true;
1539  }
1540  return false;
1541  },
1542  NewLevel))
1543  break;
1544  if (DSAStack->checkMappableExprComponentListsForDeclAtLevel(
1545  D, NewLevel,
1547  OpenMPClauseKind) { return true; })) {
1548  OMPC = OMPC_map;
1549  break;
1550  }
1551  if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective,
1552  NewLevel)) {
1553  OMPC = OMPC_map;
1554  if (D->getType()->isScalarType() &&
1555  DSAStack->getDefaultDMAAtLevel(NewLevel) !=
1556  DefaultMapAttributes::DMA_tofrom_scalar)
1557  OMPC = OMPC_firstprivate;
1558  break;
1559  }
1560  }
1561  if (OMPC != OMPC_unknown)
1562  FD->addAttr(OMPCaptureKindAttr::CreateImplicit(Context, OMPC));
1563 }
1564 
1566  unsigned Level) const {
1567  assert(LangOpts.OpenMP && "OpenMP is not allowed");
1568  // Return true if the current level is no longer enclosed in a target region.
1569 
1570  const auto *VD = dyn_cast<VarDecl>(D);
1571  return VD && !VD->hasLocalStorage() &&
1572  DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective,
1573  Level);
1574 }
1575 
/// Tear down the stack allocated by InitDataSharingAttributesStack().
void Sema::DestroyDataSharingAttributesStack() { delete DSAStack; }
1577 
1579  const DeclarationNameInfo &DirName,
1580  Scope *CurScope, SourceLocation Loc) {
1581  DSAStack->push(DKind, DirName, CurScope, Loc);
1582  PushExpressionEvaluationContext(
1583  ExpressionEvaluationContext::PotentiallyEvaluated);
1584 }
1585 
1587  DSAStack->setClauseParsingMode(K);
1588 }
1589 
1591  DSAStack->setClauseParsingMode(/*K=*/OMPC_unknown);
1592 }
1593 
/// Finish semantic analysis of the directive started by
/// StartOpenMPDSABlock(): build private copies for lastprivate items, pop
/// the region and leave the expression evaluation context.
void Sema::EndOpenMPDSABlock(Stmt *CurDirective) {
  // OpenMP [2.14.3.5, Restrictions, C/C++, p.1]
  // A variable of class type (or array thereof) that appears in a lastprivate
  // clause requires an accessible, unambiguous default constructor for the
  // class type, unless the list item is also specified in a firstprivate
  // clause.
  if (const auto *D = dyn_cast_or_null<OMPExecutableDirective>(CurDirective)) {
    for (OMPClause *C : D->clauses()) {
      if (auto *Clause = dyn_cast<OMPLastprivateClause>(C)) {
        SmallVector<Expr *, 8> PrivateCopies;
        for (Expr *DE : Clause->varlists()) {
          // Dependent items cannot be processed yet; keep a null placeholder.
          if (DE->isValueDependent() || DE->isTypeDependent()) {
            PrivateCopies.push_back(nullptr);
            continue;
          }
          auto *DRE = cast<DeclRefExpr>(DE->IgnoreParens());
          auto *VD = cast<VarDecl>(DRE->getDecl());
          QualType Type = VD->getType().getNonReferenceType();
          const DSAStackTy::DSAVarData DVar =
              DSAStack->getTopDSA(VD, /*FromParent=*/false);
          if (DVar.CKind == OMPC_lastprivate) {
            // Generate helper private variable and initialize it with the
            // default value. The address of the original variable is replaced
            // by the address of the new private variable in CodeGen. This new
            // variable is not added to IdResolver, so the code in the OpenMP
            // region uses original variable for proper diagnostics.
            VarDecl *VDPrivate = buildVarDecl(
                *this, DE->getExprLoc(), Type.getUnqualifiedType(),
                VD->getName(), VD->hasAttrs() ? &VD->getAttrs() : nullptr, DRE);
            ActOnUninitializedDecl(VDPrivate);
            if (VDPrivate->isInvalidDecl())
              continue;
            PrivateCopies.push_back(buildDeclRefExpr(
                *this, VDPrivate, DE->getType(), DE->getExprLoc()));
          } else {
            // The variable is also a firstprivate, so initialization sequence
            // for private copy is generated already.
            PrivateCopies.push_back(nullptr);
          }
        }
        // Set initializers to private copies if no errors were found.
        if (PrivateCopies.size() == Clause->varlist_size())
          Clause->setPrivateCopies(PrivateCopies);
      }
    }
  }

  DSAStack->pop();
  DiscardCleanupsInEvaluationContext();
  PopExpressionEvaluationContext();
}
1645 
1646 static bool FinishOpenMPLinearClause(OMPLinearClause &Clause, DeclRefExpr *IV,
1647  Expr *NumIterations, Sema &SemaRef,
1648  Scope *S, DSAStackTy *Stack);
1649 
1650 namespace {
1651 
1652 class VarDeclFilterCCC final : public CorrectionCandidateCallback {
1653 private:
1654  Sema &SemaRef;
1655 
1656 public:
1657  explicit VarDeclFilterCCC(Sema &S) : SemaRef(S) {}
1658  bool ValidateCandidate(const TypoCorrection &Candidate) override {
1659  NamedDecl *ND = Candidate.getCorrectionDecl();
1660  if (const auto *VD = dyn_cast_or_null<VarDecl>(ND)) {
1661  return VD->hasGlobalStorage() &&
1662  SemaRef.isDeclInScope(ND, SemaRef.getCurLexicalContext(),
1663  SemaRef.getCurScope());
1664  }
1665  return false;
1666  }
1667 };
1668 
1669 class VarOrFuncDeclFilterCCC final : public CorrectionCandidateCallback {
1670 private:
1671  Sema &SemaRef;
1672 
1673 public:
1674  explicit VarOrFuncDeclFilterCCC(Sema &S) : SemaRef(S) {}
1675  bool ValidateCandidate(const TypoCorrection &Candidate) override {
1676  NamedDecl *ND = Candidate.getCorrectionDecl();
1677  if (ND && (isa<VarDecl>(ND) || isa<FunctionDecl>(ND))) {
1678  return SemaRef.isDeclInScope(ND, SemaRef.getCurLexicalContext(),
1679  SemaRef.getCurScope());
1680  }
1681  return false;
1682  }
1683 };
1684 
1685 } // namespace
1686 
1688  CXXScopeSpec &ScopeSpec,
1689  const DeclarationNameInfo &Id) {
1690  LookupResult Lookup(*this, Id, LookupOrdinaryName);
1691  LookupParsedName(Lookup, CurScope, &ScopeSpec, true);
1692 
1693  if (Lookup.isAmbiguous())
1694  return ExprError();
1695 
1696  VarDecl *VD;
1697  if (!Lookup.isSingleResult()) {
1698  if (TypoCorrection Corrected = CorrectTypo(
1699  Id, LookupOrdinaryName, CurScope, nullptr,
1700  llvm::make_unique<VarDeclFilterCCC>(*this), CTK_ErrorRecovery)) {
1701  diagnoseTypo(Corrected,
1702  PDiag(Lookup.empty()
1703  ? diag::err_undeclared_var_use_suggest
1704  : diag::err_omp_expected_var_arg_suggest)
1705  << Id.getName());
1706  VD = Corrected.getCorrectionDeclAs<VarDecl>();
1707  } else {
1708  Diag(Id.getLoc(), Lookup.empty() ? diag::err_undeclared_var_use
1709  : diag::err_omp_expected_var_arg)
1710  << Id.getName();
1711  return ExprError();
1712  }
1713  } else if (!(VD = Lookup.getAsSingle<VarDecl>())) {
1714  Diag(Id.getLoc(), diag::err_omp_expected_var_arg) << Id.getName();
1715  Diag(Lookup.getFoundDecl()->getLocation(), diag::note_declared_at);
1716  return ExprError();
1717  }
1718  Lookup.suppressDiagnostics();
1719 
1720  // OpenMP [2.9.2, Syntax, C/C++]
1721  // Variables must be file-scope, namespace-scope, or static block-scope.
1722  if (!VD->hasGlobalStorage()) {
1723  Diag(Id.getLoc(), diag::err_omp_global_var_arg)
1724  << getOpenMPDirectiveName(OMPD_threadprivate) << !VD->isStaticLocal();
1725  bool IsDecl =
1727  Diag(VD->getLocation(),
1728  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
1729  << VD;
1730  return ExprError();
1731  }
1732 
1733  VarDecl *CanonicalVD = VD->getCanonicalDecl();
1734  NamedDecl *ND = CanonicalVD;
1735  // OpenMP [2.9.2, Restrictions, C/C++, p.2]
1736  // A threadprivate directive for file-scope variables must appear outside
1737  // any definition or declaration.
1738  if (CanonicalVD->getDeclContext()->isTranslationUnit() &&
1739  !getCurLexicalContext()->isTranslationUnit()) {
1740  Diag(Id.getLoc(), diag::err_omp_var_scope)
1741  << getOpenMPDirectiveName(OMPD_threadprivate) << VD;
1742  bool IsDecl =
1744  Diag(VD->getLocation(),
1745  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
1746  << VD;
1747  return ExprError();
1748  }
1749  // OpenMP [2.9.2, Restrictions, C/C++, p.3]
1750  // A threadprivate directive for static class member variables must appear
1751  // in the class definition, in the same scope in which the member
1752  // variables are declared.
1753  if (CanonicalVD->isStaticDataMember() &&
1754  !CanonicalVD->getDeclContext()->Equals(getCurLexicalContext())) {
1755  Diag(Id.getLoc(), diag::err_omp_var_scope)
1756  << getOpenMPDirectiveName(OMPD_threadprivate) << VD;
1757  bool IsDecl =
1759  Diag(VD->getLocation(),
1760  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
1761  << VD;
1762  return ExprError();
1763  }
1764  // OpenMP [2.9.2, Restrictions, C/C++, p.4]
1765  // A threadprivate directive for namespace-scope variables must appear
1766  // outside any definition or declaration other than the namespace
1767  // definition itself.
1768  if (CanonicalVD->getDeclContext()->isNamespace() &&
1769  (!getCurLexicalContext()->isFileContext() ||
1770  !getCurLexicalContext()->Encloses(CanonicalVD->getDeclContext()))) {
1771  Diag(Id.getLoc(), diag::err_omp_var_scope)
1772  << getOpenMPDirectiveName(OMPD_threadprivate) << VD;
1773  bool IsDecl =
1775  Diag(VD->getLocation(),
1776  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
1777  << VD;
1778  return ExprError();
1779  }
1780  // OpenMP [2.9.2, Restrictions, C/C++, p.6]
1781  // A threadprivate directive for static block-scope variables must appear
1782  // in the scope of the variable and not in a nested scope.
1783  if (CanonicalVD->isStaticLocal() && CurScope &&
1784  !isDeclInScope(ND, getCurLexicalContext(), CurScope)) {
1785  Diag(Id.getLoc(), diag::err_omp_var_scope)
1786  << getOpenMPDirectiveName(OMPD_threadprivate) << VD;
1787  bool IsDecl =
1789  Diag(VD->getLocation(),
1790  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
1791  << VD;
1792  return ExprError();
1793  }
1794 
1795  // OpenMP [2.9.2, Restrictions, C/C++, p.2-6]
1796  // A threadprivate directive must lexically precede all references to any
1797  // of the variables in its list.
1798  if (VD->isUsed() && !DSAStack->isThreadPrivate(VD)) {
1799  Diag(Id.getLoc(), diag::err_omp_var_used)
1800  << getOpenMPDirectiveName(OMPD_threadprivate) << VD;
1801  return ExprError();
1802  }
1803 
1804  QualType ExprType = VD->getType().getNonReferenceType();
1805  return DeclRefExpr::Create(Context, NestedNameSpecifierLoc(),
1806  SourceLocation(), VD,
1807  /*RefersToEnclosingVariableOrCapture=*/false,
1808  Id.getLoc(), ExprType, VK_LValue);
1809 }
1810 
1813  ArrayRef<Expr *> VarList) {
1814  if (OMPThreadPrivateDecl *D = CheckOMPThreadPrivateDecl(Loc, VarList)) {
1815  CurContext->addDecl(D);
1816  return DeclGroupPtrTy::make(DeclGroupRef(D));
1817  }
1818  return nullptr;
1819 }
1820 
1821 namespace {
1822 class LocalVarRefChecker final
1823  : public ConstStmtVisitor<LocalVarRefChecker, bool> {
1824  Sema &SemaRef;
1825 
1826 public:
1827  bool VisitDeclRefExpr(const DeclRefExpr *E) {
1828  if (const auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
1829  if (VD->hasLocalStorage()) {
1830  SemaRef.Diag(E->getBeginLoc(),
1831  diag::err_omp_local_var_in_threadprivate_init)
1832  << E->getSourceRange();
1833  SemaRef.Diag(VD->getLocation(), diag::note_defined_here)
1834  << VD << VD->getSourceRange();
1835  return true;
1836  }
1837  }
1838  return false;
1839  }
1840  bool VisitStmt(const Stmt *S) {
1841  for (const Stmt *Child : S->children()) {
1842  if (Child && Visit(Child))
1843  return true;
1844  }
1845  return false;
1846  }
1847  explicit LocalVarRefChecker(Sema &SemaRef) : SemaRef(SemaRef) {}
1848 };
1849 } // namespace
1850 
1854  for (Expr *RefExpr : VarList) {
1855  auto *DE = cast<DeclRefExpr>(RefExpr);
1856  auto *VD = cast<VarDecl>(DE->getDecl());
1857  SourceLocation ILoc = DE->getExprLoc();
1858 
1859  // Mark variable as used.
1860  VD->setReferenced();
1861  VD->markUsed(Context);
1862 
1863  QualType QType = VD->getType();
1864  if (QType->isDependentType() || QType->isInstantiationDependentType()) {
1865  // It will be analyzed later.
1866  Vars.push_back(DE);
1867  continue;
1868  }
1869 
1870  // OpenMP [2.9.2, Restrictions, C/C++, p.10]
1871  // A threadprivate variable must not have an incomplete type.
1872  if (RequireCompleteType(ILoc, VD->getType(),
1873  diag::err_omp_threadprivate_incomplete_type)) {
1874  continue;
1875  }
1876 
1877  // OpenMP [2.9.2, Restrictions, C/C++, p.10]
1878  // A threadprivate variable must not have a reference type.
1879  if (VD->getType()->isReferenceType()) {
1880  Diag(ILoc, diag::err_omp_ref_type_arg)
1881  << getOpenMPDirectiveName(OMPD_threadprivate) << VD->getType();
1882  bool IsDecl =
1883  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
1884  Diag(VD->getLocation(),
1885  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
1886  << VD;
1887  continue;
1888  }
1889 
1890  // Check if this is a TLS variable. If TLS is not being supported, produce
1891  // the corresponding diagnostic.
1892  if ((VD->getTLSKind() != VarDecl::TLS_None &&
1893  !(VD->hasAttr<OMPThreadPrivateDeclAttr>() &&
1894  getLangOpts().OpenMPUseTLS &&
1895  getASTContext().getTargetInfo().isTLSSupported())) ||
1896  (VD->getStorageClass() == SC_Register && VD->hasAttr<AsmLabelAttr>() &&
1897  !VD->isLocalVarDecl())) {
1898  Diag(ILoc, diag::err_omp_var_thread_local)
1899  << VD << ((VD->getTLSKind() != VarDecl::TLS_None) ? 0 : 1);
1900  bool IsDecl =
1901  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
1902  Diag(VD->getLocation(),
1903  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
1904  << VD;
1905  continue;
1906  }
1907 
1908  // Check if initial value of threadprivate variable reference variable with
1909  // local storage (it is not supported by runtime).
1910  if (const Expr *Init = VD->getAnyInitializer()) {
1911  LocalVarRefChecker Checker(*this);
1912  if (Checker.Visit(Init))
1913  continue;
1914  }
1915 
1916  Vars.push_back(RefExpr);
1917  DSAStack->addDSA(VD, DE, OMPC_threadprivate);
1918  VD->addAttr(OMPThreadPrivateDeclAttr::CreateImplicit(
1919  Context, SourceRange(Loc, Loc)));
1920  if (ASTMutationListener *ML = Context.getASTMutationListener())
1921  ML->DeclarationMarkedOpenMPThreadPrivate(VD);
1922  }
1923  OMPThreadPrivateDecl *D = nullptr;
1924  if (!Vars.empty()) {
1925  D = OMPThreadPrivateDecl::Create(Context, getCurLexicalContext(), Loc,
1926  Vars);
1927  D->setAccess(AS_public);
1928  }
1929  return D;
1930 }
1931 
1934  ArrayRef<OMPClause *> ClauseList) {
1935  OMPRequiresDecl *D = nullptr;
1936  if (!CurContext->isFileContext()) {
1937  Diag(Loc, diag::err_omp_invalid_scope) << "requires";
1938  } else {
1939  D = CheckOMPRequiresDecl(Loc, ClauseList);
1940  if (D) {
1941  CurContext->addDecl(D);
1942  DSAStack->addRequiresDecl(D);
1943  }
1944  }
1945  return DeclGroupPtrTy::make(DeclGroupRef(D));
1946 }
1947 
1949  ArrayRef<OMPClause *> ClauseList) {
1950  if (!DSAStack->hasDuplicateRequiresClause(ClauseList))
1951  return OMPRequiresDecl::Create(Context, getCurLexicalContext(), Loc,
1952  ClauseList);
1953  return nullptr;
1954 }
1955 
1956 static void reportOriginalDsa(Sema &SemaRef, const DSAStackTy *Stack,
1957  const ValueDecl *D,
1958  const DSAStackTy::DSAVarData &DVar,
1959  bool IsLoopIterVar = false) {
1960  if (DVar.RefExpr) {
1961  SemaRef.Diag(DVar.RefExpr->getExprLoc(), diag::note_omp_explicit_dsa)
1962  << getOpenMPClauseName(DVar.CKind);
1963  return;
1964  }
1965  enum {
1966  PDSA_StaticMemberShared,
1967  PDSA_StaticLocalVarShared,
1968  PDSA_LoopIterVarPrivate,
1969  PDSA_LoopIterVarLinear,
1970  PDSA_LoopIterVarLastprivate,
1971  PDSA_ConstVarShared,
1972  PDSA_GlobalVarShared,
1973  PDSA_TaskVarFirstprivate,
1974  PDSA_LocalVarPrivate,
1975  PDSA_Implicit
1976  } Reason = PDSA_Implicit;
1977  bool ReportHint = false;
1978  auto ReportLoc = D->getLocation();
1979  auto *VD = dyn_cast<VarDecl>(D);
1980  if (IsLoopIterVar) {
1981  if (DVar.CKind == OMPC_private)
1982  Reason = PDSA_LoopIterVarPrivate;
1983  else if (DVar.CKind == OMPC_lastprivate)
1984  Reason = PDSA_LoopIterVarLastprivate;
1985  else
1986  Reason = PDSA_LoopIterVarLinear;
1987  } else if (isOpenMPTaskingDirective(DVar.DKind) &&
1988  DVar.CKind == OMPC_firstprivate) {
1989  Reason = PDSA_TaskVarFirstprivate;
1990  ReportLoc = DVar.ImplicitDSALoc;
1991  } else if (VD && VD->isStaticLocal())
1992  Reason = PDSA_StaticLocalVarShared;
1993  else if (VD && VD->isStaticDataMember())
1994  Reason = PDSA_StaticMemberShared;
1995  else if (VD && VD->isFileVarDecl())
1996  Reason = PDSA_GlobalVarShared;
1997  else if (D->getType().isConstant(SemaRef.getASTContext()))
1998  Reason = PDSA_ConstVarShared;
1999  else if (VD && VD->isLocalVarDecl() && DVar.CKind == OMPC_private) {
2000  ReportHint = true;
2001  Reason = PDSA_LocalVarPrivate;
2002  }
2003  if (Reason != PDSA_Implicit) {
2004  SemaRef.Diag(ReportLoc, diag::note_omp_predetermined_dsa)
2005  << Reason << ReportHint
2006  << getOpenMPDirectiveName(Stack->getCurrentDirective());
2007  } else if (DVar.ImplicitDSALoc.isValid()) {
2008  SemaRef.Diag(DVar.ImplicitDSALoc, diag::note_omp_implicit_dsa)
2009  << getOpenMPClauseName(DVar.CKind);
2010  }
2011 }
2012 
2013 namespace {
2014 class DSAAttrChecker final : public StmtVisitor<DSAAttrChecker, void> {
2015  DSAStackTy *Stack;
2016  Sema &SemaRef;
2017  bool ErrorFound = false;
2018  CapturedStmt *CS = nullptr;
2019  llvm::SmallVector<Expr *, 4> ImplicitFirstprivate;
2020  llvm::SmallVector<Expr *, 4> ImplicitMap;
2021  Sema::VarsWithInheritedDSAType VarsWithInheritedDSA;
2022  llvm::SmallDenseSet<const ValueDecl *, 4> ImplicitDeclarations;
2023 
2024 public:
2025  void VisitDeclRefExpr(DeclRefExpr *E) {
2026  if (E->isTypeDependent() || E->isValueDependent() ||
2028  return;
2029  if (auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
2030  VD = VD->getCanonicalDecl();
2031  // Skip internally declared variables.
2032  if (VD->hasLocalStorage() && !CS->capturesVariable(VD))
2033  return;
2034 
2035  DSAStackTy::DSAVarData DVar = Stack->getTopDSA(VD, /*FromParent=*/false);
2036  // Check if the variable has explicit DSA set and stop analysis if it so.
2037  if (DVar.RefExpr || !ImplicitDeclarations.insert(VD).second)
2038  return;
2039 
2040  // Skip internally declared static variables.
2042  OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
2043  if (VD->hasGlobalStorage() && !CS->capturesVariable(VD) &&
2044  (!Res || *Res != OMPDeclareTargetDeclAttr::MT_Link))
2045  return;
2046 
2047  SourceLocation ELoc = E->getExprLoc();
2048  OpenMPDirectiveKind DKind = Stack->getCurrentDirective();
2049  // The default(none) clause requires that each variable that is referenced
2050  // in the construct, and does not have a predetermined data-sharing
2051  // attribute, must have its data-sharing attribute explicitly determined
2052  // by being listed in a data-sharing attribute clause.
2053  if (DVar.CKind == OMPC_unknown && Stack->getDefaultDSA() == DSA_none &&
2054  isParallelOrTaskRegion(DKind) &&
2055  VarsWithInheritedDSA.count(VD) == 0) {
2056  VarsWithInheritedDSA[VD] = E;
2057  return;
2058  }
2059 
2060  if (isOpenMPTargetExecutionDirective(DKind) &&
2061  !Stack->isLoopControlVariable(VD).first) {
2062  if (!Stack->checkMappableExprComponentListsForDecl(
2063  VD, /*CurrentRegionOnly=*/true,
2065  StackComponents,
2066  OpenMPClauseKind) {
2067  // Variable is used if it has been marked as an array, array
2068  // section or the variable iself.
2069  return StackComponents.size() == 1 ||
2070  std::all_of(
2071  std::next(StackComponents.rbegin()),
2072  StackComponents.rend(),
2073  [](const OMPClauseMappableExprCommon::
2074  MappableComponent &MC) {
2075  return MC.getAssociatedDeclaration() ==
2076  nullptr &&
2077  (isa<OMPArraySectionExpr>(
2078  MC.getAssociatedExpression()) ||
2079  isa<ArraySubscriptExpr>(
2080  MC.getAssociatedExpression()));
2081  });
2082  })) {
2083  bool IsFirstprivate = false;
2084  // By default lambdas are captured as firstprivates.
2085  if (const auto *RD =
2086  VD->getType().getNonReferenceType()->getAsCXXRecordDecl())
2087  IsFirstprivate = RD->isLambda();
2088  IsFirstprivate =
2089  IsFirstprivate ||
2090  (VD->getType().getNonReferenceType()->isScalarType() &&
2091  Stack->getDefaultDMA() != DMA_tofrom_scalar && !Res);
2092  if (IsFirstprivate)
2093  ImplicitFirstprivate.emplace_back(E);
2094  else
2095  ImplicitMap.emplace_back(E);
2096  return;
2097  }
2098  }
2099 
2100  // OpenMP [2.9.3.6, Restrictions, p.2]
2101  // A list item that appears in a reduction clause of the innermost
2102  // enclosing worksharing or parallel construct may not be accessed in an
2103  // explicit task.
2104  DVar = Stack->hasInnermostDSA(
2105  VD, [](OpenMPClauseKind C) { return C == OMPC_reduction; },
2106  [](OpenMPDirectiveKind K) {
2107  return isOpenMPParallelDirective(K) ||
2109  },
2110  /*FromParent=*/true);
2111  if (isOpenMPTaskingDirective(DKind) && DVar.CKind == OMPC_reduction) {
2112  ErrorFound = true;
2113  SemaRef.Diag(ELoc, diag::err_omp_reduction_in_task);
2114  reportOriginalDsa(SemaRef, Stack, VD, DVar);
2115  return;
2116  }
2117 
2118  // Define implicit data-sharing attributes for task.
2119  DVar = Stack->getImplicitDSA(VD, /*FromParent=*/false);
2120  if (isOpenMPTaskingDirective(DKind) && DVar.CKind != OMPC_shared &&
2121  !Stack->isLoopControlVariable(VD).first)
2122  ImplicitFirstprivate.push_back(E);
2123  }
2124  }
2125  void VisitMemberExpr(MemberExpr *E) {
2126  if (E->isTypeDependent() || E->isValueDependent() ||
2128  return;
2129  auto *FD = dyn_cast<FieldDecl>(E->getMemberDecl());
2130  OpenMPDirectiveKind DKind = Stack->getCurrentDirective();
2131  if (isa<CXXThisExpr>(E->getBase()->IgnoreParens())) {
2132  if (!FD)
2133  return;
2134  DSAStackTy::DSAVarData DVar = Stack->getTopDSA(FD, /*FromParent=*/false);
2135  // Check if the variable has explicit DSA set and stop analysis if it
2136  // so.
2137  if (DVar.RefExpr || !ImplicitDeclarations.insert(FD).second)
2138  return;
2139 
2140  if (isOpenMPTargetExecutionDirective(DKind) &&
2141  !Stack->isLoopControlVariable(FD).first &&
2142  !Stack->checkMappableExprComponentListsForDecl(
2143  FD, /*CurrentRegionOnly=*/true,
2145  StackComponents,
2146  OpenMPClauseKind) {
2147  return isa<CXXThisExpr>(
2148  cast<MemberExpr>(
2149  StackComponents.back().getAssociatedExpression())
2150  ->getBase()
2151  ->IgnoreParens());
2152  })) {
2153  // OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, C/C++, p.3]
2154  // A bit-field cannot appear in a map clause.
2155  //
2156  if (FD->isBitField())
2157  return;
2158  ImplicitMap.emplace_back(E);
2159  return;
2160  }
2161 
2162  SourceLocation ELoc = E->getExprLoc();
2163  // OpenMP [2.9.3.6, Restrictions, p.2]
2164  // A list item that appears in a reduction clause of the innermost
2165  // enclosing worksharing or parallel construct may not be accessed in
2166  // an explicit task.
2167  DVar = Stack->hasInnermostDSA(
2168  FD, [](OpenMPClauseKind C) { return C == OMPC_reduction; },
2169  [](OpenMPDirectiveKind K) {
2170  return isOpenMPParallelDirective(K) ||
2172  },
2173  /*FromParent=*/true);
2174  if (isOpenMPTaskingDirective(DKind) && DVar.CKind == OMPC_reduction) {
2175  ErrorFound = true;
2176  SemaRef.Diag(ELoc, diag::err_omp_reduction_in_task);
2177  reportOriginalDsa(SemaRef, Stack, FD, DVar);
2178  return;
2179  }
2180 
2181  // Define implicit data-sharing attributes for task.
2182  DVar = Stack->getImplicitDSA(FD, /*FromParent=*/false);
2183  if (isOpenMPTaskingDirective(DKind) && DVar.CKind != OMPC_shared &&
2184  !Stack->isLoopControlVariable(FD).first)
2185  ImplicitFirstprivate.push_back(E);
2186  return;
2187  }
2188  if (isOpenMPTargetExecutionDirective(DKind)) {
2190  if (!checkMapClauseExpressionBase(SemaRef, E, CurComponents, OMPC_map,
2191  /*NoDiagnose=*/true))
2192  return;
2193  const auto *VD = cast<ValueDecl>(
2194  CurComponents.back().getAssociatedDeclaration()->getCanonicalDecl());
2195  if (!Stack->checkMappableExprComponentListsForDecl(
2196  VD, /*CurrentRegionOnly=*/true,
2197  [&CurComponents](
2199  StackComponents,
2200  OpenMPClauseKind) {
2201  auto CCI = CurComponents.rbegin();
2202  auto CCE = CurComponents.rend();
2203  for (const auto &SC : llvm::reverse(StackComponents)) {
2204  // Do both expressions have the same kind?
2205  if (CCI->getAssociatedExpression()->getStmtClass() !=
2206  SC.getAssociatedExpression()->getStmtClass())
2207  if (!(isa<OMPArraySectionExpr>(
2208  SC.getAssociatedExpression()) &&
2209  isa<ArraySubscriptExpr>(
2210  CCI->getAssociatedExpression())))
2211  return false;
2212 
2213  const Decl *CCD = CCI->getAssociatedDeclaration();
2214  const Decl *SCD = SC.getAssociatedDeclaration();
2215  CCD = CCD ? CCD->getCanonicalDecl() : nullptr;
2216  SCD = SCD ? SCD->getCanonicalDecl() : nullptr;
2217  if (SCD != CCD)
2218  return false;
2219  std::advance(CCI, 1);
2220  if (CCI == CCE)
2221  break;
2222  }
2223  return true;
2224  })) {
2225  Visit(E->getBase());
2226  }
2227  } else {
2228  Visit(E->getBase());
2229  }
2230  }
2231  void VisitOMPExecutableDirective(OMPExecutableDirective *S) {
2232  for (OMPClause *C : S->clauses()) {
2233  // Skip analysis of arguments of implicitly defined firstprivate clause
2234  // for task|target directives.
2235  // Skip analysis of arguments of implicitly defined map clause for target
2236  // directives.
2237  if (C && !((isa<OMPFirstprivateClause>(C) || isa<OMPMapClause>(C)) &&
2238  C->isImplicit())) {
2239  for (Stmt *CC : C->children()) {
2240  if (CC)
2241  Visit(CC);
2242  }
2243  }
2244  }
2245  }
2246  void VisitStmt(Stmt *S) {
2247  for (Stmt *C : S->children()) {
2248  if (C && !isa<OMPExecutableDirective>(C))
2249  Visit(C);
2250  }
2251  }
2252 
2253  bool isErrorFound() const { return ErrorFound; }
2254  ArrayRef<Expr *> getImplicitFirstprivate() const {
2255  return ImplicitFirstprivate;
2256  }
2257  ArrayRef<Expr *> getImplicitMap() const { return ImplicitMap; }
2258  const Sema::VarsWithInheritedDSAType &getVarsWithInheritedDSA() const {
2259  return VarsWithInheritedDSA;
2260  }
2261 
2262  DSAAttrChecker(DSAStackTy *S, Sema &SemaRef, CapturedStmt *CS)
2263  : Stack(S), SemaRef(SemaRef), ErrorFound(false), CS(CS) {}
2264 };
2265 } // namespace
2266 
2268  switch (DKind) {
2269  case OMPD_parallel:
2270  case OMPD_parallel_for:
2271  case OMPD_parallel_for_simd:
2272  case OMPD_parallel_sections:
2273  case OMPD_teams:
2274  case OMPD_teams_distribute:
2275  case OMPD_teams_distribute_simd: {
2276  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
2277  QualType KmpInt32PtrTy =
2278  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
2279  Sema::CapturedParamNameType Params[] = {
2280  std::make_pair(".global_tid.", KmpInt32PtrTy),
2281  std::make_pair(".bound_tid.", KmpInt32PtrTy),
2282  std::make_pair(StringRef(), QualType()) // __context with shared vars
2283  };
2284  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2285  Params);
2286  break;
2287  }
2288  case OMPD_target_teams:
2289  case OMPD_target_parallel:
2290  case OMPD_target_parallel_for:
2291  case OMPD_target_parallel_for_simd:
2292  case OMPD_target_teams_distribute:
2293  case OMPD_target_teams_distribute_simd: {
2294  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
2295  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
2296  QualType KmpInt32PtrTy =
2297  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
2298  QualType Args[] = {VoidPtrTy};
2300  EPI.Variadic = true;
2301  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
2302  Sema::CapturedParamNameType Params[] = {
2303  std::make_pair(".global_tid.", KmpInt32Ty),
2304  std::make_pair(".part_id.", KmpInt32PtrTy),
2305  std::make_pair(".privates.", VoidPtrTy),
2306  std::make_pair(
2307  ".copy_fn.",
2308  Context.getPointerType(CopyFnType).withConst().withRestrict()),
2309  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
2310  std::make_pair(StringRef(), QualType()) // __context with shared vars
2311  };
2312  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2313  Params);
2314  // Mark this captured region as inlined, because we don't use outlined
2315  // function directly.
2316  getCurCapturedRegion()->TheCapturedDecl->addAttr(
2317  AlwaysInlineAttr::CreateImplicit(
2318  Context, AlwaysInlineAttr::Keyword_forceinline));
2319  Sema::CapturedParamNameType ParamsTarget[] = {
2320  std::make_pair(StringRef(), QualType()) // __context with shared vars
2321  };
2322  // Start a captured region for 'target' with no implicit parameters.
2323  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2324  ParamsTarget);
2325  Sema::CapturedParamNameType ParamsTeamsOrParallel[] = {
2326  std::make_pair(".global_tid.", KmpInt32PtrTy),
2327  std::make_pair(".bound_tid.", KmpInt32PtrTy),
2328  std::make_pair(StringRef(), QualType()) // __context with shared vars
2329  };
2330  // Start a captured region for 'teams' or 'parallel'. Both regions have
2331  // the same implicit parameters.
2332  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2333  ParamsTeamsOrParallel);
2334  break;
2335  }
2336  case OMPD_target:
2337  case OMPD_target_simd: {
2338  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
2339  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
2340  QualType KmpInt32PtrTy =
2341  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
2342  QualType Args[] = {VoidPtrTy};
2344  EPI.Variadic = true;
2345  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
2346  Sema::CapturedParamNameType Params[] = {
2347  std::make_pair(".global_tid.", KmpInt32Ty),
2348  std::make_pair(".part_id.", KmpInt32PtrTy),
2349  std::make_pair(".privates.", VoidPtrTy),
2350  std::make_pair(
2351  ".copy_fn.",
2352  Context.getPointerType(CopyFnType).withConst().withRestrict()),
2353  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
2354  std::make_pair(StringRef(), QualType()) // __context with shared vars
2355  };
2356  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2357  Params);
2358  // Mark this captured region as inlined, because we don't use outlined
2359  // function directly.
2360  getCurCapturedRegion()->TheCapturedDecl->addAttr(
2361  AlwaysInlineAttr::CreateImplicit(
2362  Context, AlwaysInlineAttr::Keyword_forceinline));
2363  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2364  std::make_pair(StringRef(), QualType()));
2365  break;
2366  }
2367  case OMPD_simd:
2368  case OMPD_for:
2369  case OMPD_for_simd:
2370  case OMPD_sections:
2371  case OMPD_section:
2372  case OMPD_single:
2373  case OMPD_master:
2374  case OMPD_critical:
2375  case OMPD_taskgroup:
2376  case OMPD_distribute:
2377  case OMPD_distribute_simd:
2378  case OMPD_ordered:
2379  case OMPD_atomic:
2380  case OMPD_target_data: {
2381  Sema::CapturedParamNameType Params[] = {
2382  std::make_pair(StringRef(), QualType()) // __context with shared vars
2383  };
2384  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2385  Params);
2386  break;
2387  }
2388  case OMPD_task: {
2389  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
2390  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
2391  QualType KmpInt32PtrTy =
2392  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
2393  QualType Args[] = {VoidPtrTy};
2395  EPI.Variadic = true;
2396  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
2397  Sema::CapturedParamNameType Params[] = {
2398  std::make_pair(".global_tid.", KmpInt32Ty),
2399  std::make_pair(".part_id.", KmpInt32PtrTy),
2400  std::make_pair(".privates.", VoidPtrTy),
2401  std::make_pair(
2402  ".copy_fn.",
2403  Context.getPointerType(CopyFnType).withConst().withRestrict()),
2404  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
2405  std::make_pair(StringRef(), QualType()) // __context with shared vars
2406  };
2407  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2408  Params);
2409  // Mark this captured region as inlined, because we don't use outlined
2410  // function directly.
2411  getCurCapturedRegion()->TheCapturedDecl->addAttr(
2412  AlwaysInlineAttr::CreateImplicit(
2413  Context, AlwaysInlineAttr::Keyword_forceinline));
2414  break;
2415  }
2416  case OMPD_taskloop:
2417  case OMPD_taskloop_simd: {
2418  QualType KmpInt32Ty =
2419  Context.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1)
2420  .withConst();
2421  QualType KmpUInt64Ty =
2422  Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0)
2423  .withConst();
2424  QualType KmpInt64Ty =
2425  Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1)
2426  .withConst();
2427  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
2428  QualType KmpInt32PtrTy =
2429  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
2430  QualType Args[] = {VoidPtrTy};
2432  EPI.Variadic = true;
2433  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
2434  Sema::CapturedParamNameType Params[] = {
2435  std::make_pair(".global_tid.", KmpInt32Ty),
2436  std::make_pair(".part_id.", KmpInt32PtrTy),
2437  std::make_pair(".privates.", VoidPtrTy),
2438  std::make_pair(
2439  ".copy_fn.",
2440  Context.getPointerType(CopyFnType).withConst().withRestrict()),
2441  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
2442  std::make_pair(".lb.", KmpUInt64Ty),
2443  std::make_pair(".ub.", KmpUInt64Ty),
2444  std::make_pair(".st.", KmpInt64Ty),
2445  std::make_pair(".liter.", KmpInt32Ty),
2446  std::make_pair(".reductions.", VoidPtrTy),
2447  std::make_pair(StringRef(), QualType()) // __context with shared vars
2448  };
2449  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2450  Params);
2451  // Mark this captured region as inlined, because we don't use outlined
2452  // function directly.
2453  getCurCapturedRegion()->TheCapturedDecl->addAttr(
2454  AlwaysInlineAttr::CreateImplicit(
2455  Context, AlwaysInlineAttr::Keyword_forceinline));
2456  break;
2457  }
2458  case OMPD_distribute_parallel_for_simd:
2459  case OMPD_distribute_parallel_for: {
2460  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
2461  QualType KmpInt32PtrTy =
2462  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
2463  Sema::CapturedParamNameType Params[] = {
2464  std::make_pair(".global_tid.", KmpInt32PtrTy),
2465  std::make_pair(".bound_tid.", KmpInt32PtrTy),
2466  std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
2467  std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
2468  std::make_pair(StringRef(), QualType()) // __context with shared vars
2469  };
2470  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2471  Params);
2472  break;
2473  }
2474  case OMPD_target_teams_distribute_parallel_for:
2475  case OMPD_target_teams_distribute_parallel_for_simd: {
2476  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
2477  QualType KmpInt32PtrTy =
2478  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
2479  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
2480 
2481  QualType Args[] = {VoidPtrTy};
2483  EPI.Variadic = true;
2484  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
2485  Sema::CapturedParamNameType Params[] = {
2486  std::make_pair(".global_tid.", KmpInt32Ty),
2487  std::make_pair(".part_id.", KmpInt32PtrTy),
2488  std::make_pair(".privates.", VoidPtrTy),
2489  std::make_pair(
2490  ".copy_fn.",
2491  Context.getPointerType(CopyFnType).withConst().withRestrict()),
2492  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
2493  std::make_pair(StringRef(), QualType()) // __context with shared vars
2494  };
2495  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2496  Params);
2497  // Mark this captured region as inlined, because we don't use outlined
2498  // function directly.
2499  getCurCapturedRegion()->TheCapturedDecl->addAttr(
2500  AlwaysInlineAttr::CreateImplicit(
2501  Context, AlwaysInlineAttr::Keyword_forceinline));
2502  Sema::CapturedParamNameType ParamsTarget[] = {
2503  std::make_pair(StringRef(), QualType()) // __context with shared vars
2504  };
2505  // Start a captured region for 'target' with no implicit parameters.
2506  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2507  ParamsTarget);
2508 
2509  Sema::CapturedParamNameType ParamsTeams[] = {
2510  std::make_pair(".global_tid.", KmpInt32PtrTy),
2511  std::make_pair(".bound_tid.", KmpInt32PtrTy),
2512  std::make_pair(StringRef(), QualType()) // __context with shared vars
2513  };
2514  // Start a captured region for 'target' with no implicit parameters.
2515  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2516  ParamsTeams);
2517 
2518  Sema::CapturedParamNameType ParamsParallel[] = {
2519  std::make_pair(".global_tid.", KmpInt32PtrTy),
2520  std::make_pair(".bound_tid.", KmpInt32PtrTy),
2521  std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
2522  std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
2523  std::make_pair(StringRef(), QualType()) // __context with shared vars
2524  };
2525  // Start a captured region for 'teams' or 'parallel'. Both regions have
2526  // the same implicit parameters.
2527  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2528  ParamsParallel);
2529  break;
2530  }
2531 
2532  case OMPD_teams_distribute_parallel_for:
2533  case OMPD_teams_distribute_parallel_for_simd: {
2534  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
2535  QualType KmpInt32PtrTy =
2536  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
2537 
2538  Sema::CapturedParamNameType ParamsTeams[] = {
2539  std::make_pair(".global_tid.", KmpInt32PtrTy),
2540  std::make_pair(".bound_tid.", KmpInt32PtrTy),
2541  std::make_pair(StringRef(), QualType()) // __context with shared vars
2542  };
2543  // Start a captured region for 'target' with no implicit parameters.
2544  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2545  ParamsTeams);
2546 
2547  Sema::CapturedParamNameType ParamsParallel[] = {
2548  std::make_pair(".global_tid.", KmpInt32PtrTy),
2549  std::make_pair(".bound_tid.", KmpInt32PtrTy),
2550  std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
2551  std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
2552  std::make_pair(StringRef(), QualType()) // __context with shared vars
2553  };
2554  // Start a captured region for 'teams' or 'parallel'. Both regions have
2555  // the same implicit parameters.
2556  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2557  ParamsParallel);
2558  break;
2559  }
2560  case OMPD_target_update:
2561  case OMPD_target_enter_data:
2562  case OMPD_target_exit_data: {
2563  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
2564  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
2565  QualType KmpInt32PtrTy =
2566  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
2567  QualType Args[] = {VoidPtrTy};
2569  EPI.Variadic = true;
2570  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
2571  Sema::CapturedParamNameType Params[] = {
2572  std::make_pair(".global_tid.", KmpInt32Ty),
2573  std::make_pair(".part_id.", KmpInt32PtrTy),
2574  std::make_pair(".privates.", VoidPtrTy),
2575  std::make_pair(
2576  ".copy_fn.",
2577  Context.getPointerType(CopyFnType).withConst().withRestrict()),
2578  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
2579  std::make_pair(StringRef(), QualType()) // __context with shared vars
2580  };
2581  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2582  Params);
2583  // Mark this captured region as inlined, because we don't use outlined
2584  // function directly.
2585  getCurCapturedRegion()->TheCapturedDecl->addAttr(
2586  AlwaysInlineAttr::CreateImplicit(
2587  Context, AlwaysInlineAttr::Keyword_forceinline));
2588  break;
2589  }
2590  case OMPD_threadprivate:
2591  case OMPD_taskyield:
2592  case OMPD_barrier:
2593  case OMPD_taskwait:
2594  case OMPD_cancellation_point:
2595  case OMPD_cancel:
2596  case OMPD_flush:
2597  case OMPD_declare_reduction:
2598  case OMPD_declare_simd:
2599  case OMPD_declare_target:
2600  case OMPD_end_declare_target:
2601  case OMPD_requires:
2602  llvm_unreachable("OpenMP Directive is not allowed");
2603  case OMPD_unknown:
2604  llvm_unreachable("Unknown OpenMP directive");
2605  }
2606 }
2607 
2609  SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
2610  getOpenMPCaptureRegions(CaptureRegions, DKind);
2611  return CaptureRegions.size();
2612 }
2613 
2615  Expr *CaptureExpr, bool WithInit,
2616  bool AsExpression) {
2617  assert(CaptureExpr);
2618  ASTContext &C = S.getASTContext();
2619  Expr *Init = AsExpression ? CaptureExpr : CaptureExpr->IgnoreImpCasts();
2620  QualType Ty = Init->getType();
2621  if (CaptureExpr->getObjectKind() == OK_Ordinary && CaptureExpr->isGLValue()) {
2622  if (S.getLangOpts().CPlusPlus) {
2623  Ty = C.getLValueReferenceType(Ty);
2624  } else {
2625  Ty = C.getPointerType(Ty);
2626  ExprResult Res =
2627  S.CreateBuiltinUnaryOp(CaptureExpr->getExprLoc(), UO_AddrOf, Init);
2628  if (!Res.isUsable())
2629  return nullptr;
2630  Init = Res.get();
2631  }
2632  WithInit = true;
2633  }
2634  auto *CED = OMPCapturedExprDecl::Create(C, S.CurContext, Id, Ty,
2635  CaptureExpr->getBeginLoc());
2636  if (!WithInit)
2637  CED->addAttr(OMPCaptureNoInitAttr::CreateImplicit(C));
2638  S.CurContext->addHiddenDecl(CED);
2639  S.AddInitializerToDecl(CED, Init, /*DirectInit=*/false);
2640  return CED;
2641 }
2642 
2643 static DeclRefExpr *buildCapture(Sema &S, ValueDecl *D, Expr *CaptureExpr,
2644  bool WithInit) {
2645  OMPCapturedExprDecl *CD;
2646  if (VarDecl *VD = S.isOpenMPCapturedDecl(D))
2647  CD = cast<OMPCapturedExprDecl>(VD);
2648  else
2649  CD = buildCaptureDecl(S, D->getIdentifier(), CaptureExpr, WithInit,
2650  /*AsExpression=*/false);
2651  return buildDeclRefExpr(S, CD, CD->getType().getNonReferenceType(),
2652  CaptureExpr->getExprLoc());
2653 }
2654 
2655 static ExprResult buildCapture(Sema &S, Expr *CaptureExpr, DeclRefExpr *&Ref) {
2656  CaptureExpr = S.DefaultLvalueConversion(CaptureExpr).get();
2657  if (!Ref) {
2659  S, &S.getASTContext().Idents.get(".capture_expr."), CaptureExpr,
2660  /*WithInit=*/true, /*AsExpression=*/true);
2661  Ref = buildDeclRefExpr(S, CD, CD->getType().getNonReferenceType(),
2662  CaptureExpr->getExprLoc());
2663  }
2664  ExprResult Res = Ref;
2665  if (!S.getLangOpts().CPlusPlus &&
2666  CaptureExpr->getObjectKind() == OK_Ordinary && CaptureExpr->isGLValue() &&
2667  Ref->getType()->isPointerType()) {
2668  Res = S.CreateBuiltinUnaryOp(CaptureExpr->getExprLoc(), UO_Deref, Ref);
2669  if (!Res.isUsable())
2670  return ExprError();
2671  }
2672  return S.DefaultLvalueConversion(Res.get());
2673 }
2674 
2675 namespace {
2676 // OpenMP directives parsed in this section are represented as a
2677 // CapturedStatement with an associated statement. If a syntax error
2678 // is detected during the parsing of the associated statement, the
2679 // compiler must abort processing and close the CapturedStatement.
2680 //
2681 // Combined directives such as 'target parallel' have more than one
2682 // nested CapturedStatements. This RAII ensures that we unwind out
2683 // of all the nested CapturedStatements when an error is found.
2684 class CaptureRegionUnwinderRAII {
2685 private:
2686  Sema &S;
2687  bool &ErrorFound;
2689 
2690 public:
2691  CaptureRegionUnwinderRAII(Sema &S, bool &ErrorFound,
2692  OpenMPDirectiveKind DKind)
2693  : S(S), ErrorFound(ErrorFound), DKind(DKind) {}
2694  ~CaptureRegionUnwinderRAII() {
2695  if (ErrorFound) {
2696  int ThisCaptureLevel = S.getOpenMPCaptureLevels(DKind);
2697  while (--ThisCaptureLevel >= 0)
2699  }
2700  }
2701 };
2702 } // namespace
2703 
2705  ArrayRef<OMPClause *> Clauses) {
2706  bool ErrorFound = false;
2707  CaptureRegionUnwinderRAII CaptureRegionUnwinder(
2708  *this, ErrorFound, DSAStack->getCurrentDirective());
2709  if (!S.isUsable()) {
2710  ErrorFound = true;
2711  return StmtError();
2712  }
2713 
2714  SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
2715  getOpenMPCaptureRegions(CaptureRegions, DSAStack->getCurrentDirective());
2716  OMPOrderedClause *OC = nullptr;
2717  OMPScheduleClause *SC = nullptr;
2720  // This is required for proper codegen.
2721  for (OMPClause *Clause : Clauses) {
2722  if (isOpenMPTaskingDirective(DSAStack->getCurrentDirective()) &&
2723  Clause->getClauseKind() == OMPC_in_reduction) {
2724  // Capture taskgroup task_reduction descriptors inside the tasking regions
2725  // with the corresponding in_reduction items.
2726  auto *IRC = cast<OMPInReductionClause>(Clause);
2727  for (Expr *E : IRC->taskgroup_descriptors())
2728  if (E)
2729  MarkDeclarationsReferencedInExpr(E);
2730  }
2731  if (isOpenMPPrivate(Clause->getClauseKind()) ||
2732  Clause->getClauseKind() == OMPC_copyprivate ||
2733  (getLangOpts().OpenMPUseTLS &&
2734  getASTContext().getTargetInfo().isTLSSupported() &&
2735  Clause->getClauseKind() == OMPC_copyin)) {
2736  DSAStack->setForceVarCapturing(Clause->getClauseKind() == OMPC_copyin);
2737  // Mark all variables in private list clauses as used in inner region.
2738  for (Stmt *VarRef : Clause->children()) {
2739  if (auto *E = cast_or_null<Expr>(VarRef)) {
2740  MarkDeclarationsReferencedInExpr(E);
2741  }
2742  }
2743  DSAStack->setForceVarCapturing(/*V=*/false);
2744  } else if (CaptureRegions.size() > 1 ||
2745  CaptureRegions.back() != OMPD_unknown) {
2746  if (auto *C = OMPClauseWithPreInit::get(Clause))
2747  PICs.push_back(C);
2748  if (auto *C = OMPClauseWithPostUpdate::get(Clause)) {
2749  if (Expr *E = C->getPostUpdateExpr())
2750  MarkDeclarationsReferencedInExpr(E);
2751  }
2752  }
2753  if (Clause->getClauseKind() == OMPC_schedule)
2754  SC = cast<OMPScheduleClause>(Clause);
2755  else if (Clause->getClauseKind() == OMPC_ordered)
2756  OC = cast<OMPOrderedClause>(Clause);
2757  else if (Clause->getClauseKind() == OMPC_linear)
2758  LCs.push_back(cast<OMPLinearClause>(Clause));
2759  }
2760  // OpenMP, 2.7.1 Loop Construct, Restrictions
2761  // The nonmonotonic modifier cannot be specified if an ordered clause is
2762  // specified.
2763  if (SC &&
2764  (SC->getFirstScheduleModifier() == OMPC_SCHEDULE_MODIFIER_nonmonotonic ||
2765  SC->getSecondScheduleModifier() ==
2766  OMPC_SCHEDULE_MODIFIER_nonmonotonic) &&
2767  OC) {
2768  Diag(SC->getFirstScheduleModifier() == OMPC_SCHEDULE_MODIFIER_nonmonotonic
2771  diag::err_omp_schedule_nonmonotonic_ordered)
2772  << SourceRange(OC->getBeginLoc(), OC->getEndLoc());
2773  ErrorFound = true;
2774  }
2775  if (!LCs.empty() && OC && OC->getNumForLoops()) {
2776  for (const OMPLinearClause *C : LCs) {
2777  Diag(C->getBeginLoc(), diag::err_omp_linear_ordered)
2778  << SourceRange(OC->getBeginLoc(), OC->getEndLoc());
2779  }
2780  ErrorFound = true;
2781  }
2782  if (isOpenMPWorksharingDirective(DSAStack->getCurrentDirective()) &&
2783  isOpenMPSimdDirective(DSAStack->getCurrentDirective()) && OC &&
2784  OC->getNumForLoops()) {
2785  Diag(OC->getBeginLoc(), diag::err_omp_ordered_simd)
2786  << getOpenMPDirectiveName(DSAStack->getCurrentDirective());
2787  ErrorFound = true;
2788  }
2789  if (ErrorFound) {
2790  return StmtError();
2791  }
2792  StmtResult SR = S;
2793  for (OpenMPDirectiveKind ThisCaptureRegion : llvm::reverse(CaptureRegions)) {
2794  // Mark all variables in private list clauses as used in inner region.
2795  // Required for proper codegen of combined directives.
2796  // TODO: add processing for other clauses.
2797  if (ThisCaptureRegion != OMPD_unknown) {
2798  for (const clang::OMPClauseWithPreInit *C : PICs) {
2799  OpenMPDirectiveKind CaptureRegion = C->getCaptureRegion();
2800  // Find the particular capture region for the clause if the
2801  // directive is a combined one with multiple capture regions.
2802  // If the directive is not a combined one, the capture region
2803  // associated with the clause is OMPD_unknown and is generated
2804  // only once.
2805  if (CaptureRegion == ThisCaptureRegion ||
2806  CaptureRegion == OMPD_unknown) {
2807  if (auto *DS = cast_or_null<DeclStmt>(C->getPreInitStmt())) {
2808  for (Decl *D : DS->decls())
2809  MarkVariableReferenced(D->getLocation(), cast<VarDecl>(D));
2810  }
2811  }
2812  }
2813  }
2814  SR = ActOnCapturedRegionEnd(SR.get());
2815  }
2816  return SR;
2817 }
2818 
2819 static bool checkCancelRegion(Sema &SemaRef, OpenMPDirectiveKind CurrentRegion,
2820  OpenMPDirectiveKind CancelRegion,
2821  SourceLocation StartLoc) {
2822  // CancelRegion is only needed for cancel and cancellation_point.
2823  if (CurrentRegion != OMPD_cancel && CurrentRegion != OMPD_cancellation_point)
2824  return false;
2825 
2826  if (CancelRegion == OMPD_parallel || CancelRegion == OMPD_for ||
2827  CancelRegion == OMPD_sections || CancelRegion == OMPD_taskgroup)
2828  return false;
2829 
2830  SemaRef.Diag(StartLoc, diag::err_omp_wrong_cancel_region)
2831  << getOpenMPDirectiveName(CancelRegion);
2832  return true;
2833 }
2834 
2835 static bool checkNestingOfRegions(Sema &SemaRef, const DSAStackTy *Stack,
2836  OpenMPDirectiveKind CurrentRegion,
2837  const DeclarationNameInfo &CurrentName,
2838  OpenMPDirectiveKind CancelRegion,
2839  SourceLocation StartLoc) {
2840  if (Stack->getCurScope()) {
2841  OpenMPDirectiveKind ParentRegion = Stack->getParentDirective();
2842  OpenMPDirectiveKind OffendingRegion = ParentRegion;
2843  bool NestingProhibited = false;
2844  bool CloseNesting = true;
2845  bool OrphanSeen = false;
2846  enum {
2847  NoRecommend,
2848  ShouldBeInParallelRegion,
2849  ShouldBeInOrderedRegion,
2850  ShouldBeInTargetRegion,
2851  ShouldBeInTeamsRegion
2852  } Recommend = NoRecommend;
2853  if (isOpenMPSimdDirective(ParentRegion) && CurrentRegion != OMPD_ordered) {
2854  // OpenMP [2.16, Nesting of Regions]
2855  // OpenMP constructs may not be nested inside a simd region.
2856  // OpenMP [2.8.1,simd Construct, Restrictions]
2857  // An ordered construct with the simd clause is the only OpenMP
2858  // construct that can appear in the simd region.
2859  // Allowing a SIMD construct nested in another SIMD construct is an
2860  // extension. The OpenMP 4.5 spec does not allow it. Issue a warning
2861  // message.
2862  SemaRef.Diag(StartLoc, (CurrentRegion != OMPD_simd)
2863  ? diag::err_omp_prohibited_region_simd
2864  : diag::warn_omp_nesting_simd);
2865  return CurrentRegion != OMPD_simd;
2866  }
2867  if (ParentRegion == OMPD_atomic) {
2868  // OpenMP [2.16, Nesting of Regions]
2869  // OpenMP constructs may not be nested inside an atomic region.
2870  SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region_atomic);
2871  return true;
2872  }
2873  if (CurrentRegion == OMPD_section) {
2874  // OpenMP [2.7.2, sections Construct, Restrictions]
2875  // Orphaned section directives are prohibited. That is, the section
2876  // directives must appear within the sections construct and must not be
2877  // encountered elsewhere in the sections region.
2878  if (ParentRegion != OMPD_sections &&
2879  ParentRegion != OMPD_parallel_sections) {
2880  SemaRef.Diag(StartLoc, diag::err_omp_orphaned_section_directive)
2881  << (ParentRegion != OMPD_unknown)
2882  << getOpenMPDirectiveName(ParentRegion);
2883  return true;
2884  }
2885  return false;
2886  }
2887  // Allow some constructs (except teams) to be orphaned (they could be
2888  // used in functions, called from OpenMP regions with the required
2889  // preconditions).
2890  if (ParentRegion == OMPD_unknown &&
2891  !isOpenMPNestingTeamsDirective(CurrentRegion))
2892  return false;
2893  if (CurrentRegion == OMPD_cancellation_point ||
2894  CurrentRegion == OMPD_cancel) {
2895  // OpenMP [2.16, Nesting of Regions]
2896  // A cancellation point construct for which construct-type-clause is
2897  // taskgroup must be nested inside a task construct. A cancellation
2898  // point construct for which construct-type-clause is not taskgroup must
2899  // be closely nested inside an OpenMP construct that matches the type
2900  // specified in construct-type-clause.
2901  // A cancel construct for which construct-type-clause is taskgroup must be
2902  // nested inside a task construct. A cancel construct for which
2903  // construct-type-clause is not taskgroup must be closely nested inside an
2904  // OpenMP construct that matches the type specified in
2905  // construct-type-clause.
2906  NestingProhibited =
2907  !((CancelRegion == OMPD_parallel &&
2908  (ParentRegion == OMPD_parallel ||
2909  ParentRegion == OMPD_target_parallel)) ||
2910  (CancelRegion == OMPD_for &&
2911  (ParentRegion == OMPD_for || ParentRegion == OMPD_parallel_for ||
2912  ParentRegion == OMPD_target_parallel_for ||
2913  ParentRegion == OMPD_distribute_parallel_for ||
2914  ParentRegion == OMPD_teams_distribute_parallel_for ||
2915  ParentRegion == OMPD_target_teams_distribute_parallel_for)) ||
2916  (CancelRegion == OMPD_taskgroup && ParentRegion == OMPD_task) ||
2917  (CancelRegion == OMPD_sections &&
2918  (ParentRegion == OMPD_section || ParentRegion == OMPD_sections ||
2919  ParentRegion == OMPD_parallel_sections)));
2920  } else if (CurrentRegion == OMPD_master) {
2921  // OpenMP [2.16, Nesting of Regions]
2922  // A master region may not be closely nested inside a worksharing,
2923  // atomic, or explicit task region.
2924  NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
2925  isOpenMPTaskingDirective(ParentRegion);
2926  } else if (CurrentRegion == OMPD_critical && CurrentName.getName()) {
2927  // OpenMP [2.16, Nesting of Regions]
2928  // A critical region may not be nested (closely or otherwise) inside a
2929  // critical region with the same name. Note that this restriction is not
2930  // sufficient to prevent deadlock.
2931  SourceLocation PreviousCriticalLoc;
2932  bool DeadLock = Stack->hasDirective(
2933  [CurrentName, &PreviousCriticalLoc](OpenMPDirectiveKind K,
2934  const DeclarationNameInfo &DNI,
2935  SourceLocation Loc) {
2936  if (K == OMPD_critical && DNI.getName() == CurrentName.getName()) {
2937  PreviousCriticalLoc = Loc;
2938  return true;
2939  }
2940  return false;
2941  },
2942  false /* skip top directive */);
2943  if (DeadLock) {
2944  SemaRef.Diag(StartLoc,
2945  diag::err_omp_prohibited_region_critical_same_name)
2946  << CurrentName.getName();
2947  if (PreviousCriticalLoc.isValid())
2948  SemaRef.Diag(PreviousCriticalLoc,
2949  diag::note_omp_previous_critical_region);
2950  return true;
2951  }
2952  } else if (CurrentRegion == OMPD_barrier) {
2953  // OpenMP [2.16, Nesting of Regions]
2954  // A barrier region may not be closely nested inside a worksharing,
2955  // explicit task, critical, ordered, atomic, or master region.
2956  NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
2957  isOpenMPTaskingDirective(ParentRegion) ||
2958  ParentRegion == OMPD_master ||
2959  ParentRegion == OMPD_critical ||
2960  ParentRegion == OMPD_ordered;
2961  } else if (isOpenMPWorksharingDirective(CurrentRegion) &&
2962  !isOpenMPParallelDirective(CurrentRegion) &&
2963  !isOpenMPTeamsDirective(CurrentRegion)) {
2964  // OpenMP [2.16, Nesting of Regions]
2965  // A worksharing region may not be closely nested inside a worksharing,
2966  // explicit task, critical, ordered, atomic, or master region.
2967  NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
2968  isOpenMPTaskingDirective(ParentRegion) ||
2969  ParentRegion == OMPD_master ||
2970  ParentRegion == OMPD_critical ||
2971  ParentRegion == OMPD_ordered;
2972  Recommend = ShouldBeInParallelRegion;
2973  } else if (CurrentRegion == OMPD_ordered) {
2974  // OpenMP [2.16, Nesting of Regions]
2975  // An ordered region may not be closely nested inside a critical,
2976  // atomic, or explicit task region.
2977  // An ordered region must be closely nested inside a loop region (or
2978  // parallel loop region) with an ordered clause.
2979  // OpenMP [2.8.1,simd Construct, Restrictions]
2980  // An ordered construct with the simd clause is the only OpenMP construct
2981  // that can appear in the simd region.
2982  NestingProhibited = ParentRegion == OMPD_critical ||
2983  isOpenMPTaskingDirective(ParentRegion) ||
2984  !(isOpenMPSimdDirective(ParentRegion) ||
2985  Stack->isParentOrderedRegion());
2986  Recommend = ShouldBeInOrderedRegion;
2987  } else if (isOpenMPNestingTeamsDirective(CurrentRegion)) {
2988  // OpenMP [2.16, Nesting of Regions]
2989  // If specified, a teams construct must be contained within a target
2990  // construct.
2991  NestingProhibited = ParentRegion != OMPD_target;
2992  OrphanSeen = ParentRegion == OMPD_unknown;
2993  Recommend = ShouldBeInTargetRegion;
2994  }
2995  if (!NestingProhibited &&
2996  !isOpenMPTargetExecutionDirective(CurrentRegion) &&
2997  !isOpenMPTargetDataManagementDirective(CurrentRegion) &&
2998  (ParentRegion == OMPD_teams || ParentRegion == OMPD_target_teams)) {
2999  // OpenMP [2.16, Nesting of Regions]
3000  // distribute, parallel, parallel sections, parallel workshare, and the
3001  // parallel loop and parallel loop SIMD constructs are the only OpenMP
3002  // constructs that can be closely nested in the teams region.
3003  NestingProhibited = !isOpenMPParallelDirective(CurrentRegion) &&
3004  !isOpenMPDistributeDirective(CurrentRegion);
3005  Recommend = ShouldBeInParallelRegion;
3006  }
3007  if (!NestingProhibited &&
3008  isOpenMPNestingDistributeDirective(CurrentRegion)) {
3009  // OpenMP 4.5 [2.17 Nesting of Regions]
3010  // The region associated with the distribute construct must be strictly
3011  // nested inside a teams region
3012  NestingProhibited =
3013  (ParentRegion != OMPD_teams && ParentRegion != OMPD_target_teams);
3014  Recommend = ShouldBeInTeamsRegion;
3015  }
3016  if (!NestingProhibited &&
3017  (isOpenMPTargetExecutionDirective(CurrentRegion) ||
3018  isOpenMPTargetDataManagementDirective(CurrentRegion))) {
3019  // OpenMP 4.5 [2.17 Nesting of Regions]
3020  // If a target, target update, target data, target enter data, or
3021  // target exit data construct is encountered during execution of a
3022  // target region, the behavior is unspecified.
3023  NestingProhibited = Stack->hasDirective(
3024  [&OffendingRegion](OpenMPDirectiveKind K, const DeclarationNameInfo &,
3025  SourceLocation) {
3027  OffendingRegion = K;
3028  return true;
3029  }
3030  return false;
3031  },
3032  false /* don't skip top directive */);
3033  CloseNesting = false;
3034  }
3035  if (NestingProhibited) {
3036  if (OrphanSeen) {
3037  SemaRef.Diag(StartLoc, diag::err_omp_orphaned_device_directive)
3038  << getOpenMPDirectiveName(CurrentRegion) << Recommend;
3039  } else {
3040  SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region)
3041  << CloseNesting << getOpenMPDirectiveName(OffendingRegion)
3042  << Recommend << getOpenMPDirectiveName(CurrentRegion);
3043  }
3044  return true;
3045  }
3046  }
3047  return false;
3048 }
3049 
3051  ArrayRef<OMPClause *> Clauses,
3052  ArrayRef<OpenMPDirectiveKind> AllowedNameModifiers) {
3053  bool ErrorFound = false;
3054  unsigned NamedModifiersNumber = 0;
3056  OMPD_unknown + 1);
3057  SmallVector<SourceLocation, 4> NameModifierLoc;
3058  for (const OMPClause *C : Clauses) {
3059  if (const auto *IC = dyn_cast_or_null<OMPIfClause>(C)) {
3060  // At most one if clause without a directive-name-modifier can appear on
3061  // the directive.
3062  OpenMPDirectiveKind CurNM = IC->getNameModifier();
3063  if (FoundNameModifiers[CurNM]) {
3064  S.Diag(C->getBeginLoc(), diag::err_omp_more_one_clause)
3065  << getOpenMPDirectiveName(Kind) << getOpenMPClauseName(OMPC_if)
3066  << (CurNM != OMPD_unknown) << getOpenMPDirectiveName(CurNM);
3067  ErrorFound = true;
3068  } else if (CurNM != OMPD_unknown) {
3069  NameModifierLoc.push_back(IC->getNameModifierLoc());
3070  ++NamedModifiersNumber;
3071  }
3072  FoundNameModifiers[CurNM] = IC;
3073  if (CurNM == OMPD_unknown)
3074  continue;
3075  // Check if the specified name modifier is allowed for the current
3076  // directive.
3077  // At most one if clause with the particular directive-name-modifier can
3078  // appear on the directive.
3079  bool MatchFound = false;
3080  for (auto NM : AllowedNameModifiers) {
3081  if (CurNM == NM) {
3082  MatchFound = true;
3083  break;
3084  }
3085  }
3086  if (!MatchFound) {
3087  S.Diag(IC->getNameModifierLoc(),
3088  diag::err_omp_wrong_if_directive_name_modifier)
3090  ErrorFound = true;
3091  }
3092  }
3093  }
3094  // If any if clause on the directive includes a directive-name-modifier then
3095  // all if clauses on the directive must include a directive-name-modifier.
3096  if (FoundNameModifiers[OMPD_unknown] && NamedModifiersNumber > 0) {
3097  if (NamedModifiersNumber == AllowedNameModifiers.size()) {
3098  S.Diag(FoundNameModifiers[OMPD_unknown]->getBeginLoc(),
3099  diag::err_omp_no_more_if_clause);
3100  } else {
3101  std::string Values;
3102  std::string Sep(", ");
3103  unsigned AllowedCnt = 0;
3104  unsigned TotalAllowedNum =
3105  AllowedNameModifiers.size() - NamedModifiersNumber;
3106  for (unsigned Cnt = 0, End = AllowedNameModifiers.size(); Cnt < End;
3107  ++Cnt) {
3108  OpenMPDirectiveKind NM = AllowedNameModifiers[Cnt];
3109  if (!FoundNameModifiers[NM]) {
3110  Values += "'";
3111  Values += getOpenMPDirectiveName(NM);
3112  Values += "'";
3113  if (AllowedCnt + 2 == TotalAllowedNum)
3114  Values += " or ";
3115  else if (AllowedCnt + 1 != TotalAllowedNum)
3116  Values += Sep;
3117  ++AllowedCnt;
3118  }
3119  }
3120  S.Diag(FoundNameModifiers[OMPD_unknown]->getCondition()->getBeginLoc(),
3121  diag::err_omp_unnamed_if_clause)
3122  << (TotalAllowedNum > 1) << Values;
3123  }
3124  for (SourceLocation Loc : NameModifierLoc) {
3125  S.Diag(Loc, diag::note_omp_previous_named_if_clause);
3126  }
3127  ErrorFound = true;
3128  }
3129  return ErrorFound;
3130 }
3131 
3134  OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
3135  Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc) {
3136  StmtResult Res = StmtError();
3137  // First check CancelRegion which is then used in checkNestingOfRegions.
3138  if (checkCancelRegion(*this, Kind, CancelRegion, StartLoc) ||
3139  checkNestingOfRegions(*this, DSAStack, Kind, DirName, CancelRegion,
3140  StartLoc))
3141  return StmtError();
3142 
3143  llvm::SmallVector<OMPClause *, 8> ClausesWithImplicit;
3144  VarsWithInheritedDSAType VarsWithInheritedDSA;
3145  bool ErrorFound = false;
3146  ClausesWithImplicit.append(Clauses.begin(), Clauses.end());
3147  if (AStmt && !CurContext->isDependentContext()) {
3148  assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
3149 
3150  // Check default data sharing attributes for referenced variables.
3151  DSAAttrChecker DSAChecker(DSAStack, *this, cast<CapturedStmt>(AStmt));
3152  int ThisCaptureLevel = getOpenMPCaptureLevels(Kind);
3153  Stmt *S = AStmt;
3154  while (--ThisCaptureLevel >= 0)
3155  S = cast<CapturedStmt>(S)->getCapturedStmt();
3156  DSAChecker.Visit(S);
3157  if (DSAChecker.isErrorFound())
3158  return StmtError();
3159  // Generate list of implicitly defined firstprivate variables.
3160  VarsWithInheritedDSA = DSAChecker.getVarsWithInheritedDSA();
3161 
3162  SmallVector<Expr *, 4> ImplicitFirstprivates(
3163  DSAChecker.getImplicitFirstprivate().begin(),
3164  DSAChecker.getImplicitFirstprivate().end());
3165  SmallVector<Expr *, 4> ImplicitMaps(DSAChecker.getImplicitMap().begin(),
3166  DSAChecker.getImplicitMap().end());
3167  // Mark taskgroup task_reduction descriptors as implicitly firstprivate.
3168  for (OMPClause *C : Clauses) {
3169  if (auto *IRC = dyn_cast<OMPInReductionClause>(C)) {
3170  for (Expr *E : IRC->taskgroup_descriptors())
3171  if (E)
3172  ImplicitFirstprivates.emplace_back(E);
3173  }
3174  }
3175  if (!ImplicitFirstprivates.empty()) {
3176  if (OMPClause *Implicit = ActOnOpenMPFirstprivateClause(
3177  ImplicitFirstprivates, SourceLocation(), SourceLocation(),
3178  SourceLocation())) {
3179  ClausesWithImplicit.push_back(Implicit);
3180  ErrorFound = cast<OMPFirstprivateClause>(Implicit)->varlist_size() !=
3181  ImplicitFirstprivates.size();
3182  } else {
3183  ErrorFound = true;
3184  }
3185  }
3186  if (!ImplicitMaps.empty()) {
3187  if (OMPClause *Implicit = ActOnOpenMPMapClause(
3188  OMPC_MAP_unknown, OMPC_MAP_tofrom, /*IsMapTypeImplicit=*/true,
3189  SourceLocation(), SourceLocation(), ImplicitMaps,
3191  ClausesWithImplicit.emplace_back(Implicit);
3192  ErrorFound |=
3193  cast<OMPMapClause>(Implicit)->varlist_size() != ImplicitMaps.size();
3194  } else {
3195  ErrorFound = true;
3196  }
3197  }
3198  }
3199 
3200  llvm::SmallVector<OpenMPDirectiveKind, 4> AllowedNameModifiers;
3201  switch (Kind) {
3202  case OMPD_parallel:
3203  Res = ActOnOpenMPParallelDirective(ClausesWithImplicit, AStmt, StartLoc,
3204  EndLoc);
3205  AllowedNameModifiers.push_back(OMPD_parallel);
3206  break;
3207  case OMPD_simd:
3208  Res = ActOnOpenMPSimdDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc,
3209  VarsWithInheritedDSA);
3210  break;
3211  case OMPD_for:
3212  Res = ActOnOpenMPForDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc,
3213  VarsWithInheritedDSA);
3214  break;
3215  case OMPD_for_simd:
3216  Res = ActOnOpenMPForSimdDirective(ClausesWithImplicit, AStmt, StartLoc,
3217  EndLoc, VarsWithInheritedDSA);
3218  break;
3219  case OMPD_sections:
3220  Res = ActOnOpenMPSectionsDirective(ClausesWithImplicit, AStmt, StartLoc,
3221  EndLoc);
3222  break;
3223  case OMPD_section:
3224  assert(ClausesWithImplicit.empty() &&
3225  "No clauses are allowed for 'omp section' directive");
3226  Res = ActOnOpenMPSectionDirective(AStmt, StartLoc, EndLoc);
3227  break;
3228  case OMPD_single:
3229  Res = ActOnOpenMPSingleDirective(ClausesWithImplicit, AStmt, StartLoc,
3230  EndLoc);
3231  break;
3232  case OMPD_master:
3233  assert(ClausesWithImplicit.empty() &&
3234  "No clauses are allowed for 'omp master' directive");
3235  Res = ActOnOpenMPMasterDirective(AStmt, StartLoc, EndLoc);
3236  break;
3237  case OMPD_critical:
3238  Res = ActOnOpenMPCriticalDirective(DirName, ClausesWithImplicit, AStmt,
3239  StartLoc, EndLoc);
3240  break;
3241  case OMPD_parallel_for:
3242  Res = ActOnOpenMPParallelForDirective(ClausesWithImplicit, AStmt, StartLoc,
3243  EndLoc, VarsWithInheritedDSA);
3244  AllowedNameModifiers.push_back(OMPD_parallel);
3245  break;
3246  case OMPD_parallel_for_simd:
3247  Res = ActOnOpenMPParallelForSimdDirective(
3248  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3249  AllowedNameModifiers.push_back(OMPD_parallel);
3250  break;
3251  case OMPD_parallel_sections:
3252  Res = ActOnOpenMPParallelSectionsDirective(ClausesWithImplicit, AStmt,
3253  StartLoc, EndLoc);
3254  AllowedNameModifiers.push_back(OMPD_parallel);
3255  break;
3256  case OMPD_task:
3257  Res =
3258  ActOnOpenMPTaskDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc);
3259  AllowedNameModifiers.push_back(OMPD_task);
3260  break;
3261  case OMPD_taskyield:
3262  assert(ClausesWithImplicit.empty() &&
3263  "No clauses are allowed for 'omp taskyield' directive");
3264  assert(AStmt == nullptr &&
3265  "No associated statement allowed for 'omp taskyield' directive");
3266  Res = ActOnOpenMPTaskyieldDirective(StartLoc, EndLoc);
3267  break;
3268  case OMPD_barrier:
3269  assert(ClausesWithImplicit.empty() &&
3270  "No clauses are allowed for 'omp barrier' directive");
3271  assert(AStmt == nullptr &&
3272  "No associated statement allowed for 'omp barrier' directive");
3273  Res = ActOnOpenMPBarrierDirective(StartLoc, EndLoc);
3274  break;
3275  case OMPD_taskwait:
3276  assert(ClausesWithImplicit.empty() &&
3277  "No clauses are allowed for 'omp taskwait' directive");
3278  assert(AStmt == nullptr &&
3279  "No associated statement allowed for 'omp taskwait' directive");
3280  Res = ActOnOpenMPTaskwaitDirective(StartLoc, EndLoc);
3281  break;
3282  case OMPD_taskgroup:
3283  Res = ActOnOpenMPTaskgroupDirective(ClausesWithImplicit, AStmt, StartLoc,
3284  EndLoc);
3285  break;
3286  case OMPD_flush:
3287  assert(AStmt == nullptr &&
3288  "No associated statement allowed for 'omp flush' directive");
3289  Res = ActOnOpenMPFlushDirective(ClausesWithImplicit, StartLoc, EndLoc);
3290  break;
3291  case OMPD_ordered:
3292  Res = ActOnOpenMPOrderedDirective(ClausesWithImplicit, AStmt, StartLoc,
3293  EndLoc);
3294  break;
3295  case OMPD_atomic:
3296  Res = ActOnOpenMPAtomicDirective(ClausesWithImplicit, AStmt, StartLoc,
3297  EndLoc);
3298  break;
3299  case OMPD_teams:
3300  Res =
3301  ActOnOpenMPTeamsDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc);
3302  break;
3303  case OMPD_target:
3304  Res = ActOnOpenMPTargetDirective(ClausesWithImplicit, AStmt, StartLoc,
3305  EndLoc);
3306  AllowedNameModifiers.push_back(OMPD_target);
3307  break;
3308  case OMPD_target_parallel:
3309  Res = ActOnOpenMPTargetParallelDirective(ClausesWithImplicit, AStmt,
3310  StartLoc, EndLoc);
3311  AllowedNameModifiers.push_back(OMPD_target);
3312  AllowedNameModifiers.push_back(OMPD_parallel);
3313  break;
3314  case OMPD_target_parallel_for:
3315  Res = ActOnOpenMPTargetParallelForDirective(
3316  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3317  AllowedNameModifiers.push_back(OMPD_target);
3318  AllowedNameModifiers.push_back(OMPD_parallel);
3319  break;
3320  case OMPD_cancellation_point:
3321  assert(ClausesWithImplicit.empty() &&
3322  "No clauses are allowed for 'omp cancellation point' directive");
3323  assert(AStmt == nullptr && "No associated statement allowed for 'omp "
3324  "cancellation point' directive");
3325  Res = ActOnOpenMPCancellationPointDirective(StartLoc, EndLoc, CancelRegion);
3326  break;
3327  case OMPD_cancel:
3328  assert(AStmt == nullptr &&
3329  "No associated statement allowed for 'omp cancel' directive");
3330  Res = ActOnOpenMPCancelDirective(ClausesWithImplicit, StartLoc, EndLoc,
3331  CancelRegion);
3332  AllowedNameModifiers.push_back(OMPD_cancel);
3333  break;
3334  case OMPD_target_data:
3335  Res = ActOnOpenMPTargetDataDirective(ClausesWithImplicit, AStmt, StartLoc,
3336  EndLoc);
3337  AllowedNameModifiers.push_back(OMPD_target_data);
3338  break;
3339  case OMPD_target_enter_data:
3340  Res = ActOnOpenMPTargetEnterDataDirective(ClausesWithImplicit, StartLoc,
3341  EndLoc, AStmt);
3342  AllowedNameModifiers.push_back(OMPD_target_enter_data);
3343  break;
3344  case OMPD_target_exit_data:
3345  Res = ActOnOpenMPTargetExitDataDirective(ClausesWithImplicit, StartLoc,
3346  EndLoc, AStmt);
3347  AllowedNameModifiers.push_back(OMPD_target_exit_data);
3348  break;
3349  case OMPD_taskloop:
3350  Res = ActOnOpenMPTaskLoopDirective(ClausesWithImplicit, AStmt, StartLoc,
3351  EndLoc, VarsWithInheritedDSA);
3352  AllowedNameModifiers.push_back(OMPD_taskloop);
3353  break;
3354  case OMPD_taskloop_simd:
3355  Res = ActOnOpenMPTaskLoopSimdDirective(ClausesWithImplicit, AStmt, StartLoc,
3356  EndLoc, VarsWithInheritedDSA);
3357  AllowedNameModifiers.push_back(OMPD_taskloop);
3358  break;
3359  case OMPD_distribute:
3360  Res = ActOnOpenMPDistributeDirective(ClausesWithImplicit, AStmt, StartLoc,
3361  EndLoc, VarsWithInheritedDSA);
3362  break;
3363  case OMPD_target_update:
3364  Res = ActOnOpenMPTargetUpdateDirective(ClausesWithImplicit, StartLoc,
3365  EndLoc, AStmt);
3366  AllowedNameModifiers.push_back(OMPD_target_update);
3367  break;
3368  case OMPD_distribute_parallel_for:
3369  Res = ActOnOpenMPDistributeParallelForDirective(
3370  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3371  AllowedNameModifiers.push_back(OMPD_parallel);
3372  break;
3373  case OMPD_distribute_parallel_for_simd:
3374  Res = ActOnOpenMPDistributeParallelForSimdDirective(
3375  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3376  AllowedNameModifiers.push_back(OMPD_parallel);
3377  break;
3378  case OMPD_distribute_simd:
3379  Res = ActOnOpenMPDistributeSimdDirective(
3380  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3381  break;
3382  case OMPD_target_parallel_for_simd:
3383  Res = ActOnOpenMPTargetParallelForSimdDirective(
3384  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3385  AllowedNameModifiers.push_back(OMPD_target);
3386  AllowedNameModifiers.push_back(OMPD_parallel);
3387  break;
3388  case OMPD_target_simd:
3389  Res = ActOnOpenMPTargetSimdDirective(ClausesWithImplicit, AStmt, StartLoc,
3390  EndLoc, VarsWithInheritedDSA);
3391  AllowedNameModifiers.push_back(OMPD_target);
3392  break;
3393  case OMPD_teams_distribute:
3394  Res = ActOnOpenMPTeamsDistributeDirective(
3395  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3396  break;
3397  case OMPD_teams_distribute_simd:
3398  Res = ActOnOpenMPTeamsDistributeSimdDirective(
3399  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3400  break;
3401  case OMPD_teams_distribute_parallel_for_simd:
3402  Res = ActOnOpenMPTeamsDistributeParallelForSimdDirective(
3403  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3404  AllowedNameModifiers.push_back(OMPD_parallel);
3405  break;
3406  case OMPD_teams_distribute_parallel_for:
3407  Res = ActOnOpenMPTeamsDistributeParallelForDirective(
3408  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3409  AllowedNameModifiers.push_back(OMPD_parallel);
3410  break;
3411  case OMPD_target_teams:
3412  Res = ActOnOpenMPTargetTeamsDirective(ClausesWithImplicit, AStmt, StartLoc,
3413  EndLoc);
3414  AllowedNameModifiers.push_back(OMPD_target);
3415  break;
3416  case OMPD_target_teams_distribute:
3417  Res = ActOnOpenMPTargetTeamsDistributeDirective(
3418  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3419  AllowedNameModifiers.push_back(OMPD_target);
3420  break;
3421  case OMPD_target_teams_distribute_parallel_for:
3422  Res = ActOnOpenMPTargetTeamsDistributeParallelForDirective(
3423  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3424  AllowedNameModifiers.push_back(OMPD_target);
3425  AllowedNameModifiers.push_back(OMPD_parallel);
3426  break;
3427  case OMPD_target_teams_distribute_parallel_for_simd:
3428  Res = ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
3429  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3430  AllowedNameModifiers.push_back(OMPD_target);
3431  AllowedNameModifiers.push_back(OMPD_parallel);
3432  break;
3433  case OMPD_target_teams_distribute_simd:
3434  Res = ActOnOpenMPTargetTeamsDistributeSimdDirective(
3435  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3436  AllowedNameModifiers.push_back(OMPD_target);
3437  break;
3438  case OMPD_declare_target:
3439  case OMPD_end_declare_target:
3440  case OMPD_threadprivate:
3441  case OMPD_declare_reduction:
3442  case OMPD_declare_simd:
3443  case OMPD_requires:
3444  llvm_unreachable("OpenMP Directive is not allowed");
3445  case OMPD_unknown:
3446  llvm_unreachable("Unknown OpenMP directive");
3447  }
3448 
3449  for (const auto &P : VarsWithInheritedDSA) {
3450  Diag(P.second->getExprLoc(), diag::err_omp_no_dsa_for_variable)
3451  << P.first << P.second->getSourceRange();
3452  }
3453  ErrorFound = !VarsWithInheritedDSA.empty() || ErrorFound;
3454 
3455  if (!AllowedNameModifiers.empty())
3456  ErrorFound = checkIfClauses(*this, Kind, Clauses, AllowedNameModifiers) ||
3457  ErrorFound;
3458 
3459  if (ErrorFound)
3460  return StmtError();
3461  return Res;
3462 }
3463 
3465  DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen,
3466  ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
3467  ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
3468  ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR) {
3469  assert(Aligneds.size() == Alignments.size());
3470  assert(Linears.size() == LinModifiers.size());
3471  assert(Linears.size() == Steps.size());
3472  if (!DG || DG.get().isNull())
3473  return DeclGroupPtrTy();
3474 
3475  if (!DG.get().isSingleDecl()) {
3476  Diag(SR.getBegin(), diag::err_omp_single_decl_in_declare_simd);
3477  return DG;
3478  }
3479  Decl *ADecl = DG.get().getSingleDecl();
3480  if (auto *FTD = dyn_cast<FunctionTemplateDecl>(ADecl))
3481  ADecl = FTD->getTemplatedDecl();
3482 
3483  auto *FD = dyn_cast<FunctionDecl>(ADecl);
3484  if (!FD) {
3485  Diag(ADecl->getLocation(), diag::err_omp_function_expected);
3486  return DeclGroupPtrTy();
3487  }
3488 
3489  // OpenMP [2.8.2, declare simd construct, Description]
3490  // The parameter of the simdlen clause must be a constant positive integer
3491  // expression.
3492  ExprResult SL;
3493  if (Simdlen)
3494  SL = VerifyPositiveIntegerConstantInClause(Simdlen, OMPC_simdlen);
3495  // OpenMP [2.8.2, declare simd construct, Description]
3496  // The special this pointer can be used as if was one of the arguments to the
3497  // function in any of the linear, aligned, or uniform clauses.
3498  // The uniform clause declares one or more arguments to have an invariant
3499  // value for all concurrent invocations of the function in the execution of a
3500  // single SIMD loop.
3501  llvm::DenseMap<const Decl *, const Expr *> UniformedArgs;
3502  const Expr *UniformedLinearThis = nullptr;
3503  for (const Expr *E : Uniforms) {
3504  E = E->IgnoreParenImpCasts();
3505  if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
3506  if (const auto *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl()))
3507  if (FD->getNumParams() > PVD->getFunctionScopeIndex() &&
3508  FD->getParamDecl(PVD->getFunctionScopeIndex())
3509  ->getCanonicalDecl() == PVD->getCanonicalDecl()) {
3510  UniformedArgs.try_emplace(PVD->getCanonicalDecl(), E);
3511  continue;
3512  }
3513  if (isa<CXXThisExpr>(E)) {
3514  UniformedLinearThis = E;
3515  continue;
3516  }
3517  Diag(E->getExprLoc(), diag::err_omp_param_or_this_in_clause)
3518  << FD->getDeclName() << (isa<CXXMethodDecl>(ADecl) ? 1 : 0);
3519  }
3520  // OpenMP [2.8.2, declare simd construct, Description]
3521  // The aligned clause declares that the object to which each list item points
3522  // is aligned to the number of bytes expressed in the optional parameter of
3523  // the aligned clause.
3524  // The special this pointer can be used as if was one of the arguments to the
3525  // function in any of the linear, aligned, or uniform clauses.
3526  // The type of list items appearing in the aligned clause must be array,
3527  // pointer, reference to array, or reference to pointer.
3528  llvm::DenseMap<const Decl *, const Expr *> AlignedArgs;
3529  const Expr *AlignedThis = nullptr;
3530  for (const Expr *E : Aligneds) {
3531  E = E->IgnoreParenImpCasts();
3532  if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
3533  if (const auto *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl())) {
3534  const VarDecl *CanonPVD = PVD->getCanonicalDecl();
3535  if (FD->getNumParams() > PVD->getFunctionScopeIndex() &&
3536  FD->getParamDecl(PVD->getFunctionScopeIndex())
3537  ->getCanonicalDecl() == CanonPVD) {
3538  // OpenMP [2.8.1, simd construct, Restrictions]
3539  // A list-item cannot appear in more than one aligned clause.
3540  if (AlignedArgs.count(CanonPVD) > 0) {
3541  Diag(E->getExprLoc(), diag::err_omp_aligned_twice)
3542  << 1 << E->getSourceRange();
3543  Diag(AlignedArgs[CanonPVD]->getExprLoc(),
3544  diag::note_omp_explicit_dsa)
3545  << getOpenMPClauseName(OMPC_aligned);
3546  continue;
3547  }
3548  AlignedArgs[CanonPVD] = E;
3549  QualType QTy = PVD->getType()
3550  .getNonReferenceType()
3551  .getUnqualifiedType()
3552  .getCanonicalType();
3553  const Type *Ty = QTy.getTypePtrOrNull();
3554  if (!Ty || (!Ty->isArrayType() && !Ty->isPointerType())) {
3555  Diag(E->getExprLoc(), diag::err_omp_aligned_expected_array_or_ptr)
3556  << QTy << getLangOpts().CPlusPlus << E->getSourceRange();
3557  Diag(PVD->getLocation(), diag::note_previous_decl) << PVD;
3558  }
3559  continue;
3560  }
3561  }
3562  if (isa<CXXThisExpr>(E)) {
3563  if (AlignedThis) {
3564  Diag(E->getExprLoc(), diag::err_omp_aligned_twice)
3565  << 2 << E->getSourceRange();
3566  Diag(AlignedThis->getExprLoc(), diag::note_omp_explicit_dsa)
3567  << getOpenMPClauseName(OMPC_aligned);
3568  }
3569  AlignedThis = E;
3570  continue;
3571  }
3572  Diag(E->getExprLoc(), diag::err_omp_param_or_this_in_clause)
3573  << FD->getDeclName() << (isa<CXXMethodDecl>(ADecl) ? 1 : 0);
3574  }
3575  // The optional parameter of the aligned clause, alignment, must be a constant
3576  // positive integer expression. If no optional parameter is specified,
3577  // implementation-defined default alignments for SIMD instructions on the
3578  // target platforms are assumed.
3579  SmallVector<const Expr *, 4> NewAligns;
3580  for (Expr *E : Alignments) {
3581  ExprResult Align;
3582  if (E)
3583  Align = VerifyPositiveIntegerConstantInClause(E, OMPC_aligned);
3584  NewAligns.push_back(Align.get());
3585  }
3586  // OpenMP [2.8.2, declare simd construct, Description]
3587  // The linear clause declares one or more list items to be private to a SIMD
3588  // lane and to have a linear relationship with respect to the iteration space
3589  // of a loop.
3590  // The special this pointer can be used as if was one of the arguments to the
3591  // function in any of the linear, aligned, or uniform clauses.
3592  // When a linear-step expression is specified in a linear clause it must be
3593  // either a constant integer expression or an integer-typed parameter that is
3594  // specified in a uniform clause on the directive.
3595  llvm::DenseMap<const Decl *, const Expr *> LinearArgs;
3596  const bool IsUniformedThis = UniformedLinearThis != nullptr;
3597  auto MI = LinModifiers.begin();
3598  for (const Expr *E : Linears) {
3599  auto LinKind = static_cast<OpenMPLinearClauseKind>(*MI);
3600  ++MI;
3601  E = E->IgnoreParenImpCasts();
3602  if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
3603  if (const auto *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl())) {
3604  const VarDecl *CanonPVD = PVD->getCanonicalDecl();
3605  if (FD->getNumParams() > PVD->getFunctionScopeIndex() &&
3606  FD->getParamDecl(PVD->getFunctionScopeIndex())
3607  ->getCanonicalDecl() == CanonPVD) {
3608  // OpenMP [2.15.3.7, linear Clause, Restrictions]
3609  // A list-item cannot appear in more than one linear clause.
3610  if (LinearArgs.count(CanonPVD) > 0) {
3611  Diag(E->getExprLoc(), diag::err_omp_wrong_dsa)
3612  << getOpenMPClauseName(OMPC_linear)
3613  << getOpenMPClauseName(OMPC_linear) << E->getSourceRange();
3614  Diag(LinearArgs[CanonPVD]->getExprLoc(),
3615  diag::note_omp_explicit_dsa)
3616  << getOpenMPClauseName(OMPC_linear);
3617  continue;
3618  }
3619  // Each argument can appear in at most one uniform or linear clause.
3620  if (UniformedArgs.count(CanonPVD) > 0) {
3621  Diag(E->getExprLoc(), diag::err_omp_wrong_dsa)
3622  << getOpenMPClauseName(OMPC_linear)
3624  Diag(UniformedArgs[CanonPVD]->getExprLoc(),
3625  diag::note_omp_explicit_dsa)
3627  continue;
3628  }
3629  LinearArgs[CanonPVD] = E;
3630  if (E->isValueDependent() || E->isTypeDependent() ||
3631  E->isInstantiationDependent() ||
3633  continue;
3634  (void)CheckOpenMPLinearDecl(CanonPVD, E->getExprLoc(), LinKind,
3635  PVD->getOriginalType());
3636  continue;
3637  }
3638  }
3639  if (isa<CXXThisExpr>(E)) {
3640  if (UniformedLinearThis) {
3641  Diag(E->getExprLoc(), diag::err_omp_wrong_dsa)
3642  << getOpenMPClauseName(OMPC_linear)
3643  << getOpenMPClauseName(IsUniformedThis ? OMPC_uniform : OMPC_linear)
3644  << E->getSourceRange();
3645  Diag(UniformedLinearThis->getExprLoc(), diag::note_omp_explicit_dsa)
3646  << getOpenMPClauseName(IsUniformedThis ? OMPC_uniform
3647  : OMPC_linear);
3648  continue;
3649  }
3650  UniformedLinearThis = E;
3651  if (E->isValueDependent() || E->isTypeDependent() ||
3653  continue;
3654  (void)CheckOpenMPLinearDecl(/*D=*/nullptr, E->getExprLoc(), LinKind,
3655  E->getType());
3656  continue;
3657  }
3658  Diag(E->getExprLoc(), diag::err_omp_param_or_this_in_clause)
3659  << FD->getDeclName() << (isa<CXXMethodDecl>(ADecl) ? 1 : 0);
3660  }
3661  Expr *Step = nullptr;
3662  Expr *NewStep = nullptr;
3663  SmallVector<Expr *, 4> NewSteps;
3664  for (Expr *E : Steps) {
3665  // Skip the same step expression, it was checked already.
3666  if (Step == E || !E) {
3667  NewSteps.push_back(E ? NewStep : nullptr);
3668  continue;
3669  }
3670  Step = E;
3671  if (const auto *DRE = dyn_cast<DeclRefExpr>(Step))
3672  if (const auto *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl())) {
3673  const VarDecl *CanonPVD = PVD->getCanonicalDecl();
3674  if (UniformedArgs.count(CanonPVD) == 0) {
3675  Diag(Step->getExprLoc(), diag::err_omp_expected_uniform_param)
3676  << Step->getSourceRange();
3677  } else if (E->isValueDependent() || E->isTypeDependent() ||
3678  E->isInstantiationDependent() ||
3680  CanonPVD->getType()->hasIntegerRepresentation()) {
3681  NewSteps.push_back(Step);
3682  } else {
3683  Diag(Step->getExprLoc(), diag::err_omp_expected_int_param)
3684  << Step->getSourceRange();
3685  }
3686  continue;
3687  }
3688  NewStep = Step;
3689  if (Step && !Step->isValueDependent() && !Step->isTypeDependent() &&
3690  !Step->isInstantiationDependent() &&
3692  NewStep = PerformOpenMPImplicitIntegerConversion(Step->getExprLoc(), Step)
3693  .get();
3694  if (NewStep)
3695  NewStep = VerifyIntegerConstantExpression(NewStep).get();
3696  }
3697  NewSteps.push_back(NewStep);
3698  }
3699  auto *NewAttr = OMPDeclareSimdDeclAttr::CreateImplicit(
3700  Context, BS, SL.get(), const_cast<Expr **>(Uniforms.data()),
3701  Uniforms.size(), const_cast<Expr **>(Aligneds.data()), Aligneds.size(),
3702  const_cast<Expr **>(NewAligns.data()), NewAligns.size(),
3703  const_cast<Expr **>(Linears.data()), Linears.size(),
3704  const_cast<unsigned *>(LinModifiers.data()), LinModifiers.size(),
3705  NewSteps.data(), NewSteps.size(), SR);
3706  ADecl->addAttr(NewAttr);
3707  return ConvertDeclToDeclGroup(ADecl);
3708 }
3709 
3711  Stmt *AStmt,
3712  SourceLocation StartLoc,
3713  SourceLocation EndLoc) {
3714  if (!AStmt)
3715  return StmtError();
3716 
3717  auto *CS = cast<CapturedStmt>(AStmt);
3718  // 1.2.2 OpenMP Language Terminology
3719  // Structured block - An executable statement with a single entry at the
3720  // top and a single exit at the bottom.
3721  // The point of exit cannot be a branch out of the structured block.
3722  // longjmp() and throw() must not violate the entry/exit criteria.
3723  CS->getCapturedDecl()->setNothrow();
3724 
3725  setFunctionHasBranchProtectedScope();
3726 
3727  return OMPParallelDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt,
3728  DSAStack->isCancelRegion());
3729 }
3730 
3731 namespace {
/// Helper class for checking canonical form of the OpenMP loops and
/// extracting iteration space of each loop in the loop nest, that will be used
/// for IR generation.
///
/// Intended usage: call checkAndSetInit/checkAndSetCond/checkAndSetInc on the
/// three parts of the for-statement (each returns true on error), then use the
/// build*() members to construct the expressions CodeGen needs.
class OpenMPIterationSpaceChecker {
  /// Reference to Sema.
  Sema &SemaRef;
  /// A location for diagnostics (when there is no some better location).
  SourceLocation DefaultLoc;
  /// A location for diagnostics (when increment is not compatible).
  SourceLocation ConditionLoc;
  /// A source location for referring to loop init later.
  SourceRange InitSrcRange;
  /// A source location for referring to condition later.
  SourceRange ConditionSrcRange;
  /// A source location for referring to increment later.
  SourceRange IncrementSrcRange;
  /// Loop variable (canonical declaration of the loop counter).
  ValueDecl *LCDecl = nullptr;
  /// Reference to loop variable.
  Expr *LCRef = nullptr;
  /// Lower bound (initializer for the var).
  Expr *LB = nullptr;
  /// Upper bound.
  Expr *UB = nullptr;
  /// Loop step (increment).
  Expr *Step = nullptr;
  /// This flag is true when condition is one of:
  ///   Var <  UB
  ///   Var <= UB
  ///   UB  >  Var
  ///   UB  >= Var
  /// i.e. the counter moves upward toward the bound.
  bool TestIsLessOp = false;
  /// This flag is true when condition is strict ( < or > ).
  bool TestIsStrictOp = false;
  /// This flag is true when step is subtracted on each iteration.
  bool SubtractStep = false;

public:
  OpenMPIterationSpaceChecker(Sema &SemaRef, SourceLocation DefaultLoc)
      : SemaRef(SemaRef), DefaultLoc(DefaultLoc), ConditionLoc(DefaultLoc) {}
  /// Check init-expr for canonical loop form and save loop counter
  /// variable - #Var and its initialization value - #LB.
  /// Returns true on error.
  bool checkAndSetInit(Stmt *S, bool EmitDiags = true);
  /// Check test-expr for canonical form, save upper-bound (#UB), flags
  /// for less/greater and for strict/non-strict comparison.
  /// Returns true on error.
  bool checkAndSetCond(Expr *S);
  /// Check incr-expr for canonical loop form and return true if it
  /// does not conform, otherwise save loop step (#Step).
  bool checkAndSetInc(Expr *S);
  /// Return the loop counter variable.
  ValueDecl *getLoopDecl() const { return LCDecl; }
  /// Return the reference expression to loop counter variable.
  Expr *getLoopDeclRefExpr() const { return LCRef; }
  /// Source range of the loop init.
  SourceRange getInitSrcRange() const { return InitSrcRange; }
  /// Source range of the loop condition.
  SourceRange getConditionSrcRange() const { return ConditionSrcRange; }
  /// Source range of the loop increment.
  SourceRange getIncrementSrcRange() const { return IncrementSrcRange; }
  /// True if the step should be subtracted.
  bool shouldSubtractStep() const { return SubtractStep; }
  /// Build the expression to calculate the number of iterations.
  Expr *buildNumIterations(
      Scope *S, const bool LimitedType,
      llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) const;
  /// Build the precondition expression for the loops.
  Expr *
  buildPreCond(Scope *S, Expr *Cond,
               llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) const;
  /// Build reference expression to the counter be used for codegen.
  DeclRefExpr *
  buildCounterVar(llvm::MapVector<const Expr *, DeclRefExpr *> &Captures,
                  DSAStackTy &DSA) const;
  /// Build reference expression to the private counter be used for
  /// codegen.
  Expr *buildPrivateCounterVar() const;
  /// Build initialization of the counter be used for codegen.
  Expr *buildCounterInit() const;
  /// Build step of the counter be used for codegen.
  Expr *buildCounterStep() const;
  /// Build loop data with counter value for depend clauses in ordered
  /// directives.
  Expr *
  buildOrderedLoopData(Scope *S, Expr *Counter,
                       llvm::MapVector<const Expr *, DeclRefExpr *> &Captures,
                       SourceLocation Loc, Expr *Inc = nullptr,
                       OverloadedOperatorKind OOK = OO_Amp);
  /// Return true if any expression is dependent.
  bool dependent() const;

private:
  /// Check the right-hand side of an assignment in the increment
  /// expression.
  bool checkAndSetIncRHS(Expr *RHS);
  /// Helper to set loop counter variable and its initializer.
  bool setLCDeclAndLB(ValueDecl *NewLCDecl, Expr *NewDeclRefExpr, Expr *NewLB);
  /// Helper to set upper bound.
  bool setUB(Expr *NewUB, bool LessOp, bool StrictOp, SourceRange SR,
             SourceLocation SL);
  /// Helper to set loop increment.
  bool setStep(Expr *NewStep, bool Subtract);
};
3834 
3835 bool OpenMPIterationSpaceChecker::dependent() const {
3836  if (!LCDecl) {
3837  assert(!LB && !UB && !Step);
3838  return false;
3839  }
3840  return LCDecl->getType()->isDependentType() ||
3841  (LB && LB->isValueDependent()) || (UB && UB->isValueDependent()) ||
3842  (Step && Step->isValueDependent());
3843 }
3844 
3845 bool OpenMPIterationSpaceChecker::setLCDeclAndLB(ValueDecl *NewLCDecl,
3846  Expr *NewLCRefExpr,
3847  Expr *NewLB) {
3848  // State consistency checking to ensure correct usage.
3849  assert(LCDecl == nullptr && LB == nullptr && LCRef == nullptr &&
3850  UB == nullptr && Step == nullptr && !TestIsLessOp && !TestIsStrictOp);
3851  if (!NewLCDecl || !NewLB)
3852  return true;
3853  LCDecl = getCanonicalDecl(NewLCDecl);
3854  LCRef = NewLCRefExpr;
3855  if (auto *CE = dyn_cast_or_null<CXXConstructExpr>(NewLB))
3856  if (const CXXConstructorDecl *Ctor = CE->getConstructor())
3857  if ((Ctor->isCopyOrMoveConstructor() ||
3858  Ctor->isConvertingConstructor(/*AllowExplicit=*/false)) &&
3859  CE->getNumArgs() > 0 && CE->getArg(0) != nullptr)
3860  NewLB = CE->getArg(0)->IgnoreParenImpCasts();
3861  LB = NewLB;
3862  return false;
3863 }
3864 
3865 bool OpenMPIterationSpaceChecker::setUB(Expr *NewUB, bool LessOp, bool StrictOp,
3866  SourceRange SR, SourceLocation SL) {
3867  // State consistency checking to ensure correct usage.
3868  assert(LCDecl != nullptr && LB != nullptr && UB == nullptr &&
3869  Step == nullptr && !TestIsLessOp && !TestIsStrictOp);
3870  if (!NewUB)
3871  return true;
3872  UB = NewUB;
3873  TestIsLessOp = LessOp;
3874  TestIsStrictOp = StrictOp;
3875  ConditionSrcRange = SR;
3876  ConditionLoc = SL;
3877  return false;
3878 }
3879 
3880 bool OpenMPIterationSpaceChecker::setStep(Expr *NewStep, bool Subtract) {
3881  // State consistency checking to ensure correct usage.
3882  assert(LCDecl != nullptr && LB != nullptr && Step == nullptr);
3883  if (!NewStep)
3884  return true;
3885  if (!NewStep->isValueDependent()) {
3886  // Check that the step is integer expression.
3887  SourceLocation StepLoc = NewStep->getBeginLoc();
3889  StepLoc, getExprAsWritten(NewStep));
3890  if (Val.isInvalid())
3891  return true;
3892  NewStep = Val.get();
3893 
3894  // OpenMP [2.6, Canonical Loop Form, Restrictions]
3895  // If test-expr is of form var relational-op b and relational-op is < or
3896  // <= then incr-expr must cause var to increase on each iteration of the
3897  // loop. If test-expr is of form var relational-op b and relational-op is
3898  // > or >= then incr-expr must cause var to decrease on each iteration of
3899  // the loop.
3900  // If test-expr is of form b relational-op var and relational-op is < or
3901  // <= then incr-expr must cause var to decrease on each iteration of the
3902  // loop. If test-expr is of form b relational-op var and relational-op is
3903  // > or >= then incr-expr must cause var to increase on each iteration of
3904  // the loop.
3905  llvm::APSInt Result;
3906  bool IsConstant = NewStep->isIntegerConstantExpr(Result, SemaRef.Context);
3907  bool IsUnsigned = !NewStep->getType()->hasSignedIntegerRepresentation();
3908  bool IsConstNeg =
3909  IsConstant && Result.isSigned() && (Subtract != Result.isNegative());
3910  bool IsConstPos =
3911  IsConstant && Result.isSigned() && (Subtract == Result.isNegative());
3912  bool IsConstZero = IsConstant && !Result.getBoolValue();
3913  if (UB && (IsConstZero ||
3914  (TestIsLessOp ? (IsConstNeg || (IsUnsigned && Subtract))
3915  : (IsConstPos || (IsUnsigned && !Subtract))))) {
3916  SemaRef.Diag(NewStep->getExprLoc(),
3917  diag::err_omp_loop_incr_not_compatible)
3918  << LCDecl << TestIsLessOp << NewStep->getSourceRange();
3919  SemaRef.Diag(ConditionLoc,
3920  diag::note_omp_loop_cond_requres_compatible_incr)
3921  << TestIsLessOp << ConditionSrcRange;
3922  return true;
3923  }
3924  if (TestIsLessOp == Subtract) {
3925  NewStep =
3926  SemaRef.CreateBuiltinUnaryOp(NewStep->getExprLoc(), UO_Minus, NewStep)
3927  .get();
3928  Subtract = !Subtract;
3929  }
3930  }
3931 
3932  Step = NewStep;
3933  SubtractStep = Subtract;
3934  return false;
3935 }
3936 
3937 bool OpenMPIterationSpaceChecker::checkAndSetInit(Stmt *S, bool EmitDiags) {
3938  // Check init-expr for canonical loop form and save loop counter
3939  // variable - #Var and its initialization value - #LB.
3940  // OpenMP [2.6] Canonical loop form. init-expr may be one of the following:
3941  // var = lb
3942  // integer-type var = lb
3943  // random-access-iterator-type var = lb
3944  // pointer-type var = lb
3945  //
3946  if (!S) {
3947  if (EmitDiags) {
3948  SemaRef.Diag(DefaultLoc, diag::err_omp_loop_not_canonical_init);
3949  }
3950  return true;
3951  }
3952  if (auto *ExprTemp = dyn_cast<ExprWithCleanups>(S))
3953  if (!ExprTemp->cleanupsHaveSideEffects())
3954  S = ExprTemp->getSubExpr();
3955 
3956  InitSrcRange = S->getSourceRange();
3957  if (Expr *E = dyn_cast<Expr>(S))
3958  S = E->IgnoreParens();
3959  if (auto *BO = dyn_cast<BinaryOperator>(S)) {
3960  if (BO->getOpcode() == BO_Assign) {
3961  Expr *LHS = BO->getLHS()->IgnoreParens();
3962  if (auto *DRE = dyn_cast<DeclRefExpr>(LHS)) {
3963  if (auto *CED = dyn_cast<OMPCapturedExprDecl>(DRE->getDecl()))
3964  if (auto *ME = dyn_cast<MemberExpr>(getExprAsWritten(CED->getInit())))
3965  return setLCDeclAndLB(ME->getMemberDecl(), ME, BO->getRHS());
3966  return setLCDeclAndLB(DRE->getDecl(), DRE, BO->getRHS());
3967  }
3968  if (auto *ME = dyn_cast<MemberExpr>(LHS)) {
3969  if (ME->isArrow() &&
3970  isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts()))
3971  return setLCDeclAndLB(ME->getMemberDecl(), ME, BO->getRHS());
3972  }
3973  }
3974  } else if (auto *DS = dyn_cast<DeclStmt>(S)) {
3975  if (DS->isSingleDecl()) {
3976  if (auto *Var = dyn_cast_or_null<VarDecl>(DS->getSingleDecl())) {
3977  if (Var->hasInit() && !Var->getType()->isReferenceType()) {
3978  // Accept non-canonical init form here but emit ext. warning.
3979  if (Var->getInitStyle() != VarDecl::CInit && EmitDiags)
3980  SemaRef.Diag(S->getBeginLoc(),
3981  diag::ext_omp_loop_not_canonical_init)
3982  << S->getSourceRange();
3983  return setLCDeclAndLB(
3984  Var,
3985  buildDeclRefExpr(SemaRef, Var,
3986  Var->getType().getNonReferenceType(),
3987  DS->getBeginLoc()),
3988  Var->getInit());
3989  }
3990  }
3991  }
3992  } else if (auto *CE = dyn_cast<CXXOperatorCallExpr>(S)) {
3993  if (CE->getOperator() == OO_Equal) {
3994  Expr *LHS = CE->getArg(0);
3995  if (auto *DRE = dyn_cast<DeclRefExpr>(LHS)) {
3996  if (auto *CED = dyn_cast<OMPCapturedExprDecl>(DRE->getDecl()))
3997  if (auto *ME = dyn_cast<MemberExpr>(getExprAsWritten(CED->getInit())))
3998  return setLCDeclAndLB(ME->getMemberDecl(), ME, BO->getRHS());
3999  return setLCDeclAndLB(DRE->getDecl(), DRE, CE->getArg(1));
4000  }
4001  if (auto *ME = dyn_cast<MemberExpr>(LHS)) {
4002  if (ME->isArrow() &&
4003  isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts()))
4004  return setLCDeclAndLB(ME->getMemberDecl(), ME, BO->getRHS());
4005  }
4006  }
4007  }
4008 
4009  if (dependent() || SemaRef.CurContext->isDependentContext())
4010  return false;
4011  if (EmitDiags) {
4012  SemaRef.Diag(S->getBeginLoc(), diag::err_omp_loop_not_canonical_init)
4013  << S->getSourceRange();
4014  }
4015  return true;
4016 }
4017 
4018 /// Ignore parenthesizes, implicit casts, copy constructor and return the
4019 /// variable (which may be the loop variable) if possible.
4020 static const ValueDecl *getInitLCDecl(const Expr *E) {
4021  if (!E)
4022  return nullptr;
4023  E = getExprAsWritten(E);
4024  if (const auto *CE = dyn_cast_or_null<CXXConstructExpr>(E))
4025  if (const CXXConstructorDecl *Ctor = CE->getConstructor())
4026  if ((Ctor->isCopyOrMoveConstructor() ||
4027  Ctor->isConvertingConstructor(/*AllowExplicit=*/false)) &&
4028  CE->getNumArgs() > 0 && CE->getArg(0) != nullptr)
4029  E = CE->getArg(0)->IgnoreParenImpCasts();
4030  if (const auto *DRE = dyn_cast_or_null<DeclRefExpr>(E)) {
4031  if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()))
4032  return getCanonicalDecl(VD);
4033  }
4034  if (const auto *ME = dyn_cast_or_null<MemberExpr>(E))
4035  if (ME->isArrow() && isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts()))
4036  return getCanonicalDecl(ME->getMemberDecl());
4037  return nullptr;
4038 }
4039 
bool OpenMPIterationSpaceChecker::checkAndSetCond(Expr *S) {
  // Check test-expr for canonical form, save upper-bound UB, flags for
  // less/greater and for strict/non-strict comparison.
  // OpenMP [2.6] Canonical loop form. Test-expr may be one of the following:
  //   var relational-op b
  //   b relational-op var
  //
  if (!S) {
    // No condition at all cannot be canonical.
    SemaRef.Diag(DefaultLoc, diag::err_omp_loop_not_canonical_cond) << LCDecl;
    return true;
  }
  S = getExprAsWritten(S);
  SourceLocation CondLoc = S->getBeginLoc();
  if (auto *BO = dyn_cast<BinaryOperator>(S)) {
    if (BO->isRelationalOp()) {
      // 'var relational-op b': "less" iff '<' or '<=', "strict" iff '<'/'>'.
      if (getInitLCDecl(BO->getLHS()) == LCDecl)
        return setUB(BO->getRHS(),
                     (BO->getOpcode() == BO_LT || BO->getOpcode() == BO_LE),
                     (BO->getOpcode() == BO_LT || BO->getOpcode() == BO_GT),
                     BO->getSourceRange(), BO->getOperatorLoc());
      // 'b relational-op var': the direction of the test is mirrored.
      if (getInitLCDecl(BO->getRHS()) == LCDecl)
        return setUB(BO->getLHS(),
                     (BO->getOpcode() == BO_GT || BO->getOpcode() == BO_GE),
                     (BO->getOpcode() == BO_LT || BO->getOpcode() == BO_GT),
                     BO->getSourceRange(), BO->getOperatorLoc());
    }
  } else if (auto *CE = dyn_cast<CXXOperatorCallExpr>(S)) {
    // Overloaded relational operator (e.g. C++ random access iterators).
    if (CE->getNumArgs() == 2) {
      auto Op = CE->getOperator();
      switch (Op) {
      case OO_Greater:
      case OO_GreaterEqual:
      case OO_Less:
      case OO_LessEqual:
        if (getInitLCDecl(CE->getArg(0)) == LCDecl)
          return setUB(CE->getArg(1), Op == OO_Less || Op == OO_LessEqual,
                       Op == OO_Less || Op == OO_Greater, CE->getSourceRange(),
                       CE->getOperatorLoc());
        if (getInitLCDecl(CE->getArg(1)) == LCDecl)
          return setUB(CE->getArg(0), Op == OO_Greater || Op == OO_GreaterEqual,
                       Op == OO_Less || Op == OO_Greater, CE->getSourceRange(),
                       CE->getOperatorLoc());
        break;
      default:
        break;
      }
    }
  }
  // In dependent contexts defer the check to template instantiation.
  if (dependent() || SemaRef.CurContext->isDependentContext())
    return false;
  SemaRef.Diag(CondLoc, diag::err_omp_loop_not_canonical_cond)
      << S->getSourceRange() << LCDecl;
  return true;
}
4094 
4095 bool OpenMPIterationSpaceChecker::checkAndSetIncRHS(Expr *RHS) {
4096  // RHS of canonical loop form increment can be:
4097  // var + incr
4098  // incr + var
4099  // var - incr
4100  //
4101  RHS = RHS->IgnoreParenImpCasts();
4102  if (auto *BO = dyn_cast<BinaryOperator>(RHS)) {
4103  if (BO->isAdditiveOp()) {
4104  bool IsAdd = BO->getOpcode() == BO_Add;
4105  if (getInitLCDecl(BO->getLHS()) == LCDecl)
4106  return setStep(BO->getRHS(), !IsAdd);
4107  if (IsAdd && getInitLCDecl(BO->getRHS()) == LCDecl)
4108  return setStep(BO->getLHS(), /*Subtract=*/false);
4109  }
4110  } else if (auto *CE = dyn_cast<CXXOperatorCallExpr>(RHS)) {
4111  bool IsAdd = CE->getOperator() == OO_Plus;
4112  if ((IsAdd || CE->getOperator() == OO_Minus) && CE->getNumArgs() == 2) {
4113  if (getInitLCDecl(CE->getArg(0)) == LCDecl)
4114  return setStep(CE->getArg(1), !IsAdd);
4115  if (IsAdd && getInitLCDecl(CE->getArg(1)) == LCDecl)
4116  return setStep(CE->getArg(0), /*Subtract=*/false);
4117  }
4118  }
4119  if (dependent() || SemaRef.CurContext->isDependentContext())
4120  return false;
4121  SemaRef.Diag(RHS->getBeginLoc(), diag::err_omp_loop_not_canonical_incr)
4122  << RHS->getSourceRange() << LCDecl;
4123  return true;
4124 }
4125 
bool OpenMPIterationSpaceChecker::checkAndSetInc(Expr *S) {
  // Check incr-expr for canonical loop form and return true if it
  // does not conform.
  // OpenMP [2.6] Canonical loop form. Test-expr may be one of the following:
  //   ++var
  //   var++
  //   --var
  //   var--
  //   var += incr
  //   var -= incr
  //   var = var + incr
  //   var = incr + var
  //   var = var - incr
  //
  if (!S) {
    // A missing increment cannot be canonical.
    SemaRef.Diag(DefaultLoc, diag::err_omp_loop_not_canonical_incr) << LCDecl;
    return true;
  }
  // Strip an ExprWithCleanups wrapper when its cleanups have no side effects.
  if (auto *ExprTemp = dyn_cast<ExprWithCleanups>(S))
    if (!ExprTemp->cleanupsHaveSideEffects())
      S = ExprTemp->getSubExpr();

  IncrementSrcRange = S->getSourceRange();
  S = S->IgnoreParens();
  if (auto *UO = dyn_cast<UnaryOperator>(S)) {
    // '++var' / 'var++' / '--var' / 'var--': step is +1 or -1.
    if (UO->isIncrementDecrementOp() &&
        getInitLCDecl(UO->getSubExpr()) == LCDecl)
      return setStep(SemaRef
                         .ActOnIntegerConstant(UO->getBeginLoc(),
                                               (UO->isDecrementOp() ? -1 : 1))
                         .get(),
                     /*Subtract=*/false);
  } else if (auto *BO = dyn_cast<BinaryOperator>(S)) {
    switch (BO->getOpcode()) {
    case BO_AddAssign:
    case BO_SubAssign:
      // 'var += incr' / 'var -= incr'.
      if (getInitLCDecl(BO->getLHS()) == LCDecl)
        return setStep(BO->getRHS(), BO->getOpcode() == BO_SubAssign);
      break;
    case BO_Assign:
      // 'var = <rhs>': the RHS must itself be canonical.
      if (getInitLCDecl(BO->getLHS()) == LCDecl)
        return checkAndSetIncRHS(BO->getRHS());
      break;
    default:
      break;
    }
  } else if (auto *CE = dyn_cast<CXXOperatorCallExpr>(S)) {
    // Same forms via overloaded operators (iterators and user types).
    switch (CE->getOperator()) {
    case OO_PlusPlus:
    case OO_MinusMinus:
      if (getInitLCDecl(CE->getArg(0)) == LCDecl)
        return setStep(SemaRef
                           .ActOnIntegerConstant(
                               CE->getBeginLoc(),
                               ((CE->getOperator() == OO_MinusMinus) ? -1 : 1))
                           .get(),
                       /*Subtract=*/false);
      break;
    case OO_PlusEqual:
    case OO_MinusEqual:
      if (getInitLCDecl(CE->getArg(0)) == LCDecl)
        return setStep(CE->getArg(1), CE->getOperator() == OO_MinusEqual);
      break;
    case OO_Equal:
      if (getInitLCDecl(CE->getArg(0)) == LCDecl)
        return checkAndSetIncRHS(CE->getArg(1));
      break;
    default:
      break;
    }
  }
  // In dependent contexts defer the check to template instantiation.
  if (dependent() || SemaRef.CurContext->isDependentContext())
    return false;
  SemaRef.Diag(S->getBeginLoc(), diag::err_omp_loop_not_canonical_incr)
      << S->getSourceRange() << LCDecl;
  return true;
}
4203 
4204 static ExprResult
4205 tryBuildCapture(Sema &SemaRef, Expr *Capture,
4206  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) {
4207  if (SemaRef.CurContext->isDependentContext())
4208  return ExprResult(Capture);
4209  if (Capture->isEvaluatable(SemaRef.Context, Expr::SE_AllowSideEffects))
4210  return SemaRef.PerformImplicitConversion(
4211  Capture->IgnoreImpCasts(), Capture->getType(), Sema::AA_Converting,
4212  /*AllowExplicit=*/true);
4213  auto I = Captures.find(Capture);
4214  if (I != Captures.end())
4215  return buildCapture(SemaRef, Capture, I->second);
4216  DeclRefExpr *Ref = nullptr;
4217  ExprResult Res = buildCapture(SemaRef, Capture, Ref);
4218  Captures[Capture] = Ref;
4219  return Res;
4220 }
4221 
4222 /// Build the expression to calculate the number of iterations.
4223 Expr *OpenMPIterationSpaceChecker::buildNumIterations(
4224  Scope *S, const bool LimitedType,
4225  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) const {
4226  ExprResult Diff;
4227  QualType VarType = LCDecl->getType().getNonReferenceType();
4228  if (VarType->isIntegerType() || VarType->isPointerType() ||
4229  SemaRef.getLangOpts().CPlusPlus) {
4230  // Upper - Lower
4231  Expr *UBExpr = TestIsLessOp ? UB : LB;
4232  Expr *LBExpr = TestIsLessOp ? LB : UB;
4233  Expr *Upper = tryBuildCapture(SemaRef, UBExpr, Captures).get();
4234  Expr *Lower = tryBuildCapture(SemaRef, LBExpr, Captures).get();
4235  if (!Upper || !Lower)
4236  return nullptr;
4237 
4238  Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Sub, Upper, Lower);
4239 
4240  if (!Diff.isUsable() && VarType->getAsCXXRecordDecl()) {
4241  // BuildBinOp already emitted error, this one is to point user to upper
4242  // and lower bound, and to tell what is passed to 'operator-'.
4243  SemaRef.Diag(Upper->getBeginLoc(), diag::err_omp_loop_diff_cxx)
4244  << Upper->getSourceRange() << Lower->getSourceRange();
4245  return nullptr;
4246  }
4247  }
4248 
4249  if (!Diff.isUsable())
4250  return nullptr;
4251 
4252  // Upper - Lower [- 1]
4253  if (TestIsStrictOp)
4254  Diff = SemaRef.BuildBinOp(
4255  S, DefaultLoc, BO_Sub, Diff.get(),
4256  SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get());
4257  if (!Diff.isUsable())
4258  return nullptr;
4259 
4260  // Upper - Lower [- 1] + Step
4261  ExprResult NewStep = tryBuildCapture(SemaRef, Step, Captures);
4262  if (!NewStep.isUsable())
4263  return nullptr;
4264  Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Add, Diff.get(), NewStep.get());
4265  if (!Diff.isUsable())
4266  return nullptr;
4267 
4268  // Parentheses (for dumping/debugging purposes only).
4269  Diff = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, Diff.get());
4270  if (!Diff.isUsable())
4271  return nullptr;
4272 
4273  // (Upper - Lower [- 1] + Step) / Step
4274  Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Div, Diff.get(), NewStep.get());
4275  if (!Diff.isUsable())
4276  return nullptr;
4277 
4278  // OpenMP runtime requires 32-bit or 64-bit loop variables.
4279  QualType Type = Diff.get()->getType();
4280  ASTContext &C = SemaRef.Context;
4281  bool UseVarType = VarType->hasIntegerRepresentation() &&
4282  C.getTypeSize(Type) > C.getTypeSize(VarType);
4283  if (!Type->isIntegerType() || UseVarType) {
4284  unsigned NewSize =
4285  UseVarType ? C.getTypeSize(VarType) : C.getTypeSize(Type);
4286  bool IsSigned = UseVarType ? VarType->hasSignedIntegerRepresentation()
4288  Type = C.getIntTypeForBitwidth(NewSize, IsSigned);
4289  if (!SemaRef.Context.hasSameType(Diff.get()->getType(), Type)) {
4290  Diff = SemaRef.PerformImplicitConversion(
4291  Diff.get(), Type, Sema::AA_Converting, /*AllowExplicit=*/true);
4292  if (!Diff.isUsable())
4293  return nullptr;
4294  }
4295  }
4296  if (LimitedType) {
4297  unsigned NewSize = (C.getTypeSize(Type) > 32) ? 64 : 32;
4298  if (NewSize != C.getTypeSize(Type)) {
4299  if (NewSize < C.getTypeSize(Type)) {
4300  assert(NewSize == 64 && "incorrect loop var size");
4301  SemaRef.Diag(DefaultLoc, diag::warn_omp_loop_64_bit_var)
4302  << InitSrcRange << ConditionSrcRange;
4303  }
4304  QualType NewType = C.getIntTypeForBitwidth(
4305  NewSize, Type->hasSignedIntegerRepresentation() ||
4306  C.getTypeSize(Type) < NewSize);
4307  if (!SemaRef.Context.hasSameType(Diff.get()->getType(), NewType)) {
4308  Diff = SemaRef.PerformImplicitConversion(Diff.get(), NewType,
4309  Sema::AA_Converting, true);
4310  if (!Diff.isUsable())
4311  return nullptr;
4312  }
4313  }
4314  }
4315 
4316  return Diff.get();
4317 }
4318 
4319 Expr *OpenMPIterationSpaceChecker::buildPreCond(
4320  Scope *S, Expr *Cond,
4321  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) const {
4322  // Try to build LB <op> UB, where <op> is <, >, <=, or >=.
4323  bool Suppress = SemaRef.getDiagnostics().getSuppressAllDiagnostics();
4324  SemaRef.getDiagnostics().setSuppressAllDiagnostics(/*Val=*/true);
4325 
4326  ExprResult NewLB = tryBuildCapture(SemaRef, LB, Captures);
4327  ExprResult NewUB = tryBuildCapture(SemaRef, UB, Captures);
4328  if (!NewLB.isUsable() || !NewUB.isUsable())
4329  return nullptr;
4330 
4331  ExprResult CondExpr =
4332  SemaRef.BuildBinOp(S, DefaultLoc,
4333  TestIsLessOp ? (TestIsStrictOp ? BO_LT : BO_LE)
4334  : (TestIsStrictOp ? BO_GT : BO_GE),
4335  NewLB.get(), NewUB.get());
4336  if (CondExpr.isUsable()) {
4337  if (!SemaRef.Context.hasSameUnqualifiedType(CondExpr.get()->getType(),
4338  SemaRef.Context.BoolTy))
4339  CondExpr = SemaRef.PerformImplicitConversion(
4340  CondExpr.get(), SemaRef.Context.BoolTy, /*Action=*/Sema::AA_Casting,
4341  /*AllowExplicit=*/true);
4342  }
4343  SemaRef.getDiagnostics().setSuppressAllDiagnostics(Suppress);
4344  // Otherwise use original loop conditon and evaluate it in runtime.
4345  return CondExpr.isUsable() ? CondExpr.get() : Cond;
4346 }
4347 
4348 /// Build reference expression to the counter be used for codegen.
4349 DeclRefExpr *OpenMPIterationSpaceChecker::buildCounterVar(
4350  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures,
4351  DSAStackTy &DSA) const {
4352  auto *VD = dyn_cast<VarDecl>(LCDecl);
4353  if (!VD) {
4354  VD = SemaRef.isOpenMPCapturedDecl(LCDecl);
4356  SemaRef, VD, VD->getType().getNonReferenceType(), DefaultLoc);
4357  const DSAStackTy::DSAVarData Data =
4358  DSA.getTopDSA(LCDecl, /*FromParent=*/false);
4359  // If the loop control decl is explicitly marked as private, do not mark it
4360  // as captured again.
4361  if (!isOpenMPPrivate(Data.CKind) || !Data.RefExpr)
4362  Captures.insert(std::make_pair(LCRef, Ref));
4363  return Ref;
4364  }
4365  return buildDeclRefExpr(SemaRef, VD, VD->getType().getNonReferenceType(),
4366  DefaultLoc);
4367 }
4368 
4369 Expr *OpenMPIterationSpaceChecker::buildPrivateCounterVar() const {
4370  if (LCDecl && !LCDecl->isInvalidDecl()) {
4371  QualType Type = LCDecl->getType().getNonReferenceType();
4372  VarDecl *PrivateVar = buildVarDecl(
4373  SemaRef, DefaultLoc, Type, LCDecl->getName(),
4374  LCDecl->hasAttrs() ? &LCDecl->getAttrs() : nullptr,
4375  isa<VarDecl>(LCDecl)
4376  ? buildDeclRefExpr(SemaRef, cast<VarDecl>(LCDecl), Type, DefaultLoc)
4377  : nullptr);
4378  if (PrivateVar->isInvalidDecl())
4379  return nullptr;
4380  return buildDeclRefExpr(SemaRef, PrivateVar, Type, DefaultLoc);
4381  }
4382  return nullptr;
4383 }
4384 
4385 /// Build initialization of the counter to be used for codegen.
4387 
4388 /// Build step of the counter be used for codegen.
4389 Expr *OpenMPIterationSpaceChecker::buildCounterStep() const { return Step; }
4390 
/// Build the iteration number '(Counter [+/- Inc] - Lower) / Step' used for
/// 'ordered depend(source/sink)' clauses; returns nullptr on failure.
Expr *OpenMPIterationSpaceChecker::buildOrderedLoopData(
    Scope *S, Expr *Counter,
    llvm::MapVector<const Expr *, DeclRefExpr *> &Captures, SourceLocation Loc,
    Expr *Inc, OverloadedOperatorKind OOK) {
  Expr *Cnt = SemaRef.DefaultLvalueConversion(Counter).get();
  if (!Cnt)
    return nullptr;
  if (Inc) {
    // Apply the sink offset to the counter first.
    assert((OOK == OO_Plus || OOK == OO_Minus) &&
           "Expected only + or - operations for depend clauses.");
    BinaryOperatorKind BOK = (OOK == OO_Plus) ? BO_Add : BO_Sub;
    Cnt = SemaRef.BuildBinOp(S, Loc, BOK, Cnt, Inc).get();
    if (!Cnt)
      return nullptr;
  }
  ExprResult Diff;
  QualType VarType = LCDecl->getType().getNonReferenceType();
  if (VarType->isIntegerType() || VarType->isPointerType() ||
      SemaRef.getLangOpts().CPlusPlus) {
    // Upper - Lower
    Expr *Upper =
        TestIsLessOp ? Cnt : tryBuildCapture(SemaRef, UB, Captures).get();
    Expr *Lower =
        TestIsLessOp ? tryBuildCapture(SemaRef, LB, Captures).get() : Cnt;
    if (!Upper || !Lower)
      return nullptr;

    Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Sub, Upper, Lower);

    if (!Diff.isUsable() && VarType->getAsCXXRecordDecl()) {
      // BuildBinOp already emitted error, this one is to point user to upper
      // and lower bound, and to tell what is passed to 'operator-'.
      SemaRef.Diag(Upper->getBeginLoc(), diag::err_omp_loop_diff_cxx)
          << Upper->getSourceRange() << Lower->getSourceRange();
      return nullptr;
    }
  }

  if (!Diff.isUsable())
    return nullptr;

  // Parentheses (for dumping/debugging purposes only).
  Diff = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, Diff.get());
  if (!Diff.isUsable())
    return nullptr;

  ExprResult NewStep = tryBuildCapture(SemaRef, Step, Captures);
  if (!NewStep.isUsable())
    return nullptr;
  // (Upper - Lower) / Step
  Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Div, Diff.get(), NewStep.get());
  if (!Diff.isUsable())
    return nullptr;

  return Diff.get();
}
4447 
/// Iteration space of a single for loop, produced by
/// checkOpenMPIterationSpace() and consumed by checkOpenMPLoop().
struct LoopIterationSpace final {
  /// Condition of the loop, checked before the first iteration.
  Expr *PreCond = nullptr;
  /// This expression calculates the number of iterations in the loop.
  /// It is always possible to calculate it before starting the loop.
  Expr *NumIterations = nullptr;
  /// The loop counter variable.
  Expr *CounterVar = nullptr;
  /// Private loop counter variable.
  Expr *PrivateCounterVar = nullptr;
  /// This is initializer for the initial value of #CounterVar.
  Expr *CounterInit = nullptr;
  /// This is step for the #CounterVar used to generate its update:
  /// #CounterVar = #CounterInit + #CounterStep * CurrentIteration.
  Expr *CounterStep = nullptr;
  /// Should step be subtracted?
  bool Subtract = false;
  /// Source range of the loop init.
  SourceRange InitSrcRange;
  /// Source range of the loop condition.
  SourceRange CondSrcRange;
  /// Source range of the loop increment.
  SourceRange IncSrcRange;
};
4473 
4474 } // namespace
4475 
4477  assert(getLangOpts().OpenMP && "OpenMP is not active.");
4478  assert(Init && "Expected loop in canonical form.");
4479  unsigned AssociatedLoops = DSAStack->getAssociatedLoops();
4480  if (AssociatedLoops > 0 &&
4481  isOpenMPLoopDirective(DSAStack->getCurrentDirective())) {
4482  OpenMPIterationSpaceChecker ISC(*this, ForLoc);
4483  if (!ISC.checkAndSetInit(Init, /*EmitDiags=*/false)) {
4484  if (ValueDecl *D = ISC.getLoopDecl()) {
4485  auto *VD = dyn_cast<VarDecl>(D);
4486  if (!VD) {
4487  if (VarDecl *Private = isOpenMPCapturedDecl(D)) {
4488  VD = Private;
4489  } else {
4490  DeclRefExpr *Ref = buildCapture(*this, D, ISC.getLoopDeclRefExpr(),
4491  /*WithInit=*/false);
4492  VD = cast<VarDecl>(Ref->getDecl());
4493  }
4494  }
4495  DSAStack->addLoopControlVariable(D, VD);
4496  }
4497  }
4498  DSAStack->setAssociatedLoops(AssociatedLoops - 1);
4499  }
4500 }
4501 
4502 /// Called on a for stmt to check and extract its iteration space
4503 /// for further processing (such as collapsing).
4505  OpenMPDirectiveKind DKind, Stmt *S, Sema &SemaRef, DSAStackTy &DSA,
4506  unsigned CurrentNestedLoopCount, unsigned NestedLoopCount,
4507  unsigned TotalNestedLoopCount, Expr *CollapseLoopCountExpr,
4508  Expr *OrderedLoopCountExpr,
4509  Sema::VarsWithInheritedDSAType &VarsWithImplicitDSA,
4510  LoopIterationSpace &ResultIterSpace,
4511  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) {
4512  // OpenMP [2.6, Canonical Loop Form]
4513  // for (init-expr; test-expr; incr-expr) structured-block
4514  auto *For = dyn_cast_or_null<ForStmt>(S);
4515  if (!For) {
4516  SemaRef.Diag(S->getBeginLoc(), diag::err_omp_not_for)
4517  << (CollapseLoopCountExpr != nullptr || OrderedLoopCountExpr != nullptr)
4518  << getOpenMPDirectiveName(DKind) << TotalNestedLoopCount
4519  << (CurrentNestedLoopCount > 0) << CurrentNestedLoopCount;
4520  if (TotalNestedLoopCount > 1) {
4521  if (CollapseLoopCountExpr && OrderedLoopCountExpr)
4522  SemaRef.Diag(DSA.getConstructLoc(),
4523  diag::note_omp_collapse_ordered_expr)
4524  << 2 << CollapseLoopCountExpr->getSourceRange()
4525  << OrderedLoopCountExpr->getSourceRange();
4526  else if (CollapseLoopCountExpr)
4527  SemaRef.Diag(CollapseLoopCountExpr->getExprLoc(),
4528  diag::note_omp_collapse_ordered_expr)
4529  << 0 << CollapseLoopCountExpr->getSourceRange();
4530  else
4531  SemaRef.Diag(OrderedLoopCountExpr->getExprLoc(),
4532  diag::note_omp_collapse_ordered_expr)
4533  << 1 << OrderedLoopCountExpr->getSourceRange();
4534  }
4535  return true;
4536  }
4537  assert(For->getBody());
4538 
4539  OpenMPIterationSpaceChecker ISC(SemaRef, For->getForLoc());
4540 
4541  // Check init.
4542  Stmt *Init = For->getInit();
4543  if (ISC.checkAndSetInit(Init))
4544  return true;
4545 
4546  bool HasErrors = false;
4547 
4548  // Check loop variable's type.
4549  if (ValueDecl *LCDecl = ISC.getLoopDecl()) {
4550  Expr *LoopDeclRefExpr = ISC.getLoopDeclRefExpr();
4551 
4552  // OpenMP [2.6, Canonical Loop Form]
4553  // Var is one of the following:
4554  // A variable of signed or unsigned integer type.
4555  // For C++, a variable of a random access iterator type.
4556  // For C, a variable of a pointer type.
4557  QualType VarType = LCDecl->getType().getNonReferenceType();
4558  if (!VarType->isDependentType() && !VarType->isIntegerType() &&
4559  !VarType->isPointerType() &&
4560  !(SemaRef.getLangOpts().CPlusPlus && VarType->isOverloadableType())) {
4561  SemaRef.Diag(Init->getBeginLoc(), diag::err_omp_loop_variable_type)
4562  << SemaRef.getLangOpts().CPlusPlus;
4563  HasErrors = true;
4564  }
4565 
4566  // OpenMP, 2.14.1.1 Data-sharing Attribute Rules for Variables Referenced in
4567  // a Construct
4568  // The loop iteration variable(s) in the associated for-loop(s) of a for or
4569  // parallel for construct is (are) private.
4570  // The loop iteration variable in the associated for-loop of a simd
4571  // construct with just one associated for-loop is linear with a
4572  // constant-linear-step that is the increment of the associated for-loop.
4573  // Exclude loop var from the list of variables with implicitly defined data
4574  // sharing attributes.
4575  VarsWithImplicitDSA.erase(LCDecl);
4576 
4577  // OpenMP [2.14.1.1, Data-sharing Attribute Rules for Variables Referenced
4578  // in a Construct, C/C++].
4579  // The loop iteration variable in the associated for-loop of a simd
4580  // construct with just one associated for-loop may be listed in a linear
4581  // clause with a constant-linear-step that is the increment of the
4582  // associated for-loop.
4583  // The loop iteration variable(s) in the associated for-loop(s) of a for or
4584  // parallel for construct may be listed in a private or lastprivate clause.
4585  DSAStackTy::DSAVarData DVar = DSA.getTopDSA(LCDecl, false);
4586  // If LoopVarRefExpr is nullptr it means the corresponding loop variable is
4587  // declared in the loop and it is predetermined as a private.
4588  OpenMPClauseKind PredeterminedCKind =
4589  isOpenMPSimdDirective(DKind)
4590  ? ((NestedLoopCount == 1) ? OMPC_linear : OMPC_lastprivate)
4591  : OMPC_private;
4592  if (((isOpenMPSimdDirective(DKind) && DVar.CKind != OMPC_unknown &&
4593  DVar.CKind != PredeterminedCKind) ||
4594  ((isOpenMPWorksharingDirective(DKind) || DKind == OMPD_taskloop ||
4595  isOpenMPDistributeDirective(DKind)) &&
4596  !isOpenMPSimdDirective(DKind) && DVar.CKind != OMPC_unknown &&
4597  DVar.CKind != OMPC_private && DVar.CKind != OMPC_lastprivate)) &&
4598  (DVar.CKind != OMPC_private || DVar.RefExpr != nullptr)) {
4599  SemaRef.Diag(Init->getBeginLoc(), diag::err_omp_loop_var_dsa)
4600  << getOpenMPClauseName(DVar.CKind) << getOpenMPDirectiveName(DKind)
4601  << getOpenMPClauseName(PredeterminedCKind);
4602  if (DVar.RefExpr == nullptr)
4603  DVar.CKind = PredeterminedCKind;
4604  reportOriginalDsa(SemaRef, &DSA, LCDecl, DVar, /*IsLoopIterVar=*/true);
4605  HasErrors = true;
4606  } else if (LoopDeclRefExpr != nullptr) {
4607  // Make the loop iteration variable private (for worksharing constructs),
4608  // linear (for simd directives with the only one associated loop) or
4609  // lastprivate (for simd directives with several collapsed or ordered
4610  // loops).
4611  if (DVar.CKind == OMPC_unknown)
4612  DVar = DSA.hasDSA(LCDecl, isOpenMPPrivate,
4613  [](OpenMPDirectiveKind) -> bool { return true; },
4614  /*FromParent=*/false);
4615  DSA.addDSA(LCDecl, LoopDeclRefExpr, PredeterminedCKind);
4616  }
4617 
4618  assert(isOpenMPLoopDirective(DKind) && "DSA for non-loop vars");
4619 
4620  // Check test-expr.
4621  HasErrors |= ISC.checkAndSetCond(For->getCond());
4622 
4623  // Check incr-expr.
4624  HasErrors |= ISC.checkAndSetInc(For->getInc());
4625  }
4626 
4627  if (ISC.dependent() || SemaRef.CurContext->isDependentContext() || HasErrors)
4628  return HasErrors;
4629 
4630  // Build the loop's iteration space representation.
4631  ResultIterSpace.PreCond =
4632  ISC.buildPreCond(DSA.getCurScope(), For->getCond(), Captures);
4633  ResultIterSpace.NumIterations = ISC.buildNumIterations(
4634  DSA.getCurScope(),
4635  (isOpenMPWorksharingDirective(DKind) ||
4637  Captures);
4638  ResultIterSpace.CounterVar = ISC.buildCounterVar(Captures, DSA);
4639  ResultIterSpace.PrivateCounterVar = ISC.buildPrivateCounterVar();
4640  ResultIterSpace.CounterInit = ISC.buildCounterInit();
4641  ResultIterSpace.CounterStep = ISC.buildCounterStep();
4642  ResultIterSpace.InitSrcRange = ISC.getInitSrcRange();
4643  ResultIterSpace.CondSrcRange = ISC.getConditionSrcRange();
4644  ResultIterSpace.IncSrcRange = ISC.getIncrementSrcRange();
4645  ResultIterSpace.Subtract = ISC.shouldSubtractStep();
4646 
4647  HasErrors |= (ResultIterSpace.PreCond == nullptr ||
4648  ResultIterSpace.NumIterations == nullptr ||
4649  ResultIterSpace.CounterVar == nullptr ||
4650  ResultIterSpace.PrivateCounterVar == nullptr ||
4651  ResultIterSpace.CounterInit == nullptr ||
4652  ResultIterSpace.CounterStep == nullptr);
4653  if (!HasErrors && DSA.isOrderedRegion()) {
4654  if (DSA.getOrderedRegionParam().second->getNumForLoops()) {
4655  if (CurrentNestedLoopCount <
4656  DSA.getOrderedRegionParam().second->getLoopNumIterations().size()) {
4657  DSA.getOrderedRegionParam().second->setLoopNumIterations(
4658  CurrentNestedLoopCount, ResultIterSpace.NumIterations);
4659  DSA.getOrderedRegionParam().second->setLoopCounter(
4660  CurrentNestedLoopCount, ResultIterSpace.CounterVar);
4661  }
4662  }
4663  for (auto &Pair : DSA.getDoacrossDependClauses()) {
4664  if (CurrentNestedLoopCount >= Pair.first->getNumLoops()) {
4665  // Erroneous case - clause has some problems.
4666  continue;
4667  }
4668  if (Pair.first->getDependencyKind() == OMPC_DEPEND_sink &&
4669  Pair.second.size() <= CurrentNestedLoopCount) {
4670  // Erroneous case - clause has some problems.
4671  Pair.first->setLoopData(CurrentNestedLoopCount, nullptr);
4672  continue;
4673  }
4674  Expr *CntValue;
4675  if (Pair.first->getDependencyKind() == OMPC_DEPEND_source)
4676  CntValue = ISC.buildOrderedLoopData(
4677  DSA.getCurScope(), ResultIterSpace.CounterVar, Captures,
4678  Pair.first->getDependencyLoc());
4679  else
4680  CntValue = ISC.buildOrderedLoopData(
4681  DSA.getCurScope(), ResultIterSpace.CounterVar, Captures,
4682  Pair.first->getDependencyLoc(),
4683  Pair.second[CurrentNestedLoopCount].first,
4684  Pair.second[CurrentNestedLoopCount].second);
4685  Pair.first->setLoopData(CurrentNestedLoopCount, CntValue);
4686  }
4687  }
4688 
4689  return HasErrors;
4690 }
4691 
4692 /// Build 'VarRef = Start.
4693 static ExprResult
4695  ExprResult Start,
4696  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) {
4697  // Build 'VarRef = Start.
4698  ExprResult NewStart = tryBuildCapture(SemaRef, Start.get(), Captures);
4699  if (!NewStart.isUsable())
4700  return ExprError();
4701  if (!SemaRef.Context.hasSameType(NewStart.get()->getType(),
4702  VarRef.get()->getType())) {
4703  NewStart = SemaRef.PerformImplicitConversion(
4704  NewStart.get(), VarRef.get()->getType(), Sema::AA_Converting,
4705  /*AllowExplicit=*/true);
4706  if (!NewStart.isUsable())
4707  return ExprError();
4708  }
4709 
4710  ExprResult Init =
4711  SemaRef.BuildBinOp(S, Loc, BO_Assign, VarRef.get(), NewStart.get());
4712  return Init;
4713 }
4714 
4715 /// Build 'VarRef = Start + Iter * Step'.
4717  Sema &SemaRef, Scope *S, SourceLocation Loc, ExprResult VarRef,
4718  ExprResult Start, ExprResult Iter, ExprResult Step, bool Subtract,
4719  llvm::MapVector<const Expr *, DeclRefExpr *> *Captures = nullptr) {
4720  // Add parentheses (for debugging purposes only).
4721  Iter = SemaRef.ActOnParenExpr(Loc, Loc, Iter.get());
4722  if (!VarRef.isUsable() || !Start.isUsable() || !Iter.isUsable() ||
4723  !Step.isUsable())
4724  return ExprError();
4725 
4726  ExprResult NewStep = Step;
4727  if (Captures)
4728  NewStep = tryBuildCapture(SemaRef, Step.get(), *Captures);
4729  if (NewStep.isInvalid())
4730  return ExprError();
4731  ExprResult Update =
4732  SemaRef.BuildBinOp(S, Loc, BO_Mul, Iter.get(), NewStep.get());
4733  if (!Update.isUsable())
4734  return ExprError();
4735 
4736  // Try to build 'VarRef = Start, VarRef (+|-)= Iter * Step' or
4737  // 'VarRef = Start (+|-) Iter * Step'.
4738  ExprResult NewStart = Start;
4739  if (Captures)
4740  NewStart = tryBuildCapture(SemaRef, Start.get(), *Captures);
4741  if (NewStart.isInvalid())
4742  return ExprError();
4743 
4744  // First attempt: try to build 'VarRef = Start, VarRef += Iter * Step'.
4745  ExprResult SavedUpdate = Update;
4746  ExprResult UpdateVal;
4747  if (VarRef.get()->getType()->isOverloadableType() ||
4748  NewStart.get()->getType()->isOverloadableType() ||
4749  Update.get()->getType()->isOverloadableType()) {
4750  bool Suppress = SemaRef.getDiagnostics().getSuppressAllDiagnostics();
4751  SemaRef.getDiagnostics().setSuppressAllDiagnostics(/*Val=*/true);
4752  Update =
4753  SemaRef.BuildBinOp(S, Loc, BO_Assign, VarRef.get(), NewStart.get());
4754  if (Update.isUsable()) {
4755  UpdateVal =
4756  SemaRef.BuildBinOp(S, Loc, Subtract ? BO_SubAssign : BO_AddAssign,
4757  VarRef.get(), SavedUpdate.get());
4758  if (UpdateVal.isUsable()) {
4759  Update = SemaRef.CreateBuiltinBinOp(Loc, BO_Comma, Update.get(),
4760  UpdateVal.get());
4761  }
4762  }
4763  SemaRef.getDiagnostics().setSuppressAllDiagnostics(Suppress);
4764  }
4765 
4766  // Second attempt: try to build 'VarRef = Start (+|-) Iter * Step'.
4767  if (!Update.isUsable() || !UpdateVal.isUsable()) {
4768  Update = SemaRef.BuildBinOp(S, Loc, Subtract ? BO_Sub : BO_Add,
4769  NewStart.get(), SavedUpdate.get());
4770  if (!Update.isUsable())
4771  return ExprError();
4772 
4773  if (!SemaRef.Context.hasSameType(Update.get()->getType(),
4774  VarRef.get()->getType())) {
4775  Update = SemaRef.PerformImplicitConversion(
4776  Update.get(), VarRef.get()->getType(), Sema::AA_Converting, true);
4777  if (!Update.isUsable())
4778  return ExprError();
4779  }
4780 
4781  Update = SemaRef.BuildBinOp(S, Loc, BO_Assign, VarRef.get(), Update.get());
4782  }
4783  return Update;
4784 }
4785 
4786 /// Convert integer expression \a E to make it have at least \a Bits
4787 /// bits.
4788 static ExprResult widenIterationCount(unsigned Bits, Expr *E, Sema &SemaRef) {
4789  if (E == nullptr)
4790  return ExprError();
4791  ASTContext &C = SemaRef.Context;
4792  QualType OldType = E->getType();
4793  unsigned HasBits = C.getTypeSize(OldType);
4794  if (HasBits >= Bits)
4795  return ExprResult(E);
4796  // OK to convert to signed, because new type has more bits than old.
4797  QualType NewType = C.getIntTypeForBitwidth(Bits, /* Signed */ true);
4798  return SemaRef.PerformImplicitConversion(E, NewType, Sema::AA_Converting,
4799  true);
4800 }
4801 
4802 /// Check if the given expression \a E is a constant integer that fits
4803 /// into \a Bits bits.
4804 static bool fitsInto(unsigned Bits, bool Signed, const Expr *E, Sema &SemaRef) {
4805  if (E == nullptr)
4806  return false;
4807  llvm::APSInt Result;
4808  if (E->isIntegerConstantExpr(Result, SemaRef.Context))
4809  return Signed ? Result.isSignedIntN(Bits) : Result.isIntN(Bits);
4810  return false;
4811 }
4812 
4813 /// Build preinits statement for the given declarations.
4814 static Stmt *buildPreInits(ASTContext &Context,
4815  MutableArrayRef<Decl *> PreInits) {
4816  if (!PreInits.empty()) {
4817  return new (Context) DeclStmt(
4818  DeclGroupRef::Create(Context, PreInits.begin(), PreInits.size()),
4820  }
4821  return nullptr;
4822 }
4823 
4824 /// Build preinits statement for the given declarations.
4825 static Stmt *
4827  const llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) {
4828  if (!Captures.empty()) {
4829  SmallVector<Decl *, 16> PreInits;
4830  for (const auto &Pair : Captures)
4831  PreInits.push_back(Pair.second->getDecl());
4832  return buildPreInits(Context, PreInits);
4833  }
4834  return nullptr;
4835 }
4836 
/// Build postupdate expression for the given list of postupdates expressions.
/// Each expression is wrapped in a C-style cast and the results are chained
/// left-to-right with the comma operator into one expression; returns nullptr
/// for an empty list.
static Expr *buildPostUpdate(Sema &S, ArrayRef<Expr *> PostUpdates) {
  Expr *PostUpdate = nullptr;
  if (!PostUpdates.empty()) {
    for (Expr *E : PostUpdates) {
      Expr *ConvE = S.BuildCStyleCastExpr(
          E->getExprLoc(),
          // NOTE(review): the cast's type argument line was lost in
          // extraction here — confirm against the original file.
          E->getExprLoc(), E)
          .get();
      // Fold the converted expressions into a single comma expression so the
      // caller can emit all postupdates as one statement.
      PostUpdate = PostUpdate
                       ? S.CreateBuiltinBinOp(ConvE->getExprLoc(), BO_Comma,
                                              PostUpdate, ConvE)
                             .get()
                       : ConvE;
    }
  }
  return PostUpdate;
}
4856 
/// Called on a for stmt to check itself and nested loops (if any).
/// Validates that the loop nest associated with the loop directive \p DKind
/// is in OpenMP canonical form and precomputes the helper expressions
/// (iteration variable, bounds, increments, counter updates/finals) used by
/// CodeGen.
/// \return Returns 0 if one of the collapsed stmts is not canonical for loop,
/// number of collapsed loops otherwise.
static unsigned
checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
                Expr *OrderedLoopCountExpr, Stmt *AStmt, Sema &SemaRef,
                DSAStackTy &DSA,
                Sema::VarsWithInheritedDSAType &VarsWithImplicitDSA,
                // NOTE(review): the final parameter line was lost in
                // extraction here (presumably the out-parameter
                // `OMPLoopDirective::HelperExprs &Built) {`) — confirm.
  unsigned NestedLoopCount = 1;
  if (CollapseLoopCountExpr) {
    // Found 'collapse' clause - calculate collapse number.
    llvm::APSInt Result;
    if (CollapseLoopCountExpr->EvaluateAsInt(Result, SemaRef.getASTContext()))
      NestedLoopCount = Result.getLimitedValue();
  }
  unsigned OrderedLoopCount = 1;
  if (OrderedLoopCountExpr) {
    // Found 'ordered' clause - calculate collapse number.
    llvm::APSInt Result;
    if (OrderedLoopCountExpr->EvaluateAsInt(Result, SemaRef.getASTContext())) {
      // ordered(n) must cover at least as many loops as collapse(m).
      if (Result.getLimitedValue() < NestedLoopCount) {
        SemaRef.Diag(OrderedLoopCountExpr->getExprLoc(),
                     diag::err_omp_wrong_ordered_loop_count)
            << OrderedLoopCountExpr->getSourceRange();
        SemaRef.Diag(CollapseLoopCountExpr->getExprLoc(),
                     diag::note_collapse_loop_count)
            << CollapseLoopCountExpr->getSourceRange();
      }
      OrderedLoopCount = Result.getLimitedValue();
    }
  }
  // This is helper routine for loop directives (e.g., 'for', 'simd',
  // 'for simd', etc.).
  llvm::MapVector<const Expr *, DeclRefExpr *> Captures;
  // NOTE(review): the declaration of `IterSpaces` was lost in extraction
  // here (presumably a container of per-loop iteration-space records) —
  // confirm against the original file.
  IterSpaces.resize(std::max(OrderedLoopCount, NestedLoopCount));
  Stmt *CurStmt = AStmt->IgnoreContainers(/* IgnoreCaptured */ true);
  // Analyze every collapsed loop level.
  for (unsigned Cnt = 0; Cnt < NestedLoopCount; ++Cnt) {
    // NOTE(review): the call head was lost in extraction here (presumably
    // `if (checkOpenMPIterationSpace(`) — confirm.
        DKind, CurStmt, SemaRef, DSA, Cnt, NestedLoopCount,
        std::max(OrderedLoopCount, NestedLoopCount), CollapseLoopCountExpr,
        OrderedLoopCountExpr, VarsWithImplicitDSA, IterSpaces[Cnt],
        Captures))
      return 0;
    // Move on to the next nested for loop, or to the loop body.
    // OpenMP [2.8.1, simd construct, Restrictions]
    // All loops associated with the construct must be perfectly nested; that
    // is, there must be no intervening code nor any OpenMP directive between
    // any two loops.
    CurStmt = cast<ForStmt>(CurStmt)->getBody()->IgnoreContainers();
  }
  // Analyze the additional loops covered by 'ordered(n)' but not collapsed.
  for (unsigned Cnt = NestedLoopCount; Cnt < OrderedLoopCount; ++Cnt) {
    // NOTE(review): the call head was lost in extraction here (presumably
    // `if (checkOpenMPIterationSpace(`) — confirm.
        DKind, CurStmt, SemaRef, DSA, Cnt, NestedLoopCount,
        std::max(OrderedLoopCount, NestedLoopCount), CollapseLoopCountExpr,
        OrderedLoopCountExpr, VarsWithImplicitDSA, IterSpaces[Cnt],
        Captures))
      return 0;
    if (Cnt > 0 && IterSpaces[Cnt].CounterVar) {
      // Handle initialization of captured loop iterator variables.
      auto *DRE = cast<DeclRefExpr>(IterSpaces[Cnt].CounterVar);
      if (isa<OMPCapturedExprDecl>(DRE->getDecl())) {
        Captures[DRE] = DRE;
      }
    }
    // Move on to the next nested for loop, or to the loop body.
    // OpenMP [2.8.1, simd construct, Restrictions]
    // All loops associated with the construct must be perfectly nested; that
    // is, there must be no intervening code nor any OpenMP directive between
    // any two loops.
    CurStmt = cast<ForStmt>(CurStmt)->getBody()->IgnoreContainers();
  }

  Built.clear(/* size */ NestedLoopCount);

  // In a dependent context the helper expressions cannot be built yet; they
  // are recomputed at instantiation time.
  if (SemaRef.CurContext->isDependentContext())
    return NestedLoopCount;

  // An example of what is generated for the following code:
  //
  //   #pragma omp simd collapse(2) ordered(2)
  //   for (i = 0; i < NI; ++i)
  //     for (k = 0; k < NK; ++k)
  //       for (j = J0; j < NJ; j+=2) {
  //         <loop body>
  //       }
  //
  // We generate the code below.
  // Note: the loop body may be outlined in CodeGen.
  // Note: some counters may be C++ classes, operator- is used to find number of
  // iterations and operator+= to calculate counter value.
  // Note: decltype(NumIterations) must be integer type (in 'omp for', only i32
  // or i64 is currently supported).
  //
  //   #define NumIterations (NI * ((NJ - J0 - 1 + 2) / 2))
  //   for (int[32|64]_t IV = 0; IV < NumIterations; ++IV ) {
  //     .local.i = IV / ((NJ - J0 - 1 + 2) / 2);
  //     .local.j = J0 + (IV % ((NJ - J0 - 1 + 2) / 2)) * 2;
  //     // similar updates for vars in clauses (e.g. 'linear')
  //     <loop body (using local i and j)>
  //   }
  //   i = NI; // assign final values of counters
  //   j = NJ;
  //

  // Last iteration number is (I1 * I2 * ... In) - 1, where I1, I2 ... In are
  // the iteration counts of the collapsed for loops.
  // Precondition tests if there is at least one iteration (all conditions are
  // true).
  auto PreCond = ExprResult(IterSpaces[0].PreCond);
  Expr *N0 = IterSpaces[0].NumIterations;
  ExprResult LastIteration32 =
      widenIterationCount(/*Bits=*/32,
                          SemaRef
                              .PerformImplicitConversion(
                                  N0->IgnoreImpCasts(), N0->getType(),
                                  Sema::AA_Converting, /*AllowExplicit=*/true)
                              .get(),
                          SemaRef);
  ExprResult LastIteration64 = widenIterationCount(
      /*Bits=*/64,
      SemaRef
          .PerformImplicitConversion(N0->IgnoreImpCasts(), N0->getType(),
                                     // NOTE(review): a line was lost in
                                     // extraction here (presumably
                                     // `Sema::AA_Converting,`) — confirm.
                                     /*AllowExplicit=*/true)
          .get(),
      SemaRef);

  if (!LastIteration32.isUsable() || !LastIteration64.isUsable())
    return NestedLoopCount;

  ASTContext &C = SemaRef.Context;
  bool AllCountsNeedLessThan32Bits = C.getTypeSize(N0->getType()) < 32;

  Scope *CurScope = DSA.getCurScope();
  // Fold the per-loop preconditions and multiply the per-loop trip counts in
  // both 32-bit and 64-bit flavors.
  for (unsigned Cnt = 1; Cnt < NestedLoopCount; ++Cnt) {
    if (PreCond.isUsable()) {
      PreCond =
          SemaRef.BuildBinOp(CurScope, PreCond.get()->getExprLoc(), BO_LAnd,
                             PreCond.get(), IterSpaces[Cnt].PreCond);
    }
    Expr *N = IterSpaces[Cnt].NumIterations;
    SourceLocation Loc = N->getExprLoc();
    AllCountsNeedLessThan32Bits &= C.getTypeSize(N->getType()) < 32;
    if (LastIteration32.isUsable())
      LastIteration32 = SemaRef.BuildBinOp(
          CurScope, Loc, BO_Mul, LastIteration32.get(),
          SemaRef
              // NOTE(review): two lines were lost in extraction here
              // (presumably the `.PerformImplicitConversion(...)` call head
              // and `Sema::AA_Converting,`) — confirm.
              /*AllowExplicit=*/true)
              .get());
    if (LastIteration64.isUsable())
      LastIteration64 = SemaRef.BuildBinOp(
          CurScope, Loc, BO_Mul, LastIteration64.get(),
          SemaRef
              // NOTE(review): two lines were lost in extraction here
              // (presumably the `.PerformImplicitConversion(...)` call head
              // and `Sema::AA_Converting,`) — confirm.
              /*AllowExplicit=*/true)
              .get());
  }

  // Choose either the 32-bit or 64-bit version.
  ExprResult LastIteration = LastIteration64;
  if (LastIteration32.isUsable() &&
      C.getTypeSize(LastIteration32.get()->getType()) == 32 &&
      (AllCountsNeedLessThan32Bits || NestedLoopCount == 1 ||
       fitsInto(
           /*Bits=*/32,
           LastIteration32.get()->getType()->hasSignedIntegerRepresentation(),
           LastIteration64.get(), SemaRef)))
    LastIteration = LastIteration32;
  QualType VType = LastIteration.get()->getType();
  QualType RealVType = VType;
  QualType StrideVType = VType;
  if (isOpenMPTaskLoopDirective(DKind)) {
    VType =
        SemaRef.Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0);
    StrideVType =
        SemaRef.Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
  }

  if (!LastIteration.isUsable())
    return 0;

  // Save the number of iterations.
  ExprResult NumIterations = LastIteration;
  {
    // LastIteration = NumIterations - 1.
    LastIteration = SemaRef.BuildBinOp(
        CurScope, LastIteration.get()->getExprLoc(), BO_Sub,
        LastIteration.get(),
        SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get());
    if (!LastIteration.isUsable())
      return 0;
  }

  // Calculate the last iteration number beforehand instead of doing this on
  // each iteration. Do not do this if the number of iterations may be kfold-ed.
  llvm::APSInt Result;
  bool IsConstant =
      LastIteration.get()->isIntegerConstantExpr(Result, SemaRef.Context);
  ExprResult CalcLastIteration;
  if (!IsConstant) {
    // Capture the non-constant last-iteration value so it is evaluated once.
    ExprResult SaveRef =
        tryBuildCapture(SemaRef, LastIteration.get(), Captures);
    LastIteration = SaveRef;

    // Prepare SaveRef + 1.
    NumIterations = SemaRef.BuildBinOp(
        CurScope, SaveRef.get()->getExprLoc(), BO_Add, SaveRef.get(),
        SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get());
    if (!NumIterations.isUsable())
      return 0;
  }

  SourceLocation InitLoc = IterSpaces[0].InitSrcRange.getBegin();

  // Build variables passed into runtime, necessary for worksharing directives.
  ExprResult LB, UB, IL, ST, EUB, CombLB, CombUB, PrevLB, PrevUB, CombEUB;
  // NOTE(review): the condition head was lost in extraction here (presumably
  // `if (isOpenMPWorksharingDirective(DKind) || ... ||`) — confirm.
      isOpenMPDistributeDirective(DKind)) {
    // Lower bound variable, initialized with zero.
    VarDecl *LBDecl = buildVarDecl(SemaRef, InitLoc, VType, ".omp.lb");
    LB = buildDeclRefExpr(SemaRef, LBDecl, VType, InitLoc);
    SemaRef.AddInitializerToDecl(LBDecl,
                                 SemaRef.ActOnIntegerConstant(InitLoc, 0).get(),
                                 /*DirectInit*/ false);

    // Upper bound variable, initialized with last iteration number.
    VarDecl *UBDecl = buildVarDecl(SemaRef, InitLoc, VType, ".omp.ub");
    UB = buildDeclRefExpr(SemaRef, UBDecl, VType, InitLoc);
    SemaRef.AddInitializerToDecl(UBDecl, LastIteration.get(),
                                 /*DirectInit*/ false);

    // A 32-bit variable-flag where runtime returns 1 for the last iteration.
    // This will be used to implement clause 'lastprivate'.
    QualType Int32Ty = SemaRef.Context.getIntTypeForBitwidth(32, true);
    VarDecl *ILDecl = buildVarDecl(SemaRef, InitLoc, Int32Ty, ".omp.is_last");
    IL = buildDeclRefExpr(SemaRef, ILDecl, Int32Ty, InitLoc);
    SemaRef.AddInitializerToDecl(ILDecl,
                                 SemaRef.ActOnIntegerConstant(InitLoc, 0).get(),
                                 /*DirectInit*/ false);

    // Stride variable returned by runtime (we initialize it to 1 by default).
    VarDecl *STDecl =
        buildVarDecl(SemaRef, InitLoc, StrideVType, ".omp.stride");
    ST = buildDeclRefExpr(SemaRef, STDecl, StrideVType, InitLoc);
    SemaRef.AddInitializerToDecl(STDecl,
                                 SemaRef.ActOnIntegerConstant(InitLoc, 1).get(),
                                 /*DirectInit*/ false);

    // Build expression: UB = min(UB, LastIteration)
    // It is necessary for CodeGen of directives with static scheduling.
    ExprResult IsUBGreater = SemaRef.BuildBinOp(CurScope, InitLoc, BO_GT,
                                                UB.get(), LastIteration.get());
    ExprResult CondOp = SemaRef.ActOnConditionalOp(
        LastIteration.get()->getExprLoc(), InitLoc, IsUBGreater.get(),
        LastIteration.get(), UB.get());
    EUB = SemaRef.BuildBinOp(CurScope, InitLoc, BO_Assign, UB.get(),
                             CondOp.get());
    EUB = SemaRef.ActOnFinishFullExpr(EUB.get());

    // If we have a combined directive that combines 'distribute', 'for' or
    // 'simd' we need to be able to access the bounds of the schedule of the
    // enclosing region. E.g. in 'distribute parallel for' the bounds obtained
    // by scheduling 'distribute' have to be passed to the schedule of 'for'.
    if (isOpenMPLoopBoundSharingDirective(DKind)) {
      // Lower bound variable, initialized with zero.
      VarDecl *CombLBDecl =
          buildVarDecl(SemaRef, InitLoc, VType, ".omp.comb.lb");
      CombLB = buildDeclRefExpr(SemaRef, CombLBDecl, VType, InitLoc);
      SemaRef.AddInitializerToDecl(
          CombLBDecl, SemaRef.ActOnIntegerConstant(InitLoc, 0).get(),
          /*DirectInit*/ false);

      // Upper bound variable, initialized with last iteration number.
      VarDecl *CombUBDecl =
          buildVarDecl(SemaRef, InitLoc, VType, ".omp.comb.ub");
      CombUB = buildDeclRefExpr(SemaRef, CombUBDecl, VType, InitLoc);
      SemaRef.AddInitializerToDecl(CombUBDecl, LastIteration.get(),
                                   /*DirectInit*/ false);

      // CombUB = min(CombUB, LastIteration), mirroring EUB above.
      ExprResult CombIsUBGreater = SemaRef.BuildBinOp(
          CurScope, InitLoc, BO_GT, CombUB.get(), LastIteration.get());
      ExprResult CombCondOp =
          SemaRef.ActOnConditionalOp(InitLoc, InitLoc, CombIsUBGreater.get(),
                                     LastIteration.get(), CombUB.get());
      CombEUB = SemaRef.BuildBinOp(CurScope, InitLoc, BO_Assign, CombUB.get(),
                                   CombCondOp.get());
      CombEUB = SemaRef.ActOnFinishFullExpr(CombEUB.get());

      const CapturedDecl *CD = cast<CapturedStmt>(AStmt)->getCapturedDecl();
      // We expect to have at least 2 more parameters than the 'parallel'
      // directive does - the lower and upper bounds of the previous schedule.
      assert(CD->getNumParams() >= 4 &&
             "Unexpected number of parameters in loop combined directive");

      // Set the proper type for the bounds given what we learned from the
      // enclosed loops.
      ImplicitParamDecl *PrevLBDecl = CD->getParam(/*PrevLB=*/2);
      ImplicitParamDecl *PrevUBDecl = CD->getParam(/*PrevUB=*/3);

      // Previous lower and upper bounds are obtained from the region
      // parameters.
      PrevLB =
          buildDeclRefExpr(SemaRef, PrevLBDecl, PrevLBDecl->getType(), InitLoc);
      PrevUB =
          buildDeclRefExpr(SemaRef, PrevUBDecl, PrevUBDecl->getType(), InitLoc);
    }
  }

  // Build the iteration variable and its initialization before loop.
  ExprResult IV;
  ExprResult Init, CombInit;
  {
    VarDecl *IVDecl = buildVarDecl(SemaRef, InitLoc, RealVType, ".omp.iv");
    IV = buildDeclRefExpr(SemaRef, IVDecl, RealVType, InitLoc);
    // Worksharing-style loops start the IV from the runtime-provided LB;
    // other loops start from zero.
    Expr *RHS =
        (isOpenMPWorksharingDirective(DKind) ||
         // NOTE(review): part of this condition was lost in extraction here —
         // confirm against the original file.
            ? LB.get()
            : SemaRef.ActOnIntegerConstant(SourceLocation(), 0).get();
    Init = SemaRef.BuildBinOp(CurScope, InitLoc, BO_Assign, IV.get(), RHS);
    Init = SemaRef.ActOnFinishFullExpr(Init.get());

    if (isOpenMPLoopBoundSharingDirective(DKind)) {
      Expr *CombRHS =
          (isOpenMPWorksharingDirective(DKind) ||
           isOpenMPTaskLoopDirective(DKind) ||
           // NOTE(review): part of this condition was lost in extraction
           // here — confirm against the original file.
              ? CombLB.get()
              : SemaRef.ActOnIntegerConstant(SourceLocation(), 0).get();
      CombInit =
          SemaRef.BuildBinOp(CurScope, InitLoc, BO_Assign, IV.get(), CombRHS);
      CombInit = SemaRef.ActOnFinishFullExpr(CombInit.get());
    }
  }

  // Loop condition (IV < NumIterations) or (IV <= UB) for worksharing loops.
  SourceLocation CondLoc = AStmt->getBeginLoc();
  ExprResult Cond =
      (isOpenMPWorksharingDirective(DKind) ||
       // NOTE(review): part of this condition was lost in extraction here —
       // confirm against the original file.
          ? SemaRef.BuildBinOp(CurScope, CondLoc, BO_LE, IV.get(), UB.get())
          : SemaRef.BuildBinOp(CurScope, CondLoc, BO_LT, IV.get(),
                               NumIterations.get());
  ExprResult CombCond;
  if (isOpenMPLoopBoundSharingDirective(DKind)) {
    CombCond =
        SemaRef.BuildBinOp(CurScope, CondLoc, BO_LE, IV.get(), CombUB.get());
  }
  // Loop increment (IV = IV + 1)
  SourceLocation IncLoc = AStmt->getBeginLoc();
  ExprResult Inc =
      SemaRef.BuildBinOp(CurScope, IncLoc, BO_Add, IV.get(),
                         SemaRef.ActOnIntegerConstant(IncLoc, 1).get());
  if (!Inc.isUsable())
    return 0;
  Inc = SemaRef.BuildBinOp(CurScope, IncLoc, BO_Assign, IV.get(), Inc.get());
  Inc = SemaRef.ActOnFinishFullExpr(Inc.get());
  if (!Inc.isUsable())
    return 0;

  // Increments for worksharing loops (LB = LB + ST; UB = UB + ST).
  // Used for directives with static scheduling.
  // In combined construct, add combined version that use CombLB and CombUB
  // base variables for the update
  ExprResult NextLB, NextUB, CombNextLB, CombNextUB;
  // NOTE(review): the condition head was lost in extraction here (presumably
  // `if (isOpenMPWorksharingDirective(DKind) || ... ||`) — confirm.
      isOpenMPDistributeDirective(DKind)) {
    // LB + ST
    NextLB = SemaRef.BuildBinOp(CurScope, IncLoc, BO_Add, LB.get(), ST.get());
    if (!NextLB.isUsable())
      return 0;
    // LB = LB + ST
    NextLB =
        SemaRef.BuildBinOp(CurScope, IncLoc, BO_Assign, LB.get(), NextLB.get());
    NextLB = SemaRef.ActOnFinishFullExpr(NextLB.get());
    if (!NextLB.isUsable())
      return 0;
    // UB + ST
    NextUB = SemaRef.BuildBinOp(CurScope, IncLoc, BO_Add, UB.get(), ST.get());
    if (!NextUB.isUsable())
      return 0;
    // UB = UB + ST
    NextUB =
        SemaRef.BuildBinOp(CurScope, IncLoc, BO_Assign, UB.get(), NextUB.get());
    NextUB = SemaRef.ActOnFinishFullExpr(NextUB.get());
    if (!NextUB.isUsable())
      return 0;
    if (isOpenMPLoopBoundSharingDirective(DKind)) {
      CombNextLB =
          SemaRef.BuildBinOp(CurScope, IncLoc, BO_Add, CombLB.get(), ST.get());
      // NOTE(review): this checks NextLB rather than the just-computed
      // CombNextLB — looks like a copy-paste slip; confirm upstream.
      if (!NextLB.isUsable())
        return 0;
      // LB = LB + ST
      CombNextLB = SemaRef.BuildBinOp(CurScope, IncLoc, BO_Assign, CombLB.get(),
                                      CombNextLB.get());
      CombNextLB = SemaRef.ActOnFinishFullExpr(CombNextLB.get());
      if (!CombNextLB.isUsable())
        return 0;
      // UB + ST
      CombNextUB =
          SemaRef.BuildBinOp(CurScope, IncLoc, BO_Add, CombUB.get(), ST.get());
      if (!CombNextUB.isUsable())
        return 0;
      // UB = UB + ST
      CombNextUB = SemaRef.BuildBinOp(CurScope, IncLoc, BO_Assign, CombUB.get(),
                                      CombNextUB.get());
      CombNextUB = SemaRef.ActOnFinishFullExpr(CombNextUB.get());
      if (!CombNextUB.isUsable())
        return 0;
    }
  }

  // Create increment expression for distribute loop when combined in a same
  // directive with for as IV = IV + ST; ensure upper bound expression based
  // on PrevUB instead of NumIterations - used to implement 'for' when found
  // in combination with 'distribute', like in 'distribute parallel for'
  SourceLocation DistIncLoc = AStmt->getBeginLoc();
  ExprResult DistCond, DistInc, PrevEUB;
  if (isOpenMPLoopBoundSharingDirective(DKind)) {
    DistCond = SemaRef.BuildBinOp(CurScope, CondLoc, BO_LE, IV.get(), UB.get());
    assert(DistCond.isUsable() && "distribute cond expr was not built");

    DistInc =
        SemaRef.BuildBinOp(CurScope, DistIncLoc, BO_Add, IV.get(), ST.get());
    assert(DistInc.isUsable() && "distribute inc expr was not built");
    DistInc = SemaRef.BuildBinOp(CurScope, DistIncLoc, BO_Assign, IV.get(),
                                 DistInc.get());
    DistInc = SemaRef.ActOnFinishFullExpr(DistInc.get());
    assert(DistInc.isUsable() && "distribute inc expr was not built");

    // Build expression: UB = min(UB, prevUB) for #for in composite or combined
    // construct
    SourceLocation DistEUBLoc = AStmt->getBeginLoc();
    ExprResult IsUBGreater =
        SemaRef.BuildBinOp(CurScope, DistEUBLoc, BO_GT, UB.get(), PrevUB.get());
    ExprResult CondOp = SemaRef.ActOnConditionalOp(
        DistEUBLoc, DistEUBLoc, IsUBGreater.get(), PrevUB.get(), UB.get());
    PrevEUB = SemaRef.BuildBinOp(CurScope, DistIncLoc, BO_Assign, UB.get(),
                                 CondOp.get());
    PrevEUB = SemaRef.ActOnFinishFullExpr(PrevEUB.get());
  }

  // Build updates and final values of the loop counters.
  bool HasErrors = false;
  Built.Counters.resize(NestedLoopCount);
  Built.Inits.resize(NestedLoopCount);
  Built.Updates.resize(NestedLoopCount);
  Built.Finals.resize(NestedLoopCount);
  {
    // Div accumulates the product of inner trip counts so each counter can be
    // recovered from the flattened IV.
    ExprResult Div;
    // Go from inner nested loop to outer.
    for (int Cnt = NestedLoopCount - 1; Cnt >= 0; --Cnt) {
      LoopIterationSpace &IS = IterSpaces[Cnt];
      SourceLocation UpdLoc = IS.IncSrcRange.getBegin();
      // Build: Iter = (IV / Div) % IS.NumIters
      // where Div is product of previous iterations' IS.NumIters.
      ExprResult Iter;
      if (Div.isUsable()) {
        Iter =
            SemaRef.BuildBinOp(CurScope, UpdLoc, BO_Div, IV.get(), Div.get());
      } else {
        Iter = IV;
        assert((Cnt == (int)NestedLoopCount - 1) &&
               "unusable div expected on first iteration only");
      }

      if (Cnt != 0 && Iter.isUsable())
        Iter = SemaRef.BuildBinOp(CurScope, UpdLoc, BO_Rem, Iter.get(),
                                  IS.NumIterations);
      if (!Iter.isUsable()) {
        HasErrors = true;
        break;
      }

      // Build update: IS.CounterVar(Private) = IS.Start + Iter * IS.Step
      auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IS.CounterVar)->getDecl());
      DeclRefExpr *CounterVar = buildDeclRefExpr(
          SemaRef, VD, IS.CounterVar->getType(), IS.CounterVar->getExprLoc(),
          /*RefersToCapture=*/true);
      ExprResult Init = buildCounterInit(SemaRef, CurScope, UpdLoc, CounterVar,
                                         IS.CounterInit, Captures);
      if (!Init.isUsable()) {
        HasErrors = true;
        break;
      }
      // NOTE(review): the declaration head was lost in extraction here
      // (presumably `ExprResult Update = buildCounterUpdate(`) — confirm.
          SemaRef, CurScope, UpdLoc, CounterVar, IS.CounterInit, Iter,
          IS.CounterStep, IS.Subtract, &Captures);
      if (!Update.isUsable()) {
        HasErrors = true;
        break;
      }

      // Build final: IS.CounterVar = IS.Start + IS.NumIters * IS.Step
      // NOTE(review): the declaration head was lost in extraction here
      // (presumably `ExprResult Final = buildCounterUpdate(`) — confirm.
          SemaRef, CurScope, UpdLoc, CounterVar, IS.CounterInit,
          IS.NumIterations, IS.CounterStep, IS.Subtract, &Captures);
      if (!Final.isUsable()) {
        HasErrors = true;
        break;
      }

      // Build Div for the next iteration: Div <- Div * IS.NumIters
      if (Cnt != 0) {
        if (Div.isUnset())
          Div = IS.NumIterations;
        else
          Div = SemaRef.BuildBinOp(CurScope, UpdLoc, BO_Mul, Div.get(),
                                   IS.NumIterations);

        // Add parentheses (for debugging purposes only).
        if (Div.isUsable())
          Div = tryBuildCapture(SemaRef, Div.get(), Captures);
        if (!Div.isUsable()) {
          HasErrors = true;
          break;
        }
      }
      if (!Update.isUsable() || !Final.isUsable()) {
        HasErrors = true;
        break;
      }
      // Save results
      Built.Counters[Cnt] = IS.CounterVar;
      Built.PrivateCounters[Cnt] = IS.PrivateCounterVar;
      Built.Inits[Cnt] = Init.get();
      Built.Updates[Cnt] = Update.get();
      Built.Finals[Cnt] = Final.get();
    }
  }

  if (HasErrors)
    return 0;

  // Save results
  Built.IterationVarRef = IV.get();
  Built.LastIteration = LastIteration.get();
  Built.NumIterations = NumIterations.get();
  Built.CalcLastIteration =
      SemaRef.ActOnFinishFullExpr(CalcLastIteration.get()).get();
  Built.PreCond = PreCond.get();
  Built.PreInits = buildPreInits(C, Captures);
  Built.Cond = Cond.get();
  Built.Init = Init.get();
  Built.Inc = Inc.get();
  Built.LB = LB.get();
  Built.UB = UB.get();
  Built.IL = IL.get();
  Built.ST = ST.get();
  Built.EUB = EUB.get();
  Built.NLB = NextLB.get();
  Built.NUB = NextUB.get();
  Built.PrevLB = PrevLB.get();
  Built.PrevUB = PrevUB.get();
  Built.DistInc = DistInc.get();
  Built.PrevEUB = PrevEUB.get();
  Built.DistCombinedFields.LB = CombLB.get();
  Built.DistCombinedFields.UB = CombUB.get();
  Built.DistCombinedFields.EUB = CombEUB.get();
  Built.DistCombinedFields.Init = CombInit.get();
  Built.DistCombinedFields.Cond = CombCond.get();
  Built.DistCombinedFields.NLB = CombNextLB.get();
  Built.DistCombinedFields.NUB = CombNextUB.get();

  return NestedLoopCount;
}
5427 
// NOTE(review): the signature line was lost in extraction here (presumably
// `static Expr *getCollapseNumberExpr(ArrayRef<OMPClause *> Clauses) {`) —
// confirm against the original file.
// Returns the loop-count expression of the first 'collapse' clause among
// \a Clauses, or nullptr when no collapse clause is present.
  auto CollapseClauses =
      OMPExecutableDirective::getClausesOfKind<OMPCollapseClause>(Clauses);
  if (CollapseClauses.begin() != CollapseClauses.end())
    return (*CollapseClauses.begin())->getNumForLoops();
  return nullptr;
}
5435 
// NOTE(review): the signature line was lost in extraction here (presumably
// `static Expr *getOrderedNumberExpr(ArrayRef<OMPClause *> Clauses) {`) —
// confirm against the original file.
// Returns the loop-count expression of the first 'ordered' clause among
// \a Clauses, or nullptr when no ordered clause is present.
  auto OrderedClauses =
      OMPExecutableDirective::getClausesOfKind<OMPOrderedClause>(Clauses);
  if (OrderedClauses.begin() != OrderedClauses.end())
    return (*OrderedClauses.begin())->getNumForLoops();
  return nullptr;
}
5443 
// NOTE(review): the signature head was lost in extraction here (presumably
// `static bool checkSimdlenSafelenSpecified(Sema &S,`) — confirm against the
// original file.
// Checks OpenMP 4.5 [2.8.1]: when both 'simdlen' and 'safelen' are given,
// simdlen must be <= safelen. Emits a diagnostic and returns true on
// violation; returns false otherwise (including when either value is
// dependent and cannot be evaluated yet).
    const ArrayRef<OMPClause *> Clauses) {
  const OMPSafelenClause *Safelen = nullptr;
  const OMPSimdlenClause *Simdlen = nullptr;

  // Find the first safelen and simdlen clauses; stop once both are seen.
  for (const OMPClause *Clause : Clauses) {
    if (Clause->getClauseKind() == OMPC_safelen)
      Safelen = cast<OMPSafelenClause>(Clause);
    else if (Clause->getClauseKind() == OMPC_simdlen)
      Simdlen = cast<OMPSimdlenClause>(Clause);
    if (Safelen && Simdlen)
      break;
  }

  if (Simdlen && Safelen) {
    llvm::APSInt SimdlenRes, SafelenRes;
    const Expr *SimdlenLength = Simdlen->getSimdlen();
    const Expr *SafelenLength = Safelen->getSafelen();
    // Dependent lengths cannot be compared until instantiation.
    if (SimdlenLength->isValueDependent() || SimdlenLength->isTypeDependent() ||
        SimdlenLength->isInstantiationDependent() ||
        SimdlenLength->containsUnexpandedParameterPack())
      return false;
    if (SafelenLength->isValueDependent() || SafelenLength->isTypeDependent() ||
        SafelenLength->isInstantiationDependent() ||
        SafelenLength->containsUnexpandedParameterPack())
      return false;
    SimdlenLength->EvaluateAsInt(SimdlenRes, S.Context);
    SafelenLength->EvaluateAsInt(SafelenRes, S.Context);
    // OpenMP 4.5 [2.8.1, simd Construct, Restrictions]
    // If both simdlen and safelen clauses are specified, the value of the
    // simdlen parameter must be less than or equal to the value of the safelen
    // parameter.
    if (SimdlenRes > SafelenRes) {
      S.Diag(SimdlenLength->getExprLoc(),
             diag::err_omp_wrong_simdlen_safelen_values)
          << SimdlenLength->getSourceRange() << SafelenLength->getSourceRange();
      return true;
    }
  }
  return false;
}
5485 
// Semantic action for '#pragma omp simd': validates the associated loop nest
// and builds the OMPSimdDirective AST node.
StmtResult
// NOTE(review): the signature line was lost in extraction here (presumably
// `Sema::ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,`)
// — confirm against the original file.
    SourceLocation StartLoc, SourceLocation EndLoc,
    VarsWithInheritedDSAType &VarsWithImplicitDSA) {
  if (!AStmt)
    return StmtError();

  assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
  // NOTE(review): a declaration line was lost in extraction here (presumably
  // `OMPLoopDirective::HelperExprs B;`) — confirm.
  // In presence of clause 'collapse' or 'ordered' with number of loops, it will
  // define the nested loops number.
  unsigned NestedLoopCount = checkOpenMPLoop(
      OMPD_simd, getCollapseNumberExpr(Clauses), getOrderedNumberExpr(Clauses),
      AStmt, *this, *DSAStack, VarsWithImplicitDSA, B);
  if (NestedLoopCount == 0)
    return StmtError();

  assert((CurContext->isDependentContext() || B.builtAll()) &&
         "omp simd loop exprs were not built");

  if (!CurContext->isDependentContext()) {
    // Finalize the clauses that need pre-built expressions for CodeGen.
    for (OMPClause *C : Clauses) {
      if (auto *LC = dyn_cast<OMPLinearClause>(C))
        if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
                                     B.NumIterations, *this, CurScope,
                                     DSAStack))
          return StmtError();
    }
  }

  // simd-specific restriction: simdlen must not exceed safelen.
  if (checkSimdlenSafelenSpecified(*this, Clauses))
    return StmtError();

  setFunctionHasBranchProtectedScope();
  return OMPSimdDirective::Create(Context, StartLoc, EndLoc, NestedLoopCount,
                                  Clauses, AStmt, B);
}
5524 
// Semantic action for '#pragma omp for': validates the associated loop nest
// and builds the OMPForDirective AST node.
StmtResult
// NOTE(review): the signature line was lost in extraction here (presumably
// `Sema::ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,`)
// — confirm against the original file.
    SourceLocation StartLoc, SourceLocation EndLoc,
    VarsWithInheritedDSAType &VarsWithImplicitDSA) {
  if (!AStmt)
    return StmtError();

  assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
  // NOTE(review): a declaration line was lost in extraction here (presumably
  // `OMPLoopDirective::HelperExprs B;`) — confirm.
  // In presence of clause 'collapse' or 'ordered' with number of loops, it will
  // define the nested loops number.
  unsigned NestedLoopCount = checkOpenMPLoop(
      OMPD_for, getCollapseNumberExpr(Clauses), getOrderedNumberExpr(Clauses),
      AStmt, *this, *DSAStack, VarsWithImplicitDSA, B);
  if (NestedLoopCount == 0)
    return StmtError();

  assert((CurContext->isDependentContext() || B.builtAll()) &&
         "omp for loop exprs were not built");

  if (!CurContext->isDependentContext()) {
    // Finalize the clauses that need pre-built expressions for CodeGen.
    for (OMPClause *C : Clauses) {
      if (auto *LC = dyn_cast<OMPLinearClause>(C))
        if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
                                     B.NumIterations, *this, CurScope,
                                     DSAStack))
          return StmtError();
    }
  }

  setFunctionHasBranchProtectedScope();
  return OMPForDirective::Create(Context, StartLoc, EndLoc, NestedLoopCount,
                                 Clauses, AStmt, B, DSAStack->isCancelRegion());
}
5560 
// Semantic action for '#pragma omp for simd': validates the associated loop
// nest and builds the OMPForSimdDirective AST node.
// NOTE(review): the signature head was lost in extraction here (presumably
// `StmtResult Sema::ActOnOpenMPForSimdDirective(`) — confirm against the
// original file.
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
  if (!AStmt)
    return StmtError();

  assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
  // NOTE(review): a declaration line was lost in extraction here (presumably
  // `OMPLoopDirective::HelperExprs B;`) — confirm.
  // In presence of clause 'collapse' or 'ordered' with number of loops, it will
  // define the nested loops number.
  unsigned NestedLoopCount =
      checkOpenMPLoop(OMPD_for_simd, getCollapseNumberExpr(Clauses),
                      getOrderedNumberExpr(Clauses), AStmt, *this, *DSAStack,
                      VarsWithImplicitDSA, B);
  if (NestedLoopCount == 0)
    return StmtError();

  assert((CurContext->isDependentContext() || B.builtAll()) &&
         "omp for simd loop exprs were not built");

  if (!CurContext->isDependentContext()) {
    // Finalize the clauses that need pre-built expressions for CodeGen.
    for (OMPClause *C : Clauses) {
      if (auto *LC = dyn_cast<OMPLinearClause>(C))
        if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
                                     B.NumIterations, *this, CurScope,
                                     DSAStack))
          return StmtError();
    }
  }

  // simd-specific restriction: simdlen must not exceed safelen.
  if (checkSimdlenSafelenSpecified(*this, Clauses))
    return StmtError();

  setFunctionHasBranchProtectedScope();
  return OMPForSimdDirective::Create(Context, StartLoc, EndLoc, NestedLoopCount,
                                     Clauses, AStmt, B);
}
5599 
// Semantic action for '#pragma omp sections': verifies the associated
// statement is a compound statement whose sub-statements (after the first)
// are all '#pragma omp section', then builds the directive node.
// NOTE(review): the signature head was lost in extraction here (presumably
// `StmtResult Sema::ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *>
// Clauses,`) — confirm against the original file.
                                              Stmt *AStmt,
                                              SourceLocation StartLoc,
                                              SourceLocation EndLoc) {
  if (!AStmt)
    return StmtError();

  assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
  // Strip any (possibly nested) CapturedStmt wrappers to reach the user code.
  auto BaseStmt = AStmt;
  while (auto *CS = dyn_cast_or_null<CapturedStmt>(BaseStmt))
    BaseStmt = CS->getCapturedStmt();
  if (auto *C = dyn_cast_or_null<CompoundStmt>(BaseStmt)) {
    auto S = C->children();
    if (S.begin() == S.end())
      return StmtError();
    // All associated statements must be '#pragma omp section' except for
    // the first one.
    for (Stmt *SectionStmt : llvm::make_range(std::next(S.begin()), S.end())) {
      if (!SectionStmt || !isa<OMPSectionDirective>(SectionStmt)) {
        if (SectionStmt)
          Diag(SectionStmt->getBeginLoc(),
               diag::err_omp_sections_substmt_not_section);
        return StmtError();
      }
      // Propagate the cancellation status to each section.
      cast<OMPSectionDirective>(SectionStmt)
          ->setHasCancel(DSAStack->isCancelRegion());
    }
  } else {
    Diag(AStmt->getBeginLoc(), diag::err_omp_sections_not_compound_stmt);
    return StmtError();
  }

  setFunctionHasBranchProtectedScope();

  return OMPSectionsDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt,
                                      DSAStack->isCancelRegion());
}
5637 
// Semantic action for '#pragma omp section': builds the directive node and
// records the cancellation status on the parent region.
// NOTE(review): the signature head was lost in extraction here (presumably
// `StmtResult Sema::ActOnOpenMPSectionDirective(Stmt *AStmt,`) — confirm
// against the original file.
                                             SourceLocation StartLoc,
                                             SourceLocation EndLoc) {
  if (!AStmt)
    return StmtError();

  assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");

  setFunctionHasBranchProtectedScope();
  // Let the enclosing 'sections' region know whether a cancel was seen here.
  DSAStack->setParentCancelRegion(DSAStack->isCancelRegion());

  return OMPSectionDirective::Create(Context, StartLoc, EndLoc, AStmt,
                                     DSAStack->isCancelRegion());
}
5652 
5654  Stmt *AStmt,
5655  SourceLocation StartLoc,
5656  SourceLocation EndLoc) {
5657  if (!AStmt)
5658  return StmtError();
5659 
5660  assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
5661 
5662  setFunctionHasBranchProtectedScope();
5663 
5664  // OpenMP [2.7.3, single Construct, Restrictions]
5665  // The copyprivate clause must not be used with the nowait clause.
5666  const OMPClause *Nowait = nullptr;
5667  const OMPClause *Copyprivate = nullptr;
5668  for (const OMPClause *Clause : Clauses) {
5669  if (Clause->getClauseKind() == OMPC_nowait)
5670  Nowait = Clause;
5671  else if (Clause->getClauseKind() == OMPC_copyprivate)
5672  Copyprivate = Clause;
5673  if (Copyprivate && Nowait) {
5674  Diag(Copyprivate->getBeginLoc(),