1 //===--- SemaOpenMP.cpp - Semantic Analysis for OpenMP constructs ---------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 /// \file
10 /// This file implements semantic analysis for OpenMP directives and
11 /// clauses.
12 ///
13 //===----------------------------------------------------------------------===//
14 
15 #include "TreeTransform.h"
16 #include "clang/AST/ASTContext.h"
19 #include "clang/AST/Decl.h"
20 #include "clang/AST/DeclCXX.h"
21 #include "clang/AST/DeclOpenMP.h"
22 #include "clang/AST/StmtCXX.h"
23 #include "clang/AST/StmtOpenMP.h"
24 #include "clang/AST/StmtVisitor.h"
27 #include "clang/Sema/Lookup.h"
28 #include "clang/Sema/Scope.h"
29 #include "clang/Sema/ScopeInfo.h"
31 #include "llvm/ADT/PointerEmbeddedInt.h"
32 using namespace clang;
33 
34 //===----------------------------------------------------------------------===//
35 // Stack of data-sharing attributes for variables
36 //===----------------------------------------------------------------------===//
37 
38 static Expr *checkMapClauseExpressionBase(
39  Sema &SemaRef, Expr *E,
40  OMPClauseMappableExprCommon::MappableExprComponentList &CurComponents,
41  OpenMPClauseKind CKind, bool NoDiagnose);
43 namespace {
44 /// Default data sharing attributes, which can be applied to a directive.
45 enum DefaultDataSharingAttributes {
46  DSA_unspecified = 0, /// Data sharing attribute not specified.
47  DSA_none = 1 << 0, /// Default data sharing attribute 'none'.
48  DSA_shared = 1 << 1, /// Default data sharing attribute 'shared'.
49 };
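// Illustrative note (not part of the original source): these values back the
// 'default' clause handling below; with default(none) every referenced
// variable needs an explicit attribute. A minimal sketch:
//
//   int a = 0, b = 1;
//   #pragma omp parallel default(none) shared(a) firstprivate(b)
//   a += b;   // OK: both variables have explicit data-sharing attributes
//
//   #pragma omp parallel default(none)
//   a += b;   // diagnosed: no explicit data-sharing attribute for 'a', 'b'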
50 
51 /// Attributes of the defaultmap clause.
52 enum DefaultMapAttributes {
53  DMA_unspecified, /// Default mapping is not specified.
54  DMA_tofrom_scalar, /// Default mapping is 'tofrom:scalar'.
55 };
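// Illustrative note (not part of the original source): DMA_tofrom_scalar
// models the only defaultmap form accepted here (OpenMP 4.5), e.g.:
//
//   int s = 0;
//   #pragma omp target defaultmap(tofrom: scalar)
//   s++;   // 's' is mapped tofrom instead of being treated as firstprivate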
56 
57 /// Stack for tracking declarations used in OpenMP directives and
58 /// clauses and their data-sharing attributes.
59 class DSAStackTy {
60 public:
61  struct DSAVarData {
62  OpenMPDirectiveKind DKind = OMPD_unknown;
63  OpenMPClauseKind CKind = OMPC_unknown;
64  const Expr *RefExpr = nullptr;
65  DeclRefExpr *PrivateCopy = nullptr;
66  SourceLocation ImplicitDSALoc;
67  DSAVarData() = default;
68  DSAVarData(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind,
69  const Expr *RefExpr, DeclRefExpr *PrivateCopy,
70  SourceLocation ImplicitDSALoc)
71  : DKind(DKind), CKind(CKind), RefExpr(RefExpr),
72  PrivateCopy(PrivateCopy), ImplicitDSALoc(ImplicitDSALoc) {}
73  };
74  using OperatorOffsetTy =
75  llvm::SmallVector<std::pair<Expr *, OverloadedOperatorKind>, 4>;
76  using DoacrossDependMapTy =
77  llvm::DenseMap<OMPDependClause *, OperatorOffsetTy>;
78 
79 private:
80  struct DSAInfo {
81  OpenMPClauseKind Attributes = OMPC_unknown;
82  /// Pointer to a reference expression and a flag which shows that the
83  /// variable is marked as lastprivate(true) or not (false).
84  llvm::PointerIntPair<const Expr *, 1, bool> RefExpr;
85  DeclRefExpr *PrivateCopy = nullptr;
86  };
87  using DeclSAMapTy = llvm::SmallDenseMap<const ValueDecl *, DSAInfo, 8>;
88  using AlignedMapTy = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 8>;
89  using LCDeclInfo = std::pair<unsigned, VarDecl *>;
90  using LoopControlVariablesMapTy =
91  llvm::SmallDenseMap<const ValueDecl *, LCDeclInfo, 8>;
92  /// Struct that associates a component with the clause kind where they are
93  /// found.
94  struct MappedExprComponentTy {
95  OMPClauseMappableExprCommon::MappableExprComponentLists Components;
96  OpenMPClauseKind Kind = OMPC_unknown;
97  };
98  using MappedExprComponentsTy =
99  llvm::DenseMap<const ValueDecl *, MappedExprComponentTy>;
100  using CriticalsWithHintsTy =
101  llvm::StringMap<std::pair<const OMPCriticalDirective *, llvm::APSInt>>;
102  struct ReductionData {
103  using BOKPtrType = llvm::PointerEmbeddedInt<BinaryOperatorKind, 16>;
104  SourceRange ReductionRange;
105  llvm::PointerUnion<const Expr *, BOKPtrType> ReductionOp;
106  ReductionData() = default;
107  void set(BinaryOperatorKind BO, SourceRange RR) {
108  ReductionRange = RR;
109  ReductionOp = BO;
110  }
111  void set(const Expr *RefExpr, SourceRange RR) {
112  ReductionRange = RR;
113  ReductionOp = RefExpr;
114  }
115  };
116  using DeclReductionMapTy =
117  llvm::SmallDenseMap<const ValueDecl *, ReductionData, 4>;
118 
119  struct SharingMapTy {
120  DeclSAMapTy SharingMap;
121  DeclReductionMapTy ReductionMap;
122  AlignedMapTy AlignedMap;
123  MappedExprComponentsTy MappedExprComponents;
124  LoopControlVariablesMapTy LCVMap;
125  DefaultDataSharingAttributes DefaultAttr = DSA_unspecified;
126  SourceLocation DefaultAttrLoc;
127  DefaultMapAttributes DefaultMapAttr = DMA_unspecified;
128  SourceLocation DefaultMapAttrLoc;
129  OpenMPDirectiveKind Directive = OMPD_unknown;
130  DeclarationNameInfo DirectiveName;
131  Scope *CurScope = nullptr;
132  SourceLocation ConstructLoc;
133  /// Set of 'depend' clauses with 'sink|source' dependence kind. Required to
134  /// get the data (loop counters etc.) about enclosing loop-based construct.
135  /// This data is required during codegen.
136  DoacrossDependMapTy DoacrossDepends;
137  /// The first element (Expr *) holds the optional argument of the
138  /// 'ordered' clause; the pair is present only if the region has an
139  /// 'ordered' clause.
140  llvm::Optional<std::pair<const Expr *, OMPOrderedClause *>> OrderedRegion;
141  bool NowaitRegion = false;
142  bool CancelRegion = false;
143  unsigned AssociatedLoops = 1;
144  SourceLocation InnerTeamsRegionLoc;
145  /// Reference to the taskgroup task_reduction reference expression.
146  Expr *TaskgroupReductionRef = nullptr;
147  SharingMapTy(OpenMPDirectiveKind DKind, DeclarationNameInfo Name,
148  Scope *CurScope, SourceLocation Loc)
149  : Directive(DKind), DirectiveName(Name), CurScope(CurScope),
150  ConstructLoc(Loc) {}
151  SharingMapTy() = default;
152  };
153 
154  using StackTy = SmallVector<SharingMapTy, 4>;
155 
156  /// Stack of used declarations and their data-sharing attributes.
157  DeclSAMapTy Threadprivates;
158  const FunctionScopeInfo *CurrentNonCapturingFunctionScope = nullptr;
159  SmallVector<std::pair<StackTy, const FunctionScopeInfo *>, 4> Stack;
160  /// True, if the check for DSA must be performed on the parent directive;
161  /// false, if on the current directive.
162  OpenMPClauseKind ClauseKindMode = OMPC_unknown;
163  Sema &SemaRef;
164  bool ForceCapturing = false;
165  CriticalsWithHintsTy Criticals;
166 
167  using iterator = StackTy::const_reverse_iterator;
168 
169  DSAVarData getDSA(iterator &Iter, ValueDecl *D) const;
170 
171  /// Checks if the variable is a local for OpenMP region.
172  bool isOpenMPLocal(VarDecl *D, iterator Iter) const;
173 
174  bool isStackEmpty() const {
175  return Stack.empty() ||
176  Stack.back().second != CurrentNonCapturingFunctionScope ||
177  Stack.back().first.empty();
178  }
179 
180 public:
181  explicit DSAStackTy(Sema &S) : SemaRef(S) {}
182 
183  bool isClauseParsingMode() const { return ClauseKindMode != OMPC_unknown; }
184  OpenMPClauseKind getClauseParsingMode() const {
185  assert(isClauseParsingMode() && "Must be in clause parsing mode.");
186  return ClauseKindMode;
187  }
188  void setClauseParsingMode(OpenMPClauseKind K) { ClauseKindMode = K; }
189 
190  bool isForceVarCapturing() const { return ForceCapturing; }
191  void setForceVarCapturing(bool V) { ForceCapturing = V; }
192 
193  void push(OpenMPDirectiveKind DKind, const DeclarationNameInfo &DirName,
194  Scope *CurScope, SourceLocation Loc) {
195  if (Stack.empty() ||
196  Stack.back().second != CurrentNonCapturingFunctionScope)
197  Stack.emplace_back(StackTy(), CurrentNonCapturingFunctionScope);
198  Stack.back().first.emplace_back(DKind, DirName, CurScope, Loc);
199  Stack.back().first.back().DefaultAttrLoc = Loc;
200  }
201 
202  void pop() {
203  assert(!Stack.back().first.empty() &&
204  "Data-sharing attributes stack is empty!");
205  Stack.back().first.pop_back();
206  }
207 
208  /// Start new OpenMP region stack in new non-capturing function.
209  void pushFunction() {
210  const FunctionScopeInfo *CurFnScope = SemaRef.getCurFunction();
211  assert(!isa<CapturingScopeInfo>(CurFnScope));
212  CurrentNonCapturingFunctionScope = CurFnScope;
213  }
214  /// Pop region stack for non-capturing function.
215  void popFunction(const FunctionScopeInfo *OldFSI) {
216  if (!Stack.empty() && Stack.back().second == OldFSI) {
217  assert(Stack.back().first.empty());
218  Stack.pop_back();
219  }
220  CurrentNonCapturingFunctionScope = nullptr;
221  for (const FunctionScopeInfo *FSI : llvm::reverse(SemaRef.FunctionScopes)) {
222  if (!isa<CapturingScopeInfo>(FSI)) {
223  CurrentNonCapturingFunctionScope = FSI;
224  break;
225  }
226  }
227  }
228 
229  void addCriticalWithHint(const OMPCriticalDirective *D, llvm::APSInt Hint) {
230  Criticals.try_emplace(D->getDirectiveName().getAsString(), D, Hint);
231  }
232  const std::pair<const OMPCriticalDirective *, llvm::APSInt>
233  getCriticalWithHint(const DeclarationNameInfo &Name) const {
234  auto I = Criticals.find(Name.getAsString());
235  if (I != Criticals.end())
236  return I->second;
237  return std::make_pair(nullptr, llvm::APSInt());
238  }
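// Illustrative note (not part of the original source): the Criticals map lets
// Sema check that 'critical' directives with the same name use the same hint:
//
//   #pragma omp critical (L) hint(omp_lock_hint_speculative)
//   { /* ... */ }
//   #pragma omp critical (L) hint(omp_lock_hint_contended)   // diagnosed
//   { /* ... */ }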
239  /// If an 'aligned' declaration for the given variable \a D has not been
240  /// seen yet, add it and return nullptr; otherwise return the expression of
241  /// the previous occurrence for use in diagnostics.
242  const Expr *addUniqueAligned(const ValueDecl *D, const Expr *NewDE);
243 
244  /// Register specified variable as loop control variable.
245  void addLoopControlVariable(const ValueDecl *D, VarDecl *Capture);
246  /// Check if the specified variable is a loop control variable for
247  /// current region.
248  /// \return The index of the loop control variable in the list of associated
249  /// for-loops (from outer to inner).
250  const LCDeclInfo isLoopControlVariable(const ValueDecl *D) const;
251  /// Check if the specified variable is a loop control variable for
252  /// parent region.
253  /// \return The index of the loop control variable in the list of associated
254  /// for-loops (from outer to inner).
255  const LCDeclInfo isParentLoopControlVariable(const ValueDecl *D) const;
256  /// Get the loop control variable for the I-th loop (or nullptr) in
257  /// parent directive.
258  const ValueDecl *getParentLoopControlVariable(unsigned I) const;
259 
260  /// Adds explicit data sharing attribute to the specified declaration.
261  void addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
262  DeclRefExpr *PrivateCopy = nullptr);
263 
264  /// Adds additional information for the reduction items with the reduction id
265  /// represented as an operator.
266  void addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
267  BinaryOperatorKind BOK);
268  /// Adds additional information for the reduction items with the reduction id
269  /// represented as reduction identifier.
270  void addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
271  const Expr *ReductionRef);
272  /// Returns the location and reduction operation from the innermost parent
273  /// region for the given \p D.
274  const DSAVarData
275  getTopMostTaskgroupReductionData(const ValueDecl *D, SourceRange &SR,
276  BinaryOperatorKind &BOK,
277  Expr *&TaskgroupDescriptor) const;
278  /// Returns the location and reduction operation from the innermost parent
279  /// region for the given \p D.
280  const DSAVarData
281  getTopMostTaskgroupReductionData(const ValueDecl *D, SourceRange &SR,
282  const Expr *&ReductionRef,
283  Expr *&TaskgroupDescriptor) const;
284  /// Return reduction reference expression for the current taskgroup.
285  Expr *getTaskgroupReductionRef() const {
286  assert(Stack.back().first.back().Directive == OMPD_taskgroup &&
287  "taskgroup reference expression requested for non taskgroup "
288  "directive.");
289  return Stack.back().first.back().TaskgroupReductionRef;
290  }
291  /// Checks if the given \p VD declaration is actually a taskgroup reduction
292  /// descriptor variable at the \p Level of OpenMP regions.
293  bool isTaskgroupReductionRef(const ValueDecl *VD, unsigned Level) const {
294  return Stack.back().first[Level].TaskgroupReductionRef &&
295  cast<DeclRefExpr>(Stack.back().first[Level].TaskgroupReductionRef)
296  ->getDecl() == VD;
297  }
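// Illustrative note (not part of the original source): the descriptor tracked
// above supports taskgroup reductions such as ('compute' is a placeholder):
//
//   int sum = 0;
//   #pragma omp taskgroup task_reduction(+ : sum)
//   {
//   #pragma omp task in_reduction(+ : sum)
//     sum += compute();
//   }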
298 
299  /// Returns data sharing attributes from top of the stack for the
300  /// specified declaration.
301  const DSAVarData getTopDSA(ValueDecl *D, bool FromParent);
302  /// Returns data-sharing attributes for the specified declaration.
303  const DSAVarData getImplicitDSA(ValueDecl *D, bool FromParent) const;
304  /// Checks if the specified variable has data-sharing attributes that
305  /// match the specified \a CPred predicate in any directive that matches the
306  /// \a DPred predicate.
307  const DSAVarData
308  hasDSA(ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
309  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
310  bool FromParent) const;
311  /// Checks if the specified variable has data-sharing attributes that
312  /// match the specified \a CPred predicate in the innermost directive that
313  /// matches the \a DPred predicate.
314  const DSAVarData
315  hasInnermostDSA(ValueDecl *D,
316  const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
317  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
318  bool FromParent) const;
319  /// Checks if the specified variable has explicit data-sharing
320  /// attributes that match the specified \a CPred predicate at the specified
321  /// OpenMP region.
322  bool hasExplicitDSA(const ValueDecl *D,
323  const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
324  unsigned Level, bool NotLastprivate = false) const;
325 
326  /// Returns true if the directive at level \a Level matches the
327  /// specified \a DPred predicate.
328  bool hasExplicitDirective(
329  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
330  unsigned Level) const;
331 
332  /// Finds a directive which matches specified \a DPred predicate.
333  bool hasDirective(
334  const llvm::function_ref<bool(
335  OpenMPDirectiveKind, const DeclarationNameInfo &, SourceLocation)>
336  DPred,
337  bool FromParent) const;
338 
339  /// Returns currently analyzed directive.
340  OpenMPDirectiveKind getCurrentDirective() const {
341  return isStackEmpty() ? OMPD_unknown : Stack.back().first.back().Directive;
342  }
343  /// Returns directive kind at specified level.
344  OpenMPDirectiveKind getDirective(unsigned Level) const {
345  assert(!isStackEmpty() && "No directive at specified level.");
346  return Stack.back().first[Level].Directive;
347  }
348  /// Returns parent directive.
349  OpenMPDirectiveKind getParentDirective() const {
350  if (isStackEmpty() || Stack.back().first.size() == 1)
351  return OMPD_unknown;
352  return std::next(Stack.back().first.rbegin())->Directive;
353  }
354 
355  /// Set default data sharing attribute to none.
356  void setDefaultDSANone(SourceLocation Loc) {
357  assert(!isStackEmpty());
358  Stack.back().first.back().DefaultAttr = DSA_none;
359  Stack.back().first.back().DefaultAttrLoc = Loc;
360  }
361  /// Set default data sharing attribute to shared.
362  void setDefaultDSAShared(SourceLocation Loc) {
363  assert(!isStackEmpty());
364  Stack.back().first.back().DefaultAttr = DSA_shared;
365  Stack.back().first.back().DefaultAttrLoc = Loc;
366  }
367  /// Set default data mapping attribute to 'tofrom:scalar'.
368  void setDefaultDMAToFromScalar(SourceLocation Loc) {
369  assert(!isStackEmpty());
370  Stack.back().first.back().DefaultMapAttr = DMA_tofrom_scalar;
371  Stack.back().first.back().DefaultMapAttrLoc = Loc;
372  }
373 
374  DefaultDataSharingAttributes getDefaultDSA() const {
375  return isStackEmpty() ? DSA_unspecified
376  : Stack.back().first.back().DefaultAttr;
377  }
378  SourceLocation getDefaultDSALocation() const {
379  return isStackEmpty() ? SourceLocation()
380  : Stack.back().first.back().DefaultAttrLoc;
381  }
382  DefaultMapAttributes getDefaultDMA() const {
383  return isStackEmpty() ? DMA_unspecified
384  : Stack.back().first.back().DefaultMapAttr;
385  }
386  DefaultMapAttributes getDefaultDMAAtLevel(unsigned Level) const {
387  return Stack.back().first[Level].DefaultMapAttr;
388  }
389  SourceLocation getDefaultDMALocation() const {
390  return isStackEmpty() ? SourceLocation()
391  : Stack.back().first.back().DefaultMapAttrLoc;
392  }
393 
394  /// Checks if the specified variable is a threadprivate.
395  bool isThreadPrivate(VarDecl *D) {
396  const DSAVarData DVar = getTopDSA(D, false);
397  return isOpenMPThreadPrivate(DVar.CKind);
398  }
399 
400  /// Marks current region as ordered (it has an 'ordered' clause).
401  void setOrderedRegion(bool IsOrdered, const Expr *Param,
402  OMPOrderedClause *Clause) {
403  assert(!isStackEmpty());
404  if (IsOrdered)
405  Stack.back().first.back().OrderedRegion.emplace(Param, Clause);
406  else
407  Stack.back().first.back().OrderedRegion.reset();
408  }
409  /// Returns true, if region is ordered (has associated 'ordered' clause),
410  /// false - otherwise.
411  bool isOrderedRegion() const {
412  if (isStackEmpty())
413  return false;
414  return Stack.back().first.rbegin()->OrderedRegion.hasValue();
415  }
416  /// Returns optional parameter for the ordered region.
417  std::pair<const Expr *, OMPOrderedClause *> getOrderedRegionParam() const {
418  if (isStackEmpty() ||
419  !Stack.back().first.rbegin()->OrderedRegion.hasValue())
420  return std::make_pair(nullptr, nullptr);
421  return Stack.back().first.rbegin()->OrderedRegion.getValue();
422  }
423  /// Returns true, if parent region is ordered (has associated
424  /// 'ordered' clause), false - otherwise.
425  bool isParentOrderedRegion() const {
426  if (isStackEmpty() || Stack.back().first.size() == 1)
427  return false;
428  return std::next(Stack.back().first.rbegin())->OrderedRegion.hasValue();
429  }
430  /// Returns optional parameter for the ordered region.
431  std::pair<const Expr *, OMPOrderedClause *>
432  getParentOrderedRegionParam() const {
433  if (isStackEmpty() || Stack.back().first.size() == 1 ||
434  !std::next(Stack.back().first.rbegin())->OrderedRegion.hasValue())
435  return std::make_pair(nullptr, nullptr);
436  return std::next(Stack.back().first.rbegin())->OrderedRegion.getValue();
437  }
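// Illustrative note (not part of the original source): the recorded pair is
// the optional argument of the 'ordered' clause (the '2' in 'ordered(2)') and
// the clause itself; a plain 'ordered' clause is stored with a null argument:
//
//   #pragma omp for ordered
//   for (int i = 0; i < n; ++i) {   // 'n' and 'log' are placeholders
//   #pragma omp ordered
//     log(i);
//   }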
438  /// Marks current region as nowait (it has a 'nowait' clause).
439  void setNowaitRegion(bool IsNowait = true) {
440  assert(!isStackEmpty());
441  Stack.back().first.back().NowaitRegion = IsNowait;
442  }
443  /// Returns true, if parent region is nowait (has associated
444  /// 'nowait' clause), false - otherwise.
445  bool isParentNowaitRegion() const {
446  if (isStackEmpty() || Stack.back().first.size() == 1)
447  return false;
448  return std::next(Stack.back().first.rbegin())->NowaitRegion;
449  }
450  /// Marks parent region as cancel region.
451  void setParentCancelRegion(bool Cancel = true) {
452  if (!isStackEmpty() && Stack.back().first.size() > 1) {
453  auto &StackElemRef = *std::next(Stack.back().first.rbegin());
454  StackElemRef.CancelRegion |= StackElemRef.CancelRegion || Cancel;
455  }
456  }
457  /// Return true if current region has inner cancel construct.
458  bool isCancelRegion() const {
459  return isStackEmpty() ? false : Stack.back().first.back().CancelRegion;
460  }
461 
462  /// Set collapse value for the region.
463  void setAssociatedLoops(unsigned Val) {
464  assert(!isStackEmpty());
465  Stack.back().first.back().AssociatedLoops = Val;
466  }
467  /// Return collapse value for region.
468  unsigned getAssociatedLoops() const {
469  return isStackEmpty() ? 0 : Stack.back().first.back().AssociatedLoops;
470  }
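// Illustrative note (not part of the original source): the associated-loop
// count comes from the 'collapse' (or 'ordered') clause; collapse(2) below
// associates both loops (and their iteration variables) with the construct:
//
//   #pragma omp for collapse(2)
//   for (int i = 0; i < 4; ++i)
//     for (int j = 0; j < 4; ++j)
//       body(i, j);   // 'body' is a placeholder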
471 
472  /// Marks current target region as one with closely nested teams
473  /// region.
474  void setParentTeamsRegionLoc(SourceLocation TeamsRegionLoc) {
475  if (!isStackEmpty() && Stack.back().first.size() > 1) {
476  std::next(Stack.back().first.rbegin())->InnerTeamsRegionLoc =
477  TeamsRegionLoc;
478  }
479  }
480  /// Returns true, if current region has closely nested teams region.
481  bool hasInnerTeamsRegion() const {
482  return getInnerTeamsRegionLoc().isValid();
483  }
484  /// Returns location of the nested teams region (if any).
485  SourceLocation getInnerTeamsRegionLoc() const {
486  return isStackEmpty() ? SourceLocation()
487  : Stack.back().first.back().InnerTeamsRegionLoc;
488  }
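// Illustrative note (not part of the original source): a 'teams' region must
// be the only content strictly nested inside its 'target' region, which is
// what the recorded location is used to diagnose ('foo' is a placeholder):
//
//   #pragma omp target
//   #pragma omp teams        // OK
//   { /* ... */ }
//
//   #pragma omp target
//   { foo();                 // anything beside the nested 'teams' is an error
//   #pragma omp teams
//     { /* ... */ }
//   }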
489 
490  Scope *getCurScope() const {
491  return isStackEmpty() ? nullptr : Stack.back().first.back().CurScope;
492  }
493  SourceLocation getConstructLoc() const {
494  return isStackEmpty() ? SourceLocation()
495  : Stack.back().first.back().ConstructLoc;
496  }
497 
498  /// Do the check specified in \a Check to all component lists and return true
499  /// if any issue is found.
500  bool checkMappableExprComponentListsForDecl(
501  const ValueDecl *VD, bool CurrentRegionOnly,
502  const llvm::function_ref<
503  bool(OMPClauseMappableExprCommon::MappableExprComponentListRef,
504  OpenMPClauseKind)>
505  Check) const {
506  if (isStackEmpty())
507  return false;
508  auto SI = Stack.back().first.rbegin();
509  auto SE = Stack.back().first.rend();
510 
511  if (SI == SE)
512  return false;
513 
514  if (CurrentRegionOnly)
515  SE = std::next(SI);
516  else
517  std::advance(SI, 1);
518 
519  for (; SI != SE; ++SI) {
520  auto MI = SI->MappedExprComponents.find(VD);
521  if (MI != SI->MappedExprComponents.end())
522  for (OMPClauseMappableExprCommon::MappableExprComponentListRef L :
523  MI->second.Components)
524  if (Check(L, MI->second.Kind))
525  return true;
526  }
527  return false;
528  }
529 
530  /// Do the check specified in \a Check to all component lists at a given level
531  /// and return true if any issue is found.
532  bool checkMappableExprComponentListsForDeclAtLevel(
533  const ValueDecl *VD, unsigned Level,
534  const llvm::function_ref<
535  bool(OMPClauseMappableExprCommon::MappableExprComponentListRef,
536  OpenMPClauseKind)>
537  Check) const {
538  if (isStackEmpty())
539  return false;
540 
541  auto StartI = Stack.back().first.begin();
542  auto EndI = Stack.back().first.end();
543  if (std::distance(StartI, EndI) <= (int)Level)
544  return false;
545  std::advance(StartI, Level);
546 
547  auto MI = StartI->MappedExprComponents.find(VD);
548  if (MI != StartI->MappedExprComponents.end())
549  for (OMPClauseMappableExprCommon::MappableExprComponentListRef L :
550  MI->second.Components)
551  if (Check(L, MI->second.Kind))
552  return true;
553  return false;
554  }
555 
556  /// Create a new mappable expression component list associated with a given
557  /// declaration and initialize it with the provided list of components.
558  void addMappableExpressionComponents(
559  const ValueDecl *VD,
560  OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
561  OpenMPClauseKind WhereFoundClauseKind) {
562  assert(!isStackEmpty() &&
563  "Not expecting to retrieve components from an empty stack!");
564  MappedExprComponentTy &MEC =
565  Stack.back().first.back().MappedExprComponents[VD];
566  // Create new entry and append the new components there.
567  MEC.Components.resize(MEC.Components.size() + 1);
568  MEC.Components.back().append(Components.begin(), Components.end());
569  MEC.Kind = WhereFoundClauseKind;
570  }
571 
572  unsigned getNestingLevel() const {
573  assert(!isStackEmpty());
574  return Stack.back().first.size() - 1;
575  }
576  void addDoacrossDependClause(OMPDependClause *C,
577  const OperatorOffsetTy &OpsOffs) {
578  assert(!isStackEmpty() && Stack.back().first.size() > 1);
579  SharingMapTy &StackElem = *std::next(Stack.back().first.rbegin());
580  assert(isOpenMPWorksharingDirective(StackElem.Directive));
581  StackElem.DoacrossDepends.try_emplace(C, OpsOffs);
582  }
583  llvm::iterator_range<DoacrossDependMapTy::const_iterator>
584  getDoacrossDependClauses() const {
585  assert(!isStackEmpty());
586  const SharingMapTy &StackElem = Stack.back().first.back();
587  if (isOpenMPWorksharingDirective(StackElem.Directive)) {
588  const DoacrossDependMapTy &Ref = StackElem.DoacrossDepends;
589  return llvm::make_range(Ref.begin(), Ref.end());
590  }
591  return llvm::make_range(StackElem.DoacrossDepends.end(),
592  StackElem.DoacrossDepends.end());
593  }
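// Illustrative note (not part of the original source): the stored clauses come
// from doacross loops ('a', 'n', 'produce' are placeholders):
//
//   #pragma omp for ordered(1)
//   for (int i = 1; i < n; ++i) {
//   #pragma omp ordered depend(sink : i - 1)
//     int prev = a[i - 1];
//   #pragma omp ordered depend(source)
//     a[i] = produce(prev);
//   }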
594 };
595 bool isParallelOrTaskRegion(OpenMPDirectiveKind DKind) {
596  return isOpenMPParallelDirective(DKind) || isOpenMPTaskingDirective(DKind) ||
597  isOpenMPTeamsDirective(DKind) || DKind == OMPD_unknown;
598 }
599 
600 } // namespace
601 
602 static const Expr *getExprAsWritten(const Expr *E) {
603  if (const auto *ExprTemp = dyn_cast<ExprWithCleanups>(E))
604  E = ExprTemp->getSubExpr();
605 
606  if (const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
607  E = MTE->GetTemporaryExpr();
608 
609  while (const auto *Binder = dyn_cast<CXXBindTemporaryExpr>(E))
610  E = Binder->getSubExpr();
611 
612  if (const auto *ICE = dyn_cast<ImplicitCastExpr>(E))
613  E = ICE->getSubExprAsWritten();
614  return E->IgnoreParens();
615 }
616 
617 static Expr *getExprAsWritten(Expr *E) {
618  return const_cast<Expr *>(getExprAsWritten(const_cast<const Expr *>(E)));
619 }
620 
621 static const ValueDecl *getCanonicalDecl(const ValueDecl *D) {
622  if (const auto *CED = dyn_cast<OMPCapturedExprDecl>(D))
623  if (const auto *ME = dyn_cast<MemberExpr>(getExprAsWritten(CED->getInit())))
624  D = ME->getMemberDecl();
625  const auto *VD = dyn_cast<VarDecl>(D);
626  const auto *FD = dyn_cast<FieldDecl>(D);
627  if (VD != nullptr) {
628  VD = VD->getCanonicalDecl();
629  D = VD;
630  } else {
631  assert(FD);
632  FD = FD->getCanonicalDecl();
633  D = FD;
634  }
635  return D;
636 }
637 
638 static ValueDecl *getCanonicalDecl(ValueDecl *D) {
639  return const_cast<ValueDecl *>(
640  getCanonicalDecl(const_cast<const ValueDecl *>(D)));
641 }
642 
643 DSAStackTy::DSAVarData DSAStackTy::getDSA(iterator &Iter,
644  ValueDecl *D) const {
645  D = getCanonicalDecl(D);
646  auto *VD = dyn_cast<VarDecl>(D);
647  const auto *FD = dyn_cast<FieldDecl>(D);
648  DSAVarData DVar;
649  if (isStackEmpty() || Iter == Stack.back().first.rend()) {
650  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
651  // in a region but not in construct]
652  // File-scope or namespace-scope variables referenced in called routines
653  // in the region are shared unless they appear in a threadprivate
654  // directive.
655  if (VD && !VD->isFunctionOrMethodVarDecl() && !isa<ParmVarDecl>(VD))
656  DVar.CKind = OMPC_shared;
657 
658  // OpenMP [2.9.1.2, Data-sharing Attribute Rules for Variables Referenced
659  // in a region but not in construct]
660  // Variables with static storage duration that are declared in called
661  // routines in the region are shared.
662  if (VD && VD->hasGlobalStorage())
663  DVar.CKind = OMPC_shared;
664 
665  // Non-static data members are shared by default.
666  if (FD)
667  DVar.CKind = OMPC_shared;
668 
669  return DVar;
670  }
671 
672  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
673  // in a Construct, C/C++, predetermined, p.1]
674  // Variables with automatic storage duration that are declared in a scope
675  // inside the construct are private.
676  if (VD && isOpenMPLocal(VD, Iter) && VD->isLocalVarDecl() &&
677  (VD->getStorageClass() == SC_Auto || VD->getStorageClass() == SC_None)) {
678  DVar.CKind = OMPC_private;
679  return DVar;
680  }
681 
682  DVar.DKind = Iter->Directive;
683  // Explicitly specified attributes and local variables with predetermined
684  // attributes.
685  if (Iter->SharingMap.count(D)) {
686  const DSAInfo &Data = Iter->SharingMap.lookup(D);
687  DVar.RefExpr = Data.RefExpr.getPointer();
688  DVar.PrivateCopy = Data.PrivateCopy;
689  DVar.CKind = Data.Attributes;
690  DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
691  return DVar;
692  }
693 
694  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
695  // in a Construct, C/C++, implicitly determined, p.1]
696  // In a parallel or task construct, the data-sharing attributes of these
697  // variables are determined by the default clause, if present.
698  switch (Iter->DefaultAttr) {
699  case DSA_shared:
700  DVar.CKind = OMPC_shared;
701  DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
702  return DVar;
703  case DSA_none:
704  return DVar;
705  case DSA_unspecified:
706  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
707  // in a Construct, implicitly determined, p.2]
708  // In a parallel construct, if no default clause is present, these
709  // variables are shared.
710  DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
711  if (isOpenMPParallelDirective(DVar.DKind) ||
712  isOpenMPTeamsDirective(DVar.DKind)) {
713  DVar.CKind = OMPC_shared;
714  return DVar;
715  }
716 
717  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
718  // in a Construct, implicitly determined, p.4]
719  // In a task construct, if no default clause is present, a variable that in
720  // the enclosing context is determined to be shared by all implicit tasks
721  // bound to the current team is shared.
722  if (isOpenMPTaskingDirective(DVar.DKind)) {
723  DSAVarData DVarTemp;
724  iterator I = Iter, E = Stack.back().first.rend();
725  do {
726  ++I;
727  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables
728  // Referenced in a Construct, implicitly determined, p.6]
729  // In a task construct, if no default clause is present, a variable
730  // whose data-sharing attribute is not determined by the rules above is
731  // firstprivate.
732  DVarTemp = getDSA(I, D);
733  if (DVarTemp.CKind != OMPC_shared) {
734  DVar.RefExpr = nullptr;
735  DVar.CKind = OMPC_firstprivate;
736  return DVar;
737  }
738  } while (I != E && !isParallelOrTaskRegion(I->Directive));
739  DVar.CKind =
740  (DVarTemp.CKind == OMPC_unknown) ? OMPC_firstprivate : OMPC_shared;
741  return DVar;
742  }
743  }
744  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
745  // in a Construct, implicitly determined, p.3]
746  // For constructs other than task, if no default clause is present, these
747  // variables inherit their data-sharing attributes from the enclosing
748  // context.
749  return getDSA(++Iter, D);
750 }
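// Illustrative note (not part of the original source): the tasking branch
// above implements, for example:
//
//   int x = 0, y = 0;
//   #pragma omp parallel shared(x) private(y)
//   {
//   #pragma omp task   // no 'default' clause
//     {
//       x++;   // shared in the enclosing context -> shared in the task
//       y++;   // not shared there -> implicitly firstprivate in the task
//     }
//   }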
751 
752 const Expr *DSAStackTy::addUniqueAligned(const ValueDecl *D,
753  const Expr *NewDE) {
754  assert(!isStackEmpty() && "Data sharing attributes stack is empty");
755  D = getCanonicalDecl(D);
756  SharingMapTy &StackElem = Stack.back().first.back();
757  auto It = StackElem.AlignedMap.find(D);
758  if (It == StackElem.AlignedMap.end()) {
759  assert(NewDE && "Unexpected nullptr expr to be added into aligned map");
760  StackElem.AlignedMap[D] = NewDE;
761  return nullptr;
762  }
763  assert(It->second && "Unexpected nullptr expr in the aligned map");
764  return It->second;
765 }
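// Illustrative note (not part of the original source): a list item may appear
// in at most one 'aligned' clause per directive; the saved expression marks
// the earlier occurrence in the duplicate diagnostic ('p' is a placeholder):
//
//   #pragma omp simd aligned(p : 8) aligned(p : 16)   // second 'p' diagnosed
//   for (int i = 0; i < 64; ++i)
//     p[i] = 0;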
766 
767 void DSAStackTy::addLoopControlVariable(const ValueDecl *D, VarDecl *Capture) {
768  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
769  D = getCanonicalDecl(D);
770  SharingMapTy &StackElem = Stack.back().first.back();
771  StackElem.LCVMap.try_emplace(
772  D, LCDeclInfo(StackElem.LCVMap.size() + 1, Capture));
773 }
774 
775 const DSAStackTy::LCDeclInfo
776 DSAStackTy::isLoopControlVariable(const ValueDecl *D) const {
777  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
778  D = getCanonicalDecl(D);
779  const SharingMapTy &StackElem = Stack.back().first.back();
780  auto It = StackElem.LCVMap.find(D);
781  if (It != StackElem.LCVMap.end())
782  return It->second;
783  return {0, nullptr};
784 }
785 
786 const DSAStackTy::LCDeclInfo
787 DSAStackTy::isParentLoopControlVariable(const ValueDecl *D) const {
788  assert(!isStackEmpty() && Stack.back().first.size() > 1 &&
789  "Data-sharing attributes stack is empty");
790  D = getCanonicalDecl(D);
791  const SharingMapTy &StackElem = *std::next(Stack.back().first.rbegin());
792  auto It = StackElem.LCVMap.find(D);
793  if (It != StackElem.LCVMap.end())
794  return It->second;
795  return {0, nullptr};
796 }
797 
798 const ValueDecl *DSAStackTy::getParentLoopControlVariable(unsigned I) const {
799  assert(!isStackEmpty() && Stack.back().first.size() > 1 &&
800  "Data-sharing attributes stack is empty");
801  const SharingMapTy &StackElem = *std::next(Stack.back().first.rbegin());
802  if (StackElem.LCVMap.size() < I)
803  return nullptr;
804  for (const auto &Pair : StackElem.LCVMap)
805  if (Pair.second.first == I)
806  return Pair.first;
807  return nullptr;
808 }
809 
810 void DSAStackTy::addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
811  DeclRefExpr *PrivateCopy) {
812  D = getCanonicalDecl(D);
813  if (A == OMPC_threadprivate) {
814  DSAInfo &Data = Threadprivates[D];
815  Data.Attributes = A;
816  Data.RefExpr.setPointer(E);
817  Data.PrivateCopy = nullptr;
818  } else {
819  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
820  DSAInfo &Data = Stack.back().first.back().SharingMap[D];
821  assert(Data.Attributes == OMPC_unknown || (A == Data.Attributes) ||
822  (A == OMPC_firstprivate && Data.Attributes == OMPC_lastprivate) ||
823  (A == OMPC_lastprivate && Data.Attributes == OMPC_firstprivate) ||
824  (isLoopControlVariable(D).first && A == OMPC_private));
825  if (A == OMPC_lastprivate && Data.Attributes == OMPC_firstprivate) {
826  Data.RefExpr.setInt(/*IntVal=*/true);
827  return;
828  }
829  const bool IsLastprivate =
830  A == OMPC_lastprivate || Data.Attributes == OMPC_lastprivate;
831  Data.Attributes = A;
832  Data.RefExpr.setPointerAndInt(E, IsLastprivate);
833  Data.PrivateCopy = PrivateCopy;
834  if (PrivateCopy) {
835  DSAInfo &Data =
836  Stack.back().first.back().SharingMap[PrivateCopy->getDecl()];
837  Data.Attributes = A;
838  Data.RefExpr.setPointerAndInt(PrivateCopy, IsLastprivate);
839  Data.PrivateCopy = nullptr;
840  }
841  }
842 }
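// Illustrative note (not part of the original source): the firstprivate +
// lastprivate special case above handles a variable listed in both clauses of
// one construct:
//
//   int last = 0;
//   #pragma omp parallel for firstprivate(last) lastprivate(last)
//   for (int i = 0; i < 8; ++i)
//     last = i;   // value copied in on entry and copied out after the loop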
843 
844 /// Build a variable declaration for OpenMP loop iteration variable.
845 static VarDecl *buildVarDecl(Sema &SemaRef, SourceLocation Loc, QualType Type,
846  StringRef Name, const AttrVec *Attrs = nullptr,
847  DeclRefExpr *OrigRef = nullptr) {
848  DeclContext *DC = SemaRef.CurContext;
849  IdentifierInfo *II = &SemaRef.PP.getIdentifierTable().get(Name);
850  TypeSourceInfo *TInfo = SemaRef.Context.getTrivialTypeSourceInfo(Type, Loc);
851  auto *Decl =
852  VarDecl::Create(SemaRef.Context, DC, Loc, Loc, II, Type, TInfo, SC_None);
853  if (Attrs) {
854  for (specific_attr_iterator<AlignedAttr> I(Attrs->begin()), E(Attrs->end());
855  I != E; ++I)
856  Decl->addAttr(*I);
857  }
858  Decl->setImplicit();
859  if (OrigRef) {
860  Decl->addAttr(
861  OMPReferencedVarAttr::CreateImplicit(SemaRef.Context, OrigRef));
862  }
863  return Decl;
864 }
865 
866 static DeclRefExpr *buildDeclRefExpr(Sema &S, VarDecl *D, QualType Ty,
867  SourceLocation Loc,
868  bool RefersToCapture = false) {
869  D->setReferenced();
870  D->markUsed(S.Context);
871  return DeclRefExpr::Create(S.getASTContext(), NestedNameSpecifierLoc(),
872  SourceLocation(), D, RefersToCapture, Loc, Ty,
873  VK_LValue);
874 }
875 
876 void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
877  BinaryOperatorKind BOK) {
878  D = getCanonicalDecl(D);
879  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
880  assert(
881  Stack.back().first.back().SharingMap[D].Attributes == OMPC_reduction &&
882  "Additional reduction info may be specified only for reduction items.");
883  ReductionData &ReductionData = Stack.back().first.back().ReductionMap[D];
884  assert(ReductionData.ReductionRange.isInvalid() &&
885  Stack.back().first.back().Directive == OMPD_taskgroup &&
886  "Additional reduction info may be specified only once for reduction "
887  "items.");
888  ReductionData.set(BOK, SR);
889  Expr *&TaskgroupReductionRef =
890  Stack.back().first.back().TaskgroupReductionRef;
891  if (!TaskgroupReductionRef) {
892  VarDecl *VD = buildVarDecl(SemaRef, SR.getBegin(),
893  SemaRef.Context.VoidPtrTy, ".task_red.");
894  TaskgroupReductionRef =
895  buildDeclRefExpr(SemaRef, VD, SemaRef.Context.VoidPtrTy, SR.getBegin());
896  }
897 }
898 
899 void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
900  const Expr *ReductionRef) {
901  D = getCanonicalDecl(D);
902  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
903  assert(
904  Stack.back().first.back().SharingMap[D].Attributes == OMPC_reduction &&
905  "Additional reduction info may be specified only for reduction items.");
906  ReductionData &ReductionData = Stack.back().first.back().ReductionMap[D];
907  assert(ReductionData.ReductionRange.isInvalid() &&
908  Stack.back().first.back().Directive == OMPD_taskgroup &&
909  "Additional reduction info may be specified only once for reduction "
910  "items.");
911  ReductionData.set(ReductionRef, SR);
912  Expr *&TaskgroupReductionRef =
913  Stack.back().first.back().TaskgroupReductionRef;
914  if (!TaskgroupReductionRef) {
915  VarDecl *VD = buildVarDecl(SemaRef, SR.getBegin(),
916  SemaRef.Context.VoidPtrTy, ".task_red.");
917  TaskgroupReductionRef =
918  buildDeclRefExpr(SemaRef, VD, SemaRef.Context.VoidPtrTy, SR.getBegin());
919  }
920 }
921 
922 const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
923  const ValueDecl *D, SourceRange &SR, BinaryOperatorKind &BOK,
924  Expr *&TaskgroupDescriptor) const {
925  D = getCanonicalDecl(D);
926  assert(!isStackEmpty() && "Data-sharing attributes stack is empty.");
927  if (Stack.back().first.empty())
928  return DSAVarData();
929  for (iterator I = std::next(Stack.back().first.rbegin(), 1),
930  E = Stack.back().first.rend();
931  I != E; std::advance(I, 1)) {
932  const DSAInfo &Data = I->SharingMap.lookup(D);
933  if (Data.Attributes != OMPC_reduction || I->Directive != OMPD_taskgroup)
934  continue;
935  const ReductionData &ReductionData = I->ReductionMap.lookup(D);
936  if (!ReductionData.ReductionOp ||
937  ReductionData.ReductionOp.is<const Expr *>())
938  return DSAVarData();
939  SR = ReductionData.ReductionRange;
940  BOK = ReductionData.ReductionOp.get<ReductionData::BOKPtrType>();
941  assert(I->TaskgroupReductionRef && "taskgroup reduction reference "
942  "expression for the descriptor is not "
943  "set.");
944  TaskgroupDescriptor = I->TaskgroupReductionRef;
945  return DSAVarData(OMPD_taskgroup, OMPC_reduction, Data.RefExpr.getPointer(),
946  Data.PrivateCopy, I->DefaultAttrLoc);
947  }
948  return DSAVarData();
949 }
950 
951 const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
952  const ValueDecl *D, SourceRange &SR, const Expr *&ReductionRef,
953  Expr *&TaskgroupDescriptor) const {
954  D = getCanonicalDecl(D);
955  assert(!isStackEmpty() && "Data-sharing attributes stack is empty.");
956  if (Stack.back().first.empty())
957  return DSAVarData();
958  for (iterator I = std::next(Stack.back().first.rbegin(), 1),
959  E = Stack.back().first.rend();
960  I != E; std::advance(I, 1)) {
961  const DSAInfo &Data = I->SharingMap.lookup(D);
962  if (Data.Attributes != OMPC_reduction || I->Directive != OMPD_taskgroup)
963  continue;
964  const ReductionData &ReductionData = I->ReductionMap.lookup(D);
965  if (!ReductionData.ReductionOp ||
966  !ReductionData.ReductionOp.is<const Expr *>())
967  return DSAVarData();
968  SR = ReductionData.ReductionRange;
969  ReductionRef = ReductionData.ReductionOp.get<const Expr *>();
970  assert(I->TaskgroupReductionRef && "taskgroup reduction reference "
971  "expression for the descriptor is not "
972  "set.");
973  TaskgroupDescriptor = I->TaskgroupReductionRef;
974  return DSAVarData(OMPD_taskgroup, OMPC_reduction, Data.RefExpr.getPointer(),
975  Data.PrivateCopy, I->DefaultAttrLoc);
976  }
977  return DSAVarData();
978 }
979 
980 bool DSAStackTy::isOpenMPLocal(VarDecl *D, iterator Iter) const {
981  D = D->getCanonicalDecl();
982  if (!isStackEmpty()) {
983  iterator I = Iter, E = Stack.back().first.rend();
984  Scope *TopScope = nullptr;
985  while (I != E && !isParallelOrTaskRegion(I->Directive) &&
986  !isOpenMPTargetExecutionDirective(I->Directive))
987  ++I;
988  if (I == E)
989  return false;
990  TopScope = I->CurScope ? I->CurScope->getParent() : nullptr;
991  Scope *CurScope = getCurScope();
992  while (CurScope != TopScope && !CurScope->isDeclScope(D))
993  CurScope = CurScope->getParent();
994  return CurScope != TopScope;
995  }
996  return false;
997 }
998 
999 const DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D,
1000  bool FromParent) {
1001  D = getCanonicalDecl(D);
1002  DSAVarData DVar;
1003 
1004  auto *VD = dyn_cast<VarDecl>(D);
1005  auto TI = Threadprivates.find(D);
1006  if (TI != Threadprivates.end()) {
1007  DVar.RefExpr = TI->getSecond().RefExpr.getPointer();
1008  DVar.CKind = OMPC_threadprivate;
1009  return DVar;
1010  }
1011  if (VD && VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
1012  DVar.RefExpr = buildDeclRefExpr(
1013  SemaRef, VD, D->getType().getNonReferenceType(),
1014  VD->getAttr<OMPThreadPrivateDeclAttr>()->getLocation());
1015  DVar.CKind = OMPC_threadprivate;
1016  addDSA(D, DVar.RefExpr, OMPC_threadprivate);
1017  return DVar;
1018  }
1019  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1020  // in a Construct, C/C++, predetermined, p.1]
1021  // Variables appearing in threadprivate directives are threadprivate.
1022  if ((VD && VD->getTLSKind() != VarDecl::TLS_None &&
1023  !(VD->hasAttr<OMPThreadPrivateDeclAttr>() &&
1024  SemaRef.getLangOpts().OpenMPUseTLS &&
1025  SemaRef.getASTContext().getTargetInfo().isTLSSupported())) ||
1026  (VD && VD->getStorageClass() == SC_Register &&
1027  VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())) {
1028  DVar.RefExpr = buildDeclRefExpr(
1029  SemaRef, VD, D->getType().getNonReferenceType(), D->getLocation());
1030  DVar.CKind = OMPC_threadprivate;
1031  addDSA(D, DVar.RefExpr, OMPC_threadprivate);
1032  return DVar;
1033  }
1034  if (SemaRef.getLangOpts().OpenMPCUDAMode && VD &&
1035  VD->isLocalVarDeclOrParm() && !isStackEmpty() &&
1036  !isLoopControlVariable(D).first) {
1037  iterator IterTarget =
1038  std::find_if(Stack.back().first.rbegin(), Stack.back().first.rend(),
1039  [](const SharingMapTy &Data) {
1040  return isOpenMPTargetExecutionDirective(Data.Directive);
1041  });
1042  if (IterTarget != Stack.back().first.rend()) {
1043  iterator ParentIterTarget = std::next(IterTarget, 1);
1044  for (iterator Iter = Stack.back().first.rbegin();
1045  Iter != ParentIterTarget; std::advance(Iter, 1)) {
1046  if (isOpenMPLocal(VD, Iter)) {
1047  DVar.RefExpr =
1048  buildDeclRefExpr(SemaRef, VD, D->getType().getNonReferenceType(),
1049  D->getLocation());
1050  DVar.CKind = OMPC_threadprivate;
1051  return DVar;
1052  }
1053  }
1054  if (!isClauseParsingMode() || IterTarget != Stack.back().first.rbegin()) {
1055  auto DSAIter = IterTarget->SharingMap.find(D);
1056  if (DSAIter != IterTarget->SharingMap.end() &&
1057  isOpenMPPrivate(DSAIter->getSecond().Attributes)) {
1058  DVar.RefExpr = DSAIter->getSecond().RefExpr.getPointer();
1059  DVar.CKind = OMPC_threadprivate;
1060  return DVar;
1061  }
1062  iterator End = Stack.back().first.rend();
1063  if (!SemaRef.isOpenMPCapturedByRef(
1064  D, std::distance(ParentIterTarget, End))) {
1065  DVar.RefExpr =
1066  buildDeclRefExpr(SemaRef, VD, D->getType().getNonReferenceType(),
1067  IterTarget->ConstructLoc);
1068  DVar.CKind = OMPC_threadprivate;
1069  return DVar;
1070  }
1071  }
1072  }
1073  }
1074 
1075  if (isStackEmpty())
1076  // Not in OpenMP execution region and top scope was already checked.
1077  return DVar;
1078 
1079  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1080  // in a Construct, C/C++, predetermined, p.4]
1081  // Static data members are shared.
1082  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1083  // in a Construct, C/C++, predetermined, p.7]
1084  // Variables with static storage duration that are declared in a scope
1085  // inside the construct are shared.
1086  auto &&MatchesAlways = [](OpenMPDirectiveKind) { return true; };
1087  if (VD && VD->isStaticDataMember()) {
1088  DSAVarData DVarTemp = hasDSA(D, isOpenMPPrivate, MatchesAlways, FromParent);
1089  if (DVarTemp.CKind != OMPC_unknown && DVarTemp.RefExpr)
1090  return DVar;
1091 
1092  DVar.CKind = OMPC_shared;
1093  return DVar;
1094  }
1095 
1096  QualType Type = D->getType().getNonReferenceType().getCanonicalType();
1097  bool IsConstant = Type.isConstant(SemaRef.getASTContext());
1098  Type = SemaRef.getASTContext().getBaseElementType(Type);
1099  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1100  // in a Construct, C/C++, predetermined, p.6]
1101  // Variables with const qualified type having no mutable member are
1102  // shared.
1103  const CXXRecordDecl *RD =
1104  SemaRef.getLangOpts().CPlusPlus ? Type->getAsCXXRecordDecl() : nullptr;
1105  if (const auto *CTSD = dyn_cast_or_null<ClassTemplateSpecializationDecl>(RD))
1106  if (const ClassTemplateDecl *CTD = CTSD->getSpecializedTemplate())
1107  RD = CTD->getTemplatedDecl();
1108  if (IsConstant &&
1109  !(SemaRef.getLangOpts().CPlusPlus && RD && RD->hasDefinition() &&
1110  RD->hasMutableFields())) {
1111  // Variables with const-qualified type having no mutable member may be
1112  // listed in a firstprivate clause, even if they are static data members.
1113  DSAVarData DVarTemp =
1114  hasDSA(D, [](OpenMPClauseKind C) { return C == OMPC_firstprivate; },
1115  MatchesAlways, FromParent);
1116  if (DVarTemp.CKind == OMPC_firstprivate && DVarTemp.RefExpr)
1117  return DVarTemp;
1118 
1119  DVar.CKind = OMPC_shared;
1120  return DVar;
1121  }
1122 
1123  // Explicitly specified attributes and local variables with predetermined
1124  // attributes.
1125  iterator I = Stack.back().first.rbegin();
1126  iterator EndI = Stack.back().first.rend();
1127  if (FromParent && I != EndI)
1128  std::advance(I, 1);
1129  auto It = I->SharingMap.find(D);
1130  if (It != I->SharingMap.end()) {
1131  const DSAInfo &Data = It->getSecond();
1132  DVar.RefExpr = Data.RefExpr.getPointer();
1133  DVar.PrivateCopy = Data.PrivateCopy;
1134  DVar.CKind = Data.Attributes;
1135  DVar.ImplicitDSALoc = I->DefaultAttrLoc;
1136  DVar.DKind = I->Directive;
1137  }
1138 
1139  return DVar;
1140 }
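// Illustrative note (not part of the original source): two of the
// predetermined cases handled above ('use' is a placeholder):
//
//   static int counter;
//   #pragma omp threadprivate(counter)   // -> OMPC_threadprivate
//
//   const int limit = 100;               // const, no mutable members
//   #pragma omp parallel
//   use(limit);                          // -> predetermined shared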
1141 
1142 const DSAStackTy::DSAVarData DSAStackTy::getImplicitDSA(ValueDecl *D,
1143  bool FromParent) const {
1144  if (isStackEmpty()) {
1145  iterator I;
1146  return getDSA(I, D);
1147  }
1148  D = getCanonicalDecl(D);
1149  iterator StartI = Stack.back().first.rbegin();
1150  iterator EndI = Stack.back().first.rend();
1151  if (FromParent && StartI != EndI)
1152  std::advance(StartI, 1);
1153  return getDSA(StartI, D);
1154 }
1155 
1156 const DSAStackTy::DSAVarData
1157 DSAStackTy::hasDSA(ValueDecl *D,
1158  const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
1159  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
1160  bool FromParent) const {
1161  if (isStackEmpty())
1162  return {};
1163  D = getCanonicalDecl(D);
1164  iterator I = Stack.back().first.rbegin();
1165  iterator EndI = Stack.back().first.rend();
1166  if (FromParent && I != EndI)
1167  std::advance(I, 1);
1168  for (; I != EndI; std::advance(I, 1)) {
1169  if (!DPred(I->Directive) && !isParallelOrTaskRegion(I->Directive))
1170  continue;
1171  iterator NewI = I;
1172  DSAVarData DVar = getDSA(NewI, D);
1173  if (I == NewI && CPred(DVar.CKind))
1174  return DVar;
1175  }
1176  return {};
1177 }
1178 
1179 const DSAStackTy::DSAVarData DSAStackTy::hasInnermostDSA(
1180  ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
1181  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
1182  bool FromParent) const {
1183  if (isStackEmpty())
1184  return {};
1185  D = getCanonicalDecl(D);
1186  iterator StartI = Stack.back().first.rbegin();
1187  iterator EndI = Stack.back().first.rend();
1188  if (FromParent && StartI != EndI)
1189  std::advance(StartI, 1);
1190  if (StartI == EndI || !DPred(StartI->Directive))
1191  return {};
1192  iterator NewI = StartI;
1193  DSAVarData DVar = getDSA(NewI, D);
1194  return (NewI == StartI && CPred(DVar.CKind)) ? DVar : DSAVarData();
1195 }
1196 
1197 bool DSAStackTy::hasExplicitDSA(
1198  const ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
1199  unsigned Level, bool NotLastprivate) const {
1200  if (isStackEmpty())
1201  return false;
1202  D = getCanonicalDecl(D);
1203  auto StartI = Stack.back().first.begin();
1204  auto EndI = Stack.back().first.end();
1205  if (std::distance(StartI, EndI) <= (int)Level)
1206  return false;
1207  std::advance(StartI, Level);
1208  auto I = StartI->SharingMap.find(D);
1209  return (I != StartI->SharingMap.end()) &&
1210  I->getSecond().RefExpr.getPointer() &&
1211  CPred(I->getSecond().Attributes) &&
1212  (!NotLastprivate || !I->getSecond().RefExpr.getInt());
1213 }
1214 
1215 bool DSAStackTy::hasExplicitDirective(
1216  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
1217  unsigned Level) const {
1218  if (isStackEmpty())
1219  return false;
1220  auto StartI = Stack.back().first.begin();
1221  auto EndI = Stack.back().first.end();
1222  if (std::distance(StartI, EndI) <= (int)Level)
1223  return false;
1224  std::advance(StartI, Level);
1225  return DPred(StartI->Directive);
1226 }
1227 
1228 bool DSAStackTy::hasDirective(
1229  const llvm::function_ref<bool(OpenMPDirectiveKind,
1230  const DeclarationNameInfo &, SourceLocation)>
1231  DPred,
1232  bool FromParent) const {
1233  // We look only in the enclosing region.
1234  if (isStackEmpty())
1235  return false;
1236  auto StartI = std::next(Stack.back().first.rbegin());
1237  auto EndI = Stack.back().first.rend();
1238  if (FromParent && StartI != EndI)
1239  StartI = std::next(StartI);
1240  for (auto I = StartI, EE = EndI; I != EE; ++I) {
1241  if (DPred(I->Directive, I->DirectiveName, I->ConstructLoc))
1242  return true;
1243  }
1244  return false;
1245 }
1246 
1247 void Sema::InitDataSharingAttributesStack() {
1248  VarDataSharingAttributesStack = new DSAStackTy(*this);
1249 }
1250 
1251 #define DSAStack static_cast<DSAStackTy *>(VarDataSharingAttributesStack)
1252 
1253 void Sema::pushOpenMPFunctionRegion() {
1254  DSAStack->pushFunction();
1255 }
1256 
1257 void Sema::popOpenMPFunctionRegion(const FunctionScopeInfo *OldFSI) {
1258  DSAStack->popFunction(OldFSI);
1259 }
1260 
1261 bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const {
1262  assert(LangOpts.OpenMP && "OpenMP is not allowed");
1263 
1264  ASTContext &Ctx = getASTContext();
1265  bool IsByRef = true;
1266 
1267  // Find the directive that is associated with the provided scope.
1268  D = cast<ValueDecl>(D->getCanonicalDecl());
1269  QualType Ty = D->getType();
1270 
1271  if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective, Level)) {
1272  // This table summarizes how a given variable should be passed to the device
1273  // given its type and the clauses where it appears. This table is based on
1274  // the description in OpenMP 4.5 [2.10.4, target Construct] and
1275  // OpenMP 4.5 [2.15.5, Data-mapping Attribute Rules and Clauses].
1276  //
1277  // =========================================================================
1278  // | type | defaultmap | pvt | first | is_device_ptr | map | res. |
1279  // | |(tofrom:scalar)| | pvt | | | |
1280  // =========================================================================
1281  // | scl | | | | - | | bycopy|
1282  // | scl | | - | x | - | - | bycopy|
1283  // | scl | | x | - | - | - | null |
1284  // | scl | x | | | - | | byref |
1285  // | scl | x | - | x | - | - | bycopy|
1286  // | scl | x | x | - | - | - | null |
1287  // | scl | | - | - | - | x | byref |
1288  // | scl | x | - | - | - | x | byref |
1289  //
1290  // | agg | n.a. | | | - | | byref |
1291  // | agg | n.a. | - | x | - | - | byref |
1292  // | agg | n.a. | x | - | - | - | null |
1293  // | agg | n.a. | - | - | - | x | byref |
1294  // | agg | n.a. | - | - | - | x[] | byref |
1295  //
1296  // | ptr | n.a. | | | - | | bycopy|
1297  // | ptr | n.a. | - | x | - | - | bycopy|
1298  // | ptr | n.a. | x | - | - | - | null |
1299  // | ptr | n.a. | - | - | - | x | byref |
1300  // | ptr | n.a. | - | - | - | x[] | bycopy|
1301  // | ptr | n.a. | - | - | x | | bycopy|
1302  // | ptr | n.a. | - | - | x | x | bycopy|
1303  // | ptr | n.a. | - | - | x | x[] | bycopy|
1304  // =========================================================================
1305  // Legend:
1306  // scl - scalar
1307  // ptr - pointer
1308  // agg - aggregate
1309  // x - applies
1310  // - - invalid in this combination
1311  // [] - mapped with an array section
1312  // byref - should be mapped by reference
1313  // byval - should be mapped by value
1314  // null - initialize a local variable to null on the device
1315  //
1316  // Observations:
1317  // - All scalar declarations that show up in a map clause have to be passed
1318  // by reference, because they may have been mapped in the enclosing data
1319  // environment.
1320  // - If the scalar value does not fit the size of uintptr, it has to be
1321  // passed by reference, regardless the result in the table above.
1322  // - For pointers mapped by value that have either an implicit map or an
1323  // array section, the runtime library may pass the NULL value to the
1324  // device instead of the value passed to it by the compiler.
1325 
1326  if (Ty->isReferenceType())
1327  Ty = Ty->castAs<ReferenceType>()->getPointeeType();
1328 
1329  // Locate map clauses and see if the variable being captured is referred to
1330  // in any of those clauses. Here we only care about variables, not fields,
1331  // because fields are part of aggregates.
1332  bool IsVariableUsedInMapClause = false;
1333  bool IsVariableAssociatedWithSection = false;
1334 
1335  DSAStack->checkMappableExprComponentListsForDeclAtLevel(
1336  D, Level,
1337  [&IsVariableUsedInMapClause, &IsVariableAssociatedWithSection, D](
1338  OMPClauseMappableExprCommon::MappableExprComponentListRef
1339  MapExprComponents,
1340  OpenMPClauseKind WhereFoundClauseKind) {
1341  // Only the map clause information influences how a variable is
1342  // captured. E.g. is_device_ptr does not require changing the default
1343  // behavior.
1344  if (WhereFoundClauseKind != OMPC_map)
1345  return false;
1346 
1347  auto EI = MapExprComponents.rbegin();
1348  auto EE = MapExprComponents.rend();
1349 
1350  assert(EI != EE && "Invalid map expression!");
1351 
1352  if (isa<DeclRefExpr>(EI->getAssociatedExpression()))
1353  IsVariableUsedInMapClause |= EI->getAssociatedDeclaration() == D;
1354 
1355  ++EI;
1356  if (EI == EE)
1357  return false;
1358 
1359  if (isa<ArraySubscriptExpr>(EI->getAssociatedExpression()) ||
1360  isa<OMPArraySectionExpr>(EI->getAssociatedExpression()) ||
1361  isa<MemberExpr>(EI->getAssociatedExpression())) {
1362  IsVariableAssociatedWithSection = true;
1363  // There is nothing more we need to know about this variable.
1364  return true;
1365  }
1366 
1367  // Keep looking for more map info.
1368  return false;
1369  });
1370 
1371  if (IsVariableUsedInMapClause) {
1372  // If variable is identified in a map clause it is always captured by
1373  // reference except if it is a pointer that is dereferenced somehow.
1374  IsByRef = !(Ty->isPointerType() && IsVariableAssociatedWithSection);
1375  } else {
1376  // By default, all the data that has a scalar type is mapped by copy
1377  // (except for reduction variables).
1378  IsByRef =
1379  !Ty->isScalarType() ||
1380  DSAStack->getDefaultDMAAtLevel(Level) == DMA_tofrom_scalar ||
1381  DSAStack->hasExplicitDSA(
1382  D, [](OpenMPClauseKind K) { return K == OMPC_reduction; }, Level);
1383  }
1384  }
1385 
1386  if (IsByRef && Ty.getNonReferenceType()->isScalarType()) {
1387  IsByRef =
1388  !DSAStack->hasExplicitDSA(
1389  D,
1390  [](OpenMPClauseKind K) -> bool { return K == OMPC_firstprivate; },
1391  Level, /*NotLastprivate=*/true) &&
1392  // If the variable is artificial and must be captured by value - try to
1393  // capture by value.
1394  !(isa<OMPCapturedExprDecl>(D) && !D->hasAttr<OMPCaptureNoInitAttr>() &&
1395  !cast<OMPCapturedExprDecl>(D)->getInit()->isGLValue());
1396  }
1397 
1398  // When passing data by copy, we need to make sure it fits the uintptr size
1399  // and alignment, because the runtime library only deals with uintptr types.
1400  // If it does not fit the uintptr size, we need to pass the data by reference
1401  // instead.
1402  if (!IsByRef &&
1403  (Ctx.getTypeSizeInChars(Ty) >
1404  Ctx.getTypeSizeInChars(Ctx.getUIntPtrType()) ||
1405  Ctx.getDeclAlign(D) > Ctx.getTypeAlignInChars(Ctx.getUIntPtrType()))) {
1406  IsByRef = true;
1407  }
1408 
1409  return IsByRef;
1410 }
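// Illustrative note (not part of the original source): how the table above
// plays out for a scalar on a 'target' construct:
//
//   int s = 0;
//   #pragma omp target map(tofrom : s)
//   s++;   // appears in a map clause -> captured by reference
//
//   #pragma omp target   // no map clause, no defaultmap(tofrom:scalar)
//   s++;   // plain scalar -> captured by copy (treated as firstprivate)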
1411 
1412 unsigned Sema::getOpenMPNestingLevel() const {
1413  assert(getLangOpts().OpenMP);
1414  return DSAStack->getNestingLevel();
1415 }
1416 
1417 bool Sema::isInOpenMPTargetExecutionDirective() const {
1418  return (isOpenMPTargetExecutionDirective(DSAStack->getCurrentDirective()) &&
1419  !DSAStack->isClauseParsingMode()) ||
1420  DSAStack->hasDirective(
1421  [](OpenMPDirectiveKind K, const DeclarationNameInfo &,
1422  SourceLocation) -> bool {
1423  return isOpenMPTargetExecutionDirective(K);
1424  },
1425  false);
1426 }
1427 
1428 VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D) {
1429  assert(LangOpts.OpenMP && "OpenMP is not allowed");
1430  D = getCanonicalDecl(D);
1431 
1432  // If we are attempting to capture a global variable in a directive with
1433  // 'target' we return true so that this global is also mapped to the device.
1434  //
1435  auto *VD = dyn_cast<VarDecl>(D);
1436  if (VD && !VD->hasLocalStorage()) {
1437  if (isInOpenMPDeclareTargetContext() &&
1438  (getCurCapturedRegion() || getCurBlock() || getCurLambda())) {
1439  // Try to mark variable as declare target if it is used in capturing
1440  // regions.
1441  if (!OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
1442  checkDeclIsAllowedInOpenMPTarget(nullptr, VD);
1443  return nullptr;
1444  } else if (isInOpenMPTargetExecutionDirective()) {
1445  // If the declaration is enclosed in a 'declare target' directive,
1446  // then it should not be captured.
1447  //
1448  if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
1449  return nullptr;
1450  return VD;
1451  }
1452  }
1453 
1454  if (DSAStack->getCurrentDirective() != OMPD_unknown &&
1455  (!DSAStack->isClauseParsingMode() ||
1456  DSAStack->getParentDirective() != OMPD_unknown)) {
1457  auto &&Info = DSAStack->isLoopControlVariable(D);
1458  if (Info.first ||
1459  (VD && VD->hasLocalStorage() &&
1460  isParallelOrTaskRegion(DSAStack->getCurrentDirective())) ||
1461  (VD && DSAStack->isForceVarCapturing()))
1462  return VD ? VD : Info.second;
1463  DSAStackTy::DSAVarData DVarPrivate =
1464  DSAStack->getTopDSA(D, DSAStack->isClauseParsingMode());
1465  if (DVarPrivate.CKind != OMPC_unknown && isOpenMPPrivate(DVarPrivate.CKind))
1466  return VD ? VD : cast<VarDecl>(DVarPrivate.PrivateCopy->getDecl());
1467  DVarPrivate = DSAStack->hasDSA(D, isOpenMPPrivate,
1468  [](OpenMPDirectiveKind) { return true; },
1469  DSAStack->isClauseParsingMode());
1470  if (DVarPrivate.CKind != OMPC_unknown)
1471  return VD ? VD : cast<VarDecl>(DVarPrivate.PrivateCopy->getDecl());
1472  }
1473  return nullptr;
1474 }
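// Illustrative note (editorial addition, not part of the original source): a
// global that is already a 'declare target' declaration is not captured when
// it is referenced inside a target region, e.g.:
//
//   int gv;
//   #pragma omp declare target to(gv)
//   void bar() {
//   #pragma omp target
//     gv = 1;        // 'gv' is mapped through declare target, not captured
//   }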
1475 
1476 void Sema::adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
1477  unsigned Level) const {
1478  SmallVector<OpenMPDirectiveKind, 4> Regions;
1479  getOpenMPCaptureRegions(Regions, DSAStack->getDirective(Level));
1480  FunctionScopesIndex -= Regions.size();
1481 }
1482 
1483 bool Sema::isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const {
1484  assert(LangOpts.OpenMP && "OpenMP is not allowed");
1485  return DSAStack->hasExplicitDSA(
1486  D, [](OpenMPClauseKind K) { return K == OMPC_private; }, Level) ||
1487  (DSAStack->isClauseParsingMode() &&
1488  DSAStack->getClauseParsingMode() == OMPC_private) ||
1489  // Consider the taskgroup reduction descriptor variable private to avoid
1490  // possible capture in the region.
1491  (DSAStack->hasExplicitDirective(
1492  [](OpenMPDirectiveKind K) { return K == OMPD_taskgroup; },
1493  Level) &&
1494  DSAStack->isTaskgroupReductionRef(D, Level));
1495 }
1496 
1497 void Sema::setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D,
1498  unsigned Level) {
1499  assert(LangOpts.OpenMP && "OpenMP is not allowed");
1500  D = getCanonicalDecl(D);
1501  OpenMPClauseKind OMPC = OMPC_unknown;
1502  for (unsigned I = DSAStack->getNestingLevel() + 1; I > Level; --I) {
1503  const unsigned NewLevel = I - 1;
1504  if (DSAStack->hasExplicitDSA(D,
1505  [&OMPC](const OpenMPClauseKind K) {
1506  if (isOpenMPPrivate(K)) {
1507  OMPC = K;
1508  return true;
1509  }
1510  return false;
1511  },
1512  NewLevel))
1513  break;
1514  if (DSAStack->checkMappableExprComponentListsForDeclAtLevel(
1515  D, NewLevel,
1516  [](OMPClauseMappableExprCommon::MappableExprComponentListRef,
1517  OpenMPClauseKind) { return true; })) {
1518  OMPC = OMPC_map;
1519  break;
1520  }
1521  if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective,
1522  NewLevel)) {
1523  OMPC = OMPC_map;
1524  if (D->getType()->isScalarType() &&
1525  DSAStack->getDefaultDMAAtLevel(NewLevel) !=
1526  DefaultMapAttributes::DMA_tofrom_scalar)
1527  OMPC = OMPC_firstprivate;
1528  break;
1529  }
1530  }
1531  if (OMPC != OMPC_unknown)
1532  FD->addAttr(OMPCaptureKindAttr::CreateImplicit(Context, OMPC));
1533 }
1534 
1535 bool Sema::isOpenMPTargetCapturedDecl(const ValueDecl *D,
1536  unsigned Level) const {
1537  assert(LangOpts.OpenMP && "OpenMP is not allowed");
1538  // Return true if the current level is no longer enclosed in a target region.
1539 
1540  const auto *VD = dyn_cast<VarDecl>(D);
1541  return VD && !VD->hasLocalStorage() &&
1542  DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective,
1543  Level);
1544 }
1545 
1546 void Sema::DestroyDataSharingAttributesStack() { delete DSAStack; }
1547 
1548 void Sema::StartOpenMPDSABlock(OpenMPDirectiveKind DKind,
1549  const DeclarationNameInfo &DirName,
1550  Scope *CurScope, SourceLocation Loc) {
1551  DSAStack->push(DKind, DirName, CurScope, Loc);
1552  PushExpressionEvaluationContext(
1553  ExpressionEvaluationContext::PotentiallyEvaluated);
1554 }
1555 
1556 void Sema::StartOpenMPClause(OpenMPClauseKind K) {
1557  DSAStack->setClauseParsingMode(K);
1558 }
1559 
1560 void Sema::EndOpenMPClause() {
1561  DSAStack->setClauseParsingMode(/*K=*/OMPC_unknown);
1562 }
1563 
1564 void Sema::EndOpenMPDSABlock(Stmt *CurDirective) {
1565  // OpenMP [2.14.3.5, Restrictions, C/C++, p.1]
1566  // A variable of class type (or array thereof) that appears in a lastprivate
1567  // clause requires an accessible, unambiguous default constructor for the
1568  // class type, unless the list item is also specified in a firstprivate
1569  // clause.
1570  if (const auto *D = dyn_cast_or_null<OMPExecutableDirective>(CurDirective)) {
1571  for (OMPClause *C : D->clauses()) {
1572  if (auto *Clause = dyn_cast<OMPLastprivateClause>(C)) {
1573  SmallVector<Expr *, 8> PrivateCopies;
1574  for (Expr *DE : Clause->varlists()) {
1575  if (DE->isValueDependent() || DE->isTypeDependent()) {
1576  PrivateCopies.push_back(nullptr);
1577  continue;
1578  }
1579  auto *DRE = cast<DeclRefExpr>(DE->IgnoreParens());
1580  auto *VD = cast<VarDecl>(DRE->getDecl());
1581  QualType Type = VD->getType().getNonReferenceType();
1582  const DSAStackTy::DSAVarData DVar =
1583  DSAStack->getTopDSA(VD, /*FromParent=*/false);
1584  if (DVar.CKind == OMPC_lastprivate) {
1585  // Generate helper private variable and initialize it with the
1586  // default value. The address of the original variable is replaced
1587  // by the address of the new private variable in CodeGen. This new
1588  // variable is not added to IdResolver, so the code in the OpenMP
1589  // region uses original variable for proper diagnostics.
1590  VarDecl *VDPrivate = buildVarDecl(
1591  *this, DE->getExprLoc(), Type.getUnqualifiedType(),
1592  VD->getName(), VD->hasAttrs() ? &VD->getAttrs() : nullptr, DRE);
1593  ActOnUninitializedDecl(VDPrivate);
1594  if (VDPrivate->isInvalidDecl())
1595  continue;
1596  PrivateCopies.push_back(buildDeclRefExpr(
1597  *this, VDPrivate, DE->getType(), DE->getExprLoc()));
1598  } else {
1599  // The variable is also a firstprivate, so the initialization sequence
1600  // for the private copy has already been generated.
1601  PrivateCopies.push_back(nullptr);
1602  }
1603  }
1604  // Set initializers to private copies if no errors were found.
1605  if (PrivateCopies.size() == Clause->varlist_size())
1606  Clause->setPrivateCopies(PrivateCopies);
1607  }
1608  }
1609  }
1610 
1611  DSAStack->pop();
1612  DiscardCleanupsInEvaluationContext();
1613  PopExpressionEvaluationContext();
1614 }
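// Illustrative note (editorial addition, not part of the original source): the
// restriction checked above means a class type without an accessible default
// constructor may appear in a lastprivate clause only if it is also listed in
// a firstprivate clause, e.g.:
//
//   struct S { S(int); S(const S &); };                      // no S()
//   void f(S s) {
//   #pragma omp parallel for lastprivate(s)                  // error: private
//     for (int i = 0; i < 10; ++i) ;                         // copy needs S()
//   #pragma omp parallel for firstprivate(s) lastprivate(s)  // OK: copy-init
//     for (int i = 0; i < 10; ++i) ;
//   }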
1615 
1616 static bool FinishOpenMPLinearClause(OMPLinearClause &Clause, DeclRefExpr *IV,
1617  Expr *NumIterations, Sema &SemaRef,
1618  Scope *S, DSAStackTy *Stack);
1619 
1620 namespace {
1621 
1622 class VarDeclFilterCCC final : public CorrectionCandidateCallback {
1623 private:
1624  Sema &SemaRef;
1625 
1626 public:
1627  explicit VarDeclFilterCCC(Sema &S) : SemaRef(S) {}
1628  bool ValidateCandidate(const TypoCorrection &Candidate) override {
1629  NamedDecl *ND = Candidate.getCorrectionDecl();
1630  if (const auto *VD = dyn_cast_or_null<VarDecl>(ND)) {
1631  return VD->hasGlobalStorage() &&
1632  SemaRef.isDeclInScope(ND, SemaRef.getCurLexicalContext(),
1633  SemaRef.getCurScope());
1634  }
1635  return false;
1636  }
1637 };
1638 
1639 class VarOrFuncDeclFilterCCC final : public CorrectionCandidateCallback {
1640 private:
1641  Sema &SemaRef;
1642 
1643 public:
1644  explicit VarOrFuncDeclFilterCCC(Sema &S) : SemaRef(S) {}
1645  bool ValidateCandidate(const TypoCorrection &Candidate) override {
1646  NamedDecl *ND = Candidate.getCorrectionDecl();
1647  if (ND && (isa<VarDecl>(ND) || isa<FunctionDecl>(ND))) {
1648  return SemaRef.isDeclInScope(ND, SemaRef.getCurLexicalContext(),
1649  SemaRef.getCurScope());
1650  }
1651  return false;
1652  }
1653 };
1654 
1655 } // namespace
1656 
1657 ExprResult Sema::ActOnOpenMPIdExpression(Scope *CurScope,
1658  CXXScopeSpec &ScopeSpec,
1659  const DeclarationNameInfo &Id) {
1660  LookupResult Lookup(*this, Id, LookupOrdinaryName);
1661  LookupParsedName(Lookup, CurScope, &ScopeSpec, true);
1662 
1663  if (Lookup.isAmbiguous())
1664  return ExprError();
1665 
1666  VarDecl *VD;
1667  if (!Lookup.isSingleResult()) {
1668  if (TypoCorrection Corrected = CorrectTypo(
1669  Id, LookupOrdinaryName, CurScope, nullptr,
1670  llvm::make_unique<VarDeclFilterCCC>(*this), CTK_ErrorRecovery)) {
1671  diagnoseTypo(Corrected,
1672  PDiag(Lookup.empty()
1673  ? diag::err_undeclared_var_use_suggest
1674  : diag::err_omp_expected_var_arg_suggest)
1675  << Id.getName());
1676  VD = Corrected.getCorrectionDeclAs<VarDecl>();
1677  } else {
1678  Diag(Id.getLoc(), Lookup.empty() ? diag::err_undeclared_var_use
1679  : diag::err_omp_expected_var_arg)
1680  << Id.getName();
1681  return ExprError();
1682  }
1683  } else if (!(VD = Lookup.getAsSingle<VarDecl>())) {
1684  Diag(Id.getLoc(), diag::err_omp_expected_var_arg) << Id.getName();
1685  Diag(Lookup.getFoundDecl()->getLocation(), diag::note_declared_at);
1686  return ExprError();
1687  }
1688  Lookup.suppressDiagnostics();
1689 
1690  // OpenMP [2.9.2, Syntax, C/C++]
1691  // Variables must be file-scope, namespace-scope, or static block-scope.
1692  if (!VD->hasGlobalStorage()) {
1693  Diag(Id.getLoc(), diag::err_omp_global_var_arg)
1694  << getOpenMPDirectiveName(OMPD_threadprivate) << !VD->isStaticLocal();
1695  bool IsDecl =
1696  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
1697  Diag(VD->getLocation(),
1698  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
1699  << VD;
1700  return ExprError();
1701  }
1702 
1703  VarDecl *CanonicalVD = VD->getCanonicalDecl();
1704  NamedDecl *ND = CanonicalVD;
1705  // OpenMP [2.9.2, Restrictions, C/C++, p.2]
1706  // A threadprivate directive for file-scope variables must appear outside
1707  // any definition or declaration.
1708  if (CanonicalVD->getDeclContext()->isTranslationUnit() &&
1709  !getCurLexicalContext()->isTranslationUnit()) {
1710  Diag(Id.getLoc(), diag::err_omp_var_scope)
1711  << getOpenMPDirectiveName(OMPD_threadprivate) << VD;
1712  bool IsDecl =
1713  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
1714  Diag(VD->getLocation(),
1715  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
1716  << VD;
1717  return ExprError();
1718  }
1719  // OpenMP [2.9.2, Restrictions, C/C++, p.3]
1720  // A threadprivate directive for static class member variables must appear
1721  // in the class definition, in the same scope in which the member
1722  // variables are declared.
1723  if (CanonicalVD->isStaticDataMember() &&
1724  !CanonicalVD->getDeclContext()->Equals(getCurLexicalContext())) {
1725  Diag(Id.getLoc(), diag::err_omp_var_scope)
1726  << getOpenMPDirectiveName(OMPD_threadprivate) << VD;
1727  bool IsDecl =
1728  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
1729  Diag(VD->getLocation(),
1730  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
1731  << VD;
1732  return ExprError();
1733  }
1734  // OpenMP [2.9.2, Restrictions, C/C++, p.4]
1735  // A threadprivate directive for namespace-scope variables must appear
1736  // outside any definition or declaration other than the namespace
1737  // definition itself.
1738  if (CanonicalVD->getDeclContext()->isNamespace() &&
1739  (!getCurLexicalContext()->isFileContext() ||
1740  !getCurLexicalContext()->Encloses(CanonicalVD->getDeclContext()))) {
1741  Diag(Id.getLoc(), diag::err_omp_var_scope)
1742  << getOpenMPDirectiveName(OMPD_threadprivate) << VD;
1743  bool IsDecl =
1744  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
1745  Diag(VD->getLocation(),
1746  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
1747  << VD;
1748  return ExprError();
1749  }
1750  // OpenMP [2.9.2, Restrictions, C/C++, p.6]
1751  // A threadprivate directive for static block-scope variables must appear
1752  // in the scope of the variable and not in a nested scope.
1753  if (CanonicalVD->isStaticLocal() && CurScope &&
1754  !isDeclInScope(ND, getCurLexicalContext(), CurScope)) {
1755  Diag(Id.getLoc(), diag::err_omp_var_scope)
1756  << getOpenMPDirectiveName(OMPD_threadprivate) << VD;
1757  bool IsDecl =
1758  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
1759  Diag(VD->getLocation(),
1760  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
1761  << VD;
1762  return ExprError();
1763  }
1764 
1765  // OpenMP [2.9.2, Restrictions, C/C++, p.2-6]
1766  // A threadprivate directive must lexically precede all references to any
1767  // of the variables in its list.
1768  if (VD->isUsed() && !DSAStack->isThreadPrivate(VD)) {
1769  Diag(Id.getLoc(), diag::err_omp_var_used)
1770  << getOpenMPDirectiveName(OMPD_threadprivate) << VD;
1771  return ExprError();
1772  }
1773 
1774  QualType ExprType = VD->getType().getNonReferenceType();
1775  return DeclRefExpr::Create(Context, NestedNameSpecifierLoc(),
1776  SourceLocation(), VD,
1777  /*RefersToEnclosingVariableOrCapture=*/false,
1778  Id.getLoc(), ExprType, VK_LValue);
1779 }
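// Illustrative note (editorial addition, not part of the original source): the
// scope checks above reject, for example, a threadprivate directive that names
// a file-scope variable from inside a function, or a static local referenced
// from an inner block:
//
//   int gv;
//   void f() {
//   #pragma omp threadprivate(gv)      // error: must appear at file scope
//     static int sv;
//     {
//   #pragma omp threadprivate(sv)      // error: not in the scope of 'sv'
//     }
//   }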
1780 
1781 Sema::DeclGroupPtrTy
1782 Sema::ActOnOpenMPThreadprivateDirective(SourceLocation Loc,
1783  ArrayRef<Expr *> VarList) {
1784  if (OMPThreadPrivateDecl *D = CheckOMPThreadPrivateDecl(Loc, VarList)) {
1785  CurContext->addDecl(D);
1786  return DeclGroupPtrTy::make(DeclGroupRef(D));
1787  }
1788  return nullptr;
1789 }
1790 
1791 namespace {
1792 class LocalVarRefChecker final
1793  : public ConstStmtVisitor<LocalVarRefChecker, bool> {
1794  Sema &SemaRef;
1795 
1796 public:
1797  bool VisitDeclRefExpr(const DeclRefExpr *E) {
1798  if (const auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
1799  if (VD->hasLocalStorage()) {
1800  SemaRef.Diag(E->getBeginLoc(),
1801  diag::err_omp_local_var_in_threadprivate_init)
1802  << E->getSourceRange();
1803  SemaRef.Diag(VD->getLocation(), diag::note_defined_here)
1804  << VD << VD->getSourceRange();
1805  return true;
1806  }
1807  }
1808  return false;
1809  }
1810  bool VisitStmt(const Stmt *S) {
1811  for (const Stmt *Child : S->children()) {
1812  if (Child && Visit(Child))
1813  return true;
1814  }
1815  return false;
1816  }
1817  explicit LocalVarRefChecker(Sema &SemaRef) : SemaRef(SemaRef) {}
1818 };
1819 } // namespace
1820 
1821 OMPThreadPrivateDecl *
1822 Sema::CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList) {
1823  SmallVector<Expr *, 8> Vars;
1824  for (Expr *RefExpr : VarList) {
1825  auto *DE = cast<DeclRefExpr>(RefExpr);
1826  auto *VD = cast<VarDecl>(DE->getDecl());
1827  SourceLocation ILoc = DE->getExprLoc();
1828 
1829  // Mark variable as used.
1830  VD->setReferenced();
1831  VD->markUsed(Context);
1832 
1833  QualType QType = VD->getType();
1834  if (QType->isDependentType() || QType->isInstantiationDependentType()) {
1835  // It will be analyzed later.
1836  Vars.push_back(DE);
1837  continue;
1838  }
1839 
1840  // OpenMP [2.9.2, Restrictions, C/C++, p.10]
1841  // A threadprivate variable must not have an incomplete type.
1842  if (RequireCompleteType(ILoc, VD->getType(),
1843  diag::err_omp_threadprivate_incomplete_type)) {
1844  continue;
1845  }
1846 
1847  // OpenMP [2.9.2, Restrictions, C/C++, p.10]
1848  // A threadprivate variable must not have a reference type.
1849  if (VD->getType()->isReferenceType()) {
1850  Diag(ILoc, diag::err_omp_ref_type_arg)
1851  << getOpenMPDirectiveName(OMPD_threadprivate) << VD->getType();
1852  bool IsDecl =
1853  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
1854  Diag(VD->getLocation(),
1855  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
1856  << VD;
1857  continue;
1858  }
1859 
1860  // Check if this is a TLS variable. If TLS is not supported, produce the
1861  // corresponding diagnostic.
1862  if ((VD->getTLSKind() != VarDecl::TLS_None &&
1863  !(VD->hasAttr<OMPThreadPrivateDeclAttr>() &&
1864  getLangOpts().OpenMPUseTLS &&
1865  getASTContext().getTargetInfo().isTLSSupported())) ||
1866  (VD->getStorageClass() == SC_Register && VD->hasAttr<AsmLabelAttr>() &&
1867  !VD->isLocalVarDecl())) {
1868  Diag(ILoc, diag::err_omp_var_thread_local)
1869  << VD << ((VD->getTLSKind() != VarDecl::TLS_None) ? 0 : 1);
1870  bool IsDecl =
1871  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
1872  Diag(VD->getLocation(),
1873  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
1874  << VD;
1875  continue;
1876  }
1877 
1878  // Check if the initial value of the threadprivate variable references a
1879  // variable with local storage (this is not supported by the runtime).
1880  if (const Expr *Init = VD->getAnyInitializer()) {
1881  LocalVarRefChecker Checker(*this);
1882  if (Checker.Visit(Init))
1883  continue;
1884  }
1885 
1886  Vars.push_back(RefExpr);
1887  DSAStack->addDSA(VD, DE, OMPC_threadprivate);
1888  VD->addAttr(OMPThreadPrivateDeclAttr::CreateImplicit(
1889  Context, SourceRange(Loc, Loc)));
1890  if (ASTMutationListener *ML = Context.getASTMutationListener())
1891  ML->DeclarationMarkedOpenMPThreadPrivate(VD);
1892  }
1893  OMPThreadPrivateDecl *D = nullptr;
1894  if (!Vars.empty()) {
1895  D = OMPThreadPrivateDecl::Create(Context, getCurLexicalContext(), Loc,
1896  Vars);
1897  D->setAccess(AS_public);
1898  }
1899  return D;
1900 }
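// Illustrative note (editorial addition, not part of the original source):
// examples of list items rejected by the checks above:
//
//   struct Incomplete;
//   extern Incomplete ic;
//   int g;
//   int &gr = g;
//   #pragma omp threadprivate(ic)    // error: incomplete type
//   #pragma omp threadprivate(gr)    // error: reference type not allowed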
1901 
1902 static void reportOriginalDsa(Sema &SemaRef, const DSAStackTy *Stack,
1903  const ValueDecl *D,
1904  const DSAStackTy::DSAVarData &DVar,
1905  bool IsLoopIterVar = false) {
1906  if (DVar.RefExpr) {
1907  SemaRef.Diag(DVar.RefExpr->getExprLoc(), diag::note_omp_explicit_dsa)
1908  << getOpenMPClauseName(DVar.CKind);
1909  return;
1910  }
1911  enum {
1912  PDSA_StaticMemberShared,
1913  PDSA_StaticLocalVarShared,
1914  PDSA_LoopIterVarPrivate,
1915  PDSA_LoopIterVarLinear,
1916  PDSA_LoopIterVarLastprivate,
1917  PDSA_ConstVarShared,
1918  PDSA_GlobalVarShared,
1919  PDSA_TaskVarFirstprivate,
1920  PDSA_LocalVarPrivate,
1921  PDSA_Implicit
1922  } Reason = PDSA_Implicit;
1923  bool ReportHint = false;
1924  auto ReportLoc = D->getLocation();
1925  auto *VD = dyn_cast<VarDecl>(D);
1926  if (IsLoopIterVar) {
1927  if (DVar.CKind == OMPC_private)
1928  Reason = PDSA_LoopIterVarPrivate;
1929  else if (DVar.CKind == OMPC_lastprivate)
1930  Reason = PDSA_LoopIterVarLastprivate;
1931  else
1932  Reason = PDSA_LoopIterVarLinear;
1933  } else if (isOpenMPTaskingDirective(DVar.DKind) &&
1934  DVar.CKind == OMPC_firstprivate) {
1935  Reason = PDSA_TaskVarFirstprivate;
1936  ReportLoc = DVar.ImplicitDSALoc;
1937  } else if (VD && VD->isStaticLocal())
1938  Reason = PDSA_StaticLocalVarShared;
1939  else if (VD && VD->isStaticDataMember())
1940  Reason = PDSA_StaticMemberShared;
1941  else if (VD && VD->isFileVarDecl())
1942  Reason = PDSA_GlobalVarShared;
1943  else if (D->getType().isConstant(SemaRef.getASTContext()))
1944  Reason = PDSA_ConstVarShared;
1945  else if (VD && VD->isLocalVarDecl() && DVar.CKind == OMPC_private) {
1946  ReportHint = true;
1947  Reason = PDSA_LocalVarPrivate;
1948  }
1949  if (Reason != PDSA_Implicit) {
1950  SemaRef.Diag(ReportLoc, diag::note_omp_predetermined_dsa)
1951  << Reason << ReportHint
1952  << getOpenMPDirectiveName(Stack->getCurrentDirective());
1953  } else if (DVar.ImplicitDSALoc.isValid()) {
1954  SemaRef.Diag(DVar.ImplicitDSALoc, diag::note_omp_implicit_dsa)
1955  << getOpenMPClauseName(DVar.CKind);
1956  }
1957 }
1958 
1959 namespace {
1960 class DSAAttrChecker final : public StmtVisitor<DSAAttrChecker, void> {
1961  DSAStackTy *Stack;
1962  Sema &SemaRef;
1963  bool ErrorFound = false;
1964  CapturedStmt *CS = nullptr;
1965  llvm::SmallVector<Expr *, 4> ImplicitFirstprivate;
1966  llvm::SmallVector<Expr *, 4> ImplicitMap;
1967  Sema::VarsWithInheritedDSAType VarsWithInheritedDSA;
1968  llvm::SmallDenseSet<const ValueDecl *, 4> ImplicitDeclarations;
1969 
1970 public:
1971  void VisitDeclRefExpr(DeclRefExpr *E) {
1972  if (E->isTypeDependent() || E->isValueDependent() ||
1973  E->containsUnexpandedParameterPack() || E->isInstantiationDependent())
1974  return;
1975  if (auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
1976  VD = VD->getCanonicalDecl();
1977  // Skip internally declared variables.
1978  if (VD->hasLocalStorage() && !CS->capturesVariable(VD))
1979  return;
1980 
1981  DSAStackTy::DSAVarData DVar = Stack->getTopDSA(VD, /*FromParent=*/false);
1982  // Check if the variable has an explicit DSA set and stop the analysis if so.
1983  if (DVar.RefExpr || !ImplicitDeclarations.insert(VD).second)
1984  return;
1985 
1986  // Skip internally declared static variables.
1987  llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
1988  OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
1989  if (VD->hasGlobalStorage() && !CS->capturesVariable(VD) &&
1990  (!Res || *Res != OMPDeclareTargetDeclAttr::MT_Link))
1991  return;
1992 
1993  SourceLocation ELoc = E->getExprLoc();
1994  OpenMPDirectiveKind DKind = Stack->getCurrentDirective();
1995  // The default(none) clause requires that each variable that is referenced
1996  // in the construct, and does not have a predetermined data-sharing
1997  // attribute, must have its data-sharing attribute explicitly determined
1998  // by being listed in a data-sharing attribute clause.
1999  if (DVar.CKind == OMPC_unknown && Stack->getDefaultDSA() == DSA_none &&
2000  isParallelOrTaskRegion(DKind) &&
2001  VarsWithInheritedDSA.count(VD) == 0) {
2002  VarsWithInheritedDSA[VD] = E;
2003  return;
2004  }
2005 
2006  if (isOpenMPTargetExecutionDirective(DKind) &&
2007  !Stack->isLoopControlVariable(VD).first) {
2008  if (!Stack->checkMappableExprComponentListsForDecl(
2009  VD, /*CurrentRegionOnly=*/true,
2010  [](OMPClauseMappableExprCommon::MappableExprComponentListRef
2011  StackComponents,
2012  OpenMPClauseKind) {
2013  // The variable is used if it has been marked as an array, an array
2014  // section, or the variable itself.
2015  return StackComponents.size() == 1 ||
2016  std::all_of(
2017  std::next(StackComponents.rbegin()),
2018  StackComponents.rend(),
2019  [](const OMPClauseMappableExprCommon::
2020  MappableComponent &MC) {
2021  return MC.getAssociatedDeclaration() ==
2022  nullptr &&
2023  (isa<OMPArraySectionExpr>(
2024  MC.getAssociatedExpression()) ||
2025  isa<ArraySubscriptExpr>(
2026  MC.getAssociatedExpression()));
2027  });
2028  })) {
2029  bool IsFirstprivate = false;
2030  // By default lambdas are captured as firstprivates.
2031  if (const auto *RD =
2032  VD->getType().getNonReferenceType()->getAsCXXRecordDecl())
2033  IsFirstprivate = RD->isLambda();
2034  IsFirstprivate =
2035  IsFirstprivate ||
2036  (VD->getType().getNonReferenceType()->isScalarType() &&
2037  Stack->getDefaultDMA() != DMA_tofrom_scalar && !Res);
2038  if (IsFirstprivate)
2039  ImplicitFirstprivate.emplace_back(E);
2040  else
2041  ImplicitMap.emplace_back(E);
2042  return;
2043  }
2044  }
2045 
2046  // OpenMP [2.9.3.6, Restrictions, p.2]
2047  // A list item that appears in a reduction clause of the innermost
2048  // enclosing worksharing or parallel construct may not be accessed in an
2049  // explicit task.
2050  DVar = Stack->hasInnermostDSA(
2051  VD, [](OpenMPClauseKind C) { return C == OMPC_reduction; },
2052  [](OpenMPDirectiveKind K) {
2053  return isOpenMPParallelDirective(K) ||
2054  isOpenMPWorksharingDirective(K);
2055  },
2056  /*FromParent=*/true);
2057  if (isOpenMPTaskingDirective(DKind) && DVar.CKind == OMPC_reduction) {
2058  ErrorFound = true;
2059  SemaRef.Diag(ELoc, diag::err_omp_reduction_in_task);
2060  reportOriginalDsa(SemaRef, Stack, VD, DVar);
2061  return;
2062  }
2063 
2064  // Define implicit data-sharing attributes for task.
2065  DVar = Stack->getImplicitDSA(VD, /*FromParent=*/false);
2066  if (isOpenMPTaskingDirective(DKind) && DVar.CKind != OMPC_shared &&
2067  !Stack->isLoopControlVariable(VD).first)
2068  ImplicitFirstprivate.push_back(E);
2069  }
2070  }
2071  void VisitMemberExpr(MemberExpr *E) {
2072  if (E->isTypeDependent() || E->isValueDependent() ||
2073  E->containsUnexpandedParameterPack() || E->isInstantiationDependent())
2074  return;
2075  auto *FD = dyn_cast<FieldDecl>(E->getMemberDecl());
2076  OpenMPDirectiveKind DKind = Stack->getCurrentDirective();
2077  if (isa<CXXThisExpr>(E->getBase()->IgnoreParens())) {
2078  if (!FD)
2079  return;
2080  DSAStackTy::DSAVarData DVar = Stack->getTopDSA(FD, /*FromParent=*/false);
2081  // Check if the variable has an explicit DSA set and stop the analysis
2082  // if so.
2083  if (DVar.RefExpr || !ImplicitDeclarations.insert(FD).second)
2084  return;
2085 
2086  if (isOpenMPTargetExecutionDirective(DKind) &&
2087  !Stack->isLoopControlVariable(FD).first &&
2088  !Stack->checkMappableExprComponentListsForDecl(
2089  FD, /*CurrentRegionOnly=*/true,
2090  [](OMPClauseMappableExprCommon::MappableExprComponentListRef
2091  StackComponents,
2092  OpenMPClauseKind) {
2093  return isa<CXXThisExpr>(
2094  cast<MemberExpr>(
2095  StackComponents.back().getAssociatedExpression())
2096  ->getBase()
2097  ->IgnoreParens());
2098  })) {
2099  // OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, C/C++, p.3]
2100  // A bit-field cannot appear in a map clause.
2101  //
2102  if (FD->isBitField())
2103  return;
2104  ImplicitMap.emplace_back(E);
2105  return;
2106  }
2107 
2108  SourceLocation ELoc = E->getExprLoc();
2109  // OpenMP [2.9.3.6, Restrictions, p.2]
2110  // A list item that appears in a reduction clause of the innermost
2111  // enclosing worksharing or parallel construct may not be accessed in
2112  // an explicit task.
2113  DVar = Stack->hasInnermostDSA(
2114  FD, [](OpenMPClauseKind C) { return C == OMPC_reduction; },
2115  [](OpenMPDirectiveKind K) {
2116  return isOpenMPParallelDirective(K) ||
2117  isOpenMPWorksharingDirective(K);
2118  },
2119  /*FromParent=*/true);
2120  if (isOpenMPTaskingDirective(DKind) && DVar.CKind == OMPC_reduction) {
2121  ErrorFound = true;
2122  SemaRef.Diag(ELoc, diag::err_omp_reduction_in_task);
2123  reportOriginalDsa(SemaRef, Stack, FD, DVar);
2124  return;
2125  }
2126 
2127  // Define implicit data-sharing attributes for task.
2128  DVar = Stack->getImplicitDSA(FD, /*FromParent=*/false);
2129  if (isOpenMPTaskingDirective(DKind) && DVar.CKind != OMPC_shared &&
2130  !Stack->isLoopControlVariable(FD).first)
2131  ImplicitFirstprivate.push_back(E);
2132  return;
2133  }
2134  if (isOpenMPTargetExecutionDirective(DKind)) {
2135  OMPClauseMappableExprCommon::MappableExprComponentList CurComponents;
2136  if (!checkMapClauseExpressionBase(SemaRef, E, CurComponents, OMPC_map,
2137  /*NoDiagnose=*/true))
2138  return;
2139  const auto *VD = cast<ValueDecl>(
2140  CurComponents.back().getAssociatedDeclaration()->getCanonicalDecl());
2141  if (!Stack->checkMappableExprComponentListsForDecl(
2142  VD, /*CurrentRegionOnly=*/true,
2143  [&CurComponents](
2144  OMPClauseMappableExprCommon::MappableExprComponentListRef
2145  StackComponents,
2146  OpenMPClauseKind) {
2147  auto CCI = CurComponents.rbegin();
2148  auto CCE = CurComponents.rend();
2149  for (const auto &SC : llvm::reverse(StackComponents)) {
2150  // Do both expressions have the same kind?
2151  if (CCI->getAssociatedExpression()->getStmtClass() !=
2152  SC.getAssociatedExpression()->getStmtClass())
2153  if (!(isa<OMPArraySectionExpr>(
2154  SC.getAssociatedExpression()) &&
2155  isa<ArraySubscriptExpr>(
2156  CCI->getAssociatedExpression())))
2157  return false;
2158 
2159  const Decl *CCD = CCI->getAssociatedDeclaration();
2160  const Decl *SCD = SC.getAssociatedDeclaration();
2161  CCD = CCD ? CCD->getCanonicalDecl() : nullptr;
2162  SCD = SCD ? SCD->getCanonicalDecl() : nullptr;
2163  if (SCD != CCD)
2164  return false;
2165  std::advance(CCI, 1);
2166  if (CCI == CCE)
2167  break;
2168  }
2169  return true;
2170  })) {
2171  Visit(E->getBase());
2172  }
2173  } else {
2174  Visit(E->getBase());
2175  }
2176  }
2177  void VisitOMPExecutableDirective(OMPExecutableDirective *S) {
2178  for (OMPClause *C : S->clauses()) {
2179  // Skip analysis of arguments of implicitly defined firstprivate clause
2180  // for task|target directives.
2181  // Skip analysis of arguments of implicitly defined map clause for target
2182  // directives.
2183  if (C && !((isa<OMPFirstprivateClause>(C) || isa<OMPMapClause>(C)) &&
2184  C->isImplicit())) {
2185  for (Stmt *CC : C->children()) {
2186  if (CC)
2187  Visit(CC);
2188  }
2189  }
2190  }
2191  }
2192  void VisitStmt(Stmt *S) {
2193  for (Stmt *C : S->children()) {
2194  if (C && !isa<OMPExecutableDirective>(C))
2195  Visit(C);
2196  }
2197  }
2198 
2199  bool isErrorFound() const { return ErrorFound; }
2200  ArrayRef<Expr *> getImplicitFirstprivate() const {
2201  return ImplicitFirstprivate;
2202  }
2203  ArrayRef<Expr *> getImplicitMap() const { return ImplicitMap; }
2204  const Sema::VarsWithInheritedDSAType &getVarsWithInheritedDSA() const {
2205  return VarsWithInheritedDSA;
2206  }
2207 
2208  DSAAttrChecker(DSAStackTy *S, Sema &SemaRef, CapturedStmt *CS)
2209  : Stack(S), SemaRef(SemaRef), ErrorFound(false), CS(CS) {}
2210 };
2211 } // namespace
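// Illustrative note (editorial addition, not part of the original source): the
// default(none) handling above is what rejects references to variables that
// have no explicit data-sharing attribute, e.g.:
//
//   void f() {
//     int x = 0;
//   #pragma omp parallel default(none)             // error: 'x' must have its
//     { x++; }                                     // data-sharing specified
//   #pragma omp parallel default(none) firstprivate(x)
//     { x++; }                                     // OK
//   }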
2212 
2213 void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
2214  switch (DKind) {
2215  case OMPD_parallel:
2216  case OMPD_parallel_for:
2217  case OMPD_parallel_for_simd:
2218  case OMPD_parallel_sections:
2219  case OMPD_teams:
2220  case OMPD_teams_distribute:
2221  case OMPD_teams_distribute_simd: {
2222  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
2223  QualType KmpInt32PtrTy =
2224  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
2225  Sema::CapturedParamNameType Params[] = {
2226  std::make_pair(".global_tid.", KmpInt32PtrTy),
2227  std::make_pair(".bound_tid.", KmpInt32PtrTy),
2228  std::make_pair(StringRef(), QualType()) // __context with shared vars
2229  };
2230  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2231  Params);
2232  break;
2233  }
2234  case OMPD_target_teams:
2235  case OMPD_target_parallel:
2236  case OMPD_target_parallel_for:
2237  case OMPD_target_parallel_for_simd:
2238  case OMPD_target_teams_distribute:
2239  case OMPD_target_teams_distribute_simd: {
2240  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
2241  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
2242  QualType KmpInt32PtrTy =
2243  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
2244  QualType Args[] = {VoidPtrTy};
2245  FunctionProtoType::ExtProtoInfo EPI;
2246  EPI.Variadic = true;
2247  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
2248  Sema::CapturedParamNameType Params[] = {
2249  std::make_pair(".global_tid.", KmpInt32Ty),
2250  std::make_pair(".part_id.", KmpInt32PtrTy),
2251  std::make_pair(".privates.", VoidPtrTy),
2252  std::make_pair(
2253  ".copy_fn.",
2254  Context.getPointerType(CopyFnType).withConst().withRestrict()),
2255  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
2256  std::make_pair(StringRef(), QualType()) // __context with shared vars
2257  };
2258  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2259  Params);
2260  // Mark this captured region as inlined, because we don't use the
2261  // outlined function directly.
2262  getCurCapturedRegion()->TheCapturedDecl->addAttr(
2263  AlwaysInlineAttr::CreateImplicit(
2264  Context, AlwaysInlineAttr::Keyword_forceinline));
2265  Sema::CapturedParamNameType ParamsTarget[] = {
2266  std::make_pair(StringRef(), QualType()) // __context with shared vars
2267  };
2268  // Start a captured region for 'target' with no implicit parameters.
2269  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2270  ParamsTarget);
2271  Sema::CapturedParamNameType ParamsTeamsOrParallel[] = {
2272  std::make_pair(".global_tid.", KmpInt32PtrTy),
2273  std::make_pair(".bound_tid.", KmpInt32PtrTy),
2274  std::make_pair(StringRef(), QualType()) // __context with shared vars
2275  };
2276  // Start a captured region for 'teams' or 'parallel'. Both regions have
2277  // the same implicit parameters.
2278  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2279  ParamsTeamsOrParallel);
2280  break;
2281  }
2282  case OMPD_target:
2283  case OMPD_target_simd: {
2284  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
2285  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
2286  QualType KmpInt32PtrTy =
2287  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
2288  QualType Args[] = {VoidPtrTy};
2289  FunctionProtoType::ExtProtoInfo EPI;
2290  EPI.Variadic = true;
2291  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
2292  Sema::CapturedParamNameType Params[] = {
2293  std::make_pair(".global_tid.", KmpInt32Ty),
2294  std::make_pair(".part_id.", KmpInt32PtrTy),
2295  std::make_pair(".privates.", VoidPtrTy),
2296  std::make_pair(
2297  ".copy_fn.",
2298  Context.getPointerType(CopyFnType).withConst().withRestrict()),
2299  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
2300  std::make_pair(StringRef(), QualType()) // __context with shared vars
2301  };
2302  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2303  Params);
2304  // Mark this captured region as inlined, because we don't use the
2305  // outlined function directly.
2306  getCurCapturedRegion()->TheCapturedDecl->addAttr(
2307  AlwaysInlineAttr::CreateImplicit(
2308  Context, AlwaysInlineAttr::Keyword_forceinline));
2309  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2310  std::make_pair(StringRef(), QualType()));
2311  break;
2312  }
2313  case OMPD_simd:
2314  case OMPD_for:
2315  case OMPD_for_simd:
2316  case OMPD_sections:
2317  case OMPD_section:
2318  case OMPD_single:
2319  case OMPD_master:
2320  case OMPD_critical:
2321  case OMPD_taskgroup:
2322  case OMPD_distribute:
2323  case OMPD_distribute_simd:
2324  case OMPD_ordered:
2325  case OMPD_atomic:
2326  case OMPD_target_data: {
2327  Sema::CapturedParamNameType Params[] = {
2328  std::make_pair(StringRef(), QualType()) // __context with shared vars
2329  };
2330  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2331  Params);
2332  break;
2333  }
2334  case OMPD_task: {
2335  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
2336  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
2337  QualType KmpInt32PtrTy =
2338  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
2339  QualType Args[] = {VoidPtrTy};
2340  FunctionProtoType::ExtProtoInfo EPI;
2341  EPI.Variadic = true;
2342  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
2343  Sema::CapturedParamNameType Params[] = {
2344  std::make_pair(".global_tid.", KmpInt32Ty),
2345  std::make_pair(".part_id.", KmpInt32PtrTy),
2346  std::make_pair(".privates.", VoidPtrTy),
2347  std::make_pair(
2348  ".copy_fn.",
2349  Context.getPointerType(CopyFnType).withConst().withRestrict()),
2350  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
2351  std::make_pair(StringRef(), QualType()) // __context with shared vars
2352  };
2353  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2354  Params);
2355  // Mark this captured region as inlined, because we don't use the
2356  // outlined function directly.
2357  getCurCapturedRegion()->TheCapturedDecl->addAttr(
2358  AlwaysInlineAttr::CreateImplicit(
2359  Context, AlwaysInlineAttr::Keyword_forceinline));
2360  break;
2361  }
2362  case OMPD_taskloop:
2363  case OMPD_taskloop_simd: {
2364  QualType KmpInt32Ty =
2365  Context.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1)
2366  .withConst();
2367  QualType KmpUInt64Ty =
2368  Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0)
2369  .withConst();
2370  QualType KmpInt64Ty =
2371  Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1)
2372  .withConst();
2373  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
2374  QualType KmpInt32PtrTy =
2375  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
2376  QualType Args[] = {VoidPtrTy};
2377  FunctionProtoType::ExtProtoInfo EPI;
2378  EPI.Variadic = true;
2379  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
2380  Sema::CapturedParamNameType Params[] = {
2381  std::make_pair(".global_tid.", KmpInt32Ty),
2382  std::make_pair(".part_id.", KmpInt32PtrTy),
2383  std::make_pair(".privates.", VoidPtrTy),
2384  std::make_pair(
2385  ".copy_fn.",
2386  Context.getPointerType(CopyFnType).withConst().withRestrict()),
2387  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
2388  std::make_pair(".lb.", KmpUInt64Ty),
2389  std::make_pair(".ub.", KmpUInt64Ty),
2390  std::make_pair(".st.", KmpInt64Ty),
2391  std::make_pair(".liter.", KmpInt32Ty),
2392  std::make_pair(".reductions.", VoidPtrTy),
2393  std::make_pair(StringRef(), QualType()) // __context with shared vars
2394  };
2395  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2396  Params);
2397  // Mark this captured region as inlined, because we don't use the
2398  // outlined function directly.
2399  getCurCapturedRegion()->TheCapturedDecl->addAttr(
2400  AlwaysInlineAttr::CreateImplicit(
2401  Context, AlwaysInlineAttr::Keyword_forceinline));
2402  break;
2403  }
2404  case OMPD_distribute_parallel_for_simd:
2405  case OMPD_distribute_parallel_for: {
2406  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
2407  QualType KmpInt32PtrTy =
2408  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
2409  Sema::CapturedParamNameType Params[] = {
2410  std::make_pair(".global_tid.", KmpInt32PtrTy),
2411  std::make_pair(".bound_tid.", KmpInt32PtrTy),
2412  std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
2413  std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
2414  std::make_pair(StringRef(), QualType()) // __context with shared vars
2415  };
2416  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2417  Params);
2418  break;
2419  }
2420  case OMPD_target_teams_distribute_parallel_for:
2421  case OMPD_target_teams_distribute_parallel_for_simd: {
2422  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
2423  QualType KmpInt32PtrTy =
2424  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
2425  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
2426 
2427  QualType Args[] = {VoidPtrTy};
2428  FunctionProtoType::ExtProtoInfo EPI;
2429  EPI.Variadic = true;
2430  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
2431  Sema::CapturedParamNameType Params[] = {
2432  std::make_pair(".global_tid.", KmpInt32Ty),
2433  std::make_pair(".part_id.", KmpInt32PtrTy),
2434  std::make_pair(".privates.", VoidPtrTy),
2435  std::make_pair(
2436  ".copy_fn.",
2437  Context.getPointerType(CopyFnType).withConst().withRestrict()),
2438  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
2439  std::make_pair(StringRef(), QualType()) // __context with shared vars
2440  };
2441  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2442  Params);
2443  // Mark this captured region as inlined, because we don't use the
2444  // outlined function directly.
2445  getCurCapturedRegion()->TheCapturedDecl->addAttr(
2446  AlwaysInlineAttr::CreateImplicit(
2447  Context, AlwaysInlineAttr::Keyword_forceinline));
2448  Sema::CapturedParamNameType ParamsTarget[] = {
2449  std::make_pair(StringRef(), QualType()) // __context with shared vars
2450  };
2451  // Start a captured region for 'target' with no implicit parameters.
2452  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2453  ParamsTarget);
2454 
2455  Sema::CapturedParamNameType ParamsTeams[] = {
2456  std::make_pair(".global_tid.", KmpInt32PtrTy),
2457  std::make_pair(".bound_tid.", KmpInt32PtrTy),
2458  std::make_pair(StringRef(), QualType()) // __context with shared vars
2459  };
2460  // Start a captured region for 'teams'.
2461  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2462  ParamsTeams);
2463 
2464  Sema::CapturedParamNameType ParamsParallel[] = {
2465  std::make_pair(".global_tid.", KmpInt32PtrTy),
2466  std::make_pair(".bound_tid.", KmpInt32PtrTy),
2467  std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
2468  std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
2469  std::make_pair(StringRef(), QualType()) // __context with shared vars
2470  };
2471  // Start a captured region for 'teams' or 'parallel'. Both regions have
2472  // the same implicit parameters.
2473  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2474  ParamsParallel);
2475  break;
2476  }
2477 
2478  case OMPD_teams_distribute_parallel_for:
2479  case OMPD_teams_distribute_parallel_for_simd: {
2480  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
2481  QualType KmpInt32PtrTy =
2482  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
2483 
2484  Sema::CapturedParamNameType ParamsTeams[] = {
2485  std::make_pair(".global_tid.", KmpInt32PtrTy),
2486  std::make_pair(".bound_tid.", KmpInt32PtrTy),
2487  std::make_pair(StringRef(), QualType()) // __context with shared vars
2488  };
2489  // Start a captured region for 'teams'.
2490  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2491  ParamsTeams);
2492 
2493  Sema::CapturedParamNameType ParamsParallel[] = {
2494  std::make_pair(".global_tid.", KmpInt32PtrTy),
2495  std::make_pair(".bound_tid.", KmpInt32PtrTy),
2496  std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
2497  std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
2498  std::make_pair(StringRef(), QualType()) // __context with shared vars
2499  };
2500  // Start a captured region for 'teams' or 'parallel'. Both regions have
2501  // the same implicit parameters.
2502  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2503  ParamsParallel);
2504  break;
2505  }
2506  case OMPD_target_update:
2507  case OMPD_target_enter_data:
2508  case OMPD_target_exit_data: {
2509  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
2510  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
2511  QualType KmpInt32PtrTy =
2512  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
2513  QualType Args[] = {VoidPtrTy};
2514  FunctionProtoType::ExtProtoInfo EPI;
2515  EPI.Variadic = true;
2516  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
2517  Sema::CapturedParamNameType Params[] = {
2518  std::make_pair(".global_tid.", KmpInt32Ty),
2519  std::make_pair(".part_id.", KmpInt32PtrTy),
2520  std::make_pair(".privates.", VoidPtrTy),
2521  std::make_pair(
2522  ".copy_fn.",
2523  Context.getPointerType(CopyFnType).withConst().withRestrict()),
2524  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
2525  std::make_pair(StringRef(), QualType()) // __context with shared vars
2526  };
2527  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2528  Params);
2529  // Mark this captured region as inlined, because we don't use the
2530  // outlined function directly.
2531  getCurCapturedRegion()->TheCapturedDecl->addAttr(
2532  AlwaysInlineAttr::CreateImplicit(
2533  Context, AlwaysInlineAttr::Keyword_forceinline));
2534  break;
2535  }
2536  case OMPD_threadprivate:
2537  case OMPD_taskyield:
2538  case OMPD_barrier:
2539  case OMPD_taskwait:
2540  case OMPD_cancellation_point:
2541  case OMPD_cancel:
2542  case OMPD_flush:
2543  case OMPD_declare_reduction:
2544  case OMPD_declare_simd:
2545  case OMPD_declare_target:
2546  case OMPD_end_declare_target:
2547  llvm_unreachable("OpenMP Directive is not allowed");
2548  case OMPD_unknown:
2549  llvm_unreachable("Unknown OpenMP directive");
2550  }
2551 }
2552 
2553 int Sema::getOpenMPCaptureLevels(OpenMPDirectiveKind DKind) {
2554  SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
2555  getOpenMPCaptureRegions(CaptureRegions, DKind);
2556  return CaptureRegions.size();
2557 }
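// Illustrative sketch (editorial addition, not part of the original source):
// the helper above reports how many nested captured regions a (possibly
// combined) directive opens, so an error path can unwind all of them, e.g.
//
//   int Levels = getOpenMPCaptureLevels(OMPD_target_parallel);
//   while (--Levels >= 0)
//     ActOnCapturedRegionError();    // close each pending captured region
//
// which mirrors what CaptureRegionUnwinderRAII below does in its destructor.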
2558 
2559 static OMPCapturedExprDecl *buildCaptureDecl(Sema &S, IdentifierInfo *Id,
2560  Expr *CaptureExpr, bool WithInit,
2561  bool AsExpression) {
2562  assert(CaptureExpr);
2563  ASTContext &C = S.getASTContext();
2564  Expr *Init = AsExpression ? CaptureExpr : CaptureExpr->IgnoreImpCasts();
2565  QualType Ty = Init->getType();
2566  if (CaptureExpr->getObjectKind() == OK_Ordinary && CaptureExpr->isGLValue()) {
2567  if (S.getLangOpts().CPlusPlus) {
2568  Ty = C.getLValueReferenceType(Ty);
2569  } else {
2570  Ty = C.getPointerType(Ty);
2571  ExprResult Res =
2572  S.CreateBuiltinUnaryOp(CaptureExpr->getExprLoc(), UO_AddrOf, Init);
2573  if (!Res.isUsable())
2574  return nullptr;
2575  Init = Res.get();
2576  }
2577  WithInit = true;
2578  }
2579  auto *CED = OMPCapturedExprDecl::Create(C, S.CurContext, Id, Ty,
2580  CaptureExpr->getBeginLoc());
2581  if (!WithInit)
2582  CED->addAttr(OMPCaptureNoInitAttr::CreateImplicit(C));
2583  S.CurContext->addHiddenDecl(CED);
2584  S.AddInitializerToDecl(CED, Init, /*DirectInit=*/false);
2585  return CED;
2586 }
2587 
2588 static DeclRefExpr *buildCapture(Sema &S, ValueDecl *D, Expr *CaptureExpr,
2589  bool WithInit) {
2590  OMPCapturedExprDecl *CD;
2591  if (VarDecl *VD = S.isOpenMPCapturedDecl(D))
2592  CD = cast<OMPCapturedExprDecl>(VD);
2593  else
2594  CD = buildCaptureDecl(S, D->getIdentifier(), CaptureExpr, WithInit,
2595  /*AsExpression=*/false);
2596  return buildDeclRefExpr(S, CD, CD->getType().getNonReferenceType(),
2597  CaptureExpr->getExprLoc());
2598 }
2599 
2600 static ExprResult buildCapture(Sema &S, Expr *CaptureExpr, DeclRefExpr *&Ref) {
2601  CaptureExpr = S.DefaultLvalueConversion(CaptureExpr).get();
2602  if (!Ref) {
2603  OMPCapturedExprDecl *CD = buildCaptureDecl(
2604  S, &S.getASTContext().Idents.get(".capture_expr."), CaptureExpr,
2605  /*WithInit=*/true, /*AsExpression=*/true);
2606  Ref = buildDeclRefExpr(S, CD, CD->getType().getNonReferenceType(),
2607  CaptureExpr->getExprLoc());
2608  }
2609  ExprResult Res = Ref;
2610  if (!S.getLangOpts().CPlusPlus &&
2611  CaptureExpr->getObjectKind() == OK_Ordinary && CaptureExpr->isGLValue() &&
2612  Ref->getType()->isPointerType()) {
2613  Res = S.CreateBuiltinUnaryOp(CaptureExpr->getExprLoc(), UO_Deref, Ref);
2614  if (!Res.isUsable())
2615  return ExprError();
2616  }
2617  return S.DefaultLvalueConversion(Res.get());
2618 }
2619 
2620 namespace {
2621 // OpenMP directives parsed in this section are represented as a
2622 // CapturedStatement with an associated statement. If a syntax error
2623 // is detected during the parsing of the associated statement, the
2624 // compiler must abort processing and close the CapturedStatement.
2625 //
2626 // Combined directives such as 'target parallel' have more than one
2627 // nested CapturedStatements. This RAII ensures that we unwind out
2628 // of all the nested CapturedStatements when an error is found.
2629 class CaptureRegionUnwinderRAII {
2630 private:
2631  Sema &S;
2632  bool &ErrorFound;
2633  OpenMPDirectiveKind DKind = OMPD_unknown;
2634 
2635 public:
2636  CaptureRegionUnwinderRAII(Sema &S, bool &ErrorFound,
2637  OpenMPDirectiveKind DKind)
2638  : S(S), ErrorFound(ErrorFound), DKind(DKind) {}
2639  ~CaptureRegionUnwinderRAII() {
2640  if (ErrorFound) {
2641  int ThisCaptureLevel = S.getOpenMPCaptureLevels(DKind);
2642  while (--ThisCaptureLevel >= 0)
2643  S.ActOnCapturedRegionError();
2644  }
2645  }
2646 };
2647 } // namespace
2648 
2649 StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
2650  ArrayRef<OMPClause *> Clauses) {
2651  bool ErrorFound = false;
2652  CaptureRegionUnwinderRAII CaptureRegionUnwinder(
2653  *this, ErrorFound, DSAStack->getCurrentDirective());
2654  if (!S.isUsable()) {
2655  ErrorFound = true;
2656  return StmtError();
2657  }
2658 
2659  SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
2660  getOpenMPCaptureRegions(CaptureRegions, DSAStack->getCurrentDirective());
2661  OMPOrderedClause *OC = nullptr;
2662  OMPScheduleClause *SC = nullptr;
2663  SmallVector<const OMPLinearClause *, 4> LCs;
2664  SmallVector<const OMPClauseWithPreInit *, 4> PICs;
2665  // This is required for proper codegen.
2666  for (OMPClause *Clause : Clauses) {
2667  if (isOpenMPTaskingDirective(DSAStack->getCurrentDirective()) &&
2668  Clause->getClauseKind() == OMPC_in_reduction) {
2669  // Capture taskgroup task_reduction descriptors inside the tasking regions
2670  // with the corresponding in_reduction items.
2671  auto *IRC = cast<OMPInReductionClause>(Clause);
2672  for (Expr *E : IRC->taskgroup_descriptors())
2673  if (E)
2674  MarkDeclarationsReferencedInExpr(E);
2675  }
2676  if (isOpenMPPrivate(Clause->getClauseKind()) ||
2677  Clause->getClauseKind() == OMPC_copyprivate ||
2678  (getLangOpts().OpenMPUseTLS &&
2679  getASTContext().getTargetInfo().isTLSSupported() &&
2680  Clause->getClauseKind() == OMPC_copyin)) {
2681  DSAStack->setForceVarCapturing(Clause->getClauseKind() == OMPC_copyin);
2682  // Mark all variables in private list clauses as used in inner region.
2683  for (Stmt *VarRef : Clause->children()) {
2684  if (auto *E = cast_or_null<Expr>(VarRef)) {
2685  MarkDeclarationsReferencedInExpr(E);
2686  }
2687  }
2688  DSAStack->setForceVarCapturing(/*V=*/false);
2689  } else if (CaptureRegions.size() > 1 ||
2690  CaptureRegions.back() != OMPD_unknown) {
2691  if (auto *C = OMPClauseWithPreInit::get(Clause))
2692  PICs.push_back(C);
2693  if (auto *C = OMPClauseWithPostUpdate::get(Clause)) {
2694  if (Expr *E = C->getPostUpdateExpr())
2695  MarkDeclarationsReferencedInExpr(E);
2696  }
2697  }
2698  if (Clause->getClauseKind() == OMPC_schedule)
2699  SC = cast<OMPScheduleClause>(Clause);
2700  else if (Clause->getClauseKind() == OMPC_ordered)
2701  OC = cast<OMPOrderedClause>(Clause);
2702  else if (Clause->getClauseKind() == OMPC_linear)
2703  LCs.push_back(cast<OMPLinearClause>(Clause));
2704  }
2705  // OpenMP, 2.7.1 Loop Construct, Restrictions
2706  // The nonmonotonic modifier cannot be specified if an ordered clause is
2707  // specified.
2708  if (SC &&
2709  (SC->getFirstScheduleModifier() == OMPC_SCHEDULE_MODIFIER_nonmonotonic ||
2710  SC->getSecondScheduleModifier() ==
2711  OMPC_SCHEDULE_MODIFIER_nonmonotonic) &&
2712  OC) {
2713  Diag(SC->getFirstScheduleModifier() == OMPC_SCHEDULE_MODIFIER_nonmonotonic
2714  ? SC->getFirstScheduleModifierLoc()
2715  : SC->getSecondScheduleModifierLoc(),
2716  diag::err_omp_schedule_nonmonotonic_ordered)
2717  << SourceRange(OC->getBeginLoc(), OC->getEndLoc());
2718  ErrorFound = true;
2719  }
2720  if (!LCs.empty() && OC && OC->getNumForLoops()) {
2721  for (const OMPLinearClause *C : LCs) {
2722  Diag(C->getBeginLoc(), diag::err_omp_linear_ordered)
2723  << SourceRange(OC->getBeginLoc(), OC->getEndLoc());
2724  }
2725  ErrorFound = true;
2726  }
2727  if (isOpenMPWorksharingDirective(DSAStack->getCurrentDirective()) &&
2728  isOpenMPSimdDirective(DSAStack->getCurrentDirective()) && OC &&
2729  OC->getNumForLoops()) {
2730  Diag(OC->getBeginLoc(), diag::err_omp_ordered_simd)
2731  << getOpenMPDirectiveName(DSAStack->getCurrentDirective());
2732  ErrorFound = true;
2733  }
2734  if (ErrorFound) {
2735  return StmtError();
2736  }
2737  StmtResult SR = S;
2738  for (OpenMPDirectiveKind ThisCaptureRegion : llvm::reverse(CaptureRegions)) {
2739  // Mark all variables in private list clauses as used in inner region.
2740  // Required for proper codegen of combined directives.
2741  // TODO: add processing for other clauses.
2742  if (ThisCaptureRegion != OMPD_unknown) {
2743  for (const clang::OMPClauseWithPreInit *C : PICs) {
2744  OpenMPDirectiveKind CaptureRegion = C->getCaptureRegion();
2745  // Find the particular capture region for the clause if the
2746  // directive is a combined one with multiple capture regions.
2747  // If the directive is not a combined one, the capture region
2748  // associated with the clause is OMPD_unknown and is generated
2749  // only once.
2750  if (CaptureRegion == ThisCaptureRegion ||
2751  CaptureRegion == OMPD_unknown) {
2752  if (auto *DS = cast_or_null<DeclStmt>(C->getPreInitStmt())) {
2753  for (Decl *D : DS->decls())
2754  MarkVariableReferenced(D->getLocation(), cast<VarDecl>(D));
2755  }
2756  }
2757  }
2758  }
2759  SR = ActOnCapturedRegionEnd(SR.get());
2760  }
2761  return SR;
2762 }
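// Illustrative note (editorial addition, not part of the original source): the
// clause checks above reject, for example, combining the nonmonotonic schedule
// modifier with an ordered clause:
//
//   #pragma omp for schedule(nonmonotonic: dynamic) ordered   // error: the
//   for (int i = 0; i < n; ++i) ;     // nonmonotonic modifier cannot be used
//                                     // together with an ordered clause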
2763 
2764 static bool checkCancelRegion(Sema &SemaRef, OpenMPDirectiveKind CurrentRegion,
2765  OpenMPDirectiveKind CancelRegion,
2766  SourceLocation StartLoc) {
2767  // CancelRegion is only needed for cancel and cancellation_point.
2768  if (CurrentRegion != OMPD_cancel && CurrentRegion != OMPD_cancellation_point)
2769  return false;
2770 
2771  if (CancelRegion == OMPD_parallel || CancelRegion == OMPD_for ||
2772  CancelRegion == OMPD_sections || CancelRegion == OMPD_taskgroup)
2773  return false;
2774 
2775  SemaRef.Diag(StartLoc, diag::err_omp_wrong_cancel_region)
2776  << getOpenMPDirectiveName(CancelRegion);
2777  return true;
2778 }
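// Illustrative note (editorial addition, not part of the original source): a
// cancel or cancellation point construct must name one of 'parallel', 'for',
// 'sections' or 'taskgroup', and that construct-type-clause must match the
// enclosing region, e.g.:
//
//   #pragma omp parallel
//   {
//   #pragma omp cancel parallel      // OK: construct-type-clause matches
//   }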
2779 
2780 static bool checkNestingOfRegions(Sema &SemaRef, const DSAStackTy *Stack,
2781  OpenMPDirectiveKind CurrentRegion,
2782  const DeclarationNameInfo &CurrentName,
2783  OpenMPDirectiveKind CancelRegion,
2784  SourceLocation StartLoc) {
2785  if (Stack->getCurScope()) {
2786  OpenMPDirectiveKind ParentRegion = Stack->getParentDirective();
2787  OpenMPDirectiveKind OffendingRegion = ParentRegion;
2788  bool NestingProhibited = false;
2789  bool CloseNesting = true;
2790  bool OrphanSeen = false;
2791  enum {
2792  NoRecommend,
2793  ShouldBeInParallelRegion,
2794  ShouldBeInOrderedRegion,
2795  ShouldBeInTargetRegion,
2796  ShouldBeInTeamsRegion
2797  } Recommend = NoRecommend;
2798  if (isOpenMPSimdDirective(ParentRegion) && CurrentRegion != OMPD_ordered) {
2799  // OpenMP [2.16, Nesting of Regions]
2800  // OpenMP constructs may not be nested inside a simd region.
2801  // OpenMP [2.8.1,simd Construct, Restrictions]
2802  // An ordered construct with the simd clause is the only OpenMP
2803  // construct that can appear in the simd region.
2804  // Allowing a SIMD construct nested in another SIMD construct is an
2805  // extension. The OpenMP 4.5 spec does not allow it. Issue a warning
2806  // message.
2807  SemaRef.Diag(StartLoc, (CurrentRegion != OMPD_simd)
2808  ? diag::err_omp_prohibited_region_simd
2809  : diag::warn_omp_nesting_simd);
2810  return CurrentRegion != OMPD_simd;
2811  }
2812  if (ParentRegion == OMPD_atomic) {
2813  // OpenMP [2.16, Nesting of Regions]
2814  // OpenMP constructs may not be nested inside an atomic region.
2815  SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region_atomic);
2816  return true;
2817  }
2818  if (CurrentRegion == OMPD_section) {
2819  // OpenMP [2.7.2, sections Construct, Restrictions]
2820  // Orphaned section directives are prohibited. That is, the section
2821  // directives must appear within the sections construct and must not be
2822  // encountered elsewhere in the sections region.
2823  if (ParentRegion != OMPD_sections &&
2824  ParentRegion != OMPD_parallel_sections) {
2825  SemaRef.Diag(StartLoc, diag::err_omp_orphaned_section_directive)
2826  << (ParentRegion != OMPD_unknown)
2827  << getOpenMPDirectiveName(ParentRegion);
2828  return true;
2829  }
2830  return false;
2831  }
2832  // Allow some constructs (except teams) to be orphaned (they could be
2833  // used in functions, called from OpenMP regions with the required
2834  // preconditions).
2835  if (ParentRegion == OMPD_unknown &&
2836  !isOpenMPNestingTeamsDirective(CurrentRegion))
2837  return false;
2838  if (CurrentRegion == OMPD_cancellation_point ||
2839  CurrentRegion == OMPD_cancel) {
2840  // OpenMP [2.16, Nesting of Regions]
2841  // A cancellation point construct for which construct-type-clause is
2842  // taskgroup must be nested inside a task construct. A cancellation
2843  // point construct for which construct-type-clause is not taskgroup must
2844  // be closely nested inside an OpenMP construct that matches the type
2845  // specified in construct-type-clause.
2846  // A cancel construct for which construct-type-clause is taskgroup must be
2847  // nested inside a task construct. A cancel construct for which
2848  // construct-type-clause is not taskgroup must be closely nested inside an
2849  // OpenMP construct that matches the type specified in
2850  // construct-type-clause.
2851  NestingProhibited =
2852  !((CancelRegion == OMPD_parallel &&
2853  (ParentRegion == OMPD_parallel ||
2854  ParentRegion == OMPD_target_parallel)) ||
2855  (CancelRegion == OMPD_for &&
2856  (ParentRegion == OMPD_for || ParentRegion == OMPD_parallel_for ||
2857  ParentRegion == OMPD_target_parallel_for ||
2858  ParentRegion == OMPD_distribute_parallel_for ||
2859  ParentRegion == OMPD_teams_distribute_parallel_for ||
2860  ParentRegion == OMPD_target_teams_distribute_parallel_for)) ||
2861  (CancelRegion == OMPD_taskgroup && ParentRegion == OMPD_task) ||
2862  (CancelRegion == OMPD_sections &&
2863  (ParentRegion == OMPD_section || ParentRegion == OMPD_sections ||
2864  ParentRegion == OMPD_parallel_sections)));
2865  } else if (CurrentRegion == OMPD_master) {
2866  // OpenMP [2.16, Nesting of Regions]
2867  // A master region may not be closely nested inside a worksharing,
2868  // atomic, or explicit task region.
2869  NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
2870  isOpenMPTaskingDirective(ParentRegion);
2871  } else if (CurrentRegion == OMPD_critical && CurrentName.getName()) {
2872  // OpenMP [2.16, Nesting of Regions]
2873  // A critical region may not be nested (closely or otherwise) inside a
2874  // critical region with the same name. Note that this restriction is not
2875  // sufficient to prevent deadlock.
2876  SourceLocation PreviousCriticalLoc;
2877  bool DeadLock = Stack->hasDirective(
2878  [CurrentName, &PreviousCriticalLoc](OpenMPDirectiveKind K,
2879  const DeclarationNameInfo &DNI,
2880  SourceLocation Loc) {
2881  if (K == OMPD_critical && DNI.getName() == CurrentName.getName()) {
2882  PreviousCriticalLoc = Loc;
2883  return true;
2884  }
2885  return false;
2886  },
2887  false /* skip top directive */);
2888  if (DeadLock) {
2889  SemaRef.Diag(StartLoc,
2890  diag::err_omp_prohibited_region_critical_same_name)
2891  << CurrentName.getName();
2892  if (PreviousCriticalLoc.isValid())
2893  SemaRef.Diag(PreviousCriticalLoc,
2894  diag::note_omp_previous_critical_region);
2895  return true;
2896  }
2897  } else if (CurrentRegion == OMPD_barrier) {
2898  // OpenMP [2.16, Nesting of Regions]
2899  // A barrier region may not be closely nested inside a worksharing,
2900  // explicit task, critical, ordered, atomic, or master region.
2901  NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
2902  isOpenMPTaskingDirective(ParentRegion) ||
2903  ParentRegion == OMPD_master ||
2904  ParentRegion == OMPD_critical ||
2905  ParentRegion == OMPD_ordered;
2906  } else if (isOpenMPWorksharingDirective(CurrentRegion) &&
2907  !isOpenMPParallelDirective(CurrentRegion) &&
2908  !isOpenMPTeamsDirective(CurrentRegion)) {
2909  // OpenMP [2.16, Nesting of Regions]
2910  // A worksharing region may not be closely nested inside a worksharing,
2911  // explicit task, critical, ordered, atomic, or master region.
2912  NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
2913  isOpenMPTaskingDirective(ParentRegion) ||
2914  ParentRegion == OMPD_master ||
2915  ParentRegion == OMPD_critical ||
2916  ParentRegion == OMPD_ordered;
2917  Recommend = ShouldBeInParallelRegion;
2918  } else if (CurrentRegion == OMPD_ordered) {
2919  // OpenMP [2.16, Nesting of Regions]
2920  // An ordered region may not be closely nested inside a critical,
2921  // atomic, or explicit task region.
2922  // An ordered region must be closely nested inside a loop region (or
2923  // parallel loop region) with an ordered clause.
2924  // OpenMP [2.8.1,simd Construct, Restrictions]
2925  // An ordered construct with the simd clause is the only OpenMP construct
2926  // that can appear in the simd region.
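  // Illustrative sketch (not from this file): an 'ordered' region is accepted
  // when the enclosing loop directive carries an 'ordered' clause:
  //   #pragma omp for ordered
  //   for (int i = 0; i < n; ++i) {
  //     #pragma omp ordered
  //     { emit(i); }   // 'emit' is a placeholder function
  //   }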
2927  NestingProhibited = ParentRegion == OMPD_critical ||
2928  isOpenMPTaskingDirective(ParentRegion) ||
2929  !(isOpenMPSimdDirective(ParentRegion) ||
2930  Stack->isParentOrderedRegion());
2931  Recommend = ShouldBeInOrderedRegion;
2932  } else if (isOpenMPNestingTeamsDirective(CurrentRegion)) {
2933  // OpenMP [2.16, Nesting of Regions]
2934  // If specified, a teams construct must be contained within a target
2935  // construct.
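  // Illustrative sketch (not from this file): a 'teams' construct is accepted
  // only when nested directly inside a 'target' region:
  //   #pragma omp target
  //   #pragma omp teams num_teams(4)
  //   { work(); }   // 'work' is a placeholder function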
2936  NestingProhibited = ParentRegion != OMPD_target;
2937  OrphanSeen = ParentRegion == OMPD_unknown;
2938  Recommend = ShouldBeInTargetRegion;
2939  }
2940  if (!NestingProhibited &&
2941  !isOpenMPTargetExecutionDirective(CurrentRegion) &&
2942  !isOpenMPTargetDataManagementDirective(CurrentRegion) &&
2943  (ParentRegion == OMPD_teams || ParentRegion == OMPD_target_teams)) {
2944  // OpenMP [2.16, Nesting of Regions]
2945  // distribute, parallel, parallel sections, parallel workshare, and the
2946  // parallel loop and parallel loop SIMD constructs are the only OpenMP
2947  // constructs that can be closely nested in the teams region.
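  // Illustrative sketch (not from this file): only 'distribute' and
  // parallel-based constructs may be closely nested in a 'teams' region:
  //   #pragma omp teams
  //   #pragma omp parallel   // OK; e.g. a bare '#pragma omp single' here
  //   { work(); }            // would be diagnosed ('work' is a placeholder)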
2948  NestingProhibited = !isOpenMPParallelDirective(CurrentRegion) &&
2949  !isOpenMPDistributeDirective(CurrentRegion);
2950  Recommend = ShouldBeInParallelRegion;
2951  }
2952  if (!NestingProhibited &&
2953  isOpenMPNestingDistributeDirective(CurrentRegion)) {
2954  // OpenMP 4.5 [2.17 Nesting of Regions]
2955  // The region associated with the distribute construct must be strictly
2956  // nested inside a teams region
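  // Illustrative sketch (not from this file): 'distribute' must have a 'teams'
  // (or 'target teams') region as its immediate parent:
  //   #pragma omp teams
  //   #pragma omp distribute
  //   for (int i = 0; i < n; ++i)
  //     body(i);   // 'body' is a placeholder function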
2957  NestingProhibited =
2958  (ParentRegion != OMPD_teams && ParentRegion != OMPD_target_teams);
2959  Recommend = ShouldBeInTeamsRegion;
2960  }
2961  if (!NestingProhibited &&
2962  (isOpenMPTargetExecutionDirective(CurrentRegion) ||
2963  isOpenMPTargetDataManagementDirective(CurrentRegion))) {
2964  // OpenMP 4.5 [2.17 Nesting of Regions]
2965  // If a target, target update, target data, target enter data, or
2966  // target exit data construct is encountered during execution of a
2967  // target region, the behavior is unspecified.
2968  NestingProhibited = Stack->hasDirective(
2969  [&OffendingRegion](OpenMPDirectiveKind K, const DeclarationNameInfo &,
2970  SourceLocation) {
2971  if (isOpenMPTargetExecutionDirective(K)) {
2972  OffendingRegion = K;
2973  return true;
2974  }
2975  return false;
2976  },
2977  false /* don't skip top directive */);
2978  CloseNesting = false;
2979  }
2980  if (NestingProhibited) {
2981  if (OrphanSeen) {
2982  SemaRef.Diag(StartLoc, diag::err_omp_orphaned_device_directive)
2983  << getOpenMPDirectiveName(CurrentRegion) << Recommend;
2984  } else {
2985  SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region)
2986  << CloseNesting << getOpenMPDirectiveName(OffendingRegion)
2987  << Recommend << getOpenMPDirectiveName(CurrentRegion);
2988  }
2989  return true;
2990  }
2991  }
2992  return false;
2993 }
2994 
2995 static bool checkIfClauses(Sema &S, OpenMPDirectiveKind Kind,
2996  ArrayRef<OMPClause *> Clauses,
2997  ArrayRef<OpenMPDirectiveKind> AllowedNameModifiers) {
2998  bool ErrorFound = false;
2999  unsigned NamedModifiersNumber = 0;
3000  SmallVector<const OMPIfClause *, OMPD_unknown + 1> FoundNameModifiers(
3001  OMPD_unknown + 1);
3002  SmallVector<SourceLocation, 4> NameModifierLoc;
3003  for (const OMPClause *C : Clauses) {
3004  if (const auto *IC = dyn_cast_or_null<OMPIfClause>(C)) {
3005  // At most one if clause without a directive-name-modifier can appear on
3006  // the directive.
3007  OpenMPDirectiveKind CurNM = IC->getNameModifier();
3008  if (FoundNameModifiers[CurNM]) {
3009  S.Diag(C->getBeginLoc(), diag::err_omp_more_one_clause)
3010  << getOpenMPDirectiveName(Kind) << getOpenMPClauseName(OMPC_if)
3011  << (CurNM != OMPD_unknown) << getOpenMPDirectiveName(CurNM);
3012  ErrorFound = true;
3013  } else if (CurNM != OMPD_unknown) {
3014  NameModifierLoc.push_back(IC->getNameModifierLoc());
3015  ++NamedModifiersNumber;
3016  }
3017  FoundNameModifiers[CurNM] = IC;
3018  if (CurNM == OMPD_unknown)
3019  continue;
3020  // Check if the specified name modifier is allowed for the current
3021  // directive.
3022  // At most one if clause with the particular directive-name-modifier can
3023  // appear on the directive.
3024  bool MatchFound = false;
3025  for (auto NM : AllowedNameModifiers) {
3026  if (CurNM == NM) {
3027  MatchFound = true;
3028  break;
3029  }
3030  }
3031  if (!MatchFound) {
3032  S.Diag(IC->getNameModifierLoc(),
3033  diag::err_omp_wrong_if_directive_name_modifier)
3034  << getOpenMPDirectiveName(CurNM) << getOpenMPDirectiveName(Kind);
3035  ErrorFound = true;
3036  }
3037  }
3038  }
3039  // If any if clause on the directive includes a directive-name-modifier then
3040  // all if clauses on the directive must include a directive-name-modifier.
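  // Illustrative sketch (not from this file): on a combined directive, every
  // 'if' clause needs a directive-name-modifier once any of them has one:
  //   #pragma omp target parallel if(target: use_offload) if(parallel: n > 64)
  //   { work(); }   // 'use_offload', 'n', and 'work' are placeholders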
3041  if (FoundNameModifiers[OMPD_unknown] && NamedModifiersNumber > 0) {
3042  if (NamedModifiersNumber == AllowedNameModifiers.size()) {
3043  S.Diag(FoundNameModifiers[OMPD_unknown]->getBeginLoc(),
3044  diag::err_omp_no_more_if_clause);
3045  } else {
3046  std::string Values;
3047  std::string Sep(", ");
3048  unsigned AllowedCnt = 0;
3049  unsigned TotalAllowedNum =
3050  AllowedNameModifiers.size() - NamedModifiersNumber;
3051  for (unsigned Cnt = 0, End = AllowedNameModifiers.size(); Cnt < End;
3052  ++Cnt) {
3053  OpenMPDirectiveKind NM = AllowedNameModifiers[Cnt];
3054  if (!FoundNameModifiers[NM]) {
3055  Values += "'";
3056  Values += getOpenMPDirectiveName(NM);
3057  Values += "'";
3058  if (AllowedCnt + 2 == TotalAllowedNum)
3059  Values += " or ";
3060  else if (AllowedCnt + 1 != TotalAllowedNum)
3061  Values += Sep;
3062  ++AllowedCnt;
3063  }
3064  }
3065  S.Diag(FoundNameModifiers[OMPD_unknown]->getCondition()->getBeginLoc(),
3066  diag::err_omp_unnamed_if_clause)
3067  << (TotalAllowedNum > 1) << Values;
3068  }
3069  for (SourceLocation Loc : NameModifierLoc) {
3070  S.Diag(Loc, diag::note_omp_previous_named_if_clause);
3071  }
3072  ErrorFound = true;
3073  }
3074  return ErrorFound;
3075 }
3076 
3077 StmtResult Sema::ActOnOpenMPExecutableDirective(
3078  OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
3079  OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
3080  Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc) {
3081  StmtResult Res = StmtError();
3082  // First check CancelRegion which is then used in checkNestingOfRegions.
3083  if (checkCancelRegion(*this, Kind, CancelRegion, StartLoc) ||
3084  checkNestingOfRegions(*this, DSAStack, Kind, DirName, CancelRegion,
3085  StartLoc))
3086  return StmtError();
3087 
3088  llvm::SmallVector<OMPClause *, 8> ClausesWithImplicit;
3089  VarsWithInheritedDSAType VarsWithInheritedDSA;
3090  bool ErrorFound = false;
3091  ClausesWithImplicit.append(Clauses.begin(), Clauses.end());
3092  if (AStmt && !CurContext->isDependentContext()) {
3093  assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
3094 
3095  // Check default data sharing attributes for referenced variables.
3096  DSAAttrChecker DSAChecker(DSAStack, *this, cast<CapturedStmt>(AStmt));
3097  int ThisCaptureLevel = getOpenMPCaptureLevels(Kind);
3098  Stmt *S = AStmt;
3099  while (--ThisCaptureLevel >= 0)
3100  S = cast<CapturedStmt>(S)->getCapturedStmt();
3101  DSAChecker.Visit(S);
3102  if (DSAChecker.isErrorFound())
3103  return StmtError();
3104  // Generate list of implicitly defined firstprivate variables.
3105  VarsWithInheritedDSA = DSAChecker.getVarsWithInheritedDSA();
3106 
3107  SmallVector<Expr *, 4> ImplicitFirstprivates(
3108  DSAChecker.getImplicitFirstprivate().begin(),
3109  DSAChecker.getImplicitFirstprivate().end());
3110  SmallVector<Expr *, 4> ImplicitMaps(DSAChecker.getImplicitMap().begin(),
3111  DSAChecker.getImplicitMap().end());
3112  // Mark taskgroup task_reduction descriptors as implicitly firstprivate.
3113  for (OMPClause *C : Clauses) {
3114  if (auto *IRC = dyn_cast<OMPInReductionClause>(C)) {
3115  for (Expr *E : IRC->taskgroup_descriptors())
3116  if (E)
3117  ImplicitFirstprivates.emplace_back(E);
3118  }
3119  }
3120  if (!ImplicitFirstprivates.empty()) {
3121  if (OMPClause *Implicit = ActOnOpenMPFirstprivateClause(
3122  ImplicitFirstprivates, SourceLocation(), SourceLocation(),
3123  SourceLocation())) {
3124  ClausesWithImplicit.push_back(Implicit);
3125  ErrorFound = cast<OMPFirstprivateClause>(Implicit)->varlist_size() !=
3126  ImplicitFirstprivates.size();
3127  } else {
3128  ErrorFound = true;
3129  }
3130  }
3131  if (!ImplicitMaps.empty()) {
3132  if (OMPClause *Implicit = ActOnOpenMPMapClause(
3133  OMPC_MAP_unknown, OMPC_MAP_tofrom, /*IsMapTypeImplicit=*/true,
3134  SourceLocation(), SourceLocation(), ImplicitMaps,
3135  SourceLocation(), SourceLocation(), SourceLocation())) {
3136  ClausesWithImplicit.emplace_back(Implicit);
3137  ErrorFound |=
3138  cast<OMPMapClause>(Implicit)->varlist_size() != ImplicitMaps.size();
3139  } else {
3140  ErrorFound = true;
3141  }
3142  }
3143  }
3144 
3145  llvm::SmallVector<OpenMPDirectiveKind, 4> AllowedNameModifiers;
3146  switch (Kind) {
3147  case OMPD_parallel:
3148  Res = ActOnOpenMPParallelDirective(ClausesWithImplicit, AStmt, StartLoc,
3149  EndLoc);
3150  AllowedNameModifiers.push_back(OMPD_parallel);
3151  break;
3152  case OMPD_simd:
3153  Res = ActOnOpenMPSimdDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc,
3154  VarsWithInheritedDSA);
3155  break;
3156  case OMPD_for:
3157  Res = ActOnOpenMPForDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc,
3158  VarsWithInheritedDSA);
3159  break;
3160  case OMPD_for_simd:
3161  Res = ActOnOpenMPForSimdDirective(ClausesWithImplicit, AStmt, StartLoc,
3162  EndLoc, VarsWithInheritedDSA);
3163  break;
3164  case OMPD_sections:
3165  Res = ActOnOpenMPSectionsDirective(ClausesWithImplicit, AStmt, StartLoc,
3166  EndLoc);
3167  break;
3168  case OMPD_section:
3169  assert(ClausesWithImplicit.empty() &&
3170  "No clauses are allowed for 'omp section' directive");
3171  Res = ActOnOpenMPSectionDirective(AStmt, StartLoc, EndLoc);
3172  break;
3173  case OMPD_single:
3174  Res = ActOnOpenMPSingleDirective(ClausesWithImplicit, AStmt, StartLoc,
3175  EndLoc);
3176  break;
3177  case OMPD_master:
3178  assert(ClausesWithImplicit.empty() &&
3179  "No clauses are allowed for 'omp master' directive");
3180  Res = ActOnOpenMPMasterDirective(AStmt, StartLoc, EndLoc);
3181  break;
3182  case OMPD_critical:
3183  Res = ActOnOpenMPCriticalDirective(DirName, ClausesWithImplicit, AStmt,
3184  StartLoc, EndLoc);
3185  break;
3186  case OMPD_parallel_for:
3187  Res = ActOnOpenMPParallelForDirective(ClausesWithImplicit, AStmt, StartLoc,
3188  EndLoc, VarsWithInheritedDSA);
3189  AllowedNameModifiers.push_back(OMPD_parallel);
3190  break;
3191  case OMPD_parallel_for_simd:
3192  Res = ActOnOpenMPParallelForSimdDirective(
3193  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3194  AllowedNameModifiers.push_back(OMPD_parallel);
3195  break;
3196  case OMPD_parallel_sections:
3197  Res = ActOnOpenMPParallelSectionsDirective(ClausesWithImplicit, AStmt,
3198  StartLoc, EndLoc);
3199  AllowedNameModifiers.push_back(OMPD_parallel);
3200  break;
3201  case OMPD_task:
3202  Res =
3203  ActOnOpenMPTaskDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc);
3204  AllowedNameModifiers.push_back(OMPD_task);
3205  break;
3206  case OMPD_taskyield:
3207  assert(ClausesWithImplicit.empty() &&
3208  "No clauses are allowed for 'omp taskyield' directive");
3209  assert(AStmt == nullptr &&
3210  "No associated statement allowed for 'omp taskyield' directive");
3211  Res = ActOnOpenMPTaskyieldDirective(StartLoc, EndLoc);
3212  break;
3213  case OMPD_barrier:
3214  assert(ClausesWithImplicit.empty() &&
3215  "No clauses are allowed for 'omp barrier' directive");
3216  assert(AStmt == nullptr &&
3217  "No associated statement allowed for 'omp barrier' directive");
3218  Res = ActOnOpenMPBarrierDirective(StartLoc, EndLoc);
3219  break;
3220  case OMPD_taskwait:
3221  assert(ClausesWithImplicit.empty() &&
3222  "No clauses are allowed for 'omp taskwait' directive");
3223  assert(AStmt == nullptr &&
3224  "No associated statement allowed for 'omp taskwait' directive");
3225  Res = ActOnOpenMPTaskwaitDirective(StartLoc, EndLoc);
3226  break;
3227  case OMPD_taskgroup:
3228  Res = ActOnOpenMPTaskgroupDirective(ClausesWithImplicit, AStmt, StartLoc,
3229  EndLoc);
3230  break;
3231  case OMPD_flush:
3232  assert(AStmt == nullptr &&
3233  "No associated statement allowed for 'omp flush' directive");
3234  Res = ActOnOpenMPFlushDirective(ClausesWithImplicit, StartLoc, EndLoc);
3235  break;
3236  case OMPD_ordered:
3237  Res = ActOnOpenMPOrderedDirective(ClausesWithImplicit, AStmt, StartLoc,
3238  EndLoc);
3239  break;
3240  case OMPD_atomic:
3241  Res = ActOnOpenMPAtomicDirective(ClausesWithImplicit, AStmt, StartLoc,
3242  EndLoc);
3243  break;
3244  case OMPD_teams:
3245  Res =
3246  ActOnOpenMPTeamsDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc);
3247  break;
3248  case OMPD_target:
3249  Res = ActOnOpenMPTargetDirective(ClausesWithImplicit, AStmt, StartLoc,
3250  EndLoc);
3251  AllowedNameModifiers.push_back(OMPD_target);
3252  break;
3253  case OMPD_target_parallel:
3254  Res = ActOnOpenMPTargetParallelDirective(ClausesWithImplicit, AStmt,
3255  StartLoc, EndLoc);
3256  AllowedNameModifiers.push_back(OMPD_target);
3257  AllowedNameModifiers.push_back(OMPD_parallel);
3258  break;
3259  case OMPD_target_parallel_for:
3260  Res = ActOnOpenMPTargetParallelForDirective(
3261  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3262  AllowedNameModifiers.push_back(OMPD_target);
3263  AllowedNameModifiers.push_back(OMPD_parallel);
3264  break;
3265  case OMPD_cancellation_point:
3266  assert(ClausesWithImplicit.empty() &&
3267  "No clauses are allowed for 'omp cancellation point' directive");
3268  assert(AStmt == nullptr && "No associated statement allowed for 'omp "
3269  "cancellation point' directive");
3270  Res = ActOnOpenMPCancellationPointDirective(StartLoc, EndLoc, CancelRegion);
3271  break;
3272  case OMPD_cancel:
3273  assert(AStmt == nullptr &&
3274  "No associated statement allowed for 'omp cancel' directive");
3275  Res = ActOnOpenMPCancelDirective(ClausesWithImplicit, StartLoc, EndLoc,
3276  CancelRegion);
3277  AllowedNameModifiers.push_back(OMPD_cancel);
3278  break;
3279  case OMPD_target_data:
3280  Res = ActOnOpenMPTargetDataDirective(ClausesWithImplicit, AStmt, StartLoc,
3281  EndLoc);
3282  AllowedNameModifiers.push_back(OMPD_target_data);
3283  break;
3284  case OMPD_target_enter_data:
3285  Res = ActOnOpenMPTargetEnterDataDirective(ClausesWithImplicit, StartLoc,
3286  EndLoc, AStmt);
3287  AllowedNameModifiers.push_back(OMPD_target_enter_data);
3288  break;
3289  case OMPD_target_exit_data:
3290  Res = ActOnOpenMPTargetExitDataDirective(ClausesWithImplicit, StartLoc,
3291  EndLoc, AStmt);
3292  AllowedNameModifiers.push_back(OMPD_target_exit_data);
3293  break;
3294  case OMPD_taskloop:
3295  Res = ActOnOpenMPTaskLoopDirective(ClausesWithImplicit, AStmt, StartLoc,
3296  EndLoc, VarsWithInheritedDSA);
3297  AllowedNameModifiers.push_back(OMPD_taskloop);
3298  break;
3299  case OMPD_taskloop_simd:
3300  Res = ActOnOpenMPTaskLoopSimdDirective(ClausesWithImplicit, AStmt, StartLoc,
3301  EndLoc, VarsWithInheritedDSA);
3302  AllowedNameModifiers.push_back(OMPD_taskloop);
3303  break;
3304  case OMPD_distribute:
3305  Res = ActOnOpenMPDistributeDirective(ClausesWithImplicit, AStmt, StartLoc,
3306  EndLoc, VarsWithInheritedDSA);
3307  break;
3308  case OMPD_target_update:
3309  Res = ActOnOpenMPTargetUpdateDirective(ClausesWithImplicit, StartLoc,
3310  EndLoc, AStmt);
3311  AllowedNameModifiers.push_back(OMPD_target_update);
3312  break;
3313  case OMPD_distribute_parallel_for:
3314  Res = ActOnOpenMPDistributeParallelForDirective(
3315  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3316  AllowedNameModifiers.push_back(OMPD_parallel);
3317  break;
3318  case OMPD_distribute_parallel_for_simd:
3319  Res = ActOnOpenMPDistributeParallelForSimdDirective(
3320  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3321  AllowedNameModifiers.push_back(OMPD_parallel);
3322  break;
3323  case OMPD_distribute_simd:
3324  Res = ActOnOpenMPDistributeSimdDirective(
3325  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3326  break;
3327  case OMPD_target_parallel_for_simd:
3328  Res = ActOnOpenMPTargetParallelForSimdDirective(
3329  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3330  AllowedNameModifiers.push_back(OMPD_target);
3331  AllowedNameModifiers.push_back(OMPD_parallel);
3332  break;
3333  case OMPD_target_simd:
3334  Res = ActOnOpenMPTargetSimdDirective(ClausesWithImplicit, AStmt, StartLoc,
3335  EndLoc, VarsWithInheritedDSA);
3336  AllowedNameModifiers.push_back(OMPD_target);
3337  break;
3338  case OMPD_teams_distribute:
3339  Res = ActOnOpenMPTeamsDistributeDirective(
3340  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3341  break;
3342  case OMPD_teams_distribute_simd:
3343  Res = ActOnOpenMPTeamsDistributeSimdDirective(
3344  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3345  break;
3346  case OMPD_teams_distribute_parallel_for_simd:
3347  Res = ActOnOpenMPTeamsDistributeParallelForSimdDirective(
3348  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3349  AllowedNameModifiers.push_back(OMPD_parallel);
3350  break;
3351  case OMPD_teams_distribute_parallel_for:
3352  Res = ActOnOpenMPTeamsDistributeParallelForDirective(
3353  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3354  AllowedNameModifiers.push_back(OMPD_parallel);
3355  break;
3356  case OMPD_target_teams:
3357  Res = ActOnOpenMPTargetTeamsDirective(ClausesWithImplicit, AStmt, StartLoc,
3358  EndLoc);
3359  AllowedNameModifiers.push_back(OMPD_target);
3360  break;
3361  case OMPD_target_teams_distribute:
3362  Res = ActOnOpenMPTargetTeamsDistributeDirective(
3363  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3364  AllowedNameModifiers.push_back(OMPD_target);
3365  break;
3366  case OMPD_target_teams_distribute_parallel_for:
3367  Res = ActOnOpenMPTargetTeamsDistributeParallelForDirective(
3368  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3369  AllowedNameModifiers.push_back(OMPD_target);
3370  AllowedNameModifiers.push_back(OMPD_parallel);
3371  break;
3372  case OMPD_target_teams_distribute_parallel_for_simd:
3373  Res = ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
3374  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3375  AllowedNameModifiers.push_back(OMPD_target);
3376  AllowedNameModifiers.push_back(OMPD_parallel);
3377  break;
3378  case OMPD_target_teams_distribute_simd:
3379  Res = ActOnOpenMPTargetTeamsDistributeSimdDirective(
3380  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3381  AllowedNameModifiers.push_back(OMPD_target);
3382  break;
3383  case OMPD_declare_target:
3384  case OMPD_end_declare_target:
3385  case OMPD_threadprivate:
3386  case OMPD_declare_reduction:
3387  case OMPD_declare_simd:
3388  llvm_unreachable("OpenMP Directive is not allowed");
3389  case OMPD_unknown:
3390  llvm_unreachable("Unknown OpenMP directive");
3391  }
3392 
3393  for (const auto &P : VarsWithInheritedDSA) {
3394  Diag(P.second->getExprLoc(), diag::err_omp_no_dsa_for_variable)
3395  << P.first << P.second->getSourceRange();
3396  }
3397  ErrorFound = !VarsWithInheritedDSA.empty() || ErrorFound;
3398 
3399  if (!AllowedNameModifiers.empty())
3400  ErrorFound = checkIfClauses(*this, Kind, Clauses, AllowedNameModifiers) ||
3401  ErrorFound;
3402 
3403  if (ErrorFound)
3404  return StmtError();
3405  return Res;
3406 }
3407 
3408 Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareSimdDirective(
3409  DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen,
3410  ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
3411  ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
3412  ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR) {
3413  assert(Aligneds.size() == Alignments.size());
3414  assert(Linears.size() == LinModifiers.size());
3415  assert(Linears.size() == Steps.size());
3416  if (!DG || DG.get().isNull())
3417  return DeclGroupPtrTy();
3418 
3419  if (!DG.get().isSingleDecl()) {
3420  Diag(SR.getBegin(), diag::err_omp_single_decl_in_declare_simd);
3421  return DG;
3422  }
3423  Decl *ADecl = DG.get().getSingleDecl();
3424  if (auto *FTD = dyn_cast<FunctionTemplateDecl>(ADecl))
3425  ADecl = FTD->getTemplatedDecl();
3426 
3427  auto *FD = dyn_cast<FunctionDecl>(ADecl);
3428  if (!FD) {
3429  Diag(ADecl->getLocation(), diag::err_omp_function_expected);
3430  return DeclGroupPtrTy();
3431  }
3432 
3433  // OpenMP [2.8.2, declare simd construct, Description]
3434  // The parameter of the simdlen clause must be a constant positive integer
3435  // expression.
3436  ExprResult SL;
3437  if (Simdlen)
3438  SL = VerifyPositiveIntegerConstantInClause(Simdlen, OMPC_simdlen);
3439  // OpenMP [2.8.2, declare simd construct, Description]
3440  // The special this pointer can be used as if it were one of the arguments to the
3441  // function in any of the linear, aligned, or uniform clauses.
3442  // The uniform clause declares one or more arguments to have an invariant
3443  // value for all concurrent invocations of the function in the execution of a
3444  // single SIMD loop.
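  // Illustrative sketch (not from this file): 'stride' is invariant across all
  // concurrent invocations generated for a single SIMD loop:
  //   #pragma omp declare simd uniform(stride)
  //   double sample(const double *data, int i, int stride);   // placeholders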
3445  llvm::DenseMap<const Decl *, const Expr *> UniformedArgs;
3446  const Expr *UniformedLinearThis = nullptr;
3447  for (const Expr *E : Uniforms) {
3448  E = E->IgnoreParenImpCasts();
3449  if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
3450  if (const auto *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl()))
3451  if (FD->getNumParams() > PVD->getFunctionScopeIndex() &&
3452  FD->getParamDecl(PVD->getFunctionScopeIndex())
3453  ->getCanonicalDecl() == PVD->getCanonicalDecl()) {
3454  UniformedArgs.try_emplace(PVD->getCanonicalDecl(), E);
3455  continue;
3456  }
3457  if (isa<CXXThisExpr>(E)) {
3458  UniformedLinearThis = E;
3459  continue;
3460  }
3461  Diag(E->getExprLoc(), diag::err_omp_param_or_this_in_clause)
3462  << FD->getDeclName() << (isa<CXXMethodDecl>(ADecl) ? 1 : 0);
3463  }
3464  // OpenMP [2.8.2, declare simd construct, Description]
3465  // The aligned clause declares that the object to which each list item points
3466  // is aligned to the number of bytes expressed in the optional parameter of
3467  // the aligned clause.
3468  // The special this pointer can be used as if it were one of the arguments to the
3469  // function in any of the linear, aligned, or uniform clauses.
3470  // The type of list items appearing in the aligned clause must be array,
3471  // pointer, reference to array, or reference to pointer.
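  // Illustrative sketch (not from this file): both list items are pointer
  // parameters, with an explicit 32-byte alignment guarantee:
  //   #pragma omp declare simd aligned(a, b : 32)
  //   void axpy(double *a, double *b, double x, int n);   // placeholders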
3472  llvm::DenseMap<const Decl *, const Expr *> AlignedArgs;
3473  const Expr *AlignedThis = nullptr;
3474  for (const Expr *E : Aligneds) {
3475  E = E->IgnoreParenImpCasts();
3476  if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
3477  if (const auto *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl())) {
3478  const VarDecl *CanonPVD = PVD->getCanonicalDecl();
3479  if (FD->getNumParams() > PVD->getFunctionScopeIndex() &&
3480  FD->getParamDecl(PVD->getFunctionScopeIndex())
3481  ->getCanonicalDecl() == CanonPVD) {
3482  // OpenMP [2.8.1, simd construct, Restrictions]
3483  // A list-item cannot appear in more than one aligned clause.
3484  if (AlignedArgs.count(CanonPVD) > 0) {
3485  Diag(E->getExprLoc(), diag::err_omp_aligned_twice)
3486  << 1 << E->getSourceRange();
3487  Diag(AlignedArgs[CanonPVD]->getExprLoc(),
3488  diag::note_omp_explicit_dsa)
3489  << getOpenMPClauseName(OMPC_aligned);
3490  continue;
3491  }
3492  AlignedArgs[CanonPVD] = E;
3493  QualType QTy = PVD->getType()
3494  .getNonReferenceType()
3495  .getUnqualifiedType()
3496  .getCanonicalType();
3497  const Type *Ty = QTy.getTypePtrOrNull();
3498  if (!Ty || (!Ty->isArrayType() && !Ty->isPointerType())) {
3499  Diag(E->getExprLoc(), diag::err_omp_aligned_expected_array_or_ptr)
3500  << QTy << getLangOpts().CPlusPlus << E->getSourceRange();
3501  Diag(PVD->getLocation(), diag::note_previous_decl) << PVD;
3502  }
3503  continue;
3504  }
3505  }
3506  if (isa<CXXThisExpr>(E)) {
3507  if (AlignedThis) {
3508  Diag(E->getExprLoc(), diag::err_omp_aligned_twice)
3509  << 2 << E->getSourceRange();
3510  Diag(AlignedThis->getExprLoc(), diag::note_omp_explicit_dsa)
3511  << getOpenMPClauseName(OMPC_aligned);
3512  }
3513  AlignedThis = E;
3514  continue;
3515  }
3516  Diag(E->getExprLoc(), diag::err_omp_param_or_this_in_clause)
3517  << FD->getDeclName() << (isa<CXXMethodDecl>(ADecl) ? 1 : 0);
3518  }
3519  // The optional parameter of the aligned clause, alignment, must be a constant
3520  // positive integer expression. If no optional parameter is specified,
3521  // implementation-defined default alignments for SIMD instructions on the
3522  // target platforms are assumed.
3523  SmallVector<const Expr *, 4> NewAligns;
3524  for (Expr *E : Alignments) {
3525  ExprResult Align;
3526  if (E)
3527  Align = VerifyPositiveIntegerConstantInClause(E, OMPC_aligned);
3528  NewAligns.push_back(Align.get());
3529  }
3530  // OpenMP [2.8.2, declare simd construct, Description]
3531  // The linear clause declares one or more list items to be private to a SIMD
3532  // lane and to have a linear relationship with respect to the iteration space
3533  // of a loop.
3534  // The special this pointer can be used as if it were one of the arguments to the
3535  // function in any of the linear, aligned, or uniform clauses.
3536  // When a linear-step expression is specified in a linear clause it must be
3537  // either a constant integer expression or an integer-typed parameter that is
3538  // specified in a uniform clause on the directive.
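  // Illustrative sketch (not from this file): 'i' advances by a constant step
  // of 1 per SIMD lane, while 'a' stays uniform:
  //   #pragma omp declare simd linear(i : 1) uniform(a)
  //   double load_elem(const double *a, int i);   // placeholders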
3539  llvm::DenseMap<const Decl *, const Expr *> LinearArgs;
3540  const bool IsUniformedThis = UniformedLinearThis != nullptr;
3541  auto MI = LinModifiers.begin();
3542  for (const Expr *E : Linears) {
3543  auto LinKind = static_cast<OpenMPLinearClauseKind>(*MI);
3544  ++MI;
3545  E = E->IgnoreParenImpCasts();
3546  if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
3547  if (const auto *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl())) {
3548  const VarDecl *CanonPVD = PVD->getCanonicalDecl();
3549  if (FD->getNumParams() > PVD->getFunctionScopeIndex() &&
3550  FD->getParamDecl(PVD->getFunctionScopeIndex())
3551  ->getCanonicalDecl() == CanonPVD) {
3552  // OpenMP [2.15.3.7, linear Clause, Restrictions]
3553  // A list-item cannot appear in more than one linear clause.
3554  if (LinearArgs.count(CanonPVD) > 0) {
3555  Diag(E->getExprLoc(), diag::err_omp_wrong_dsa)
3556  << getOpenMPClauseName(OMPC_linear)
3557  << getOpenMPClauseName(OMPC_linear) << E->getSourceRange();
3558  Diag(LinearArgs[CanonPVD]->getExprLoc(),
3559  diag::note_omp_explicit_dsa)
3560  << getOpenMPClauseName(OMPC_linear);
3561  continue;
3562  }
3563  // Each argument can appear in at most one uniform or linear clause.
3564  if (UniformedArgs.count(CanonPVD) > 0) {
3565  Diag(E->getExprLoc(), diag::err_omp_wrong_dsa)
3566  << getOpenMPClauseName(OMPC_linear)
3567  << getOpenMPClauseName(OMPC_uniform) << E->getSourceRange();
3568  Diag(UniformedArgs[CanonPVD]->getExprLoc(),
3569  diag::note_omp_explicit_dsa)
3570  << getOpenMPClauseName(OMPC_uniform);
3571  continue;
3572  }
3573  LinearArgs[CanonPVD] = E;
3574  if (E->isValueDependent() || E->isTypeDependent() ||
3575  E->isInstantiationDependent() ||
3576  E->containsUnexpandedParameterPack())
3577  continue;
3578  (void)CheckOpenMPLinearDecl(CanonPVD, E->getExprLoc(), LinKind,
3579  PVD->getOriginalType());
3580  continue;
3581  }
3582  }
3583  if (isa<CXXThisExpr>(E)) {
3584  if (UniformedLinearThis) {
3585  Diag(E->getExprLoc(), diag::err_omp_wrong_dsa)
3586  << getOpenMPClauseName(OMPC_linear)
3587  << getOpenMPClauseName(IsUniformedThis ? OMPC_uniform : OMPC_linear)
3588  << E->getSourceRange();
3589  Diag(UniformedLinearThis->getExprLoc(), diag::note_omp_explicit_dsa)
3590  << getOpenMPClauseName(IsUniformedThis ? OMPC_uniform
3591  : OMPC_linear);
3592  continue;
3593  }
3594  UniformedLinearThis = E;
3595  if (E->isValueDependent() || E->isTypeDependent() ||
3596  E->isInstantiationDependent() || E->containsUnexpandedParameterPack())
3597  continue;
3598  (void)CheckOpenMPLinearDecl(/*D=*/nullptr, E->getExprLoc(), LinKind,
3599  E->getType());
3600  continue;
3601  }
3602  Diag(E->getExprLoc(), diag::err_omp_param_or_this_in_clause)
3603  << FD->getDeclName() << (isa<CXXMethodDecl>(ADecl) ? 1 : 0);
3604  }
3605  Expr *Step = nullptr;
3606  Expr *NewStep = nullptr;
3607  SmallVector<Expr *, 4> NewSteps;
3608  for (Expr *E : Steps) {
3609  // Skip a step expression identical to the previous one; it was already checked.
3610  if (Step == E || !E) {
3611  NewSteps.push_back(E ? NewStep : nullptr);
3612  continue;
3613  }
3614  Step = E;
3615  if (const auto *DRE = dyn_cast<DeclRefExpr>(Step))
3616  if (const auto *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl())) {
3617  const VarDecl *CanonPVD = PVD->getCanonicalDecl();
3618  if (UniformedArgs.count(CanonPVD) == 0) {
3619  Diag(Step->getExprLoc(), diag::err_omp_expected_uniform_param)
3620  << Step->getSourceRange();
3621  } else if (E->isValueDependent() || E->isTypeDependent() ||
3622  E->isInstantiationDependent() ||
3623  E->containsUnexpandedParameterPack() ||
3624  CanonPVD->getType()->hasIntegerRepresentation()) {
3625  NewSteps.push_back(Step);
3626  } else {
3627  Diag(Step->getExprLoc(), diag::err_omp_expected_int_param)
3628  << Step->getSourceRange();
3629  }
3630  continue;
3631  }
3632  NewStep = Step;
3633  if (Step && !Step->isValueDependent() && !Step->isTypeDependent() &&
3634  !Step->isInstantiationDependent() &&
3635  !Step->containsUnexpandedParameterPack()) {
3636  NewStep = PerformOpenMPImplicitIntegerConversion(Step->getExprLoc(), Step)
3637  .get();
3638  if (NewStep)
3639  NewStep = VerifyIntegerConstantExpression(NewStep).get();
3640  }
3641  NewSteps.push_back(NewStep);
3642  }
3643  auto *NewAttr = OMPDeclareSimdDeclAttr::CreateImplicit(
3644  Context, BS, SL.get(), const_cast<Expr **>(Uniforms.data()),
3645  Uniforms.size(), const_cast<Expr **>(Aligneds.data()), Aligneds.size(),
3646  const_cast<Expr **>(NewAligns.data()), NewAligns.size(),
3647  const_cast<Expr **>(Linears.data()), Linears.size(),
3648  const_cast<unsigned *>(LinModifiers.data()), LinModifiers.size(),
3649  NewSteps.data(), NewSteps.size(), SR);
3650  ADecl->addAttr(NewAttr);
3651  return ConvertDeclToDeclGroup(ADecl);
3652 }
3653 
3654 StmtResult Sema::ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
3655  Stmt *AStmt,
3656  SourceLocation StartLoc,
3657  SourceLocation EndLoc) {
3658  if (!AStmt)
3659  return StmtError();
3660 
3661  auto *CS = cast<CapturedStmt>(AStmt);
3662  // 1.2.2 OpenMP Language Terminology
3663  // Structured block - An executable statement with a single entry at the
3664  // top and a single exit at the bottom.
3665  // The point of exit cannot be a branch out of the structured block.
3666  // longjmp() and throw() must not violate the entry/exit criteria.
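  // Illustrative sketch (not from this file): branching out of the associated
  // structured block is non-conforming:
  //   #pragma omp parallel
  //   {
  //     if (stop)
  //       return;   // invalid: exits the structured block early
  //     work();     // 'stop' and 'work' are placeholders
  //   }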
3667  CS->getCapturedDecl()->setNothrow();
3668 
3669  setFunctionHasBranchProtectedScope();
3670 
3671  return OMPParallelDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt,
3672  DSAStack->isCancelRegion());
3673 }
3674 
3675 namespace {
3676 /// Helper class for checking canonical form of the OpenMP loops and
3677 /// extracting the iteration space of each loop in the loop nest, which will
3678 /// be used for IR generation.
3679 class OpenMPIterationSpaceChecker {
3680  /// Reference to Sema.
3681  Sema &SemaRef;
3682  /// A location for diagnostics (when there is no better location).
3683  SourceLocation DefaultLoc;
3684  /// A location for diagnostics (when increment is not compatible).
3685  SourceLocation ConditionLoc;
3686  /// A source location for referring to loop init later.
3687  SourceRange InitSrcRange;
3688  /// A source location for referring to condition later.
3689  SourceRange ConditionSrcRange;
3690  /// A source location for referring to increment later.
3691  SourceRange IncrementSrcRange;
3692  /// Loop variable.
3693  ValueDecl *LCDecl = nullptr;
3694  /// Reference to loop variable.
3695  Expr *LCRef = nullptr;
3696  /// Lower bound (initializer for the var).
3697  Expr *LB = nullptr;
3698  /// Upper bound.
3699  Expr *UB = nullptr;
3700  /// Loop step (increment).
3701  Expr *Step = nullptr;
3702  /// This flag is true when condition is one of:
3703  /// Var < UB
3704  /// Var <= UB
3705  /// UB > Var
3706  /// UB >= Var
3707  bool TestIsLessOp = false;
3708  /// This flag is true when condition is strict ( < or > ).
3709  bool TestIsStrictOp = false;
3710  /// This flag is true when step is subtracted on each iteration.
3711  bool SubtractStep = false;
3712 
3713 public:
3714  OpenMPIterationSpaceChecker(Sema &SemaRef, SourceLocation DefaultLoc)
3715  : SemaRef(SemaRef), DefaultLoc(DefaultLoc), ConditionLoc(DefaultLoc) {}
3716  /// Check init-expr for canonical loop form and save loop counter
3717  /// variable - #Var and its initialization value - #LB.
3718  bool checkAndSetInit(Stmt *S, bool EmitDiags = true);
3719  /// Check test-expr for canonical form, save upper-bound (#UB), flags
3720  /// for less/greater and for strict/non-strict comparison.
3721  bool checkAndSetCond(Expr *S);
3722  /// Check incr-expr for canonical loop form and return true if it
3723  /// does not conform, otherwise save loop step (#Step).
3724  bool checkAndSetInc(Expr *S);
3725  /// Return the loop counter variable.
3726  ValueDecl *getLoopDecl() const { return LCDecl; }
3727  /// Return the reference expression to loop counter variable.
3728  Expr *getLoopDeclRefExpr() const { return LCRef; }
3729  /// Source range of the loop init.
3730  SourceRange getInitSrcRange() const { return InitSrcRange; }
3731  /// Source range of the loop condition.
3732  SourceRange getConditionSrcRange() const { return ConditionSrcRange; }
3733  /// Source range of the loop increment.
3734  SourceRange getIncrementSrcRange() const { return IncrementSrcRange; }
3735  /// True if the step should be subtracted.
3736  bool shouldSubtractStep() const { return SubtractStep; }
3737  /// Build the expression to calculate the number of iterations.
3738  Expr *buildNumIterations(
3739  Scope *S, const bool LimitedType,
3740  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) const;
3741  /// Build the precondition expression for the loops.
3742  Expr *
3743  buildPreCond(Scope *S, Expr *Cond,
3744  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) const;
3745  /// Build reference expression to the counter to be used for codegen.
3746  DeclRefExpr *
3747  buildCounterVar(llvm::MapVector<const Expr *, DeclRefExpr *> &Captures,
3748  DSAStackTy &DSA) const;
3749  /// Build reference expression to the private counter to be used for
3750  /// codegen.
3751  Expr *buildPrivateCounterVar() const;
3752  /// Build initialization of the counter to be used for codegen.
3753  Expr *buildCounterInit() const;
3754  /// Build step of the counter to be used for codegen.
3755  Expr *buildCounterStep() const;
3756  /// Build loop data with counter value for depend clauses in ordered
3757  /// directives.
3758  Expr *
3759  buildOrderedLoopData(Scope *S, Expr *Counter,
3760  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures,
3761  SourceLocation Loc, Expr *Inc = nullptr,
3762  OverloadedOperatorKind OOK = OO_Amp);
3763  /// Return true if any expression is dependent.
3764  bool dependent() const;
3765 
3766 private:
3767  /// Check the right-hand side of an assignment in the increment
3768  /// expression.
3769  bool checkAndSetIncRHS(Expr *RHS);
3770  /// Helper to set loop counter variable and its initializer.
3771  bool setLCDeclAndLB(ValueDecl *NewLCDecl, Expr *NewDeclRefExpr, Expr *NewLB);
3772  /// Helper to set upper bound.
3773  bool setUB(Expr *NewUB, bool LessOp, bool StrictOp, SourceRange SR,
3774  SourceLocation SL);
3775  /// Helper to set loop increment.
3776  bool setStep(Expr *NewStep, bool Subtract);
3777 };
3778 
3779 bool OpenMPIterationSpaceChecker::dependent() const {
3780  if (!LCDecl) {
3781  assert(!LB && !UB && !Step);
3782  return false;
3783  }
3784  return LCDecl->getType()->isDependentType() ||
3785  (LB && LB->isValueDependent()) || (UB && UB->isValueDependent()) ||
3786  (Step && Step->isValueDependent());
3787 }
3788 
3789 bool OpenMPIterationSpaceChecker::setLCDeclAndLB(ValueDecl *NewLCDecl,
3790  Expr *NewLCRefExpr,
3791  Expr *NewLB) {
3792  // State consistency checking to ensure correct usage.
3793  assert(LCDecl == nullptr && LB == nullptr && LCRef == nullptr &&
3794  UB == nullptr && Step == nullptr && !TestIsLessOp && !TestIsStrictOp);
3795  if (!NewLCDecl || !NewLB)
3796  return true;
3797  LCDecl = getCanonicalDecl(NewLCDecl);
3798  LCRef = NewLCRefExpr;
3799  if (auto *CE = dyn_cast_or_null<CXXConstructExpr>(NewLB))
3800  if (const CXXConstructorDecl *Ctor = CE->getConstructor())
3801  if ((Ctor->isCopyOrMoveConstructor() ||
3802  Ctor->isConvertingConstructor(/*AllowExplicit=*/false)) &&
3803  CE->getNumArgs() > 0 && CE->getArg(0) != nullptr)
3804  NewLB = CE->getArg(0)->IgnoreParenImpCasts();
3805  LB = NewLB;
3806  return false;
3807 }
3808 
3809 bool OpenMPIterationSpaceChecker::setUB(Expr *NewUB, bool LessOp, bool StrictOp,
3810  SourceRange SR, SourceLocation SL) {
3811  // State consistency checking to ensure correct usage.
3812  assert(LCDecl != nullptr && LB != nullptr && UB == nullptr &&
3813  Step == nullptr && !TestIsLessOp && !TestIsStrictOp);
3814  if (!NewUB)
3815  return true;
3816  UB = NewUB;
3817  TestIsLessOp = LessOp;
3818  TestIsStrictOp = StrictOp;
3819  ConditionSrcRange = SR;
3820  ConditionLoc = SL;
3821  return false;
3822 }
3823 
3824 bool OpenMPIterationSpaceChecker::setStep(Expr *NewStep, bool Subtract) {
3825  // State consistency checking to ensure correct usage.
3826  assert(LCDecl != nullptr && LB != nullptr && Step == nullptr);
3827  if (!NewStep)
3828  return true;
3829  if (!NewStep->isValueDependent()) {
3830  // Check that the step is integer expression.
3831  SourceLocation StepLoc = NewStep->getBeginLoc();
3832  ExprResult Val = SemaRef.PerformOpenMPImplicitIntegerConversion(
3833  StepLoc, getExprAsWritten(NewStep));
3834  if (Val.isInvalid())
3835  return true;
3836  NewStep = Val.get();
3837 
3838  // OpenMP [2.6, Canonical Loop Form, Restrictions]
3839  // If test-expr is of form var relational-op b and relational-op is < or
3840  // <= then incr-expr must cause var to increase on each iteration of the
3841  // loop. If test-expr is of form var relational-op b and relational-op is
3842  // > or >= then incr-expr must cause var to decrease on each iteration of
3843  // the loop.
3844  // If test-expr is of form b relational-op var and relational-op is < or
3845  // <= then incr-expr must cause var to decrease on each iteration of the
3846  // loop. If test-expr is of form b relational-op var and relational-op is
3847  // > or >= then incr-expr must cause var to increase on each iteration of
3848  // the loop.
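  // Illustrative sketch (not from this file): rejected because the test uses
  // '<' but the increment decreases the loop variable:
  //   #pragma omp for
  //   for (int i = 0; i < n; i -= 1)
  //     body(i);   // 'body' is a placeholder function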
3849  llvm::APSInt Result;
3850  bool IsConstant = NewStep->isIntegerConstantExpr(Result, SemaRef.Context);
3851  bool IsUnsigned = !NewStep->getType()->hasSignedIntegerRepresentation();
3852  bool IsConstNeg =
3853  IsConstant && Result.isSigned() && (Subtract != Result.isNegative());
3854  bool IsConstPos =
3855  IsConstant && Result.isSigned() && (Subtract == Result.isNegative());
3856  bool IsConstZero = IsConstant && !Result.getBoolValue();
3857  if (UB && (IsConstZero ||
3858  (TestIsLessOp ? (IsConstNeg || (IsUnsigned && Subtract))
3859  : (IsConstPos || (IsUnsigned && !Subtract))))) {
3860  SemaRef.Diag(NewStep->getExprLoc(),
3861  diag::err_omp_loop_incr_not_compatible)
3862  << LCDecl << TestIsLessOp << NewStep->getSourceRange();
3863  SemaRef.Diag(ConditionLoc,
3864  diag::note_omp_loop_cond_requres_compatible_incr)
3865  << TestIsLessOp << ConditionSrcRange;
3866  return true;
3867  }
3868  if (TestIsLessOp == Subtract) {
3869  NewStep =
3870  SemaRef.CreateBuiltinUnaryOp(NewStep->getExprLoc(), UO_Minus, NewStep)
3871  .get();
3872  Subtract = !Subtract;
3873  }
3874  }
3875 
3876  Step = NewStep;
3877  SubtractStep = Subtract;
3878  return false;
3879 }
3880 
3881 bool OpenMPIterationSpaceChecker::checkAndSetInit(Stmt *S, bool EmitDiags) {
3882  // Check init-expr for canonical loop form and save loop counter
3883  // variable - #Var and its initialization value - #LB.
3884  // OpenMP [2.6] Canonical loop form. init-expr may be one of the following:
3885  // var = lb
3886  // integer-type var = lb
3887  // random-access-iterator-type var = lb
3888  // pointer-type var = lb
3889  //
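  // Illustrative examples of accepted init-exprs (not from this file):
  //   for (i = 0; ...)                 // var = lb
  //   for (int i = 0; ...)             // integer-type var = lb
  //   for (auto it = v.begin(); ...)   // random-access-iterator-type var = lb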
3890  if (!S) {
3891  if (EmitDiags) {
3892  SemaRef.Diag(DefaultLoc, diag::err_omp_loop_not_canonical_init);
3893  }
3894  return true;
3895  }
3896  if (auto *ExprTemp = dyn_cast<ExprWithCleanups>(S))
3897  if (!ExprTemp->cleanupsHaveSideEffects())
3898  S = ExprTemp->getSubExpr();
3899 
3900  InitSrcRange = S->getSourceRange();
3901  if (Expr *E = dyn_cast<Expr>(S))
3902  S = E->IgnoreParens();
3903  if (auto *BO = dyn_cast<BinaryOperator>(S)) {
3904  if (BO->getOpcode() == BO_Assign) {
3905  Expr *LHS = BO->getLHS()->IgnoreParens();
3906  if (auto *DRE = dyn_cast<DeclRefExpr>(LHS)) {
3907  if (auto *CED = dyn_cast<OMPCapturedExprDecl>(DRE->getDecl()))
3908  if (auto *ME = dyn_cast<MemberExpr>(getExprAsWritten(CED->getInit())))
3909  return setLCDeclAndLB(ME->getMemberDecl(), ME, BO->getRHS());
3910  return setLCDeclAndLB(DRE->getDecl(), DRE, BO->getRHS());
3911  }
3912  if (auto *ME = dyn_cast<MemberExpr>(LHS)) {
3913  if (ME->isArrow() &&
3914  isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts()))
3915  return setLCDeclAndLB(ME->getMemberDecl(), ME, BO->getRHS());
3916  }
3917  }
3918  } else if (auto *DS = dyn_cast<DeclStmt>(S)) {
3919  if (DS->isSingleDecl()) {
3920  if (auto *Var = dyn_cast_or_null<VarDecl>(DS->getSingleDecl())) {
3921  if (Var->hasInit() && !Var->getType()->isReferenceType()) {
3922  // Accept non-canonical init form here but emit ext. warning.
3923  if (Var->getInitStyle() != VarDecl::CInit && EmitDiags)
3924  SemaRef.Diag(S->getBeginLoc(),
3925  diag::ext_omp_loop_not_canonical_init)
3926  << S->getSourceRange();
3927  return setLCDeclAndLB(
3928  Var,
3929  buildDeclRefExpr(SemaRef, Var,
3930  Var->getType().getNonReferenceType(),
3931  DS->getBeginLoc()),
3932  Var->getInit());
3933  }
3934  }
3935  }
3936  } else if (auto *CE = dyn_cast<CXXOperatorCallExpr>(S)) {
3937  if (CE->getOperator() == OO_Equal) {
3938  Expr *LHS = CE->getArg(0);
3939  if (auto *DRE = dyn_cast<DeclRefExpr>(LHS)) {
3940  if (auto *CED = dyn_cast<OMPCapturedExprDecl>(DRE->getDecl()))
3941  if (auto *ME = dyn_cast<MemberExpr>(getExprAsWritten(CED->getInit())))
3942  return setLCDeclAndLB(ME->getMemberDecl(), ME, BO->getRHS());
3943  return setLCDeclAndLB(DRE->getDecl(), DRE, CE->getArg(1));
3944  }
3945  if (auto *ME = dyn_cast<MemberExpr>(LHS)) {
3946  if (ME->isArrow() &&
3947  isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts()))
3948  return setLCDeclAndLB(ME->getMemberDecl(), ME, BO->getRHS());
3949  }
3950  }
3951  }
3952 
3953  if (dependent() || SemaRef.CurContext->isDependentContext())
3954  return false;
3955  if (EmitDiags) {
3956  SemaRef.Diag(S->getBeginLoc(), diag::err_omp_loop_not_canonical_init)
3957  << S->getSourceRange();
3958  }
3959  return true;
3960 }
3961 
3962 /// Ignore parentheses, implicit casts, and copy constructors, and return the
3963 /// variable (which may be the loop variable) if possible.
3964 static const ValueDecl *getInitLCDecl(const Expr *E) {
3965  if (!E)
3966  return nullptr;
3967  E = getExprAsWritten(E);
3968  if (const auto *CE = dyn_cast_or_null<CXXConstructExpr>(E))
3969  if (const CXXConstructorDecl *Ctor = CE->getConstructor())
3970  if ((Ctor->isCopyOrMoveConstructor() ||
3971  Ctor->isConvertingConstructor(/*AllowExplicit=*/false)) &&
3972  CE->getNumArgs() > 0 && CE->getArg(0) != nullptr)
3973  E = CE->getArg(0)->IgnoreParenImpCasts();
3974  if (const auto *DRE = dyn_cast_or_null<DeclRefExpr>(E)) {
3975  if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()))
3976  return getCanonicalDecl(VD);
3977  }
3978  if (const auto *ME = dyn_cast_or_null<MemberExpr>(E))
3979  if (ME->isArrow() && isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts()))
3980  return getCanonicalDecl(ME->getMemberDecl());
3981  return nullptr;
3982 }
3983 
3984 bool OpenMPIterationSpaceChecker::checkAndSetCond(Expr *S) {
3985  // Check test-expr for canonical form, save upper-bound UB, flags for
3986  // less/greater and for strict/non-strict comparison.
3987  // OpenMP [2.6] Canonical loop form. Test-expr may be one of the following:
3988  // var relational-op b
3989  // b relational-op var
3990  //
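  // Illustrative examples of accepted test-exprs (not from this file):
  //   for (int i = 0; i < n; ++i)   // var relational-op b
  //   for (int i = 0; n > i; ++i)   // b relational-op var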
3991  if (!S) {
3992  SemaRef.Diag(DefaultLoc, diag::err_omp_loop_not_canonical_cond) << LCDecl;
3993  return true;
3994  }
3995  S = getExprAsWritten(S);
3996  SourceLocation CondLoc = S->getBeginLoc();
3997  if (auto *BO = dyn_cast<BinaryOperator>(S)) {
3998  if (BO->isRelationalOp()) {
3999  if (getInitLCDecl(BO->getLHS()) == LCDecl)
4000  return setUB(BO->getRHS(),
4001  (BO->getOpcode() == BO_LT || BO->getOpcode() == BO_LE),
4002  (BO->getOpcode() == BO_LT || BO->getOpcode() == BO_GT),
4003  BO->getSourceRange(), BO->getOperatorLoc());
4004  if (getInitLCDecl(BO->getRHS()) == LCDecl)
4005  return setUB(BO->getLHS(),
4006  (BO->getOpcode() == BO_GT || BO->getOpcode() == BO_GE),
4007  (BO->getOpcode() == BO_LT || BO->getOpcode() == BO_GT),
4008  BO->getSourceRange(), BO->getOperatorLoc());
4009  }
4010  } else if (auto *CE = dyn_cast<CXXOperatorCallExpr>(S)) {
4011  if (CE->getNumArgs() == 2) {
4012  auto Op = CE->getOperator();
4013  switch (Op) {
4014  case OO_Greater:
4015  case OO_GreaterEqual:
4016  case OO_Less:
4017  case OO_LessEqual:
4018  if (getInitLCDecl(CE->getArg(0)) == LCDecl)
4019  return setUB(CE->getArg(1), Op == OO_Less || Op == OO_LessEqual,
4020  Op == OO_Less || Op == OO_Greater, CE->getSourceRange(),
4021  CE->getOperatorLoc());
4022  if (getInitLCDecl(CE->getArg(1)) == LCDecl)
4023  return setUB(CE->getArg(0), Op == OO_Greater || Op == OO_GreaterEqual,
4024  Op == OO_Less || Op == OO_Greater, CE->getSourceRange(),
4025  CE->getOperatorLoc());
4026  break;
4027  default:
4028  break;
4029  }
4030  }
4031  }
4032  if (dependent() || SemaRef.CurContext->isDependentContext())
4033  return false;
4034  SemaRef.Diag(CondLoc, diag::err_omp_loop_not_canonical_cond)
4035  << S->getSourceRange() << LCDecl;
4036  return true;
4037 }
4038 
4039 bool OpenMPIterationSpaceChecker::checkAndSetIncRHS(Expr *RHS) {
4040  // RHS of canonical loop form increment can be:
4041  // var + incr
4042  // incr + var
4043  // var - incr
4044  //
4045  RHS = RHS->IgnoreParenImpCasts();
4046  if (auto *BO = dyn_cast<BinaryOperator>(RHS)) {
4047  if (BO->isAdditiveOp()) {
4048  bool IsAdd = BO->getOpcode() == BO_Add;
4049  if (getInitLCDecl(BO->getLHS()) == LCDecl)
4050  return setStep(BO->getRHS(), !IsAdd);
4051  if (IsAdd && getInitLCDecl(BO->getRHS()) == LCDecl)
4052  return setStep(BO->getLHS(), /*Subtract=*/false);
4053  }
4054  } else if (auto *CE = dyn_cast<CXXOperatorCallExpr>(RHS)) {
4055  bool IsAdd = CE->getOperator() == OO_Plus;
4056  if ((IsAdd || CE->getOperator() == OO_Minus) && CE->getNumArgs() == 2) {
4057  if (getInitLCDecl(CE->getArg(0)) == LCDecl)
4058  return setStep(CE->getArg(1), !IsAdd);
4059  if (IsAdd && getInitLCDecl(CE->getArg(1)) == LCDecl)
4060  return setStep(CE->getArg(0), /*Subtract=*/false);
4061  }
4062  }
4063  if (dependent() || SemaRef.CurContext->isDependentContext())
4064  return false;
4065  SemaRef.Diag(RHS->getBeginLoc(), diag::err_omp_loop_not_canonical_incr)
4066  << RHS->getSourceRange() << LCDecl;
4067  return true;
4068 }
4069 
4070 bool OpenMPIterationSpaceChecker::checkAndSetInc(Expr *S) {
4071  // Check incr-expr for canonical loop form and return true if it
4072  // does not conform.
4073  // OpenMP [2.6] Canonical loop form. Test-expr may be one of the following:
4074  // ++var
4075  // var++
4076  // --var
4077  // var--
4078  // var += incr
4079  // var -= incr
4080  // var = var + incr
4081  // var = incr + var
4082  // var = var - incr
4083  //
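  // Illustrative examples of accepted incr-exprs (not from this file):
  //   for (int i = 0; i < n; ++i)         // ++var
  //   for (int i = 0; i < n; i += 4)      // var += incr
  //   for (int i = 0; i < n; i = i + 4)   // var = var + incr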
4084  if (!S) {
4085  SemaRef.Diag(DefaultLoc, diag::err_omp_loop_not_canonical_incr) << LCDecl;
4086  return true;
4087  }
4088  if (auto *ExprTemp = dyn_cast<ExprWithCleanups>(S))
4089  if (!ExprTemp->cleanupsHaveSideEffects())
4090  S = ExprTemp->getSubExpr();
4091 
4092  IncrementSrcRange = S->getSourceRange();
4093  S = S->IgnoreParens();
4094  if (auto *UO = dyn_cast<UnaryOperator>(S)) {
4095  if (UO->isIncrementDecrementOp() &&
4096  getInitLCDecl(UO->getSubExpr()) == LCDecl)
4097  return setStep(SemaRef
4098  .ActOnIntegerConstant(UO->getBeginLoc(),
4099  (UO->isDecrementOp() ? -1 : 1))
4100  .get(),
4101  /*Subtract=*/false);
4102  } else if (auto *BO = dyn_cast<BinaryOperator>(S)) {
4103  switch (BO->getOpcode()) {
4104  case BO_AddAssign:
4105  case BO_SubAssign:
4106  if (getInitLCDecl(BO->getLHS()) == LCDecl)
4107  return setStep(BO->getRHS(), BO->getOpcode() == BO_SubAssign);
4108  break;
4109  case BO_Assign:
4110  if (getInitLCDecl(BO->getLHS()) == LCDecl)
4111  return checkAndSetIncRHS(BO->getRHS());
4112  break;
4113  default:
4114  break;
4115  }
4116  } else if (auto *CE = dyn_cast<CXXOperatorCallExpr>(S)) {
4117  switch (CE->getOperator()) {
4118  case OO_PlusPlus:
4119  case OO_MinusMinus:
4120  if (getInitLCDecl(CE->getArg(0)) == LCDecl)
4121  return setStep(SemaRef
4122  .ActOnIntegerConstant(
4123  CE->getBeginLoc(),
4124  ((CE->getOperator() == OO_MinusMinus) ? -1 : 1))
4125  .get(),
4126  /*Subtract=*/false);
4127  break;
4128  case OO_PlusEqual:
4129  case OO_MinusEqual:
4130  if (getInitLCDecl(CE->getArg(0)) == LCDecl)
4131  return setStep(CE->getArg(1), CE->getOperator() == OO_MinusEqual);
4132  break;
4133  case OO_Equal:
4134  if (getInitLCDecl(CE->getArg(0)) == LCDecl)
4135  return checkAndSetIncRHS(CE->getArg(1));
4136  break;
4137  default:
4138  break;
4139  }
4140  }
4141  if (dependent() || SemaRef.CurContext->isDependentContext())
4142  return false;
4143  SemaRef.Diag(S->getBeginLoc(), diag::err_omp_loop_not_canonical_incr)
4144  << S->getSourceRange() << LCDecl;
4145  return true;
4146 }
4147 
4148 static ExprResult
4149 tryBuildCapture(Sema &SemaRef, Expr *Capture,
4150  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) {
4151  if (SemaRef.CurContext->isDependentContext())
4152  return ExprResult(Capture);
4153  if (Capture->isEvaluatable(SemaRef.Context, Expr::SE_AllowSideEffects))
4154  return SemaRef.PerformImplicitConversion(
4155  Capture->IgnoreImpCasts(), Capture->getType(), Sema::AA_Converting,
4156  /*AllowExplicit=*/true);
4157  auto I = Captures.find(Capture);
4158  if (I != Captures.end())
4159  return buildCapture(SemaRef, Capture, I->second);
4160  DeclRefExpr *Ref = nullptr;
4161  ExprResult Res = buildCapture(SemaRef, Capture, Ref);
4162  Captures[Capture] = Ref;
4163  return Res;
4164 }
4165 
4166 /// Build the expression to calculate the number of iterations.
4167 Expr *OpenMPIterationSpaceChecker::buildNumIterations(
4168  Scope *S, const bool LimitedType,
4169  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) const {
4170  ExprResult Diff;
4171  QualType VarType = LCDecl->getType().getNonReferenceType();
4172  if (VarType->isIntegerType() || VarType->isPointerType() ||
4173  SemaRef.getLangOpts().CPlusPlus) {
4174  // Upper - Lower
4175  Expr *UBExpr = TestIsLessOp ? UB : LB;
4176  Expr *LBExpr = TestIsLessOp ? LB : UB;
4177  Expr *Upper = tryBuildCapture(SemaRef, UBExpr, Captures).get();
4178  Expr *Lower = tryBuildCapture(SemaRef, LBExpr, Captures).get();
4179  if (!Upper || !Lower)
4180  return nullptr;
4181 
4182  Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Sub, Upper, Lower);
4183 
4184  if (!Diff.isUsable() && VarType->getAsCXXRecordDecl()) {
4185  // BuildBinOp already emitted an error; this one points the user to the
4186  // upper and lower bounds and tells what is passed to 'operator-'.
4187  SemaRef.Diag(Upper->getBeginLoc(), diag::err_omp_loop_diff_cxx)
4188  << Upper->getSourceRange() << Lower->getSourceRange();
4189  return nullptr;
4190  }
4191  }
4192 
4193  if (!Diff.isUsable())
4194  return nullptr;
4195 
4196  // Upper - Lower [- 1]
4197  if (TestIsStrictOp)
4198  Diff = SemaRef.BuildBinOp(
4199  S, DefaultLoc, BO_Sub, Diff.get(),
4200  SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get());
4201  if (!Diff.isUsable())
4202  return nullptr;
4203 
4204  // Upper - Lower [- 1] + Step
4205  ExprResult NewStep = tryBuildCapture(SemaRef, Step, Captures);
4206  if (!NewStep.isUsable())
4207  return nullptr;
4208  Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Add, Diff.get(), NewStep.get());
4209  if (!Diff.isUsable())
4210  return nullptr;
4211 
4212  // Parentheses (for dumping/debugging purposes only).
4213  Diff = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, Diff.get());
4214  if (!Diff.isUsable())
4215  return nullptr;
4216 
4217  // (Upper - Lower [- 1] + Step) / Step
4218  Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Div, Diff.get(), NewStep.get());
4219  if (!Diff.isUsable())
4220  return nullptr;
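  // Worked example (illustrative): for (i = 0; i < 10; i += 3) yields
  // (10 - 0 - 1 + 3) / 3 = 4 iterations, covering i = 0, 3, 6, 9.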
4221 
4222  // OpenMP runtime requires 32-bit or 64-bit loop variables.
4223  QualType Type = Diff.get()->getType();
4224  ASTContext &C = SemaRef.Context;
4225  bool UseVarType = VarType->hasIntegerRepresentation() &&
4226  C.getTypeSize(Type) > C.getTypeSize(VarType);
4227  if (!Type->isIntegerType() || UseVarType) {
4228  unsigned NewSize =
4229  UseVarType ? C.getTypeSize(VarType) : C.getTypeSize(Type);
4230  bool IsSigned = UseVarType ? VarType->hasSignedIntegerRepresentation()
4231  : Type->hasSignedIntegerRepresentation();
4232  Type = C.getIntTypeForBitwidth(NewSize, IsSigned);
4233  if (!SemaRef.Context.hasSameType(Diff.get()->getType(), Type)) {
4234  Diff = SemaRef.PerformImplicitConversion(
4235  Diff.get(), Type, Sema::AA_Converting, /*AllowExplicit=*/true);
4236  if (!Diff.isUsable())
4237  return nullptr;
4238  }
4239  }
4240  if (LimitedType) {
4241  unsigned NewSize = (C.getTypeSize(Type) > 32) ? 64 : 32;
4242  if (NewSize != C.getTypeSize(Type)) {
4243  if (NewSize < C.getTypeSize(Type)) {
4244  assert(NewSize == 64 && "incorrect loop var size");
4245  SemaRef.Diag(DefaultLoc, diag::warn_omp_loop_64_bit_var)
4246  << InitSrcRange << ConditionSrcRange;
4247  }
4248  QualType NewType = C.getIntTypeForBitwidth(
4249  NewSize, Type->hasSignedIntegerRepresentation() ||
4250  C.getTypeSize(Type) < NewSize);
4251  if (!SemaRef.Context.hasSameType(Diff.get()->getType(), NewType)) {
4252  Diff = SemaRef.PerformImplicitConversion(Diff.get(), NewType,
4253  Sema::AA_Converting, true);
4254  if (!Diff.isUsable())
4255  return nullptr;
4256  }
4257  }
4258  }
4259 
4260  return Diff.get();
4261 }
4262 
4263 Expr *OpenMPIterationSpaceChecker::buildPreCond(
4264  Scope *S, Expr *Cond,
4265  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) const {
4266  // Try to build LB <op> UB, where <op> is <, >, <=, or >=.
4267  bool Suppress = SemaRef.getDiagnostics().getSuppressAllDiagnostics();
4268  SemaRef.getDiagnostics().setSuppressAllDiagnostics(/*Val=*/true);
4269 
4270  ExprResult NewLB = tryBuildCapture(SemaRef, LB, Captures);
4271  ExprResult NewUB = tryBuildCapture(SemaRef, UB, Captures);
4272  if (!NewLB.isUsable() || !NewUB.isUsable())
4273  return nullptr;
4274 
4275  ExprResult CondExpr =
4276  SemaRef.BuildBinOp(S, DefaultLoc,
4277  TestIsLessOp ? (TestIsStrictOp ? BO_LT : BO_LE)
4278  : (TestIsStrictOp ? BO_GT : BO_GE),
4279  NewLB.get(), NewUB.get());
4280  if (CondExpr.isUsable()) {
4281  if (!SemaRef.Context.hasSameUnqualifiedType(CondExpr.get()->getType(),
4282  SemaRef.Context.BoolTy))
4283  CondExpr = SemaRef.PerformImplicitConversion(
4284  CondExpr.get(), SemaRef.Context.BoolTy, /*Action=*/Sema::AA_Casting,
4285  /*AllowExplicit=*/true);
4286  }
4287  SemaRef.getDiagnostics().setSuppressAllDiagnostics(Suppress);
4288  // Otherwise use the original loop condition and evaluate it at runtime.
4289  return CondExpr.isUsable() ? CondExpr.get() : Cond;
4290 }
4291 
4292 /// Build reference expression to the counter to be used for codegen.
4293 DeclRefExpr *OpenMPIterationSpaceChecker::buildCounterVar(
4294  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures,
4295  DSAStackTy &DSA) const {
4296  auto *VD = dyn_cast<VarDecl>(LCDecl);
4297  if (!VD) {
4298  VD = SemaRef.isOpenMPCapturedDecl(LCDecl);
4299  DeclRefExpr *Ref = buildDeclRefExpr(
4300  SemaRef, VD, VD->getType().getNonReferenceType(), DefaultLoc);
4301  const DSAStackTy::DSAVarData Data =
4302  DSA.getTopDSA(LCDecl, /*FromParent=*/false);
4303  // If the loop control decl is explicitly marked as private, do not mark it
4304  // as captured again.
4305  if (!isOpenMPPrivate(Data.CKind) || !Data.RefExpr)
4306  Captures.insert(std::make_pair(LCRef, Ref));
4307  return Ref;
4308  }
4309  return buildDeclRefExpr(SemaRef, VD, VD->getType().getNonReferenceType(),
4310  DefaultLoc);
4311 }
4312 
4313 Expr *OpenMPIterationSpaceChecker::buildPrivateCounterVar() const {
4314  if (LCDecl && !LCDecl->isInvalidDecl()) {
4315  QualType Type = LCDecl->getType().getNonReferenceType();
4316  VarDecl *PrivateVar = buildVarDecl(
4317  SemaRef, DefaultLoc, Type, LCDecl->getName(),
4318  LCDecl->hasAttrs() ? &LCDecl->getAttrs() : nullptr,
4319  isa<VarDecl>(LCDecl)
4320  ? buildDeclRefExpr(SemaRef, cast<VarDecl>(LCDecl), Type, DefaultLoc)
4321  : nullptr);
4322  if (PrivateVar->isInvalidDecl())
4323  return nullptr;
4324  return buildDeclRefExpr(SemaRef, PrivateVar, Type, DefaultLoc);
4325  }
4326  return nullptr;
4327 }
4328 
4329 /// Build initialization of the counter to be used for codegen.
4330 Expr *OpenMPIterationSpaceChecker::buildCounterInit() const { return LB; }
4331 
4332 /// Build the step of the counter to be used for codegen.
4333 Expr *OpenMPIterationSpaceChecker::buildCounterStep() const { return Step; }
4334 
4335 Expr *OpenMPIterationSpaceChecker::buildOrderedLoopData(
4336  Scope *S, Expr *Counter,
4337  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures, SourceLocation Loc,
4338  Expr *Inc, OverloadedOperatorKind OOK) {
4339  Expr *Cnt = SemaRef.DefaultLvalueConversion(Counter).get();
4340  if (!Cnt)
4341  return nullptr;
4342  if (Inc) {
4343  assert((OOK == OO_Plus || OOK == OO_Minus) &&
4344  "Expected only + or - operations for depend clauses.");
4345  BinaryOperatorKind BOK = (OOK == OO_Plus) ? BO_Add : BO_Sub;
4346  Cnt = SemaRef.BuildBinOp(S, Loc, BOK, Cnt, Inc).get();
4347  if (!Cnt)
4348  return nullptr;
4349  }
4350  ExprResult Diff;
4351  QualType VarType = LCDecl->getType().getNonReferenceType();
4352  if (VarType->isIntegerType() || VarType->isPointerType() ||
4353  SemaRef.getLangOpts().CPlusPlus) {
4354  // Upper - Lower
4355  Expr *Upper =
4356  TestIsLessOp ? Cnt : tryBuildCapture(SemaRef, UB, Captures).get();
4357  Expr *Lower =
4358  TestIsLessOp ? tryBuildCapture(SemaRef, LB, Captures).get() : Cnt;
4359  if (!Upper || !Lower)
4360  return nullptr;
4361 
4362  Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Sub, Upper, Lower);
4363 
4364  if (!Diff.isUsable() && VarType->getAsCXXRecordDecl()) {
4365  // BuildBinOp already emitted an error; this one points the user to the
4366  // upper and lower bounds and tells what is passed to 'operator-'.
4367  SemaRef.Diag(Upper->getBeginLoc(), diag::err_omp_loop_diff_cxx)
4368  << Upper->getSourceRange() << Lower->getSourceRange();
4369  return nullptr;
4370  }
4371  }
4372 
4373  if (!Diff.isUsable())
4374  return nullptr;
4375 
4376  // Parentheses (for dumping/debugging purposes only).
4377  Diff = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, Diff.get());
4378  if (!Diff.isUsable())
4379  return nullptr;
4380 
4381  ExprResult NewStep = tryBuildCapture(SemaRef, Step, Captures);
4382  if (!NewStep.isUsable())
4383  return nullptr;
4384  // (Upper - Lower) / Step
4385  Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Div, Diff.get(), NewStep.get());
4386  if (!Diff.isUsable())
4387  return nullptr;
4388 
4389  return Diff.get();
4390 }
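// Illustrative sketch for the helper above (hypothetical loop and clause):
// for 'for (i = LB; i < UB; i += ST)' with 'depend(sink : i - 1)', the value
// built is roughly '((i - 1) - LB) / ST', the ordinal of the referenced
// iteration; for 'depend(source)' no offset is applied, giving '(i - LB) / ST'.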
4391 
4392 /// Iteration space of a single for loop.
4393 struct LoopIterationSpace final {
4394  /// Condition of the loop.
4395  Expr *PreCond = nullptr;
4396  /// This expression calculates the number of iterations in the loop.
4397  /// It is always possible to calculate it before starting the loop.
4398  Expr *NumIterations = nullptr;
4399  /// The loop counter variable.
4400  Expr *CounterVar = nullptr;
4401  /// Private loop counter variable.
4402  Expr *PrivateCounterVar = nullptr;
4403  /// This is the initializer for the initial value of #CounterVar.
4404  Expr *CounterInit = nullptr;
4405  /// This is the step for #CounterVar used to generate its update:
4406  /// #CounterVar = #CounterInit + #CounterStep * CurrentIteration.
4407  Expr *CounterStep = nullptr;
4408  /// Should step be subtracted?
4409  bool Subtract = false;
4410  /// Source range of the loop init.
4411  SourceRange InitSrcRange;
4412  /// Source range of the loop condition.
4413  SourceRange CondSrcRange;
4414  /// Source range of the loop increment.
4415  SourceRange IncSrcRange;
4416 };
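// Illustrative sketch (hypothetical values): for 'for (int i = 0; i < N; i += 2)'
// the space would roughly hold PreCond '0 < N', NumIterations '(N + 1) / 2',
// CounterVar 'i', CounterInit '0', CounterStep '2', and Subtract == false.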
4417 
4418 } // namespace
4419 
4420 void Sema::ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init) {
4421  assert(getLangOpts().OpenMP && "OpenMP is not active.");
4422  assert(Init && "Expected loop in canonical form.");
4423  unsigned AssociatedLoops = DSAStack->getAssociatedLoops();
4424  if (AssociatedLoops > 0 &&
4425  isOpenMPLoopDirective(DSAStack->getCurrentDirective())) {
4426  OpenMPIterationSpaceChecker ISC(*this, ForLoc);
4427  if (!ISC.checkAndSetInit(Init, /*EmitDiags=*/false)) {
4428  if (ValueDecl *D = ISC.getLoopDecl()) {
4429  auto *VD = dyn_cast<VarDecl>(D);
4430  if (!VD) {
4431  if (VarDecl *Private = isOpenMPCapturedDecl(D)) {
4432  VD = Private;
4433  } else {
4434  DeclRefExpr *Ref = buildCapture(*this, D, ISC.getLoopDeclRefExpr(),
4435  /*WithInit=*/false);
4436  VD = cast<VarDecl>(Ref->getDecl());
4437  }
4438  }
4439  DSAStack->addLoopControlVariable(D, VD);
4440  }
4441  }
4442  DSAStack->setAssociatedLoops(AssociatedLoops - 1);
4443  }
4444 }
4445 
4446 /// Called on a for stmt to check and extract its iteration space
4447 /// for further processing (such as collapsing).
4448 static bool checkOpenMPIterationSpace(
4449  OpenMPDirectiveKind DKind, Stmt *S, Sema &SemaRef, DSAStackTy &DSA,
4450  unsigned CurrentNestedLoopCount, unsigned NestedLoopCount,
4451  unsigned TotalNestedLoopCount, Expr *CollapseLoopCountExpr,
4452  Expr *OrderedLoopCountExpr,
4453  Sema::VarsWithInheritedDSAType &VarsWithImplicitDSA,
4454  LoopIterationSpace &ResultIterSpace,
4455  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) {
4456  // OpenMP [2.6, Canonical Loop Form]
4457  // for (init-expr; test-expr; incr-expr) structured-block
4458  auto *For = dyn_cast_or_null<ForStmt>(S);
4459  if (!For) {
4460  SemaRef.Diag(S->getBeginLoc(), diag::err_omp_not_for)
4461  << (CollapseLoopCountExpr != nullptr || OrderedLoopCountExpr != nullptr)
4462  << getOpenMPDirectiveName(DKind) << TotalNestedLoopCount
4463  << (CurrentNestedLoopCount > 0) << CurrentNestedLoopCount;
4464  if (TotalNestedLoopCount > 1) {
4465  if (CollapseLoopCountExpr && OrderedLoopCountExpr)
4466  SemaRef.Diag(DSA.getConstructLoc(),
4467  diag::note_omp_collapse_ordered_expr)
4468  << 2 << CollapseLoopCountExpr->getSourceRange()
4469  << OrderedLoopCountExpr->getSourceRange();
4470  else if (CollapseLoopCountExpr)
4471  SemaRef.Diag(CollapseLoopCountExpr->getExprLoc(),
4472  diag::note_omp_collapse_ordered_expr)
4473  << 0 << CollapseLoopCountExpr->getSourceRange();
4474  else
4475  SemaRef.Diag(OrderedLoopCountExpr->getExprLoc(),
4476  diag::note_omp_collapse_ordered_expr)
4477  << 1 << OrderedLoopCountExpr->getSourceRange();
4478  }
4479  return true;
4480  }
4481  assert(For->getBody());
4482 
4483  OpenMPIterationSpaceChecker ISC(SemaRef, For->getForLoc());
4484 
4485  // Check init.
4486  Stmt *Init = For->getInit();
4487  if (ISC.checkAndSetInit(Init))
4488  return true;
4489 
4490  bool HasErrors = false;
4491 
4492  // Check loop variable's type.
4493  if (ValueDecl *LCDecl = ISC.getLoopDecl()) {
4494  Expr *LoopDeclRefExpr = ISC.getLoopDeclRefExpr();
4495 
4496  // OpenMP [2.6, Canonical Loop Form]
4497  // Var is one of the following:
4498  // A variable of signed or unsigned integer type.
4499  // For C++, a variable of a random access iterator type.
4500  // For C, a variable of a pointer type.
4501  QualType VarType = LCDecl->getType().getNonReferenceType();
4502  if (!VarType->isDependentType() && !VarType->isIntegerType() &&
4503  !VarType->isPointerType() &&
4504  !(SemaRef.getLangOpts().CPlusPlus && VarType->isOverloadableType())) {
4505  SemaRef.Diag(Init->getBeginLoc(), diag::err_omp_loop_variable_type)
4506  << SemaRef.getLangOpts().CPlusPlus;
4507  HasErrors = true;
4508  }
4509 
4510  // OpenMP, 2.14.1.1 Data-sharing Attribute Rules for Variables Referenced in
4511  // a Construct
4512  // The loop iteration variable(s) in the associated for-loop(s) of a for or
4513  // parallel for construct is (are) private.
4514  // The loop iteration variable in the associated for-loop of a simd
4515  // construct with just one associated for-loop is linear with a
4516  // constant-linear-step that is the increment of the associated for-loop.
4517  // Exclude loop var from the list of variables with implicitly defined data
4518  // sharing attributes.
4519  VarsWithImplicitDSA.erase(LCDecl);
4520 
4521  // OpenMP [2.14.1.1, Data-sharing Attribute Rules for Variables Referenced
4522  // in a Construct, C/C++].
4523  // The loop iteration variable in the associated for-loop of a simd
4524  // construct with just one associated for-loop may be listed in a linear
4525  // clause with a constant-linear-step that is the increment of the
4526  // associated for-loop.
4527  // The loop iteration variable(s) in the associated for-loop(s) of a for or
4528  // parallel for construct may be listed in a private or lastprivate clause.
4529  DSAStackTy::DSAVarData DVar = DSA.getTopDSA(LCDecl, false);
4530  // If LoopVarRefExpr is nullptr it means the corresponding loop variable is
4531  // declared in the loop and it is predetermined as a private.
4532  OpenMPClauseKind PredeterminedCKind =
4533  isOpenMPSimdDirective(DKind)
4534  ? ((NestedLoopCount == 1) ? OMPC_linear : OMPC_lastprivate)
4535  : OMPC_private;
4536  if (((isOpenMPSimdDirective(DKind) && DVar.CKind != OMPC_unknown &&
4537  DVar.CKind != PredeterminedCKind) ||
4538  ((isOpenMPWorksharingDirective(DKind) || DKind == OMPD_taskloop ||
4539  isOpenMPDistributeDirective(DKind)) &&
4540  !isOpenMPSimdDirective(DKind) && DVar.CKind != OMPC_unknown &&
4541  DVar.CKind != OMPC_private && DVar.CKind != OMPC_lastprivate)) &&
4542  (DVar.CKind != OMPC_private || DVar.RefExpr != nullptr)) {
4543  SemaRef.Diag(Init->getBeginLoc(), diag::err_omp_loop_var_dsa)
4544  << getOpenMPClauseName(DVar.CKind) << getOpenMPDirectiveName(DKind)
4545  << getOpenMPClauseName(PredeterminedCKind);
4546  if (DVar.RefExpr == nullptr)
4547  DVar.CKind = PredeterminedCKind;
4548  reportOriginalDsa(SemaRef, &DSA, LCDecl, DVar, /*IsLoopIterVar=*/true);
4549  HasErrors = true;
4550  } else if (LoopDeclRefExpr != nullptr) {
4551  // Make the loop iteration variable private (for worksharing constructs),
4552  // linear (for simd directives with the only one associated loop) or
4553  // lastprivate (for simd directives with several collapsed or ordered
4554  // loops).
4555  if (DVar.CKind == OMPC_unknown)
4556  DVar = DSA.hasDSA(LCDecl, isOpenMPPrivate,
4557  [](OpenMPDirectiveKind) -> bool { return true; },
4558  /*FromParent=*/false);
4559  DSA.addDSA(LCDecl, LoopDeclRefExpr, PredeterminedCKind);
4560  }
4561 
4562  assert(isOpenMPLoopDirective(DKind) && "DSA for non-loop vars");
4563 
4564  // Check test-expr.
4565  HasErrors |= ISC.checkAndSetCond(For->getCond());
4566 
4567  // Check incr-expr.
4568  HasErrors |= ISC.checkAndSetInc(For->getInc());
4569  }
4570 
4571  if (ISC.dependent() || SemaRef.CurContext->isDependentContext() || HasErrors)
4572  return HasErrors;
4573 
4574  // Build the loop's iteration space representation.
4575  ResultIterSpace.PreCond =
4576  ISC.buildPreCond(DSA.getCurScope(), For->getCond(), Captures);
4577  ResultIterSpace.NumIterations = ISC.buildNumIterations(
4578  DSA.getCurScope(),
4579  (isOpenMPWorksharingDirective(DKind) ||
4580  isOpenMPTaskLoopDirective(DKind) || isOpenMPDistributeDirective(DKind)),
4581  Captures);
4582  ResultIterSpace.CounterVar = ISC.buildCounterVar(Captures, DSA);
4583  ResultIterSpace.PrivateCounterVar = ISC.buildPrivateCounterVar();
4584  ResultIterSpace.CounterInit = ISC.buildCounterInit();
4585  ResultIterSpace.CounterStep = ISC.buildCounterStep();
4586  ResultIterSpace.InitSrcRange = ISC.getInitSrcRange();
4587  ResultIterSpace.CondSrcRange = ISC.getConditionSrcRange();
4588  ResultIterSpace.IncSrcRange = ISC.getIncrementSrcRange();
4589  ResultIterSpace.Subtract = ISC.shouldSubtractStep();
4590 
4591  HasErrors |= (ResultIterSpace.PreCond == nullptr ||
4592  ResultIterSpace.NumIterations == nullptr ||
4593  ResultIterSpace.CounterVar == nullptr ||
4594  ResultIterSpace.PrivateCounterVar == nullptr ||
4595  ResultIterSpace.CounterInit == nullptr ||
4596  ResultIterSpace.CounterStep == nullptr);
4597  if (!HasErrors && DSA.isOrderedRegion()) {
4598  if (DSA.getOrderedRegionParam().second->getNumForLoops()) {
4599  if (CurrentNestedLoopCount <
4600  DSA.getOrderedRegionParam().second->getLoopNumIterations().size()) {
4601  DSA.getOrderedRegionParam().second->setLoopNumIterations(
4602  CurrentNestedLoopCount, ResultIterSpace.NumIterations);
4603  DSA.getOrderedRegionParam().second->setLoopCounter(
4604  CurrentNestedLoopCount, ResultIterSpace.CounterVar);
4605  }
4606  }
4607  for (auto &Pair : DSA.getDoacrossDependClauses()) {
4608  if (CurrentNestedLoopCount >= Pair.first->getNumLoops()) {
4609  // Erroneous case - clause has some problems.
4610  continue;
4611  }
4612  if (Pair.first->getDependencyKind() == OMPC_DEPEND_sink &&
4613  Pair.second.size() <= CurrentNestedLoopCount) {
4614  // Erroneous case - clause has some problems.
4615  Pair.first->setLoopData(CurrentNestedLoopCount, nullptr);
4616  continue;
4617  }
4618  Expr *CntValue;
4619  if (Pair.first->getDependencyKind() == OMPC_DEPEND_source)
4620  CntValue = ISC.buildOrderedLoopData(
4621  DSA.getCurScope(), ResultIterSpace.CounterVar, Captures,
4622  Pair.first->getDependencyLoc());
4623  else
4624  CntValue = ISC.buildOrderedLoopData(
4625  DSA.getCurScope(), ResultIterSpace.CounterVar, Captures,
4626  Pair.first->getDependencyLoc(),
4627  Pair.second[CurrentNestedLoopCount].first,
4628  Pair.second[CurrentNestedLoopCount].second);
4629  Pair.first->setLoopData(CurrentNestedLoopCount, CntValue);
4630  }
4631  }
4632 
4633  return HasErrors;
4634 }
4635 
4636 /// Build 'VarRef = Start'.
4637 static ExprResult
4638 buildCounterInit(Sema &SemaRef, Scope *S, SourceLocation Loc, ExprResult VarRef,
4639  ExprResult Start,
4640  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) {
4641  // Build 'VarRef = Start'.
4642  ExprResult NewStart = tryBuildCapture(SemaRef, Start.get(), Captures);
4643  if (!NewStart.isUsable())
4644  return ExprError();
4645  if (!SemaRef.Context.hasSameType(NewStart.get()->getType(),
4646  VarRef.get()->getType())) {
4647  NewStart = SemaRef.PerformImplicitConversion(
4648  NewStart.get(), VarRef.get()->getType(), Sema::AA_Converting,
4649  /*AllowExplicit=*/true);
4650  if (!NewStart.isUsable())
4651  return ExprError();
4652  }
4653 
4654  ExprResult Init =
4655  SemaRef.BuildBinOp(S, Loc, BO_Assign, VarRef.get(), NewStart.get());
4656  return Init;
4657 }
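// Illustrative sketch for the helper above: with VarRef being the private copy
// of counter 'i' and Start '0', this builds the assignment 'i = 0' on that
// copy, inserting an implicit conversion first if the types of Start and
// VarRef differ.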
4658 
4659 /// Build 'VarRef = Start + Iter * Step'.
4660 static ExprResult buildCounterUpdate(
4661  Sema &SemaRef, Scope *S, SourceLocation Loc, ExprResult VarRef,
4662  ExprResult Start, ExprResult Iter, ExprResult Step, bool Subtract,
4663  llvm::MapVector<const Expr *, DeclRefExpr *> *Captures = nullptr) {
4664  // Add parentheses (for debugging purposes only).
4665  Iter = SemaRef.ActOnParenExpr(Loc, Loc, Iter.get());
4666  if (!VarRef.isUsable() || !Start.isUsable() || !Iter.isUsable() ||
4667  !Step.isUsable())
4668  return ExprError();
4669 
4670  ExprResult NewStep = Step;
4671  if (Captures)
4672  NewStep = tryBuildCapture(SemaRef, Step.get(), *Captures);
4673  if (NewStep.isInvalid())
4674  return ExprError();
4675  ExprResult Update =
4676  SemaRef.BuildBinOp(S, Loc, BO_Mul, Iter.get(), NewStep.get());
4677  if (!Update.isUsable())
4678  return ExprError();
4679 
4680  // Try to build 'VarRef = Start, VarRef (+|-)= Iter * Step' or
4681  // 'VarRef = Start (+|-) Iter * Step'.
4682  ExprResult NewStart = Start;
4683  if (Captures)
4684  NewStart = tryBuildCapture(SemaRef, Start.get(), *Captures);
4685  if (NewStart.isInvalid())
4686  return ExprError();
4687 
4688  // First attempt: try to build 'VarRef = Start, VarRef += Iter * Step'.
4689  ExprResult SavedUpdate = Update;
4690  ExprResult UpdateVal;
4691  if (VarRef.get()->getType()->isOverloadableType() ||
4692  NewStart.get()->getType()->isOverloadableType() ||
4693  Update.get()->getType()->isOverloadableType()) {
4694  bool Suppress = SemaRef.getDiagnostics().getSuppressAllDiagnostics();
4695  SemaRef.getDiagnostics().setSuppressAllDiagnostics(/*Val=*/true);
4696  Update =
4697  SemaRef.BuildBinOp(S, Loc, BO_Assign, VarRef.get(), NewStart.get());
4698  if (Update.isUsable()) {
4699  UpdateVal =
4700  SemaRef.BuildBinOp(S, Loc, Subtract ? BO_SubAssign : BO_AddAssign,
4701  VarRef.get(), SavedUpdate.get());
4702  if (UpdateVal.isUsable()) {
4703  Update = SemaRef.CreateBuiltinBinOp(Loc, BO_Comma, Update.get(),
4704  UpdateVal.get());
4705  }
4706  }
4707  SemaRef.getDiagnostics().setSuppressAllDiagnostics(Suppress);
4708  }
4709 
4710  // Second attempt: try to build 'VarRef = Start (+|-) Iter * Step'.
4711  if (!Update.isUsable() || !UpdateVal.isUsable()) {
4712  Update = SemaRef.BuildBinOp(S, Loc, Subtract ? BO_Sub : BO_Add,
4713  NewStart.get(), SavedUpdate.get());
4714  if (!Update.isUsable())
4715  return ExprError();
4716 
4717  if (!SemaRef.Context.hasSameType(Update.get()->getType(),
4718  VarRef.get()->getType())) {
4719  Update = SemaRef.PerformImplicitConversion(
4720  Update.get(), VarRef.get()->getType(), Sema::AA_Converting, true);
4721  if (!Update.isUsable())
4722  return ExprError();
4723  }
4724 
4725  Update = SemaRef.BuildBinOp(S, Loc, BO_Assign, VarRef.get(), Update.get());
4726  }
4727  return Update;
4728 }
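// Illustrative sketch for the helper above (hypothetical counter): with Start
// '0', Step '2' and iteration number 'IV', this builds roughly
// 'i = 0 + IV * 2'; for class-type counters with overloaded operators it tries
// 'i = 0, i += IV * 2' first, and with Subtract set the additions become
// subtractions.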
4729 
4730 /// Convert integer expression \a E to make it have at least \a Bits
4731 /// bits.
4732 static ExprResult widenIterationCount(unsigned Bits, Expr *E, Sema &SemaRef) {
4733  if (E == nullptr)
4734  return ExprError();
4735  ASTContext &C = SemaRef.Context;
4736  QualType OldType = E->getType();
4737  unsigned HasBits = C.getTypeSize(OldType);
4738  if (HasBits >= Bits)
4739  return ExprResult(E);
4740  // OK to convert to signed, because new type has more bits than old.
4741  QualType NewType = C.getIntTypeForBitwidth(Bits, /* Signed */ true);
4742  return SemaRef.PerformImplicitConversion(E, NewType, Sema::AA_Converting,
4743  true);
4744 }
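// Illustrative sketch for the helper above: widening a 16-bit trip count with
// Bits == 32 yields an implicit conversion to a signed 32-bit integer type; an
// expression that already has at least Bits bits is returned unchanged.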
4745 
4746 /// Check if the given expression \a E is a constant integer that fits
4747 /// into \a Bits bits.
4748 static bool fitsInto(unsigned Bits, bool Signed, const Expr *E, Sema &SemaRef) {
4749  if (E == nullptr)
4750  return false;
4751  llvm::APSInt Result;
4752  if (E->isIntegerConstantExpr(Result, SemaRef.Context))
4753  return Signed ? Result.isSignedIntN(Bits) : Result.isIntN(Bits);
4754  return false;
4755 }
4756 
4757 /// Build preinits statement for the given declarations.
4758 static Stmt *buildPreInits(ASTContext &Context,
4759  MutableArrayRef<Decl *> PreInits) {
4760  if (!PreInits.empty()) {
4761  return new (Context) DeclStmt(
4762  DeclGroupRef::Create(Context, PreInits.begin(), PreInits.size()),
4763  SourceLocation(), SourceLocation());
4764  }
4765  return nullptr;
4766 }
4767 
4768 /// Build preinits statement for the given declarations.
4769 static Stmt *
4770 buildPreInits(ASTContext &Context,
4771  const llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) {
4772  if (!Captures.empty()) {
4773  SmallVector<Decl *, 16> PreInits;
4774  for (const auto &Pair : Captures)
4775  PreInits.push_back(Pair.second->getDecl());
4776  return buildPreInits(Context, PreInits);
4777  }
4778  return nullptr;
4779 }
4780 
4781 /// Build postupdate expression for the given list of postupdates expressions.
4782 static Expr *buildPostUpdate(Sema &S, ArrayRef<Expr *> PostUpdates) {
4783  Expr *PostUpdate = nullptr;
4784  if (!PostUpdates.empty()) {
4785  for (Expr *E : PostUpdates) {
4786  Expr *ConvE = S.BuildCStyleCastExpr(
4787  E->getExprLoc(),
4788  S.Context.getTrivialTypeSourceInfo(S.Context.VoidTy),
4789  E->getExprLoc(), E)
4790  .get();
4791  PostUpdate = PostUpdate
4792  ? S.CreateBuiltinBinOp(ConvE->getExprLoc(), BO_Comma,
4793  PostUpdate, ConvE)
4794  .get()
4795  : ConvE;
4796  }
4797  }
4798  return PostUpdate;
4799 }
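// Illustrative sketch for the helper above (hypothetical expressions):
// post-updates E1 and E2 are folded into the single expression
// '(void)E1, (void)E2' - each one is cast to void and the results are chained
// with the comma operator.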
4800 
4801 /// Called on a for stmt to check itself and nested loops (if any).
4802 /// \return Returns 0 if one of the collapsed statements is not a canonical for
4803 /// loop, otherwise returns the number of collapsed loops.
4804 static unsigned
4805 checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
4806  Expr *OrderedLoopCountExpr, Stmt *AStmt, Sema &SemaRef,
4807  DSAStackTy &DSA,
4808  Sema::VarsWithInheritedDSAType &VarsWithImplicitDSA,
4809  OMPLoopDirective::HelperExprs &Built) {
4810  unsigned NestedLoopCount = 1;
4811  if (CollapseLoopCountExpr) {
4812  // Found 'collapse' clause - calculate collapse number.
4813  llvm::APSInt Result;
4814  if (CollapseLoopCountExpr->EvaluateAsInt(Result, SemaRef.getASTContext()))
4815  NestedLoopCount = Result.getLimitedValue();
4816  }
4817  unsigned OrderedLoopCount = 1;
4818  if (OrderedLoopCountExpr) {
4819  // Found 'ordered' clause - calculate the number of ordered loops.
4820  llvm::APSInt Result;
4821  if (OrderedLoopCountExpr->EvaluateAsInt(Result, SemaRef.getASTContext())) {
4822  if (Result.getLimitedValue() < NestedLoopCount) {
4823  SemaRef.Diag(OrderedLoopCountExpr->getExprLoc(),
4824  diag::err_omp_wrong_ordered_loop_count)
4825  << OrderedLoopCountExpr->getSourceRange();
4826  SemaRef.Diag(CollapseLoopCountExpr->getExprLoc(),
4827  diag::note_collapse_loop_count)
4828  << CollapseLoopCountExpr->getSourceRange();
4829  }
4830  OrderedLoopCount = Result.getLimitedValue();
4831  }
4832  }
4833  // This is a helper routine for loop directives (e.g., 'for', 'simd',
4834  // 'for simd', etc.).
4835  llvm::MapVector<const Expr *, DeclRefExpr *> Captures;
4836  SmallVector<LoopIterationSpace, 4> IterSpaces;
4837  IterSpaces.resize(std::max(OrderedLoopCount, NestedLoopCount));
4838  Stmt *CurStmt = AStmt->IgnoreContainers(/* IgnoreCaptured */ true);
4839  for (unsigned Cnt = 0; Cnt < NestedLoopCount; ++Cnt) {
4840  if (checkOpenMPIterationSpace(
4841  DKind, CurStmt, SemaRef, DSA, Cnt, NestedLoopCount,
4842  std::max(OrderedLoopCount, NestedLoopCount), CollapseLoopCountExpr,
4843  OrderedLoopCountExpr, VarsWithImplicitDSA, IterSpaces[Cnt],
4844  Captures))
4845  return 0;
4846  // Move on to the next nested for loop, or to the loop body.
4847  // OpenMP [2.8.1, simd construct, Restrictions]
4848  // All loops associated with the construct must be perfectly nested; that
4849  // is, there must be no intervening code nor any OpenMP directive between
4850  // any two loops.
4851  CurStmt = cast<ForStmt>(CurStmt)->getBody()->IgnoreContainers();
4852  }
4853  for (unsigned Cnt = NestedLoopCount; Cnt < OrderedLoopCount; ++Cnt) {
4854  if (checkOpenMPIterationSpace(
4855  DKind, CurStmt, SemaRef, DSA, Cnt, NestedLoopCount,
4856  std::max(OrderedLoopCount, NestedLoopCount), CollapseLoopCountExpr,
4857  OrderedLoopCountExpr, VarsWithImplicitDSA, IterSpaces[Cnt],
4858  Captures))
4859  return 0;
4860  if (Cnt > 0 && IterSpaces[Cnt].CounterVar) {
4861  // Handle initialization of captured loop iterator variables.
4862  auto *DRE = cast<DeclRefExpr>(IterSpaces[Cnt].CounterVar);
4863  if (isa<OMPCapturedExprDecl>(DRE->getDecl())) {
4864  Captures[DRE] = DRE;
4865  }
4866  }
4867  // Move on to the next nested for loop, or to the loop body.
4868  // OpenMP [2.8.1, simd construct, Restrictions]
4869  // All loops associated with the construct must be perfectly nested; that
4870  // is, there must be no intervening code nor any OpenMP directive between
4871  // any two loops.
4872  CurStmt = cast<ForStmt>(CurStmt)->getBody()->IgnoreContainers();
4873  }
4874 
4875  Built.clear(/* size */ NestedLoopCount);
4876 
4877  if (SemaRef.CurContext->isDependentContext())
4878  return NestedLoopCount;
4879 
4880  // An example of what is generated for the following code:
4881  //
4882  // #pragma omp simd collapse(2) ordered(2)
4883  // for (i = 0; i < NI; ++i)
4884  // for (k = 0; k < NK; ++k)
4885  // for (j = J0; j < NJ; j+=2) {
4886  // <loop body>
4887  // }
4888  //
4889  // We generate the code below.
4890  // Note: the loop body may be outlined in CodeGen.
4891  // Note: some counters may be C++ classes, operator- is used to find number of
4892  // iterations and operator+= to calculate counter value.
4893  // Note: decltype(NumIterations) must be integer type (in 'omp for', only i32
4894  // or i64 is currently supported).
4895  //
4896  // #define NumIterations (NI * ((NJ - J0 - 1 + 2) / 2))
4897  // for (int[32|64]_t IV = 0; IV < NumIterations; ++IV ) {
4898  // .local.i = IV / ((NJ - J0 - 1 + 2) / 2);
4899  // .local.j = J0 + (IV % ((NJ - J0 - 1 + 2) / 2)) * 2;
4900  // // similar updates for vars in clauses (e.g. 'linear')
4901  // <loop body (using local i and j)>
4902  // }
4903  // i = NI; // assign final values of counters
4904  // j = NJ;
4905  //
4906 
4907  // Last iteration number is (I1 * I2 * ... In) - 1, where I1, I2 ... In are
4908  // the iteration counts of the collapsed for loops.
4909  // Precondition tests if there is at least one iteration (all conditions are
4910  // true).
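  // Illustrative sketch (hypothetical trip counts): with collapse(2) and
  // per-loop iteration counts 4 and 5, the last iteration number is
  // 4 * 5 - 1 == 19; both candidates fit into 32 bits, so the 32-bit
  // variant is selected below.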
4911  auto PreCond = ExprResult(IterSpaces[0].PreCond);
4912  Expr *N0 = IterSpaces[0].NumIterations;
4913  ExprResult LastIteration32 =
4914  widenIterationCount(/*Bits=*/32,
4915  SemaRef
4916  .PerformImplicitConversion(
4917  N0->IgnoreImpCasts(), N0->getType(),
4918  Sema::AA_Converting, /*AllowExplicit=*/true)
4919  .get(),
4920  SemaRef);
4921  ExprResult LastIteration64 = widenIterationCount(
4922  /*Bits=*/64,
4923  SemaRef
4924  .PerformImplicitConversion(N0->IgnoreImpCasts(), N0->getType(),
4925  Sema::AA_Converting,
4926  /*AllowExplicit=*/true)
4927  .get(),
4928  SemaRef);
4929 
4930  if (!LastIteration32.isUsable() || !LastIteration64.isUsable())
4931  return NestedLoopCount;
4932 
4933  ASTContext &C = SemaRef.Context;
4934  bool AllCountsNeedLessThan32Bits = C.getTypeSize(N0->getType()) < 32;
4935 
4936  Scope *CurScope = DSA.getCurScope();
4937  for (unsigned Cnt = 1; Cnt < NestedLoopCount; ++Cnt) {
4938  if (PreCond.isUsable()) {
4939  PreCond =
4940  SemaRef.BuildBinOp(CurScope, PreCond.get()->getExprLoc(), BO_LAnd,
4941  PreCond.get(), IterSpaces[Cnt].PreCond);
4942  }
4943  Expr *N = IterSpaces[Cnt].NumIterations;
4944  SourceLocation Loc = N->getExprLoc();
4945  AllCountsNeedLessThan32Bits &= C.getTypeSize(N->getType()) < 32;
4946  if (LastIteration32.isUsable())
4947  LastIteration32 = SemaRef.BuildBinOp(
4948  CurScope, Loc, BO_Mul, LastIteration32.get(),
4949  SemaRef
4950  .PerformImplicitConversion(N->IgnoreImpCasts(), N->getType(),
4951  Sema::AA_Converting,
4952  /*AllowExplicit=*/true)
4953  .get());
4954  if (LastIteration64.isUsable())
4955  LastIteration64 = SemaRef.BuildBinOp(
4956  CurScope, Loc, BO_Mul, LastIteration64.get(),
4957  SemaRef
4958  .PerformImplicitConversion(N->IgnoreImpCasts(), N->getType(),
4959  Sema::AA_Converting,
4960  /*AllowExplicit=*/true)
4961  .get());
4962  }
4963 
4964  // Choose either the 32-bit or 64-bit version.
4965  ExprResult LastIteration = LastIteration64;
4966  if (LastIteration32.isUsable() &&
4967  C.getTypeSize(LastIteration32.get()->getType()) == 32 &&
4968  (AllCountsNeedLessThan32Bits || NestedLoopCount == 1 ||
4969  fitsInto(
4970  /*Bits=*/32,
4971  LastIteration32.get()->getType()->hasSignedIntegerRepresentation(),
4972  LastIteration64.get(), SemaRef)))
4973  LastIteration = LastIteration32;
4974  QualType VType = LastIteration.get()->getType();
4975  QualType RealVType = VType;
4976  QualType StrideVType = VType;
4977  if (isOpenMPTaskLoopDirective(DKind)) {
4978  VType =
4979  SemaRef.Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0);
4980  StrideVType =
4981  SemaRef.Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
4982  }
4983 
4984  if (!LastIteration.isUsable())
4985  return 0;
4986 
4987  // Save the number of iterations.
4988  ExprResult NumIterations = LastIteration;
4989  {
4990  LastIteration = SemaRef.BuildBinOp(
4991  CurScope, LastIteration.get()->getExprLoc(), BO_Sub,
4992  LastIteration.get(),
4993  SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get());
4994  if (!LastIteration.isUsable())
4995  return 0;
4996  }
4997 
4998  // Calculate the last iteration number beforehand instead of doing this on
4999  // each iteration. Do not do this if the number of iterations may be constant-folded.
5000  llvm::APSInt Result;
5001  bool IsConstant =
5002  LastIteration.get()->isIntegerConstantExpr(Result, SemaRef.Context);
5003  ExprResult CalcLastIteration;
5004  if (!IsConstant) {
5005  ExprResult SaveRef =
5006  tryBuildCapture(SemaRef, LastIteration.get(), Captures);
5007  LastIteration = SaveRef;
5008 
5009  // Prepare SaveRef + 1.
5010  NumIterations = SemaRef.BuildBinOp(
5011  CurScope, SaveRef.get()->getExprLoc(), BO_Add, SaveRef.get(),
5012  SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get());
5013  if (!NumIterations.isUsable())
5014  return 0;
5015  }
5016 
5017  SourceLocation InitLoc = IterSpaces[0].InitSrcRange.getBegin();
5018 
5019  // Build variables passed into runtime, necessary for worksharing directives.
5020  ExprResult LB, UB, IL, ST, EUB, CombLB, CombUB, PrevLB, PrevUB, CombEUB;
5021  if (isOpenMPWorksharingDirective(DKind) || isOpenMPTaskLoopDirective(DKind) ||
5022  isOpenMPDistributeDirective(DKind)) {
5023  // Lower bound variable, initialized with zero.
5024  VarDecl *LBDecl = buildVarDecl(SemaRef, InitLoc, VType, ".omp.lb");
5025  LB = buildDeclRefExpr(SemaRef, LBDecl, VType, InitLoc);
5026  SemaRef.AddInitializerToDecl(LBDecl,
5027  SemaRef.ActOnIntegerConstant(InitLoc, 0).get(),
5028  /*DirectInit*/ false);
5029 
5030  // Upper bound variable, initialized with last iteration number.
5031  VarDecl *UBDecl = buildVarDecl(SemaRef, InitLoc, VType, ".omp.ub");
5032  UB = buildDeclRefExpr(SemaRef, UBDecl, VType, InitLoc);
5033  SemaRef.AddInitializerToDecl(UBDecl, LastIteration.get(),
5034  /*DirectInit*/ false);
5035 
5036  // A 32-bit flag variable in which the runtime returns 1 for the last iteration.
5037  // This will be used to implement clause 'lastprivate'.
5038  QualType Int32Ty = SemaRef.Context.getIntTypeForBitwidth(32, true);
5039  VarDecl *ILDecl = buildVarDecl(SemaRef, InitLoc, Int32Ty, ".omp.is_last");
5040  IL = buildDeclRefExpr(SemaRef, ILDecl, Int32Ty, InitLoc);
5041  SemaRef.AddInitializerToDecl(ILDecl,
5042  SemaRef.ActOnIntegerConstant(InitLoc, 0).get(),
5043  /*DirectInit*/ false);
5044 
5045  // Stride variable returned by runtime (we initialize it to 1 by default).
5046  VarDecl *STDecl =
5047  buildVarDecl(SemaRef, InitLoc, StrideVType, ".omp.stride");
5048  ST = buildDeclRefExpr(SemaRef, STDecl, StrideVType, InitLoc);
5049  SemaRef.AddInitializerToDecl(STDecl,
5050  SemaRef.ActOnIntegerConstant(InitLoc, 1).get(),
5051  /*DirectInit*/ false);
5052 
5053  // Build expression: UB = min(UB, LastIteration)
5054  // It is necessary for CodeGen of directives with static scheduling.
5055  ExprResult IsUBGreater = SemaRef.BuildBinOp(CurScope, InitLoc, BO_GT,
5056  UB.get(), LastIteration.get());
5057  ExprResult CondOp = SemaRef.ActOnConditionalOp(
5058  LastIteration.get()->getExprLoc(), InitLoc, IsUBGreater.get(),
5059  LastIteration.get(), UB.get());
5060  EUB = SemaRef.BuildBinOp(CurScope, InitLoc, BO_Assign, UB.get(),
5061  CondOp.get());
5062  EUB = SemaRef.ActOnFinishFullExpr(EUB.get());
5063 
5064  // If we have a combined directive that combines 'distribute', 'for' or
5065  // 'simd' we need to be able to access the bounds of the schedule of the
5066  // enclosing region. E.g. in 'distribute parallel for' the bounds obtained
5067  // by scheduling 'distribute' have to be passed to the schedule of 'for'.
5068  if (isOpenMPLoopBoundSharingDirective(DKind)) {
5069  // Lower bound variable, initialized with zero.
5070  VarDecl *CombLBDecl =
5071  buildVarDecl(SemaRef, InitLoc, VType, ".omp.comb.lb");
5072  CombLB = buildDeclRefExpr(SemaRef, CombLBDecl, VType, InitLoc);
5073  SemaRef.AddInitializerToDecl(
5074  CombLBDecl, SemaRef.ActOnIntegerConstant(InitLoc, 0).get(),
5075  /*DirectInit*/ false);
5076 
5077  // Upper bound variable, initialized with last iteration number.
5078  VarDecl *CombUBDecl =
5079  buildVarDecl(SemaRef, InitLoc, VType, ".omp.comb.ub");
5080  CombUB = buildDeclRefExpr(SemaRef, CombUBDecl, VType, InitLoc);
5081  SemaRef.AddInitializerToDecl(CombUBDecl, LastIteration.get(),
5082  /*DirectInit*/ false);
5083 
5084  ExprResult CombIsUBGreater = SemaRef.BuildBinOp(
5085  CurScope, InitLoc, BO_GT, CombUB.get(), LastIteration.get());
5086  ExprResult CombCondOp =
5087  SemaRef.ActOnConditionalOp(InitLoc, InitLoc, CombIsUBGreater.get(),
5088  LastIteration.get(), CombUB.get());
5089  CombEUB = SemaRef.BuildBinOp(CurScope, InitLoc, BO_Assign, CombUB.get(),
5090  CombCondOp.get());
5091  CombEUB = SemaRef.ActOnFinishFullExpr(CombEUB.get());
5092 
5093  const CapturedDecl *CD = cast<CapturedStmt>(AStmt)->getCapturedDecl();
5094  // We expect to have at least 2 more parameters than the 'parallel'
5095  // directive does - the lower and upper bounds of the previous schedule.
5096  assert(CD->getNumParams() >= 4 &&
5097  "Unexpected number of parameters in loop combined directive");
5098 
5099  // Set the proper type for the bounds given what we learned from the
5100  // enclosed loops.
5101  ImplicitParamDecl *PrevLBDecl = CD->getParam(/*PrevLB=*/2);
5102  ImplicitParamDecl *PrevUBDecl = CD->getParam(/*PrevUB=*/3);
5103 
5104  // Previous lower and upper bounds are obtained from the region
5105  // parameters.
5106  PrevLB =
5107  buildDeclRefExpr(SemaRef, PrevLBDecl, PrevLBDecl->getType(), InitLoc);
5108  PrevUB =
5109  buildDeclRefExpr(SemaRef, PrevUBDecl, PrevUBDecl->getType(), InitLoc);
5110  }
5111  }
5112 
5113  // Build the iteration variable and its initialization before loop.
5114  ExprResult IV;
5115  ExprResult Init, CombInit;
5116  {
5117  VarDecl *IVDecl = buildVarDecl(SemaRef, InitLoc, RealVType, ".omp.iv");
5118  IV = buildDeclRefExpr(SemaRef, IVDecl, RealVType, InitLoc);
5119  Expr *RHS =
5120  (isOpenMPWorksharingDirective(DKind) ||
5121  isOpenMPTaskLoopDirective(DKind) || isOpenMPDistributeDirective(DKind))
5122  ? LB.get()
5123  : SemaRef.ActOnIntegerConstant(SourceLocation(), 0).get();
5124  Init = SemaRef.BuildBinOp(CurScope, InitLoc, BO_Assign, IV.get(), RHS);
5125  Init = SemaRef.ActOnFinishFullExpr(Init.get());
5126 
5127  if (isOpenMPLoopBoundSharingDirective(DKind)) {
5128  Expr *CombRHS =
5129  (isOpenMPWorksharingDirective(DKind) ||
5130  isOpenMPTaskLoopDirective(DKind) ||
5131  isOpenMPDistributeDirective(DKind))
5132  ? CombLB.get()
5133  : SemaRef.ActOnIntegerConstant(SourceLocation(), 0).get();
5134  CombInit =
5135  SemaRef.BuildBinOp(CurScope, InitLoc, BO_Assign, IV.get(), CombRHS);
5136  CombInit = SemaRef.ActOnFinishFullExpr(CombInit.get());
5137  }
5138  }
5139 
5140  // Loop condition (IV < NumIterations) or (IV <= UB) for worksharing loops.
5141  SourceLocation CondLoc = AStmt->getBeginLoc();
5142  ExprResult Cond =
5143  (isOpenMPWorksharingDirective(DKind) ||
5144  isOpenMPTaskLoopDirective(DKind) || isOpenMPDistributeDirective(DKind))
5145  ? SemaRef.BuildBinOp(CurScope, CondLoc, BO_LE, IV.get(), UB.get())
5146  : SemaRef.BuildBinOp(CurScope, CondLoc, BO_LT, IV.get(),
5147  NumIterations.get());
5148  ExprResult CombCond;
5149  if (isOpenMPLoopBoundSharingDirective(DKind)) {
5150  CombCond =
5151  SemaRef.BuildBinOp(CurScope, CondLoc, BO_LE, IV.get(), CombUB.get());
5152  }
5153  // Loop increment (IV = IV + 1)
5154  SourceLocation IncLoc = AStmt->getBeginLoc();
5155  ExprResult Inc =
5156  SemaRef.BuildBinOp(CurScope, IncLoc, BO_Add, IV.get(),
5157  SemaRef.ActOnIntegerConstant(IncLoc, 1).get());
5158  if (!Inc.isUsable())
5159  return 0;
5160  Inc = SemaRef.BuildBinOp(CurScope, IncLoc, BO_Assign, IV.get(), Inc.get());
5161  Inc = SemaRef.ActOnFinishFullExpr(Inc.get());
5162  if (!Inc.isUsable())
5163  return 0;
5164 
5165  // Increments for worksharing loops (LB = LB + ST; UB = UB + ST).
5166  // Used for directives with static scheduling.
5167  // In a combined construct, add a combined version that uses the CombLB and
5168  // CombUB base variables for the update.
5169  ExprResult NextLB, NextUB, CombNextLB, CombNextUB;
5170  if (isOpenMPWorksharingDirective(DKind) || isOpenMPTaskLoopDirective(DKind) ||
5171  isOpenMPDistributeDirective(DKind)) {
5172  // LB + ST
5173  NextLB = SemaRef.BuildBinOp(CurScope, IncLoc, BO_Add, LB.get(), ST.get());
5174  if (!NextLB.isUsable())
5175  return 0;
5176  // LB = LB + ST
5177  NextLB =
5178  SemaRef.BuildBinOp(CurScope, IncLoc, BO_Assign, LB.get(), NextLB.get());
5179  NextLB = SemaRef.ActOnFinishFullExpr(NextLB.get());
5180  if (!NextLB.isUsable())
5181  return 0;
5182  // UB + ST
5183  NextUB = SemaRef.BuildBinOp(CurScope, IncLoc, BO_Add, UB.get(), ST.get());
5184  if (!NextUB.isUsable())
5185  return 0;
5186  // UB = UB + ST
5187  NextUB =
5188  SemaRef.BuildBinOp(CurScope, IncLoc, BO_Assign, UB.get(), NextUB.get());
5189  NextUB = SemaRef.ActOnFinishFullExpr(NextUB.get());
5190  if (!NextUB.isUsable())
5191  return 0;
5192  if (isOpenMPLoopBoundSharingDirective(DKind)) {
5193  CombNextLB =
5194  SemaRef.BuildBinOp(CurScope, IncLoc, BO_Add, CombLB.get(), ST.get());
5195  if (!CombNextLB.isUsable())
5196  return 0;
5197  // LB = LB + ST
5198  CombNextLB = SemaRef.BuildBinOp(CurScope, IncLoc, BO_Assign, CombLB.get(),
5199  CombNextLB.get());
5200  CombNextLB = SemaRef.ActOnFinishFullExpr(CombNextLB.get());
5201  if (!CombNextLB.isUsable())
5202  return 0;
5203  // UB + ST
5204  CombNextUB =
5205  SemaRef.BuildBinOp(CurScope, IncLoc, BO_Add, CombUB.get(), ST.get());
5206  if (!CombNextUB.isUsable())
5207  return 0;
5208  // UB = UB + ST
5209  CombNextUB = SemaRef.BuildBinOp(CurScope, IncLoc, BO_Assign, CombUB.get(),
5210  CombNextUB.get());
5211  CombNextUB = SemaRef.ActOnFinishFullExpr(CombNextUB.get());
5212  if (!CombNextUB.isUsable())
5213  return 0;
5214  }
5215  }
5216 
5217  // Create the increment expression for the distribute loop when combined in
5218  // the same directive with 'for', as IV = IV + ST; ensure the upper bound
5219  // expression is based on PrevUB instead of NumIterations - used to implement
5220  // 'for' when found in combination with 'distribute', as in 'distribute parallel for'.
5221  SourceLocation DistIncLoc = AStmt->getBeginLoc();
5222  ExprResult DistCond, DistInc, PrevEUB;
5223  if (isOpenMPLoopBoundSharingDirective(DKind)) {
5224  DistCond = SemaRef.BuildBinOp(CurScope, CondLoc, BO_LE, IV.get(), UB.get());
5225  assert(DistCond.isUsable() && "distribute cond expr was not built");
5226 
5227  DistInc =
5228  SemaRef.BuildBinOp(CurScope, DistIncLoc, BO_Add, IV.get(), ST.get());
5229  assert(DistInc.isUsable() && "distribute inc expr was not built");
5230  DistInc = SemaRef.BuildBinOp(CurScope, DistIncLoc, BO_Assign, IV.get(),
5231  DistInc.get());
5232  DistInc = SemaRef.ActOnFinishFullExpr(DistInc.get());
5233  assert(DistInc.isUsable() && "distribute inc expr was not built");
5234 
5235  // Build expression: UB = min(UB, prevUB) for #for in composite or combined
5236  // construct
5237  SourceLocation DistEUBLoc = AStmt->getBeginLoc();
5238  ExprResult IsUBGreater =
5239  SemaRef.BuildBinOp(CurScope, DistEUBLoc, BO_GT, UB.get(), PrevUB.get());
5240  ExprResult CondOp = SemaRef.ActOnConditionalOp(
5241  DistEUBLoc, DistEUBLoc, IsUBGreater.get(), PrevUB.get(), UB.get());
5242  PrevEUB = SemaRef.BuildBinOp(CurScope, DistIncLoc, BO_Assign, UB.get(),
5243  CondOp.get());
5244  PrevEUB = SemaRef.ActOnFinishFullExpr(PrevEUB.get());
5245  }
5246 
5247  // Build updates and final values of the loop counters.
5248  bool HasErrors = false;
5249  Built.Counters.resize(NestedLoopCount);
5250  Built.Inits.resize(NestedLoopCount);
5251  Built.Updates.resize(NestedLoopCount);
5252  Built.Finals.resize(NestedLoopCount);
5253  {
5254  ExprResult Div;
5255  // Go from inner nested loop to outer.
5256  for (int Cnt = NestedLoopCount - 1; Cnt >= 0; --Cnt) {
5257  LoopIterationSpace &IS = IterSpaces[Cnt];
5258  SourceLocation UpdLoc = IS.IncSrcRange.getBegin();
5259  // Build: Iter = (IV / Div) % IS.NumIters
5260  // where Div is product of previous iterations' IS.NumIters.
5261  ExprResult Iter;
5262  if (Div.isUsable()) {
5263  Iter =
5264  SemaRef.BuildBinOp(CurScope, UpdLoc, BO_Div, IV.get(), Div.get());
5265  } else {
5266  Iter = IV;
5267  assert((Cnt == (int)NestedLoopCount - 1) &&
5268  "unusable div expected on first iteration only");
5269  }
5270 
5271  if (Cnt != 0 && Iter.isUsable())
5272  Iter = SemaRef.BuildBinOp(CurScope, UpdLoc, BO_Rem, Iter.get(),
5273  IS.NumIterations);
5274  if (!Iter.isUsable()) {
5275  HasErrors = true;
5276  break;
5277  }
5278 
5279  // Build update: IS.CounterVar(Private) = IS.Start + Iter * IS.Step
5280  auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IS.CounterVar)->getDecl());
5281  DeclRefExpr *CounterVar = buildDeclRefExpr(
5282  SemaRef, VD, IS.CounterVar->getType(), IS.CounterVar->getExprLoc(),
5283  /*RefersToCapture=*/true);
5284  ExprResult Init = buildCounterInit(SemaRef, CurScope, UpdLoc, CounterVar,
5285  IS.CounterInit, Captures);
5286  if (!Init.isUsable()) {
5287  HasErrors = true;
5288  break;
5289  }
5290  ExprResult Update = buildCounterUpdate(
5291  SemaRef, CurScope, UpdLoc, CounterVar, IS.CounterInit, Iter,
5292  IS.CounterStep, IS.Subtract, &Captures);
5293  if (!Update.isUsable()) {
5294  HasErrors = true;
5295  break;
5296  }
5297 
5298  // Build final: IS.CounterVar = IS.Start + IS.NumIters * IS.Step
5299  ExprResult Final = buildCounterUpdate(
5300  SemaRef, CurScope, UpdLoc, CounterVar, IS.CounterInit,
5301  IS.NumIterations, IS.CounterStep, IS.Subtract, &Captures);
5302  if (!Final.isUsable()) {
5303  HasErrors = true;
5304  break;
5305  }
5306 
5307  // Build Div for the next iteration: Div <- Div * IS.NumIters
5308  if (Cnt != 0) {
5309  if (Div.isUnset())
5310  Div = IS.NumIterations;
5311  else
5312  Div = SemaRef.BuildBinOp(CurScope, UpdLoc, BO_Mul, Div.get(),
5313  IS.NumIterations);
5314 
5315  // Add parentheses (for debugging purposes only).
5316  if (Div.isUsable())
5317  Div = tryBuildCapture(SemaRef, Div.get(), Captures);
5318  if (!Div.isUsable()) {
5319  HasErrors = true;
5320  break;
5321  }
5322  }
5323  if (!Update.isUsable() || !Final.isUsable()) {
5324  HasErrors = true;
5325  break;
5326  }
5327  // Save results
5328  Built.Counters[Cnt] = IS.CounterVar;
5329  Built.PrivateCounters[Cnt] = IS.PrivateCounterVar;
5330  Built.Inits[Cnt] = Init.get();
5331  Built.Updates[Cnt] = Update.get();
5332  Built.Finals[Cnt] = Final.get();
5333  }
5334  }
5335 
5336  if (HasErrors)
5337  return 0;
5338 
5339  // Save results
5340  Built.IterationVarRef = IV.get();
5341  Built.LastIteration = LastIteration.get();
5342  Built.NumIterations = NumIterations.get();
5343  Built.CalcLastIteration =
5344  SemaRef.ActOnFinishFullExpr(CalcLastIteration.get()).get();
5345  Built.PreCond = PreCond.get();
5346  Built.PreInits = buildPreInits(C, Captures);
5347  Built.Cond = Cond.get();
5348  Built.Init = Init.get();
5349  Built.Inc = Inc.get();
5350  Built.LB = LB.get();
5351  Built.UB = UB.get();
5352  Built.IL = IL.get();
5353  Built.ST = ST.get();
5354  Built.EUB = EUB.get();
5355  Built.NLB = NextLB.get();
5356  Built.NUB = NextUB.get();
5357  Built.PrevLB = PrevLB.get();
5358  Built.PrevUB = PrevUB.get();
5359  Built.DistInc = DistInc.get();
5360  Built.PrevEUB = PrevEUB.get();
5361  Built.DistCombinedFields.LB = CombLB.get();
5362  Built.DistCombinedFields.UB = CombUB.get();
5363  Built.DistCombinedFields.EUB = CombEUB.get();
5364  Built.DistCombinedFields.Init = CombInit.get();
5365  Built.DistCombinedFields.Cond = CombCond.get();
5366  Built.DistCombinedFields.NLB = CombNextLB.get();
5367  Built.DistCombinedFields.NUB = CombNextUB.get();
5368 
5369  return NestedLoopCount;
5370 }
5371 
5372 static Expr *getCollapseNumberExpr(ArrayRef<OMPClause *> Clauses) {
5373  auto CollapseClauses =
5374  OMPExecutableDirective::getClausesOfKind<OMPCollapseClause>(Clauses);
5375  if (CollapseClauses.begin() != CollapseClauses.end())
5376  return (*CollapseClauses.begin())->getNumForLoops();
5377  return nullptr;
5378 }
5379 
5380 static Expr *getOrderedNumberExpr(ArrayRef<OMPClause *> Clauses) {
5381  auto OrderedClauses =
5382  OMPExecutableDirective::getClausesOfKind<OMPOrderedClause>(Clauses);
5383  if (OrderedClauses.begin() != OrderedClauses.end())
5384  return (*OrderedClauses.begin())->getNumForLoops();
5385  return nullptr;
5386 }
5387 
5388 static bool checkSimdlenSafelenSpecified(const Sema &S,
5389  const ArrayRef<OMPClause *> Clauses) {
5390  const OMPSafelenClause *Safelen = nullptr;
5391  const OMPSimdlenClause *Simdlen = nullptr;
5392 
5393  for (const OMPClause *Clause : Clauses) {
5394  if (Clause->getClauseKind() == OMPC_safelen)
5395  Safelen = cast<OMPSafelenClause>(Clause);
5396  else if (Clause->getClauseKind() == OMPC_simdlen)
5397  Simdlen = cast<OMPSimdlenClause>(Clause);
5398  if (Safelen && Simdlen)
5399  break;
5400  }
5401 
5402  if (Simdlen && Safelen) {
5403  llvm::APSInt SimdlenRes, SafelenRes;
5404  const Expr *SimdlenLength = Simdlen->getSimdlen();
5405  const Expr *SafelenLength = Safelen->getSafelen();
5406  if (SimdlenLength->isValueDependent() || SimdlenLength->isTypeDependent() ||
5407  SimdlenLength->isInstantiationDependent() ||
5408  SimdlenLength->containsUnexpandedParameterPack())
5409  return false;
5410  if (SafelenLength->isValueDependent() || SafelenLength->isTypeDependent() ||
5411  SafelenLength->isInstantiationDependent() ||
5412  SafelenLength->containsUnexpandedParameterPack())
5413  return false;
5414  SimdlenLength->EvaluateAsInt(SimdlenRes, S.Context);
5415  SafelenLength->EvaluateAsInt(SafelenRes, S.Context);
5416  // OpenMP 4.5 [2.8.1, simd Construct, Restrictions]
5417  // If both simdlen and safelen clauses are specified, the value of the
5418  // simdlen parameter must be less than or equal to the value of the safelen
5419  // parameter.
5420  if (SimdlenRes > SafelenRes) {
5421  S.Diag(SimdlenLength->getExprLoc(),
5422  diag::err_omp_wrong_simdlen_safelen_values)
5423  << SimdlenLength->getSourceRange() << SafelenLength->getSourceRange();
5424  return true;
5425  }
5426  }
5427  return false;
5428 }
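// Illustrative sketch for the check above: '#pragma omp simd simdlen(8) safelen(4)'
// is rejected because simdlen must be less than or equal to safelen, while
// '#pragma omp simd simdlen(4) safelen(8)' is accepted.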
5429 
5430 StmtResult
5431 Sema::ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
5432  SourceLocation StartLoc, SourceLocation EndLoc,
5433  VarsWithInheritedDSAType &VarsWithImplicitDSA) {
5434  if (!AStmt)
5435  return StmtError();
5436 
5437  assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
5438  OMPLoopDirective::HelperExprs B;
5439  // In the presence of a 'collapse' or 'ordered' clause with a number of loops,
5440  // it defines the number of nested loops.
5441  unsigned NestedLoopCount = checkOpenMPLoop(
5442  OMPD_simd, getCollapseNumberExpr(Clauses), getOrderedNumberExpr(Clauses),
5443  AStmt, *this, *DSAStack, VarsWithImplicitDSA, B);
5444  if (NestedLoopCount == 0)
5445  return StmtError();
5446 
5447  assert((CurContext->isDependentContext() || B.builtAll()) &&
5448  "omp simd loop exprs were not built");
5449 
5450  if (!CurContext->isDependentContext()) {
5451  // Finalize the clauses that need pre-built expressions for CodeGen.
5452  for (OMPClause *C : Clauses) {
5453  if (auto *LC = dyn_cast<OMPLinearClause>(C))
5454  if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
5455  B.NumIterations, *this, CurScope,
5456  DSAStack))
5457  return StmtError();
5458  }
5459  }
5460 
5461  if (checkSimdlenSafelenSpecified(*this, Clauses))
5462  return StmtError();
5463 
5464  setFunctionHasBranchProtectedScope();
5465  return OMPSimdDirective::Create(Context, StartLoc, EndLoc, NestedLoopCount,
5466  Clauses, AStmt, B);
5467 }
5468 
5469 StmtResult
5470 Sema::ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
5471  SourceLocation StartLoc, SourceLocation EndLoc,
5472  VarsWithInheritedDSAType &VarsWithImplicitDSA) {
5473  if (!AStmt)
5474  return StmtError();
5475 
5476  assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
5477  OMPLoopDirective::HelperExprs B;
5478  // In the presence of a 'collapse' or 'ordered' clause with a number of loops,
5479  // it defines the number of nested loops.
5480  unsigned NestedLoopCount = checkOpenMPLoop(
5481  OMPD_for, getCollapseNumberExpr(Clauses), getOrderedNumberExpr(Clauses),
5482  AStmt, *this, *DSAStack, VarsWithImplicitDSA, B);
5483  if (NestedLoopCount == 0)
5484  return StmtError();
5485 
5486  assert((CurContext->isDependentContext() || B.builtAll()) &&
5487  "omp for loop exprs were not built");
5488 
5489  if (!CurContext->isDependentContext()) {
5490  // Finalize the clauses that need pre-built expressions for CodeGen.
5491  for (OMPClause *C : Clauses) {
5492  if (auto *LC = dyn_cast<OMPLinearClause>(C))
5493  if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
5494  B.NumIterations, *this, CurScope,
5495  DSAStack))
5496  return StmtError();
5497  }
5498  }
5499 
5500  setFunctionHasBranchProtectedScope();
5501  return OMPForDirective::Create(Context, StartLoc, EndLoc, NestedLoopCount,
5502  Clauses, AStmt, B, DSAStack->isCancelRegion());
5503 }
5504 
5505 StmtResult Sema::ActOnOpenMPForSimdDirective(
5506  ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
5507  SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
5508  if (!AStmt)
5509  return StmtError();
5510 
5511  assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
5512  OMPLoopDirective::HelperExprs B;
5513  // In the presence of a 'collapse' or 'ordered' clause with a number of loops,
5514  // it defines the number of nested loops.
5515  unsigned NestedLoopCount =
5516  checkOpenMPLoop(OMPD_for_simd, getCollapseNumberExpr(Clauses),
5517  getOrderedNumberExpr(Clauses), AStmt, *this, *DSAStack,
5518  VarsWithImplicitDSA, B);
5519  if (NestedLoopCount == 0)
5520  return StmtError();
5521 
5522  assert((CurContext->isDependentContext() || B.builtAll()) &&
5523  "omp for simd loop exprs were not built");
5524 
5525  if (!CurContext->isDependentContext()) {
5526  // Finalize the clauses that need pre-built expressions for CodeGen.
5527  for (OMPClause *C : Clauses) {
5528  if (auto *LC = dyn_cast<OMPLinearClause>(C))
5529  if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
5530  B.NumIterations, *this, CurScope,
5531  DSAStack))
5532  return StmtError();
5533  }
5534  }
5535 
5536  if (checkSimdlenSafelenSpecified(*this, Clauses))
5537  return StmtError();
5538 
5539  setFunctionHasBranchProtectedScope();
5540  return OMPForSimdDirective::Create(Context, StartLoc, EndLoc, NestedLoopCount,
5541  Clauses, AStmt, B);
5542 }
5543 
5544 StmtResult Sema::ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
5545  Stmt *AStmt,
5546  SourceLocation StartLoc,
5547  SourceLocation EndLoc) {
5548  if (!AStmt)
5549  return StmtError();
5550 
5551  assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
5552  auto BaseStmt = AStmt;
5553  while (auto *CS = dyn_cast_or_null<CapturedStmt>(BaseStmt))
5554  BaseStmt = CS->getCapturedStmt();
5555  if (auto *C = dyn_cast_or_null<CompoundStmt>(BaseStmt)) {
5556  auto S = C->children();
5557  if (S.begin() == S.end())
5558  return StmtError();
5559  // All associated statements must be '#pragma omp section' except for
5560  // the first one.
5561  for (Stmt *SectionStmt : llvm::make_range(std::next(S.begin()), S.end())) {
5562  if (!SectionStmt || !isa<OMPSectionDirective>(SectionStmt)) {
5563  if (SectionStmt)
5564  Diag(SectionStmt->getBeginLoc(),
5565  diag::err_omp_sections_substmt_not_section);
5566  return StmtError();
5567  }
5568  cast<OMPSectionDirective>(SectionStmt)
5569  ->setHasCancel(DSAStack->isCancelRegion());
5570  }
5571  } else {
5572  Diag(AStmt->getBeginLoc(), diag::err_omp_sections_not_compound_stmt);
5573  return StmtError();
5574  }
5575 
5576  setFunctionHasBranchProtectedScope();
5577 
5578  return OMPSectionsDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt,
5579  DSAStack->isCancelRegion());
5580 }
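// Illustrative sketch of a well-formed region for the check above (bodies are
// placeholders):
//   #pragma omp sections
//   {
//     { /* first sub-statement, implicit section */ }
//     #pragma omp section
//     { /* ... */ }
//   }
// Any sub-statement after the first that is not '#pragma omp section' is
// diagnosed, as is an associated statement that is not a compound statement.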
5581 
5582 StmtResult Sema::ActOnOpenMPSectionDirective(Stmt *AStmt,
5583  SourceLocation StartLoc,
5584  SourceLocation EndLoc) {
5585  if (!AStmt)
5586  return StmtError();
5587 
5588  assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
5589 
5590  setFunctionHasBranchProtectedScope();
5591  DSAStack->setParentCancelRegion(DSAStack->isCancelRegion());
5592 
5593  return OMPSectionDirective::Create(Context, StartLoc, EndLoc, AStmt,
5594  DSAStack->isCancelRegion());
5595 }
5596 
5597 StmtResult Sema::ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
5598  Stmt *AStmt,
5599  SourceLocation StartLoc,
5600  SourceLocation EndLoc) {
5601  if (!AStmt)
5602  return StmtError();
5603 
5604  assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
5605 
5606  setFunctionHasBranchProtectedScope();
5607 
5608  // OpenMP [2.7.3, single Construct, Restrictions]
5609  // The copyprivate clause must not be used with the nowait clause.
5610  const OMPClause *Nowait = nullptr;
5611  const OMPClause *Copyprivate = nullptr;
5612  for (const OMPClause *Clause : Clauses) {
5613  if (Clause->getClauseKind() == OMPC_nowait)
5614  Nowait = Clause;
5615  else if (Clause->getClauseKind() == OMPC_copyprivate)
5616  Copyprivate = Clause;
5617  if (Copyprivate && Nowait) {
5618  Diag(Copyprivate->getBeginLoc(),
5619  diag::err_omp_single_copyprivate_with_nowait);
5620  Diag(Nowait->getBeginLoc(), diag::note_omp_nowait_clause_here);
5621  return StmtError();
5622  }
5623  }
5624 
5625  return OMPSingleDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt);
5626 }
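// Illustrative sketch for the check above: '#pragma omp single copyprivate(x) nowait'
// is rejected because 'copyprivate' and 'nowait' may not appear on the same
// 'single' construct; either clause on its own is accepted.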
5627 
5628 StmtResult Sema::ActOnOpenMPMasterDirective(Stmt *AStmt,
5629  SourceLocation StartLoc,
5630  SourceLocation EndLoc) {
5631  if (!AStmt)
5632  return StmtError();
5633 
5634  assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
5635 
5636  setFunctionHasBranchProtectedScope();
5637 
5638  return OMPMasterDirective::Create(Context, StartLoc, EndLoc, AStmt);
5639 }
5640 
5641 StmtResult Sema::ActOnOpenMPCriticalDirective(
5642  const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses,
5643  Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc) {
5644  if (!AStmt)
5645  return StmtError();
5646 
5647  assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
5648 
5649  bool ErrorFound = false;
5650  llvm::APSInt Hint;
5651  SourceLocation HintLoc;
5652  bool DependentHint = false;
5653  for (const OMPClause *C : Clauses) {
5654  if (C->getClauseKind() == OMPC_hint) {
5655  if (!DirName.getName()) {
5656  Diag(C->getBeginLoc(), diag::err_omp_hint_clause_no_name);
5657  ErrorFound = true;
5658  }
5659  Expr *E = cast<OMPHintClause>(C)->getHint();
5660  if (E->isTypeDependent() || E->isValueDependent() ||
5661  E->isInstantiationDependent()) {
5662  DependentHint = true;
5663  } else {
5664  Hint = E->EvaluateKnownConstInt(Context);
5665  HintLoc = C->getBeginLoc();
5666  }
5667  }
5668  }
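  // Illustrative sketch: a 'hint' clause on an unnamed construct, e.g.
  // '#pragma omp critical hint(1)', is diagnosed above; a dependent hint
  // expression is not evaluated here and only marks the hint as dependent.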
5669  if (ErrorFound)
5670  return StmtError();