1 //===--- SemaOpenMP.cpp - Semantic Analysis for OpenMP constructs ---------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 /// \file
10 /// This file implements semantic analysis for OpenMP directives and
11 /// clauses.
12 ///
13 //===----------------------------------------------------------------------===//
14 
15 #include "TreeTransform.h"
16 #include "clang/AST/ASTContext.h"
19 #include "clang/AST/Decl.h"
20 #include "clang/AST/DeclCXX.h"
21 #include "clang/AST/DeclOpenMP.h"
22 #include "clang/AST/StmtCXX.h"
23 #include "clang/AST/StmtOpenMP.h"
24 #include "clang/AST/StmtVisitor.h"
27 #include "clang/Sema/Lookup.h"
28 #include "clang/Sema/Scope.h"
29 #include "clang/Sema/ScopeInfo.h"
31 #include "llvm/ADT/PointerEmbeddedInt.h"
32 using namespace clang;
33 
34 //===----------------------------------------------------------------------===//
35 // Stack of data-sharing attributes for variables
36 //===----------------------------------------------------------------------===//
37 
38 static const Expr *checkMapClauseExpressionBase(
39     Sema &SemaRef, Expr *E,
40     OMPClauseMappableExprCommon::MappableExprComponentListRef &CurComponents,
41     OpenMPClauseKind CKind, bool NoDiagnose);
42 
43 namespace {
44 /// Default data sharing attributes, which can be applied to a directive.
45 enum DefaultDataSharingAttributes {
46  DSA_unspecified = 0, /// Data sharing attribute not specified.
47  DSA_none = 1 << 0, /// Default data sharing attribute 'none'.
48  DSA_shared = 1 << 1, /// Default data sharing attribute 'shared'.
49 };
50 
51 /// Attributes of the defaultmap clause.
52 enum DefaultMapAttributes {
53  DMA_unspecified, /// Default mapping is not specified.
54  DMA_tofrom_scalar, /// Default mapping is 'tofrom:scalar'.
55 };
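// Illustrative note (not from the original source): these defaults correspond
// to user-written clauses such as the following hypothetical user code:
//
//   #pragma omp parallel default(none)             // -> DSA_none
//   #pragma omp parallel default(shared)           // -> DSA_shared
//   #pragma omp target defaultmap(tofrom:scalar)   // -> DMA_tofrom_scalar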
56 
57 /// Stack for tracking declarations used in OpenMP directives and
58 /// clauses and their data-sharing attributes.
59 class DSAStackTy {
60 public:
61  struct DSAVarData {
62   OpenMPDirectiveKind DKind = OMPD_unknown;
63   OpenMPClauseKind CKind = OMPC_unknown;
64   const Expr *RefExpr = nullptr;
65  DeclRefExpr *PrivateCopy = nullptr;
66  SourceLocation ImplicitDSALoc;
67  DSAVarData() = default;
68  DSAVarData(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind,
69  const Expr *RefExpr, DeclRefExpr *PrivateCopy,
70  SourceLocation ImplicitDSALoc)
71  : DKind(DKind), CKind(CKind), RefExpr(RefExpr),
72  PrivateCopy(PrivateCopy), ImplicitDSALoc(ImplicitDSALoc) {}
73  };
74  using OperatorOffsetTy =
75      llvm::SmallVector<std::pair<Expr *, OverloadedOperatorKind>, 4>;
76  using DoacrossDependMapTy =
77  llvm::DenseMap<OMPDependClause *, OperatorOffsetTy>;
78 
79 private:
80  struct DSAInfo {
81  OpenMPClauseKind Attributes = OMPC_unknown;
82  /// Pointer to a reference expression and a flag which shows that the
83  /// variable is marked as lastprivate(true) or not (false).
84  llvm::PointerIntPair<const Expr *, 1, bool> RefExpr;
85  DeclRefExpr *PrivateCopy = nullptr;
86  };
87  using DeclSAMapTy = llvm::SmallDenseMap<const ValueDecl *, DSAInfo, 8>;
88  using AlignedMapTy = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 8>;
89  using LCDeclInfo = std::pair<unsigned, VarDecl *>;
90  using LoopControlVariablesMapTy =
91  llvm::SmallDenseMap<const ValueDecl *, LCDeclInfo, 8>;
92  /// Struct that associates a component with the clause kind where they are
93  /// found.
94  struct MappedExprComponentTy {
95   OMPClauseMappableExprCommon::MappableExprComponentLists Components;
96   OpenMPClauseKind Kind = OMPC_unknown;
97  };
98  using MappedExprComponentsTy =
99  llvm::DenseMap<const ValueDecl *, MappedExprComponentTy>;
100  using CriticalsWithHintsTy =
101  llvm::StringMap<std::pair<const OMPCriticalDirective *, llvm::APSInt>>;
102  struct ReductionData {
103  using BOKPtrType = llvm::PointerEmbeddedInt<BinaryOperatorKind, 16>;
104  SourceRange ReductionRange;
105  llvm::PointerUnion<const Expr *, BOKPtrType> ReductionOp;
106  ReductionData() = default;
107  void set(BinaryOperatorKind BO, SourceRange RR) {
108  ReductionRange = RR;
109  ReductionOp = BO;
110  }
111  void set(const Expr *RefExpr, SourceRange RR) {
112  ReductionRange = RR;
113  ReductionOp = RefExpr;
114  }
115  };
116  using DeclReductionMapTy =
117  llvm::SmallDenseMap<const ValueDecl *, ReductionData, 4>;
118 
119  struct SharingMapTy {
120  DeclSAMapTy SharingMap;
121  DeclReductionMapTy ReductionMap;
122  AlignedMapTy AlignedMap;
123  MappedExprComponentsTy MappedExprComponents;
124  LoopControlVariablesMapTy LCVMap;
125  DefaultDataSharingAttributes DefaultAttr = DSA_unspecified;
126  SourceLocation DefaultAttrLoc;
127  DefaultMapAttributes DefaultMapAttr = DMA_unspecified;
128  SourceLocation DefaultMapAttrLoc;
129  OpenMPDirectiveKind Directive = OMPD_unknown;
130  DeclarationNameInfo DirectiveName;
131  Scope *CurScope = nullptr;
132  SourceLocation ConstructLoc;
133  /// Set of 'depend' clauses with 'sink|source' dependence kind. Required to
134  /// get the data (loop counters etc.) about enclosing loop-based construct.
135  /// This data is required during codegen.
136  DoacrossDependMapTy DoacrossDepends;
137  /// Optional argument of the 'ordered' clause (the expression, if any)
138  /// together with the clause itself; present only if the region has an
139  /// 'ordered' clause.
140  llvm::Optional<std::pair<const Expr *, OMPOrderedClause *>> OrderedRegion;
141  unsigned AssociatedLoops = 1;
142  const Decl *PossiblyLoopCounter = nullptr;
143  bool NowaitRegion = false;
144  bool CancelRegion = false;
145  bool LoopStart = false;
146  SourceLocation InnerTeamsRegionLoc;
147  /// Reference to the taskgroup task_reduction reference expression.
148  Expr *TaskgroupReductionRef = nullptr;
149  SharingMapTy(OpenMPDirectiveKind DKind, DeclarationNameInfo Name,
150  Scope *CurScope, SourceLocation Loc)
151  : Directive(DKind), DirectiveName(Name), CurScope(CurScope),
152  ConstructLoc(Loc) {}
153  SharingMapTy() = default;
154  };
155 
156  using StackTy = SmallVector<SharingMapTy, 4>;
157 
158  /// Stack of used declarations and their data-sharing attributes.
159  DeclSAMapTy Threadprivates;
160  const FunctionScopeInfo *CurrentNonCapturingFunctionScope = nullptr;
161  SmallVector<std::pair<StackTy, const FunctionScopeInfo *>, 4> Stack;
162  /// true, if the check for DSA must be done against the parent directive;
163  /// false, if against the current directive.
164  OpenMPClauseKind ClauseKindMode = OMPC_unknown;
165  Sema &SemaRef;
166  bool ForceCapturing = false;
167  /// true if all the variables in the target executable directives must be
168  /// captured by reference.
169  bool ForceCaptureByReferenceInTargetExecutable = false;
170  CriticalsWithHintsTy Criticals;
171 
172  using iterator = StackTy::const_reverse_iterator;
173 
174  DSAVarData getDSA(iterator &Iter, ValueDecl *D) const;
175 
176  /// Checks if the variable is local to the OpenMP region.
177  bool isOpenMPLocal(VarDecl *D, iterator Iter) const;
178 
179  bool isStackEmpty() const {
180  return Stack.empty() ||
181  Stack.back().second != CurrentNonCapturingFunctionScope ||
182  Stack.back().first.empty();
183  }
184 
185  /// Vector of previously declared requires directives.
186  SmallVector<const OMPRequiresDecl *, 2> RequiresDecls;
187 
188 public:
189  explicit DSAStackTy(Sema &S) : SemaRef(S) {}
190 
191  bool isClauseParsingMode() const { return ClauseKindMode != OMPC_unknown; }
192  OpenMPClauseKind getClauseParsingMode() const {
193  assert(isClauseParsingMode() && "Must be in clause parsing mode.");
194  return ClauseKindMode;
195  }
196  void setClauseParsingMode(OpenMPClauseKind K) { ClauseKindMode = K; }
197 
198  bool isForceVarCapturing() const { return ForceCapturing; }
199  void setForceVarCapturing(bool V) { ForceCapturing = V; }
200 
201  void setForceCaptureByReferenceInTargetExecutable(bool V) {
202  ForceCaptureByReferenceInTargetExecutable = V;
203  }
204  bool isForceCaptureByReferenceInTargetExecutable() const {
205  return ForceCaptureByReferenceInTargetExecutable;
206  }
207 
208  void push(OpenMPDirectiveKind DKind, const DeclarationNameInfo &DirName,
209  Scope *CurScope, SourceLocation Loc) {
210  if (Stack.empty() ||
211  Stack.back().second != CurrentNonCapturingFunctionScope)
212  Stack.emplace_back(StackTy(), CurrentNonCapturingFunctionScope);
213  Stack.back().first.emplace_back(DKind, DirName, CurScope, Loc);
214  Stack.back().first.back().DefaultAttrLoc = Loc;
215  }
216 
217  void pop() {
218  assert(!Stack.back().first.empty() &&
219  "Data-sharing attributes stack is empty!");
220  Stack.back().first.pop_back();
221  }
222 
223  /// Marks that we have started loop parsing.
224  void loopInit() {
225  assert(isOpenMPLoopDirective(getCurrentDirective()) &&
226  "Expected loop-based directive.");
227  Stack.back().first.back().LoopStart = true;
228  }
229  /// Start capturing of the variables in the loop context.
230  void loopStart() {
231  assert(isOpenMPLoopDirective(getCurrentDirective()) &&
232  "Expected loop-based directive.");
233  Stack.back().first.back().LoopStart = false;
234  }
235  /// true, if variables are captured, false otherwise.
236  bool isLoopStarted() const {
237  assert(isOpenMPLoopDirective(getCurrentDirective()) &&
238  "Expected loop-based directive.");
239  return !Stack.back().first.back().LoopStart;
240  }
241  /// Marks (or clears) declaration as possibly loop counter.
242  void resetPossibleLoopCounter(const Decl *D = nullptr) {
243  Stack.back().first.back().PossiblyLoopCounter =
244  D ? D->getCanonicalDecl() : D;
245  }
246  /// Gets the possible loop counter decl.
247  const Decl *getPossiblyLoopCunter() const {
248  return Stack.back().first.back().PossiblyLoopCounter;
249  }
250  /// Start new OpenMP region stack in new non-capturing function.
251  void pushFunction() {
252  const FunctionScopeInfo *CurFnScope = SemaRef.getCurFunction();
253  assert(!isa<CapturingScopeInfo>(CurFnScope));
254  CurrentNonCapturingFunctionScope = CurFnScope;
255  }
256  /// Pop region stack for non-capturing function.
257  void popFunction(const FunctionScopeInfo *OldFSI) {
258  if (!Stack.empty() && Stack.back().second == OldFSI) {
259  assert(Stack.back().first.empty());
260  Stack.pop_back();
261  }
262  CurrentNonCapturingFunctionScope = nullptr;
263  for (const FunctionScopeInfo *FSI : llvm::reverse(SemaRef.FunctionScopes)) {
264  if (!isa<CapturingScopeInfo>(FSI)) {
265  CurrentNonCapturingFunctionScope = FSI;
266  break;
267  }
268  }
269  }
270 
271  void addCriticalWithHint(const OMPCriticalDirective *D, llvm::APSInt Hint) {
272  Criticals.try_emplace(D->getDirectiveName().getAsString(), D, Hint);
273  }
274  const std::pair<const OMPCriticalDirective *, llvm::APSInt>
275  getCriticalWithHint(const DeclarationNameInfo &Name) const {
276  auto I = Criticals.find(Name.getAsString());
277  if (I != Criticals.end())
278  return I->second;
279  return std::make_pair(nullptr, llvm::APSInt());
280  }
281  /// If 'aligned' declaration for given variable \a D was not seen yet,
282  /// add it and return NULL; otherwise return previous occurrence's expression
283  /// for diagnostics.
284  const Expr *addUniqueAligned(const ValueDecl *D, const Expr *NewDE);
285 
286  /// Register specified variable as loop control variable.
287  void addLoopControlVariable(const ValueDecl *D, VarDecl *Capture);
288  /// Check if the specified variable is a loop control variable for
289  /// current region.
290  /// \return The index of the loop control variable in the list of associated
291  /// for-loops (from outer to inner).
292  const LCDeclInfo isLoopControlVariable(const ValueDecl *D) const;
293  /// Check if the specified variable is a loop control variable for
294  /// parent region.
295  /// \return The index of the loop control variable in the list of associated
296  /// for-loops (from outer to inner).
297  const LCDeclInfo isParentLoopControlVariable(const ValueDecl *D) const;
298  /// Get the loop control variable for the I-th loop (or nullptr) in
299  /// parent directive.
300  const ValueDecl *getParentLoopControlVariable(unsigned I) const;
301 
302  /// Adds explicit data sharing attribute to the specified declaration.
303  void addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
304  DeclRefExpr *PrivateCopy = nullptr);
305 
306  /// Adds additional information for the reduction items with the reduction id
307  /// represented as an operator.
308  void addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
309  BinaryOperatorKind BOK);
310  /// Adds additional information for the reduction items with the reduction id
311  /// represented as reduction identifier.
312  void addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
313  const Expr *ReductionRef);
314  /// Returns the location and reduction operation from the innermost parent
315  /// region for the given \p D.
316  const DSAVarData
317  getTopMostTaskgroupReductionData(const ValueDecl *D, SourceRange &SR,
318  BinaryOperatorKind &BOK,
319  Expr *&TaskgroupDescriptor) const;
320  /// Returns the location and reduction operation from the innermost parent
321  /// region for the given \p D.
322  const DSAVarData
323  getTopMostTaskgroupReductionData(const ValueDecl *D, SourceRange &SR,
324  const Expr *&ReductionRef,
325  Expr *&TaskgroupDescriptor) const;
326  /// Return reduction reference expression for the current taskgroup.
327  Expr *getTaskgroupReductionRef() const {
328  assert(Stack.back().first.back().Directive == OMPD_taskgroup &&
329  "taskgroup reference expression requested for non taskgroup "
330  "directive.");
331  return Stack.back().first.back().TaskgroupReductionRef;
332  }
333  /// Checks if the given \p VD declaration is actually a taskgroup reduction
334  /// descriptor variable at the \p Level of OpenMP regions.
335  bool isTaskgroupReductionRef(const ValueDecl *VD, unsigned Level) const {
336  return Stack.back().first[Level].TaskgroupReductionRef &&
337  cast<DeclRefExpr>(Stack.back().first[Level].TaskgroupReductionRef)
338  ->getDecl() == VD;
339  }
340 
341  /// Returns data sharing attributes from top of the stack for the
342  /// specified declaration.
343  const DSAVarData getTopDSA(ValueDecl *D, bool FromParent);
344  /// Returns data-sharing attributes for the specified declaration.
345  const DSAVarData getImplicitDSA(ValueDecl *D, bool FromParent) const;
346  /// Checks if the specified variable has data-sharing attributes which
347  /// match the specified \a CPred predicate in any directive which matches
348  /// the \a DPred predicate.
349  const DSAVarData
350  hasDSA(ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
351  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
352  bool FromParent) const;
353  /// Checks if the specified variable has data-sharing attributes which
354  /// match the specified \a CPred predicate in the innermost directive which
355  /// matches the \a DPred predicate.
356  const DSAVarData
357  hasInnermostDSA(ValueDecl *D,
358  const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
359  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
360  bool FromParent) const;
361  /// Checks if the specified variable has explicit data-sharing
362  /// attributes which match the specified \a CPred predicate at the specified
363  /// OpenMP region.
364  bool hasExplicitDSA(const ValueDecl *D,
365  const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
366  unsigned Level, bool NotLastprivate = false) const;
367 
368  /// Returns true if the directive at level \a Level matches the
369  /// specified \a DPred predicate.
370  bool hasExplicitDirective(
371  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
372  unsigned Level) const;
373 
374  /// Finds a directive which matches specified \a DPred predicate.
375  bool hasDirective(
376  const llvm::function_ref<bool(
377      OpenMPDirectiveKind, const DeclarationNameInfo &, SourceLocation)>
378      DPred,
379  bool FromParent) const;
380 
381  /// Returns currently analyzed directive.
382  OpenMPDirectiveKind getCurrentDirective() const {
383  return isStackEmpty() ? OMPD_unknown : Stack.back().first.back().Directive;
384  }
385  /// Returns directive kind at specified level.
386  OpenMPDirectiveKind getDirective(unsigned Level) const {
387  assert(!isStackEmpty() && "No directive at specified level.");
388  return Stack.back().first[Level].Directive;
389  }
390  /// Returns parent directive.
391  OpenMPDirectiveKind getParentDirective() const {
392  if (isStackEmpty() || Stack.back().first.size() == 1)
393  return OMPD_unknown;
394  return std::next(Stack.back().first.rbegin())->Directive;
395  }
396 
397  /// Add requires decl to internal vector
398  void addRequiresDecl(OMPRequiresDecl *RD) {
399  RequiresDecls.push_back(RD);
400  }
401 
402  /// Checks for a duplicate clause amongst previously declared requires
403  /// directives
404  bool hasDuplicateRequiresClause(ArrayRef<OMPClause *> ClauseList) const {
405  bool IsDuplicate = false;
406  for (OMPClause *CNew : ClauseList) {
407  for (const OMPRequiresDecl *D : RequiresDecls) {
408  for (const OMPClause *CPrev : D->clauselists()) {
409  if (CNew->getClauseKind() == CPrev->getClauseKind()) {
410  SemaRef.Diag(CNew->getBeginLoc(),
411  diag::err_omp_requires_clause_redeclaration)
412  << getOpenMPClauseName(CNew->getClauseKind());
413  SemaRef.Diag(CPrev->getBeginLoc(),
414  diag::note_omp_requires_previous_clause)
415  << getOpenMPClauseName(CPrev->getClauseKind());
416  IsDuplicate = true;
417  }
418  }
419  }
420  }
421  return IsDuplicate;
422  }
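  // Illustrative example (not from the original source) of the redeclaration
  // diagnosed by the check above, using hypothetical user code:
  //
  //   #pragma omp requires unified_shared_memory
  //   #pragma omp requires unified_shared_memory  // error: duplicate clause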
423 
424  /// Set default data sharing attribute to none.
425  void setDefaultDSANone(SourceLocation Loc) {
426  assert(!isStackEmpty());
427  Stack.back().first.back().DefaultAttr = DSA_none;
428  Stack.back().first.back().DefaultAttrLoc = Loc;
429  }
430  /// Set default data sharing attribute to shared.
431  void setDefaultDSAShared(SourceLocation Loc) {
432  assert(!isStackEmpty());
433  Stack.back().first.back().DefaultAttr = DSA_shared;
434  Stack.back().first.back().DefaultAttrLoc = Loc;
435  }
436  /// Set default data mapping attribute to 'tofrom:scalar'.
437  void setDefaultDMAToFromScalar(SourceLocation Loc) {
438  assert(!isStackEmpty());
439  Stack.back().first.back().DefaultMapAttr = DMA_tofrom_scalar;
440  Stack.back().first.back().DefaultMapAttrLoc = Loc;
441  }
442 
443  DefaultDataSharingAttributes getDefaultDSA() const {
444  return isStackEmpty() ? DSA_unspecified
445  : Stack.back().first.back().DefaultAttr;
446  }
447  SourceLocation getDefaultDSALocation() const {
448  return isStackEmpty() ? SourceLocation()
449  : Stack.back().first.back().DefaultAttrLoc;
450  }
451  DefaultMapAttributes getDefaultDMA() const {
452  return isStackEmpty() ? DMA_unspecified
453  : Stack.back().first.back().DefaultMapAttr;
454  }
455  DefaultMapAttributes getDefaultDMAAtLevel(unsigned Level) const {
456  return Stack.back().first[Level].DefaultMapAttr;
457  }
458  SourceLocation getDefaultDMALocation() const {
459  return isStackEmpty() ? SourceLocation()
460  : Stack.back().first.back().DefaultMapAttrLoc;
461  }
462 
463  /// Checks if the specified variable is a threadprivate.
464  bool isThreadPrivate(VarDecl *D) {
465  const DSAVarData DVar = getTopDSA(D, false);
466  return isOpenMPThreadPrivate(DVar.CKind);
467  }
468 
469  /// Marks current region as ordered (it has an 'ordered' clause).
470  void setOrderedRegion(bool IsOrdered, const Expr *Param,
471  OMPOrderedClause *Clause) {
472  assert(!isStackEmpty());
473  if (IsOrdered)
474  Stack.back().first.back().OrderedRegion.emplace(Param, Clause);
475  else
476  Stack.back().first.back().OrderedRegion.reset();
477  }
478  /// Returns true, if region is ordered (has associated 'ordered' clause),
479  /// false - otherwise.
480  bool isOrderedRegion() const {
481  if (isStackEmpty())
482  return false;
483  return Stack.back().first.rbegin()->OrderedRegion.hasValue();
484  }
485  /// Returns optional parameter for the ordered region.
486  std::pair<const Expr *, OMPOrderedClause *> getOrderedRegionParam() const {
487  if (isStackEmpty() ||
488  !Stack.back().first.rbegin()->OrderedRegion.hasValue())
489  return std::make_pair(nullptr, nullptr);
490  return Stack.back().first.rbegin()->OrderedRegion.getValue();
491  }
492  /// Returns true, if parent region is ordered (has associated
493  /// 'ordered' clause), false - otherwise.
494  bool isParentOrderedRegion() const {
495  if (isStackEmpty() || Stack.back().first.size() == 1)
496  return false;
497  return std::next(Stack.back().first.rbegin())->OrderedRegion.hasValue();
498  }
499  /// Returns optional parameter for the ordered region.
500  std::pair<const Expr *, OMPOrderedClause *>
501  getParentOrderedRegionParam() const {
502  if (isStackEmpty() || Stack.back().first.size() == 1 ||
503  !std::next(Stack.back().first.rbegin())->OrderedRegion.hasValue())
504  return std::make_pair(nullptr, nullptr);
505  return std::next(Stack.back().first.rbegin())->OrderedRegion.getValue();
506  }
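  // Illustrative example (not from the original source), hypothetical user
  // code:
  //
  //   #pragma omp for ordered(2)
  //
  // marks the region as ordered with the optional parameter '2'; nested
  // 'ordered depend(sink: i-1,j)' and 'depend(source)' clauses are later
  // validated against this information.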
507  /// Marks current region as nowait (it has a 'nowait' clause).
508  void setNowaitRegion(bool IsNowait = true) {
509  assert(!isStackEmpty());
510  Stack.back().first.back().NowaitRegion = IsNowait;
511  }
512  /// Returns true, if parent region is nowait (has associated
513  /// 'nowait' clause), false - otherwise.
514  bool isParentNowaitRegion() const {
515  if (isStackEmpty() || Stack.back().first.size() == 1)
516  return false;
517  return std::next(Stack.back().first.rbegin())->NowaitRegion;
518  }
519  /// Marks parent region as cancel region.
520  void setParentCancelRegion(bool Cancel = true) {
521  if (!isStackEmpty() && Stack.back().first.size() > 1) {
522  auto &StackElemRef = *std::next(Stack.back().first.rbegin());
523  StackElemRef.CancelRegion |= StackElemRef.CancelRegion || Cancel;
524  }
525  }
526  /// Return true if current region has inner cancel construct.
527  bool isCancelRegion() const {
528  return isStackEmpty() ? false : Stack.back().first.back().CancelRegion;
529  }
530 
531  /// Set collapse value for the region.
532  void setAssociatedLoops(unsigned Val) {
533  assert(!isStackEmpty());
534  Stack.back().first.back().AssociatedLoops = Val;
535  }
536  /// Return collapse value for region.
537  unsigned getAssociatedLoops() const {
538  return isStackEmpty() ? 0 : Stack.back().first.back().AssociatedLoops;
539  }
540 
541  /// Marks current target region as one with closely nested teams
542  /// region.
543  void setParentTeamsRegionLoc(SourceLocation TeamsRegionLoc) {
544  if (!isStackEmpty() && Stack.back().first.size() > 1) {
545  std::next(Stack.back().first.rbegin())->InnerTeamsRegionLoc =
546  TeamsRegionLoc;
547  }
548  }
549  /// Returns true, if current region has closely nested teams region.
550  bool hasInnerTeamsRegion() const {
551  return getInnerTeamsRegionLoc().isValid();
552  }
553  /// Returns location of the nested teams region (if any).
554  SourceLocation getInnerTeamsRegionLoc() const {
555  return isStackEmpty() ? SourceLocation()
556  : Stack.back().first.back().InnerTeamsRegionLoc;
557  }
558 
559  Scope *getCurScope() const {
560  return isStackEmpty() ? nullptr : Stack.back().first.back().CurScope;
561  }
562  SourceLocation getConstructLoc() const {
563  return isStackEmpty() ? SourceLocation()
564  : Stack.back().first.back().ConstructLoc;
565  }
566 
567  /// Do the check specified in \a Check to all component lists and return true
568  /// if any issue is found.
569  bool checkMappableExprComponentListsForDecl(
570  const ValueDecl *VD, bool CurrentRegionOnly,
571  const llvm::function_ref<
572      bool(OMPClauseMappableExprCommon::MappableExprComponentListRef,
573           OpenMPClauseKind)>
574      Check) const {
575  if (isStackEmpty())
576  return false;
577  auto SI = Stack.back().first.rbegin();
578  auto SE = Stack.back().first.rend();
579 
580  if (SI == SE)
581  return false;
582 
583  if (CurrentRegionOnly)
584  SE = std::next(SI);
585  else
586  std::advance(SI, 1);
587 
588  for (; SI != SE; ++SI) {
589  auto MI = SI->MappedExprComponents.find(VD);
590  if (MI != SI->MappedExprComponents.end())
591    for (OMPClauseMappableExprCommon::MappableExprComponentListRef L :
592         MI->second.Components)
593  if (Check(L, MI->second.Kind))
594  return true;
595  }
596  return false;
597  }
598 
599  /// Do the check specified in \a Check to all component lists at a given level
600  /// and return true if any issue is found.
601  bool checkMappableExprComponentListsForDeclAtLevel(
602  const ValueDecl *VD, unsigned Level,
603  const llvm::function_ref<
604      bool(OMPClauseMappableExprCommon::MappableExprComponentListRef,
605           OpenMPClauseKind)>
606      Check) const {
607  if (isStackEmpty())
608  return false;
609 
610  auto StartI = Stack.back().first.begin();
611  auto EndI = Stack.back().first.end();
612  if (std::distance(StartI, EndI) <= (int)Level)
613  return false;
614  std::advance(StartI, Level);
615 
616  auto MI = StartI->MappedExprComponents.find(VD);
617  if (MI != StartI->MappedExprComponents.end())
618    for (OMPClauseMappableExprCommon::MappableExprComponentListRef L :
619         MI->second.Components)
620  if (Check(L, MI->second.Kind))
621  return true;
622  return false;
623  }
624 
625  /// Create a new mappable expression component list associated with a given
626  /// declaration and initialize it with the provided list of components.
627  void addMappableExpressionComponents(
628  const ValueDecl *VD,
629  OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
630  OpenMPClauseKind WhereFoundClauseKind) {
631  assert(!isStackEmpty() &&
632  "Not expecting to retrieve components from an empty stack!");
633  MappedExprComponentTy &MEC =
634  Stack.back().first.back().MappedExprComponents[VD];
635  // Create new entry and append the new components there.
636  MEC.Components.resize(MEC.Components.size() + 1);
637  MEC.Components.back().append(Components.begin(), Components.end());
638  MEC.Kind = WhereFoundClauseKind;
639  }
640 
641  unsigned getNestingLevel() const {
642  assert(!isStackEmpty());
643  return Stack.back().first.size() - 1;
644  }
645  void addDoacrossDependClause(OMPDependClause *C,
646  const OperatorOffsetTy &OpsOffs) {
647  assert(!isStackEmpty() && Stack.back().first.size() > 1);
648  SharingMapTy &StackElem = *std::next(Stack.back().first.rbegin());
649  assert(isOpenMPWorksharingDirective(StackElem.Directive));
650  StackElem.DoacrossDepends.try_emplace(C, OpsOffs);
651  }
652  llvm::iterator_range<DoacrossDependMapTy::const_iterator>
653  getDoacrossDependClauses() const {
654  assert(!isStackEmpty());
655  const SharingMapTy &StackElem = Stack.back().first.back();
656  if (isOpenMPWorksharingDirective(StackElem.Directive)) {
657  const DoacrossDependMapTy &Ref = StackElem.DoacrossDepends;
658  return llvm::make_range(Ref.begin(), Ref.end());
659  }
660  return llvm::make_range(StackElem.DoacrossDepends.end(),
661  StackElem.DoacrossDepends.end());
662  }
663 };
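// Illustrative note (not from the original source): for nested directives in
// hypothetical user code such as
//
//   #pragma omp target
//   #pragma omp teams
//   #pragma omp parallel
//   { ... }
//
// the stack holds one SharingMapTy per region, so inside the parallel region
// getNestingLevel() is 2 and getParentDirective() returns OMPD_teams.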
664 bool isParallelOrTaskRegion(OpenMPDirectiveKind DKind) {
665  return isOpenMPParallelDirective(DKind) || isOpenMPTaskingDirective(DKind) ||
666  isOpenMPTeamsDirective(DKind) || DKind == OMPD_unknown;
667 }
668 
669 } // namespace
670 
671 static const Expr *getExprAsWritten(const Expr *E) {
672  if (const auto *FE = dyn_cast<FullExpr>(E))
673  E = FE->getSubExpr();
674 
675  if (const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
676  E = MTE->GetTemporaryExpr();
677 
678  while (const auto *Binder = dyn_cast<CXXBindTemporaryExpr>(E))
679  E = Binder->getSubExpr();
680 
681  if (const auto *ICE = dyn_cast<ImplicitCastExpr>(E))
682  E = ICE->getSubExprAsWritten();
683  return E->IgnoreParens();
684 }
685 
686 static Expr *getExprAsWritten(Expr *E) {
687  return const_cast<Expr *>(getExprAsWritten(const_cast<const Expr *>(E)));
688 }
689 
690 static const ValueDecl *getCanonicalDecl(const ValueDecl *D) {
691  if (const auto *CED = dyn_cast<OMPCapturedExprDecl>(D))
692  if (const auto *ME = dyn_cast<MemberExpr>(getExprAsWritten(CED->getInit())))
693  D = ME->getMemberDecl();
694  const auto *VD = dyn_cast<VarDecl>(D);
695  const auto *FD = dyn_cast<FieldDecl>(D);
696  if (VD != nullptr) {
697  VD = VD->getCanonicalDecl();
698  D = VD;
699  } else {
700  assert(FD);
701  FD = FD->getCanonicalDecl();
702  D = FD;
703  }
704  return D;
705 }
706 
707 static ValueDecl *getCanonicalDecl(ValueDecl *D) {
708  return const_cast<ValueDecl *>(
709  getCanonicalDecl(const_cast<const ValueDecl *>(D)));
710 }
711 
712 DSAStackTy::DSAVarData DSAStackTy::getDSA(iterator &Iter,
713  ValueDecl *D) const {
714  D = getCanonicalDecl(D);
715  auto *VD = dyn_cast<VarDecl>(D);
716  const auto *FD = dyn_cast<FieldDecl>(D);
717  DSAVarData DVar;
718  if (isStackEmpty() || Iter == Stack.back().first.rend()) {
719  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
720  // in a region but not in construct]
721  // File-scope or namespace-scope variables referenced in called routines
722  // in the region are shared unless they appear in a threadprivate
723  // directive.
724  if (VD && !VD->isFunctionOrMethodVarDecl() && !isa<ParmVarDecl>(VD))
725  DVar.CKind = OMPC_shared;
726 
727  // OpenMP [2.9.1.2, Data-sharing Attribute Rules for Variables Referenced
728  // in a region but not in construct]
729  // Variables with static storage duration that are declared in called
730  // routines in the region are shared.
731  if (VD && VD->hasGlobalStorage())
732  DVar.CKind = OMPC_shared;
733 
734  // Non-static data members are shared by default.
735  if (FD)
736  DVar.CKind = OMPC_shared;
737 
738  return DVar;
739  }
740 
741  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
742  // in a Construct, C/C++, predetermined, p.1]
743  // Variables with automatic storage duration that are declared in a scope
744  // inside the construct are private.
745  if (VD && isOpenMPLocal(VD, Iter) && VD->isLocalVarDecl() &&
746  (VD->getStorageClass() == SC_Auto || VD->getStorageClass() == SC_None)) {
747  DVar.CKind = OMPC_private;
748  return DVar;
749  }
750 
751  DVar.DKind = Iter->Directive;
752  // Explicitly specified attributes and local variables with predetermined
753  // attributes.
754  if (Iter->SharingMap.count(D)) {
755  const DSAInfo &Data = Iter->SharingMap.lookup(D);
756  DVar.RefExpr = Data.RefExpr.getPointer();
757  DVar.PrivateCopy = Data.PrivateCopy;
758  DVar.CKind = Data.Attributes;
759  DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
760  return DVar;
761  }
762 
763  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
764  // in a Construct, C/C++, implicitly determined, p.1]
765  // In a parallel or task construct, the data-sharing attributes of these
766  // variables are determined by the default clause, if present.
767  switch (Iter->DefaultAttr) {
768  case DSA_shared:
769  DVar.CKind = OMPC_shared;
770  DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
771  return DVar;
772  case DSA_none:
773  return DVar;
774  case DSA_unspecified:
775  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
776  // in a Construct, implicitly determined, p.2]
777  // In a parallel construct, if no default clause is present, these
778  // variables are shared.
779  DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
780  if (isOpenMPParallelDirective(DVar.DKind) ||
781  isOpenMPTeamsDirective(DVar.DKind)) {
782  DVar.CKind = OMPC_shared;
783  return DVar;
784  }
785 
786  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
787  // in a Construct, implicitly determined, p.4]
788  // In a task construct, if no default clause is present, a variable that in
789  // the enclosing context is determined to be shared by all implicit tasks
790  // bound to the current team is shared.
791  if (isOpenMPTaskingDirective(DVar.DKind)) {
792  DSAVarData DVarTemp;
793  iterator I = Iter, E = Stack.back().first.rend();
794  do {
795  ++I;
796  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables
797  // Referenced in a Construct, implicitly determined, p.6]
798  // In a task construct, if no default clause is present, a variable
799  // whose data-sharing attribute is not determined by the rules above is
800  // firstprivate.
801  DVarTemp = getDSA(I, D);
802  if (DVarTemp.CKind != OMPC_shared) {
803  DVar.RefExpr = nullptr;
804  DVar.CKind = OMPC_firstprivate;
805  return DVar;
806  }
807  } while (I != E && !isParallelOrTaskRegion(I->Directive));
808  DVar.CKind =
809  (DVarTemp.CKind == OMPC_unknown) ? OMPC_firstprivate : OMPC_shared;
810  return DVar;
811  }
812  }
813  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
814  // in a Construct, implicitly determined, p.3]
815  // For constructs other than task, if no default clause is present, these
816  // variables inherit their data-sharing attributes from the enclosing
817  // context.
818  return getDSA(++Iter, D);
819 }
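// Illustrative example (not from the original source) of the implicit rules
// above, using hypothetical user code:
//
//   void foo() {
//     int a = 0;
//   #pragma omp parallel   // no default clause: 'a' is implicitly shared
//   #pragma omp task       // 'a' is shared by the implicit tasks of the team,
//     a += 1;              // so it stays shared; a variable not shared in the
//   }                      // enclosing context would be firstprivate here.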
820 
821 const Expr *DSAStackTy::addUniqueAligned(const ValueDecl *D,
822  const Expr *NewDE) {
823  assert(!isStackEmpty() && "Data sharing attributes stack is empty");
824  D = getCanonicalDecl(D);
825  SharingMapTy &StackElem = Stack.back().first.back();
826  auto It = StackElem.AlignedMap.find(D);
827  if (It == StackElem.AlignedMap.end()) {
828  assert(NewDE && "Unexpected nullptr expr to be added into aligned map");
829  StackElem.AlignedMap[D] = NewDE;
830  return nullptr;
831  }
832  assert(It->second && "Unexpected nullptr expr in the aligned map");
833  return It->second;
834 }
835 
836 void DSAStackTy::addLoopControlVariable(const ValueDecl *D, VarDecl *Capture) {
837  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
838  D = getCanonicalDecl(D);
839  SharingMapTy &StackElem = Stack.back().first.back();
840  StackElem.LCVMap.try_emplace(
841  D, LCDeclInfo(StackElem.LCVMap.size() + 1, Capture));
842 }
843 
844 const DSAStackTy::LCDeclInfo
845 DSAStackTy::isLoopControlVariable(const ValueDecl *D) const {
846  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
847  D = getCanonicalDecl(D);
848  const SharingMapTy &StackElem = Stack.back().first.back();
849  auto It = StackElem.LCVMap.find(D);
850  if (It != StackElem.LCVMap.end())
851  return It->second;
852  return {0, nullptr};
853 }
854 
855 const DSAStackTy::LCDeclInfo
856 DSAStackTy::isParentLoopControlVariable(const ValueDecl *D) const {
857  assert(!isStackEmpty() && Stack.back().first.size() > 1 &&
858  "Data-sharing attributes stack is empty");
859  D = getCanonicalDecl(D);
860  const SharingMapTy &StackElem = *std::next(Stack.back().first.rbegin());
861  auto It = StackElem.LCVMap.find(D);
862  if (It != StackElem.LCVMap.end())
863  return It->second;
864  return {0, nullptr};
865 }
866 
867 const ValueDecl *DSAStackTy::getParentLoopControlVariable(unsigned I) const {
868  assert(!isStackEmpty() && Stack.back().first.size() > 1 &&
869  "Data-sharing attributes stack is empty");
870  const SharingMapTy &StackElem = *std::next(Stack.back().first.rbegin());
871  if (StackElem.LCVMap.size() < I)
872  return nullptr;
873  for (const auto &Pair : StackElem.LCVMap)
874  if (Pair.second.first == I)
875  return Pair.first;
876  return nullptr;
877 }
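// Illustrative example (not from the original source), hypothetical user code:
//
//   #pragma omp for collapse(2)
//   for (int i = 0; i < N; ++i)
//     for (int j = 0; j < M; ++j)
//       ;
//
// registers 'i' and 'j' as loop control variables with indices 1 and 2
// (outer to inner); both are treated as predetermined private.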
878 
879 void DSAStackTy::addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
880  DeclRefExpr *PrivateCopy) {
881  D = getCanonicalDecl(D);
882  if (A == OMPC_threadprivate) {
883  DSAInfo &Data = Threadprivates[D];
884  Data.Attributes = A;
885  Data.RefExpr.setPointer(E);
886  Data.PrivateCopy = nullptr;
887  } else {
888  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
889  DSAInfo &Data = Stack.back().first.back().SharingMap[D];
890  assert(Data.Attributes == OMPC_unknown || (A == Data.Attributes) ||
891  (A == OMPC_firstprivate && Data.Attributes == OMPC_lastprivate) ||
892  (A == OMPC_lastprivate && Data.Attributes == OMPC_firstprivate) ||
893  (isLoopControlVariable(D).first && A == OMPC_private));
894  if (A == OMPC_lastprivate && Data.Attributes == OMPC_firstprivate) {
895  Data.RefExpr.setInt(/*IntVal=*/true);
896  return;
897  }
898  const bool IsLastprivate =
899  A == OMPC_lastprivate || Data.Attributes == OMPC_lastprivate;
900  Data.Attributes = A;
901  Data.RefExpr.setPointerAndInt(E, IsLastprivate);
902  Data.PrivateCopy = PrivateCopy;
903  if (PrivateCopy) {
904  DSAInfo &Data =
905  Stack.back().first.back().SharingMap[PrivateCopy->getDecl()];
906  Data.Attributes = A;
907  Data.RefExpr.setPointerAndInt(PrivateCopy, IsLastprivate);
908  Data.PrivateCopy = nullptr;
909  }
910  }
911 }
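// Illustrative note (not from the original source): a variable listed in both
// firstprivate and lastprivate clauses, e.g. in the hypothetical user code
//
//   #pragma omp parallel for firstprivate(x) lastprivate(x)
//
// is recorded once, with the lastprivate flag kept in the RefExpr pointer/int
// pair rather than as a second attribute (the special case handled above).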
912 
913 /// Build a variable declaration for OpenMP loop iteration variable.
914 static VarDecl *buildVarDecl(Sema &SemaRef, SourceLocation Loc, QualType Type,
915  StringRef Name, const AttrVec *Attrs = nullptr,
916  DeclRefExpr *OrigRef = nullptr) {
917  DeclContext *DC = SemaRef.CurContext;
918  IdentifierInfo *II = &SemaRef.PP.getIdentifierTable().get(Name);
919  TypeSourceInfo *TInfo = SemaRef.Context.getTrivialTypeSourceInfo(Type, Loc);
920  auto *Decl =
921  VarDecl::Create(SemaRef.Context, DC, Loc, Loc, II, Type, TInfo, SC_None);
922  if (Attrs) {
923  for (specific_attr_iterator<AlignedAttr> I(Attrs->begin()), E(Attrs->end());
924  I != E; ++I)
925  Decl->addAttr(*I);
926  }
927  Decl->setImplicit();
928  if (OrigRef) {
929  Decl->addAttr(
930  OMPReferencedVarAttr::CreateImplicit(SemaRef.Context, OrigRef));
931  }
932  return Decl;
933 }
934 
935 static DeclRefExpr *buildDeclRefExpr(Sema &S, VarDecl *D, QualType Ty,
936  SourceLocation Loc,
937  bool RefersToCapture = false) {
938  D->setReferenced();
939  D->markUsed(S.Context);
940  return DeclRefExpr::Create(S.getASTContext(), NestedNameSpecifierLoc(),
941  SourceLocation(), D, RefersToCapture, Loc, Ty,
942  VK_LValue);
943 }
944 
945 void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
946  BinaryOperatorKind BOK) {
947  D = getCanonicalDecl(D);
948  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
949  assert(
950  Stack.back().first.back().SharingMap[D].Attributes == OMPC_reduction &&
951  "Additional reduction info may be specified only for reduction items.");
952  ReductionData &ReductionData = Stack.back().first.back().ReductionMap[D];
953  assert(ReductionData.ReductionRange.isInvalid() &&
954  Stack.back().first.back().Directive == OMPD_taskgroup &&
955  "Additional reduction info may be specified only once for reduction "
956  "items.");
957  ReductionData.set(BOK, SR);
958  Expr *&TaskgroupReductionRef =
959  Stack.back().first.back().TaskgroupReductionRef;
960  if (!TaskgroupReductionRef) {
961  VarDecl *VD = buildVarDecl(SemaRef, SR.getBegin(),
962  SemaRef.Context.VoidPtrTy, ".task_red.");
963  TaskgroupReductionRef =
964  buildDeclRefExpr(SemaRef, VD, SemaRef.Context.VoidPtrTy, SR.getBegin());
965  }
966 }
967 
968 void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
969  const Expr *ReductionRef) {
970  D = getCanonicalDecl(D);
971  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
972  assert(
973  Stack.back().first.back().SharingMap[D].Attributes == OMPC_reduction &&
974  "Additional reduction info may be specified only for reduction items.");
975  ReductionData &ReductionData = Stack.back().first.back().ReductionMap[D];
976  assert(ReductionData.ReductionRange.isInvalid() &&
977  Stack.back().first.back().Directive == OMPD_taskgroup &&
978  "Additional reduction info may be specified only once for reduction "
979  "items.");
980  ReductionData.set(ReductionRef, SR);
981  Expr *&TaskgroupReductionRef =
982  Stack.back().first.back().TaskgroupReductionRef;
983  if (!TaskgroupReductionRef) {
984  VarDecl *VD = buildVarDecl(SemaRef, SR.getBegin(),
985  SemaRef.Context.VoidPtrTy, ".task_red.");
986  TaskgroupReductionRef =
987  buildDeclRefExpr(SemaRef, VD, SemaRef.Context.VoidPtrTy, SR.getBegin());
988  }
989 }
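// Illustrative example (not from the original source), hypothetical user code:
//
//   #pragma omp taskgroup task_reduction(+: sum)
//   {
//   #pragma omp task in_reduction(+: sum)
//     sum += f();
//   }
//
// The two overloads above record the reduction id for 'sum' (an operator or a
// user-declared reduction identifier) and lazily build the '.task_red.'
// descriptor variable that inner in_reduction clauses refer to.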
990 
991 const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
992  const ValueDecl *D, SourceRange &SR, BinaryOperatorKind &BOK,
993  Expr *&TaskgroupDescriptor) const {
994  D = getCanonicalDecl(D);
995  assert(!isStackEmpty() && "Data-sharing attributes stack is empty.");
996  if (Stack.back().first.empty())
997  return DSAVarData();
998  for (iterator I = std::next(Stack.back().first.rbegin(), 1),
999  E = Stack.back().first.rend();
1000  I != E; std::advance(I, 1)) {
1001  const DSAInfo &Data = I->SharingMap.lookup(D);
1002  if (Data.Attributes != OMPC_reduction || I->Directive != OMPD_taskgroup)
1003  continue;
1004  const ReductionData &ReductionData = I->ReductionMap.lookup(D);
1005  if (!ReductionData.ReductionOp ||
1006  ReductionData.ReductionOp.is<const Expr *>())
1007  return DSAVarData();
1008  SR = ReductionData.ReductionRange;
1009  BOK = ReductionData.ReductionOp.get<ReductionData::BOKPtrType>();
1010  assert(I->TaskgroupReductionRef && "taskgroup reduction reference "
1011  "expression for the descriptor is not "
1012  "set.");
1013  TaskgroupDescriptor = I->TaskgroupReductionRef;
1014  return DSAVarData(OMPD_taskgroup, OMPC_reduction, Data.RefExpr.getPointer(),
1015  Data.PrivateCopy, I->DefaultAttrLoc);
1016  }
1017  return DSAVarData();
1018 }
1019 
1020 const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
1021  const ValueDecl *D, SourceRange &SR, const Expr *&ReductionRef,
1022  Expr *&TaskgroupDescriptor) const {
1023  D = getCanonicalDecl(D);
1024  assert(!isStackEmpty() && "Data-sharing attributes stack is empty.");
1025  if (Stack.back().first.empty())
1026  return DSAVarData();
1027  for (iterator I = std::next(Stack.back().first.rbegin(), 1),
1028  E = Stack.back().first.rend();
1029  I != E; std::advance(I, 1)) {
1030  const DSAInfo &Data = I->SharingMap.lookup(D);
1031  if (Data.Attributes != OMPC_reduction || I->Directive != OMPD_taskgroup)
1032  continue;
1033  const ReductionData &ReductionData = I->ReductionMap.lookup(D);
1034  if (!ReductionData.ReductionOp ||
1035  !ReductionData.ReductionOp.is<const Expr *>())
1036  return DSAVarData();
1037  SR = ReductionData.ReductionRange;
1038  ReductionRef = ReductionData.ReductionOp.get<const Expr *>();
1039  assert(I->TaskgroupReductionRef && "taskgroup reduction reference "
1040  "expression for the descriptor is not "
1041  "set.");
1042  TaskgroupDescriptor = I->TaskgroupReductionRef;
1043  return DSAVarData(OMPD_taskgroup, OMPC_reduction, Data.RefExpr.getPointer(),
1044  Data.PrivateCopy, I->DefaultAttrLoc);
1045  }
1046  return DSAVarData();
1047 }
1048 
1049 bool DSAStackTy::isOpenMPLocal(VarDecl *D, iterator Iter) const {
1050  D = D->getCanonicalDecl();
1051  if (!isStackEmpty()) {
1052  iterator I = Iter, E = Stack.back().first.rend();
1053  Scope *TopScope = nullptr;
1054  while (I != E && !isParallelOrTaskRegion(I->Directive) &&
1055  !isOpenMPTargetExecutionDirective(I->Directive))
1056  ++I;
1057  if (I == E)
1058  return false;
1059  TopScope = I->CurScope ? I->CurScope->getParent() : nullptr;
1060  Scope *CurScope = getCurScope();
1061  while (CurScope != TopScope && !CurScope->isDeclScope(D))
1062  CurScope = CurScope->getParent();
1063  return CurScope != TopScope;
1064  }
1065  return false;
1066 }
1067 
1068 const DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D,
1069  bool FromParent) {
1070  D = getCanonicalDecl(D);
1071  DSAVarData DVar;
1072 
1073  auto *VD = dyn_cast<VarDecl>(D);
1074  auto TI = Threadprivates.find(D);
1075  if (TI != Threadprivates.end()) {
1076  DVar.RefExpr = TI->getSecond().RefExpr.getPointer();
1077  DVar.CKind = OMPC_threadprivate;
1078  return DVar;
1079  }
1080  if (VD && VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
1081  DVar.RefExpr = buildDeclRefExpr(
1082  SemaRef, VD, D->getType().getNonReferenceType(),
1083  VD->getAttr<OMPThreadPrivateDeclAttr>()->getLocation());
1084  DVar.CKind = OMPC_threadprivate;
1085  addDSA(D, DVar.RefExpr, OMPC_threadprivate);
1086  return DVar;
1087  }
1088  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1089  // in a Construct, C/C++, predetermined, p.1]
1090  // Variables appearing in threadprivate directives are threadprivate.
1091  if ((VD && VD->getTLSKind() != VarDecl::TLS_None &&
1092  !(VD->hasAttr<OMPThreadPrivateDeclAttr>() &&
1093  SemaRef.getLangOpts().OpenMPUseTLS &&
1094  SemaRef.getASTContext().getTargetInfo().isTLSSupported())) ||
1095  (VD && VD->getStorageClass() == SC_Register &&
1096  VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())) {
1097  DVar.RefExpr = buildDeclRefExpr(
1098  SemaRef, VD, D->getType().getNonReferenceType(), D->getLocation());
1099  DVar.CKind = OMPC_threadprivate;
1100  addDSA(D, DVar.RefExpr, OMPC_threadprivate);
1101  return DVar;
1102  }
1103  if (SemaRef.getLangOpts().OpenMPCUDAMode && VD &&
1104  VD->isLocalVarDeclOrParm() && !isStackEmpty() &&
1105  !isLoopControlVariable(D).first) {
1106  iterator IterTarget =
1107  std::find_if(Stack.back().first.rbegin(), Stack.back().first.rend(),
1108  [](const SharingMapTy &Data) {
1109  return isOpenMPTargetExecutionDirective(Data.Directive);
1110  });
1111  if (IterTarget != Stack.back().first.rend()) {
1112  iterator ParentIterTarget = std::next(IterTarget, 1);
1113  for (iterator Iter = Stack.back().first.rbegin();
1114  Iter != ParentIterTarget; std::advance(Iter, 1)) {
1115  if (isOpenMPLocal(VD, Iter)) {
1116  DVar.RefExpr =
1117  buildDeclRefExpr(SemaRef, VD, D->getType().getNonReferenceType(),
1118  D->getLocation());
1119  DVar.CKind = OMPC_threadprivate;
1120  return DVar;
1121  }
1122  }
1123  if (!isClauseParsingMode() || IterTarget != Stack.back().first.rbegin()) {
1124  auto DSAIter = IterTarget->SharingMap.find(D);
1125  if (DSAIter != IterTarget->SharingMap.end() &&
1126  isOpenMPPrivate(DSAIter->getSecond().Attributes)) {
1127  DVar.RefExpr = DSAIter->getSecond().RefExpr.getPointer();
1128  DVar.CKind = OMPC_threadprivate;
1129  return DVar;
1130  }
1131  iterator End = Stack.back().first.rend();
1132  if (!SemaRef.isOpenMPCapturedByRef(
1133  D, std::distance(ParentIterTarget, End))) {
1134  DVar.RefExpr =
1135  buildDeclRefExpr(SemaRef, VD, D->getType().getNonReferenceType(),
1136  IterTarget->ConstructLoc);
1137  DVar.CKind = OMPC_threadprivate;
1138  return DVar;
1139  }
1140  }
1141  }
1142  }
1143 
1144  if (isStackEmpty())
1145  // Not in OpenMP execution region and top scope was already checked.
1146  return DVar;
1147 
1148  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1149  // in a Construct, C/C++, predetermined, p.4]
1150  // Static data members are shared.
1151  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1152  // in a Construct, C/C++, predetermined, p.7]
1153  // Variables with static storage duration that are declared in a scope
1154  // inside the construct are shared.
1155  auto &&MatchesAlways = [](OpenMPDirectiveKind) { return true; };
1156  if (VD && VD->isStaticDataMember()) {
1157  DSAVarData DVarTemp = hasDSA(D, isOpenMPPrivate, MatchesAlways, FromParent);
1158  if (DVarTemp.CKind != OMPC_unknown && DVarTemp.RefExpr)
1159  return DVar;
1160 
1161  DVar.CKind = OMPC_shared;
1162  return DVar;
1163  }
1164 
1165  QualType Type = D->getType().getNonReferenceType();
1166  bool IsConstant = Type.isConstant(SemaRef.getASTContext());
1167  Type = SemaRef.getASTContext().getBaseElementType(Type);
1168  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1169  // in a Construct, C/C++, predetermined, p.6]
1170  // Variables with const qualified type having no mutable member are
1171  // shared.
1172  const CXXRecordDecl *RD =
1173  SemaRef.getLangOpts().CPlusPlus ? Type->getAsCXXRecordDecl() : nullptr;
1174  if (const auto *CTSD = dyn_cast_or_null<ClassTemplateSpecializationDecl>(RD))
1175  if (const ClassTemplateDecl *CTD = CTSD->getSpecializedTemplate())
1176  RD = CTD->getTemplatedDecl();
1177  if (IsConstant &&
1178  !(SemaRef.getLangOpts().CPlusPlus && RD && RD->hasDefinition() &&
1179  RD->hasMutableFields())) {
1180  // Variables with const-qualified type having no mutable member may be
1181  // listed in a firstprivate clause, even if they are static data members.
1182  DSAVarData DVarTemp =
1183  hasDSA(D, [](OpenMPClauseKind C) { return C == OMPC_firstprivate; },
1184  MatchesAlways, FromParent);
1185  if (DVarTemp.CKind == OMPC_firstprivate && DVarTemp.RefExpr)
1186  return DVarTemp;
1187 
1188  DVar.CKind = OMPC_shared;
1189  return DVar;
1190  }
1191 
1192  // Explicitly specified attributes and local variables with predetermined
1193  // attributes.
1194  iterator I = Stack.back().first.rbegin();
1195  iterator EndI = Stack.back().first.rend();
1196  if (FromParent && I != EndI)
1197  std::advance(I, 1);
1198  auto It = I->SharingMap.find(D);
1199  if (It != I->SharingMap.end()) {
1200  const DSAInfo &Data = It->getSecond();
1201  DVar.RefExpr = Data.RefExpr.getPointer();
1202  DVar.PrivateCopy = Data.PrivateCopy;
1203  DVar.CKind = Data.Attributes;
1204  DVar.ImplicitDSALoc = I->DefaultAttrLoc;
1205  DVar.DKind = I->Directive;
1206  }
1207 
1208  return DVar;
1209 }
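// Illustrative example (not from the original source), hypothetical user code:
//
//   int counter;
//   #pragma omp threadprivate(counter)
//
// getTopDSA() reports OMPC_threadprivate for 'counter' in any region, matching
// the predetermined rules handled above.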
1210 
1211 const DSAStackTy::DSAVarData DSAStackTy::getImplicitDSA(ValueDecl *D,
1212  bool FromParent) const {
1213  if (isStackEmpty()) {
1214  iterator I;
1215  return getDSA(I, D);
1216  }
1217  D = getCanonicalDecl(D);
1218  iterator StartI = Stack.back().first.rbegin();
1219  iterator EndI = Stack.back().first.rend();
1220  if (FromParent && StartI != EndI)
1221  std::advance(StartI, 1);
1222  return getDSA(StartI, D);
1223 }
1224 
1225 const DSAStackTy::DSAVarData
1226 DSAStackTy::hasDSA(ValueDecl *D,
1227  const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
1228  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
1229  bool FromParent) const {
1230  if (isStackEmpty())
1231  return {};
1232  D = getCanonicalDecl(D);
1233  iterator I = Stack.back().first.rbegin();
1234  iterator EndI = Stack.back().first.rend();
1235  if (FromParent && I != EndI)
1236  std::advance(I, 1);
1237  for (; I != EndI; std::advance(I, 1)) {
1238  if (!DPred(I->Directive) && !isParallelOrTaskRegion(I->Directive))
1239  continue;
1240  iterator NewI = I;
1241  DSAVarData DVar = getDSA(NewI, D);
1242  if (I == NewI && CPred(DVar.CKind))
1243  return DVar;
1244  }
1245  return {};
1246 }
1247 
1248 const DSAStackTy::DSAVarData DSAStackTy::hasInnermostDSA(
1249  ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
1250  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
1251  bool FromParent) const {
1252  if (isStackEmpty())
1253  return {};
1254  D = getCanonicalDecl(D);
1255  iterator StartI = Stack.back().first.rbegin();
1256  iterator EndI = Stack.back().first.rend();
1257  if (FromParent && StartI != EndI)
1258  std::advance(StartI, 1);
1259  if (StartI == EndI || !DPred(StartI->Directive))
1260  return {};
1261  iterator NewI = StartI;
1262  DSAVarData DVar = getDSA(NewI, D);
1263  return (NewI == StartI && CPred(DVar.CKind)) ? DVar : DSAVarData();
1264 }
1265 
1266 bool DSAStackTy::hasExplicitDSA(
1267  const ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
1268  unsigned Level, bool NotLastprivate) const {
1269  if (isStackEmpty())
1270  return false;
1271  D = getCanonicalDecl(D);
1272  auto StartI = Stack.back().first.begin();
1273  auto EndI = Stack.back().first.end();
1274  if (std::distance(StartI, EndI) <= (int)Level)
1275  return false;
1276  std::advance(StartI, Level);
1277  auto I = StartI->SharingMap.find(D);
1278  if ((I != StartI->SharingMap.end()) &&
1279  I->getSecond().RefExpr.getPointer() &&
1280  CPred(I->getSecond().Attributes) &&
1281  (!NotLastprivate || !I->getSecond().RefExpr.getInt()))
1282  return true;
1283  // Check predetermined rules for the loop control variables.
1284  auto LI = StartI->LCVMap.find(D);
1285  if (LI != StartI->LCVMap.end())
1286  return CPred(OMPC_private);
1287  return false;
1288 }
1289 
1290 bool DSAStackTy::hasExplicitDirective(
1291  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
1292  unsigned Level) const {
1293  if (isStackEmpty())
1294  return false;
1295  auto StartI = Stack.back().first.begin();
1296  auto EndI = Stack.back().first.end();
1297  if (std::distance(StartI, EndI) <= (int)Level)
1298  return false;
1299  std::advance(StartI, Level);
1300  return DPred(StartI->Directive);
1301 }
1302 
1303 bool DSAStackTy::hasDirective(
1304  const llvm::function_ref<bool(OpenMPDirectiveKind,
1305      const DeclarationNameInfo &, SourceLocation)>
1306      DPred,
1307  bool FromParent) const {
1308  // We look only in the enclosing region.
1309  if (isStackEmpty())
1310  return false;
1311  auto StartI = std::next(Stack.back().first.rbegin());
1312  auto EndI = Stack.back().first.rend();
1313  if (FromParent && StartI != EndI)
1314  StartI = std::next(StartI);
1315  for (auto I = StartI, EE = EndI; I != EE; ++I) {
1316  if (DPred(I->Directive, I->DirectiveName, I->ConstructLoc))
1317  return true;
1318  }
1319  return false;
1320 }
1321 
1322 void Sema::InitDataSharingAttributesStack() {
1323  VarDataSharingAttributesStack = new DSAStackTy(*this);
1324 }
1325 
1326 #define DSAStack static_cast<DSAStackTy *>(VarDataSharingAttributesStack)
1327 
1328 void Sema::pushOpenMPFunctionRegion() {
1329  DSAStack->pushFunction();
1330 }
1331 
1332 void Sema::popOpenMPFunctionRegion(const FunctionScopeInfo *OldFSI) {
1333  DSAStack->popFunction(OldFSI);
1334 }
1335 
1336 bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const {
1337  assert(LangOpts.OpenMP && "OpenMP is not allowed");
1338 
1339  ASTContext &Ctx = getASTContext();
1340  bool IsByRef = true;
1341 
1342  // Find the directive that is associated with the provided scope.
1343  D = cast<ValueDecl>(D->getCanonicalDecl());
1344  QualType Ty = D->getType();
1345 
1346  if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective, Level)) {
1347  // This table summarizes how a given variable should be passed to the device
1348  // given its type and the clauses where it appears. This table is based on
1349  // the description in OpenMP 4.5 [2.10.4, target Construct] and
1350  // OpenMP 4.5 [2.15.5, Data-mapping Attribute Rules and Clauses].
1351  //
1352  // =========================================================================
1353  // | type | defaultmap | pvt | first | is_device_ptr | map | res. |
1354  // | |(tofrom:scalar)| | pvt | | | |
1355  // =========================================================================
1356  // | scl | | | | - | | bycopy|
1357  // | scl | | - | x | - | - | bycopy|
1358  // | scl | | x | - | - | - | null |
1359  // | scl | x | | | - | | byref |
1360  // | scl | x | - | x | - | - | bycopy|
1361  // | scl | x | x | - | - | - | null |
1362  // | scl | | - | - | - | x | byref |
1363  // | scl | x | - | - | - | x | byref |
1364  //
1365  // | agg | n.a. | | | - | | byref |
1366  // | agg | n.a. | - | x | - | - | byref |
1367  // | agg | n.a. | x | - | - | - | null |
1368  // | agg | n.a. | - | - | - | x | byref |
1369  // | agg | n.a. | - | - | - | x[] | byref |
1370  //
1371  // | ptr | n.a. | | | - | | bycopy|
1372  // | ptr | n.a. | - | x | - | - | bycopy|
1373  // | ptr | n.a. | x | - | - | - | null |
1374  // | ptr | n.a. | - | - | - | x | byref |
1375  // | ptr | n.a. | - | - | - | x[] | bycopy|
1376  // | ptr | n.a. | - | - | x | | bycopy|
1377  // | ptr | n.a. | - | - | x | x | bycopy|
1378  // | ptr | n.a. | - | - | x | x[] | bycopy|
1379  // =========================================================================
1380  // Legend:
1381  // scl - scalar
1382  // ptr - pointer
1383  // agg - aggregate
1384  // x - applies
1385  // - - invalid in this combination
1386  // [] - mapped with an array section
1387  // byref - should be mapped by reference
1388  // byval - should be mapped by value
1389  // null - initialize a local variable to null on the device
1390  //
1391  // Observations:
1392  // - All scalar declarations that show up in a map clause have to be passed
1393  // by reference, because they may have been mapped in the enclosing data
1394  // environment.
1395  // - If the scalar value does not fit the size of uintptr, it has to be
1396  // passed by reference, regardless the result in the table above.
1397  // - For pointers mapped by value that have either an implicit map or an
1398  // array section, the runtime library may pass the NULL value to the
1399  // device instead of the value passed to it by the compiler.
1400 
1401  if (Ty->isReferenceType())
1402  Ty = Ty->castAs<ReferenceType>()->getPointeeType();
1403 
1404  // Locate map clauses and see if the variable being captured is referred to
1405  // in any of those clauses. Here we only care about variables, not fields,
1406  // because fields are part of aggregates.
1407  bool IsVariableUsedInMapClause = false;
1408  bool IsVariableAssociatedWithSection = false;
1409 
1410  DSAStack->checkMappableExprComponentListsForDeclAtLevel(
1411  D, Level,
1412  [&IsVariableUsedInMapClause, &IsVariableAssociatedWithSection, D](
1413      OMPClauseMappableExprCommon::MappableExprComponentListRef
1414          MapExprComponents,
1415  OpenMPClauseKind WhereFoundClauseKind) {
1416  // Only the map clause information influences how a variable is
1417  // captured. E.g. is_device_ptr does not require changing the default
1418  // behavior.
1419  if (WhereFoundClauseKind != OMPC_map)
1420  return false;
1421 
1422  auto EI = MapExprComponents.rbegin();
1423  auto EE = MapExprComponents.rend();
1424 
1425  assert(EI != EE && "Invalid map expression!");
1426 
1427  if (isa<DeclRefExpr>(EI->getAssociatedExpression()))
1428  IsVariableUsedInMapClause |= EI->getAssociatedDeclaration() == D;
1429 
1430  ++EI;
1431  if (EI == EE)
1432  return false;
1433 
1434  if (isa<ArraySubscriptExpr>(EI->getAssociatedExpression()) ||
1435  isa<OMPArraySectionExpr>(EI->getAssociatedExpression()) ||
1436  isa<MemberExpr>(EI->getAssociatedExpression())) {
1437  IsVariableAssociatedWithSection = true;
1438  // There is nothing more we need to know about this variable.
1439  return true;
1440  }
1441 
1442  // Keep looking for more map info.
1443  return false;
1444  });
1445 
1446  if (IsVariableUsedInMapClause) {
1447  // If the variable is identified in a map clause it is always captured by
1448  // reference, except when it is a pointer that is dereferenced somehow.
1449  IsByRef = !(Ty->isPointerType() && IsVariableAssociatedWithSection);
1450  } else {
1451  // By default, all the data that has a scalar type is mapped by copy
1452  // (except for reduction variables).
1453  IsByRef =
1454  (DSAStack->isForceCaptureByReferenceInTargetExecutable() &&
1455  !Ty->isAnyPointerType()) ||
1456  !Ty->isScalarType() ||
1457  DSAStack->getDefaultDMAAtLevel(Level) == DMA_tofrom_scalar ||
1458  DSAStack->hasExplicitDSA(
1459  D, [](OpenMPClauseKind K) { return K == OMPC_reduction; }, Level);
1460  }
1461  }
1462 
1463  if (IsByRef && Ty.getNonReferenceType()->isScalarType()) {
1464  IsByRef =
1465  ((DSAStack->isForceCaptureByReferenceInTargetExecutable() &&
1466  !Ty->isAnyPointerType()) ||
1467  !DSAStack->hasExplicitDSA(
1468  D,
1469  [](OpenMPClauseKind K) -> bool { return K == OMPC_firstprivate; },
1470  Level, /*NotLastprivate=*/true)) &&
1471  // If the variable is artificial and must be captured by value, try to
1472  // capture by value.
1473  !(isa<OMPCapturedExprDecl>(D) && !D->hasAttr<OMPCaptureNoInitAttr>() &&
1474  !cast<OMPCapturedExprDecl>(D)->getInit()->isGLValue());
1475  }
1476 
1477  // When passing data by copy, we need to make sure it fits the uintptr size
1478  // and alignment, because the runtime library only deals with uintptr types.
1479  // If it does not fit the uintptr size, we need to pass the data by reference
1480  // instead.
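  // For example (a sketch; assumes a 64-bit host where uintptr_t is 8 bytes),
  // a captured '__int128' or 16-byte 'long double' scalar would be switched
  // back to byref here even if the table above selected bycopy for it.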
1481  if (!IsByRef &&
1482  (Ctx.getTypeSizeInChars(Ty) >
1483  Ctx.getTypeSizeInChars(Ctx.getUIntPtrType()) ||
1484  Ctx.getDeclAlign(D) > Ctx.getTypeAlignInChars(Ctx.getUIntPtrType()))) {
1485  IsByRef = true;
1486  }
1487 
1488  return IsByRef;
1489 }
1490 
1491 unsigned Sema::getOpenMPNestingLevel() const {
1492  assert(getLangOpts().OpenMP);
1493  return DSAStack->getNestingLevel();
1494 }
1495 
1497  return (isOpenMPTargetExecutionDirective(DSAStack->getCurrentDirective()) &&
1498  !DSAStack->isClauseParsingMode()) ||
1499  DSAStack->hasDirective(
1501  SourceLocation) -> bool {
1502  return isOpenMPTargetExecutionDirective(K);
1503  },
1504  false);
1505 }
1506 
1508  assert(LangOpts.OpenMP && "OpenMP is not allowed");
1509  D = getCanonicalDecl(D);
1510 
1511  // If we are attempting to capture a global variable in a directive with
1512  // 'target' we return it so that this global is also mapped to the device.
1513  //
1514  auto *VD = dyn_cast<VarDecl>(D);
1515  if (VD && !VD->hasLocalStorage()) {
1516  if (isInOpenMPDeclareTargetContext() &&
1517  (getCurCapturedRegion() || getCurBlock() || getCurLambda())) {
1518  // Try to mark variable as declare target if it is used in capturing
1519  // regions.
1520  if (!OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
1521  checkDeclIsAllowedInOpenMPTarget(nullptr, VD);
1522  return nullptr;
1523  } else if (isInOpenMPTargetExecutionDirective()) {
1524  // If the declaration is enclosed in a 'declare target' directive,
1525  // then it should not be captured.
1526  //
1527  if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
1528  return nullptr;
1529  return VD;
1530  }
1531  }
1532  // Capture variables captured by reference in lambdas for target-based
1533  // directives.
1534  if (VD && !DSAStack->isClauseParsingMode()) {
1535  if (const auto *RD = VD->getType()
1536  .getCanonicalType()
1537  .getNonReferenceType()
1538  ->getAsCXXRecordDecl()) {
1539  bool SavedForceCaptureByReferenceInTargetExecutable =
1540  DSAStack->isForceCaptureByReferenceInTargetExecutable();
1541  DSAStack->setForceCaptureByReferenceInTargetExecutable(/*V=*/true);
1542  if (RD->isLambda()) {
1543  llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
1544  FieldDecl *ThisCapture;
1545  RD->getCaptureFields(Captures, ThisCapture);
1546  for (const LambdaCapture &LC : RD->captures()) {
1547  if (LC.getCaptureKind() == LCK_ByRef) {
1548  VarDecl *VD = LC.getCapturedVar();
1549  DeclContext *VDC = VD->getDeclContext();
1550  if (!VDC->Encloses(CurContext))
1551  continue;
1552  DSAStackTy::DSAVarData DVarPrivate =
1553  DSAStack->getTopDSA(VD, /*FromParent=*/false);
1554  // Do not capture already captured variables.
1555  if (!OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD) &&
1556  DVarPrivate.CKind == OMPC_unknown &&
1557  !DSAStack->checkMappableExprComponentListsForDecl(
1558  D, /*CurrentRegionOnly=*/true,
1560  MappableExprComponentListRef,
1561  OpenMPClauseKind) { return true; }))
1562  MarkVariableReferenced(LC.getLocation(), LC.getCapturedVar());
1563  } else if (LC.getCaptureKind() == LCK_This) {
1564  QualType ThisTy = getCurrentThisType();
1565  if (!ThisTy.isNull() &&
1566  Context.typesAreCompatible(ThisTy, ThisCapture->getType()))
1567  CheckCXXThisCapture(LC.getLocation());
1568  }
1569  }
1570  }
1571  DSAStack->setForceCaptureByReferenceInTargetExecutable(
1572  SavedForceCaptureByReferenceInTargetExecutable);
1573  }
1574  }
1575 
1576  if (DSAStack->getCurrentDirective() != OMPD_unknown &&
1577  (!DSAStack->isClauseParsingMode() ||
1578  DSAStack->getParentDirective() != OMPD_unknown)) {
1579  auto &&Info = DSAStack->isLoopControlVariable(D);
1580  if (Info.first ||
1581  (VD && VD->hasLocalStorage() &&
1582  isParallelOrTaskRegion(DSAStack->getCurrentDirective())) ||
1583  (VD && DSAStack->isForceVarCapturing()))
1584  return VD ? VD : Info.second;
1585  DSAStackTy::DSAVarData DVarPrivate =
1586  DSAStack->getTopDSA(D, DSAStack->isClauseParsingMode());
1587  if (DVarPrivate.CKind != OMPC_unknown && isOpenMPPrivate(DVarPrivate.CKind))
1588  return VD ? VD : cast<VarDecl>(DVarPrivate.PrivateCopy->getDecl());
1589  DVarPrivate = DSAStack->hasDSA(D, isOpenMPPrivate,
1590  [](OpenMPDirectiveKind) { return true; },
1591  DSAStack->isClauseParsingMode());
1592  if (DVarPrivate.CKind != OMPC_unknown)
1593  return VD ? VD : cast<VarDecl>(DVarPrivate.PrivateCopy->getDecl());
1594  }
1595  return nullptr;
1596 }
1597 
1598 void Sema::adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
1599  unsigned Level) const {
1601  getOpenMPCaptureRegions(Regions, DSAStack->getDirective(Level));
1602  FunctionScopesIndex -= Regions.size();
1603 }
1604 
1606  assert(LangOpts.OpenMP && "OpenMP must be enabled.");
1607  if (isOpenMPLoopDirective(DSAStack->getCurrentDirective()))
1608  DSAStack->loopInit();
1609 }
1610 
1611 bool Sema::isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const {
1612  assert(LangOpts.OpenMP && "OpenMP is not allowed");
1613  if (isOpenMPLoopDirective(DSAStack->getCurrentDirective())) {
1614  if (DSAStack->getAssociatedLoops() > 0 &&
1615  !DSAStack->isLoopStarted()) {
1616  DSAStack->resetPossibleLoopCounter(D);
1617  DSAStack->loopStart();
1618  return true;
1619  }
1620  if ((DSAStack->getPossiblyLoopCunter() == D->getCanonicalDecl() ||
1621  DSAStack->isLoopControlVariable(D).first) &&
1622  !DSAStack->hasExplicitDSA(
1623  D, [](OpenMPClauseKind K) { return K != OMPC_private; }, Level) &&
1624  !isOpenMPSimdDirective(DSAStack->getCurrentDirective()))
1625  return true;
1626  }
1627  return DSAStack->hasExplicitDSA(
1628  D, [](OpenMPClauseKind K) { return K == OMPC_private; }, Level) ||
1629  (DSAStack->isClauseParsingMode() &&
1630  DSAStack->getClauseParsingMode() == OMPC_private) ||
1631  // Consider the taskgroup reduction descriptor variable private to avoid
1632  // possible capture in the region.
1633  (DSAStack->hasExplicitDirective(
1634  [](OpenMPDirectiveKind K) { return K == OMPD_taskgroup; },
1635  Level) &&
1636  DSAStack->isTaskgroupReductionRef(D, Level));
1637 }
1638 
1640  unsigned Level) {
1641  assert(LangOpts.OpenMP && "OpenMP is not allowed");
1642  D = getCanonicalDecl(D);
1643  OpenMPClauseKind OMPC = OMPC_unknown;
1644  for (unsigned I = DSAStack->getNestingLevel() + 1; I > Level; --I) {
1645  const unsigned NewLevel = I - 1;
1646  if (DSAStack->hasExplicitDSA(D,
1647  [&OMPC](const OpenMPClauseKind K) {
1648  if (isOpenMPPrivate(K)) {
1649  OMPC = K;
1650  return true;
1651  }
1652  return false;
1653  },
1654  NewLevel))
1655  break;
1656  if (DSAStack->checkMappableExprComponentListsForDeclAtLevel(
1657  D, NewLevel,
1659  OpenMPClauseKind) { return true; })) {
1660  OMPC = OMPC_map;
1661  break;
1662  }
1663  if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective,
1664  NewLevel)) {
1665  OMPC = OMPC_map;
1666  if (D->getType()->isScalarType() &&
1667  DSAStack->getDefaultDMAAtLevel(NewLevel) !=
1668  DefaultMapAttributes::DMA_tofrom_scalar)
1669  OMPC = OMPC_firstprivate;
1670  break;
1671  }
1672  }
1673  if (OMPC != OMPC_unknown)
1674  FD->addAttr(OMPCaptureKindAttr::CreateImplicit(Context, OMPC));
1675 }
1676 
1678  unsigned Level) const {
1679  assert(LangOpts.OpenMP && "OpenMP is not allowed");
1680  // Return true if the current level is no longer enclosed in a target region.
1681 
1682  const auto *VD = dyn_cast<VarDecl>(D);
1683  return VD && !VD->hasLocalStorage() &&
1684  DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective,
1685  Level);
1686 }
1687 
1688 void Sema::DestroyDataSharingAttributesStack() { delete DSAStack; }
1689 
1691  const DeclarationNameInfo &DirName,
1692  Scope *CurScope, SourceLocation Loc) {
1693  DSAStack->push(DKind, DirName, CurScope, Loc);
1694  PushExpressionEvaluationContext(
1695  ExpressionEvaluationContext::PotentiallyEvaluated);
1696 }
1697 
1699  DSAStack->setClauseParsingMode(K);
1700 }
1701 
1703  DSAStack->setClauseParsingMode(/*K=*/OMPC_unknown);
1704 }
1705 
1706 void Sema::EndOpenMPDSABlock(Stmt *CurDirective) {
1707  // OpenMP [2.14.3.5, Restrictions, C/C++, p.1]
1708  // A variable of class type (or array thereof) that appears in a lastprivate
1709  // clause requires an accessible, unambiguous default constructor for the
1710  // class type, unless the list item is also specified in a firstprivate
1711  // clause.
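  // Illustrative snippet (not from this file; 'S' and the loop are
  // hypothetical):
  //
  //   struct S { S(); S(const S &); };
  //   S s;
  //   #pragma omp parallel for lastprivate(s)  // S() is needed to build the
  //   for (int i = 0; i < 10; ++i) { }         // private copy created below,
  //                                            // unless 's' is also firstprivate.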
1712  if (const auto *D = dyn_cast_or_null<OMPExecutableDirective>(CurDirective)) {
1713  for (OMPClause *C : D->clauses()) {
1714  if (auto *Clause = dyn_cast<OMPLastprivateClause>(C)) {
1715  SmallVector<Expr *, 8> PrivateCopies;
1716  for (Expr *DE : Clause->varlists()) {
1717  if (DE->isValueDependent() || DE->isTypeDependent()) {
1718  PrivateCopies.push_back(nullptr);
1719  continue;
1720  }
1721  auto *DRE = cast<DeclRefExpr>(DE->IgnoreParens());
1722  auto *VD = cast<VarDecl>(DRE->getDecl());
1723  QualType Type = VD->getType().getNonReferenceType();
1724  const DSAStackTy::DSAVarData DVar =
1725  DSAStack->getTopDSA(VD, /*FromParent=*/false);
1726  if (DVar.CKind == OMPC_lastprivate) {
1727  // Generate helper private variable and initialize it with the
1728  // default value. The address of the original variable is replaced
1729  // by the address of the new private variable in CodeGen. This new
1730  // variable is not added to IdResolver, so the code in the OpenMP
1731  // region uses the original variable for proper diagnostics.
1732  VarDecl *VDPrivate = buildVarDecl(
1733  *this, DE->getExprLoc(), Type.getUnqualifiedType(),
1734  VD->getName(), VD->hasAttrs() ? &VD->getAttrs() : nullptr, DRE);
1735  ActOnUninitializedDecl(VDPrivate);
1736  if (VDPrivate->isInvalidDecl())
1737  continue;
1738  PrivateCopies.push_back(buildDeclRefExpr(
1739  *this, VDPrivate, DE->getType(), DE->getExprLoc()));
1740  } else {
1741  // The variable is also a firstprivate, so the initialization sequence
1742  // for the private copy has already been generated.
1743  PrivateCopies.push_back(nullptr);
1744  }
1745  }
1746  // Set initializers to private copies if no errors were found.
1747  if (PrivateCopies.size() == Clause->varlist_size())
1748  Clause->setPrivateCopies(PrivateCopies);
1749  }
1750  }
1751  }
1752 
1753  DSAStack->pop();
1754  DiscardCleanupsInEvaluationContext();
1755  PopExpressionEvaluationContext();
1756 }
1757 
1758 static bool FinishOpenMPLinearClause(OMPLinearClause &Clause, DeclRefExpr *IV,
1759  Expr *NumIterations, Sema &SemaRef,
1760  Scope *S, DSAStackTy *Stack);
1761 
1762 namespace {
1763 
1764 class VarDeclFilterCCC final : public CorrectionCandidateCallback {
1765 private:
1766  Sema &SemaRef;
1767 
1768 public:
1769  explicit VarDeclFilterCCC(Sema &S) : SemaRef(S) {}
1770  bool ValidateCandidate(const TypoCorrection &Candidate) override {
1771  NamedDecl *ND = Candidate.getCorrectionDecl();
1772  if (const auto *VD = dyn_cast_or_null<VarDecl>(ND)) {
1773  return VD->hasGlobalStorage() &&
1774  SemaRef.isDeclInScope(ND, SemaRef.getCurLexicalContext(),
1775  SemaRef.getCurScope());
1776  }
1777  return false;
1778  }
1779 };
1780 
1781 class VarOrFuncDeclFilterCCC final : public CorrectionCandidateCallback {
1782 private:
1783  Sema &SemaRef;
1784 
1785 public:
1786  explicit VarOrFuncDeclFilterCCC(Sema &S) : SemaRef(S) {}
1787  bool ValidateCandidate(const TypoCorrection &Candidate) override {
1788  NamedDecl *ND = Candidate.getCorrectionDecl();
1789  if (ND && (isa<VarDecl>(ND) || isa<FunctionDecl>(ND))) {
1790  return SemaRef.isDeclInScope(ND, SemaRef.getCurLexicalContext(),
1791  SemaRef.getCurScope());
1792  }
1793  return false;
1794  }
1795 };
1796 
1797 } // namespace
1798 
1800  CXXScopeSpec &ScopeSpec,
1801  const DeclarationNameInfo &Id) {
1802  LookupResult Lookup(*this, Id, LookupOrdinaryName);
1803  LookupParsedName(Lookup, CurScope, &ScopeSpec, true);
1804 
1805  if (Lookup.isAmbiguous())
1806  return ExprError();
1807 
1808  VarDecl *VD;
1809  if (!Lookup.isSingleResult()) {
1810  if (TypoCorrection Corrected = CorrectTypo(
1811  Id, LookupOrdinaryName, CurScope, nullptr,
1812  llvm::make_unique<VarDeclFilterCCC>(*this), CTK_ErrorRecovery)) {
1813  diagnoseTypo(Corrected,
1814  PDiag(Lookup.empty()
1815  ? diag::err_undeclared_var_use_suggest
1816  : diag::err_omp_expected_var_arg_suggest)
1817  << Id.getName());
1818  VD = Corrected.getCorrectionDeclAs<VarDecl>();
1819  } else {
1820  Diag(Id.getLoc(), Lookup.empty() ? diag::err_undeclared_var_use
1821  : diag::err_omp_expected_var_arg)
1822  << Id.getName();
1823  return ExprError();
1824  }
1825  } else if (!(VD = Lookup.getAsSingle<VarDecl>())) {
1826  Diag(Id.getLoc(), diag::err_omp_expected_var_arg) << Id.getName();
1827  Diag(Lookup.getFoundDecl()->getLocation(), diag::note_declared_at);
1828  return ExprError();
1829  }
1830  Lookup.suppressDiagnostics();
1831 
1832  // OpenMP [2.9.2, Syntax, C/C++]
1833  // Variables must be file-scope, namespace-scope, or static block-scope.
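  // Illustrative snippet (hypothetical names):
  //
  //   int G;
  //   #pragma omp threadprivate(G)    // OK: file-scope variable
  //   void f() {
  //     int L;
  //     #pragma omp threadprivate(L)  // diagnosed below: automatic storage
  //   }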
1834  if (!VD->hasGlobalStorage()) {
1835  Diag(Id.getLoc(), diag::err_omp_global_var_arg)
1836  << getOpenMPDirectiveName(OMPD_threadprivate) << !VD->isStaticLocal();
1837  bool IsDecl =
1838  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
1839  Diag(VD->getLocation(),
1840  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
1841  << VD;
1842  return ExprError();
1843  }
1844 
1845  VarDecl *CanonicalVD = VD->getCanonicalDecl();
1846  NamedDecl *ND = CanonicalVD;
1847  // OpenMP [2.9.2, Restrictions, C/C++, p.2]
1848  // A threadprivate directive for file-scope variables must appear outside
1849  // any definition or declaration.
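  // Illustrative snippet (hypothetical names):
  //
  //   int G;
  //   void f() {
  //     #pragma omp threadprivate(G)  // diagnosed below: the directive for a
  //   }                               // file-scope variable appears inside a
  //                                   // function definition.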
1850  if (CanonicalVD->getDeclContext()->isTranslationUnit() &&
1851  !getCurLexicalContext()->isTranslationUnit()) {
1852  Diag(Id.getLoc(), diag::err_omp_var_scope)
1853  << getOpenMPDirectiveName(OMPD_threadprivate) << VD;
1854  bool IsDecl =
1855  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
1856  Diag(VD->getLocation(),
1857  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
1858  << VD;
1859  return ExprError();
1860  }
1861  // OpenMP [2.9.2, Restrictions, C/C++, p.3]
1862  // A threadprivate directive for static class member variables must appear
1863  // in the class definition, in the same scope in which the member
1864  // variables are declared.
1865  if (CanonicalVD->isStaticDataMember() &&
1866  !CanonicalVD->getDeclContext()->Equals(getCurLexicalContext())) {
1867  Diag(Id.getLoc(), diag::err_omp_var_scope)
1868  << getOpenMPDirectiveName(OMPD_threadprivate) << VD;
1869  bool IsDecl =
1870  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
1871  Diag(VD->getLocation(),
1872  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
1873  << VD;
1874  return ExprError();
1875  }
1876  // OpenMP [2.9.2, Restrictions, C/C++, p.4]
1877  // A threadprivate directive for namespace-scope variables must appear
1878  // outside any definition or declaration other than the namespace
1879  // definition itself.
1880  if (CanonicalVD->getDeclContext()->isNamespace() &&
1881  (!getCurLexicalContext()->isFileContext() ||
1882  !getCurLexicalContext()->Encloses(CanonicalVD->getDeclContext()))) {
1883  Diag(Id.getLoc(), diag::err_omp_var_scope)
1884  << getOpenMPDirectiveName(OMPD_threadprivate) << VD;
1885  bool IsDecl =
1886  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
1887  Diag(VD->getLocation(),
1888  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
1889  << VD;
1890  return ExprError();
1891  }
1892  // OpenMP [2.9.2, Restrictions, C/C++, p.6]
1893  // A threadprivate directive for static block-scope variables must appear
1894  // in the scope of the variable and not in a nested scope.
1895  if (CanonicalVD->isStaticLocal() && CurScope &&
1896  !isDeclInScope(ND, getCurLexicalContext(), CurScope)) {
1897  Diag(Id.getLoc(), diag::err_omp_var_scope)
1898  << getOpenMPDirectiveName(OMPD_threadprivate) << VD;
1899  bool IsDecl =
1900  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
1901  Diag(VD->getLocation(),
1902  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
1903  << VD;
1904  return ExprError();
1905  }
1906 
1907  // OpenMP [2.9.2, Restrictions, C/C++, p.2-6]
1908  // A threadprivate directive must lexically precede all references to any
1909  // of the variables in its list.
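  // Illustrative snippet (hypothetical names):
  //
  //   int Counter;
  //   void touch() { Counter++; }         // 'Counter' is referenced here ...
  //   #pragma omp threadprivate(Counter)  // ... so this later directive is
  //                                       // diagnosed below.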
1910  if (VD->isUsed() && !DSAStack->isThreadPrivate(VD)) {
1911  Diag(Id.getLoc(), diag::err_omp_var_used)
1912  << getOpenMPDirectiveName(OMPD_threadprivate) << VD;
1913  return ExprError();
1914  }
1915 
1916  QualType ExprType = VD->getType().getNonReferenceType();
1917  return DeclRefExpr::Create(Context, NestedNameSpecifierLoc(),
1918  SourceLocation(), VD,
1919  /*RefersToEnclosingVariableOrCapture=*/false,
1920  Id.getLoc(), ExprType, VK_LValue);
1921 }
1922 
1925  ArrayRef<Expr *> VarList) {
1926  if (OMPThreadPrivateDecl *D = CheckOMPThreadPrivateDecl(Loc, VarList)) {
1927  CurContext->addDecl(D);
1928  return DeclGroupPtrTy::make(DeclGroupRef(D));
1929  }
1930  return nullptr;
1931 }
1932 
1933 namespace {
1934 class LocalVarRefChecker final
1935  : public ConstStmtVisitor<LocalVarRefChecker, bool> {
1936  Sema &SemaRef;
1937 
1938 public:
1939  bool VisitDeclRefExpr(const DeclRefExpr *E) {
1940  if (const auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
1941  if (VD->hasLocalStorage()) {
1942  SemaRef.Diag(E->getBeginLoc(),
1943  diag::err_omp_local_var_in_threadprivate_init)
1944  << E->getSourceRange();
1945  SemaRef.Diag(VD->getLocation(), diag::note_defined_here)
1946  << VD << VD->getSourceRange();
1947  return true;
1948  }
1949  }
1950  return false;
1951  }
1952  bool VisitStmt(const Stmt *S) {
1953  for (const Stmt *Child : S->children()) {
1954  if (Child && Visit(Child))
1955  return true;
1956  }
1957  return false;
1958  }
1959  explicit LocalVarRefChecker(Sema &SemaRef) : SemaRef(SemaRef) {}
1960 };
1961 } // namespace
1962 
1966  for (Expr *RefExpr : VarList) {
1967  auto *DE = cast<DeclRefExpr>(RefExpr);
1968  auto *VD = cast<VarDecl>(DE->getDecl());
1969  SourceLocation ILoc = DE->getExprLoc();
1970 
1971  // Mark variable as used.
1972  VD->setReferenced();
1973  VD->markUsed(Context);
1974 
1975  QualType QType = VD->getType();
1976  if (QType->isDependentType() || QType->isInstantiationDependentType()) {
1977  // It will be analyzed later.
1978  Vars.push_back(DE);
1979  continue;
1980  }
1981 
1982  // OpenMP [2.9.2, Restrictions, C/C++, p.10]
1983  // A threadprivate variable must not have an incomplete type.
1984  if (RequireCompleteType(ILoc, VD->getType(),
1985  diag::err_omp_threadprivate_incomplete_type)) {
1986  continue;
1987  }
1988 
1989  // OpenMP [2.9.2, Restrictions, C/C++, p.10]
1990  // A threadprivate variable must not have a reference type.
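  // Illustrative snippet (hypothetical names):
  //
  //   int G;
  //   int &R = G;
  //   #pragma omp threadprivate(R)  // diagnosed below: reference type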
1991  if (VD->getType()->isReferenceType()) {
1992  Diag(ILoc, diag::err_omp_ref_type_arg)
1993  << getOpenMPDirectiveName(OMPD_threadprivate) << VD->getType();
1994  bool IsDecl =
1995  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
1996  Diag(VD->getLocation(),
1997  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
1998  << VD;
1999  continue;
2000  }
2001 
2002  // Check if this is a TLS variable. If TLS is not supported, produce
2003  // the corresponding diagnostic.
2004  if ((VD->getTLSKind() != VarDecl::TLS_None &&
2005  !(VD->hasAttr<OMPThreadPrivateDeclAttr>() &&
2006  getLangOpts().OpenMPUseTLS &&
2007  getASTContext().getTargetInfo().isTLSSupported())) ||
2008  (VD->getStorageClass() == SC_Register && VD->hasAttr<AsmLabelAttr>() &&
2009  !VD->isLocalVarDecl())) {
2010  Diag(ILoc, diag::err_omp_var_thread_local)
2011  << VD << ((VD->getTLSKind() != VarDecl::TLS_None) ? 0 : 1);
2012  bool IsDecl =
2013  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
2014  Diag(VD->getLocation(),
2015  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2016  << VD;
2017  continue;
2018  }
2019 
2020  // Check if the initial value of the threadprivate variable references a
2021  // variable with local storage (this is not supported by the runtime).
2022  if (const Expr *Init = VD->getAnyInitializer()) {
2023  LocalVarRefChecker Checker(*this);
2024  if (Checker.Visit(Init))
2025  continue;
2026  }
2027 
2028  Vars.push_back(RefExpr);
2029  DSAStack->addDSA(VD, DE, OMPC_threadprivate);
2030  VD->addAttr(OMPThreadPrivateDeclAttr::CreateImplicit(
2031  Context, SourceRange(Loc, Loc)));
2032  if (ASTMutationListener *ML = Context.getASTMutationListener())
2033  ML->DeclarationMarkedOpenMPThreadPrivate(VD);
2034  }
2035  OMPThreadPrivateDecl *D = nullptr;
2036  if (!Vars.empty()) {
2037  D = OMPThreadPrivateDecl::Create(Context, getCurLexicalContext(), Loc,
2038  Vars);
2039  D->setAccess(AS_public);
2040  }
2041  return D;
2042 }
2043 
2046  ArrayRef<OMPClause *> ClauseList) {
2047  OMPRequiresDecl *D = nullptr;
2048  if (!CurContext->isFileContext()) {
2049  Diag(Loc, diag::err_omp_invalid_scope) << "requires";
2050  } else {
2051  D = CheckOMPRequiresDecl(Loc, ClauseList);
2052  if (D) {
2053  CurContext->addDecl(D);
2054  DSAStack->addRequiresDecl(D);
2055  }
2056  }
2057  return DeclGroupPtrTy::make(DeclGroupRef(D));
2058 }
2059 
2061  ArrayRef<OMPClause *> ClauseList) {
2062  if (!DSAStack->hasDuplicateRequiresClause(ClauseList))
2063  return OMPRequiresDecl::Create(Context, getCurLexicalContext(), Loc,
2064  ClauseList);
2065  return nullptr;
2066 }
2067 
2068 static void reportOriginalDsa(Sema &SemaRef, const DSAStackTy *Stack,
2069  const ValueDecl *D,
2070  const DSAStackTy::DSAVarData &DVar,
2071  bool IsLoopIterVar = false) {
2072  if (DVar.RefExpr) {
2073  SemaRef.Diag(DVar.RefExpr->getExprLoc(), diag::note_omp_explicit_dsa)
2074  << getOpenMPClauseName(DVar.CKind);
2075  return;
2076  }
2077  enum {
2078  PDSA_StaticMemberShared,
2079  PDSA_StaticLocalVarShared,
2080  PDSA_LoopIterVarPrivate,
2081  PDSA_LoopIterVarLinear,
2082  PDSA_LoopIterVarLastprivate,
2083  PDSA_ConstVarShared,
2084  PDSA_GlobalVarShared,
2085  PDSA_TaskVarFirstprivate,
2086  PDSA_LocalVarPrivate,
2087  PDSA_Implicit
2088  } Reason = PDSA_Implicit;
2089  bool ReportHint = false;
2090  auto ReportLoc = D->getLocation();
2091  auto *VD = dyn_cast<VarDecl>(D);
2092  if (IsLoopIterVar) {
2093  if (DVar.CKind == OMPC_private)
2094  Reason = PDSA_LoopIterVarPrivate;
2095  else if (DVar.CKind == OMPC_lastprivate)
2096  Reason = PDSA_LoopIterVarLastprivate;
2097  else
2098  Reason = PDSA_LoopIterVarLinear;
2099  } else if (isOpenMPTaskingDirective(DVar.DKind) &&
2100  DVar.CKind == OMPC_firstprivate) {
2101  Reason = PDSA_TaskVarFirstprivate;
2102  ReportLoc = DVar.ImplicitDSALoc;
2103  } else if (VD && VD->isStaticLocal())
2104  Reason = PDSA_StaticLocalVarShared;
2105  else if (VD && VD->isStaticDataMember())
2106  Reason = PDSA_StaticMemberShared;
2107  else if (VD && VD->isFileVarDecl())
2108  Reason = PDSA_GlobalVarShared;
2109  else if (D->getType().isConstant(SemaRef.getASTContext()))
2110  Reason = PDSA_ConstVarShared;
2111  else if (VD && VD->isLocalVarDecl() && DVar.CKind == OMPC_private) {
2112  ReportHint = true;
2113  Reason = PDSA_LocalVarPrivate;
2114  }
2115  if (Reason != PDSA_Implicit) {
2116  SemaRef.Diag(ReportLoc, diag::note_omp_predetermined_dsa)
2117  << Reason << ReportHint
2118  << getOpenMPDirectiveName(Stack->getCurrentDirective());
2119  } else if (DVar.ImplicitDSALoc.isValid()) {
2120  SemaRef.Diag(DVar.ImplicitDSALoc, diag::note_omp_implicit_dsa)
2121  << getOpenMPClauseName(DVar.CKind);
2122  }
2123 }
2124 
2125 namespace {
2126 class DSAAttrChecker final : public StmtVisitor<DSAAttrChecker, void> {
2127  DSAStackTy *Stack;
2128  Sema &SemaRef;
2129  bool ErrorFound = false;
2130  CapturedStmt *CS = nullptr;
2131  llvm::SmallVector<Expr *, 4> ImplicitFirstprivate;
2132  llvm::SmallVector<Expr *, 4> ImplicitMap;
2133  Sema::VarsWithInheritedDSAType VarsWithInheritedDSA;
2134  llvm::SmallDenseSet<const ValueDecl *, 4> ImplicitDeclarations;
2135 
2136  void VisitSubCaptures(OMPExecutableDirective *S) {
2137  // Check implicitly captured variables.
2138  if (!S->hasAssociatedStmt() || !S->getAssociatedStmt())
2139  return;
2140  for (const CapturedStmt::Capture &Cap :
2142  if (!Cap.capturesVariable())
2143  continue;
2144  VarDecl *VD = Cap.getCapturedVar();
2145  // Do not try to map the variable if it or its sub-component was mapped
2146  // already.
2147  if (isOpenMPTargetExecutionDirective(Stack->getCurrentDirective()) &&
2148  Stack->checkMappableExprComponentListsForDecl(
2149  VD, /*CurrentRegionOnly=*/true,
2151  OpenMPClauseKind) { return true; }))
2152  continue;
2154  SemaRef, VD, VD->getType().getNonLValueExprType(SemaRef.Context),
2155  Cap.getLocation(), /*RefersToCapture=*/true);
2156  Visit(DRE);
2157  }
2158  }
2159 
2160 public:
2161  void VisitDeclRefExpr(DeclRefExpr *E) {
2162  if (E->isTypeDependent() || E->isValueDependent() ||
2164  return;
2165  if (auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
2166  VD = VD->getCanonicalDecl();
2167  // Skip internally declared variables.
2168  if (VD->hasLocalStorage() && !CS->capturesVariable(VD))
2169  return;
2170 
2171  DSAStackTy::DSAVarData DVar = Stack->getTopDSA(VD, /*FromParent=*/false);
2172  // Check if the variable has an explicit DSA set and stop the analysis if so.
2173  if (DVar.RefExpr || !ImplicitDeclarations.insert(VD).second)
2174  return;
2175 
2176  // Skip internally declared static variables.
2178  OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
2179  if (VD->hasGlobalStorage() && !CS->capturesVariable(VD) &&
2180  (!Res || *Res != OMPDeclareTargetDeclAttr::MT_Link))
2181  return;
2182 
2183  SourceLocation ELoc = E->getExprLoc();
2184  OpenMPDirectiveKind DKind = Stack->getCurrentDirective();
2185  // The default(none) clause requires that each variable that is referenced
2186  // in the construct, and does not have a predetermined data-sharing
2187  // attribute, must have its data-sharing attribute explicitly determined
2188  // by being listed in a data-sharing attribute clause.
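  // Illustrative snippet (hypothetical names):
  //
  //   int x = 0;
  //   #pragma omp parallel default(none)  // 'x' has no predetermined DSA, so
  //   { x++; }                            // it must be listed explicitly,
  //                                       // e.g. shared(x) or private(x).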
2189  if (DVar.CKind == OMPC_unknown && Stack->getDefaultDSA() == DSA_none &&
2190  isParallelOrTaskRegion(DKind) &&
2191  VarsWithInheritedDSA.count(VD) == 0) {
2192  VarsWithInheritedDSA[VD] = E;
2193  return;
2194  }
2195 
2196  if (isOpenMPTargetExecutionDirective(DKind) &&
2197  !Stack->isLoopControlVariable(VD).first) {
2198  if (!Stack->checkMappableExprComponentListsForDecl(
2199  VD, /*CurrentRegionOnly=*/true,
2201  StackComponents,
2202  OpenMPClauseKind) {
2203  // The variable is used if it has been marked as an array, an array
2204  // section, or the variable itself.
2205  return StackComponents.size() == 1 ||
2206  std::all_of(
2207  std::next(StackComponents.rbegin()),
2208  StackComponents.rend(),
2209  [](const OMPClauseMappableExprCommon::
2210  MappableComponent &MC) {
2211  return MC.getAssociatedDeclaration() ==
2212  nullptr &&
2213  (isa<OMPArraySectionExpr>(
2214  MC.getAssociatedExpression()) ||
2215  isa<ArraySubscriptExpr>(
2216  MC.getAssociatedExpression()));
2217  });
2218  })) {
2219  bool IsFirstprivate = false;
2220  // By default lambdas are captured as firstprivates.
2221  if (const auto *RD =
2222  VD->getType().getNonReferenceType()->getAsCXXRecordDecl())
2223  IsFirstprivate = RD->isLambda();
2224  IsFirstprivate =
2225  IsFirstprivate ||
2226  (VD->getType().getNonReferenceType()->isScalarType() &&
2227  Stack->getDefaultDMA() != DMA_tofrom_scalar && !Res);
2228  if (IsFirstprivate)
2229  ImplicitFirstprivate.emplace_back(E);
2230  else
2231  ImplicitMap.emplace_back(E);
2232  return;
2233  }
2234  }
2235 
2236  // OpenMP [2.9.3.6, Restrictions, p.2]
2237  // A list item that appears in a reduction clause of the innermost
2238  // enclosing worksharing or parallel construct may not be accessed in an
2239  // explicit task.
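  // Illustrative snippet (hypothetical names):
  //
  //   int sum = 0;
  //   #pragma omp parallel reduction(+: sum)
  //   {
  //     #pragma omp task     // accessing 'sum' here is diagnosed below
  //     { sum += 1; }
  //   }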
2240  DVar = Stack->hasInnermostDSA(
2241  VD, [](OpenMPClauseKind C) { return C == OMPC_reduction; },
2242  [](OpenMPDirectiveKind K) {
2243  return isOpenMPParallelDirective(K) ||
2245  },
2246  /*FromParent=*/true);
2247  if (isOpenMPTaskingDirective(DKind) && DVar.CKind == OMPC_reduction) {
2248  ErrorFound = true;
2249  SemaRef.Diag(ELoc, diag::err_omp_reduction_in_task);
2250  reportOriginalDsa(SemaRef, Stack, VD, DVar);
2251  return;
2252  }
2253 
2254  // Define implicit data-sharing attributes for task.
2255  DVar = Stack->getImplicitDSA(VD, /*FromParent=*/false);
2256  if (isOpenMPTaskingDirective(DKind) && DVar.CKind != OMPC_shared &&
2257  !Stack->isLoopControlVariable(VD).first)
2258  ImplicitFirstprivate.push_back(E);
2259  }
2260  }
2261  void VisitMemberExpr(MemberExpr *E) {
2262  if (E->isTypeDependent() || E->isValueDependent() ||
2264  return;
2265  auto *FD = dyn_cast<FieldDecl>(E->getMemberDecl());
2266  OpenMPDirectiveKind DKind = Stack->getCurrentDirective();
2267  if (isa<CXXThisExpr>(E->getBase()->IgnoreParens())) {
2268  if (!FD)
2269  return;
2270  DSAStackTy::DSAVarData DVar = Stack->getTopDSA(FD, /*FromParent=*/false);
2271  // Check if the variable has an explicit DSA set and stop the analysis
2272  // if so.
2273  if (DVar.RefExpr || !ImplicitDeclarations.insert(FD).second)
2274  return;
2275 
2276  if (isOpenMPTargetExecutionDirective(DKind) &&
2277  !Stack->isLoopControlVariable(FD).first &&
2278  !Stack->checkMappableExprComponentListsForDecl(
2279  FD, /*CurrentRegionOnly=*/true,
2281  StackComponents,
2282  OpenMPClauseKind) {
2283  return isa<CXXThisExpr>(
2284  cast<MemberExpr>(
2285  StackComponents.back().getAssociatedExpression())
2286  ->getBase()
2287  ->IgnoreParens());
2288  })) {
2289  // OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, C/C++, p.3]
2290  // A bit-field cannot appear in a map clause.
2291  //
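  // Illustrative snippet (hypothetical names):
  //
  //   struct S {
  //     int Bits : 3;
  //     void f() {
  //       #pragma omp target  // 'Bits' is a bit-field, so no implicit map
  //       { Bits = 1; }       // entry is produced for it here.
  //     }
  //   };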
2292  if (FD->isBitField())
2293  return;
2294  ImplicitMap.emplace_back(E);
2295  return;
2296  }
2297 
2298  SourceLocation ELoc = E->getExprLoc();
2299  // OpenMP [2.9.3.6, Restrictions, p.2]
2300  // A list item that appears in a reduction clause of the innermost
2301  // enclosing worksharing or parallel construct may not be accessed in
2302  // an explicit task.
2303  DVar = Stack->hasInnermostDSA(
2304  FD, [](OpenMPClauseKind C) { return C == OMPC_reduction; },
2305  [](OpenMPDirectiveKind K) {
2306  return isOpenMPParallelDirective(K) ||
2308  },
2309  /*FromParent=*/true);
2310  if (isOpenMPTaskingDirective(DKind) && DVar.CKind == OMPC_reduction) {
2311  ErrorFound = true;
2312  SemaRef.Diag(ELoc, diag::err_omp_reduction_in_task);
2313  reportOriginalDsa(SemaRef, Stack, FD, DVar);
2314  return;
2315  }
2316 
2317  // Define implicit data-sharing attributes for task.
2318  DVar = Stack->getImplicitDSA(FD, /*FromParent=*/false);
2319  if (isOpenMPTaskingDirective(DKind) && DVar.CKind != OMPC_shared &&
2320  !Stack->isLoopControlVariable(FD).first) {
2321  // Check if there is a captured expression for the current field in the
2322  // region. Do not mark it as firstprivate unless there is no captured
2323  // expression.
2324  // TODO: try to make it firstprivate.
2325  if (DVar.CKind != OMPC_unknown)
2326  ImplicitFirstprivate.push_back(E);
2327  }
2328  return;
2329  }
2330  if (isOpenMPTargetExecutionDirective(DKind)) {
2332  if (!checkMapClauseExpressionBase(SemaRef, E, CurComponents, OMPC_map,
2333  /*NoDiagnose=*/true))
2334  return;
2335  const auto *VD = cast<ValueDecl>(
2336  CurComponents.back().getAssociatedDeclaration()->getCanonicalDecl());
2337  if (!Stack->checkMappableExprComponentListsForDecl(
2338  VD, /*CurrentRegionOnly=*/true,
2339  [&CurComponents](
2341  StackComponents,
2342  OpenMPClauseKind) {
2343  auto CCI = CurComponents.rbegin();
2344  auto CCE = CurComponents.rend();
2345  for (const auto &SC : llvm::reverse(StackComponents)) {
2346  // Do both expressions have the same kind?
2347  if (CCI->getAssociatedExpression()->getStmtClass() !=
2348  SC.getAssociatedExpression()->getStmtClass())
2349  if (!(isa<OMPArraySectionExpr>(
2350  SC.getAssociatedExpression()) &&
2351  isa<ArraySubscriptExpr>(
2352  CCI->getAssociatedExpression())))
2353  return false;
2354 
2355  const Decl *CCD = CCI->getAssociatedDeclaration();
2356  const Decl *SCD = SC.getAssociatedDeclaration();
2357  CCD = CCD ? CCD->getCanonicalDecl() : nullptr;
2358  SCD = SCD ? SCD->getCanonicalDecl() : nullptr;
2359  if (SCD != CCD)
2360  return false;
2361  std::advance(CCI, 1);
2362  if (CCI == CCE)
2363  break;
2364  }
2365  return true;
2366  })) {
2367  Visit(E->getBase());
2368  }
2369  } else {
2370  Visit(E->getBase());
2371  }
2372  }
2373  void VisitOMPExecutableDirective(OMPExecutableDirective *S) {
2374  for (OMPClause *C : S->clauses()) {
2375  // Skip analysis of arguments of implicitly defined firstprivate clause
2376  // for task|target directives.
2377  // Skip analysis of arguments of implicitly defined map clause for target
2378  // directives.
2379  if (C && !((isa<OMPFirstprivateClause>(C) || isa<OMPMapClause>(C)) &&
2380  C->isImplicit())) {
2381  for (Stmt *CC : C->children()) {
2382  if (CC)
2383  Visit(CC);
2384  }
2385  }
2386  }
2387  // Check implicitly captured variables.
2388  VisitSubCaptures(S);
2389  }
2390  void VisitStmt(Stmt *S) {
2391  for (Stmt *C : S->children()) {
2392  if (C) {
2393  if (auto *OED = dyn_cast<OMPExecutableDirective>(C)) {
2394  // Check implicitly captured variables in the task-based directives to
2395  // see whether they must be firstprivatized.
2396  VisitSubCaptures(OED);
2397  } else {
2398  Visit(C);
2399  }
2400  }
2401  }
2402  }
2403 
2404  bool isErrorFound() const { return ErrorFound; }
2405  ArrayRef<Expr *> getImplicitFirstprivate() const {
2406  return ImplicitFirstprivate;
2407  }
2408  ArrayRef<Expr *> getImplicitMap() const { return ImplicitMap; }
2409  const Sema::VarsWithInheritedDSAType &getVarsWithInheritedDSA() const {
2410  return VarsWithInheritedDSA;
2411  }
2412 
2413  DSAAttrChecker(DSAStackTy *S, Sema &SemaRef, CapturedStmt *CS)
2414  : Stack(S), SemaRef(SemaRef), ErrorFound(false), CS(CS) {}
2415 };
2416 } // namespace
2417 
2419  switch (DKind) {
2420  case OMPD_parallel:
2421  case OMPD_parallel_for:
2422  case OMPD_parallel_for_simd:
2423  case OMPD_parallel_sections:
2424  case OMPD_teams:
2425  case OMPD_teams_distribute:
2426  case OMPD_teams_distribute_simd: {
2427  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
2428  QualType KmpInt32PtrTy =
2429  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
2430  Sema::CapturedParamNameType Params[] = {
2431  std::make_pair(".global_tid.", KmpInt32PtrTy),
2432  std::make_pair(".bound_tid.", KmpInt32PtrTy),
2433  std::make_pair(StringRef(), QualType()) // __context with shared vars
2434  };
2435  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2436  Params);
2437  break;
2438  }
2439  case OMPD_target_teams:
2440  case OMPD_target_parallel:
2441  case OMPD_target_parallel_for:
2442  case OMPD_target_parallel_for_simd:
2443  case OMPD_target_teams_distribute:
2444  case OMPD_target_teams_distribute_simd: {
2445  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
2446  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
2447  QualType KmpInt32PtrTy =
2448  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
2449  QualType Args[] = {VoidPtrTy};
2450  FunctionProtoType::ExtProtoInfo EPI;
2451  EPI.Variadic = true;
2452  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
2453  Sema::CapturedParamNameType Params[] = {
2454  std::make_pair(".global_tid.", KmpInt32Ty),
2455  std::make_pair(".part_id.", KmpInt32PtrTy),
2456  std::make_pair(".privates.", VoidPtrTy),
2457  std::make_pair(
2458  ".copy_fn.",
2459  Context.getPointerType(CopyFnType).withConst().withRestrict()),
2460  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
2461  std::make_pair(StringRef(), QualType()) // __context with shared vars
2462  };
2463  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2464  Params);
2465  // Mark this captured region as inlined, because we don't use the outlined
2466  // function directly.
2467  getCurCapturedRegion()->TheCapturedDecl->addAttr(
2468  AlwaysInlineAttr::CreateImplicit(
2469  Context, AlwaysInlineAttr::Keyword_forceinline));
2470  Sema::CapturedParamNameType ParamsTarget[] = {
2471  std::make_pair(StringRef(), QualType()) // __context with shared vars
2472  };
2473  // Start a captured region for 'target' with no implicit parameters.
2474  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2475  ParamsTarget);
2476  Sema::CapturedParamNameType ParamsTeamsOrParallel[] = {
2477  std::make_pair(".global_tid.", KmpInt32PtrTy),
2478  std::make_pair(".bound_tid.", KmpInt32PtrTy),
2479  std::make_pair(StringRef(), QualType()) // __context with shared vars
2480  };
2481  // Start a captured region for 'teams' or 'parallel'. Both regions have
2482  // the same implicit parameters.
2483  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2484  ParamsTeamsOrParallel);
2485  break;
2486  }
2487  case OMPD_target:
2488  case OMPD_target_simd: {
2489  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
2490  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
2491  QualType KmpInt32PtrTy =
2492  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
2493  QualType Args[] = {VoidPtrTy};
2494  FunctionProtoType::ExtProtoInfo EPI;
2495  EPI.Variadic = true;
2496  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
2497  Sema::CapturedParamNameType Params[] = {
2498  std::make_pair(".global_tid.", KmpInt32Ty),
2499  std::make_pair(".part_id.", KmpInt32PtrTy),
2500  std::make_pair(".privates.", VoidPtrTy),
2501  std::make_pair(
2502  ".copy_fn.",
2503  Context.getPointerType(CopyFnType).withConst().withRestrict()),
2504  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
2505  std::make_pair(StringRef(), QualType()) // __context with shared vars
2506  };
2507  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2508  Params);
2509  // Mark this captured region as inlined, because we don't use the outlined
2510  // function directly.
2511  getCurCapturedRegion()->TheCapturedDecl->addAttr(
2512  AlwaysInlineAttr::CreateImplicit(
2513  Context, AlwaysInlineAttr::Keyword_forceinline));
2514  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2515  std::make_pair(StringRef(), QualType()));
2516  break;
2517  }
2518  case OMPD_simd:
2519  case OMPD_for:
2520  case OMPD_for_simd:
2521  case OMPD_sections:
2522  case OMPD_section:
2523  case OMPD_single:
2524  case OMPD_master:
2525  case OMPD_critical:
2526  case OMPD_taskgroup:
2527  case OMPD_distribute:
2528  case OMPD_distribute_simd:
2529  case OMPD_ordered:
2530  case OMPD_atomic:
2531  case OMPD_target_data: {
2532  Sema::CapturedParamNameType Params[] = {
2533  std::make_pair(StringRef(), QualType()) // __context with shared vars
2534  };
2535  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2536  Params);
2537  break;
2538  }
2539  case OMPD_task: {
2540  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
2541  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
2542  QualType KmpInt32PtrTy =
2543  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
2544  QualType Args[] = {VoidPtrTy};
2545  FunctionProtoType::ExtProtoInfo EPI;
2546  EPI.Variadic = true;
2547  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
2548  Sema::CapturedParamNameType Params[] = {
2549  std::make_pair(".global_tid.", KmpInt32Ty),
2550  std::make_pair(".part_id.", KmpInt32PtrTy),
2551  std::make_pair(".privates.", VoidPtrTy),
2552  std::make_pair(
2553  ".copy_fn.",
2554  Context.getPointerType(CopyFnType).withConst().withRestrict()),
2555  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
2556  std::make_pair(StringRef(), QualType()) // __context with shared vars
2557  };
2558  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2559  Params);
2560  // Mark this captured region as inlined, because we don't use the outlined
2561  // function directly.
2562  getCurCapturedRegion()->TheCapturedDecl->addAttr(
2563  AlwaysInlineAttr::CreateImplicit(
2564  Context, AlwaysInlineAttr::Keyword_forceinline));
2565  break;
2566  }
2567  case OMPD_taskloop:
2568  case OMPD_taskloop_simd: {
2569  QualType KmpInt32Ty =
2570  Context.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1)
2571  .withConst();
2572  QualType KmpUInt64Ty =
2573  Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0)
2574  .withConst();
2575  QualType KmpInt64Ty =
2576  Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1)
2577  .withConst();
2578  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
2579  QualType KmpInt32PtrTy =
2580  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
2581  QualType Args[] = {VoidPtrTy};
2582  FunctionProtoType::ExtProtoInfo EPI;
2583  EPI.Variadic = true;
2584  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
2585  Sema::CapturedParamNameType Params[] = {
2586  std::make_pair(".global_tid.", KmpInt32Ty),
2587  std::make_pair(".part_id.", KmpInt32PtrTy),
2588  std::make_pair(".privates.", VoidPtrTy),
2589  std::make_pair(
2590  ".copy_fn.",
2591  Context.getPointerType(CopyFnType).withConst().withRestrict()),
2592  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
2593  std::make_pair(".lb.", KmpUInt64Ty),
2594  std::make_pair(".ub.", KmpUInt64Ty),
2595  std::make_pair(".st.", KmpInt64Ty),
2596  std::make_pair(".liter.", KmpInt32Ty),
2597  std::make_pair(".reductions.", VoidPtrTy),
2598  std::make_pair(StringRef(), QualType()) // __context with shared vars
2599  };
2600  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2601  Params);
2602  // Mark this captured region as inlined, because we don't use the outlined
2603  // function directly.
2604  getCurCapturedRegion()->TheCapturedDecl->addAttr(
2605  AlwaysInlineAttr::CreateImplicit(
2606  Context, AlwaysInlineAttr::Keyword_forceinline));
2607  break;
2608  }
2609  case OMPD_distribute_parallel_for_simd:
2610  case OMPD_distribute_parallel_for: {
2611  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
2612  QualType KmpInt32PtrTy =
2613  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
2614  Sema::CapturedParamNameType Params[] = {
2615  std::make_pair(".global_tid.", KmpInt32PtrTy),
2616  std::make_pair(".bound_tid.", KmpInt32PtrTy),
2617  std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
2618  std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
2619  std::make_pair(StringRef(), QualType()) // __context with shared vars
2620  };
2621  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2622  Params);
2623  break;
2624  }
2625  case OMPD_target_teams_distribute_parallel_for:
2626  case OMPD_target_teams_distribute_parallel_for_simd: {
2627  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
2628  QualType KmpInt32PtrTy =
2629  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
2630  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
2631 
2632  QualType Args[] = {VoidPtrTy};
2633  FunctionProtoType::ExtProtoInfo EPI;
2634  EPI.Variadic = true;
2635  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
2636  Sema::CapturedParamNameType Params[] = {
2637  std::make_pair(".global_tid.", KmpInt32Ty),
2638  std::make_pair(".part_id.", KmpInt32PtrTy),
2639  std::make_pair(".privates.", VoidPtrTy),
2640  std::make_pair(
2641  ".copy_fn.",
2642  Context.getPointerType(CopyFnType).withConst().withRestrict()),
2643  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
2644  std::make_pair(StringRef(), QualType()) // __context with shared vars
2645  };
2646  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2647  Params);
2648  // Mark this captured region as inlined, because we don't use the outlined
2649  // function directly.
2650  getCurCapturedRegion()->TheCapturedDecl->addAttr(
2651  AlwaysInlineAttr::CreateImplicit(
2652  Context, AlwaysInlineAttr::Keyword_forceinline));
2653  Sema::CapturedParamNameType ParamsTarget[] = {
2654  std::make_pair(StringRef(), QualType()) // __context with shared vars
2655  };
2656  // Start a captured region for 'target' with no implicit parameters.
2657  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2658  ParamsTarget);
2659 
2660  Sema::CapturedParamNameType ParamsTeams[] = {
2661  std::make_pair(".global_tid.", KmpInt32PtrTy),
2662  std::make_pair(".bound_tid.", KmpInt32PtrTy),
2663  std::make_pair(StringRef(), QualType()) // __context with shared vars
2664  };
2665  // Start a captured region for 'teams'.
2666  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2667  ParamsTeams);
2668 
2669  Sema::CapturedParamNameType ParamsParallel[] = {
2670  std::make_pair(".global_tid.", KmpInt32PtrTy),
2671  std::make_pair(".bound_tid.", KmpInt32PtrTy),
2672  std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
2673  std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
2674  std::make_pair(StringRef(), QualType()) // __context with shared vars
2675  };
2676  // Start a captured region for 'parallel'; in the combined construct it
2677  // also receives the previous bounds of the enclosing 'distribute' loop.
2678  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2679  ParamsParallel);
2680  break;
2681  }
2682 
2683  case OMPD_teams_distribute_parallel_for:
2684  case OMPD_teams_distribute_parallel_for_simd: {
2685  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
2686  QualType KmpInt32PtrTy =
2687  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
2688 
2689  Sema::CapturedParamNameType ParamsTeams[] = {
2690  std::make_pair(".global_tid.", KmpInt32PtrTy),
2691  std::make_pair(".bound_tid.", KmpInt32PtrTy),
2692  std::make_pair(StringRef(), QualType()) // __context with shared vars
2693  };
2694  // Start a captured region for 'teams'.
2695  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2696  ParamsTeams);
2697 
2698  Sema::CapturedParamNameType ParamsParallel[] = {
2699  std::make_pair(".global_tid.", KmpInt32PtrTy),
2700  std::make_pair(".bound_tid.", KmpInt32PtrTy),
2701  std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
2702  std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
2703  std::make_pair(StringRef(), QualType()) // __context with shared vars
2704  };
2705  // Start a captured region for 'parallel'; in the combined construct it
2706  // also receives the previous bounds of the enclosing 'distribute' loop.
2707  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2708  ParamsParallel);
2709  break;
2710  }
2711  case OMPD_target_update:
2712  case OMPD_target_enter_data:
2713  case OMPD_target_exit_data: {
2714  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
2715  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
2716  QualType KmpInt32PtrTy =
2717  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
2718  QualType Args[] = {VoidPtrTy};
2719  FunctionProtoType::ExtProtoInfo EPI;
2720  EPI.Variadic = true;
2721  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
2722  Sema::CapturedParamNameType Params[] = {
2723  std::make_pair(".global_tid.", KmpInt32Ty),
2724  std::make_pair(".part_id.", KmpInt32PtrTy),
2725  std::make_pair(".privates.", VoidPtrTy),
2726  std::make_pair(
2727  ".copy_fn.",
2728  Context.getPointerType(CopyFnType).withConst().withRestrict()),
2729  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
2730  std::make_pair(StringRef(), QualType()) // __context with shared vars
2731  };
2732  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
2733  Params);
2734  // Mark this captured region as inlined, because we don't use the outlined
2735  // function directly.
2736  getCurCapturedRegion()->TheCapturedDecl->addAttr(
2737  AlwaysInlineAttr::CreateImplicit(
2738  Context, AlwaysInlineAttr::Keyword_forceinline));
2739  break;
2740  }
2741  case OMPD_threadprivate:
2742  case OMPD_taskyield:
2743  case OMPD_barrier:
2744  case OMPD_taskwait:
2745  case OMPD_cancellation_point:
2746  case OMPD_cancel:
2747  case OMPD_flush:
2748  case OMPD_declare_reduction:
2749  case OMPD_declare_simd:
2750  case OMPD_declare_target:
2751  case OMPD_end_declare_target:
2752  case OMPD_requires:
2753  llvm_unreachable("OpenMP Directive is not allowed");
2754  case OMPD_unknown:
2755  llvm_unreachable("Unknown OpenMP directive");
2756  }
2757 }
2758 
2760  SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
2761  getOpenMPCaptureRegions(CaptureRegions, DKind);
2762  return CaptureRegions.size();
2763 }
2764 
2766  Expr *CaptureExpr, bool WithInit,
2767  bool AsExpression) {
2768  assert(CaptureExpr);
2769  ASTContext &C = S.getASTContext();
2770  Expr *Init = AsExpression ? CaptureExpr : CaptureExpr->IgnoreImpCasts();
2771  QualType Ty = Init->getType();
2772  if (CaptureExpr->getObjectKind() == OK_Ordinary && CaptureExpr->isGLValue()) {
2773  if (S.getLangOpts().CPlusPlus) {
2774  Ty = C.getLValueReferenceType(Ty);
2775  } else {
2776  Ty = C.getPointerType(Ty);
2777  ExprResult Res =
2778  S.CreateBuiltinUnaryOp(CaptureExpr->getExprLoc(), UO_AddrOf, Init);
2779  if (!Res.isUsable())
2780  return nullptr;
2781  Init = Res.get();
2782  }
2783  WithInit = true;
2784  }
2785  auto *CED = OMPCapturedExprDecl::Create(C, S.CurContext, Id, Ty,
2786  CaptureExpr->getBeginLoc());
2787  if (!WithInit)
2788  CED->addAttr(OMPCaptureNoInitAttr::CreateImplicit(C));
2789  S.CurContext->addHiddenDecl(CED);
2790  S.AddInitializerToDecl(CED, Init, /*DirectInit=*/false);
2791  return CED;
2792 }
2793 
2794 static DeclRefExpr *buildCapture(Sema &S, ValueDecl *D, Expr *CaptureExpr,
2795  bool WithInit) {
2796  OMPCapturedExprDecl *CD;
2797  if (VarDecl *VD = S.isOpenMPCapturedDecl(D))
2798  CD = cast<OMPCapturedExprDecl>(VD);
2799  else
2800  CD = buildCaptureDecl(S, D->getIdentifier(), CaptureExpr, WithInit,
2801  /*AsExpression=*/false);
2802  return buildDeclRefExpr(S, CD, CD->getType().getNonReferenceType(),
2803  CaptureExpr->getExprLoc());
2804 }
2805 
2806 static ExprResult buildCapture(Sema &S, Expr *CaptureExpr, DeclRefExpr *&Ref) {
2807  CaptureExpr = S.DefaultLvalueConversion(CaptureExpr).get();
2808  if (!Ref) {
2810  S, &S.getASTContext().Idents.get(".capture_expr."), CaptureExpr,
2811  /*WithInit=*/true, /*AsExpression=*/true);
2812  Ref = buildDeclRefExpr(S, CD, CD->getType().getNonReferenceType(),
2813  CaptureExpr->getExprLoc());
2814  }
2815  ExprResult Res = Ref;
2816  if (!S.getLangOpts().CPlusPlus &&
2817  CaptureExpr->getObjectKind() == OK_Ordinary && CaptureExpr->isGLValue() &&
2818  Ref->getType()->isPointerType()) {
2819  Res = S.CreateBuiltinUnaryOp(CaptureExpr->getExprLoc(), UO_Deref, Ref);
2820  if (!Res.isUsable())
2821  return ExprError();
2822  }
2823  return S.DefaultLvalueConversion(Res.get());
2824 }
2825 
2826 namespace {
2827 // OpenMP directives parsed in this section are represented as a
2828 // CapturedStatement with an associated statement. If a syntax error
2829 // is detected during the parsing of the associated statement, the
2830 // compiler must abort processing and close the CapturedStatement.
2831 //
2832 // Combined directives such as 'target parallel' have more than one
2833  // nested CapturedStatement. This RAII ensures that we unwind out
2834 // of all the nested CapturedStatements when an error is found.
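// For instance (a sketch; the directive is chosen only for illustration),
//
//   #pragma omp target parallel
//   { /* body */ }
//
// is parsed into one nested CapturedStmt per capture region of the combined
// directive, and a parse error in the body must close all of them.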
2835 class CaptureRegionUnwinderRAII {
2836 private:
2837  Sema &S;
2838  bool &ErrorFound;
2839  OpenMPDirectiveKind DKind;
2840 
2841 public:
2842  CaptureRegionUnwinderRAII(Sema &S, bool &ErrorFound,
2843  OpenMPDirectiveKind DKind)
2844  : S(S), ErrorFound(ErrorFound), DKind(DKind) {}
2845  ~CaptureRegionUnwinderRAII() {
2846  if (ErrorFound) {
2847  int ThisCaptureLevel = S.getOpenMPCaptureLevels(DKind);
2848  while (--ThisCaptureLevel >= 0)
2850  }
2851  }
2852 };
2853 } // namespace
2854 
2856  ArrayRef<OMPClause *> Clauses) {
2857  bool ErrorFound = false;
2858  CaptureRegionUnwinderRAII CaptureRegionUnwinder(
2859  *this, ErrorFound, DSAStack->getCurrentDirective());
2860  if (!S.isUsable()) {
2861  ErrorFound = true;
2862  return StmtError();
2863  }
2864 
2865  SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
2866  getOpenMPCaptureRegions(CaptureRegions, DSAStack->getCurrentDirective());
2867  OMPOrderedClause *OC = nullptr;
2868  OMPScheduleClause *SC = nullptr;
2871  // This is required for proper codegen.
2872  for (OMPClause *Clause : Clauses) {
2873  if (isOpenMPTaskingDirective(DSAStack->getCurrentDirective()) &&
2874  Clause->getClauseKind() == OMPC_in_reduction) {
2875  // Capture taskgroup task_reduction descriptors inside the tasking regions
2876  // with the corresponding in_reduction items.
2877  auto *IRC = cast<OMPInReductionClause>(Clause);
2878  for (Expr *E : IRC->taskgroup_descriptors())
2879  if (E)
2880  MarkDeclarationsReferencedInExpr(E);
2881  }
2882  if (isOpenMPPrivate(Clause->getClauseKind()) ||
2883  Clause->getClauseKind() == OMPC_copyprivate ||
2884  (getLangOpts().OpenMPUseTLS &&
2885  getASTContext().getTargetInfo().isTLSSupported() &&
2886  Clause->getClauseKind() == OMPC_copyin)) {
2887  DSAStack->setForceVarCapturing(Clause->getClauseKind() == OMPC_copyin);
2888  // Mark all variables in private list clauses as used in inner region.
2889  for (Stmt *VarRef : Clause->children()) {
2890  if (auto *E = cast_or_null<Expr>(VarRef)) {
2891  MarkDeclarationsReferencedInExpr(E);
2892  }
2893  }
2894  DSAStack->setForceVarCapturing(/*V=*/false);
2895  } else if (CaptureRegions.size() > 1 ||
2896  CaptureRegions.back() != OMPD_unknown) {
2897  if (auto *C = OMPClauseWithPreInit::get(Clause))
2898  PICs.push_back(C);
2899  if (auto *C = OMPClauseWithPostUpdate::get(Clause)) {
2900  if (Expr *E = C->getPostUpdateExpr())
2901  MarkDeclarationsReferencedInExpr(E);
2902  }
2903  }
2904  if (Clause->getClauseKind() == OMPC_schedule)
2905  SC = cast<OMPScheduleClause>(Clause);
2906  else if (Clause->getClauseKind() == OMPC_ordered)
2907  OC = cast<OMPOrderedClause>(Clause);
2908  else if (Clause->getClauseKind() == OMPC_linear)
2909  LCs.push_back(cast<OMPLinearClause>(Clause));
2910  }
2911  // OpenMP, 2.7.1 Loop Construct, Restrictions
2912  // The nonmonotonic modifier cannot be specified if an ordered clause is
2913  // specified.
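  // Illustrative snippet (hypothetical):
  //
  //   #pragma omp for schedule(nonmonotonic: dynamic) ordered
  //   for (int i = 0; i < n; ++i) { }  // diagnosed below: nonmonotonic
  //                                    // cannot be combined with ordered.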
2914  if (SC &&
2915  (SC->getFirstScheduleModifier() == OMPC_SCHEDULE_MODIFIER_nonmonotonic ||
2916  SC->getSecondScheduleModifier() ==
2917  OMPC_SCHEDULE_MODIFIER_nonmonotonic) &&
2918  OC) {
2919  Diag(SC->getFirstScheduleModifier() == OMPC_SCHEDULE_MODIFIER_nonmonotonic
2922  diag::err_omp_schedule_nonmonotonic_ordered)
2923  << SourceRange(OC->getBeginLoc(), OC->getEndLoc());
2924  ErrorFound = true;
2925  }
2926  if (!LCs.empty() && OC && OC->getNumForLoops()) {
2927  for (const OMPLinearClause *C : LCs) {
2928  Diag(C->getBeginLoc(), diag::err_omp_linear_ordered)
2929  << SourceRange(OC->getBeginLoc(), OC->getEndLoc());
2930  }
2931  ErrorFound = true;
2932  }
2933  if (isOpenMPWorksharingDirective(DSAStack->getCurrentDirective()) &&
2934  isOpenMPSimdDirective(DSAStack->getCurrentDirective()) && OC &&
2935  OC->getNumForLoops()) {
2936  Diag(OC->getBeginLoc(), diag::err_omp_ordered_simd)
2937  << getOpenMPDirectiveName(DSAStack->getCurrentDirective());
2938  ErrorFound = true;
2939  }
2940  if (ErrorFound) {
2941  return StmtError();
2942  }
2943  StmtResult SR = S;
2944  for (OpenMPDirectiveKind ThisCaptureRegion : llvm::reverse(CaptureRegions)) {
2945  // Mark all variables in private list clauses as used in inner region.
2946  // Required for proper codegen of combined directives.
2947  // TODO: add processing for other clauses.
2948  if (ThisCaptureRegion != OMPD_unknown) {
2949  for (const clang::OMPClauseWithPreInit *C : PICs) {
2950  OpenMPDirectiveKind CaptureRegion = C->getCaptureRegion();
2951  // Find the particular capture region for the clause if the
2952  // directive is a combined one with multiple capture regions.
2953  // If the directive is not a combined one, the capture region
2954  // associated with the clause is OMPD_unknown and is generated
2955  // only once.
2956  if (CaptureRegion == ThisCaptureRegion ||
2957  CaptureRegion == OMPD_unknown) {
2958  if (auto *DS = cast_or_null<DeclStmt>(C->getPreInitStmt())) {
2959  for (Decl *D : DS->decls())
2960  MarkVariableReferenced(D->getLocation(), cast<VarDecl>(D));
2961  }
2962  }
2963  }
2964  }
2965  SR = ActOnCapturedRegionEnd(SR.get());
2966  }
2967  return SR;
2968 }
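For illustration, and assuming user code outside this file (the function and variable names below are invented), the loop-construct restrictions diagnosed above correspond to source like the following; the variants named in the comments are the ones rejected:

    #include <vector>

    // Illustrative only: user-level view of the restrictions checked above.
    void ordered_loop(std::vector<double> &a) {
      // Accepted: 'ordered' with a plain dynamic schedule.
      // Rejected by the checks above:
      //   - schedule(nonmonotonic : dynamic) combined with 'ordered'
      //     (err_omp_schedule_nonmonotonic_ordered)
      //   - a 'linear' clause combined with 'ordered(n)' (err_omp_linear_ordered)
      //   - 'ordered(n)' on a worksharing simd directive (err_omp_ordered_simd)
      #pragma omp parallel for ordered schedule(dynamic)
      for (int i = 0; i < static_cast<int>(a.size()); ++i) {
        #pragma omp ordered
        a[i] += 1.0;
      }
    }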
2969 
2970 static bool checkCancelRegion(Sema &SemaRef, OpenMPDirectiveKind CurrentRegion,
2971  OpenMPDirectiveKind CancelRegion,
2972  SourceLocation StartLoc) {
2973  // CancelRegion is only needed for cancel and cancellation_point.
2974  if (CurrentRegion != OMPD_cancel && CurrentRegion != OMPD_cancellation_point)
2975  return false;
2976 
2977  if (CancelRegion == OMPD_parallel || CancelRegion == OMPD_for ||
2978  CancelRegion == OMPD_sections || CancelRegion == OMPD_taskgroup)
2979  return false;
2980 
2981  SemaRef.Diag(StartLoc, diag::err_omp_wrong_cancel_region)
2982  << getOpenMPDirectiveName(CancelRegion);
2983  return true;
2984 }
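checkCancelRegion only vets the construct-type-clause of cancel and cancellation point. A minimal sketch, assuming user code not taken from this file:

    // Illustrative only: the construct-type-clause must name parallel, for,
    // sections, or taskgroup.
    void cancel_on_negative(const int *v, int n) {
      #pragma omp parallel for
      for (int i = 0; i < n; ++i) {
        if (v[i] < 0) {
          // Accepted: 'for' matches the enclosing worksharing-loop region.
          #pragma omp cancel for
        }
      }
      // '#pragma omp cancel simd' would be rejected with
      // err_omp_wrong_cancel_region, since 'simd' is not a cancellable
      // construct type.
    }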
2985 
2986 static bool checkNestingOfRegions(Sema &SemaRef, const DSAStackTy *Stack,
2987  OpenMPDirectiveKind CurrentRegion,
2988  const DeclarationNameInfo &CurrentName,
2989  OpenMPDirectiveKind CancelRegion,
2990  SourceLocation StartLoc) {
2991  if (Stack->getCurScope()) {
2992  OpenMPDirectiveKind ParentRegion = Stack->getParentDirective();
2993  OpenMPDirectiveKind OffendingRegion = ParentRegion;
2994  bool NestingProhibited = false;
2995  bool CloseNesting = true;
2996  bool OrphanSeen = false;
2997  enum {
2998  NoRecommend,
2999  ShouldBeInParallelRegion,
3000  ShouldBeInOrderedRegion,
3001  ShouldBeInTargetRegion,
3002  ShouldBeInTeamsRegion
3003  } Recommend = NoRecommend;
3004  if (isOpenMPSimdDirective(ParentRegion) && CurrentRegion != OMPD_ordered) {
3005  // OpenMP [2.16, Nesting of Regions]
3006  // OpenMP constructs may not be nested inside a simd region.
3007  // OpenMP [2.8.1,simd Construct, Restrictions]
3008  // An ordered construct with the simd clause is the only OpenMP
3009  // construct that can appear in the simd region.
3010  // Allowing a SIMD construct nested in another SIMD construct is an
3011  // extension. The OpenMP 4.5 spec does not allow it. Issue a warning
3012  // message.
3013  SemaRef.Diag(StartLoc, (CurrentRegion != OMPD_simd)
3014  ? diag::err_omp_prohibited_region_simd
3015  : diag::warn_omp_nesting_simd);
3016  return CurrentRegion != OMPD_simd;
3017  }
3018  if (ParentRegion == OMPD_atomic) {
3019  // OpenMP [2.16, Nesting of Regions]
3020  // OpenMP constructs may not be nested inside an atomic region.
3021  SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region_atomic);
3022  return true;
3023  }
3024  if (CurrentRegion == OMPD_section) {
3025  // OpenMP [2.7.2, sections Construct, Restrictions]
3026  // Orphaned section directives are prohibited. That is, the section
3027  // directives must appear within the sections construct and must not be
3028  // encountered elsewhere in the sections region.
3029  if (ParentRegion != OMPD_sections &&
3030  ParentRegion != OMPD_parallel_sections) {
3031  SemaRef.Diag(StartLoc, diag::err_omp_orphaned_section_directive)
3032  << (ParentRegion != OMPD_unknown)
3033  << getOpenMPDirectiveName(ParentRegion);
3034  return true;
3035  }
3036  return false;
3037  }
3038  // Allow some constructs (except teams) to be orphaned (they could be
3039  // used in functions, called from OpenMP regions with the required
3040  // preconditions).
3041  if (ParentRegion == OMPD_unknown &&
3042  !isOpenMPNestingTeamsDirective(CurrentRegion))
3043  return false;
3044  if (CurrentRegion == OMPD_cancellation_point ||
3045  CurrentRegion == OMPD_cancel) {
3046  // OpenMP [2.16, Nesting of Regions]
3047  // A cancellation point construct for which construct-type-clause is
3048  // taskgroup must be nested inside a task construct. A cancellation
3049  // point construct for which construct-type-clause is not taskgroup must
3050  // be closely nested inside an OpenMP construct that matches the type
3051  // specified in construct-type-clause.
3052  // A cancel construct for which construct-type-clause is taskgroup must be
3053  // nested inside a task construct. A cancel construct for which
3054  // construct-type-clause is not taskgroup must be closely nested inside an
3055  // OpenMP construct that matches the type specified in
3056  // construct-type-clause.
3057  NestingProhibited =
3058  !((CancelRegion == OMPD_parallel &&
3059  (ParentRegion == OMPD_parallel ||
3060  ParentRegion == OMPD_target_parallel)) ||
3061  (CancelRegion == OMPD_for &&
3062  (ParentRegion == OMPD_for || ParentRegion == OMPD_parallel_for ||
3063  ParentRegion == OMPD_target_parallel_for ||
3064  ParentRegion == OMPD_distribute_parallel_for ||
3065  ParentRegion == OMPD_teams_distribute_parallel_for ||
3066  ParentRegion == OMPD_target_teams_distribute_parallel_for)) ||
3067  (CancelRegion == OMPD_taskgroup && ParentRegion == OMPD_task) ||
3068  (CancelRegion == OMPD_sections &&
3069  (ParentRegion == OMPD_section || ParentRegion == OMPD_sections ||
3070  ParentRegion == OMPD_parallel_sections)));
3071  } else if (CurrentRegion == OMPD_master) {
3072  // OpenMP [2.16, Nesting of Regions]
3073  // A master region may not be closely nested inside a worksharing,
3074  // atomic, or explicit task region.
3075  NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
3076  isOpenMPTaskingDirective(ParentRegion);
3077  } else if (CurrentRegion == OMPD_critical && CurrentName.getName()) {
3078  // OpenMP [2.16, Nesting of Regions]
3079  // A critical region may not be nested (closely or otherwise) inside a
3080  // critical region with the same name. Note that this restriction is not
3081  // sufficient to prevent deadlock.
3082  SourceLocation PreviousCriticalLoc;
3083  bool DeadLock = Stack->hasDirective(
3084  [CurrentName, &PreviousCriticalLoc](OpenMPDirectiveKind K,
3085  const DeclarationNameInfo &DNI,
3086  SourceLocation Loc) {
3087  if (K == OMPD_critical && DNI.getName() == CurrentName.getName()) {
3088  PreviousCriticalLoc = Loc;
3089  return true;
3090  }
3091  return false;
3092  },
3093  false /* skip top directive */);
3094  if (DeadLock) {
3095  SemaRef.Diag(StartLoc,
3096  diag::err_omp_prohibited_region_critical_same_name)
3097  << CurrentName.getName();
3098  if (PreviousCriticalLoc.isValid())
3099  SemaRef.Diag(PreviousCriticalLoc,
3100  diag::note_omp_previous_critical_region);
3101  return true;
3102  }
3103  } else if (CurrentRegion == OMPD_barrier) {
3104  // OpenMP [2.16, Nesting of Regions]
3105  // A barrier region may not be closely nested inside a worksharing,
3106  // explicit task, critical, ordered, atomic, or master region.
3107  NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
3108  isOpenMPTaskingDirective(ParentRegion) ||
3109  ParentRegion == OMPD_master ||
3110  ParentRegion == OMPD_critical ||
3111  ParentRegion == OMPD_ordered;
3112  } else if (isOpenMPWorksharingDirective(CurrentRegion) &&
3113  !isOpenMPParallelDirective(CurrentRegion) &&
3114  !isOpenMPTeamsDirective(CurrentRegion)) {
3115  // OpenMP [2.16, Nesting of Regions]
3116  // A worksharing region may not be closely nested inside a worksharing,
3117  // explicit task, critical, ordered, atomic, or master region.
3118  NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
3119  isOpenMPTaskingDirective(ParentRegion) ||
3120  ParentRegion == OMPD_master ||
3121  ParentRegion == OMPD_critical ||
3122  ParentRegion == OMPD_ordered;
3123  Recommend = ShouldBeInParallelRegion;
3124  } else if (CurrentRegion == OMPD_ordered) {
3125  // OpenMP [2.16, Nesting of Regions]
3126  // An ordered region may not be closely nested inside a critical,
3127  // atomic, or explicit task region.
3128  // An ordered region must be closely nested inside a loop region (or
3129  // parallel loop region) with an ordered clause.
3130  // OpenMP [2.8.1,simd Construct, Restrictions]
3131  // An ordered construct with the simd clause is the only OpenMP construct
3132  // that can appear in the simd region.
3133  NestingProhibited = ParentRegion == OMPD_critical ||
3134  isOpenMPTaskingDirective(ParentRegion) ||
3135  !(isOpenMPSimdDirective(ParentRegion) ||
3136  Stack->isParentOrderedRegion());
3137  Recommend = ShouldBeInOrderedRegion;
3138  } else if (isOpenMPNestingTeamsDirective(CurrentRegion)) {
3139  // OpenMP [2.16, Nesting of Regions]
3140  // If specified, a teams construct must be contained within a target
3141  // construct.
3142  NestingProhibited = ParentRegion != OMPD_target;
3143  OrphanSeen = ParentRegion == OMPD_unknown;
3144  Recommend = ShouldBeInTargetRegion;
3145  }
3146  if (!NestingProhibited &&
3147  !isOpenMPTargetExecutionDirective(CurrentRegion) &&
3148  !isOpenMPTargetDataManagementDirective(CurrentRegion) &&
3149  (ParentRegion == OMPD_teams || ParentRegion == OMPD_target_teams)) {
3150  // OpenMP [2.16, Nesting of Regions]
3151  // distribute, parallel, parallel sections, parallel workshare, and the
3152  // parallel loop and parallel loop SIMD constructs are the only OpenMP
3153  // constructs that can be closely nested in the teams region.
3154  NestingProhibited = !isOpenMPParallelDirective(CurrentRegion) &&
3155  !isOpenMPDistributeDirective(CurrentRegion);
3156  Recommend = ShouldBeInParallelRegion;
3157  }
3158  if (!NestingProhibited &&
3159  isOpenMPNestingDistributeDirective(CurrentRegion)) {
3160  // OpenMP 4.5 [2.17 Nesting of Regions]
3161  // The region associated with the distribute construct must be strictly
3162  // nested inside a teams region
3163  NestingProhibited =
3164  (ParentRegion != OMPD_teams && ParentRegion != OMPD_target_teams);
3165  Recommend = ShouldBeInTeamsRegion;
3166  }
3167  if (!NestingProhibited &&
3168  (isOpenMPTargetExecutionDirective(CurrentRegion) ||
3169  isOpenMPTargetDataManagementDirective(CurrentRegion))) {
3170  // OpenMP 4.5 [2.17 Nesting of Regions]
3171  // If a target, target update, target data, target enter data, or
3172  // target exit data construct is encountered during execution of a
3173  // target region, the behavior is unspecified.
3174  NestingProhibited = Stack->hasDirective(
3175  [&OffendingRegion](OpenMPDirectiveKind K, const DeclarationNameInfo &,
3176  SourceLocation) {
3177  if (isOpenMPTargetExecutionDirective(K)) {
3178  OffendingRegion = K;
3179  return true;
3180  }
3181  return false;
3182  },
3183  false /* don't skip top directive */);
3184  CloseNesting = false;
3185  }
3186  if (NestingProhibited) {
3187  if (OrphanSeen) {
3188  SemaRef.Diag(StartLoc, diag::err_omp_orphaned_device_directive)
3189  << getOpenMPDirectiveName(CurrentRegion) << Recommend;
3190  } else {
3191  SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region)
3192  << CloseNesting << getOpenMPDirectiveName(OffendingRegion)
3193  << Recommend << getOpenMPDirectiveName(CurrentRegion);
3194  }
3195  return true;
3196  }
3197  }
3198  return false;
3199 }
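As a hedged illustration of what checkNestingOfRegions enforces (user code, not part of this file), closely nesting one worksharing region in another is rejected, while nesting it inside a fresh parallel region is accepted:

    // Illustrative only: worksharing-in-worksharing nesting.
    void nested_loops(float *a, int n, int m) {
      #pragma omp parallel for
      for (int i = 0; i < n; ++i) {
        // A bare '#pragma omp for' here would be diagnosed with
        // err_omp_prohibited_region and the ShouldBeInParallelRegion hint.
        // Wrapping the inner loop in its own parallel region is accepted:
        #pragma omp parallel for
        for (int j = 0; j < m; ++j)
          a[i * m + j] = 0.0f;
      }
    }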
3200 
3201 static bool checkIfClauses(Sema &S, OpenMPDirectiveKind Kind,
3202  ArrayRef<OMPClause *> Clauses,
3203  ArrayRef<OpenMPDirectiveKind> AllowedNameModifiers) {
3204  bool ErrorFound = false;
3205  unsigned NamedModifiersNumber = 0;
3206  SmallVector<const OMPIfClause *, OMPD_unknown + 1> FoundNameModifiers(
3207  OMPD_unknown + 1);
3208  SmallVector<SourceLocation, 4> NameModifierLoc;
3209  for (const OMPClause *C : Clauses) {
3210  if (const auto *IC = dyn_cast_or_null<OMPIfClause>(C)) {
3211  // At most one if clause without a directive-name-modifier can appear on
3212  // the directive.
3213  OpenMPDirectiveKind CurNM = IC->getNameModifier();
3214  if (FoundNameModifiers[CurNM]) {
3215  S.Diag(C->getBeginLoc(), diag::err_omp_more_one_clause)
3216  << getOpenMPDirectiveName(Kind) << getOpenMPClauseName(OMPC_if)
3217  << (CurNM != OMPD_unknown) << getOpenMPDirectiveName(CurNM);
3218  ErrorFound = true;
3219  } else if (CurNM != OMPD_unknown) {
3220  NameModifierLoc.push_back(IC->getNameModifierLoc());
3221  ++NamedModifiersNumber;
3222  }
3223  FoundNameModifiers[CurNM] = IC;
3224  if (CurNM == OMPD_unknown)
3225  continue;
3226  // Check if the specified name modifier is allowed for the current
3227  // directive.
3228  // At most one if clause with the particular directive-name-modifier can
3229  // appear on the directive.
3230  bool MatchFound = false;
3231  for (auto NM : AllowedNameModifiers) {
3232  if (CurNM == NM) {
3233  MatchFound = true;
3234  break;
3235  }
3236  }
3237  if (!MatchFound) {
3238  S.Diag(IC->getNameModifierLoc(),
3239  diag::err_omp_wrong_if_directive_name_modifier)
3240  << getOpenMPDirectiveName(CurNM) << getOpenMPDirectiveName(Kind);
3241  ErrorFound = true;
3242  }
3243  }
3244  }
3245  // If any if clause on the directive includes a directive-name-modifier then
3246  // all if clauses on the directive must include a directive-name-modifier.
3247  if (FoundNameModifiers[OMPD_unknown] && NamedModifiersNumber > 0) {
3248  if (NamedModifiersNumber == AllowedNameModifiers.size()) {
3249  S.Diag(FoundNameModifiers[OMPD_unknown]->getBeginLoc(),
3250  diag::err_omp_no_more_if_clause);
3251  } else {
3252  std::string Values;
3253  std::string Sep(", ");
3254  unsigned AllowedCnt = 0;
3255  unsigned TotalAllowedNum =
3256  AllowedNameModifiers.size() - NamedModifiersNumber;
3257  for (unsigned Cnt = 0, End = AllowedNameModifiers.size(); Cnt < End;
3258  ++Cnt) {
3259  OpenMPDirectiveKind NM = AllowedNameModifiers[Cnt];
3260  if (!FoundNameModifiers[NM]) {
3261  Values += "'";
3262  Values += getOpenMPDirectiveName(NM);
3263  Values += "'";
3264  if (AllowedCnt + 2 == TotalAllowedNum)
3265  Values += " or ";
3266  else if (AllowedCnt + 1 != TotalAllowedNum)
3267  Values += Sep;
3268  ++AllowedCnt;
3269  }
3270  }
3271  S.Diag(FoundNameModifiers[OMPD_unknown]->getCondition()->getBeginLoc(),
3272  diag::err_omp_unnamed_if_clause)
3273  << (TotalAllowedNum > 1) << Values;
3274  }
3275  for (SourceLocation Loc : NameModifierLoc) {
3276  S.Diag(Loc, diag::note_omp_previous_named_if_clause);
3277  }
3278  ErrorFound = true;
3279  }
3280  return ErrorFound;
3281 }
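checkIfClauses implements the directive-name-modifier rules for if clauses on combined directives. A minimal user-level sketch, with invented names, of what it accepts and rejects:

    // Illustrative only: if clauses on a combined 'target parallel' directive.
    void offload(float *a, int n, bool use_device, bool go_parallel) {
      // Accepted: each allowed modifier ('target', 'parallel') appears on at
      // most one if clause.
      #pragma omp target parallel map(tofrom : a[0:n]) \
          if(target : use_device) if(parallel : go_parallel)
      for (int i = 0; i < n; ++i)
        a[i] += 1.0f;

      // Rejected: two if clauses with the same modifier
      // (err_omp_more_one_clause), a modifier that is not allowed for this
      // directive (err_omp_wrong_if_directive_name_modifier), or mixing an
      // unmodified if clause with modified ones (err_omp_unnamed_if_clause).
    }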
3282 
3283 StmtResult Sema::ActOnOpenMPExecutableDirective(
3284  OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
3285  OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
3286  Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc) {
3287  StmtResult Res = StmtError();
3288  // First check CancelRegion which is then used in checkNestingOfRegions.
3289  if (checkCancelRegion(*this, Kind, CancelRegion, StartLoc) ||
3290  checkNestingOfRegions(*this, DSAStack, Kind, DirName, CancelRegion,
3291  StartLoc))
3292  return StmtError();
3293 
3294  llvm::SmallVector<OMPClause *, 8> ClausesWithImplicit;
3295  VarsWithInheritedDSAType VarsWithInheritedDSA;
3296  bool ErrorFound = false;
3297  ClausesWithImplicit.append(Clauses.begin(), Clauses.end());
3298  if (AStmt && !CurContext->isDependentContext()) {
3299  assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
3300 
3301  // Check default data sharing attributes for referenced variables.
3302  DSAAttrChecker DSAChecker(DSAStack, *this, cast<CapturedStmt>(AStmt));
3303  int ThisCaptureLevel = getOpenMPCaptureLevels(Kind);
3304  Stmt *S = AStmt;
3305  while (--ThisCaptureLevel >= 0)
3306  S = cast<CapturedStmt>(S)->getCapturedStmt();
3307  DSAChecker.Visit(S);
3308  if (DSAChecker.isErrorFound())
3309  return StmtError();
3310  // Generate list of implicitly defined firstprivate variables.
3311  VarsWithInheritedDSA = DSAChecker.getVarsWithInheritedDSA();
3312 
3313  SmallVector<Expr *, 4> ImplicitFirstprivates(
3314  DSAChecker.getImplicitFirstprivate().begin(),
3315  DSAChecker.getImplicitFirstprivate().end());
3316  SmallVector<Expr *, 4> ImplicitMaps(DSAChecker.getImplicitMap().begin(),
3317  DSAChecker.getImplicitMap().end());
3318  // Mark taskgroup task_reduction descriptors as implicitly firstprivate.
3319  for (OMPClause *C : Clauses) {
3320  if (auto *IRC = dyn_cast<OMPInReductionClause>(C)) {
3321  for (Expr *E : IRC->taskgroup_descriptors())
3322  if (E)
3323  ImplicitFirstprivates.emplace_back(E);
3324  }
3325  }
3326  if (!ImplicitFirstprivates.empty()) {
3327  if (OMPClause *Implicit = ActOnOpenMPFirstprivateClause(
3328  ImplicitFirstprivates, SourceLocation(), SourceLocation(),
3329  SourceLocation())) {
3330  ClausesWithImplicit.push_back(Implicit);
3331  ErrorFound = cast<OMPFirstprivateClause>(Implicit)->varlist_size() !=
3332  ImplicitFirstprivates.size();
3333  } else {
3334  ErrorFound = true;
3335  }
3336  }
3337  if (!ImplicitMaps.empty()) {
3338  if (OMPClause *Implicit = ActOnOpenMPMapClause(
3339  OMPC_MAP_unknown, OMPC_MAP_tofrom, /*IsMapTypeImplicit=*/true,
3340  SourceLocation(), SourceLocation(), ImplicitMaps,
3341  SourceLocation(), SourceLocation(), SourceLocation())) {
3342  ClausesWithImplicit.emplace_back(Implicit);
3343  ErrorFound |=
3344  cast<OMPMapClause>(Implicit)->varlist_size() != ImplicitMaps.size();
3345  } else {
3346  ErrorFound = true;
3347  }
3348  }
3349  }
3350 
3351  llvm::SmallVector<OpenMPDirectiveKind, 4> AllowedNameModifiers;
3352  switch (Kind) {
3353  case OMPD_parallel:
3354  Res = ActOnOpenMPParallelDirective(ClausesWithImplicit, AStmt, StartLoc,
3355  EndLoc);
3356  AllowedNameModifiers.push_back(OMPD_parallel);
3357  break;
3358  case OMPD_simd:
3359  Res = ActOnOpenMPSimdDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc,
3360  VarsWithInheritedDSA);
3361  break;
3362  case OMPD_for:
3363  Res = ActOnOpenMPForDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc,
3364  VarsWithInheritedDSA);
3365  break;
3366  case OMPD_for_simd:
3367  Res = ActOnOpenMPForSimdDirective(ClausesWithImplicit, AStmt, StartLoc,
3368  EndLoc, VarsWithInheritedDSA);
3369  break;
3370  case OMPD_sections:
3371  Res = ActOnOpenMPSectionsDirective(ClausesWithImplicit, AStmt, StartLoc,
3372  EndLoc);
3373  break;
3374  case OMPD_section:
3375  assert(ClausesWithImplicit.empty() &&
3376  "No clauses are allowed for 'omp section' directive");
3377  Res = ActOnOpenMPSectionDirective(AStmt, StartLoc, EndLoc);
3378  break;
3379  case OMPD_single:
3380  Res = ActOnOpenMPSingleDirective(ClausesWithImplicit, AStmt, StartLoc,
3381  EndLoc);
3382  break;
3383  case OMPD_master:
3384  assert(ClausesWithImplicit.empty() &&
3385  "No clauses are allowed for 'omp master' directive");
3386  Res = ActOnOpenMPMasterDirective(AStmt, StartLoc, EndLoc);
3387  break;
3388  case OMPD_critical:
3389  Res = ActOnOpenMPCriticalDirective(DirName, ClausesWithImplicit, AStmt,
3390  StartLoc, EndLoc);
3391  break;
3392  case OMPD_parallel_for:
3393  Res = ActOnOpenMPParallelForDirective(ClausesWithImplicit, AStmt, StartLoc,
3394  EndLoc, VarsWithInheritedDSA);
3395  AllowedNameModifiers.push_back(OMPD_parallel);
3396  break;
3397  case OMPD_parallel_for_simd:
3398  Res = ActOnOpenMPParallelForSimdDirective(
3399  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3400  AllowedNameModifiers.push_back(OMPD_parallel);
3401  break;
3402  case OMPD_parallel_sections:
3403  Res = ActOnOpenMPParallelSectionsDirective(ClausesWithImplicit, AStmt,
3404  StartLoc, EndLoc);
3405  AllowedNameModifiers.push_back(OMPD_parallel);
3406  break;
3407  case OMPD_task:
3408  Res =
3409  ActOnOpenMPTaskDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc);
3410  AllowedNameModifiers.push_back(OMPD_task);
3411  break;
3412  case OMPD_taskyield:
3413  assert(ClausesWithImplicit.empty() &&
3414  "No clauses are allowed for 'omp taskyield' directive");
3415  assert(AStmt == nullptr &&
3416  "No associated statement allowed for 'omp taskyield' directive");
3417  Res = ActOnOpenMPTaskyieldDirective(StartLoc, EndLoc);
3418  break;
3419  case OMPD_barrier:
3420  assert(ClausesWithImplicit.empty() &&
3421  "No clauses are allowed for 'omp barrier' directive");
3422  assert(AStmt == nullptr &&
3423  "No associated statement allowed for 'omp barrier' directive");
3424  Res = ActOnOpenMPBarrierDirective(StartLoc, EndLoc);
3425  break;
3426  case OMPD_taskwait:
3427  assert(ClausesWithImplicit.empty() &&
3428  "No clauses are allowed for 'omp taskwait' directive");
3429  assert(AStmt == nullptr &&
3430  "No associated statement allowed for 'omp taskwait' directive");
3431  Res = ActOnOpenMPTaskwaitDirective(StartLoc, EndLoc);
3432  break;
3433  case OMPD_taskgroup:
3434  Res = ActOnOpenMPTaskgroupDirective(ClausesWithImplicit, AStmt, StartLoc,
3435  EndLoc);
3436  break;
3437  case OMPD_flush:
3438  assert(AStmt == nullptr &&
3439  "No associated statement allowed for 'omp flush' directive");
3440  Res = ActOnOpenMPFlushDirective(ClausesWithImplicit, StartLoc, EndLoc);
3441  break;
3442  case OMPD_ordered:
3443  Res = ActOnOpenMPOrderedDirective(ClausesWithImplicit, AStmt, StartLoc,
3444  EndLoc);
3445  break;
3446  case OMPD_atomic:
3447  Res = ActOnOpenMPAtomicDirective(ClausesWithImplicit, AStmt, StartLoc,
3448  EndLoc);
3449  break;
3450  case OMPD_teams:
3451  Res =
3452  ActOnOpenMPTeamsDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc);
3453  break;
3454  case OMPD_target:
3455  Res = ActOnOpenMPTargetDirective(ClausesWithImplicit, AStmt, StartLoc,
3456  EndLoc);
3457  AllowedNameModifiers.push_back(OMPD_target);
3458  break;
3459  case OMPD_target_parallel:
3460  Res = ActOnOpenMPTargetParallelDirective(ClausesWithImplicit, AStmt,
3461  StartLoc, EndLoc);
3462  AllowedNameModifiers.push_back(OMPD_target);
3463  AllowedNameModifiers.push_back(OMPD_parallel);
3464  break;
3465  case OMPD_target_parallel_for:
3466  Res = ActOnOpenMPTargetParallelForDirective(
3467  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3468  AllowedNameModifiers.push_back(OMPD_target);
3469  AllowedNameModifiers.push_back(OMPD_parallel);
3470  break;
3471  case OMPD_cancellation_point:
3472  assert(ClausesWithImplicit.empty() &&
3473  "No clauses are allowed for 'omp cancellation point' directive");
3474  assert(AStmt == nullptr && "No associated statement allowed for 'omp "
3475  "cancellation point' directive");
3476  Res = ActOnOpenMPCancellationPointDirective(StartLoc, EndLoc, CancelRegion);
3477  break;
3478  case OMPD_cancel:
3479  assert(AStmt == nullptr &&
3480  "No associated statement allowed for 'omp cancel' directive");
3481  Res = ActOnOpenMPCancelDirective(ClausesWithImplicit, StartLoc, EndLoc,
3482  CancelRegion);
3483  AllowedNameModifiers.push_back(OMPD_cancel);
3484  break;
3485  case OMPD_target_data:
3486  Res = ActOnOpenMPTargetDataDirective(ClausesWithImplicit, AStmt, StartLoc,
3487  EndLoc);
3488  AllowedNameModifiers.push_back(OMPD_target_data);
3489  break;
3490  case OMPD_target_enter_data:
3491  Res = ActOnOpenMPTargetEnterDataDirective(ClausesWithImplicit, StartLoc,
3492  EndLoc, AStmt);
3493  AllowedNameModifiers.push_back(OMPD_target_enter_data);
3494  break;
3495  case OMPD_target_exit_data:
3496  Res = ActOnOpenMPTargetExitDataDirective(ClausesWithImplicit, StartLoc,
3497  EndLoc, AStmt);
3498  AllowedNameModifiers.push_back(OMPD_target_exit_data);
3499  break;
3500  case OMPD_taskloop:
3501  Res = ActOnOpenMPTaskLoopDirective(ClausesWithImplicit, AStmt, StartLoc,
3502  EndLoc, VarsWithInheritedDSA);
3503  AllowedNameModifiers.push_back(OMPD_taskloop);
3504  break;
3505  case OMPD_taskloop_simd:
3506  Res = ActOnOpenMPTaskLoopSimdDirective(ClausesWithImplicit, AStmt, StartLoc,
3507  EndLoc, VarsWithInheritedDSA);
3508  AllowedNameModifiers.push_back(OMPD_taskloop);
3509  break;
3510  case OMPD_distribute:
3511  Res = ActOnOpenMPDistributeDirective(ClausesWithImplicit, AStmt, StartLoc,
3512  EndLoc, VarsWithInheritedDSA);
3513  break;
3514  case OMPD_target_update:
3515  Res = ActOnOpenMPTargetUpdateDirective(ClausesWithImplicit, StartLoc,
3516  EndLoc, AStmt);
3517  AllowedNameModifiers.push_back(OMPD_target_update);
3518  break;
3519  case OMPD_distribute_parallel_for:
3520  Res = ActOnOpenMPDistributeParallelForDirective(
3521  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3522  AllowedNameModifiers.push_back(OMPD_parallel);
3523  break;
3524  case OMPD_distribute_parallel_for_simd:
3525  Res = ActOnOpenMPDistributeParallelForSimdDirective(
3526  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3527  AllowedNameModifiers.push_back(OMPD_parallel);
3528  break;
3529  case OMPD_distribute_simd:
3530  Res = ActOnOpenMPDistributeSimdDirective(
3531  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3532  break;
3533  case OMPD_target_parallel_for_simd:
3534  Res = ActOnOpenMPTargetParallelForSimdDirective(
3535  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3536  AllowedNameModifiers.push_back(OMPD_target);
3537  AllowedNameModifiers.push_back(OMPD_parallel);
3538  break;
3539  case OMPD_target_simd:
3540  Res = ActOnOpenMPTargetSimdDirective(ClausesWithImplicit, AStmt, StartLoc,
3541  EndLoc, VarsWithInheritedDSA);
3542  AllowedNameModifiers.push_back(OMPD_target);
3543  break;
3544  case OMPD_teams_distribute:
3545  Res = ActOnOpenMPTeamsDistributeDirective(
3546  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3547  break;
3548  case OMPD_teams_distribute_simd:
3549  Res = ActOnOpenMPTeamsDistributeSimdDirective(
3550  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3551  break;
3552  case OMPD_teams_distribute_parallel_for_simd:
3553  Res = ActOnOpenMPTeamsDistributeParallelForSimdDirective(
3554  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3555  AllowedNameModifiers.push_back(OMPD_parallel);
3556  break;
3557  case OMPD_teams_distribute_parallel_for:
3558  Res = ActOnOpenMPTeamsDistributeParallelForDirective(
3559  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3560  AllowedNameModifiers.push_back(OMPD_parallel);
3561  break;
3562  case OMPD_target_teams:
3563  Res = ActOnOpenMPTargetTeamsDirective(ClausesWithImplicit, AStmt, StartLoc,
3564  EndLoc);
3565  AllowedNameModifiers.push_back(OMPD_target);
3566  break;
3567  case OMPD_target_teams_distribute:
3568  Res = ActOnOpenMPTargetTeamsDistributeDirective(
3569  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3570  AllowedNameModifiers.push_back(OMPD_target);
3571  break;
3572  case OMPD_target_teams_distribute_parallel_for:
3573  Res = ActOnOpenMPTargetTeamsDistributeParallelForDirective(
3574  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3575  AllowedNameModifiers.push_back(OMPD_target);
3576  AllowedNameModifiers.push_back(OMPD_parallel);
3577  break;
3578  case OMPD_target_teams_distribute_parallel_for_simd:
3579  Res = ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
3580  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3581  AllowedNameModifiers.push_back(OMPD_target);
3582  AllowedNameModifiers.push_back(OMPD_parallel);
3583  break;
3584  case OMPD_target_teams_distribute_simd:
3585  Res = ActOnOpenMPTargetTeamsDistributeSimdDirective(
3586  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3587  AllowedNameModifiers.push_back(OMPD_target);
3588  break;
3589  case OMPD_declare_target:
3590  case OMPD_end_declare_target:
3591  case OMPD_threadprivate:
3592  case OMPD_declare_reduction:
3593  case OMPD_declare_simd:
3594  case OMPD_requires:
3595  llvm_unreachable("OpenMP Directive is not allowed");
3596  case OMPD_unknown:
3597  llvm_unreachable("Unknown OpenMP directive");
3598  }
3599 
3600  for (const auto &P : VarsWithInheritedDSA) {
3601  Diag(P.second->getExprLoc(), diag::err_omp_no_dsa_for_variable)
3602  << P.first << P.second->getSourceRange();
3603  }
3604  ErrorFound = !VarsWithInheritedDSA.empty() || ErrorFound;
3605 
3606  if (!AllowedNameModifiers.empty())
3607  ErrorFound = checkIfClauses(*this, Kind, Clauses, AllowedNameModifiers) ||
3608  ErrorFound;
3609 
3610  if (ErrorFound)
3611  return StmtError();
3612  return Res;
3613 }
3614 
3615 Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareSimdDirective(
3616  DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen,
3617  ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
3618  ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
3619  ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR) {
3620  assert(Aligneds.size() == Alignments.size());
3621  assert(Linears.size() == LinModifiers.size());
3622  assert(Linears.size() == Steps.size());
3623  if (!DG || DG.get().isNull())
3624  return DeclGroupPtrTy();
3625 
3626  if (!DG.get().isSingleDecl()) {
3627  Diag(SR.getBegin(), diag::err_omp_single_decl_in_declare_simd);
3628  return DG;
3629  }
3630  Decl *ADecl = DG.get().getSingleDecl();
3631  if (auto *FTD = dyn_cast<FunctionTemplateDecl>(ADecl))
3632  ADecl = FTD->getTemplatedDecl();
3633 
3634  auto *FD = dyn_cast<FunctionDecl>(ADecl);
3635  if (!FD) {
3636  Diag(ADecl->getLocation(), diag::err_omp_function_expected);
3637  return DeclGroupPtrTy();
3638  }
3639 
3640  // OpenMP [2.8.2, declare simd construct, Description]
3641  // The parameter of the simdlen clause must be a constant positive integer
3642  // expression.
3643  ExprResult SL;
3644  if (Simdlen)
3645  SL = VerifyPositiveIntegerConstantInClause(Simdlen, OMPC_simdlen);
3646  // OpenMP [2.8.2, declare simd construct, Description]
3647  // The special this pointer can be used as if it was one of the arguments to the
3648  // function in any of the linear, aligned, or uniform clauses.
3649  // The uniform clause declares one or more arguments to have an invariant
3650  // value for all concurrent invocations of the function in the execution of a
3651  // single SIMD loop.
3652  llvm::DenseMap<const Decl *, const Expr *> UniformedArgs;
3653  const Expr *UniformedLinearThis = nullptr;
3654  for (const Expr *E : Uniforms) {
3655  E = E->IgnoreParenImpCasts();
3656  if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
3657  if (const auto *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl()))
3658  if (FD->getNumParams() > PVD->getFunctionScopeIndex() &&
3659  FD->getParamDecl(PVD->getFunctionScopeIndex())
3660  ->getCanonicalDecl() == PVD->getCanonicalDecl()) {
3661  UniformedArgs.try_emplace(PVD->getCanonicalDecl(), E);
3662  continue;
3663  }
3664  if (isa<CXXThisExpr>(E)) {
3665  UniformedLinearThis = E;
3666  continue;
3667  }
3668  Diag(E->getExprLoc(), diag::err_omp_param_or_this_in_clause)
3669  << FD->getDeclName() << (isa<CXXMethodDecl>(ADecl) ? 1 : 0);
3670  }
3671  // OpenMP [2.8.2, declare simd construct, Description]
3672  // The aligned clause declares that the object to which each list item points
3673  // is aligned to the number of bytes expressed in the optional parameter of
3674  // the aligned clause.
3675  // The special this pointer can be used as if it was one of the arguments to the
3676  // function in any of the linear, aligned, or uniform clauses.
3677  // The type of list items appearing in the aligned clause must be array,
3678  // pointer, reference to array, or reference to pointer.
3679  llvm::DenseMap<const Decl *, const Expr *> AlignedArgs;
3680  const Expr *AlignedThis = nullptr;
3681  for (const Expr *E : Aligneds) {
3682  E = E->IgnoreParenImpCasts();
3683  if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
3684  if (const auto *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl())) {
3685  const VarDecl *CanonPVD = PVD->getCanonicalDecl();
3686  if (FD->getNumParams() > PVD->getFunctionScopeIndex() &&
3687  FD->getParamDecl(PVD->getFunctionScopeIndex())
3688  ->getCanonicalDecl() == CanonPVD) {
3689  // OpenMP [2.8.1, simd construct, Restrictions]
3690  // A list-item cannot appear in more than one aligned clause.
3691  if (AlignedArgs.count(CanonPVD) > 0) {
3692  Diag(E->getExprLoc(), diag::err_omp_aligned_twice)
3693  << 1 << E->getSourceRange();
3694  Diag(AlignedArgs[CanonPVD]->getExprLoc(),
3695  diag::note_omp_explicit_dsa)
3696  << getOpenMPClauseName(OMPC_aligned);
3697  continue;
3698  }
3699  AlignedArgs[CanonPVD] = E;
3700  QualType QTy = PVD->getType()
3701  .getNonReferenceType()
3702  .getUnqualifiedType()
3703  .getCanonicalType();
3704  const Type *Ty = QTy.getTypePtrOrNull();
3705  if (!Ty || (!Ty->isArrayType() && !Ty->isPointerType())) {
3706  Diag(E->getExprLoc(), diag::err_omp_aligned_expected_array_or_ptr)
3707  << QTy << getLangOpts().CPlusPlus << E->getSourceRange();
3708  Diag(PVD->getLocation(), diag::note_previous_decl) << PVD;
3709  }
3710  continue;
3711  }
3712  }
3713  if (isa<CXXThisExpr>(E)) {
3714  if (AlignedThis) {
3715  Diag(E->getExprLoc(), diag::err_omp_aligned_twice)
3716  << 2 << E->getSourceRange();
3717  Diag(AlignedThis->getExprLoc(), diag::note_omp_explicit_dsa)
3718  << getOpenMPClauseName(OMPC_aligned);
3719  }
3720  AlignedThis = E;
3721  continue;
3722  }
3723  Diag(E->getExprLoc(), diag::err_omp_param_or_this_in_clause)
3724  << FD->getDeclName() << (isa<CXXMethodDecl>(ADecl) ? 1 : 0);
3725  }
3726  // The optional parameter of the aligned clause, alignment, must be a constant
3727  // positive integer expression. If no optional parameter is specified,
3728  // implementation-defined default alignments for SIMD instructions on the
3729  // target platforms are assumed.
3730  SmallVector<const Expr *, 4> NewAligns;
3731  for (Expr *E : Alignments) {
3732  ExprResult Align;
3733  if (E)
3734  Align = VerifyPositiveIntegerConstantInClause(E, OMPC_aligned);
3735  NewAligns.push_back(Align.get());
3736  }
3737  // OpenMP [2.8.2, declare simd construct, Description]
3738  // The linear clause declares one or more list items to be private to a SIMD
3739  // lane and to have a linear relationship with respect to the iteration space
3740  // of a loop.
3741  // The special this pointer can be used as if it was one of the arguments to the
3742  // function in any of the linear, aligned, or uniform clauses.
3743  // When a linear-step expression is specified in a linear clause it must be
3744  // either a constant integer expression or an integer-typed parameter that is
3745  // specified in a uniform clause on the directive.
3746  llvm::DenseMap<const Decl *, const Expr *> LinearArgs;
3747  const bool IsUniformedThis = UniformedLinearThis != nullptr;
3748  auto MI = LinModifiers.begin();
3749  for (const Expr *E : Linears) {
3750  auto LinKind = static_cast<OpenMPLinearClauseKind>(*MI);
3751  ++MI;
3752  E = E->IgnoreParenImpCasts();
3753  if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
3754  if (const auto *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl())) {
3755  const VarDecl *CanonPVD = PVD->getCanonicalDecl();
3756  if (FD->getNumParams() > PVD->getFunctionScopeIndex() &&
3757  FD->getParamDecl(PVD->getFunctionScopeIndex())
3758  ->getCanonicalDecl() == CanonPVD) {
3759  // OpenMP [2.15.3.7, linear Clause, Restrictions]
3760  // A list-item cannot appear in more than one linear clause.
3761  if (LinearArgs.count(CanonPVD) > 0) {
3762  Diag(E->getExprLoc(), diag::err_omp_wrong_dsa)
3763  << getOpenMPClauseName(OMPC_linear)
3764  << getOpenMPClauseName(OMPC_linear) << E->getSourceRange();
3765  Diag(LinearArgs[CanonPVD]->getExprLoc(),
3766  diag::note_omp_explicit_dsa)
3767  << getOpenMPClauseName(OMPC_linear);
3768  continue;
3769  }
3770  // Each argument can appear in at most one uniform or linear clause.
3771  if (UniformedArgs.count(CanonPVD) > 0) {
3772  Diag(E->getExprLoc(), diag::err_omp_wrong_dsa)
3773  << getOpenMPClauseName(OMPC_linear)
3774  << getOpenMPClauseName(OMPC_uniform) << E->getSourceRange();
3775  Diag(UniformedArgs[CanonPVD]->getExprLoc(),
3776  diag::note_omp_explicit_dsa)
3777  << getOpenMPClauseName(OMPC_uniform);
3778  continue;
3779  }
3780  LinearArgs[CanonPVD] = E;
3781  if (E->isValueDependent() || E->isTypeDependent() ||
3782  E->isInstantiationDependent() ||
3783  E->containsUnexpandedParameterPack())
3784  continue;
3785  (void)CheckOpenMPLinearDecl(CanonPVD, E->getExprLoc(), LinKind,
3786  PVD->getOriginalType());
3787  continue;
3788  }
3789  }
3790  if (isa<CXXThisExpr>(E)) {
3791  if (UniformedLinearThis) {
3792  Diag(E->getExprLoc(), diag::err_omp_wrong_dsa)
3793  << getOpenMPClauseName(OMPC_linear)
3794  << getOpenMPClauseName(IsUniformedThis ? OMPC_uniform : OMPC_linear)
3795  << E->getSourceRange();
3796  Diag(UniformedLinearThis->getExprLoc(), diag::note_omp_explicit_dsa)
3797  << getOpenMPClauseName(IsUniformedThis ? OMPC_uniform
3798  : OMPC_linear);
3799  continue;
3800  }
3801  UniformedLinearThis = E;
3802  if (E->isValueDependent() || E->isTypeDependent() ||
3803  E->isInstantiationDependent() || E->containsUnexpandedParameterPack())
3804  continue;
3805  (void)CheckOpenMPLinearDecl(/*D=*/nullptr, E->getExprLoc(), LinKind,
3806  E->getType());
3807  continue;
3808  }
3809  Diag(E->getExprLoc(), diag::err_omp_param_or_this_in_clause)
3810  << FD->getDeclName() << (isa<CXXMethodDecl>(ADecl) ? 1 : 0);
3811  }
3812  Expr *Step = nullptr;
3813  Expr *NewStep = nullptr;
3814  SmallVector<Expr *, 4> NewSteps;
3815  for (Expr *E : Steps) {
3816  // Skip the same step expression, it was checked already.
3817  if (Step == E || !E) {
3818  NewSteps.push_back(E ? NewStep : nullptr);
3819  continue;
3820  }
3821  Step = E;
3822  if (const auto *DRE = dyn_cast<DeclRefExpr>(Step))
3823  if (const auto *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl())) {
3824  const VarDecl *CanonPVD = PVD->getCanonicalDecl();
3825  if (UniformedArgs.count(CanonPVD) == 0) {
3826  Diag(Step->getExprLoc(), diag::err_omp_expected_uniform_param)
3827  << Step->getSourceRange();
3828  } else if (E->isValueDependent() || E->isTypeDependent() ||
3829  E->isInstantiationDependent() ||
3830  E->containsUnexpandedParameterPack() ||
3831  CanonPVD->getType()->hasIntegerRepresentation()) {
3832  NewSteps.push_back(Step);
3833  } else {
3834  Diag(Step->getExprLoc(), diag::err_omp_expected_int_param)
3835  << Step->getSourceRange();
3836  }
3837  continue;
3838  }
3839  NewStep = Step;
3840  if (Step && !Step->isValueDependent() && !Step->isTypeDependent() &&
3841  !Step->isInstantiationDependent() &&
3842  !Step->containsUnexpandedParameterPack()) {
3843  NewStep = PerformOpenMPImplicitIntegerConversion(Step->getExprLoc(), Step)
3844  .get();
3845  if (NewStep)
3846  NewStep = VerifyIntegerConstantExpression(NewStep).get();
3847  }
3848  NewSteps.push_back(NewStep);
3849  }
3850  auto *NewAttr = OMPDeclareSimdDeclAttr::CreateImplicit(
3851  Context, BS, SL.get(), const_cast<Expr **>(Uniforms.data()),
3852  Uniforms.size(), const_cast<Expr **>(Aligneds.data()), Aligneds.size(),
3853  const_cast<Expr **>(NewAligns.data()), NewAligns.size(),
3854  const_cast<Expr **>(Linears.data()), Linears.size(),
3855  const_cast<unsigned *>(LinModifiers.data()), LinModifiers.size(),
3856  NewSteps.data(), NewSteps.size(), SR);
3857  ADecl->addAttr(NewAttr);
3858  return ConvertDeclToDeclGroup(ADecl);
3859 }
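The uniform, aligned, and linear processing above corresponds to declare simd usage along these lines (a sketch with invented names, not taken from this file); 'stride' can serve as the linear step only because it is also listed in 'uniform', and 'p' satisfies 'aligned' because it has pointer type:

    // Illustrative only: clauses validated by the code above.
    #pragma omp declare simd uniform(p, stride) linear(i : stride) aligned(p : 64)
    float load_scaled(const float *p, int i, int stride) {
      // A parameter may not appear in both 'uniform' and 'linear', a list item
      // may not be repeated in 'aligned', and the alignment must be a positive
      // integer constant.
      return p[i * stride];
    }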
3860 
3861 StmtResult Sema::ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
3862  Stmt *AStmt,
3863  SourceLocation StartLoc,
3864  SourceLocation EndLoc) {
3865  if (!AStmt)
3866  return StmtError();
3867 
3868  auto *CS = cast<CapturedStmt>(AStmt);
3869  // 1.2.2 OpenMP Language Terminology
3870  // Structured block - An executable statement with a single entry at the
3871  // top and a single exit at the bottom.
3872  // The point of exit cannot be a branch out of the structured block.
3873  // longjmp() and throw() must not violate the entry/exit criteria.
3874  CS->getCapturedDecl()->setNothrow();
3875 
3876  setFunctionHasBranchProtectedScope();
3877 
3878  return OMPParallelDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt,
3879  DSAStack->isCancelRegion());
3880 }
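The nothrow/structured-block handling above reflects the user-level rule quoted in the comment: control may not branch out of the region. A hedged sketch with invented names:

    // Illustrative only: a parallel region has a single entry and single exit.
    int sum_all(const int *v, int n) {
      int total = 0;
      #pragma omp parallel for reduction(+ : total)
      for (int i = 0; i < n; ++i) {
        // A 'return total;' here would branch out of the region and is invalid.
        total += v[i];
      }
      return total;
    }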
3881 
3882 namespace {
3883 /// Helper class for checking canonical form of the OpenMP loops and
3884 /// extracting iteration space of each loop in the loop nest, that will be used
3885 /// for IR generation.
3886 class OpenMPIterationSpaceChecker {
3887  /// Reference to Sema.
3888  Sema &SemaRef;
3889  /// A location for diagnostics (when there is no better location).
3890  SourceLocation DefaultLoc;
3891  /// A location for diagnostics (when increment is not compatible).
3892  SourceLocation ConditionLoc;
3893  /// A source range for referring to the loop init later.
3894  SourceRange InitSrcRange;
3895  /// A source range for referring to the loop condition later.
3896  SourceRange ConditionSrcRange;
3897  /// A source range for referring to the loop increment later.
3898  SourceRange IncrementSrcRange;
3899  /// Loop variable.
3900  ValueDecl *LCDecl = nullptr;
3901  /// Reference to loop variable.
3902  Expr *LCRef = nullptr;
3903  /// Lower bound (initializer for the var).
3904  Expr *LB = nullptr;
3905  /// Upper bound.
3906  Expr *UB = nullptr;
3907  /// Loop step (increment).
3908  Expr *Step = nullptr;
3909  /// This flag is true when condition is one of:
3910  /// Var < UB
3911  /// Var <= UB
3912  /// UB > Var
3913  /// UB >= Var
3914  /// This will have no value when the condition is !=
3915  llvm::Optional<bool> TestIsLessOp;
3916  /// This flag is true when condition is strict ( < or > ).
3917  bool TestIsStrictOp = false;
3918  /// This flag is true when step is subtracted on each iteration.
3919  bool SubtractStep = false;
3920 
3921 public:
3922  OpenMPIterationSpaceChecker(Sema &SemaRef, SourceLocation DefaultLoc)
3923  : SemaRef(SemaRef), DefaultLoc(DefaultLoc), ConditionLoc(DefaultLoc) {}
3924  /// Check init-expr for canonical loop form and save loop counter
3925  /// variable - #Var and its initialization value - #LB.
3926  bool checkAndSetInit(Stmt *S, bool EmitDiags = true);
3927  /// Check test-expr for canonical form, save upper-bound (#UB), flags
3928  /// for less/greater and for strict/non-strict comparison.
3929  bool checkAndSetCond(Expr *S);
3930  /// Check incr-expr for canonical loop form and return true if it
3931  /// does not conform, otherwise save loop step (#Step).
3932  bool checkAndSetInc(Expr *S);
3933  /// Return the loop counter variable.
3934  ValueDecl *getLoopDecl() const { return LCDecl; }
3935  /// Return the reference expression to loop counter variable.
3936  Expr *getLoopDeclRefExpr() const { return LCRef; }
3937  /// Source range of the loop init.
3938  SourceRange getInitSrcRange() const { return InitSrcRange; }
3939  /// Source range of the loop condition.
3940  SourceRange getConditionSrcRange() const { return ConditionSrcRange; }
3941  /// Source range of the loop increment.
3942  SourceRange getIncrementSrcRange() const { return IncrementSrcRange; }
3943  /// True if the step should be subtracted.
3944  bool shouldSubtractStep() const { return SubtractStep; }
3945  /// Build the expression to calculate the number of iterations.
3946  Expr *buildNumIterations(
3947  Scope *S, const bool LimitedType,
3948  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) const;
3949  /// Build the precondition expression for the loops.
3950  Expr *
3951  buildPreCond(Scope *S, Expr *Cond,
3952  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) const;
3953  /// Build reference expression to the counter be used for codegen.
3954  DeclRefExpr *
3955  buildCounterVar(llvm::MapVector<const Expr *, DeclRefExpr *> &Captures,
3956  DSAStackTy &DSA) const;
3957  /// Build reference expression to the private counter to be used for
3958  /// codegen.
3959  Expr *buildPrivateCounterVar() const;
3960  /// Build initialization of the counter to be used for codegen.
3961  Expr *buildCounterInit() const;
3962  /// Build step of the counter to be used for codegen.
3963  Expr *buildCounterStep() const;
3964  /// Build loop data with counter value for depend clauses in ordered
3965  /// directives.
3966  Expr *
3967  buildOrderedLoopData(Scope *S, Expr *Counter,
3968  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures,
3969  SourceLocation Loc, Expr *Inc = nullptr,
3970  OverloadedOperatorKind OOK = OO_Amp);
3971  /// Return true if any expression is dependent.
3972  bool dependent() const;
3973 
3974 private:
3975  /// Check the right-hand side of an assignment in the increment
3976  /// expression.
3977  bool checkAndSetIncRHS(Expr *RHS);
3978  /// Helper to set loop counter variable and its initializer.
3979  bool setLCDeclAndLB(ValueDecl *NewLCDecl, Expr *NewDeclRefExpr, Expr *NewLB);
3980  /// Helper to set upper bound.
3981  bool setUB(Expr *NewUB, llvm::Optional<bool> LessOp, bool StrictOp,
3982  SourceRange SR, SourceLocation SL);
3983  /// Helper to set loop increment.
3984  bool setStep(Expr *NewStep, bool Subtract);
3985 };
3986 
3987 bool OpenMPIterationSpaceChecker::dependent() const {
3988  if (!LCDecl) {
3989  assert(!LB && !UB && !Step);
3990  return false;
3991  }
3992  return LCDecl->getType()->isDependentType() ||
3993  (LB && LB->isValueDependent()) || (UB && UB->isValueDependent()) ||
3994  (Step && Step->isValueDependent());
3995 }
3996 
3997 bool OpenMPIterationSpaceChecker::setLCDeclAndLB(ValueDecl *NewLCDecl,
3998  Expr *NewLCRefExpr,
3999  Expr *NewLB) {
4000  // State consistency checking to ensure correct usage.
4001  assert(LCDecl == nullptr && LB == nullptr && LCRef == nullptr &&
4002  UB == nullptr && Step == nullptr && !TestIsLessOp && !TestIsStrictOp);
4003  if (!NewLCDecl || !NewLB)
4004  return true;
4005  LCDecl = getCanonicalDecl(NewLCDecl);
4006  LCRef = NewLCRefExpr;
4007  if (auto *CE = dyn_cast_or_null<CXXConstructExpr>(NewLB))
4008  if (const CXXConstructorDecl *Ctor = CE->getConstructor())
4009  if ((Ctor->isCopyOrMoveConstructor() ||
4010  Ctor->isConvertingConstructor(/*AllowExplicit=*/false)) &&
4011  CE->getNumArgs() > 0 && CE->getArg(0) != nullptr)
4012  NewLB = CE->getArg(0)->IgnoreParenImpCasts();
4013  LB = NewLB;
4014  return false;
4015 }
4016 
4017 bool OpenMPIterationSpaceChecker::setUB(Expr *NewUB, llvm::Optional<bool> LessOp,
4018  bool StrictOp, SourceRange SR,
4019  SourceLocation SL) {
4020  // State consistency checking to ensure correct usage.
4021  assert(LCDecl != nullptr && LB != nullptr && UB == nullptr &&
4022  Step == nullptr && !TestIsLessOp && !TestIsStrictOp);
4023  if (!NewUB)
4024  return true;
4025  UB = NewUB;
4026  if (LessOp)
4027  TestIsLessOp = LessOp;
4028  TestIsStrictOp = StrictOp;
4029  ConditionSrcRange = SR;
4030  ConditionLoc = SL;
4031  return false;
4032 }
4033 
4034 bool OpenMPIterationSpaceChecker::setStep(Expr *NewStep, bool Subtract) {
4035  // State consistency checking to ensure correct usage.
4036  assert(LCDecl != nullptr && LB != nullptr && Step == nullptr);
4037  if (!NewStep)
4038  return true;
4039  if (!NewStep->isValueDependent()) {
4040  // Check that the step is an integer expression.
4041  SourceLocation StepLoc = NewStep->getBeginLoc();
4042  ExprResult Val = SemaRef.PerformOpenMPImplicitIntegerConversion(
4043  StepLoc, getExprAsWritten(NewStep));
4044  if (Val.isInvalid())
4045  return true;
4046  NewStep = Val.get();
4047 
4048  // OpenMP [2.6, Canonical Loop Form, Restrictions]
4049  // If test-expr is of form var relational-op b and relational-op is < or
4050  // <= then incr-expr must cause var to increase on each iteration of the
4051  // loop. If test-expr is of form var relational-op b and relational-op is
4052  // > or >= then incr-expr must cause var to decrease on each iteration of
4053  // the loop.
4054  // If test-expr is of form b relational-op var and relational-op is < or
4055  // <= then incr-expr must cause var to decrease on each iteration of the
4056  // loop. If test-expr is of form b relational-op var and relational-op is
4057  // > or >= then incr-expr must cause var to increase on each iteration of
4058  // the loop.
4059  llvm::APSInt Result;
4060  bool IsConstant = NewStep->isIntegerConstantExpr(Result, SemaRef.Context);
4061  bool IsUnsigned = !NewStep->getType()->hasSignedIntegerRepresentation();
4062  bool IsConstNeg =
4063  IsConstant && Result.isSigned() && (Subtract != Result.isNegative());
4064  bool IsConstPos =
4065  IsConstant && Result.isSigned() && (Subtract == Result.isNegative());
4066  bool IsConstZero = IsConstant && !Result.getBoolValue();
4067 
4068  // != with increment is treated as <; != with decrement is treated as >
4069  if (!TestIsLessOp.hasValue())
4070  TestIsLessOp = IsConstPos || (IsUnsigned && !Subtract);
4071  if (UB && (IsConstZero ||
4072  (TestIsLessOp.getValue() ?
4073  (IsConstNeg || (IsUnsigned && Subtract)) :
4074  (IsConstPos || (IsUnsigned && !Subtract))))) {
4075  SemaRef.Diag(NewStep->getExprLoc(),
4076  diag::err_omp_loop_incr_not_compatible)
4077  << LCDecl << TestIsLessOp.getValue() << NewStep->getSourceRange();
4078  SemaRef.Diag(ConditionLoc,
4079  diag::note_omp_loop_cond_requres_compatible_incr)
4080  << TestIsLessOp.getValue() << ConditionSrcRange;
4081  return true;
4082  }
4083  if (TestIsLessOp.getValue() == Subtract) {
4084  NewStep =
4085  SemaRef.CreateBuiltinUnaryOp(NewStep->getExprLoc(), UO_Minus, NewStep)
4086  .get();
4087  Subtract = !Subtract;
4088  }
4089  }
4090 
4091  Step = NewStep;
4092  SubtractStep = Subtract;
4093  return false;
4094 }
4095 
4096 bool OpenMPIterationSpaceChecker::checkAndSetInit(Stmt *S, bool EmitDiags) {
4097  // Check init-expr for canonical loop form and save loop counter
4098  // variable - #Var and its initialization value - #LB.
4099  // OpenMP [2.6] Canonical loop form. init-expr may be one of the following:
4100  // var = lb
4101  // integer-type var = lb
4102  // random-access-iterator-type var = lb
4103  // pointer-type var = lb
4104  //
4105  if (!S) {
4106  if (EmitDiags) {
4107  SemaRef.Diag(DefaultLoc, diag::err_omp_loop_not_canonical_init);
4108  }
4109  return true;
4110  }
4111  if (auto *ExprTemp = dyn_cast<ExprWithCleanups>(S))
4112  if (!ExprTemp->cleanupsHaveSideEffects())
4113  S = ExprTemp->getSubExpr();
4114 
4115  InitSrcRange = S->getSourceRange();
4116  if (Expr *E = dyn_cast<Expr>(S))
4117  S = E->IgnoreParens();
4118  if (auto *BO = dyn_cast<BinaryOperator>(S)) {
4119  if (BO->getOpcode() == BO_Assign) {
4120  Expr *LHS = BO->getLHS()->IgnoreParens();
4121  if (auto *DRE = dyn_cast<DeclRefExpr>(LHS)) {
4122  if (auto *CED = dyn_cast<OMPCapturedExprDecl>(DRE->getDecl()))
4123  if (auto *ME = dyn_cast<MemberExpr>(getExprAsWritten(CED->getInit())))
4124  return setLCDeclAndLB(ME->getMemberDecl(), ME, BO->getRHS());
4125  return setLCDeclAndLB(DRE->getDecl(), DRE, BO->getRHS());
4126  }
4127  if (auto *ME = dyn_cast<MemberExpr>(LHS)) {
4128  if (ME->isArrow() &&
4129  isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts()))
4130  return setLCDeclAndLB(ME->getMemberDecl(), ME, BO->getRHS());
4131  }
4132  }
4133  } else if (auto *DS = dyn_cast<DeclStmt>(S)) {
4134  if (DS->isSingleDecl()) {
4135  if (auto *Var = dyn_cast_or_null<VarDecl>(DS->getSingleDecl())) {
4136  if (Var->hasInit() && !Var->getType()->isReferenceType()) {
4137  // Accept non-canonical init form here but emit ext. warning.
4138  if (Var->getInitStyle() != VarDecl::CInit && EmitDiags)
4139  SemaRef.Diag(S->getBeginLoc(),
4140  diag::ext_omp_loop_not_canonical_init)
4141  << S->getSourceRange();
4142  return setLCDeclAndLB(
4143  Var,
4144  buildDeclRefExpr(SemaRef, Var,
4145  Var->getType().getNonReferenceType(),
4146  DS->getBeginLoc()),
4147  Var->getInit());
4148  }
4149  }
4150  }
4151  } else if (auto *CE = dyn_cast<CXXOperatorCallExpr>(S)) {
4152  if (CE->getOperator() == OO_Equal) {
4153  Expr *LHS = CE->getArg(0);
4154  if (auto *DRE = dyn_cast<DeclRefExpr>(LHS)) {
4155  if (auto *CED = dyn_cast<OMPCapturedExprDecl>(DRE->getDecl()))
4156  if (auto *ME = dyn_cast<MemberExpr>(getExprAsWritten(CED->getInit())))
4157  return setLCDeclAndLB(ME->getMemberDecl(), ME, CE->getArg(1));
4158  return setLCDeclAndLB(DRE->getDecl(), DRE, CE->getArg(1));
4159  }
4160  if (auto *ME = dyn_cast<MemberExpr>(LHS)) {
4161  if (ME->isArrow() &&
4162  isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts()))
4163  return setLCDeclAndLB(ME->getMemberDecl(), ME, CE->getArg(1));
4164  }
4165  }
4166  }
4167 
4168  if (dependent() || SemaRef.CurContext->isDependentContext())
4169  return false;
4170  if (EmitDiags) {
4171  SemaRef.Diag(S->getBeginLoc(), diag::err_omp_loop_not_canonical_init)
4172  << S->getSourceRange();
4173  }
4174  return true;
4175 }
4176 
4177 /// Ignore parentheses, implicit casts, and copy constructors, and return the
4178 /// variable (which may be the loop variable) if possible.
4179 static const ValueDecl *getInitLCDecl(const Expr *E) {
4180  if (!E)
4181  return nullptr;
4182  E = getExprAsWritten(E);
4183  if (const auto *CE = dyn_cast_or_null<CXXConstructExpr>(E))
4184  if (const CXXConstructorDecl *Ctor = CE->getConstructor())
4185  if ((Ctor->isCopyOrMoveConstructor() ||
4186  Ctor->isConvertingConstructor(/*AllowExplicit=*/false)) &&
4187  CE->getNumArgs() > 0 && CE->getArg(0) != nullptr)
4188  E = CE->getArg(0)->IgnoreParenImpCasts();
4189  if (const auto *DRE = dyn_cast_or_null<DeclRefExpr>(E)) {
4190  if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()))
4191  return getCanonicalDecl(VD);
4192  }
4193  if (const auto *ME = dyn_cast_or_null<MemberExpr>(E))
4194  if (ME->isArrow() && isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts()))
4195  return getCanonicalDecl(ME->getMemberDecl());
4196  return nullptr;
4197 }
4198 
4199 bool OpenMPIterationSpaceChecker::checkAndSetCond(Expr *S) {
4200  // Check test-expr for canonical form, save upper-bound UB, flags for
4201  // less/greater and for strict/non-strict comparison.
4202  // OpenMP [2.6] Canonical loop form. Test-expr may be one of the following:
4203  // var relational-op b
4204  // b relational-op var
4205  //
4206  if (!S) {
4207  SemaRef.Diag(DefaultLoc, diag::err_omp_loop_not_canonical_cond) << LCDecl;
4208  return true;
4209  }
4210  S = getExprAsWritten(S);
4211  SourceLocation CondLoc = S->getBeginLoc();
4212  if (auto *BO = dyn_cast<BinaryOperator>(S)) {
4213  if (BO->isRelationalOp()) {
4214  if (getInitLCDecl(BO->getLHS()) == LCDecl)
4215  return setUB(BO->getRHS(),
4216  (BO->getOpcode() == BO_LT || BO->getOpcode() == BO_LE),
4217  (BO->getOpcode() == BO_LT || BO->getOpcode() == BO_GT),
4218  BO->getSourceRange(), BO->getOperatorLoc());
4219  if (getInitLCDecl(BO->getRHS()) == LCDecl)
4220  return setUB(BO->getLHS(),
4221  (BO->getOpcode() == BO_GT || BO->getOpcode() == BO_GE),
4222  (BO->getOpcode() == BO_LT || BO->getOpcode() == BO_GT),
4223  BO->getSourceRange(), BO->getOperatorLoc());
4224  } else if (BO->getOpcode() == BO_NE)
4225  return setUB(getInitLCDecl(BO->getLHS()) == LCDecl ?
4226  BO->getRHS() : BO->getLHS(),
4227  /*LessOp=*/llvm::None,
4228  /*StrictOp=*/true,
4229  BO->getSourceRange(), BO->getOperatorLoc());
4230  } else if (auto *CE = dyn_cast<CXXOperatorCallExpr>(S)) {
4231  if (CE->getNumArgs() == 2) {
4232  auto Op = CE->getOperator();
4233  switch (Op) {
4234  case OO_Greater:
4235  case OO_GreaterEqual:
4236  case OO_Less:
4237  case OO_LessEqual:
4238  if (getInitLCDecl(CE->getArg(0)) == LCDecl)
4239  return setUB(CE->getArg(1), Op == OO_Less || Op == OO_LessEqual,
4240  Op == OO_Less || Op == OO_Greater, CE->getSourceRange(),
4241  CE->getOperatorLoc());
4242  if (getInitLCDecl(CE->getArg(1)) == LCDecl)
4243  return setUB(CE->getArg(0), Op == OO_Greater || Op == OO_GreaterEqual,
4244  Op == OO_Less || Op == OO_Greater, CE->getSourceRange(),
4245  CE->getOperatorLoc());
4246  break;
4247  case OO_ExclaimEqual:
4248  return setUB(getInitLCDecl(CE->getArg(0)) == LCDecl ?
4249  CE->getArg(1) : CE->getArg(0),
4250  /*LessOp=*/llvm::None,
4251  /*StrictOp=*/true,
4252  CE->getSourceRange(),
4253  CE->getOperatorLoc());
4254  break;
4255  default:
4256  break;
4257  }
4258  }
4259  }
4260  if (dependent() || SemaRef.CurContext->isDependentContext())
4261  return false;
4262  SemaRef.Diag(CondLoc, diag::err_omp_loop_not_canonical_cond)
4263  << S->getSourceRange() << LCDecl;
4264  return true;
4265 }
4266 
4267 bool OpenMPIterationSpaceChecker::checkAndSetIncRHS(Expr *RHS) {
4268  // RHS of canonical loop form increment can be:
4269  // var + incr
4270  // incr + var
4271  // var - incr
4272  //
4273  RHS = RHS->IgnoreParenImpCasts();
4274  if (auto *BO = dyn_cast<BinaryOperator>(RHS)) {
4275  if (BO->isAdditiveOp()) {
4276  bool IsAdd = BO->getOpcode() == BO_Add;
4277  if (getInitLCDecl(BO->getLHS()) == LCDecl)
4278  return setStep(BO->getRHS(), !IsAdd);
4279  if (IsAdd && getInitLCDecl(BO->getRHS()) == LCDecl)
4280  return setStep(BO->getLHS(), /*Subtract=*/false);
4281  }
4282  } else if (auto *CE = dyn_cast<CXXOperatorCallExpr>(RHS)) {
4283  bool IsAdd = CE->getOperator() == OO_Plus;
4284  if ((IsAdd || CE->getOperator() == OO_Minus) && CE->getNumArgs() == 2) {
4285  if (getInitLCDecl(CE->getArg(0)) == LCDecl)
4286  return setStep(CE->getArg(1), !IsAdd);
4287  if (IsAdd && getInitLCDecl(CE->getArg(1)) == LCDecl)
4288  return setStep(CE->getArg(0), /*Subtract=*/false);
4289  }
4290  }
4291  if (dependent() || SemaRef.CurContext->isDependentContext())
4292  return false;
4293  SemaRef.Diag(RHS->getBeginLoc(), diag::err_omp_loop_not_canonical_incr)
4294  << RHS->getSourceRange() << LCDecl;
4295  return true;
4296 }
4297 
4298 bool OpenMPIterationSpaceChecker::checkAndSetInc(Expr *S) {
4299  // Check incr-expr for canonical loop form and return true if it
4300  // does not conform.
4301  // OpenMP [2.6] Canonical loop form. incr-expr may be one of the following:
4302  // ++var
4303  // var++
4304  // --var
4305  // var--
4306  // var += incr
4307  // var -= incr
4308  // var = var + incr
4309  // var = incr + var
4310  // var = var - incr
4311  //
4312  if (!S) {
4313  SemaRef.Diag(DefaultLoc, diag::err_omp_loop_not_canonical_incr) << LCDecl;
4314  return true;
4315  }
4316  if (auto *ExprTemp = dyn_cast<ExprWithCleanups>(S))
4317  if (!ExprTemp->cleanupsHaveSideEffects())
4318  S = ExprTemp->getSubExpr();
4319 
4320  IncrementSrcRange = S->getSourceRange();
4321  S = S->IgnoreParens();
4322  if (auto *UO = dyn_cast<UnaryOperator>(S)) {
4323  if (UO->isIncrementDecrementOp() &&
4324  getInitLCDecl(UO->getSubExpr()) == LCDecl)
4325  return setStep(SemaRef
4326  .ActOnIntegerConstant(UO->getBeginLoc(),
4327  (UO->isDecrementOp() ? -1 : 1))
4328  .get(),
4329  /*Subtract=*/false);
4330  } else if (auto *BO = dyn_cast<BinaryOperator>(S)) {
4331  switch (BO->getOpcode()) {
4332  case BO_AddAssign:
4333  case BO_SubAssign:
4334  if (getInitLCDecl(BO->getLHS()) == LCDecl)
4335  return setStep(BO->getRHS(), BO->getOpcode() == BO_SubAssign);
4336  break;
4337  case BO_Assign:
4338  if (getInitLCDecl(BO->getLHS()) == LCDecl)
4339  return checkAndSetIncRHS(BO->getRHS());
4340  break;
4341  default:
4342  break;
4343  }
4344  } else if (auto *CE = dyn_cast<CXXOperatorCallExpr>(S)) {
4345  switch (CE->getOperator()) {
4346  case OO_PlusPlus:
4347  case OO_MinusMinus:
4348  if (getInitLCDecl(CE->getArg(0)) == LCDecl)
4349  return setStep(SemaRef
4350  .ActOnIntegerConstant(
4351  CE->getBeginLoc(),
4352  ((CE->getOperator() == OO_MinusMinus) ? -1 : 1))
4353  .get(),
4354  /*Subtract=*/false);
4355  break;
4356  case OO_PlusEqual:
4357  case OO_MinusEqual:
4358  if (getInitLCDecl(CE->getArg(0)) == LCDecl)
4359  return setStep(CE->getArg(1), CE->getOperator() == OO_MinusEqual);
4360  break;
4361  case OO_Equal:
4362  if (getInitLCDecl(CE->getArg(0)) == LCDecl)
4363  return checkAndSetIncRHS(CE->getArg(1));
4364  break;
4365  default:
4366  break;
4367  }
4368  }
4369  if (dependent() || SemaRef.CurContext->isDependentContext())
4370  return false;
4371  SemaRef.Diag(S->getBeginLoc(), diag::err_omp_loop_not_canonical_incr)
4372  << S->getSourceRange() << LCDecl;
4373  return true;
4374 }
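// Taken together with checkAndSetIncRHS(), the increments recognized above
// for a loop variable 'i' (an arbitrary name) include, for example:
//   ++i;  i++;  --i;  i--;        // step becomes +1 or -1
//   i += incr;  i -= incr;        // compound assignment
//   i = i + incr;                 // assignment forms handled by checkAndSetIncRHS()
// as well as the overloaded-operator spellings (operator++, operator+=,
// operator=) used by C++ random access iterators.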
4375 
4376 static ExprResult
4377 tryBuildCapture(Sema &SemaRef, Expr *Capture,
4378  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) {
4379  if (SemaRef.CurContext->isDependentContext())
4380  return ExprResult(Capture);
4381  if (Capture->isEvaluatable(SemaRef.Context, Expr::SE_AllowSideEffects))
4382  return SemaRef.PerformImplicitConversion(
4383  Capture->IgnoreImpCasts(), Capture->getType(), Sema::AA_Converting,
4384  /*AllowExplicit=*/true);
4385  auto I = Captures.find(Capture);
4386  if (I != Captures.end())
4387  return buildCapture(SemaRef, Capture, I->second);
4388  DeclRefExpr *Ref = nullptr;
4389  ExprResult Res = buildCapture(SemaRef, Capture, Ref);
4390  Captures[Capture] = Ref;
4391  return Res;
4392 }
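// Rough usage sketch (the call sites here are hypothetical): the Captures map
// acts as a memoization table, so capturing the same bound expression twice
// reuses one capture instead of building a second temporary:
//   llvm::MapVector<const Expr *, DeclRefExpr *> Captures;
//   ExprResult C1 = tryBuildCapture(SemaRef, UB, Captures); // builds a capture
//   ExprResult C2 = tryBuildCapture(SemaRef, UB, Captures); // reuses Captures[UB]
// Expressions that are constant-evaluatable are not captured at all; they are
// only re-converted in place.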
4393 
4394 /// Build the expression to calculate the number of iterations.
4395 Expr *OpenMPIterationSpaceChecker::buildNumIterations(
4396  Scope *S, const bool LimitedType,
4397  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) const {
4398  ExprResult Diff;
4399  QualType VarType = LCDecl->getType().getNonReferenceType();
4400  if (VarType->isIntegerType() || VarType->isPointerType() ||
4401  SemaRef.getLangOpts().CPlusPlus) {
4402  // Upper - Lower
4403  Expr *UBExpr = TestIsLessOp.getValue() ? UB : LB;
4404  Expr *LBExpr = TestIsLessOp.getValue() ? LB : UB;
4405  Expr *Upper = tryBuildCapture(SemaRef, UBExpr, Captures).get();
4406  Expr *Lower = tryBuildCapture(SemaRef, LBExpr, Captures).get();
4407  if (!Upper || !Lower)
4408  return nullptr;
4409 
4410  Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Sub, Upper, Lower);
4411 
4412  if (!Diff.isUsable() && VarType->getAsCXXRecordDecl()) {
4413  // BuildBinOp already emitted error, this one is to point user to upper
4414  // and lower bound, and to tell what is passed to 'operator-'.
4415  SemaRef.Diag(Upper->getBeginLoc(), diag::err_omp_loop_diff_cxx)
4416  << Upper->getSourceRange() << Lower->getSourceRange();
4417  return nullptr;
4418  }
4419  }
4420 
4421  if (!Diff.isUsable())
4422  return nullptr;
4423 
4424  // Upper - Lower [- 1]
4425  if (TestIsStrictOp)
4426  Diff = SemaRef.BuildBinOp(
4427  S, DefaultLoc, BO_Sub, Diff.get(),
4428  SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get());
4429  if (!Diff.isUsable())
4430  return nullptr;
4431 
4432  // Upper - Lower [- 1] + Step
4433  ExprResult NewStep = tryBuildCapture(SemaRef, Step, Captures);
4434  if (!NewStep.isUsable())
4435  return nullptr;
4436  Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Add, Diff.get(), NewStep.get());
4437  if (!Diff.isUsable())
4438  return nullptr;
4439 
4440  // Parentheses (for dumping/debugging purposes only).
4441  Diff = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, Diff.get());
4442  if (!Diff.isUsable())
4443  return nullptr;
4444 
4445  // (Upper - Lower [- 1] + Step) / Step
4446  Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Div, Diff.get(), NewStep.get());
4447  if (!Diff.isUsable())
4448  return nullptr;
4449 
4450  // OpenMP runtime requires 32-bit or 64-bit loop variables.
4451  QualType Type = Diff.get()->getType();
4452  ASTContext &C = SemaRef.Context;
4453  bool UseVarType = VarType->hasIntegerRepresentation() &&
4454  C.getTypeSize(Type) > C.getTypeSize(VarType);
4455  if (!Type->isIntegerType() || UseVarType) {
4456  unsigned NewSize =
4457  UseVarType ? C.getTypeSize(VarType) : C.getTypeSize(Type);
4458  bool IsSigned = UseVarType ? VarType->hasSignedIntegerRepresentation()
4459  : Type->hasSignedIntegerRepresentation();
4460  Type = C.getIntTypeForBitwidth(NewSize, IsSigned);
4461  if (!SemaRef.Context.hasSameType(Diff.get()->getType(), Type)) {
4462  Diff = SemaRef.PerformImplicitConversion(
4463  Diff.get(), Type, Sema::AA_Converting, /*AllowExplicit=*/true);
4464  if (!Diff.isUsable())
4465  return nullptr;
4466  }
4467  }
4468  if (LimitedType) {
4469  unsigned NewSize = (C.getTypeSize(Type) > 32) ? 64 : 32;
4470  if (NewSize != C.getTypeSize(Type)) {
4471  if (NewSize < C.getTypeSize(Type)) {
4472  assert(NewSize == 64 && "incorrect loop var size");
4473  SemaRef.Diag(DefaultLoc, diag::warn_omp_loop_64_bit_var)
4474  << InitSrcRange << ConditionSrcRange;
4475  }
4476  QualType NewType = C.getIntTypeForBitwidth(
4477  NewSize, Type->hasSignedIntegerRepresentation() ||
4478  C.getTypeSize(Type) < NewSize);
4479  if (!SemaRef.Context.hasSameType(Diff.get()->getType(), NewType)) {
4480  Diff = SemaRef.PerformImplicitConversion(Diff.get(), NewType,
4481  Sema::AA_Converting, true);
4482  if (!Diff.isUsable())
4483  return nullptr;
4484  }
4485  }
4486  }
4487 
4488  return Diff.get();
4489 }
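// Worked example of the trip-count formula built above, assuming the loop
//   for (int i = 0; i < 10; i += 3)
// so Upper = 10, Lower = 0, TestIsStrictOp = true, Step = 3:
//   (Upper - Lower [- 1] + Step) / Step = (10 - 0 - 1 + 3) / 3 = 4
// matching the iterations i = 0, 3, 6, 9. With a non-strict test (i <= 9) the
// "- 1" is omitted and the result is the same: (9 - 0 + 3) / 3 = 4.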
4490 
4491 Expr *OpenMPIterationSpaceChecker::buildPreCond(
4492  Scope *S, Expr *Cond,
4493  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) const {
4494  // Try to build LB <op> UB, where <op> is <, >, <=, or >=.
4495  bool Suppress = SemaRef.getDiagnostics().getSuppressAllDiagnostics();
4496  SemaRef.getDiagnostics().setSuppressAllDiagnostics(/*Val=*/true);
4497 
4498  ExprResult NewLB = tryBuildCapture(SemaRef, LB, Captures);
4499  ExprResult NewUB = tryBuildCapture(SemaRef, UB, Captures);
4500  if (!NewLB.isUsable() || !NewUB.isUsable())
4501  return nullptr;
4502 
4503  ExprResult CondExpr =
4504  SemaRef.BuildBinOp(S, DefaultLoc,
4505  TestIsLessOp.getValue() ?
4506  (TestIsStrictOp ? BO_LT : BO_LE) :
4507  (TestIsStrictOp ? BO_GT : BO_GE),
4508  NewLB.get(), NewUB.get());
4509  if (CondExpr.isUsable()) {
4510  if (!SemaRef.Context.hasSameUnqualifiedType(CondExpr.get()->getType(),
4511  SemaRef.Context.BoolTy))
4512  CondExpr = SemaRef.PerformImplicitConversion(
4513  CondExpr.get(), SemaRef.Context.BoolTy, /*Action=*/Sema::AA_Casting,
4514  /*AllowExplicit=*/true);
4515  }
4516  SemaRef.getDiagnostics().setSuppressAllDiagnostics(Suppress);
4517  // Otherwise use the original loop condition and evaluate it at runtime.
4518  return CondExpr.isUsable() ? CondExpr.get() : Cond;
4519 }
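// For example (names are illustrative), for 'for (i = LB; i < UB; ++i)' the
// precondition built here is simply 'LB < UB'. If that comparison cannot be
// formed (say, a class type without a usable operator<), the original loop
// condition is returned unchanged and evaluated at runtime instead.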
4520 
4521 /// Build reference expression to the counter to be used for codegen.
4522 DeclRefExpr *OpenMPIterationSpaceChecker::buildCounterVar(
4523  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures,
4524  DSAStackTy &DSA) const {
4525  auto *VD = dyn_cast<VarDecl>(LCDecl);
4526  if (!VD) {
4527  VD = SemaRef.isOpenMPCapturedDecl(LCDecl);
4528  DeclRefExpr *Ref = buildDeclRefExpr(
4529  SemaRef, VD, VD->getType().getNonReferenceType(), DefaultLoc);
4530  const DSAStackTy::DSAVarData Data =
4531  DSA.getTopDSA(LCDecl, /*FromParent=*/false);
4532  // If the loop control decl is explicitly marked as private, do not mark it
4533  // as captured again.
4534  if (!isOpenMPPrivate(Data.CKind) || !Data.RefExpr)
4535  Captures.insert(std::make_pair(LCRef, Ref));
4536  return Ref;
4537  }
4538  return buildDeclRefExpr(SemaRef, VD, VD->getType().getNonReferenceType(),
4539  DefaultLoc);
4540 }
4541 
4542 Expr *OpenMPIterationSpaceChecker::buildPrivateCounterVar() const {
4543  if (LCDecl && !LCDecl->isInvalidDecl()) {
4544  QualType Type = LCDecl->getType().getNonReferenceType();
4545  VarDecl *PrivateVar = buildVarDecl(
4546  SemaRef, DefaultLoc, Type, LCDecl->getName(),
4547  LCDecl->hasAttrs() ? &LCDecl->getAttrs() : nullptr,
4548  isa<VarDecl>(LCDecl)
4549  ? buildDeclRefExpr(SemaRef, cast<VarDecl>(LCDecl), Type, DefaultLoc)
4550  : nullptr);
4551  if (PrivateVar->isInvalidDecl())
4552  return nullptr;
4553  return buildDeclRefExpr(SemaRef, PrivateVar, Type, DefaultLoc);
4554  }
4555  return nullptr;
4556 }
4557 
4558 /// Build initialization of the counter to be used for codegen.
4559 Expr *OpenMPIterationSpaceChecker::buildCounterInit() const { return LB; }
4560 
4561 /// Build step of the counter to be used for codegen.
4562 Expr *OpenMPIterationSpaceChecker::buildCounterStep() const { return Step; }
4563 
4564 Expr *OpenMPIterationSpaceChecker::buildOrderedLoopData(
4565  Scope *S, Expr *Counter,
4566  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures, SourceLocation Loc,
4567  Expr *Inc, OverloadedOperatorKind OOK) {
4568  Expr *Cnt = SemaRef.DefaultLvalueConversion(Counter).get();
4569  if (!Cnt)
4570  return nullptr;
4571  if (Inc) {
4572  assert((OOK == OO_Plus || OOK == OO_Minus) &&
4573  "Expected only + or - operations for depend clauses.");
4574  BinaryOperatorKind BOK = (OOK == OO_Plus) ? BO_Add : BO_Sub;
4575  Cnt = SemaRef.BuildBinOp(S, Loc, BOK, Cnt, Inc).get();
4576  if (!Cnt)
4577  return nullptr;
4578  }
4579  ExprResult Diff;
4580  QualType VarType = LCDecl->getType().getNonReferenceType();
4581  if (VarType->isIntegerType() || VarType->isPointerType() ||
4582  SemaRef.getLangOpts().CPlusPlus) {
4583  // Upper - Lower
4584  Expr *Upper =
4585  TestIsLessOp.getValue() ? Cnt : tryBuildCapture(SemaRef, UB, Captures).get();
4586  Expr *Lower =
4587  TestIsLessOp.getValue() ? tryBuildCapture(SemaRef, LB, Captures).get() : Cnt;
4588  if (!Upper || !Lower)
4589  return nullptr;
4590 
4591  Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Sub, Upper, Lower);
4592 
4593  if (!Diff.isUsable() && VarType->getAsCXXRecordDecl()) {
4594  // BuildBinOp already emitted error, this one is to point user to upper
4595  // and lower bound, and to tell what is passed to 'operator-'.
4596  SemaRef.Diag(Upper->getBeginLoc(), diag::err_omp_loop_diff_cxx)
4597  << Upper->getSourceRange() << Lower->getSourceRange();
4598  return nullptr;
4599  }
4600  }
4601 
4602  if (!Diff.isUsable())
4603  return nullptr;
4604 
4605  // Parentheses (for dumping/debugging purposes only).
4606  Diff = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, Diff.get());
4607  if (!Diff.isUsable())
4608  return nullptr;
4609 
4610  ExprResult NewStep = tryBuildCapture(SemaRef, Step, Captures);
4611  if (!NewStep.isUsable())
4612  return nullptr;
4613  // (Upper - Lower) / Step
4614  Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Div, Diff.get(), NewStep.get());
4615  if (!Diff.isUsable())
4616  return nullptr;
4617 
4618  return Diff.get();
4619 }
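// Sketch of how this feeds doacross dependences (OpenMP 4.5 'depend' syntax;
// the variable names are illustrative):
//   #pragma omp for ordered(1)
//   for (int i = LB; i < UB; i += ST) {
//     #pragma omp ordered depend(sink: i - 1)   // Inc = 1, OOK = OO_Minus
//     ...
//     #pragma omp ordered depend(source)        // Inc = nullptr
//   }
// For the sink clause the value built is ((i - 1) - LB) / ST, i.e. the logical
// iteration number of the dependence rather than the raw counter value.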
4620 
4621 /// Iteration space of a single for loop.
4622 struct LoopIterationSpace final {
4623  /// Condition of the loop.
4624  Expr *PreCond = nullptr;
4625  /// This expression calculates the number of iterations in the loop.
4626  /// It is always possible to calculate it before starting the loop.
4627  Expr *NumIterations = nullptr;
4628  /// The loop counter variable.
4629  Expr *CounterVar = nullptr;
4630  /// Private loop counter variable.
4631  Expr *PrivateCounterVar = nullptr;
4632  /// This is initializer for the initial value of #CounterVar.
4633  Expr *CounterInit = nullptr;
4634  /// This is step for the #CounterVar used to generate its update:
4635  /// #CounterVar = #CounterInit + #CounterStep * CurrentIteration.
4636  Expr *CounterStep = nullptr;
4637  /// Should step be subtracted?
4638  bool Subtract = false;
4639  /// Source range of the loop init.
4640  SourceRange InitSrcRange;
4641  /// Source range of the loop condition.
4642  SourceRange CondSrcRange;
4643  /// Source range of the loop increment.
4644  SourceRange IncSrcRange;
4645 };
4646 
4647 } // namespace
4648 
4649 void Sema::ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init) {
4650  assert(getLangOpts().OpenMP && "OpenMP is not active.");
4651  assert(Init && "Expected loop in canonical form.");
4652  unsigned AssociatedLoops = DSAStack->getAssociatedLoops();
4653  if (AssociatedLoops > 0 &&
4654  isOpenMPLoopDirective(DSAStack->getCurrentDirective())) {
4655  OpenMPIterationSpaceChecker ISC(*this, ForLoc);
4656  if (!ISC.checkAndSetInit(Init, /*EmitDiags=*/false)) {
4657  if (ValueDecl *D = ISC.getLoopDecl()) {
4658  auto *VD = dyn_cast<VarDecl>(D);
4659  if (!VD) {
4660  if (VarDecl *Private = isOpenMPCapturedDecl(D)) {
4661  VD = Private;
4662  } else {
4663  DeclRefExpr *Ref = buildCapture(*this, D, ISC.getLoopDeclRefExpr(),
4664  /*WithInit=*/false);
4665  VD = cast<VarDecl>(Ref->getDecl());
4666  }
4667  }
4668  DSAStack->addLoopControlVariable(D, VD);
4669  const Decl *LD = DSAStack->getPossiblyLoopCunter();
4670  if (LD != D->getCanonicalDecl()) {
4671  DSAStack->resetPossibleLoopCounter();
4672  if (auto *Var = dyn_cast_or_null<VarDecl>(LD))
4673  MarkDeclarationsReferencedInExpr(
4674  buildDeclRefExpr(*this, const_cast<VarDecl *>(Var),
4675  Var->getType().getNonLValueExprType(Context),
4676  ForLoc, /*RefersToCapture=*/true));
4677  }
4678  }
4679  }
4680  DSAStack->setAssociatedLoops(AssociatedLoops - 1);
4681  }
4682 }
4683 
4684 /// Called on a for stmt to check and extract its iteration space
4685 /// for further processing (such as collapsing).
4686 static bool checkOpenMPIterationSpace(
4687  OpenMPDirectiveKind DKind, Stmt *S, Sema &SemaRef, DSAStackTy &DSA,
4688  unsigned CurrentNestedLoopCount, unsigned NestedLoopCount,
4689  unsigned TotalNestedLoopCount, Expr *CollapseLoopCountExpr,
4690  Expr *OrderedLoopCountExpr,
4691  Sema::VarsWithInheritedDSAType &VarsWithImplicitDSA,
4692  LoopIterationSpace &ResultIterSpace,
4693  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) {
4694  // OpenMP [2.6, Canonical Loop Form]
4695  // for (init-expr; test-expr; incr-expr) structured-block
4696  auto *For = dyn_cast_or_null<ForStmt>(S);
4697  if (!For) {
4698  SemaRef.Diag(S->getBeginLoc(), diag::err_omp_not_for)
4699  << (CollapseLoopCountExpr != nullptr || OrderedLoopCountExpr != nullptr)
4700  << getOpenMPDirectiveName(DKind) << TotalNestedLoopCount
4701  << (CurrentNestedLoopCount > 0) << CurrentNestedLoopCount;
4702  if (TotalNestedLoopCount > 1) {
4703  if (CollapseLoopCountExpr && OrderedLoopCountExpr)
4704  SemaRef.Diag(DSA.getConstructLoc(),
4705  diag::note_omp_collapse_ordered_expr)
4706  << 2 << CollapseLoopCountExpr->getSourceRange()
4707  << OrderedLoopCountExpr->getSourceRange();
4708  else if (CollapseLoopCountExpr)
4709  SemaRef.Diag(CollapseLoopCountExpr->getExprLoc(),
4710  diag::note_omp_collapse_ordered_expr)
4711  << 0 << CollapseLoopCountExpr->getSourceRange();
4712  else
4713  SemaRef.Diag(OrderedLoopCountExpr->getExprLoc(),
4714  diag::note_omp_collapse_ordered_expr)
4715  << 1 << OrderedLoopCountExpr->getSourceRange();
4716  }
4717  return true;
4718  }
4719  assert(For->getBody());
4720 
4721  OpenMPIterationSpaceChecker ISC(SemaRef, For->getForLoc());
4722 
4723  // Check init.
4724  Stmt *Init = For->getInit();
4725  if (ISC.checkAndSetInit(Init))
4726  return true;
4727 
4728  bool HasErrors = false;
4729 
4730  // Check loop variable's type.
4731  if (ValueDecl *LCDecl = ISC.getLoopDecl()) {
4732  Expr *LoopDeclRefExpr = ISC.getLoopDeclRefExpr();
4733 
4734  // OpenMP [2.6, Canonical Loop Form]
4735  // Var is one of the following:
4736  // A variable of signed or unsigned integer type.
4737  // For C++, a variable of a random access iterator type.
4738  // For C, a variable of a pointer type.
4739  QualType VarType = LCDecl->getType().getNonReferenceType();
4740  if (!VarType->isDependentType() && !VarType->isIntegerType() &&
4741  !VarType->isPointerType() &&
4742  !(SemaRef.getLangOpts().CPlusPlus && VarType->isOverloadableType())) {
4743  SemaRef.Diag(Init->getBeginLoc(), diag::err_omp_loop_variable_type)
4744  << SemaRef.getLangOpts().CPlusPlus;
4745  HasErrors = true;
4746  }
4747 
4748  // OpenMP, 2.14.1.1 Data-sharing Attribute Rules for Variables Referenced in
4749  // a Construct
4750  // The loop iteration variable(s) in the associated for-loop(s) of a for or
4751  // parallel for construct is (are) private.
4752  // The loop iteration variable in the associated for-loop of a simd
4753  // construct with just one associated for-loop is linear with a
4754  // constant-linear-step that is the increment of the associated for-loop.
4755  // Exclude loop var from the list of variables with implicitly defined data
4756  // sharing attributes.
4757  VarsWithImplicitDSA.erase(LCDecl);
4758 
4759  // OpenMP [2.14.1.1, Data-sharing Attribute Rules for Variables Referenced
4760  // in a Construct, C/C++].
4761  // The loop iteration variable in the associated for-loop of a simd
4762  // construct with just one associated for-loop may be listed in a linear
4763  // clause with a constant-linear-step that is the increment of the
4764  // associated for-loop.
4765  // The loop iteration variable(s) in the associated for-loop(s) of a for or
4766  // parallel for construct may be listed in a private or lastprivate clause.
4767  DSAStackTy::DSAVarData DVar = DSA.getTopDSA(LCDecl, false);
4768  // If LoopVarRefExpr is nullptr it means the corresponding loop variable is
4769  // declared in the loop and it is predetermined as private.
4770  OpenMPClauseKind PredeterminedCKind =
4771  isOpenMPSimdDirective(DKind)
4772  ? ((NestedLoopCount == 1) ? OMPC_linear : OMPC_lastprivate)
4773  : OMPC_private;
4774  if (((isOpenMPSimdDirective(DKind) && DVar.CKind != OMPC_unknown &&
4775  DVar.CKind != PredeterminedCKind) ||
4776  ((isOpenMPWorksharingDirective(DKind) || DKind == OMPD_taskloop ||
4777  isOpenMPDistributeDirective(DKind)) &&
4778  !isOpenMPSimdDirective(DKind) && DVar.CKind != OMPC_unknown &&
4779  DVar.CKind != OMPC_private && DVar.CKind != OMPC_lastprivate)) &&
4780  (DVar.CKind != OMPC_private || DVar.RefExpr != nullptr)) {
4781  SemaRef.Diag(Init->getBeginLoc(), diag::err_omp_loop_var_dsa)
4782  << getOpenMPClauseName(DVar.CKind) << getOpenMPDirectiveName(DKind)
4783  << getOpenMPClauseName(PredeterminedCKind);
4784  if (DVar.RefExpr == nullptr)
4785  DVar.CKind = PredeterminedCKind;
4786  reportOriginalDsa(SemaRef, &DSA, LCDecl, DVar, /*IsLoopIterVar=*/true);
4787  HasErrors = true;
4788  } else if (LoopDeclRefExpr != nullptr) {
4789  // Make the loop iteration variable private (for worksharing constructs),
4790  // linear (for simd directives with the only one associated loop) or
4791  // lastprivate (for simd directives with several collapsed or ordered
4792  // loops).
4793  if (DVar.CKind == OMPC_unknown)
4794  DVar = DSA.hasDSA(LCDecl, isOpenMPPrivate,
4795  [](OpenMPDirectiveKind) -> bool { return true; },
4796  /*FromParent=*/false);
4797  DSA.addDSA(LCDecl, LoopDeclRefExpr, PredeterminedCKind);
4798  }
4799 
4800  assert(isOpenMPLoopDirective(DKind) && "DSA for non-loop vars");
4801 
4802  // Check test-expr.
4803  HasErrors |= ISC.checkAndSetCond(For->getCond());
4804 
4805  // Check incr-expr.
4806  HasErrors |= ISC.checkAndSetInc(For->getInc());
4807  }
4808 
4809  if (ISC.dependent() || SemaRef.CurContext->isDependentContext() || HasErrors)
4810  return HasErrors;
4811 
4812  // Build the loop's iteration space representation.
4813  ResultIterSpace.PreCond =
4814  ISC.buildPreCond(DSA.getCurScope(), For->getCond(), Captures);
4815  ResultIterSpace.NumIterations = ISC.buildNumIterations(
4816  DSA.getCurScope(),
4817  (isOpenMPWorksharingDirective(DKind) ||
4818  isOpenMPTaskLoopDirective(DKind) || isOpenMPDistributeDirective(DKind)),
4819  Captures);
4820  ResultIterSpace.CounterVar = ISC.buildCounterVar(Captures, DSA);
4821  ResultIterSpace.PrivateCounterVar = ISC.buildPrivateCounterVar();
4822  ResultIterSpace.CounterInit = ISC.buildCounterInit();
4823  ResultIterSpace.CounterStep = ISC.buildCounterStep();
4824  ResultIterSpace.InitSrcRange = ISC.getInitSrcRange();
4825  ResultIterSpace.CondSrcRange = ISC.getConditionSrcRange();
4826  ResultIterSpace.IncSrcRange = ISC.getIncrementSrcRange();
4827  ResultIterSpace.Subtract = ISC.shouldSubtractStep();
4828 
4829  HasErrors |= (ResultIterSpace.PreCond == nullptr ||
4830  ResultIterSpace.NumIterations == nullptr ||
4831  ResultIterSpace.CounterVar == nullptr ||
4832  ResultIterSpace.PrivateCounterVar == nullptr ||
4833  ResultIterSpace.CounterInit == nullptr ||
4834  ResultIterSpace.CounterStep == nullptr);
4835  if (!HasErrors && DSA.isOrderedRegion()) {
4836  if (DSA.getOrderedRegionParam().second->getNumForLoops()) {
4837  if (CurrentNestedLoopCount <
4838  DSA.getOrderedRegionParam().second->getLoopNumIterations().size()) {
4839  DSA.getOrderedRegionParam().second->setLoopNumIterations(
4840  CurrentNestedLoopCount, ResultIterSpace.NumIterations);
4841  DSA.getOrderedRegionParam().second->setLoopCounter(
4842  CurrentNestedLoopCount, ResultIterSpace.CounterVar);
4843  }
4844  }
4845  for (auto &Pair : DSA.getDoacrossDependClauses()) {
4846  if (CurrentNestedLoopCount >= Pair.first->getNumLoops()) {
4847  // Erroneous case - clause has some problems.
4848  continue;
4849  }
4850  if (Pair.first->getDependencyKind() == OMPC_DEPEND_sink &&
4851  Pair.second.size() <= CurrentNestedLoopCount) {
4852  // Erroneous case - clause has some problems.
4853  Pair.first->setLoopData(CurrentNestedLoopCount, nullptr);
4854  continue;
4855  }
4856  Expr *CntValue;
4857  if (Pair.first->getDependencyKind() == OMPC_DEPEND_source)
4858  CntValue = ISC.buildOrderedLoopData(
4859  DSA.getCurScope(), ResultIterSpace.CounterVar, Captures,
4860  Pair.first->getDependencyLoc());
4861  else
4862  CntValue = ISC.buildOrderedLoopData(
4863  DSA.getCurScope(), ResultIterSpace.CounterVar, Captures,
4864  Pair.first->getDependencyLoc(),
4865  Pair.second[CurrentNestedLoopCount].first,
4866  Pair.second[CurrentNestedLoopCount].second);
4867  Pair.first->setLoopData(CurrentNestedLoopCount, CntValue);
4868  }
4869  }
4870 
4871  return HasErrors;
4872 }
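// Example of the data-sharing check performed above ('i' is an arbitrary loop
// variable; the exact outcome depends on the directive kind):
//   #pragma omp parallel for private(i)       // OK: private is predetermined
//   #pragma omp parallel for lastprivate(i)   // OK for worksharing loops
//   #pragma omp parallel for shared(i)        // error: err_omp_loop_var_dsa
//   #pragma omp simd linear(i)                // OK: linear is predetermined for
//                                             // a single associated loop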
4873 
4874 /// Build 'VarRef = Start'.
4875 static ExprResult
4876 buildCounterInit(Sema &SemaRef, Scope *S, SourceLocation Loc, ExprResult VarRef,
4877  ExprResult Start,
4878  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) {
4879  // Build 'VarRef = Start'.
4880  ExprResult NewStart = tryBuildCapture(SemaRef, Start.get(), Captures);
4881  if (!NewStart.isUsable())
4882  return ExprError();
4883  if (!SemaRef.Context.hasSameType(NewStart.get()->getType(),
4884  VarRef.get()->getType())) {
4885  NewStart = SemaRef.PerformImplicitConversion(
4886  NewStart.get(), VarRef.get()->getType(), Sema::AA_Converting,
4887  /*AllowExplicit=*/true);
4888  if (!NewStart.isUsable())
4889  return ExprError();
4890  }
4891 
4892  ExprResult Init =
4893  SemaRef.BuildBinOp(S, Loc, BO_Assign, VarRef.get(), NewStart.get());
4894  return Init;
4895 }
4896 
4897 /// Build 'VarRef = Start + Iter * Step'.
4898 static ExprResult buildCounterUpdate(
4899  Sema &SemaRef, Scope *S, SourceLocation Loc, ExprResult VarRef,
4900  ExprResult Start, ExprResult Iter, ExprResult Step, bool Subtract,
4901  llvm::MapVector<const Expr *, DeclRefExpr *> *Captures = nullptr) {
4902  // Add parentheses (for debugging purposes only).
4903  Iter = SemaRef.ActOnParenExpr(Loc, Loc, Iter.get());
4904  if (!VarRef.isUsable() || !Start.isUsable() || !Iter.isUsable() ||
4905  !Step.isUsable())
4906  return ExprError();
4907 
4908  ExprResult NewStep = Step;
4909  if (Captures)
4910  NewStep = tryBuildCapture(SemaRef, Step.get(), *Captures);
4911  if (NewStep.isInvalid())
4912  return ExprError();
4913  ExprResult Update =
4914  SemaRef.BuildBinOp(S, Loc, BO_Mul, Iter.get(), NewStep.get());
4915  if (!Update.isUsable())
4916  return ExprError();
4917 
4918  // Try to build 'VarRef = Start, VarRef (+|-)= Iter * Step' or
4919  // 'VarRef = Start (+|-) Iter * Step'.
4920  ExprResult NewStart = Start;
4921  if (Captures)
4922  NewStart = tryBuildCapture(SemaRef, Start.get(), *Captures);
4923  if (NewStart.isInvalid())
4924  return ExprError();
4925 
4926  // First attempt: try to build 'VarRef = Start, VarRef += Iter * Step'.
4927  ExprResult SavedUpdate = Update;
4928  ExprResult UpdateVal;
4929  if (VarRef.get()->getType()->isOverloadableType() ||
4930  NewStart.get()->getType()->isOverloadableType() ||
4931  Update.get()->getType()->isOverloadableType()) {
4932  bool Suppress = SemaRef.getDiagnostics().getSuppressAllDiagnostics();
4933  SemaRef.getDiagnostics().setSuppressAllDiagnostics(/*Val=*/true);
4934  Update =
4935  SemaRef.BuildBinOp(S, Loc, BO_Assign, VarRef.get(), NewStart.get());
4936  if (Update.isUsable()) {
4937  UpdateVal =
4938  SemaRef.BuildBinOp(S, Loc, Subtract ? BO_SubAssign : BO_AddAssign,
4939  VarRef.get(), SavedUpdate.get());
4940  if (UpdateVal.isUsable()) {
4941  Update = SemaRef.CreateBuiltinBinOp(Loc, BO_Comma, Update.get(),
4942  UpdateVal.get());
4943  }
4944  }
4945  SemaRef.getDiagnostics().setSuppressAllDiagnostics(Suppress);
4946  }
4947 
4948  // Second attempt: try to build 'VarRef = Start (+|-) Iter * Step'.
4949  if (!Update.isUsable() || !UpdateVal.isUsable()) {
4950  Update = SemaRef.BuildBinOp(S, Loc, Subtract ? BO_Sub : BO_Add,
4951  NewStart.get(), SavedUpdate.get());
4952  if (!Update.isUsable())
4953  return ExprError();
4954 
4955  if (!SemaRef.Context.hasSameType(Update.get()->getType(),
4956  VarRef.get()->getType())) {
4957  Update = SemaRef.PerformImplicitConversion(
4958  Update.get(), VarRef.get()->getType(), Sema::AA_Converting, true);
4959  if (!Update.isUsable())
4960  return ExprError();
4961  }
4962 
4963  Update = SemaRef.BuildBinOp(S, Loc, BO_Assign, VarRef.get(), Update.get());
4964  }
4965  return Update;
4966 }
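// Illustrative result (names hypothetical): for a counter 'j' with Start = J0,
// Step = 2 and logical iteration number IV, the update built here is either
//   j = J0, j += IV * 2;   // first attempt, only tried when the types involved
//                          // have overloaded operators (e.g. iterators)
// or the plain form
//   j = J0 + IV * 2;       // with an implicit conversion if the types differ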
4967 
4968 /// Convert integer expression \a E to make it have at least \a Bits
4969 /// bits.
4970 static ExprResult widenIterationCount(unsigned Bits, Expr *E, Sema &SemaRef) {
4971  if (E == nullptr)
4972  return ExprError();
4973  ASTContext &C = SemaRef.Context;
4974  QualType OldType = E->getType();
4975  unsigned HasBits = C.getTypeSize(OldType);
4976  if (HasBits >= Bits)
4977  return ExprResult(E);
4978  // OK to convert to signed, because new type has more bits than old.
4979  QualType NewType = C.getIntTypeForBitwidth(Bits, /* Signed */ true);
4980  return SemaRef.PerformImplicitConversion(E, NewType, Sema::AA_Converting,
4981  true);
4982 }
4983 
4984 /// Check if the given expression \a E is a constant integer that fits
4985 /// into \a Bits bits.
4986 static bool fitsInto(unsigned Bits, bool Signed, const Expr *E, Sema &SemaRef) {
4987  if (E == nullptr)
4988  return false;
4989  llvm::APSInt Result;
4990  if (E->isIntegerConstantExpr(Result, SemaRef.Context))
4991  return Signed ? Result.isSignedIntN(Bits) : Result.isIntN(Bits);
4992  return false;
4993 }
4994 
4995 /// Build preinits statement for the given declarations.
4996 static Stmt *buildPreInits(ASTContext &Context,
4997  MutableArrayRef<Decl *> PreInits) {
4998  if (!PreInits.empty()) {
4999  return new (Context) DeclStmt(
5000  DeclGroupRef::Create(Context, PreInits.begin(), PreInits.size()),
5001  SourceLocation(), SourceLocation());
5002  }
5003  return nullptr;
5004 }
5005 
5006 /// Build preinits statement for the given declarations.
5007 static Stmt *
5008 buildPreInits(ASTContext &Context,
5009  const llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) {
5010  if (!Captures.empty()) {
5011  SmallVector<Decl *, 16> PreInits;
5012  for (const auto &Pair : Captures)
5013  PreInits.push_back(Pair.second->getDecl());
5014  return buildPreInits(Context, PreInits);
5015  }
5016  return nullptr;
5017 }
5018 
5019 /// Build postupdate expression for the given list of postupdates expressions.
5020 static Expr *buildPostUpdate(Sema &S, ArrayRef<Expr *> PostUpdates) {
5021  Expr *PostUpdate = nullptr;
5022  if (!PostUpdates.empty()) {
5023  for (Expr *E : PostUpdates) {
5024  Expr *ConvE = S.BuildCStyleCastExpr(
5025  E->getExprLoc(),
5026  S.Context.getTrivialTypeSourceInfo(S.Context.VoidTy),
5027  E->getExprLoc(), E)
5028  .get();
5029  PostUpdate = PostUpdate
5030  ? S.CreateBuiltinBinOp(ConvE->getExprLoc(), BO_Comma,
5031  PostUpdate, ConvE)
5032  .get()
5033  : ConvE;
5034  }
5035  }
5036  return PostUpdate;
5037 }
5038 
5039 /// Called on a for stmt to check itself and nested loops (if any).
5040 /// \return Returns 0 if one of the collapsed stmts is not canonical for loop,
5041 /// number of collapsed loops otherwise.
5042 static unsigned
5043 checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
5044  Expr *OrderedLoopCountExpr, Stmt *AStmt, Sema &SemaRef,
5045  DSAStackTy &DSA,
5046  Sema::VarsWithInheritedDSAType &VarsWithImplicitDSA,
5047  OMPLoopDirective::HelperExprs &Built) {
5048  unsigned NestedLoopCount = 1;
5049  if (CollapseLoopCountExpr) {
5050  // Found 'collapse' clause - calculate collapse number.
5051  Expr::EvalResult Result;
5052  if (CollapseLoopCountExpr->EvaluateAsInt(Result, SemaRef.getASTContext()))
5053  NestedLoopCount = Result.Val.getInt().getLimitedValue();
5054  }
5055  unsigned OrderedLoopCount = 1;
5056  if (OrderedLoopCountExpr) {
5057  // Found 'ordered' clause - calculate the ordered loop count.
5058  Expr::EvalResult EVResult;
5059  if (OrderedLoopCountExpr->EvaluateAsInt(EVResult, SemaRef.getASTContext())) {
5060  llvm::APSInt Result = EVResult.Val.getInt();
5061  if (Result.getLimitedValue() < NestedLoopCount) {
5062  SemaRef.Diag(OrderedLoopCountExpr->getExprLoc(),
5063  diag::err_omp_wrong_ordered_loop_count)
5064  << OrderedLoopCountExpr->getSourceRange();
5065  SemaRef.Diag(CollapseLoopCountExpr->getExprLoc(),
5066  diag::note_collapse_loop_count)
5067  << CollapseLoopCountExpr->getSourceRange();
5068  }
5069  OrderedLoopCount = Result.getLimitedValue();
5070  }
5071  }
5072  // This is a helper routine for loop directives (e.g., 'for', 'simd',
5073  // 'for simd', etc.).
5074  llvm::MapVector<const Expr *, DeclRefExpr *> Captures;
5075  SmallVector<LoopIterationSpace, 4> IterSpaces;
5076  IterSpaces.resize(std::max(OrderedLoopCount, NestedLoopCount));
5077  Stmt *CurStmt = AStmt->IgnoreContainers(/* IgnoreCaptured */ true);
5078  for (unsigned Cnt = 0; Cnt < NestedLoopCount; ++Cnt) {
5079  if (checkOpenMPIterationSpace(
5080  DKind, CurStmt, SemaRef, DSA, Cnt, NestedLoopCount,
5081  std::max(OrderedLoopCount, NestedLoopCount), CollapseLoopCountExpr,
5082  OrderedLoopCountExpr, VarsWithImplicitDSA, IterSpaces[Cnt],
5083  Captures))
5084  return 0;
5085  // Move on to the next nested for loop, or to the loop body.
5086  // OpenMP [2.8.1, simd construct, Restrictions]
5087  // All loops associated with the construct must be perfectly nested; that
5088  // is, there must be no intervening code nor any OpenMP directive between
5089  // any two loops.
5090  CurStmt = cast<ForStmt>(CurStmt)->getBody()->IgnoreContainers();
5091  }
5092  for (unsigned Cnt = NestedLoopCount; Cnt < OrderedLoopCount; ++Cnt) {
5093  if (checkOpenMPIterationSpace(
5094  DKind, CurStmt, SemaRef, DSA, Cnt, NestedLoopCount,
5095  std::max(OrderedLoopCount, NestedLoopCount), CollapseLoopCountExpr,
5096  OrderedLoopCountExpr, VarsWithImplicitDSA, IterSpaces[Cnt],
5097  Captures))
5098  return 0;
5099  if (Cnt > 0 && IterSpaces[Cnt].CounterVar) {
5100  // Handle initialization of captured loop iterator variables.
5101  auto *DRE = cast<DeclRefExpr>(IterSpaces[Cnt].CounterVar);
5102  if (isa<OMPCapturedExprDecl>(DRE->getDecl())) {
5103  Captures[DRE] = DRE;
5104  }
5105  }
5106  // Move on to the next nested for loop, or to the loop body.
5107  // OpenMP [2.8.1, simd construct, Restrictions]
5108  // All loops associated with the construct must be perfectly nested; that
5109  // is, there must be no intervening code nor any OpenMP directive between
5110  // any two loops.
5111  CurStmt = cast<ForStmt>(CurStmt)->getBody()->IgnoreContainers();
5112  }
5113 
5114  Built.clear(/* size */ NestedLoopCount);
5115 
5116  if (SemaRef.CurContext->isDependentContext())
5117  return NestedLoopCount;
5118 
5119  // An example of what is generated for the following code:
5120  //
5121  // #pragma omp simd collapse(2) ordered(2)
5122  // for (i = 0; i < NI; ++i)
5123  // for (k = 0; k < NK; ++k)
5124  // for (j = J0; j < NJ; j+=2) {
5125  // <loop body>
5126  // }
5127  //
5128  // We generate the code below.
5129  // Note: the loop body may be outlined in CodeGen.
5130  // Note: some counters may be C++ classes, operator- is used to find number of
5131  // iterations and operator+= to calculate counter value.
5132  // Note: decltype(NumIterations) must be integer type (in 'omp for', only i32
5133  // or i64 is currently supported).
5134  //
5135  // #define NumIterations (NI * ((NJ - J0 - 1 + 2) / 2))
5136  // for (int[32|64]_t IV = 0; IV < NumIterations; ++IV ) {
5137  // .local.i = IV / ((NJ - J0 - 1 + 2) / 2);
5138  // .local.j = J0 + (IV % ((NJ - J0 - 1 + 2) / 2)) * 2;
5139  // // similar updates for vars in clauses (e.g. 'linear')
5140  // <loop body (using local i and j)>
5141  // }
5142  // i = NI; // assign final values of counters
5143  // j = NJ;
5144  //
5145 
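// A concrete instance of the schema above (values chosen for illustration):
// with NI = 2, NJ = 5, J0 = 0 the inner trip count is (5 - 0 - 1 + 2) / 2 = 3
// (j = 0, 2, 4), so NumIterations = 2 * 3 = 6, and for IV in [0, 6):
//   .local.i = IV / 3;           // 0, 0, 0, 1, 1, 1
//   .local.j = 0 + (IV % 3) * 2; // 0, 2, 4, 0, 2, 4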
5146  // Last iteration number is (I1 * I2 * ... In) - 1, where I1, I2 ... In are
5147  // the iteration counts of the collapsed for loops.
5148  // Precondition tests if there is at least one iteration (all conditions are
5149  // true).
5150  auto PreCond = ExprResult(IterSpaces[0].PreCond);
5151  Expr *N0 = IterSpaces[0].NumIterations;
5152  ExprResult LastIteration32 =
5153  widenIterationCount(/*Bits=*/32,
5154  SemaRef
5155  .PerformImplicitConversion(
5156  N0->IgnoreImpCasts(), N0->getType(),
5157  Sema::AA_Converting, /*AllowExplicit=*/true)
5158  .get(),
5159  SemaRef);
5160  ExprResult LastIteration64 = widenIterationCount(
5161  /*Bits=*/64,
5162  SemaRef
5163  .PerformImplicitConversion(N0->IgnoreImpCasts(), N0->getType(),
5164  Sema::AA_Converting,
5165  /*AllowExplicit=*/true)
5166  .get(),
5167  SemaRef);
5168 
5169  if (!LastIteration32.isUsable() || !LastIteration64.isUsable())
5170  return NestedLoopCount;
5171 
5172  ASTContext &C = SemaRef.Context;
5173  bool AllCountsNeedLessThan32Bits = C.getTypeSize(N0->getType()) < 32;
5174 
5175  Scope *CurScope = DSA.getCurScope();
5176  for (unsigned Cnt = 1; Cnt < NestedLoopCount; ++Cnt) {
5177  if (PreCond.isUsable()) {
5178  PreCond =
5179  SemaRef.BuildBinOp(CurScope, PreCond.get()->getExprLoc(), BO_LAnd,
5180  PreCond.get(), IterSpaces[Cnt].PreCond);
5181  }
5182  Expr *N = IterSpaces[Cnt].NumIterations;
5183  SourceLocation Loc = N->getExprLoc();
5184  AllCountsNeedLessThan32Bits &= C.getTypeSize(N->getType()) < 32;
5185  if (LastIteration32.isUsable())
5186  LastIteration32 = SemaRef.BuildBinOp(
5187  CurScope, Loc, BO_Mul, LastIteration32.get(),
5188  SemaRef
5189  .PerformImplicitConversion(N->IgnoreImpCasts(), N->getType(),
5190  Sema::AA_Converting,
5191  /*AllowExplicit=*/true)
5192  .get());
5193  if (LastIteration64.isUsable())
5194  LastIteration64 = SemaRef.BuildBinOp(
5195  CurScope, Loc, BO_Mul, LastIteration64.get(),
5196  SemaRef
5197  .PerformImplicitConversion(N->IgnoreImpCasts(), N->getType(),
5198  Sema::AA_Converting,
5199  /*AllowExplicit=*/true)
5200  .get());
5201  }
5202 
5203  // Choose either the 32-bit or 64-bit version.
5204  ExprResult LastIteration = LastIteration64;
5205  if (LastIteration32.isUsable() &&
5206  C.getTypeSize(LastIteration32.get()->getType()) == 32 &&
5207  (AllCountsNeedLessThan32Bits || NestedLoopCount == 1 ||
5208  fitsInto(
5209  /*Bits=*/32,
5210  LastIteration32.get()->getType()->hasSignedIntegerRepresentation(),
5211  LastIteration64.get(), SemaRef)))
5212  LastIteration = LastIteration32;
5213  QualType VType = LastIteration.get()->getType();
5214  QualType RealVType = VType;
5215  QualType StrideVType = VType;
5216  if (isOpenMPTaskLoopDirective(DKind)) {
5217  VType =
5218  SemaRef.Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0);
5219  StrideVType =
5220  SemaRef.Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
5221  }
5222 
5223  if (!LastIteration.isUsable())
5224  return 0;
5225 
5226  // Save the number of iterations.
5227  ExprResult NumIterations = LastIteration;
5228  {
5229  LastIteration = SemaRef.BuildBinOp(
5230  CurScope, LastIteration.get()->getExprLoc(), BO_Sub,
5231  LastIteration.get(),
5232  SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get());
5233  if (!LastIteration.isUsable())
5234  return 0;
5235  }
5236 
5237  // Calculate the last iteration number beforehand instead of doing this on
5238  // each iteration. Do not do this if the number of iterations may be constant-folded.
5239  llvm::APSInt Result;
5240  bool IsConstant =
5241  LastIteration.get()->isIntegerConstantExpr(Result, SemaRef.Context);
5242  ExprResult CalcLastIteration;
5243  if (!IsConstant) {
5244  ExprResult SaveRef =
5245  tryBuildCapture(SemaRef, LastIteration.get(), Captures);
5246  LastIteration = SaveRef;
5247 
5248  // Prepare SaveRef + 1.
5249  NumIterations = SemaRef.BuildBinOp(
5250  CurScope, SaveRef.get()->getExprLoc(), BO_Add, SaveRef.get(),
5251  SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get());
5252  if (!NumIterations.isUsable())
5253  return 0;
5254  }
5255 
5256  SourceLocation InitLoc = IterSpaces[0].InitSrcRange.getBegin();
5257 
5258  // Build variables passed into runtime, necessary for worksharing directives.
5259  ExprResult LB, UB, IL, ST, EUB, CombLB, CombUB, PrevLB, PrevUB, CombEUB;
5260  if (isOpenMPWorksharingDirective(DKind) || isOpenMPTaskLoopDirective(DKind) ||
5261  isOpenMPDistributeDirective(DKind)) {
5262  // Lower bound variable, initialized with zero.
5263  VarDecl *LBDecl = buildVarDecl(SemaRef, InitLoc, VType, ".omp.lb");
5264  LB = buildDeclRefExpr(SemaRef, LBDecl, VType, InitLoc);
5265  SemaRef.AddInitializerToDecl(LBDecl,
5266  SemaRef.ActOnIntegerConstant(InitLoc, 0).get(),
5267  /*DirectInit*/ false);
5268 
5269  // Upper bound variable, initialized with last iteration number.
5270  VarDecl *UBDecl = buildVarDecl(SemaRef, InitLoc, VType, ".omp.ub");
5271  UB = buildDeclRefExpr(SemaRef, UBDecl, VType, InitLoc);
5272  SemaRef.AddInitializerToDecl(UBDecl, LastIteration.get(),
5273  /*DirectInit*/ false);
5274 
5275  // A 32-bit variable-flag where runtime returns 1 for the last iteration.
5276  // This will be used to implement clause 'lastprivate'.
5277  QualType Int32Ty = SemaRef.Context.getIntTypeForBitwidth(32, true);
5278  VarDecl *ILDecl = buildVarDecl(SemaRef, InitLoc, Int32Ty, ".omp.is_last");
5279  IL = buildDeclRefExpr(SemaRef, ILDecl, Int32Ty, InitLoc);
5280  SemaRef.AddInitializerToDecl(ILDecl,
5281  SemaRef.ActOnIntegerConstant(InitLoc, 0).get(),
5282  /*DirectInit*/ false);
5283 
5284  // Stride variable returned by runtime (we initialize it to 1 by default).
5285  VarDecl *STDecl =
5286  buildVarDecl(SemaRef, InitLoc, StrideVType, ".omp.stride");
5287  ST = buildDeclRefExpr(SemaRef, STDecl, StrideVType, InitLoc);
5288  SemaRef.AddInitializerToDecl(STDecl,
5289  SemaRef.ActOnIntegerConstant(InitLoc, 1).get(),
5290  /*DirectInit*/ false);
5291 
5292  // Build expression: UB = min(UB, LastIteration)
5293  // It is necessary for CodeGen of directives with static scheduling.
5294  ExprResult IsUBGreater = SemaRef.BuildBinOp(CurScope, InitLoc, BO_GT,
5295  UB.get(), LastIteration.get());
5296  ExprResult CondOp = SemaRef.ActOnConditionalOp(
5297  LastIteration.get()->getExprLoc(), InitLoc, IsUBGreater.get(),
5298  LastIteration.get(), UB.get());
5299  EUB = SemaRef.BuildBinOp(CurScope, InitLoc, BO_Assign, UB.get(),
5300  CondOp.get());
5301  EUB = SemaRef.ActOnFinishFullExpr(EUB.get());
5302 
5303  // If we have a combined directive that combines 'distribute', 'for' or
5304  // 'simd' we need to be able to access the bounds of the schedule of the
5305  // enclosing region. E.g. in 'distribute parallel for' the bounds obtained
5306  // by scheduling 'distribute' have to be passed to the schedule of 'for'.
5307  if (isOpenMPLoopBoundSharingDirective(DKind)) {
5308  // Lower bound variable, initialized with zero.
5309  VarDecl *CombLBDecl =
5310  buildVarDecl(SemaRef, InitLoc, VType, ".omp.comb.lb");
5311  CombLB = buildDeclRefExpr(SemaRef, CombLBDecl, VType, InitLoc);
5312  SemaRef.AddInitializerToDecl(
5313  CombLBDecl, SemaRef.ActOnIntegerConstant(InitLoc, 0).get(),
5314  /*DirectInit*/ false);
5315 
5316  // Upper bound variable, initialized with last iteration number.
5317  VarDecl *CombUBDecl =
5318  buildVarDecl(SemaRef, InitLoc, VType, ".omp.comb.ub");
5319  CombUB = buildDeclRefExpr(SemaRef, CombUBDecl, VType, InitLoc);
5320  SemaRef.AddInitializerToDecl(CombUBDecl, LastIteration.get(),
5321  /*DirectInit*/ false);
5322 
5323  ExprResult CombIsUBGreater = SemaRef.BuildBinOp(
5324  CurScope, InitLoc, BO_GT, CombUB.get(), LastIteration.get());
5325  ExprResult CombCondOp =
5326  SemaRef.ActOnConditionalOp(InitLoc, InitLoc, CombIsUBGreater.get(),
5327  LastIteration.get(), CombUB.get());
5328  CombEUB = SemaRef.BuildBinOp(CurScope, InitLoc, BO_Assign, CombUB.get(),
5329  CombCondOp.get());
5330  CombEUB = SemaRef.ActOnFinishFullExpr(CombEUB.get());
5331 
5332  const CapturedDecl *CD = cast<CapturedStmt>(AStmt)->getCapturedDecl();
5333  // We expect to have at least 2 more parameters than the 'parallel'
5334  // directive does - the lower and upper bounds of the previous schedule.
5335  assert(CD->getNumParams() >= 4 &&
5336  "Unexpected number of parameters in loop combined directive");
5337 
5338  // Set the proper type for the bounds given what we learned from the
5339  // enclosed loops.
5340  ImplicitParamDecl *PrevLBDecl = CD->getParam(/*PrevLB=*/2);
5341  ImplicitParamDecl *PrevUBDecl = CD->getParam(/*PrevUB=*/3);
5342 
5343  // Previous lower and upper bounds are obtained from the region
5344  // parameters.
5345  PrevLB =
5346  buildDeclRefExpr(SemaRef, PrevLBDecl, PrevLBDecl->getType(), InitLoc);
5347  PrevUB =
5348  buildDeclRefExpr(SemaRef, PrevUBDecl, PrevUBDecl->getType(), InitLoc);
5349  }
5350  }
5351 
5352  // Build the iteration variable and its initialization before loop.
5353  ExprResult IV;
5354  ExprResult Init, CombInit;
5355  {
5356  VarDecl *IVDecl = buildVarDecl(SemaRef, InitLoc, RealVType, ".omp.iv");
5357  IV = buildDeclRefExpr(SemaRef, IVDecl, RealVType, InitLoc);
5358  Expr *RHS =
5359  (isOpenMPWorksharingDirective(DKind) ||
5360  isOpenMPTaskLoopDirective(DKind) || isOpenMPDistributeDirective(DKind))
5361  ? LB.get()
5362  : SemaRef.ActOnIntegerConstant(SourceLocation(), 0).get();
5363  Init = SemaRef.BuildBinOp(CurScope, InitLoc, BO_Assign, IV.get(), RHS);
5364  Init = SemaRef.ActOnFinishFullExpr(Init.get());
5365 
5366  if (isOpenMPLoopBoundSharingDirective(DKind)) {
5367  Expr *CombRHS =
5368  (isOpenMPWorksharingDirective(DKind) ||
5369  isOpenMPTaskLoopDirective(DKind) ||
5370  isOpenMPDistributeDirective(DKind))
5371  ? CombLB.get()
5372  : SemaRef.ActOnIntegerConstant(SourceLocation(), 0).get();
5373  CombInit =
5374  SemaRef.BuildBinOp(CurScope, InitLoc, BO_Assign, IV.get(), CombRHS);
5375  CombInit = SemaRef.ActOnFinishFullExpr(CombInit.get());
5376  }
5377  }
5378 
5379  // Loop condition (IV < NumIterations) or (IV <= UB) for worksharing loops.
5380  SourceLocation CondLoc = AStmt->getBeginLoc();
5381  ExprResult Cond =
5382  (isOpenMPWorksharingDirective(DKind) ||
5383  isOpenMPTaskLoopDirective(DKind) || isOpenMPDistributeDirective(DKind))
5384  ? SemaRef.BuildBinOp(CurScope, CondLoc, BO_LE, IV.get(), UB.get())
5385  : SemaRef.BuildBinOp(CurScope, CondLoc, BO_LT, IV.get(),
5386  NumIterations.get());
5387  ExprResult CombDistCond;
5388  if (isOpenMPLoopBoundSharingDirective(DKind)) {
5389  CombDistCond =
5390  SemaRef.BuildBinOp(
5391  CurScope, CondLoc, BO_LT, IV.get(), NumIterations.get());
5392  }
5393 
5394  ExprResult CombCond;
5395  if (isOpenMPLoopBoundSharingDirective(DKind)) {
5396  CombCond =
5397  SemaRef.BuildBinOp(CurScope, CondLoc, BO_LE, IV.get(), CombUB.get());
5398  }
5399  // Loop increment (IV = IV + 1)
5400  SourceLocation IncLoc = AStmt->getBeginLoc();
5401  ExprResult Inc =
5402  SemaRef.BuildBinOp(CurScope, IncLoc, BO_Add, IV.get(),
5403  SemaRef.ActOnIntegerConstant(IncLoc, 1).get());
5404  if (!Inc.isUsable())
5405  return 0;
5406  Inc = SemaRef.BuildBinOp(CurScope, IncLoc, BO_Assign, IV.get(), Inc.get());
5407  Inc = SemaRef.ActOnFinishFullExpr(Inc.get());
5408  if (!Inc.isUsable())
5409  return 0;
5410 
5411  // Increments for worksharing loops (LB = LB + ST; UB = UB + ST).
5412  // Used for directives with static scheduling.
5413  // In a combined construct, add a combined version that uses CombLB and CombUB
5414  // as the base variables for the update.
5415  ExprResult NextLB, NextUB, CombNextLB, CombNextUB;
5416  if (isOpenMPWorksharingDirective(DKind) || isOpenMPTaskLoopDirective(DKind) ||
5417  isOpenMPDistributeDirective(DKind)) {
5418  // LB + ST
5419  NextLB = SemaRef.BuildBinOp(CurScope, IncLoc, BO_Add, LB.get(), ST.get());
5420  if (!NextLB.isUsable())
5421  return 0;
5422  // LB = LB + ST
5423  NextLB =
5424  SemaRef.BuildBinOp(CurScope, IncLoc, BO_Assign, LB.get(), NextLB.get());
5425  NextLB = SemaRef.ActOnFinishFullExpr(NextLB.get());
5426  if (!NextLB.isUsable())
5427  return 0;
5428  // UB + ST
5429  NextUB = SemaRef.BuildBinOp(CurScope, IncLoc, BO_Add, UB.get(), ST.get());
5430  if (!NextUB.isUsable())
5431  return 0;
5432  // UB = UB + ST
5433  NextUB =
5434  SemaRef.BuildBinOp(CurScope, IncLoc, BO_Assign, UB.get(), NextUB.get());
5435  NextUB = SemaRef.ActOnFinishFullExpr(NextUB.get());
5436  if (!NextUB.isUsable())
5437  return 0;
5438  if (isOpenMPLoopBoundSharingDirective(DKind)) {
5439  CombNextLB =
5440  SemaRef.BuildBinOp(CurScope, IncLoc, BO_Add, CombLB.get(), ST.get());
5441  if (!NextLB.isUsable())
5442  return 0;
5443  // LB = LB + ST
5444  CombNextLB = SemaRef.BuildBinOp(CurScope, IncLoc, BO_Assign, CombLB.get(),
5445  CombNextLB.get());
5446  CombNextLB = SemaRef.ActOnFinishFullExpr(CombNextLB.get());
5447  if (!CombNextLB.isUsable())
5448  return 0;
5449  // UB + ST
5450  CombNextUB =
5451  SemaRef.BuildBinOp(CurScope, IncLoc, BO_Add, CombUB.get(), ST.get());
5452  if (!CombNextUB.isUsable())
5453  return 0;
5454  // UB = UB + ST
5455  CombNextUB = SemaRef.BuildBinOp(CurScope, IncLoc, BO_Assign, CombUB.get(),
5456  CombNextUB.get());
5457  CombNextUB = SemaRef.ActOnFinishFullExpr(CombNextUB.get());
5458  if (!CombNextUB.isUsable())
5459  return 0;
5460  }
5461  }
5462 
5463  // Create the increment expression for the distribute loop when combined in the
5464  // same directive with 'for', as IV = IV + ST; ensure the upper bound expression
5465  // is based on PrevUB instead of NumIterations - used to implement 'for' when
5466  // found in combination with 'distribute', as in 'distribute parallel for'.
5467  SourceLocation DistIncLoc = AStmt->getBeginLoc();
5468  ExprResult DistCond, DistInc, PrevEUB, ParForInDistCond;
5469  if (isOpenMPLoopBoundSharingDirective(DKind)) {
5470  DistCond = SemaRef.BuildBinOp(CurScope, CondLoc, BO_LE, IV.get(), UB.get());
5471  assert(DistCond.isUsable() && "distribute cond expr was not built");
5472 
5473  DistInc =
5474  SemaRef.BuildBinOp(CurScope, DistIncLoc, BO_Add, IV.get(), ST.get());
5475  assert(DistInc.isUsable() && "distribute inc expr was not built");
5476  DistInc = SemaRef.BuildBinOp(CurScope, DistIncLoc, BO_Assign, IV.get(),
5477  DistInc.get());
5478  DistInc = SemaRef.ActOnFinishFullExpr(DistInc.get());
5479  assert(DistInc.isUsable() && "distribute inc expr was not built");
5480 
5481  // Build expression: UB = min(UB, prevUB) for #for in composite or combined
5482  // construct
5483  SourceLocation DistEUBLoc = AStmt->getBeginLoc();
5484  ExprResult IsUBGreater =
5485  SemaRef.BuildBinOp(CurScope, DistEUBLoc, BO_GT, UB.get(), PrevUB.get());
5486  ExprResult CondOp = SemaRef.ActOnConditionalOp(
5487  DistEUBLoc, DistEUBLoc, IsUBGreater.get(), PrevUB.get(), UB.get());
5488  PrevEUB = SemaRef.BuildBinOp(CurScope, DistIncLoc, BO_Assign, UB.get(),
5489  CondOp.get());
5490  PrevEUB = SemaRef.ActOnFinishFullExpr(PrevEUB.get());
5491 
5492  // Build IV <= PrevUB, to be used when 'parallel for' is used in combination
5493  // with a distribute directive with schedule(static, 1).
5494  ParForInDistCond =
5495  SemaRef.BuildBinOp(CurScope, CondLoc, BO_LE, IV.get(), PrevUB.get());
5496  }
5497 
5498  // Build updates and final values of the loop counters.
5499  bool HasErrors = false;
5500  Built.Counters.resize(NestedLoopCount);
5501  Built.Inits.resize(NestedLoopCount);
5502  Built.Updates.resize(NestedLoopCount);
5503  Built.Finals.resize(NestedLoopCount);
5504  {
5505  ExprResult Div;
5506  // Go from inner nested loop to outer.
5507  for (int Cnt = NestedLoopCount - 1; Cnt >= 0; --Cnt) {
5508  LoopIterationSpace &IS = IterSpaces[Cnt];
5509  SourceLocation UpdLoc = IS.IncSrcRange.getBegin();
5510  // Build: Iter = (IV / Div) % IS.NumIters
5511  // where Div is product of previous iterations' IS.NumIters.
5512  ExprResult Iter;
5513  if (Div.isUsable()) {
5514  Iter =
5515  SemaRef.BuildBinOp(CurScope, UpdLoc, BO_Div, IV.get(), Div.get());
5516  } else {
5517  Iter = IV;
5518  assert((Cnt == (int)NestedLoopCount - 1) &&
5519  "unusable div expected on first iteration only");
5520  }
5521 
5522  if (Cnt != 0 && Iter.isUsable())
5523  Iter = SemaRef.BuildBinOp(CurScope, UpdLoc, BO_Rem, Iter.get(),
5524  IS.NumIterations);
5525  if (!Iter.isUsable()) {
5526  HasErrors = true;
5527  break;
5528  }
5529 
5530  // Build update: IS.CounterVar(Private) = IS.Start + Iter * IS.Step
5531  auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IS.CounterVar)->getDecl());
5532  DeclRefExpr *CounterVar = buildDeclRefExpr(
5533  SemaRef, VD, IS.CounterVar->getType(), IS.CounterVar->getExprLoc(),
5534  /*RefersToCapture=*/true);
5535  ExprResult Init = buildCounterInit(SemaRef, CurScope, UpdLoc, CounterVar,
5536  IS.CounterInit, Captures);
5537  if (!Init.isUsable()) {
5538  HasErrors = true;
5539  break;
5540  }
5541  ExprResult Update = buildCounterUpdate(
5542  SemaRef, CurScope, UpdLoc, CounterVar, IS.CounterInit, Iter,
5543  IS.CounterStep, IS.Subtract, &Captures);
5544  if (!Update.isUsable()) {
5545  HasErrors = true;
5546  break;
5547  }
5548 
5549  // Build final: IS.CounterVar = IS.Start + IS.NumIters * IS.Step
5550  ExprResult Final = buildCounterUpdate(
5551  SemaRef, CurScope, UpdLoc, CounterVar, IS.CounterInit,
5552  IS.NumIterations, IS.CounterStep, IS.Subtract, &Captures);
5553  if (!Final.isUsable()) {
5554  HasErrors = true;
5555  break;
5556  }
5557 
5558  // Build Div for the next iteration: Div <- Div * IS.NumIters
5559  if (Cnt != 0) {
5560  if (Div.isUnset())
5561  Div = IS.NumIterations;
5562  else
5563  Div = SemaRef.BuildBinOp(CurScope, UpdLoc, BO_Mul, Div.get(),
5564  IS.NumIterations);
5565 
5566  // Add parentheses (for debugging purposes only).
5567  if (Div.isUsable())
5568  Div = tryBuildCapture(SemaRef, Div.get(), Captures);
5569  if (!Div.isUsable()) {
5570  HasErrors = true;
5571  break;
5572  }
5573  }
5574  if (!Update.isUsable() || !Final.isUsable()) {
5575  HasErrors = true;
5576  break;
5577  }
5578  // Save results
5579  Built.Counters[Cnt] = IS.CounterVar;
5580  Built.PrivateCounters[Cnt] = IS.PrivateCounterVar;
5581  Built.Inits[Cnt] = Init.get();
5582  Built.Updates[Cnt] = Update.get();
5583  Built.Finals[Cnt] = Final.get();
5584  }
5585  }
5586 
5587  if (HasErrors)
5588  return 0;
5589 
5590  // Save results
5591  Built.IterationVarRef = IV.get();
5592  Built.LastIteration = LastIteration.get();
5593  Built.NumIterations = NumIterations.get();
5594  Built.CalcLastIteration =
5595  SemaRef.ActOnFinishFullExpr(CalcLastIteration.get()).get();
5596  Built.PreCond = PreCond.get();
5597  Built.PreInits = buildPreInits(C, Captures);
5598  Built.Cond = Cond.get();
5599  Built.Init = Init.get();
5600  Built.Inc = Inc.get();
5601  Built.LB = LB.get();
5602  Built.UB = UB.get();
5603  Built.IL = IL.get();
5604  Built.ST = ST.get();
5605  Built.EUB = EUB.get();
5606  Built.NLB = NextLB.get();
5607  Built.NUB = NextUB.get();
5608  Built.PrevLB = PrevLB.get();
5609  Built.PrevUB = PrevUB.get();
5610  Built.DistInc = DistInc.get();
5611  Built.PrevEUB = PrevEUB.get();
5612  Built.DistCombinedFields.LB = CombLB.get();
5613  Built.DistCombinedFields.UB = CombUB.get();
5614  Built.DistCombinedFields.EUB = CombEUB.get();
5615  Built.DistCombinedFields.Init = CombInit.get();
5616  Built.DistCombinedFields.Cond = CombCond.get();
5617  Built.DistCombinedFields.NLB = CombNextLB.get();
5618  Built.DistCombinedFields.NUB = CombNextUB.get();
5619  Built.DistCombinedFields.DistCond = CombDistCond.get();
5620  Built.DistCombinedFields.ParForInDistCond = ParForInDistCond.get();
5621 
5622  return NestedLoopCount;
5623 }
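// Usage sketch (simplified; the real call sites appear later in this file):
// the directive handlers invoke this helper roughly as
//   OMPLoopDirective::HelperExprs B;
//   unsigned NestedLoopCount = checkOpenMPLoop(
//       OMPD_for, getCollapseNumberExpr(Clauses), getOrderedNumberExpr(Clauses),
//       AStmt, *this, *DSAStack, VarsWithImplicitDSA, B);
//   if (NestedLoopCount == 0)
//     return StmtError();
// and then hand B to the corresponding OMP*Directive::Create() call.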
5624 
5625 static Expr *getCollapseNumberExpr(ArrayRef<OMPClause *> Clauses) {
5626  auto CollapseClauses =
5627  OMPExecutableDirective::getClausesOfKind<OMPCollapseClause>(Clauses);
5628  if (CollapseClauses.begin() != CollapseClauses.end())
5629  return (*CollapseClauses.begin())->getNumForLoops();
5630  return nullptr;
5631 }
5632 
5633 static Expr *getOrderedNumberExpr(ArrayRef<OMPClause *> Clauses) {
5634  auto OrderedClauses =
5635  OMPExecutableDirective::getClausesOfKind<OMPOrderedClause>(Clauses);
5636  if (OrderedClauses.begin() != OrderedClauses.end())
5637  return (*OrderedClauses.begin())->getNumForLoops();
5638  return nullptr;
5639 }
5640 
5641 static bool checkSimdlenSafelenSpecified(Sema &S,
5642  const ArrayRef<OMPClause *> Clauses) {
5643  const OMPSafelenClause *Safelen = nullptr;
5644  const OMPSimdlenClause *Simdlen = nullptr;
5645 
5646  for (const OMPClause *Clause : Clauses) {
5647  if (Clause->getClauseKind() == OMPC_safelen)
5648  Safelen = cast<OMPSafelenClause>(Clause);
5649  else if (Clause->getClauseKind() == OMPC_simdlen)
5650  Simdlen = cast<OMPSimdlenClause>(Clause);
5651  if (Safelen && Simdlen)
5652  break;
5653  }
5654 
5655  if (Simdlen && Safelen) {
5656  const Expr *SimdlenLength = Simdlen->getSimdlen();
5657  const Expr *SafelenLength = Safelen->getSafelen();
5658  if (SimdlenLength->isValueDependent() || SimdlenLength->isTypeDependent() ||
5659  SimdlenLength->isInstantiationDependent() ||
5660  SimdlenLength->containsUnexpandedParameterPack())
5661  return false;
5662  if (SafelenLength->isValueDependent() || SafelenLength->isTypeDependent() ||
5663  SafelenLength->isInstantiationDependent() ||
5664  SafelenLength->containsUnexpandedParameterPack())
5665  return false;
5666  Expr::EvalResult SimdlenResult, SafelenResult;
5667  SimdlenLength->EvaluateAsInt(SimdlenResult, S.Context);
5668  SafelenLength->EvaluateAsInt(SafelenResult, S.Context);
5669  llvm::APSInt SimdlenRes = SimdlenResult.Val.getInt();
5670  llvm::APSInt SafelenRes = SafelenResult.Val.getInt();
5671  // OpenMP 4.5 [2.8.1, simd Construct, Restrictions]
5672  // If both simdlen and safelen clauses are specified, the value of the
5673  // simdlen parameter must be less than or equal to the value of the safelen
5674  // parameter.
5675  if (SimdlenRes > SafelenRes) {
5676  S.Diag(SimdlenLength->getExprLoc(),
5677  diag::err_omp_wrong_simdlen_safelen_values)
5678  << SimdlenLength->getSourceRange() << SafelenLength->getSourceRange();
5679  return true;