clang  9.0.0svn
SemaOpenMP.cpp
Go to the documentation of this file.
1 //===--- SemaOpenMP.cpp - Semantic Analysis for OpenMP constructs ---------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements semantic analysis for OpenMP directives and
10 /// clauses.
11 ///
12 //===----------------------------------------------------------------------===//
13 
14 #include "TreeTransform.h"
15 #include "clang/AST/ASTContext.h"
18 #include "clang/AST/Decl.h"
19 #include "clang/AST/DeclCXX.h"
20 #include "clang/AST/DeclOpenMP.h"
21 #include "clang/AST/StmtCXX.h"
22 #include "clang/AST/StmtOpenMP.h"
23 #include "clang/AST/StmtVisitor.h"
24 #include "clang/AST/TypeOrdering.h"
27 #include "clang/Sema/Lookup.h"
28 #include "clang/Sema/Scope.h"
29 #include "clang/Sema/ScopeInfo.h"
31 #include "llvm/ADT/PointerEmbeddedInt.h"
32 using namespace clang;
33 
34 //===----------------------------------------------------------------------===//
35 // Stack of data-sharing attributes for variables
36 //===----------------------------------------------------------------------===//
37 
39  Sema &SemaRef, Expr *E,
41  OpenMPClauseKind CKind, bool NoDiagnose);
42 
43 namespace {
44 /// Default data sharing attributes, which can be applied to directive.
46  DSA_unspecified = 0, ///< Data sharing attribute not specified.
47  DSA_none = 1 << 0, ///< Default data sharing attribute 'none'.
48  DSA_shared = 1 << 1, ///< Default data sharing attribute 'shared'.
49 };
50 
51 /// Attributes of the defaultmap clause.
53  DMA_unspecified, ///< Default mapping is not specified.
54  DMA_tofrom_scalar, ///< Default mapping is 'tofrom:scalar'.
55 };
56 
57 /// Stack for tracking declarations used in OpenMP directives and
58 /// clauses and their data-sharing attributes.
59 class DSAStackTy {
60 public:
 /// Data-sharing information computed for a single variable: the
 /// directive and clause it came from, the reference expression, an
 /// optional private copy, and the location of an implicit DSA, if any.
61  struct DSAVarData {
64  const Expr *RefExpr = nullptr;
65  DeclRefExpr *PrivateCopy = nullptr;
66  SourceLocation ImplicitDSALoc;
67  DSAVarData() = default;
68  DSAVarData(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind,
69  const Expr *RefExpr, DeclRefExpr *PrivateCopy,
70  SourceLocation ImplicitDSALoc)
71  : DKind(DKind), CKind(CKind), RefExpr(RefExpr),
72  PrivateCopy(PrivateCopy), ImplicitDSALoc(ImplicitDSALoc) {}
73  };
74  using OperatorOffsetTy =
 /// Map of 'depend' clauses to their operator/offset lists, used for
 /// doacross loop dependences.
76  using DoacrossDependMapTy =
77  llvm::DenseMap<OMPDependClause *, OperatorOffsetTy>;
78 
79 private:
 /// Explicit data-sharing attribute recorded for one declaration.
80  struct DSAInfo {
81  OpenMPClauseKind Attributes = OMPC_unknown;
82  /// Pointer to a reference expression and a flag which shows that the
83  /// variable is marked as lastprivate(true) or not (false).
84  llvm::PointerIntPair<const Expr *, 1, bool> RefExpr;
85  DeclRefExpr *PrivateCopy = nullptr;
86  };
87  using DeclSAMapTy = llvm::SmallDenseMap<const ValueDecl *, DSAInfo, 8>;
88  using AlignedMapTy = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 8>;
 /// Pair of (1-based index of the associated loop, captured variable).
89  using LCDeclInfo = std::pair<unsigned, VarDecl *>;
90  using LoopControlVariablesMapTy =
91  llvm::SmallDenseMap<const ValueDecl *, LCDeclInfo, 8>;
92  /// Struct that associates a component with the clause kind where they are
93  /// found.
94  struct MappedExprComponentTy {
97  };
98  using MappedExprComponentsTy =
99  llvm::DenseMap<const ValueDecl *, MappedExprComponentTy>;
100  using CriticalsWithHintsTy =
101  llvm::StringMap<std::pair<const OMPCriticalDirective *, llvm::APSInt>>;
 /// Reduction identifier for a taskgroup reduction item: either a builtin
 /// binary operator or a user-defined reduction expression.
102  struct ReductionData {
103  using BOKPtrType = llvm::PointerEmbeddedInt<BinaryOperatorKind, 16>;
104  SourceRange ReductionRange;
105  llvm::PointerUnion<const Expr *, BOKPtrType> ReductionOp;
106  ReductionData() = default;
107  void set(BinaryOperatorKind BO, SourceRange RR) {
108  ReductionRange = RR;
109  ReductionOp = BO;
110  }
111  void set(const Expr *RefExpr, SourceRange RR) {
112  ReductionRange = RR;
113  ReductionOp = RefExpr;
114  }
115  };
116  using DeclReductionMapTy =
117  llvm::SmallDenseMap<const ValueDecl *, ReductionData, 4>;
118 
 /// Per-region state: all maps and flags tracked for a single OpenMP
 /// directive on the stack.
119  struct SharingMapTy {
120  DeclSAMapTy SharingMap;
121  DeclReductionMapTy ReductionMap;
122  AlignedMapTy AlignedMap;
123  MappedExprComponentsTy MappedExprComponents;
124  LoopControlVariablesMapTy LCVMap;
125  DefaultDataSharingAttributes DefaultAttr = DSA_unspecified;
126  SourceLocation DefaultAttrLoc;
127  DefaultMapAttributes DefaultMapAttr = DMA_unspecified;
128  SourceLocation DefaultMapAttrLoc;
130  DeclarationNameInfo DirectiveName;
131  Scope *CurScope = nullptr;
132  SourceLocation ConstructLoc;
133  /// Set of 'depend' clauses with 'sink|source' dependence kind. Required to
134  /// get the data (loop counters etc.) about enclosing loop-based construct.
135  /// This data is required during codegen.
136  DoacrossDependMapTy DoacrossDepends;
137  /// First argument (Expr *) contains optional argument of the
138  /// 'ordered' clause, the second one is true if the region has 'ordered'
139  /// clause, false otherwise.
 /// Number of loops associated with the directive ('collapse' value).
141  unsigned AssociatedLoops = 1;
142  const Decl *PossiblyLoopCounter = nullptr;
143  bool NowaitRegion = false;
144  bool CancelRegion = false;
145  bool LoopStart = false;
146  SourceLocation InnerTeamsRegionLoc;
147  /// Reference to the taskgroup task_reduction reference expression.
148  Expr *TaskgroupReductionRef = nullptr;
149  llvm::DenseSet<QualType> MappedClassesQualTypes;
150  SharingMapTy(OpenMPDirectiveKind DKind, DeclarationNameInfo Name,
151  Scope *CurScope, SourceLocation Loc)
152  : Directive(DKind), DirectiveName(Name), CurScope(CurScope),
153  ConstructLoc(Loc) {}
154  SharingMapTy() = default;
155  };
156 
157  using StackTy = SmallVector<SharingMapTy, 4>;
158 
159  /// Stack of used declaration and their data-sharing attributes.
160  DeclSAMapTy Threadprivates;
161  const FunctionScopeInfo *CurrentNonCapturingFunctionScope = nullptr;
163  /// true, if check for DSA must be from parent directive, false, if
164  /// from current directive.
165  OpenMPClauseKind ClauseKindMode = OMPC_unknown;
166  Sema &SemaRef;
167  bool ForceCapturing = false;
168  /// true if all the variables in the target executable directives must be
169  /// captured by reference.
170  bool ForceCaptureByReferenceInTargetExecutable = false;
171  CriticalsWithHintsTy Criticals;
172 
173  using iterator = StackTy::const_reverse_iterator;
174 
175  DSAVarData getDSA(iterator &Iter, ValueDecl *D) const;
176 
177  /// Checks if the variable is a local for OpenMP region.
178  bool isOpenMPLocal(VarDecl *D, iterator Iter) const;
179 
 /// True when there is no region for the current function on the stack.
180  bool isStackEmpty() const {
181  return Stack.empty() ||
182  Stack.back().second != CurrentNonCapturingFunctionScope ||
183  Stack.back().first.empty();
184  }
185 
186  /// Vector of previously declared requires directives
188 
189 public:
190  explicit DSAStackTy(Sema &S) : SemaRef(S) {}
191 
 /// True while the clauses of a directive are being parsed.
192  bool isClauseParsingMode() const { return ClauseKindMode != OMPC_unknown; }
193  OpenMPClauseKind getClauseParsingMode() const {
194  assert(isClauseParsingMode() && "Must be in clause parsing mode.");
195  return ClauseKindMode;
196  }
197  void setClauseParsingMode(OpenMPClauseKind K) { ClauseKindMode = K; }
198 
199  bool isForceVarCapturing() const { return ForceCapturing; }
200  void setForceVarCapturing(bool V) { ForceCapturing = V; }
201 
202  void setForceCaptureByReferenceInTargetExecutable(bool V) {
203  ForceCaptureByReferenceInTargetExecutable = V;
204  }
205  bool isForceCaptureByReferenceInTargetExecutable() const {
206  return ForceCaptureByReferenceInTargetExecutable;
207  }
208 
 /// Pushes a new region for directive \a DKind onto the stack, starting a
 /// fresh per-function stack if the current function scope changed.
209  void push(OpenMPDirectiveKind DKind, const DeclarationNameInfo &DirName,
210  Scope *CurScope, SourceLocation Loc) {
211  if (Stack.empty() ||
212  Stack.back().second != CurrentNonCapturingFunctionScope)
213  Stack.emplace_back(StackTy(), CurrentNonCapturingFunctionScope);
214  Stack.back().first.emplace_back(DKind, DirName, CurScope, Loc);
215  Stack.back().first.back().DefaultAttrLoc = Loc;
216  }
217 
 /// Pops the innermost region of the current function.
218  void pop() {
219  assert(!Stack.back().first.empty() &&
220  "Data-sharing attributes stack is empty!");
221  Stack.back().first.pop_back();
222  }
223 
224  /// Marks that we're started loop parsing.
225  void loopInit() {
226  assert(isOpenMPLoopDirective(getCurrentDirective()) &&
227  "Expected loop-based directive.");
228  Stack.back().first.back().LoopStart = true;
229  }
230  /// Start capturing of the variables in the loop context.
231  void loopStart() {
232  assert(isOpenMPLoopDirective(getCurrentDirective()) &&
233  "Expected loop-based directive.");
234  Stack.back().first.back().LoopStart = false;
235  }
236  /// true, if variables are captured, false otherwise.
237  bool isLoopStarted() const {
238  assert(isOpenMPLoopDirective(getCurrentDirective()) &&
239  "Expected loop-based directive.");
240  return !Stack.back().first.back().LoopStart;
241  }
242  /// Marks (or clears) declaration as possibly loop counter.
243  void resetPossibleLoopCounter(const Decl *D = nullptr) {
244  Stack.back().first.back().PossiblyLoopCounter =
245  D ? D->getCanonicalDecl() : D;
246  }
247  /// Gets the possible loop counter decl.
248  const Decl *getPossiblyLoopCunter() const {
249  return Stack.back().first.back().PossiblyLoopCounter;
250  }
251  /// Start new OpenMP region stack in new non-capturing function.
252  void pushFunction() {
253  const FunctionScopeInfo *CurFnScope = SemaRef.getCurFunction();
254  assert(!isa<CapturingScopeInfo>(CurFnScope));
255  CurrentNonCapturingFunctionScope = CurFnScope;
256  }
257  /// Pop region stack for non-capturing function.
258  void popFunction(const FunctionScopeInfo *OldFSI) {
259  if (!Stack.empty() && Stack.back().second == OldFSI) {
260  assert(Stack.back().first.empty());
261  Stack.pop_back();
262  }
263  CurrentNonCapturingFunctionScope = nullptr;
 // Restore the innermost enclosing non-capturing function scope, if any.
264  for (const FunctionScopeInfo *FSI : llvm::reverse(SemaRef.FunctionScopes)) {
265  if (!isa<CapturingScopeInfo>(FSI)) {
266  CurrentNonCapturingFunctionScope = FSI;
267  break;
268  }
269  }
270  }
271 
 /// Records a named 'critical' directive together with its 'hint' value.
272  void addCriticalWithHint(const OMPCriticalDirective *D, llvm::APSInt Hint) {
273  Criticals.try_emplace(D->getDirectiveName().getAsString(), D, Hint);
274  }
 /// Returns the previously seen 'critical' directive with the given name
 /// and its hint, or a null pair if none was recorded.
275  const std::pair<const OMPCriticalDirective *, llvm::APSInt>
276  getCriticalWithHint(const DeclarationNameInfo &Name) const {
277  auto I = Criticals.find(Name.getAsString());
278  if (I != Criticals.end())
279  return I->second;
280  return std::make_pair(nullptr, llvm::APSInt());
281  }
282  /// If 'aligned' declaration for given variable \a D was not seen yet,
283  /// add it and return NULL; otherwise return previous occurrence's expression
284  /// for diagnostics.
285  const Expr *addUniqueAligned(const ValueDecl *D, const Expr *NewDE);
286 
287  /// Register specified variable as loop control variable.
288  void addLoopControlVariable(const ValueDecl *D, VarDecl *Capture);
289  /// Check if the specified variable is a loop control variable for
290  /// current region.
291  /// \return The index of the loop control variable in the list of associated
292  /// for-loops (from outer to inner).
293  const LCDeclInfo isLoopControlVariable(const ValueDecl *D) const;
294  /// Check if the specified variable is a loop control variable for
295  /// parent region.
296  /// \return The index of the loop control variable in the list of associated
297  /// for-loops (from outer to inner).
298  const LCDeclInfo isParentLoopControlVariable(const ValueDecl *D) const;
299  /// Get the loop control variable for the I-th loop (or nullptr) in
300  /// parent directive.
301  const ValueDecl *getParentLoopControlVariable(unsigned I) const;
302 
303  /// Adds explicit data sharing attribute to the specified declaration.
304  void addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
305  DeclRefExpr *PrivateCopy = nullptr);
306 
307  /// Adds additional information for the reduction items with the reduction id
308  /// represented as an operator.
309  void addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
310  BinaryOperatorKind BOK);
311  /// Adds additional information for the reduction items with the reduction id
312  /// represented as reduction identifier.
313  void addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
314  const Expr *ReductionRef);
315  /// Returns the location and reduction operation from the innermost parent
316  /// region for the given \p D.
317  const DSAVarData
318  getTopMostTaskgroupReductionData(const ValueDecl *D, SourceRange &SR,
319  BinaryOperatorKind &BOK,
320  Expr *&TaskgroupDescriptor) const;
321  /// Returns the location and reduction operation from the innermost parent
322  /// region for the given \p D.
323  const DSAVarData
324  getTopMostTaskgroupReductionData(const ValueDecl *D, SourceRange &SR,
325  const Expr *&ReductionRef,
326  Expr *&TaskgroupDescriptor) const;
327  /// Return reduction reference expression for the current taskgroup.
328  Expr *getTaskgroupReductionRef() const {
329  assert(Stack.back().first.back().Directive == OMPD_taskgroup &&
330  "taskgroup reference expression requested for non taskgroup "
331  "directive.");
332  return Stack.back().first.back().TaskgroupReductionRef;
333  }
334  /// Checks if the given \p VD declaration is actually a taskgroup reduction
335  /// descriptor variable at the \p Level of OpenMP regions.
336  bool isTaskgroupReductionRef(const ValueDecl *VD, unsigned Level) const {
337  return Stack.back().first[Level].TaskgroupReductionRef &&
338  cast<DeclRefExpr>(Stack.back().first[Level].TaskgroupReductionRef)
339  ->getDecl() == VD;
340  }
341 
342  /// Returns data sharing attributes from top of the stack for the
343  /// specified declaration.
344  const DSAVarData getTopDSA(ValueDecl *D, bool FromParent);
345  /// Returns data-sharing attributes for the specified declaration.
346  const DSAVarData getImplicitDSA(ValueDecl *D, bool FromParent) const;
347  /// Checks if the specified variables has data-sharing attributes which
348  /// match specified \a CPred predicate in any directive which matches \a DPred
349  /// predicate.
350  const DSAVarData
351  hasDSA(ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
352  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
353  bool FromParent) const;
354  /// Checks if the specified variables has data-sharing attributes which
355  /// match specified \a CPred predicate in any innermost directive which
356  /// matches \a DPred predicate.
357  const DSAVarData
358  hasInnermostDSA(ValueDecl *D,
359  const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
360  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
361  bool FromParent) const;
362  /// Checks if the specified variables has explicit data-sharing
363  /// attributes which match specified \a CPred predicate at the specified
364  /// OpenMP region.
365  bool hasExplicitDSA(const ValueDecl *D,
366  const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
367  unsigned Level, bool NotLastprivate = false) const;
368 
369  /// Returns true if the directive at level \Level matches in the
370  /// specified \a DPred predicate.
371  bool hasExplicitDirective(
372  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
373  unsigned Level) const;
374 
375  /// Finds a directive which matches specified \a DPred predicate.
376  bool hasDirective(
377  const llvm::function_ref<bool(
379  DPred,
380  bool FromParent) const;
381 
382  /// Returns currently analyzed directive.
383  OpenMPDirectiveKind getCurrentDirective() const {
384  return isStackEmpty() ? OMPD_unknown : Stack.back().first.back().Directive;
385  }
386  /// Returns directive kind at specified level.
387  OpenMPDirectiveKind getDirective(unsigned Level) const {
388  assert(!isStackEmpty() && "No directive at specified level.");
389  return Stack.back().first[Level].Directive;
390  }
391  /// Returns parent directive.
392  OpenMPDirectiveKind getParentDirective() const {
393  if (isStackEmpty() || Stack.back().first.size() == 1)
394  return OMPD_unknown;
395  return std::next(Stack.back().first.rbegin())->Directive;
396  }
397 
398  /// Add requires decl to internal vector
399  void addRequiresDecl(OMPRequiresDecl *RD) {
400  RequiresDecls.push_back(RD);
401  }
402 
403  /// Checks for a duplicate clause amongst previously declared requires
404  /// directives
405  bool hasDuplicateRequiresClause(ArrayRef<OMPClause *> ClauseList) const {
406  bool IsDuplicate = false;
407  for (OMPClause *CNew : ClauseList) {
408  for (const OMPRequiresDecl *D : RequiresDecls) {
409  for (const OMPClause *CPrev : D->clauselists()) {
410  if (CNew->getClauseKind() == CPrev->getClauseKind()) {
411  SemaRef.Diag(CNew->getBeginLoc(),
412  diag::err_omp_requires_clause_redeclaration)
413  << getOpenMPClauseName(CNew->getClauseKind());
414  SemaRef.Diag(CPrev->getBeginLoc(),
415  diag::note_omp_requires_previous_clause)
416  << getOpenMPClauseName(CPrev->getClauseKind());
417  IsDuplicate = true;
418  }
419  }
420  }
421  }
422  return IsDuplicate;
423  }
424 
425  /// Set default data sharing attribute to none.
426  void setDefaultDSANone(SourceLocation Loc) {
427  assert(!isStackEmpty());
428  Stack.back().first.back().DefaultAttr = DSA_none;
429  Stack.back().first.back().DefaultAttrLoc = Loc;
430  }
431  /// Set default data sharing attribute to shared.
432  void setDefaultDSAShared(SourceLocation Loc) {
433  assert(!isStackEmpty());
434  Stack.back().first.back().DefaultAttr = DSA_shared;
435  Stack.back().first.back().DefaultAttrLoc = Loc;
436  }
437  /// Set default data mapping attribute to 'tofrom:scalar'.
438  void setDefaultDMAToFromScalar(SourceLocation Loc) {
439  assert(!isStackEmpty());
440  Stack.back().first.back().DefaultMapAttr = DMA_tofrom_scalar;
441  Stack.back().first.back().DefaultMapAttrLoc = Loc;
442  }
443 
 /// Returns the 'default' clause attribute of the current region, or
 /// DSA_unspecified if the stack is empty.
444  DefaultDataSharingAttributes getDefaultDSA() const {
445  return isStackEmpty() ? DSA_unspecified
446  : Stack.back().first.back().DefaultAttr;
447  }
448  SourceLocation getDefaultDSALocation() const {
449  return isStackEmpty() ? SourceLocation()
450  : Stack.back().first.back().DefaultAttrLoc;
451  }
452  DefaultMapAttributes getDefaultDMA() const {
453  return isStackEmpty() ? DMA_unspecified
454  : Stack.back().first.back().DefaultMapAttr;
455  }
456  DefaultMapAttributes getDefaultDMAAtLevel(unsigned Level) const {
457  return Stack.back().first[Level].DefaultMapAttr;
458  }
459  SourceLocation getDefaultDMALocation() const {
460  return isStackEmpty() ? SourceLocation()
461  : Stack.back().first.back().DefaultMapAttrLoc;
462  }
463 
464  /// Checks if the specified variable is a threadprivate.
465  bool isThreadPrivate(VarDecl *D) {
466  const DSAVarData DVar = getTopDSA(D, false);
467  return isOpenMPThreadPrivate(DVar.CKind);
468  }
469 
470  /// Marks current region as ordered (it has an 'ordered' clause).
471  void setOrderedRegion(bool IsOrdered, const Expr *Param,
472  OMPOrderedClause *Clause) {
473  assert(!isStackEmpty());
474  if (IsOrdered)
475  Stack.back().first.back().OrderedRegion.emplace(Param, Clause);
476  else
477  Stack.back().first.back().OrderedRegion.reset();
478  }
479  /// Returns true, if region is ordered (has associated 'ordered' clause),
480  /// false - otherwise.
481  bool isOrderedRegion() const {
482  if (isStackEmpty())
483  return false;
484  return Stack.back().first.rbegin()->OrderedRegion.hasValue();
485  }
486  /// Returns optional parameter for the ordered region.
487  std::pair<const Expr *, OMPOrderedClause *> getOrderedRegionParam() const {
488  if (isStackEmpty() ||
489  !Stack.back().first.rbegin()->OrderedRegion.hasValue())
490  return std::make_pair(nullptr, nullptr);
491  return Stack.back().first.rbegin()->OrderedRegion.getValue();
492  }
493  /// Returns true, if parent region is ordered (has associated
494  /// 'ordered' clause), false - otherwise.
495  bool isParentOrderedRegion() const {
496  if (isStackEmpty() || Stack.back().first.size() == 1)
497  return false;
498  return std::next(Stack.back().first.rbegin())->OrderedRegion.hasValue();
499  }
500  /// Returns optional parameter for the ordered region.
501  std::pair<const Expr *, OMPOrderedClause *>
502  getParentOrderedRegionParam() const {
503  if (isStackEmpty() || Stack.back().first.size() == 1 ||
504  !std::next(Stack.back().first.rbegin())->OrderedRegion.hasValue())
505  return std::make_pair(nullptr, nullptr);
506  return std::next(Stack.back().first.rbegin())->OrderedRegion.getValue();
507  }
508  /// Marks current region as nowait (it has a 'nowait' clause).
509  void setNowaitRegion(bool IsNowait = true) {
510  assert(!isStackEmpty());
511  Stack.back().first.back().NowaitRegion = IsNowait;
512  }
513  /// Returns true, if parent region is nowait (has associated
514  /// 'nowait' clause), false - otherwise.
515  bool isParentNowaitRegion() const {
516  if (isStackEmpty() || Stack.back().first.size() == 1)
517  return false;
518  return std::next(Stack.back().first.rbegin())->NowaitRegion;
519  }
520  /// Marks parent region as cancel region.
521  void setParentCancelRegion(bool Cancel = true) {
522  if (!isStackEmpty() && Stack.back().first.size() > 1) {
523  auto &StackElemRef = *std::next(Stack.back().first.rbegin());
 // NOTE(review): the '|| Cancel' already incorporates the old flag via
 // '|='; the net effect is simply CancelRegion |= Cancel.
524  StackElemRef.CancelRegion |= StackElemRef.CancelRegion || Cancel;
525  }
526  }
527  /// Return true if current region has inner cancel construct.
528  bool isCancelRegion() const {
529  return isStackEmpty() ? false : Stack.back().first.back().CancelRegion;
530  }
531 
532  /// Set collapse value for the region.
533  void setAssociatedLoops(unsigned Val) {
534  assert(!isStackEmpty());
535  Stack.back().first.back().AssociatedLoops = Val;
536  }
537  /// Return collapse value for region.
538  unsigned getAssociatedLoops() const {
539  return isStackEmpty() ? 0 : Stack.back().first.back().AssociatedLoops;
540  }
541 
542  /// Marks current target region as one with closely nested teams
543  /// region.
544  void setParentTeamsRegionLoc(SourceLocation TeamsRegionLoc) {
545  if (!isStackEmpty() && Stack.back().first.size() > 1) {
546  std::next(Stack.back().first.rbegin())->InnerTeamsRegionLoc =
547  TeamsRegionLoc;
548  }
549  }
550  /// Returns true, if current region has closely nested teams region.
551  bool hasInnerTeamsRegion() const {
552  return getInnerTeamsRegionLoc().isValid();
553  }
554  /// Returns location of the nested teams region (if any).
555  SourceLocation getInnerTeamsRegionLoc() const {
556  return isStackEmpty() ? SourceLocation()
557  : Stack.back().first.back().InnerTeamsRegionLoc;
558  }
559 
560  Scope *getCurScope() const {
561  return isStackEmpty() ? nullptr : Stack.back().first.back().CurScope;
562  }
563  SourceLocation getConstructLoc() const {
564  return isStackEmpty() ? SourceLocation()
565  : Stack.back().first.back().ConstructLoc;
566  }
567 
568  /// Do the check specified in \a Check to all component lists and return true
569  /// if any issue is found.
570  bool checkMappableExprComponentListsForDecl(
571  const ValueDecl *VD, bool CurrentRegionOnly,
572  const llvm::function_ref<
575  Check) const {
576  if (isStackEmpty())
577  return false;
578  auto SI = Stack.back().first.rbegin();
579  auto SE = Stack.back().first.rend();
580 
581  if (SI == SE)
582  return false;
583 
 // For the current region only, restrict the walk to the innermost
 // entry; otherwise skip it and inspect enclosing regions.
584  if (CurrentRegionOnly)
585  SE = std::next(SI);
586  else
587  std::advance(SI, 1);
588 
589  for (; SI != SE; ++SI) {
590  auto MI = SI->MappedExprComponents.find(VD);
591  if (MI != SI->MappedExprComponents.end())
593  MI->second.Components)
594  if (Check(L, MI->second.Kind))
595  return true;
596  }
597  return false;
598  }
599 
600  /// Do the check specified in \a Check to all component lists at a given level
601  /// and return true if any issue is found.
602  bool checkMappableExprComponentListsForDeclAtLevel(
603  const ValueDecl *VD, unsigned Level,
604  const llvm::function_ref<
607  Check) const {
608  if (isStackEmpty())
609  return false;
610 
611  auto StartI = Stack.back().first.begin();
612  auto EndI = Stack.back().first.end();
613  if (std::distance(StartI, EndI) <= (int)Level)
614  return false;
615  std::advance(StartI, Level);
616 
617  auto MI = StartI->MappedExprComponents.find(VD);
618  if (MI != StartI->MappedExprComponents.end())
620  MI->second.Components)
621  if (Check(L, MI->second.Kind))
622  return true;
623  return false;
624  }
625 
626  /// Create a new mappable expression component list associated with a given
627  /// declaration and initialize it with the provided list of components.
628  void addMappableExpressionComponents(
629  const ValueDecl *VD,
631  OpenMPClauseKind WhereFoundClauseKind) {
632  assert(!isStackEmpty() &&
633  "Not expecting to retrieve components from a empty stack!");
634  MappedExprComponentTy &MEC =
635  Stack.back().first.back().MappedExprComponents[VD];
636  // Create new entry and append the new components there.
637  MEC.Components.resize(MEC.Components.size() + 1);
638  MEC.Components.back().append(Components.begin(), Components.end());
639  MEC.Kind = WhereFoundClauseKind;
640  }
641 
 /// Returns the 0-based nesting depth of the current region.
642  unsigned getNestingLevel() const {
643  assert(!isStackEmpty());
644  return Stack.back().first.size() - 1;
645  }
 /// Records a doacross 'depend' clause on the parent worksharing region.
646  void addDoacrossDependClause(OMPDependClause *C,
647  const OperatorOffsetTy &OpsOffs) {
648  assert(!isStackEmpty() && Stack.back().first.size() > 1);
649  SharingMapTy &StackElem = *std::next(Stack.back().first.rbegin());
650  assert(isOpenMPWorksharingDirective(StackElem.Directive));
651  StackElem.DoacrossDepends.try_emplace(C, OpsOffs);
652  }
 /// Returns all doacross 'depend' clauses of the current region (empty
 /// range for non-worksharing directives).
653  llvm::iterator_range<DoacrossDependMapTy::const_iterator>
654  getDoacrossDependClauses() const {
655  assert(!isStackEmpty());
656  const SharingMapTy &StackElem = Stack.back().first.back();
657  if (isOpenMPWorksharingDirective(StackElem.Directive)) {
658  const DoacrossDependMapTy &Ref = StackElem.DoacrossDepends;
659  return llvm::make_range(Ref.begin(), Ref.end());
660  }
661  return llvm::make_range(StackElem.DoacrossDepends.end(),
662  StackElem.DoacrossDepends.end());
663  }
664 
665  // Store types of classes which have been explicitly mapped
666  void addMappedClassesQualTypes(QualType QT) {
667  SharingMapTy &StackElem = Stack.back().first.back();
668  StackElem.MappedClassesQualTypes.insert(QT);
669  }
670 
671  // Return true if the given class type was previously explicitly mapped
672  bool isClassPreviouslyMapped(QualType QT) const {
673  const SharingMapTy &StackElem = Stack.back().first.back();
674  return StackElem.MappedClassesQualTypes.count(QT) != 0;
675  }
676 
677 };
678 
679 bool isImplicitTaskingRegion(OpenMPDirectiveKind DKind) {
680  return isOpenMPParallelDirective(DKind) || isOpenMPTeamsDirective(DKind);
681 }
682 
683 bool isImplicitOrExplicitTaskingRegion(OpenMPDirectiveKind DKind) {
684  return isImplicitTaskingRegion(DKind) || isOpenMPTaskingDirective(DKind) || DKind == OMPD_unknown;
685 }
686 
687 } // namespace
688 
689 static const Expr *getExprAsWritten(const Expr *E) {
690  if (const auto *FE = dyn_cast<FullExpr>(E))
691  E = FE->getSubExpr();
692 
693  if (const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
694  E = MTE->GetTemporaryExpr();
695 
696  while (const auto *Binder = dyn_cast<CXXBindTemporaryExpr>(E))
697  E = Binder->getSubExpr();
698 
699  if (const auto *ICE = dyn_cast<ImplicitCastExpr>(E))
700  E = ICE->getSubExprAsWritten();
701  return E->IgnoreParens();
702 }
703 
705  return const_cast<Expr *>(getExprAsWritten(const_cast<const Expr *>(E)));
706 }
707 
708 static const ValueDecl *getCanonicalDecl(const ValueDecl *D) {
709  if (const auto *CED = dyn_cast<OMPCapturedExprDecl>(D))
710  if (const auto *ME = dyn_cast<MemberExpr>(getExprAsWritten(CED->getInit())))
711  D = ME->getMemberDecl();
712  const auto *VD = dyn_cast<VarDecl>(D);
713  const auto *FD = dyn_cast<FieldDecl>(D);
714  if (VD != nullptr) {
715  VD = VD->getCanonicalDecl();
716  D = VD;
717  } else {
718  assert(FD);
719  FD = FD->getCanonicalDecl();
720  D = FD;
721  }
722  return D;
723 }
724 
726  return const_cast<ValueDecl *>(
727  getCanonicalDecl(const_cast<const ValueDecl *>(D)));
728 }
729 
/// Computes the data-sharing attributes for declaration \p D starting at
/// stack entry \p Iter, applying the OpenMP predetermined rules and the
/// implicitly-determined rules (the 'default' clause and the
/// parallel/teams/task defaults), recursing into enclosing regions when the
/// attribute is inherited from the enclosing context.
730 DSAStackTy::DSAVarData DSAStackTy::getDSA(iterator &Iter,
731  ValueDecl *D) const {
732  D = getCanonicalDecl(D);
733  auto *VD = dyn_cast<VarDecl>(D);
734  const auto *FD = dyn_cast<FieldDecl>(D);
735  DSAVarData DVar;
 // Base case: walked past the outermost region of the current function.
736  if (isStackEmpty() || Iter == Stack.back().first.rend()) {
737  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
738  // in a region but not in construct]
739  // File-scope or namespace-scope variables referenced in called routines
740  // in the region are shared unless they appear in a threadprivate
741  // directive.
742  if (VD && !VD->isFunctionOrMethodVarDecl() && !isa<ParmVarDecl>(VD))
743  DVar.CKind = OMPC_shared;
744 
745  // OpenMP [2.9.1.2, Data-sharing Attribute Rules for Variables Referenced
746  // in a region but not in construct]
747  // Variables with static storage duration that are declared in called
748  // routines in the region are shared.
749  if (VD && VD->hasGlobalStorage())
750  DVar.CKind = OMPC_shared;
751 
752  // Non-static data members are shared by default.
753  if (FD)
754  DVar.CKind = OMPC_shared;
755 
756  return DVar;
757  }
758 
759  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
760  // in a Construct, C/C++, predetermined, p.1]
761  // Variables with automatic storage duration that are declared in a scope
762  // inside the construct are private.
763  if (VD && isOpenMPLocal(VD, Iter) && VD->isLocalVarDecl() &&
764  (VD->getStorageClass() == SC_Auto || VD->getStorageClass() == SC_None)) {
765  DVar.CKind = OMPC_private;
766  return DVar;
767  }
768 
769  DVar.DKind = Iter->Directive;
770  // Explicitly specified attributes and local variables with predetermined
771  // attributes.
772  if (Iter->SharingMap.count(D)) {
773  const DSAInfo &Data = Iter->SharingMap.lookup(D);
774  DVar.RefExpr = Data.RefExpr.getPointer();
775  DVar.PrivateCopy = Data.PrivateCopy;
776  DVar.CKind = Data.Attributes;
777  DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
778  return DVar;
779  }
780 
781  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
782  // in a Construct, C/C++, implicitly determined, p.1]
783  // In a parallel or task construct, the data-sharing attributes of these
784  // variables are determined by the default clause, if present.
785  switch (Iter->DefaultAttr) {
786  case DSA_shared:
787  DVar.CKind = OMPC_shared;
788  DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
789  return DVar;
790  case DSA_none:
 // default(none): no implicit DSA; DVar.CKind stays OMPC_unknown so the
 // caller can diagnose the missing explicit attribute.
791  return DVar;
792  case DSA_unspecified:
793  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
794  // in a Construct, implicitly determined, p.2]
795  // In a parallel construct, if no default clause is present, these
796  // variables are shared.
797  DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
798  if (isOpenMPParallelDirective(DVar.DKind) ||
799  isOpenMPTeamsDirective(DVar.DKind)) {
800  DVar.CKind = OMPC_shared;
801  return DVar;
802  }
803 
804  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
805  // in a Construct, implicitly determined, p.4]
806  // In a task construct, if no default clause is present, a variable that in
807  // the enclosing context is determined to be shared by all implicit tasks
808  // bound to the current team is shared.
809  if (isOpenMPTaskingDirective(DVar.DKind)) {
810  DSAVarData DVarTemp;
 // Walk outwards until an implicit tasking region (parallel/teams) is
 // reached, checking whether the variable is shared everywhere on the way.
811  iterator I = Iter, E = Stack.back().first.rend();
812  do {
813  ++I;
814  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables
815  // Referenced in a Construct, implicitly determined, p.6]
816  // In a task construct, if no default clause is present, a variable
817  // whose data-sharing attribute is not determined by the rules above is
818  // firstprivate.
819  DVarTemp = getDSA(I, D);
820  if (DVarTemp.CKind != OMPC_shared) {
821  DVar.RefExpr = nullptr;
822  DVar.CKind = OMPC_firstprivate;
823  return DVar;
824  }
825  } while (I != E && !isImplicitTaskingRegion(I->Directive));
826  DVar.CKind =
827  (DVarTemp.CKind == OMPC_unknown) ? OMPC_firstprivate : OMPC_shared;
828  return DVar;
829  }
830  }
831  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
832  // in a Construct, implicitly determined, p.3]
833  // For constructs other than task, if no default clause is present, these
834  // variables inherit their data-sharing attributes from the enclosing
835  // context.
836  return getDSA(++Iter, D);
837 }
838 
839 const Expr *DSAStackTy::addUniqueAligned(const ValueDecl *D,
840  const Expr *NewDE) {
841  assert(!isStackEmpty() && "Data sharing attributes stack is empty");
842  D = getCanonicalDecl(D);
843  SharingMapTy &StackElem = Stack.back().first.back();
844  auto It = StackElem.AlignedMap.find(D);
845  if (It == StackElem.AlignedMap.end()) {
846  assert(NewDE && "Unexpected nullptr expr to be added into aligned map");
847  StackElem.AlignedMap[D] = NewDE;
848  return nullptr;
849  }
850  assert(It->second && "Unexpected nullptr expr in the aligned map");
851  return It->second;
852 }
853 
854 void DSAStackTy::addLoopControlVariable(const ValueDecl *D, VarDecl *Capture) {
855  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
856  D = getCanonicalDecl(D);
857  SharingMapTy &StackElem = Stack.back().first.back();
858  StackElem.LCVMap.try_emplace(
859  D, LCDeclInfo(StackElem.LCVMap.size() + 1, Capture));
860 }
861 
862 const DSAStackTy::LCDeclInfo
863 DSAStackTy::isLoopControlVariable(const ValueDecl *D) const {
864  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
865  D = getCanonicalDecl(D);
866  const SharingMapTy &StackElem = Stack.back().first.back();
867  auto It = StackElem.LCVMap.find(D);
868  if (It != StackElem.LCVMap.end())
869  return It->second;
870  return {0, nullptr};
871 }
872 
873 const DSAStackTy::LCDeclInfo
874 DSAStackTy::isParentLoopControlVariable(const ValueDecl *D) const {
875  assert(!isStackEmpty() && Stack.back().first.size() > 1 &&
876  "Data-sharing attributes stack is empty");
877  D = getCanonicalDecl(D);
878  const SharingMapTy &StackElem = *std::next(Stack.back().first.rbegin());
879  auto It = StackElem.LCVMap.find(D);
880  if (It != StackElem.LCVMap.end())
881  return It->second;
882  return {0, nullptr};
883 }
884 
885 const ValueDecl *DSAStackTy::getParentLoopControlVariable(unsigned I) const {
886  assert(!isStackEmpty() && Stack.back().first.size() > 1 &&
887  "Data-sharing attributes stack is empty");
888  const SharingMapTy &StackElem = *std::next(Stack.back().first.rbegin());
889  if (StackElem.LCVMap.size() < I)
890  return nullptr;
891  for (const auto &Pair : StackElem.LCVMap)
892  if (Pair.second.first == I)
893  return Pair.first;
894  return nullptr;
895 }
896 
/// Records data-sharing attribute \p A for declaration \p D, with \p E as the
/// clause expression the attribute originates from. Threadprivate items are
/// tracked in the global Threadprivates map; all other attributes go into the
/// innermost region's SharingMap. \p PrivateCopy, if non-null, is the
/// generated private copy whose declaration receives the same attribute.
void DSAStackTy::addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
                        DeclRefExpr *PrivateCopy) {
  D = getCanonicalDecl(D);
  if (A == OMPC_threadprivate) {
    // Threadprivate variables are global to the stack, not per-region.
    DSAInfo &Data = Threadprivates[D];
    Data.Attributes = A;
    Data.RefExpr.setPointer(E);
    Data.PrivateCopy = nullptr;
  } else {
    assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
    DSAInfo &Data = Stack.back().first.back().SharingMap[D];
    // Only a limited set of attribute transitions is legal; in particular an
    // item may be both firstprivate and lastprivate on the same construct.
    assert(Data.Attributes == OMPC_unknown || (A == Data.Attributes) ||
           (A == OMPC_firstprivate && Data.Attributes == OMPC_lastprivate) ||
           (A == OMPC_lastprivate && Data.Attributes == OMPC_firstprivate) ||
           (isLoopControlVariable(D).first && A == OMPC_private));
    if (A == OMPC_lastprivate && Data.Attributes == OMPC_firstprivate) {
      // The int bit of RefExpr marks a firstprivate item that is also
      // lastprivate; keep the firstprivate attribute as primary.
      Data.RefExpr.setInt(/*IntVal=*/true);
      return;
    }
    const bool IsLastprivate =
        A == OMPC_lastprivate || Data.Attributes == OMPC_lastprivate;
    Data.Attributes = A;
    Data.RefExpr.setPointerAndInt(E, IsLastprivate);
    Data.PrivateCopy = PrivateCopy;
    if (PrivateCopy) {
      // Give the private copy's declaration the same DSA. Note this lookup may
      // create a new map entry; the outer 'Data' reference is intentionally
      // not used after this point.
      DSAInfo &Data =
          Stack.back().first.back().SharingMap[PrivateCopy->getDecl()];
      Data.Attributes = A;
      Data.RefExpr.setPointerAndInt(PrivateCopy, IsLastprivate);
      Data.PrivateCopy = nullptr;
    }
  }
}
930 
/// Build a variable declaration for OpenMP loop iteration variable.
/// Copies only AlignedAttr entries from \p Attrs onto the new declaration and
/// marks it implicit (compiler-generated). When \p OrigRef is given, attaches
/// an implicit OMPReferencedVarAttr pointing back at the original variable.
/// NOTE(review): the first line of this signature (Sema, location, and type
/// parameters) is not visible in this excerpt.
                             StringRef Name, const AttrVec *Attrs = nullptr,
                             DeclRefExpr *OrigRef = nullptr) {
  DeclContext *DC = SemaRef.CurContext;
  IdentifierInfo *II = &SemaRef.PP.getIdentifierTable().get(Name);
  TypeSourceInfo *TInfo = SemaRef.Context.getTrivialTypeSourceInfo(Type, Loc);
  auto *Decl =
      VarDecl::Create(SemaRef.Context, DC, Loc, Loc, II, Type, TInfo, SC_None);
  if (Attrs) {
    // Propagate only 'aligned' attributes; all other attributes are dropped.
    for (specific_attr_iterator<AlignedAttr> I(Attrs->begin()), E(Attrs->end());
         I != E; ++I)
      Decl->addAttr(*I);
  }
  // Implicit: not user-written, so not reported in diagnostics.
  Decl->setImplicit();
  if (OrigRef) {
    Decl->addAttr(
        OMPReferencedVarAttr::CreateImplicit(SemaRef.Context, OrigRef));
  }
  return Decl;
}
952 
                                 // Builds a DeclRefExpr referencing D, first
                                 // marking D referenced and used so it is not
                                 // flagged as unused. NOTE(review): the first
                                 // signature line and the start of the
                                 // DeclRefExpr::Create call are not visible in
                                 // this excerpt.
                                 SourceLocation Loc,
                                 bool RefersToCapture = false) {
  D->setReferenced();
  D->markUsed(S.Context);
                          SourceLocation(), D, RefersToCapture, Loc, Ty,
                          VK_LValue);
}
962 
963 void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
964  BinaryOperatorKind BOK) {
965  D = getCanonicalDecl(D);
966  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
967  assert(
968  Stack.back().first.back().SharingMap[D].Attributes == OMPC_reduction &&
969  "Additional reduction info may be specified only for reduction items.");
970  ReductionData &ReductionData = Stack.back().first.back().ReductionMap[D];
971  assert(ReductionData.ReductionRange.isInvalid() &&
972  Stack.back().first.back().Directive == OMPD_taskgroup &&
973  "Additional reduction info may be specified only once for reduction "
974  "items.");
975  ReductionData.set(BOK, SR);
976  Expr *&TaskgroupReductionRef =
977  Stack.back().first.back().TaskgroupReductionRef;
978  if (!TaskgroupReductionRef) {
979  VarDecl *VD = buildVarDecl(SemaRef, SR.getBegin(),
980  SemaRef.Context.VoidPtrTy, ".task_red.");
981  TaskgroupReductionRef =
982  buildDeclRefExpr(SemaRef, VD, SemaRef.Context.VoidPtrTy, SR.getBegin());
983  }
984 }
985 
986 void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
987  const Expr *ReductionRef) {
988  D = getCanonicalDecl(D);
989  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
990  assert(
991  Stack.back().first.back().SharingMap[D].Attributes == OMPC_reduction &&
992  "Additional reduction info may be specified only for reduction items.");
993  ReductionData &ReductionData = Stack.back().first.back().ReductionMap[D];
994  assert(ReductionData.ReductionRange.isInvalid() &&
995  Stack.back().first.back().Directive == OMPD_taskgroup &&
996  "Additional reduction info may be specified only once for reduction "
997  "items.");
998  ReductionData.set(ReductionRef, SR);
999  Expr *&TaskgroupReductionRef =
1000  Stack.back().first.back().TaskgroupReductionRef;
1001  if (!TaskgroupReductionRef) {
1002  VarDecl *VD = buildVarDecl(SemaRef, SR.getBegin(),
1003  SemaRef.Context.VoidPtrTy, ".task_red.");
1004  TaskgroupReductionRef =
1005  buildDeclRefExpr(SemaRef, VD, SemaRef.Context.VoidPtrTy, SR.getBegin());
1006  }
1007 }
1008 
1009 const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
1010  const ValueDecl *D, SourceRange &SR, BinaryOperatorKind &BOK,
1011  Expr *&TaskgroupDescriptor) const {
1012  D = getCanonicalDecl(D);
1013  assert(!isStackEmpty() && "Data-sharing attributes stack is empty.");
1014  if (Stack.back().first.empty())
1015  return DSAVarData();
1016  for (iterator I = std::next(Stack.back().first.rbegin(), 1),
1017  E = Stack.back().first.rend();
1018  I != E; std::advance(I, 1)) {
1019  const DSAInfo &Data = I->SharingMap.lookup(D);
1020  if (Data.Attributes != OMPC_reduction || I->Directive != OMPD_taskgroup)
1021  continue;
1022  const ReductionData &ReductionData = I->ReductionMap.lookup(D);
1023  if (!ReductionData.ReductionOp ||
1024  ReductionData.ReductionOp.is<const Expr *>())
1025  return DSAVarData();
1026  SR = ReductionData.ReductionRange;
1027  BOK = ReductionData.ReductionOp.get<ReductionData::BOKPtrType>();
1028  assert(I->TaskgroupReductionRef && "taskgroup reduction reference "
1029  "expression for the descriptor is not "
1030  "set.");
1031  TaskgroupDescriptor = I->TaskgroupReductionRef;
1032  return DSAVarData(OMPD_taskgroup, OMPC_reduction, Data.RefExpr.getPointer(),
1033  Data.PrivateCopy, I->DefaultAttrLoc);
1034  }
1035  return DSAVarData();
1036 }
1037 
1038 const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
1039  const ValueDecl *D, SourceRange &SR, const Expr *&ReductionRef,
1040  Expr *&TaskgroupDescriptor) const {
1041  D = getCanonicalDecl(D);
1042  assert(!isStackEmpty() && "Data-sharing attributes stack is empty.");
1043  if (Stack.back().first.empty())
1044  return DSAVarData();
1045  for (iterator I = std::next(Stack.back().first.rbegin(), 1),
1046  E = Stack.back().first.rend();
1047  I != E; std::advance(I, 1)) {
1048  const DSAInfo &Data = I->SharingMap.lookup(D);
1049  if (Data.Attributes != OMPC_reduction || I->Directive != OMPD_taskgroup)
1050  continue;
1051  const ReductionData &ReductionData = I->ReductionMap.lookup(D);
1052  if (!ReductionData.ReductionOp ||
1053  !ReductionData.ReductionOp.is<const Expr *>())
1054  return DSAVarData();
1055  SR = ReductionData.ReductionRange;
1056  ReductionRef = ReductionData.ReductionOp.get<const Expr *>();
1057  assert(I->TaskgroupReductionRef && "taskgroup reduction reference "
1058  "expression for the descriptor is not "
1059  "set.");
1060  TaskgroupDescriptor = I->TaskgroupReductionRef;
1061  return DSAVarData(OMPD_taskgroup, OMPC_reduction, Data.RefExpr.getPointer(),
1062  Data.PrivateCopy, I->DefaultAttrLoc);
1063  }
1064  return DSAVarData();
1065 }
1066 
1067 bool DSAStackTy::isOpenMPLocal(VarDecl *D, iterator Iter) const {
1068  D = D->getCanonicalDecl();
1069  if (!isStackEmpty()) {
1070  iterator I = Iter, E = Stack.back().first.rend();
1071  Scope *TopScope = nullptr;
1072  while (I != E && !isImplicitOrExplicitTaskingRegion(I->Directive) &&
1073  !isOpenMPTargetExecutionDirective(I->Directive))
1074  ++I;
1075  if (I == E)
1076  return false;
1077  TopScope = I->CurScope ? I->CurScope->getParent() : nullptr;
1078  Scope *CurScope = getCurScope();
1079  while (CurScope != TopScope && !CurScope->isDeclScope(D))
1080  CurScope = CurScope->getParent();
1081  return CurScope != TopScope;
1082  }
1083  return false;
1084 }
1085 
1086 static bool isConstNotMutableType(Sema &SemaRef, QualType Type,
1087  bool AcceptIfMutable = true,
1088  bool *IsClassType = nullptr) {
1089  ASTContext &Context = SemaRef.getASTContext();
1090  Type = Type.getNonReferenceType().getCanonicalType();
1091  bool IsConstant = Type.isConstant(Context);
1092  Type = Context.getBaseElementType(Type);
1093  const CXXRecordDecl *RD = AcceptIfMutable && SemaRef.getLangOpts().CPlusPlus
1094  ? Type->getAsCXXRecordDecl()
1095  : nullptr;
1096  if (const auto *CTSD = dyn_cast_or_null<ClassTemplateSpecializationDecl>(RD))
1097  if (const ClassTemplateDecl *CTD = CTSD->getSpecializedTemplate())
1098  RD = CTD->getTemplatedDecl();
1099  if (IsClassType)
1100  *IsClassType = RD;
1101  return IsConstant && !(SemaRef.getLangOpts().CPlusPlus && RD &&
1102  RD->hasDefinition() && RD->hasMutableFields());
1103 }
1104 
/// Diagnoses use of a const-qualified, non-mutable item where the clause
/// requires a modifiable lvalue; returns true when a diagnostic was emitted.
/// NOTE(review): a parameter-list line (presumably the type and clause kind)
/// and the right-hand side of the IsDecl comparison are not visible in this
/// excerpt.
static bool rejectConstNotMutableType(Sema &SemaRef, const ValueDecl *D,
                                      SourceLocation ELoc,
                                      bool AcceptIfMutable = true,
                                      bool ListItemNotVar = false) {
  ASTContext &Context = SemaRef.getASTContext();
  bool IsClassType;
  if (isConstNotMutableType(SemaRef, Type, AcceptIfMutable, &IsClassType)) {
    // Choose the most specific diagnostic for the rejected item.
    unsigned Diag = ListItemNotVar
                        ? diag::err_omp_const_list_item
                        : IsClassType ? diag::err_omp_const_not_mutable_variable
                                      : diag::err_omp_const_variable;
    SemaRef.Diag(ELoc, Diag) << getOpenMPClauseName(CKind);
    if (!ListItemNotVar && D) {
      // Add a note pointing at the declaration or definition of the item.
      const VarDecl *VD = dyn_cast<VarDecl>(D);
      bool IsDecl = !VD || VD->isThisDeclarationADefinition(Context) ==
      SemaRef.Diag(D->getLocation(),
                   IsDecl ? diag::note_previous_decl : diag::note_defined_here)
          << D;
    }
    return true;
  }
  return false;
}
1130 
/// Returns the data-sharing attributes for \p D at the innermost region (or
/// at its parent when \p FromParent is true). Checks, in order: explicit and
/// attribute-based threadprivate, CUDA-mode target locality, predetermined
/// rules (static data members, pre-3.1 const items), and finally the
/// innermost region's explicit sharing map.
const DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D,
                                                   bool FromParent) {
  D = getCanonicalDecl(D);
  DSAVarData DVar;

  auto *VD = dyn_cast<VarDecl>(D);
  // Items already recorded as threadprivate take precedence over everything.
  auto TI = Threadprivates.find(D);
  if (TI != Threadprivates.end()) {
    DVar.RefExpr = TI->getSecond().RefExpr.getPointer();
    DVar.CKind = OMPC_threadprivate;
    return DVar;
  }
  if (VD && VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
    // Declared threadprivate via attribute: record it lazily.
    DVar.RefExpr = buildDeclRefExpr(
        SemaRef, VD, D->getType().getNonReferenceType(),
        VD->getAttr<OMPThreadPrivateDeclAttr>()->getLocation());
    DVar.CKind = OMPC_threadprivate;
    addDSA(D, DVar.RefExpr, OMPC_threadprivate);
    return DVar;
  }
  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
  // in a Construct, C/C++, predetermined, p.1]
  // Variables appearing in threadprivate directives are threadprivate.
  // Also treat TLS variables (when not handled via OpenMP TLS support) and
  // register-asm globals as threadprivate.
  if ((VD && VD->getTLSKind() != VarDecl::TLS_None &&
       !(VD->hasAttr<OMPThreadPrivateDeclAttr>() &&
         SemaRef.getLangOpts().OpenMPUseTLS &&
         SemaRef.getASTContext().getTargetInfo().isTLSSupported())) ||
      (VD && VD->getStorageClass() == SC_Register &&
       VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())) {
    DVar.RefExpr = buildDeclRefExpr(
        SemaRef, VD, D->getType().getNonReferenceType(), D->getLocation());
    DVar.CKind = OMPC_threadprivate;
    addDSA(D, DVar.RefExpr, OMPC_threadprivate);
    return DVar;
  }
  // In OpenMP-CUDA mode, certain locals inside target regions behave like
  // threadprivate data.
  if (SemaRef.getLangOpts().OpenMPCUDAMode && VD &&
      VD->isLocalVarDeclOrParm() && !isStackEmpty() &&
      !isLoopControlVariable(D).first) {
    iterator IterTarget =
        std::find_if(Stack.back().first.rbegin(), Stack.back().first.rend(),
                     [](const SharingMapTy &Data) {
                       return isOpenMPTargetExecutionDirective(Data.Directive);
                     });
    if (IterTarget != Stack.back().first.rend()) {
      iterator ParentIterTarget = std::next(IterTarget, 1);
      for (iterator Iter = Stack.back().first.rbegin();
           Iter != ParentIterTarget; std::advance(Iter, 1)) {
        if (isOpenMPLocal(VD, Iter)) {
          // Declared inside the target region itself.
          DVar.RefExpr =
              buildDeclRefExpr(SemaRef, VD, D->getType().getNonReferenceType(),
                               D->getLocation());
          DVar.CKind = OMPC_threadprivate;
          return DVar;
        }
      }
      if (!isClauseParsingMode() || IterTarget != Stack.back().first.rbegin()) {
        auto DSAIter = IterTarget->SharingMap.find(D);
        if (DSAIter != IterTarget->SharingMap.end() &&
            isOpenMPPrivate(DSAIter->getSecond().Attributes)) {
          DVar.RefExpr = DSAIter->getSecond().RefExpr.getPointer();
          DVar.CKind = OMPC_threadprivate;
          return DVar;
        }
        iterator End = Stack.back().first.rend();
        if (!SemaRef.isOpenMPCapturedByRef(
                D, std::distance(ParentIterTarget, End))) {
          // Captured by value into the target region.
          DVar.RefExpr =
              buildDeclRefExpr(SemaRef, VD, D->getType().getNonReferenceType(),
                               IterTarget->ConstructLoc);
          DVar.CKind = OMPC_threadprivate;
          return DVar;
        }
      }
    }
  }

  if (isStackEmpty())
    // Not in OpenMP execution region and top scope was already checked.
    return DVar;

  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
  // in a Construct, C/C++, predetermined, p.4]
  // Static data members are shared.
  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
  // in a Construct, C/C++, predetermined, p.7]
  // Variables with static storage duration that are declared in a scope
  // inside the construct are shared.
  auto &&MatchesAlways = [](OpenMPDirectiveKind) { return true; };
  if (VD && VD->isStaticDataMember()) {
    // An explicit private-like DSA on an enclosing region wins.
    DSAVarData DVarTemp = hasDSA(D, isOpenMPPrivate, MatchesAlways, FromParent);
    if (DVarTemp.CKind != OMPC_unknown && DVarTemp.RefExpr)
      return DVar;

    DVar.CKind = OMPC_shared;
    return DVar;
  }

  // The predetermined shared attribute for const-qualified types having no
  // mutable members was removed after OpenMP 3.1.
  if (SemaRef.LangOpts.OpenMP <= 31) {
    // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
    // in a Construct, C/C++, predetermined, p.6]
    // Variables with const qualified type having no mutable member are
    // shared.
    if (isConstNotMutableType(SemaRef, D->getType())) {
      // Variables with const-qualified type having no mutable member may be
      // listed in a firstprivate clause, even if they are static data members.
      DSAVarData DVarTemp = hasInnermostDSA(
          D,
          [](OpenMPClauseKind C) {
            return C == OMPC_firstprivate || C == OMPC_shared;
          },
          MatchesAlways, FromParent);
      if (DVarTemp.CKind != OMPC_unknown && DVarTemp.RefExpr)
        return DVarTemp;

      DVar.CKind = OMPC_shared;
      return DVar;
    }
  }

  // Explicitly specified attributes and local variables with predetermined
  // attributes.
  iterator I = Stack.back().first.rbegin();
  iterator EndI = Stack.back().first.rend();
  if (FromParent && I != EndI)
    std::advance(I, 1);
  auto It = I->SharingMap.find(D);
  if (It != I->SharingMap.end()) {
    const DSAInfo &Data = It->getSecond();
    DVar.RefExpr = Data.RefExpr.getPointer();
    DVar.PrivateCopy = Data.PrivateCopy;
    DVar.CKind = Data.Attributes;
    DVar.ImplicitDSALoc = I->DefaultAttrLoc;
    DVar.DKind = I->Directive;
  }

  return DVar;
}
1270 
1271 const DSAStackTy::DSAVarData DSAStackTy::getImplicitDSA(ValueDecl *D,
1272  bool FromParent) const {
1273  if (isStackEmpty()) {
1274  iterator I;
1275  return getDSA(I, D);
1276  }
1277  D = getCanonicalDecl(D);
1278  iterator StartI = Stack.back().first.rbegin();
1279  iterator EndI = Stack.back().first.rend();
1280  if (FromParent && StartI != EndI)
1281  std::advance(StartI, 1);
1282  return getDSA(StartI, D);
1283 }
1284 
1285 const DSAStackTy::DSAVarData
1286 DSAStackTy::hasDSA(ValueDecl *D,
1287  const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
1288  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
1289  bool FromParent) const {
1290  if (isStackEmpty())
1291  return {};
1292  D = getCanonicalDecl(D);
1293  iterator I = Stack.back().first.rbegin();
1294  iterator EndI = Stack.back().first.rend();
1295  if (FromParent && I != EndI)
1296  std::advance(I, 1);
1297  for (; I != EndI; std::advance(I, 1)) {
1298  if (!DPred(I->Directive) && !isImplicitOrExplicitTaskingRegion(I->Directive))
1299  continue;
1300  iterator NewI = I;
1301  DSAVarData DVar = getDSA(NewI, D);
1302  if (I == NewI && CPred(DVar.CKind))
1303  return DVar;
1304  }
1305  return {};
1306 }
1307 
1308 const DSAStackTy::DSAVarData DSAStackTy::hasInnermostDSA(
1309  ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
1310  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
1311  bool FromParent) const {
1312  if (isStackEmpty())
1313  return {};
1314  D = getCanonicalDecl(D);
1315  iterator StartI = Stack.back().first.rbegin();
1316  iterator EndI = Stack.back().first.rend();
1317  if (FromParent && StartI != EndI)
1318  std::advance(StartI, 1);
1319  if (StartI == EndI || !DPred(StartI->Directive))
1320  return {};
1321  iterator NewI = StartI;
1322  DSAVarData DVar = getDSA(NewI, D);
1323  return (NewI == StartI && CPred(DVar.CKind)) ? DVar : DSAVarData();
1324 }
1325 
1326 bool DSAStackTy::hasExplicitDSA(
1327  const ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
1328  unsigned Level, bool NotLastprivate) const {
1329  if (isStackEmpty())
1330  return false;
1331  D = getCanonicalDecl(D);
1332  auto StartI = Stack.back().first.begin();
1333  auto EndI = Stack.back().first.end();
1334  if (std::distance(StartI, EndI) <= (int)Level)
1335  return false;
1336  std::advance(StartI, Level);
1337  auto I = StartI->SharingMap.find(D);
1338  if ((I != StartI->SharingMap.end()) &&
1339  I->getSecond().RefExpr.getPointer() &&
1340  CPred(I->getSecond().Attributes) &&
1341  (!NotLastprivate || !I->getSecond().RefExpr.getInt()))
1342  return true;
1343  // Check predetermined rules for the loop control variables.
1344  auto LI = StartI->LCVMap.find(D);
1345  if (LI != StartI->LCVMap.end())
1346  return CPred(OMPC_private);
1347  return false;
1348 }
1349 
1350 bool DSAStackTy::hasExplicitDirective(
1351  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
1352  unsigned Level) const {
1353  if (isStackEmpty())
1354  return false;
1355  auto StartI = Stack.back().first.begin();
1356  auto EndI = Stack.back().first.end();
1357  if (std::distance(StartI, EndI) <= (int)Level)
1358  return false;
1359  std::advance(StartI, Level);
1360  return DPred(StartI->Directive);
1361 }
1362 
/// Returns true when some region enclosing the innermost one satisfies
/// \p DPred (the innermost region itself is always skipped; with
/// \p FromParent one more region is skipped). NOTE(review): part of the
/// function_ref parameter type is not visible in this excerpt.
bool DSAStackTy::hasDirective(
    const llvm::function_ref<bool(OpenMPDirectiveKind,
                                  DPred,
    bool FromParent) const {
  // We look only in the enclosing region.
  if (isStackEmpty())
    return false;
  auto StartI = std::next(Stack.back().first.rbegin());
  auto EndI = Stack.back().first.rend();
  if (FromParent && StartI != EndI)
    StartI = std::next(StartI);
  for (auto I = StartI, EE = EndI; I != EE; ++I) {
    // The predicate is given the directive kind, its name, and its location.
    if (DPred(I->Directive, I->DirectiveName, I->ConstructLoc))
      return true;
  }
  return false;
}
1381 
/// Allocates the OpenMP data-sharing attributes stack; it is stored through
/// the opaque VarDataSharingAttributesStack pointer and accessed via the
/// DSAStack macro below.
void Sema::InitDataSharingAttributesStack() {
  VarDataSharingAttributesStack = new DSAStackTy(*this);
}
1385 
1386 #define DSAStack static_cast<DSAStackTy *>(VarDataSharingAttributesStack)
1387 
/// Notifies the data-sharing attributes stack that a new function region is
/// being entered.
void Sema::pushOpenMPFunctionRegion() {
  DSAStack->pushFunction();
}
1391 
/// Notifies the data-sharing attributes stack that the current function
/// region is being left, restoring the state described by \p OldFSI.
void Sema::popOpenMPFunctionRegion(const FunctionScopeInfo *OldFSI) {
  DSAStack->popFunction(OldFSI);
}
1395 
  // Returns whether device-side diagnostics may currently be deferred: true
  // only outside a target execution directive. NOTE(review): the signature
  // line and the continuation of this return expression are not visible in
  // this excerpt.
  assert(S.LangOpts.OpenMP && S.LangOpts.OpenMPIsDevice &&
         "Expected OpenMP device compilation.");
  return !S.isInOpenMPTargetExecutionDirective() &&
}
1402 
1403 /// Do we know that we will eventually codegen the given function?
1404 static bool isKnownEmitted(Sema &S, FunctionDecl *FD) {
1405  assert(S.LangOpts.OpenMP && S.LangOpts.OpenMPIsDevice &&
1406  "Expected OpenMP device compilation.");
1407  // Templates are emitted when they're instantiated.
1408  if (FD->isDependentContext())
1409  return false;
1410 
1411  if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(
1412  FD->getCanonicalDecl()))
1413  return true;
1414 
1415  // Otherwise, the function is known-emitted if it's in our set of
1416  // known-emitted functions.
1417  return S.DeviceKnownEmittedFns.count(FD) > 0;
1418 }
1419 
                                             unsigned DiagID) {
  assert(LangOpts.OpenMP && LangOpts.OpenMPIsDevice &&
         "Expected OpenMP device compilation.");
  // Defer the diagnostic while we may still discover that the current
  // function is never emitted for the device; otherwise emit immediately.
  // NOTE(review): the signature line and the start of the DeviceDiagBuilder
  // construction are not visible in this excerpt.
      !isKnownEmitted(*this, getCurFunctionDecl()))
          ? DeviceDiagBuilder::K_Deferred
          : DeviceDiagBuilder::K_Immediate,
      Loc, DiagID, getCurFunctionDecl(), *this);
}
1430 
1431 void Sema::checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee) {
1432  assert(LangOpts.OpenMP && LangOpts.OpenMPIsDevice &&
1433  "Expected OpenMP device compilation.");
1434  assert(Callee && "Callee may not be null.");
1435  FunctionDecl *Caller = getCurFunctionDecl();
1436 
1437  // If the caller is known-emitted, mark the callee as known-emitted.
1438  // Otherwise, mark the call in our call graph so we can traverse it later.
1439  if (!isOpenMPDeviceDelayedContext(*this) ||
1440  (Caller && isKnownEmitted(*this, Caller)))
1441  markKnownEmitted(*this, Caller, Callee, Loc, isKnownEmitted);
1442  else if (Caller)
1443  DeviceCallGraph[Caller].insert({Callee, Loc});
1444 }
1445 
/// Decides whether \p D must be captured by reference (rather than by copy)
/// for the region at nesting level \p Level, applying the target-construct
/// data-mapping rules summarized in the table below and a final
/// fits-in-uintptr check required by the runtime.
bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const {
  assert(LangOpts.OpenMP && "OpenMP is not allowed");

  ASTContext &Ctx = getASTContext();
  bool IsByRef = true;

  // Find the directive that is associated with the provided scope.
  D = cast<ValueDecl>(D->getCanonicalDecl());
  QualType Ty = D->getType();

  if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective, Level)) {
    // This table summarizes how a given variable should be passed to the device
    // given its type and the clauses where it appears. This table is based on
    // the description in OpenMP 4.5 [2.10.4, target Construct] and
    // OpenMP 4.5 [2.15.5, Data-mapping Attribute Rules and Clauses].
    //
    // =========================================================================
    // | type |  defaultmap   | pvt | first | is_device_ptr |    map   | res.  |
    // |      |(tofrom:scalar)|     |  pvt  |               |          |       |
    // =========================================================================
    // | scl  |               |     |       |       -       |          | bycopy|
    // | scl  |               |  -  |   x   |       -       |     -    | bycopy|
    // | scl  |               |  x  |   -   |       -       |     -    | null  |
    // | scl  |       x       |     |       |       -       |          | byref |
    // | scl  |       x       |  -  |   x   |       -       |     -    | bycopy|
    // | scl  |       x       |  x  |   -   |       -       |     -    | null  |
    // | scl  |               |  -  |   -   |       -       |     x    | byref |
    // | scl  |       x       |  -  |   -   |       -       |     x    | byref |
    //
    // | agg  |      n.a.     |     |       |       -       |          | byref |
    // | agg  |      n.a.     |  -  |   x   |       -       |     -    | byref |
    // | agg  |      n.a.     |  x  |   -   |       -       |     -    | null  |
    // | agg  |      n.a.     |  -  |   -   |       -       |     x    | byref |
    // | agg  |      n.a.     |  -  |   -   |       -       |    x[]   | byref |
    //
    // | ptr  |      n.a.     |     |       |       -       |          | bycopy|
    // | ptr  |      n.a.     |  -  |   x   |       -       |     -    | bycopy|
    // | ptr  |      n.a.     |  x  |   -   |       -       |     -    | null  |
    // | ptr  |      n.a.     |  -  |   -   |       -       |     x    | byref |
    // | ptr  |      n.a.     |  -  |   -   |       -       |    x[]   | bycopy|
    // | ptr  |      n.a.     |  -  |   -   |       x       |          | bycopy|
    // | ptr  |      n.a.     |  -  |   -   |       x       |     x    | bycopy|
    // | ptr  |      n.a.     |  -  |   -   |       x       |    x[]   | bycopy|
    // =========================================================================
    // Legend:
    //  scl - scalar
    //  ptr - pointer
    //  agg - aggregate
    //  x - applies
    //  - - invalid in this combination
    //  [] - mapped with an array section
    //  byref - should be mapped by reference
    //  byval - should be mapped by value
    //  null - initialize a local variable to null on the device
    //
    // Observations:
    //  - All scalar declarations that show up in a map clause have to be passed
    //    by reference, because they may have been mapped in the enclosing data
    //    environment.
    //  - If the scalar value does not fit the size of uintptr, it has to be
    //    passed by reference, regardless the result in the table above.
    //  - For pointers mapped by value that have either an implicit map or an
    //    array section, the runtime library may pass the NULL value to the
    //    device instead of the value passed to it by the compiler.

    if (Ty->isReferenceType())
      Ty = Ty->castAs<ReferenceType>()->getPointeeType();

    // Locate map clauses and see if the variable being captured is referred to
    // in any of those clauses. Here we only care about variables, not fields,
    // because fields are part of aggregates.
    bool IsVariableUsedInMapClause = false;
    bool IsVariableAssociatedWithSection = false;

    // NOTE(review): the type of the first lambda parameter (a mappable
    // expression component list) is on a line not visible in this excerpt.
    DSAStack->checkMappableExprComponentListsForDeclAtLevel(
        D, Level,
        [&IsVariableUsedInMapClause, &IsVariableAssociatedWithSection, D](
                MapExprComponents,
            OpenMPClauseKind WhereFoundClauseKind) {
          // Only the map clause information influences how a variable is
          // captured. E.g. is_device_ptr does not require changing the default
          // behavior.
          if (WhereFoundClauseKind != OMPC_map)
            return false;

          auto EI = MapExprComponents.rbegin();
          auto EE = MapExprComponents.rend();

          assert(EI != EE && "Invalid map expression!");

          if (isa<DeclRefExpr>(EI->getAssociatedExpression()))
            IsVariableUsedInMapClause |= EI->getAssociatedDeclaration() == D;

          ++EI;
          if (EI == EE)
            return false;

          if (isa<ArraySubscriptExpr>(EI->getAssociatedExpression()) ||
              isa<OMPArraySectionExpr>(EI->getAssociatedExpression()) ||
              isa<MemberExpr>(EI->getAssociatedExpression())) {
            IsVariableAssociatedWithSection = true;
            // There is nothing more we need to know about this variable.
            return true;
          }

          // Keep looking for more map info.
          return false;
        });

    if (IsVariableUsedInMapClause) {
      // If variable is identified in a map clause it is always captured by
      // reference except if it is a pointer that is dereferenced somehow.
      IsByRef = !(Ty->isPointerType() && IsVariableAssociatedWithSection);
    } else {
      // By default, all the data that has a scalar type is mapped by copy
      // (except for reduction variables).
      IsByRef =
          (DSAStack->isForceCaptureByReferenceInTargetExecutable() &&
           !Ty->isAnyPointerType()) ||
          !Ty->isScalarType() ||
          DSAStack->getDefaultDMAAtLevel(Level) == DMA_tofrom_scalar ||
          DSAStack->hasExplicitDSA(
              D, [](OpenMPClauseKind K) { return K == OMPC_reduction; }, Level);
    }
  }

  if (IsByRef && Ty.getNonReferenceType()->isScalarType()) {
    // Firstprivate scalars (when not forced by-reference) are captured by
    // value instead.
    IsByRef =
        ((DSAStack->isForceCaptureByReferenceInTargetExecutable() &&
          !Ty->isAnyPointerType()) ||
         !DSAStack->hasExplicitDSA(
             D,
             [](OpenMPClauseKind K) -> bool { return K == OMPC_firstprivate; },
             Level, /*NotLastprivate=*/true)) &&
        // If the variable is artificial and must be captured by value - try to
        // capture by value.
        !(isa<OMPCapturedExprDecl>(D) && !D->hasAttr<OMPCaptureNoInitAttr>() &&
          !cast<OMPCapturedExprDecl>(D)->getInit()->isGLValue());
  }

  // When passing data by copy, we need to make sure it fits the uintptr size
  // and alignment, because the runtime library only deals with uintptr types.
  // If it does not fit the uintptr size, we need to pass the data by reference
  // instead.
  if (!IsByRef &&
      (Ctx.getTypeSizeInChars(Ty) >
           Ctx.getTypeSizeInChars(Ctx.getUIntPtrType()) ||
       Ctx.getDeclAlign(D) > Ctx.getTypeAlignInChars(Ctx.getUIntPtrType()))) {
    IsByRef = true;
  }

  return IsByRef;
}
1600 
/// Returns the current OpenMP region nesting level, as tracked by the
/// data-sharing attributes stack.
unsigned Sema::getOpenMPNestingLevel() const {
  assert(getLangOpts().OpenMP);
  return DSAStack->getNestingLevel();
}
1605 
  // True when the current directive is a target execution directive (outside
  // clause parsing) or some enclosing region is one. NOTE(review): the
  // signature line and the start of the lambda passed to hasDirective are not
  // visible in this excerpt.
  return (isOpenMPTargetExecutionDirective(DSAStack->getCurrentDirective()) &&
          !DSAStack->isClauseParsingMode()) ||
         DSAStack->hasDirective(
                           SourceLocation) -> bool {
               return isOpenMPTargetExecutionDirective(K);
             },
             false);
}
1616 
// NOTE(review): the opening signature line was lost in extraction. Per the
// body, this is the Sema routine deciding whether declaration D must be
// captured in the current OpenMP region; it returns the VarDecl to capture or
// nullptr when no capture is needed — confirm against upstream SemaOpenMP.cpp.
1618  assert(LangOpts.OpenMP && "OpenMP is not allowed");
1619  D = getCanonicalDecl(D);
1620 
1621  // If we are attempting to capture a global variable in a directive with
1622  // 'target' we return true so that this global is also mapped to the device.
1623  //
1624  auto *VD = dyn_cast<VarDecl>(D);
1625  if (VD && !VD->hasLocalStorage()) {
1626  if (isInOpenMPDeclareTargetContext() &&
1627  (getCurCapturedRegion() || getCurBlock() || getCurLambda())) {
1628  // Try to mark variable as declare target if it is used in capturing
1629  // regions.
1630  if (!OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
1631  checkDeclIsAllowedInOpenMPTarget(nullptr, VD);
1632  return nullptr;
1633  } else if (isInOpenMPTargetExecutionDirective()) {
1634  // If the declaration is enclosed in a 'declare target' directive,
1635  // then it should not be captured.
1636  //
1637  if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
1638  return nullptr;
1639  return VD;
1640  }
1641  }
1642  // Capture variables captured by reference in lambdas for target-based
1643  // directives.
1644  if (VD && !DSAStack->isClauseParsingMode()) {
1645  if (const auto *RD = VD->getType()
1646  .getCanonicalType()
1647  .getNonReferenceType()
1648  ->getAsCXXRecordDecl()) {
       // Temporarily force by-reference capture while walking the lambda's
       // own captures; the previous value is restored below.
1649  bool SavedForceCaptureByReferenceInTargetExecutable =
1650  DSAStack->isForceCaptureByReferenceInTargetExecutable();
1651  DSAStack->setForceCaptureByReferenceInTargetExecutable(/*V=*/true);
1652  if (RD->isLambda()) {
1653  llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
1654  FieldDecl *ThisCapture;
1655  RD->getCaptureFields(Captures, ThisCapture);
1656  for (const LambdaCapture &LC : RD->captures()) {
1657  if (LC.getCaptureKind() == LCK_ByRef) {
1658  VarDecl *VD = LC.getCapturedVar();
1659  DeclContext *VDC = VD->getDeclContext();
1660  if (!VDC->Encloses(CurContext))
1661  continue;
1662  DSAStackTy::DSAVarData DVarPrivate =
1663  DSAStack->getTopDSA(VD, /*FromParent=*/false);
1664  // Do not capture already captured variables.
1665  if (!OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD) &&
1666  DVarPrivate.CKind == OMPC_unknown &&
1667  !DSAStack->checkMappableExprComponentListsForDecl(
1668  D, /*CurrentRegionOnly=*/true,
1670  MappableExprComponentListRef,
1671  OpenMPClauseKind) { return true; }))
1672  MarkVariableReferenced(LC.getLocation(), LC.getCapturedVar());
1673  } else if (LC.getCaptureKind() == LCK_This) {
1674  QualType ThisTy = getCurrentThisType();
1675  if (!ThisTy.isNull() &&
1676  Context.typesAreCompatible(ThisTy, ThisCapture->getType()))
1677  CheckCXXThisCapture(LC.getLocation());
1678  }
1679  }
1680  }
1681  DSAStack->setForceCaptureByReferenceInTargetExecutable(
1682  SavedForceCaptureByReferenceInTargetExecutable);
1683  }
1684  }
1685 
  // Consult the DSA stack: loop control variables, locals used in tasking
  // regions, forced captures, and existing private copies all determine the
  // declaration to capture.
1686  if (DSAStack->getCurrentDirective() != OMPD_unknown &&
1687  (!DSAStack->isClauseParsingMode() ||
1688  DSAStack->getParentDirective() != OMPD_unknown)) {
1689  auto &&Info = DSAStack->isLoopControlVariable(D);
1690  if (Info.first ||
1691  (VD && VD->hasLocalStorage() &&
1692  isImplicitOrExplicitTaskingRegion(DSAStack->getCurrentDirective())) ||
1693  (VD && DSAStack->isForceVarCapturing()))
1694  return VD ? VD : Info.second;
1695  DSAStackTy::DSAVarData DVarPrivate =
1696  DSAStack->getTopDSA(D, DSAStack->isClauseParsingMode());
1697  if (DVarPrivate.CKind != OMPC_unknown && isOpenMPPrivate(DVarPrivate.CKind))
1698  return VD ? VD : cast<VarDecl>(DVarPrivate.PrivateCopy->getDecl());
1699  DVarPrivate = DSAStack->hasDSA(D, isOpenMPPrivate,
1700  [](OpenMPDirectiveKind) { return true; },
1701  DSAStack->isClauseParsingMode());
1702  if (DVarPrivate.CKind != OMPC_unknown)
1703  return VD ? VD : cast<VarDecl>(DVarPrivate.PrivateCopy->getDecl());
1704  }
1705  return nullptr;
1706 }
1707 
// Rewinds FunctionScopesIndex past the implicit function scopes created for
// the capture regions of the OpenMP directive at the given nesting Level.
// NOTE(review): the declaration of 'Regions' (presumably a SmallVector of
// OpenMPDirectiveKind filled by getOpenMPCaptureRegions) was lost in
// extraction — confirm against upstream SemaOpenMP.cpp.
1708 void Sema::adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
1709  unsigned Level) const {
1711  getOpenMPCaptureRegions(Regions, DSAStack->getDirective(Level));
1712  FunctionScopesIndex -= Regions.size();
1713 }
1714 
// NOTE(review): the signature line was lost in extraction; this appears to be
// Sema::startOpenMPLoop(). If the current directive is a loop directive, it
// initializes the loop-tracking state on the DSA stack.
1716  assert(LangOpts.OpenMP && "OpenMP must be enabled.");
1717  if (isOpenMPLoopDirective(DSAStack->getCurrentDirective()))
1718  DSAStack->loopInit();
1719 }
1720 
/// Returns true if declaration \a D must be treated as private at nesting
/// \a Level: the (possible) loop counter of the current loop directive, a
/// variable with an explicit 'private' DSA, the clause currently being
/// parsed, or a taskgroup reduction descriptor variable.
1721 bool Sema::isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const {
1722  assert(LangOpts.OpenMP && "OpenMP is not allowed");
1723  if (isOpenMPLoopDirective(DSAStack->getCurrentDirective())) {
     // First declaration seen before the loop body starts is recorded as a
     // possible loop counter and treated as private.
1724  if (DSAStack->getAssociatedLoops() > 0 &&
1725  !DSAStack->isLoopStarted()) {
1726  DSAStack->resetPossibleLoopCounter(D);
1727  DSAStack->loopStart();
1728  return true;
1729  }
     // NOTE(review): "getPossiblyLoopCunter" (sic) presumably matches the
     // accessor's declared name on the DSA stack elsewhere in this file; do
     // not correct the spelling here without renaming the declaration too.
1730  if ((DSAStack->getPossiblyLoopCunter() == D->getCanonicalDecl() ||
1731  DSAStack->isLoopControlVariable(D).first) &&
1732  !DSAStack->hasExplicitDSA(
1733  D, [](OpenMPClauseKind K) { return K != OMPC_private; }, Level) &&
1734  !isOpenMPSimdDirective(DSAStack->getCurrentDirective()))
1735  return true;
1736  }
1737  return DSAStack->hasExplicitDSA(
1738  D, [](OpenMPClauseKind K) { return K == OMPC_private; }, Level) ||
1739  (DSAStack->isClauseParsingMode() &&
1740  DSAStack->getClauseParsingMode() == OMPC_private) ||
1741  // Consider taskgroup reduction descriptor variable a private to avoid
1742  // possible capture in the region.
1743  (DSAStack->hasExplicitDirective(
1744  [](OpenMPDirectiveKind K) { return K == OMPD_taskgroup; },
1745  Level) &&
1746  DSAStack->isTaskgroupReductionRef(D, Level));
1747 }
1748 
// NOTE(review): the signature line and the declaration of the local 'OMPC'
// were lost in extraction. Per the body this routine determines the clause
// kind under which a captured field FD must be treated (private-like, map, or
// firstprivate) by scanning the DSA stack from the current nesting level down
// to Level, and records it as an implicit OMPCaptureKindAttr on FD. 'OMPC' is
// presumably an OpenMPClauseKind initialized to OMPC_unknown, given the final
// check — confirm against upstream SemaOpenMP.cpp.
1750  unsigned Level) {
1751  assert(LangOpts.OpenMP && "OpenMP is not allowed");
1752  D = getCanonicalDecl(D);
1754  for (unsigned I = DSAStack->getNestingLevel() + 1; I > Level; --I) {
1755  const unsigned NewLevel = I - 1;
     // An explicit private-like DSA at this level wins.
1756  if (DSAStack->hasExplicitDSA(D,
1757  [&OMPC](const OpenMPClauseKind K) {
1758  if (isOpenMPPrivate(K)) {
1759  OMPC = K;
1760  return true;
1761  }
1762  return false;
1763  },
1764  NewLevel))
1765  break;
     // An explicit map of this declaration at this level means 'map'.
1766  if (DSAStack->checkMappableExprComponentListsForDeclAtLevel(
1767  D, NewLevel,
1769  OpenMPClauseKind) { return true; })) {
1770  OMPC = OMPC_map;
1771  break;
1772  }
     // Inside a 'target' execution directive, scalars default to
     // firstprivate unless defaultmap(tofrom:scalar) is in effect.
1773  if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective,
1774  NewLevel)) {
1775  OMPC = OMPC_map;
1776  if (D->getType()->isScalarType() &&
1777  DSAStack->getDefaultDMAAtLevel(NewLevel) !=
1778  DefaultMapAttributes::DMA_tofrom_scalar)
1779  OMPC = OMPC_firstprivate;
1780  break;
1781  }
1782  }
1783  if (OMPC != OMPC_unknown)
1784  FD->addAttr(OMPCaptureKindAttr::CreateImplicit(Context, OMPC));
1785 }
1786 
// NOTE(review): the signature line was lost in extraction; per the body the
// parameters are (const ValueDecl *D, unsigned Level) and the result is bool.
// Returns true when D is a VarDecl without local storage and the directive at
// Level is an explicit 'target' execution directive, i.e. the variable is
// captured by the target region.
1788  unsigned Level) const {
1789  assert(LangOpts.OpenMP && "OpenMP is not allowed");
1790  // Return true if the current level is no longer enclosed in a target region.
1791 
1792  const auto *VD = dyn_cast<VarDecl>(D);
1793  return VD && !VD->hasLocalStorage() &&
1794  DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective,
1795  Level);
1796 }
1797 
// Frees the data-sharing attributes stack owned by Sema; DSAStack is the
// accessor used throughout this file's OpenMP analysis.
1798 void Sema::DestroyDataSharingAttributesStack() { delete DSAStack; }
1799 
// NOTE(review): the signature line (with the directive-kind parameter DKind)
// was lost in extraction. Opens semantic analysis of a directive: pushes a
// new entry onto the DSA stack and enters a potentially-evaluated expression
// evaluation context, both undone in EndOpenMPDSABlock.
1801  const DeclarationNameInfo &DirName,
1802  Scope *CurScope, SourceLocation Loc) {
1803  DSAStack->push(DKind, DirName, CurScope, Loc);
1804  PushExpressionEvaluationContext(
1805  ExpressionEvaluationContext::PotentiallyEvaluated);
1806 }
1807 
// NOTE(review): signature line lost in extraction (takes the clause kind K).
// Marks the DSA stack as being in clause-parsing mode for clause kind K.
1809  DSAStack->setClauseParsingMode(K);
1810 }
1811 
// NOTE(review): signature line lost in extraction. Leaves clause-parsing mode
// by resetting the DSA stack's clause kind to OMPC_unknown.
1813  DSAStack->setClauseParsingMode(/*K=*/OMPC_unknown);
1814 }
1815 
/// Finishes semantic analysis of an OpenMP directive: builds the helper
/// private copies required by lastprivate clauses, then pops the DSA stack
/// and the expression evaluation context opened in StartOpenMPDSABlock.
1816 void Sema::EndOpenMPDSABlock(Stmt *CurDirective) {
1817  // OpenMP [2.14.3.5, Restrictions, C/C++, p.1]
1818  // A variable of class type (or array thereof) that appears in a lastprivate
1819  // clause requires an accessible, unambiguous default constructor for the
1820  // class type, unless the list item is also specified in a firstprivate
1821  // clause.
1822  if (const auto *D = dyn_cast_or_null<OMPExecutableDirective>(CurDirective)) {
1823  for (OMPClause *C : D->clauses()) {
1824  if (auto *Clause = dyn_cast<OMPLastprivateClause>(C)) {
1825  SmallVector<Expr *, 8> PrivateCopies;
1826  for (Expr *DE : Clause->varlists()) {
1827  if (DE->isValueDependent() || DE->isTypeDependent()) {
            // Dependent items are analyzed at instantiation time.
1828  PrivateCopies.push_back(nullptr);
1829  continue;
1830  }
1831  auto *DRE = cast<DeclRefExpr>(DE->IgnoreParens());
1832  auto *VD = cast<VarDecl>(DRE->getDecl());
1833  QualType Type = VD->getType().getNonReferenceType();
1834  const DSAStackTy::DSAVarData DVar =
1835  DSAStack->getTopDSA(VD, /*FromParent=*/false);
1836  if (DVar.CKind == OMPC_lastprivate) {
1837  // Generate helper private variable and initialize it with the
1838  // default value. The address of the original variable is replaced
1839  // by the address of the new private variable in CodeGen. This new
1840  // variable is not added to IdResolver, so the code in the OpenMP
1841  // region uses original variable for proper diagnostics.
1842  VarDecl *VDPrivate = buildVarDecl(
1843  *this, DE->getExprLoc(), Type.getUnqualifiedType(),
1844  VD->getName(), VD->hasAttrs() ? &VD->getAttrs() : nullptr, DRE);
1845  ActOnUninitializedDecl(VDPrivate);
           // An invalid private copy is skipped; the size check below then
           // prevents attaching a partial list of copies.
1846  if (VDPrivate->isInvalidDecl())
1847  continue;
1848  PrivateCopies.push_back(buildDeclRefExpr(
1849  *this, VDPrivate, DE->getType(), DE->getExprLoc()));
1850  } else {
1851  // The variable is also a firstprivate, so initialization sequence
1852  // for private copy is generated already.
1853  PrivateCopies.push_back(nullptr);
1854  }
1855  }
1856  // Set initializers to private copies if no errors were found.
1857  if (PrivateCopies.size() == Clause->varlist_size())
1858  Clause->setPrivateCopies(PrivateCopies);
1859  }
1860  }
1861  }
1862 
  // Tear down the per-directive state in the reverse order it was created.
1863  DSAStack->pop();
1864  DiscardCleanupsInEvaluationContext();
1865  PopExpressionEvaluationContext();
1866 }
1867 
1868 static bool FinishOpenMPLinearClause(OMPLinearClause &Clause, DeclRefExpr *IV,
1869  Expr *NumIterations, Sema &SemaRef,
1870  Scope *S, DSAStackTy *Stack);
1871 
1872 namespace {
1873 
1874 class VarDeclFilterCCC final : public CorrectionCandidateCallback {
1875 private:
1876  Sema &SemaRef;
1877 
1878 public:
1879  explicit VarDeclFilterCCC(Sema &S) : SemaRef(S) {}
1880  bool ValidateCandidate(const TypoCorrection &Candidate) override {
1881  NamedDecl *ND = Candidate.getCorrectionDecl();
1882  if (const auto *VD = dyn_cast_or_null<VarDecl>(ND)) {
1883  return VD->hasGlobalStorage() &&
1884  SemaRef.isDeclInScope(ND, SemaRef.getCurLexicalContext(),
1885  SemaRef.getCurScope());
1886  }
1887  return false;
1888  }
1889 };
1890 
1891 class VarOrFuncDeclFilterCCC final : public CorrectionCandidateCallback {
1892 private:
1893  Sema &SemaRef;
1894 
1895 public:
1896  explicit VarOrFuncDeclFilterCCC(Sema &S) : SemaRef(S) {}
1897  bool ValidateCandidate(const TypoCorrection &Candidate) override {
1898  NamedDecl *ND = Candidate.getCorrectionDecl();
1899  if (ND && (isa<VarDecl>(ND) || isa<FunctionDecl>(ND))) {
1900  return SemaRef.isDeclInScope(ND, SemaRef.getCurLexicalContext(),
1901  SemaRef.getCurScope());
1902  }
1903  return false;
1904  }
1905 };
1906 
1907 } // namespace
1908 
// NOTE(review): the opening signature line was lost in extraction; per the
// body this routine resolves the variable named as a 'threadprivate'
// directive argument and returns a DeclRefExpr for it (or ExprError on any
// restriction violation). The several truncated 'bool IsDecl =' initializers
// below presumably read
// 'VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;'
// matching the intact occurrences later in this file — confirm upstream.
1910  CXXScopeSpec &ScopeSpec,
1911  const DeclarationNameInfo &Id) {
1912  LookupResult Lookup(*this, Id, LookupOrdinaryName);
1913  LookupParsedName(Lookup, CurScope, &ScopeSpec, true);
1914 
1915  if (Lookup.isAmbiguous())
1916  return ExprError();
1917 
1918  VarDecl *VD;
1919  if (!Lookup.isSingleResult()) {
     // Attempt typo correction, restricted to in-scope global variables.
1920  if (TypoCorrection Corrected = CorrectTypo(
1921  Id, LookupOrdinaryName, CurScope, nullptr,
1922  llvm::make_unique<VarDeclFilterCCC>(*this), CTK_ErrorRecovery)) {
1923  diagnoseTypo(Corrected,
1924  PDiag(Lookup.empty()
1925  ? diag::err_undeclared_var_use_suggest
1926  : diag::err_omp_expected_var_arg_suggest)
1927  << Id.getName());
1928  VD = Corrected.getCorrectionDeclAs<VarDecl>();
1929  } else {
1930  Diag(Id.getLoc(), Lookup.empty() ? diag::err_undeclared_var_use
1931  : diag::err_omp_expected_var_arg)
1932  << Id.getName();
1933  return ExprError();
1934  }
1935  } else if (!(VD = Lookup.getAsSingle<VarDecl>())) {
1936  Diag(Id.getLoc(), diag::err_omp_expected_var_arg) << Id.getName();
1937  Diag(Lookup.getFoundDecl()->getLocation(), diag::note_declared_at);
1938  return ExprError();
1939  }
1940  Lookup.suppressDiagnostics();
1941 
1942  // OpenMP [2.9.2, Syntax, C/C++]
1943  // Variables must be file-scope, namespace-scope, or static block-scope.
1944  if (!VD->hasGlobalStorage()) {
1945  Diag(Id.getLoc(), diag::err_omp_global_var_arg)
1946  << getOpenMPDirectiveName(OMPD_threadprivate) << !VD->isStaticLocal();
1947  bool IsDecl =
1949  Diag(VD->getLocation(),
1950  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
1951  << VD;
1952  return ExprError();
1953  }
1954 
1955  VarDecl *CanonicalVD = VD->getCanonicalDecl();
1956  NamedDecl *ND = CanonicalVD;
1957  // OpenMP [2.9.2, Restrictions, C/C++, p.2]
1958  // A threadprivate directive for file-scope variables must appear outside
1959  // any definition or declaration.
1960  if (CanonicalVD->getDeclContext()->isTranslationUnit() &&
1961  !getCurLexicalContext()->isTranslationUnit()) {
1962  Diag(Id.getLoc(), diag::err_omp_var_scope)
1963  << getOpenMPDirectiveName(OMPD_threadprivate) << VD;
1964  bool IsDecl =
1966  Diag(VD->getLocation(),
1967  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
1968  << VD;
1969  return ExprError();
1970  }
1971  // OpenMP [2.9.2, Restrictions, C/C++, p.3]
1972  // A threadprivate directive for static class member variables must appear
1973  // in the class definition, in the same scope in which the member
1974  // variables are declared.
1975  if (CanonicalVD->isStaticDataMember() &&
1976  !CanonicalVD->getDeclContext()->Equals(getCurLexicalContext())) {
1977  Diag(Id.getLoc(), diag::err_omp_var_scope)
1978  << getOpenMPDirectiveName(OMPD_threadprivate) << VD;
1979  bool IsDecl =
1981  Diag(VD->getLocation(),
1982  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
1983  << VD;
1984  return ExprError();
1985  }
1986  // OpenMP [2.9.2, Restrictions, C/C++, p.4]
1987  // A threadprivate directive for namespace-scope variables must appear
1988  // outside any definition or declaration other than the namespace
1989  // definition itself.
1990  if (CanonicalVD->getDeclContext()->isNamespace() &&
1991  (!getCurLexicalContext()->isFileContext() ||
1992  !getCurLexicalContext()->Encloses(CanonicalVD->getDeclContext()))) {
1993  Diag(Id.getLoc(), diag::err_omp_var_scope)
1994  << getOpenMPDirectiveName(OMPD_threadprivate) << VD;
1995  bool IsDecl =
1997  Diag(VD->getLocation(),
1998  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
1999  << VD;
2000  return ExprError();
2001  }
2002  // OpenMP [2.9.2, Restrictions, C/C++, p.6]
2003  // A threadprivate directive for static block-scope variables must appear
2004  // in the scope of the variable and not in a nested scope.
2005  if (CanonicalVD->isStaticLocal() && CurScope &&
2006  !isDeclInScope(ND, getCurLexicalContext(), CurScope)) {
2007  Diag(Id.getLoc(), diag::err_omp_var_scope)
2008  << getOpenMPDirectiveName(OMPD_threadprivate) << VD;
2009  bool IsDecl =
2011  Diag(VD->getLocation(),
2012  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2013  << VD;
2014  return ExprError();
2015  }
2016 
2017  // OpenMP [2.9.2, Restrictions, C/C++, p.2-6]
2018  // A threadprivate directive must lexically precede all references to any
2019  // of the variables in its list.
2020  if (VD->isUsed() && !DSAStack->isThreadPrivate(VD)) {
2021  Diag(Id.getLoc(), diag::err_omp_var_used)
2022  << getOpenMPDirectiveName(OMPD_threadprivate) << VD;
2023  return ExprError();
2024  }
2025 
2026  QualType ExprType = VD->getType().getNonReferenceType();
2027  return DeclRefExpr::Create(Context, NestedNameSpecifierLoc(),
2028  SourceLocation(), VD,
2029  /*RefersToEnclosingVariableOrCapture=*/false,
2030  Id.getLoc(), ExprType, VK_LValue);
2031 }
2032 
// NOTE(review): the signature lines were lost in extraction; this is the
// parser callback for '#pragma omp threadprivate' (a SourceLocation 'Loc' is
// also in scope). Validates the variable list via CheckOMPThreadPrivateDecl
// and, on success, adds the resulting declaration to the current context.
2035  ArrayRef<Expr *> VarList) {
2036  if (OMPThreadPrivateDecl *D = CheckOMPThreadPrivateDecl(Loc, VarList)) {
2037  CurContext->addDecl(D);
2038  return DeclGroupPtrTy::make(DeclGroupRef(D));
2039  }
2040  return nullptr;
2041 }
2042 
2043 namespace {
2044 class LocalVarRefChecker final
2045  : public ConstStmtVisitor<LocalVarRefChecker, bool> {
2046  Sema &SemaRef;
2047 
2048 public:
2049  bool VisitDeclRefExpr(const DeclRefExpr *E) {
2050  if (const auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
2051  if (VD->hasLocalStorage()) {
2052  SemaRef.Diag(E->getBeginLoc(),
2053  diag::err_omp_local_var_in_threadprivate_init)
2054  << E->getSourceRange();
2055  SemaRef.Diag(VD->getLocation(), diag::note_defined_here)
2056  << VD << VD->getSourceRange();
2057  return true;
2058  }
2059  }
2060  return false;
2061  }
2062  bool VisitStmt(const Stmt *S) {
2063  for (const Stmt *Child : S->children()) {
2064  if (Child && Visit(Child))
2065  return true;
2066  }
2067  return false;
2068  }
2069  explicit LocalVarRefChecker(Sema &SemaRef) : SemaRef(SemaRef) {}
2070 };
2071 } // namespace
2072 
// NOTE(review): the signature lines and the declaration of the local 'Vars'
// vector (filled below and passed to OMPThreadPrivateDecl::Create) were lost
// in extraction; this is the checking routine for threadprivate variable
// lists — confirm against upstream SemaOpenMP.cpp.
2076  for (Expr *RefExpr : VarList) {
2077  auto *DE = cast<DeclRefExpr>(RefExpr);
2078  auto *VD = cast<VarDecl>(DE->getDecl());
2079  SourceLocation ILoc = DE->getExprLoc();
2080 
2081  // Mark variable as used.
2082  VD->setReferenced();
2083  VD->markUsed(Context);
2084 
2085  QualType QType = VD->getType();
2086  if (QType->isDependentType() || QType->isInstantiationDependentType()) {
2087  // It will be analyzed later.
2088  Vars.push_back(DE);
2089  continue;
2090  }
2091 
2092  // OpenMP [2.9.2, Restrictions, C/C++, p.10]
2093  // A threadprivate variable must not have an incomplete type.
2094  if (RequireCompleteType(ILoc, VD->getType(),
2095  diag::err_omp_threadprivate_incomplete_type)) {
2096  continue;
2097  }
2098 
2099  // OpenMP [2.9.2, Restrictions, C/C++, p.10]
2100  // A threadprivate variable must not have a reference type.
2101  if (VD->getType()->isReferenceType()) {
2102  Diag(ILoc, diag::err_omp_ref_type_arg)
2103  << getOpenMPDirectiveName(OMPD_threadprivate) << VD->getType();
2104  bool IsDecl =
2105  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
2106  Diag(VD->getLocation(),
2107  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2108  << VD;
2109  continue;
2110  }
2111 
2112  // Check if this is a TLS variable. If TLS is not being supported, produce
2113  // the corresponding diagnostic.
2114  if ((VD->getTLSKind() != VarDecl::TLS_None &&
2115  !(VD->hasAttr<OMPThreadPrivateDeclAttr>() &&
2116  getLangOpts().OpenMPUseTLS &&
2117  getASTContext().getTargetInfo().isTLSSupported())) ||
2118  (VD->getStorageClass() == SC_Register && VD->hasAttr<AsmLabelAttr>() &&
2119  !VD->isLocalVarDecl())) {
2120  Diag(ILoc, diag::err_omp_var_thread_local)
2121  << VD << ((VD->getTLSKind() != VarDecl::TLS_None) ? 0 : 1);
2122  bool IsDecl =
2123  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
2124  Diag(VD->getLocation(),
2125  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2126  << VD;
2127  continue;
2128  }
2129 
2130  // Check if initial value of threadprivate variable reference variable with
2131  // local storage (it is not supported by runtime).
2132  if (const Expr *Init = VD->getAnyInitializer()) {
2133  LocalVarRefChecker Checker(*this);
2134  if (Checker.Visit(Init))
2135  continue;
2136  }
2137 
    // The variable passed all checks: record it, set its DSA, and attach the
    // implicit threadprivate attribute (notifying AST listeners).
2138  Vars.push_back(RefExpr);
2139  DSAStack->addDSA(VD, DE, OMPC_threadprivate);
2140  VD->addAttr(OMPThreadPrivateDeclAttr::CreateImplicit(
2141  Context, SourceRange(Loc, Loc)));
2142  if (ASTMutationListener *ML = Context.getASTMutationListener())
2143  ML->DeclarationMarkedOpenMPThreadPrivate(VD);
2144  }
2145  OMPThreadPrivateDecl *D = nullptr;
2146  if (!Vars.empty()) {
2147  D = OMPThreadPrivateDecl::Create(Context, getCurLexicalContext(), Loc,
2148  Vars);
2149  D->setAccess(AS_public);
2150  }
2151  return D;
2152 }
2153 
// NOTE(review): the signature lines were lost in extraction; this is the
// parser callback for '#pragma omp requires' (a SourceLocation 'Loc' is also
// in scope). The directive is only valid at file scope; a valid declaration
// is added to the current context and recorded on the DSA stack via
// addRequiresDecl.
2156  ArrayRef<OMPClause *> ClauseList) {
2157  OMPRequiresDecl *D = nullptr;
2158  if (!CurContext->isFileContext()) {
2159  Diag(Loc, diag::err_omp_invalid_scope) << "requires";
2160  } else {
2161  D = CheckOMPRequiresDecl(Loc, ClauseList);
2162  if (D) {
2163  CurContext->addDecl(D);
2164  DSAStack->addRequiresDecl(D);
2165  }
2166  }
2167  return DeclGroupPtrTy::make(DeclGroupRef(D));
2168 }
2169 
// NOTE(review): the signature lines were lost in extraction. Builds the
// OMPRequiresDecl node unless the clause list duplicates a previously seen
// 'requires' clause (per DSAStack->hasDuplicateRequiresClause), in which
// case nullptr is returned.
2171  ArrayRef<OMPClause *> ClauseList) {
2172  if (!DSAStack->hasDuplicateRequiresClause(ClauseList))
2173  return OMPRequiresDecl::Create(Context, getCurLexicalContext(), Loc,
2174  ClauseList);
2175  return nullptr;
2176 }
2177 
2178 static void reportOriginalDsa(Sema &SemaRef, const DSAStackTy *Stack,
2179  const ValueDecl *D,
2180  const DSAStackTy::DSAVarData &DVar,
2181  bool IsLoopIterVar = false) {
2182  if (DVar.RefExpr) {
2183  SemaRef.Diag(DVar.RefExpr->getExprLoc(), diag::note_omp_explicit_dsa)
2184  << getOpenMPClauseName(DVar.CKind);
2185  return;
2186  }
2187  enum {
2188  PDSA_StaticMemberShared,
2189  PDSA_StaticLocalVarShared,
2190  PDSA_LoopIterVarPrivate,
2191  PDSA_LoopIterVarLinear,
2192  PDSA_LoopIterVarLastprivate,
2193  PDSA_ConstVarShared,
2194  PDSA_GlobalVarShared,
2195  PDSA_TaskVarFirstprivate,
2196  PDSA_LocalVarPrivate,
2197  PDSA_Implicit
2198  } Reason = PDSA_Implicit;
2199  bool ReportHint = false;
2200  auto ReportLoc = D->getLocation();
2201  auto *VD = dyn_cast<VarDecl>(D);
2202  if (IsLoopIterVar) {
2203  if (DVar.CKind == OMPC_private)
2204  Reason = PDSA_LoopIterVarPrivate;
2205  else if (DVar.CKind == OMPC_lastprivate)
2206  Reason = PDSA_LoopIterVarLastprivate;
2207  else
2208  Reason = PDSA_LoopIterVarLinear;
2209  } else if (isOpenMPTaskingDirective(DVar.DKind) &&
2210  DVar.CKind == OMPC_firstprivate) {
2211  Reason = PDSA_TaskVarFirstprivate;
2212  ReportLoc = DVar.ImplicitDSALoc;
2213  } else if (VD && VD->isStaticLocal())
2214  Reason = PDSA_StaticLocalVarShared;
2215  else if (VD && VD->isStaticDataMember())
2216  Reason = PDSA_StaticMemberShared;
2217  else if (VD && VD->isFileVarDecl())
2218  Reason = PDSA_GlobalVarShared;
2219  else if (D->getType().isConstant(SemaRef.getASTContext()))
2220  Reason = PDSA_ConstVarShared;
2221  else if (VD && VD->isLocalVarDecl() && DVar.CKind == OMPC_private) {
2222  ReportHint = true;
2223  Reason = PDSA_LocalVarPrivate;
2224  }
2225  if (Reason != PDSA_Implicit) {
2226  SemaRef.Diag(ReportLoc, diag::note_omp_predetermined_dsa)
2227  << Reason << ReportHint
2228  << getOpenMPDirectiveName(Stack->getCurrentDirective());
2229  } else if (DVar.ImplicitDSALoc.isValid()) {
2230  SemaRef.Diag(DVar.ImplicitDSALoc, diag::note_omp_implicit_dsa)
2231  << getOpenMPClauseName(DVar.CKind);
2232  }
2233 }
2234 
2235 namespace {
/// Computes implicit data-sharing attributes for variables referenced in the
/// body of an OpenMP directive: collects implicit firstprivate and implicit
/// map candidates, records variables needing an explicit DSA under
/// default(none), and diagnoses reduction list items accessed inside tasks.
/// NOTE(review): several lambda/callback header lines inside this class were
/// lost in extraction (e.g. in VisitSubCaptures and the mappable-component
/// callbacks) — confirm against upstream SemaOpenMP.cpp.
2236 class DSAAttrChecker final : public StmtVisitor<DSAAttrChecker, void> {
2237  DSAStackTy *Stack;
2238  Sema &SemaRef;
2239  bool ErrorFound = false;
2240  CapturedStmt *CS = nullptr;
2241  llvm::SmallVector<Expr *, 4> ImplicitFirstprivate;
2242  llvm::SmallVector<Expr *, 4> ImplicitMap;
2243  Sema::VarsWithInheritedDSAType VarsWithInheritedDSA;
2244  llvm::SmallDenseSet<const ValueDecl *, 4> ImplicitDeclarations;
2245 
  // Re-visits variables implicitly captured by a nested directive, so their
  // implicit DSA in the current region is computed too.
2246  void VisitSubCaptures(OMPExecutableDirective *S) {
2247  // Check implicitly captured variables.
2248  if (!S->hasAssociatedStmt() || !S->getAssociatedStmt())
2249  return;
2250  for (const CapturedStmt::Capture &Cap :
2252  if (!Cap.capturesVariable())
2253  continue;
2254  VarDecl *VD = Cap.getCapturedVar();
2255  // Do not try to map the variable if it or its sub-component was mapped
2256  // already.
2257  if (isOpenMPTargetExecutionDirective(Stack->getCurrentDirective()) &&
2258  Stack->checkMappableExprComponentListsForDecl(
2259  VD, /*CurrentRegionOnly=*/true,
2261  OpenMPClauseKind) { return true; }))
2262  continue;
2264  SemaRef, VD, VD->getType().getNonLValueExprType(SemaRef.Context),
2265  Cap.getLocation(), /*RefersToCapture=*/true);
2266  Visit(DRE);
2267  }
2268  }
2269 
2270 public:
  // Determines the implicit DSA for a variable reference in the region body.
2271  void VisitDeclRefExpr(DeclRefExpr *E) {
2272  if (E->isTypeDependent() || E->isValueDependent() ||
2274  return;
2275  if (auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
2276  VD = VD->getCanonicalDecl();
2277  // Skip internally declared variables.
2278  if (VD->hasLocalStorage() && !CS->capturesVariable(VD))
2279  return;
2280 
2281  DSAStackTy::DSAVarData DVar = Stack->getTopDSA(VD, /*FromParent=*/false);
2282  // Check if the variable has explicit DSA set and stop analysis if it so.
2283  if (DVar.RefExpr || !ImplicitDeclarations.insert(VD).second)
2284  return;
2285 
2286  // Skip internally declared static variables.
2288  OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
2289  if (VD->hasGlobalStorage() && !CS->capturesVariable(VD) &&
2290  (!Res || *Res != OMPDeclareTargetDeclAttr::MT_Link))
2291  return;
2292 
2293  SourceLocation ELoc = E->getExprLoc();
2294  OpenMPDirectiveKind DKind = Stack->getCurrentDirective();
2295  // The default(none) clause requires that each variable that is referenced
2296  // in the construct, and does not have a predetermined data-sharing
2297  // attribute, must have its data-sharing attribute explicitly determined
2298  // by being listed in a data-sharing attribute clause.
2299  if (DVar.CKind == OMPC_unknown && Stack->getDefaultDSA() == DSA_none &&
2300  isImplicitOrExplicitTaskingRegion(DKind) &&
2301  VarsWithInheritedDSA.count(VD) == 0) {
2302  VarsWithInheritedDSA[VD] = E;
2303  return;
2304  }
2305 
     // In target regions, unmapped variables become implicit map or
     // implicit firstprivate candidates.
2306  if (isOpenMPTargetExecutionDirective(DKind) &&
2307  !Stack->isLoopControlVariable(VD).first) {
2308  if (!Stack->checkMappableExprComponentListsForDecl(
2309  VD, /*CurrentRegionOnly=*/true,
2311  StackComponents,
2312  OpenMPClauseKind) {
2313  // Variable is used if it has been marked as an array, array
2314  // section or the variable iself.
2315  return StackComponents.size() == 1 ||
2316  std::all_of(
2317  std::next(StackComponents.rbegin()),
2318  StackComponents.rend(),
2319  [](const OMPClauseMappableExprCommon::
2320  MappableComponent &MC) {
2321  return MC.getAssociatedDeclaration() ==
2322  nullptr &&
2323  (isa<OMPArraySectionExpr>(
2324  MC.getAssociatedExpression()) ||
2325  isa<ArraySubscriptExpr>(
2326  MC.getAssociatedExpression()));
2327  });
2328  })) {
2329  bool IsFirstprivate = false;
2330  // By default lambdas are captured as firstprivates.
2331  if (const auto *RD =
2332  VD->getType().getNonReferenceType()->getAsCXXRecordDecl())
2333  IsFirstprivate = RD->isLambda();
2334  IsFirstprivate =
2335  IsFirstprivate ||
2336  (VD->getType().getNonReferenceType()->isScalarType() &&
2337  Stack->getDefaultDMA() != DMA_tofrom_scalar && !Res);
2338  if (IsFirstprivate)
2339  ImplicitFirstprivate.emplace_back(E);
2340  else
2341  ImplicitMap.emplace_back(E);
2342  return;
2343  }
2344  }
2345 
2346  // OpenMP [2.9.3.6, Restrictions, p.2]
2347  // A list item that appears in a reduction clause of the innermost
2348  // enclosing worksharing or parallel construct may not be accessed in an
2349  // explicit task.
2350  DVar = Stack->hasInnermostDSA(
2351  VD, [](OpenMPClauseKind C) { return C == OMPC_reduction; },
2352  [](OpenMPDirectiveKind K) {
2353  return isOpenMPParallelDirective(K) ||
2355  },
2356  /*FromParent=*/true);
2357  if (isOpenMPTaskingDirective(DKind) && DVar.CKind == OMPC_reduction) {
2358  ErrorFound = true;
2359  SemaRef.Diag(ELoc, diag::err_omp_reduction_in_task);
2360  reportOriginalDsa(SemaRef, Stack, VD, DVar);
2361  return;
2362  }
2363 
2364  // Define implicit data-sharing attributes for task.
2365  DVar = Stack->getImplicitDSA(VD, /*FromParent=*/false);
2366  if (isOpenMPTaskingDirective(DKind) && DVar.CKind != OMPC_shared &&
2367  !Stack->isLoopControlVariable(VD).first)
2368  ImplicitFirstprivate.push_back(E);
2369  }
2370  }
  // Determines the implicit DSA/map treatment of member accesses
  // (this->field and other member expressions) in the region body.
2371  void VisitMemberExpr(MemberExpr *E) {
2372  if (E->isTypeDependent() || E->isValueDependent() ||
2374  return;
2375  auto *FD = dyn_cast<FieldDecl>(E->getMemberDecl());
2376  OpenMPDirectiveKind DKind = Stack->getCurrentDirective();
2377  if (auto *TE = dyn_cast<CXXThisExpr>(E->getBase()->IgnoreParens())) {
2378  if (!FD)
2379  return;
2380  DSAStackTy::DSAVarData DVar = Stack->getTopDSA(FD, /*FromParent=*/false);
2381  // Check if the variable has explicit DSA set and stop analysis if it
2382  // so.
2383  if (DVar.RefExpr || !ImplicitDeclarations.insert(FD).second)
2384  return;
2385 
2386  if (isOpenMPTargetExecutionDirective(DKind) &&
2387  !Stack->isLoopControlVariable(FD).first &&
2388  !Stack->checkMappableExprComponentListsForDecl(
2389  FD, /*CurrentRegionOnly=*/true,
2391  StackComponents,
2392  OpenMPClauseKind) {
2393  return isa<CXXThisExpr>(
2394  cast<MemberExpr>(
2395  StackComponents.back().getAssociatedExpression())
2396  ->getBase()
2397  ->IgnoreParens());
2398  })) {
2399  // OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, C/C++, p.3]
2400  // A bit-field cannot appear in a map clause.
2401  //
2402  if (FD->isBitField())
2403  return;
2404 
2405  // Check to see if the member expression is referencing a class that
2406  // has already been explicitly mapped
2407  if (Stack->isClassPreviouslyMapped(TE->getType()))
2408  return;
2409 
2410  ImplicitMap.emplace_back(E);
2411  return;
2412  }
2413 
2414  SourceLocation ELoc = E->getExprLoc();
2415  // OpenMP [2.9.3.6, Restrictions, p.2]
2416  // A list item that appears in a reduction clause of the innermost
2417  // enclosing worksharing or parallel construct may not be accessed in
2418  // an explicit task.
2419  DVar = Stack->hasInnermostDSA(
2420  FD, [](OpenMPClauseKind C) { return C == OMPC_reduction; },
2421  [](OpenMPDirectiveKind K) {
2422  return isOpenMPParallelDirective(K) ||
2424  },
2425  /*FromParent=*/true);
2426  if (isOpenMPTaskingDirective(DKind) && DVar.CKind == OMPC_reduction) {
2427  ErrorFound = true;
2428  SemaRef.Diag(ELoc, diag::err_omp_reduction_in_task);
2429  reportOriginalDsa(SemaRef, Stack, FD, DVar);
2430  return;
2431  }
2432 
2433  // Define implicit data-sharing attributes for task.
2434  DVar = Stack->getImplicitDSA(FD, /*FromParent=*/false);
2435  if (isOpenMPTaskingDirective(DKind) && DVar.CKind != OMPC_shared &&
2436  !Stack->isLoopControlVariable(FD).first) {
2437  // Check if there is a captured expression for the current field in the
2438  // region. Do not mark it as firstprivate unless there is no captured
2439  // expression.
2440  // TODO: try to make it firstprivate.
2441  if (DVar.CKind != OMPC_unknown)
2442  ImplicitFirstprivate.push_back(E)
2443  }
2444  return;
2445  }
2446  if (isOpenMPTargetExecutionDirective(DKind)) {
2448  if (!checkMapClauseExpressionBase(SemaRef, E, CurComponents, OMPC_map,
2449  /*NoDiagnose=*/true))
2450  return;
2451  const auto *VD = cast<ValueDecl>(
2452  CurComponents.back().getAssociatedDeclaration()->getCanonicalDecl());
2453  if (!Stack->checkMappableExprComponentListsForDecl(
2454  VD, /*CurrentRegionOnly=*/true,
2455  [&CurComponents](
2457  StackComponents,
2458  OpenMPClauseKind) {
         // Accept only component lists whose suffix matches the current
         // expression's components (same declarations, compatible kinds).
2459  auto CCI = CurComponents.rbegin();
2460  auto CCE = CurComponents.rend();
2461  for (const auto &SC : llvm::reverse(StackComponents)) {
2462  // Do both expressions have the same kind?
2463  if (CCI->getAssociatedExpression()->getStmtClass() !=
2464  SC.getAssociatedExpression()->getStmtClass())
2465  if (!(isa<OMPArraySectionExpr>(
2466  SC.getAssociatedExpression()) &&
2467  isa<ArraySubscriptExpr>(
2468  CCI->getAssociatedExpression())))
2469  return false;
2470 
2471  const Decl *CCD = CCI->getAssociatedDeclaration();
2472  const Decl *SCD = SC.getAssociatedDeclaration();
2473  CCD = CCD ? CCD->getCanonicalDecl() : nullptr;
2474  SCD = SCD ? SCD->getCanonicalDecl() : nullptr;
2475  if (SCD != CCD)
2476  return false;
2477  std::advance(CCI, 1);
2478  if (CCI == CCE)
2479  break;
2480  }
2481  return true;
2482  })) {
2483  Visit(E->getBase());
2484  }
2485  } else {
2486  Visit(E->getBase());
2487  }
2488  }
  // Visits clause arguments (except implicitly generated firstprivate/map
  // clauses) and then the directive's own implicit captures.
2489  void VisitOMPExecutableDirective(OMPExecutableDirective *S) {
2490  for (OMPClause *C : S->clauses()) {
2491  // Skip analysis of arguments of implicitly defined firstprivate clause
2492  // for task|target directives.
2493  // Skip analysis of arguments of implicitly defined map clause for target
2494  // directives.
2495  if (C && !((isa<OMPFirstprivateClause>(C) || isa<OMPMapClause>(C)) &&
2496  C->isImplicit())) {
2497  for (Stmt *CC : C->children()) {
2498  if (CC)
2499  Visit(CC);
2500  }
2501  }
2502  }
2503  // Check implicitly captured variables.
2504  VisitSubCaptures(S);
2505  }
2506  void VisitStmt(Stmt *S) {
2507  for (Stmt *C : S->children()) {
2508  if (C) {
2509  // Check implicitly captured variables in the task-based directives to
2510  // check if they must be firstprivatized.
2511  Visit(C);
2512  }
2513  }
2514  }
2515 
  // Result accessors, consumed after the walk completes.
2516  bool isErrorFound() const { return ErrorFound; }
2517  ArrayRef<Expr *> getImplicitFirstprivate() const {
2518  return ImplicitFirstprivate;
2519  }
2520  ArrayRef<Expr *> getImplicitMap() const { return ImplicitMap; }
2521  const Sema::VarsWithInheritedDSAType &getVarsWithInheritedDSA() const {
2522  return VarsWithInheritedDSA;
2523  }
2524 
2525  DSAAttrChecker(DSAStackTy *S, Sema &SemaRef, CapturedStmt *CS)
2526  : Stack(S), SemaRef(SemaRef), ErrorFound(false), CS(CS) {}
2527 };
2528 } // namespace
2529 
  // Open the captured region(s) for the given directive kind. Combined
  // directives push one captured region per capture level, outermost first.
  switch (DKind) {
  case OMPD_parallel:
  case OMPD_parallel_for:
  case OMPD_parallel_for_simd:
  case OMPD_parallel_sections:
  case OMPD_teams:
  case OMPD_teams_distribute:
  case OMPD_teams_distribute_simd: {
    QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
    QualType KmpInt32PtrTy =
        Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
    // A single captured region with the standard outlined-function params.
    Sema::CapturedParamNameType Params[] = {
        std::make_pair(".global_tid.", KmpInt32PtrTy),
        std::make_pair(".bound_tid.", KmpInt32PtrTy),
        std::make_pair(StringRef(), QualType()) // __context with shared vars
    };
    ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
                             Params);
    break;
  }
  case OMPD_target_teams:
  case OMPD_target_parallel:
  case OMPD_target_parallel_for:
  case OMPD_target_parallel_for_simd:
  case OMPD_target_teams_distribute:
  case OMPD_target_teams_distribute_simd: {
    QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
    QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
    QualType KmpInt32PtrTy =
        Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
    QualType Args[] = {VoidPtrTy};
    // NOTE(review): the declaration of EPI (the prototype info used to build
    // the variadic copy-function type) is elided in this excerpt, here and
    // in the analogous cases below.
    EPI.Variadic = true;
    QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
    // Outermost region: the task-like wrapper around the target construct.
    Sema::CapturedParamNameType Params[] = {
        std::make_pair(".global_tid.", KmpInt32Ty),
        std::make_pair(".part_id.", KmpInt32PtrTy),
        std::make_pair(".privates.", VoidPtrTy),
        std::make_pair(
            ".copy_fn.",
            Context.getPointerType(CopyFnType).withConst().withRestrict()),
        std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
        std::make_pair(StringRef(), QualType()) // __context with shared vars
    };
    ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
                             Params);
    // Mark this captured region as inlined, because we don't use outlined
    // function directly.
    getCurCapturedRegion()->TheCapturedDecl->addAttr(
        AlwaysInlineAttr::CreateImplicit(
            Context, AlwaysInlineAttr::Keyword_forceinline));
    Sema::CapturedParamNameType ParamsTarget[] = {
        std::make_pair(StringRef(), QualType()) // __context with shared vars
    };
    // Start a captured region for 'target' with no implicit parameters.
    ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
                             ParamsTarget);
    Sema::CapturedParamNameType ParamsTeamsOrParallel[] = {
        std::make_pair(".global_tid.", KmpInt32PtrTy),
        std::make_pair(".bound_tid.", KmpInt32PtrTy),
        std::make_pair(StringRef(), QualType()) // __context with shared vars
    };
    // Start a captured region for 'teams' or 'parallel'. Both regions have
    // the same implicit parameters.
    ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
                             ParamsTeamsOrParallel);
    break;
  }
  case OMPD_target:
  case OMPD_target_simd: {
    QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
    QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
    QualType KmpInt32PtrTy =
        Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
    QualType Args[] = {VoidPtrTy};
    EPI.Variadic = true;
    QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
    Sema::CapturedParamNameType Params[] = {
        std::make_pair(".global_tid.", KmpInt32Ty),
        std::make_pair(".part_id.", KmpInt32PtrTy),
        std::make_pair(".privates.", VoidPtrTy),
        std::make_pair(
            ".copy_fn.",
            Context.getPointerType(CopyFnType).withConst().withRestrict()),
        std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
        std::make_pair(StringRef(), QualType()) // __context with shared vars
    };
    ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
                             Params);
    // Mark this captured region as inlined, because we don't use outlined
    // function directly.
    getCurCapturedRegion()->TheCapturedDecl->addAttr(
        AlwaysInlineAttr::CreateImplicit(
            Context, AlwaysInlineAttr::Keyword_forceinline));
    // Inner 'target' region: only the __context with shared vars.
    ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
                             std::make_pair(StringRef(), QualType()));
    break;
  }
  case OMPD_simd:
  case OMPD_for:
  case OMPD_for_simd:
  case OMPD_sections:
  case OMPD_section:
  case OMPD_single:
  case OMPD_master:
  case OMPD_critical:
  case OMPD_taskgroup:
  case OMPD_distribute:
  case OMPD_distribute_simd:
  case OMPD_ordered:
  case OMPD_atomic:
  case OMPD_target_data: {
    // These constructs only need the __context parameter with shared vars.
    Sema::CapturedParamNameType Params[] = {
        std::make_pair(StringRef(), QualType()) // __context with shared vars
    };
    ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
                             Params);
    break;
  }
  case OMPD_task: {
    QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
    QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
    QualType KmpInt32PtrTy =
        Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
    QualType Args[] = {VoidPtrTy};
    EPI.Variadic = true;
    QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
    Sema::CapturedParamNameType Params[] = {
        std::make_pair(".global_tid.", KmpInt32Ty),
        std::make_pair(".part_id.", KmpInt32PtrTy),
        std::make_pair(".privates.", VoidPtrTy),
        std::make_pair(
            ".copy_fn.",
            Context.getPointerType(CopyFnType).withConst().withRestrict()),
        std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
        std::make_pair(StringRef(), QualType()) // __context with shared vars
    };
    ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
                             Params);
    // Mark this captured region as inlined, because we don't use outlined
    // function directly.
    getCurCapturedRegion()->TheCapturedDecl->addAttr(
        AlwaysInlineAttr::CreateImplicit(
            Context, AlwaysInlineAttr::Keyword_forceinline));
    break;
  }
  case OMPD_taskloop:
  case OMPD_taskloop_simd: {
    QualType KmpInt32Ty =
        Context.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1)
            .withConst();
    QualType KmpUInt64Ty =
        Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0)
            .withConst();
    QualType KmpInt64Ty =
        Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1)
            .withConst();
    QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
    QualType KmpInt32PtrTy =
        Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
    QualType Args[] = {VoidPtrTy};
    EPI.Variadic = true;
    QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
    // Taskloops additionally expose the loop bounds, stride, the
    // last-iteration flag and the reductions descriptor.
    Sema::CapturedParamNameType Params[] = {
        std::make_pair(".global_tid.", KmpInt32Ty),
        std::make_pair(".part_id.", KmpInt32PtrTy),
        std::make_pair(".privates.", VoidPtrTy),
        std::make_pair(
            ".copy_fn.",
            Context.getPointerType(CopyFnType).withConst().withRestrict()),
        std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
        std::make_pair(".lb.", KmpUInt64Ty),
        std::make_pair(".ub.", KmpUInt64Ty),
        std::make_pair(".st.", KmpInt64Ty),
        std::make_pair(".liter.", KmpInt32Ty),
        std::make_pair(".reductions.", VoidPtrTy),
        std::make_pair(StringRef(), QualType()) // __context with shared vars
    };
    ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
                             Params);
    // Mark this captured region as inlined, because we don't use outlined
    // function directly.
    getCurCapturedRegion()->TheCapturedDecl->addAttr(
        AlwaysInlineAttr::CreateImplicit(
            Context, AlwaysInlineAttr::Keyword_forceinline));
    break;
  }
  case OMPD_distribute_parallel_for_simd:
  case OMPD_distribute_parallel_for: {
    QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
    QualType KmpInt32PtrTy =
        Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
    // The parallel region also receives the previous (distribute) chunk
    // bounds.
    Sema::CapturedParamNameType Params[] = {
        std::make_pair(".global_tid.", KmpInt32PtrTy),
        std::make_pair(".bound_tid.", KmpInt32PtrTy),
        std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
        std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
        std::make_pair(StringRef(), QualType()) // __context with shared vars
    };
    ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
                             Params);
    break;
  }
  case OMPD_target_teams_distribute_parallel_for:
  case OMPD_target_teams_distribute_parallel_for_simd: {
    QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
    QualType KmpInt32PtrTy =
        Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
    QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();

    QualType Args[] = {VoidPtrTy};
    EPI.Variadic = true;
    QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
    // Outermost region: the task-like wrapper around the target construct.
    Sema::CapturedParamNameType Params[] = {
        std::make_pair(".global_tid.", KmpInt32Ty),
        std::make_pair(".part_id.", KmpInt32PtrTy),
        std::make_pair(".privates.", VoidPtrTy),
        std::make_pair(
            ".copy_fn.",
            Context.getPointerType(CopyFnType).withConst().withRestrict()),
        std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
        std::make_pair(StringRef(), QualType()) // __context with shared vars
    };
    ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
                             Params);
    // Mark this captured region as inlined, because we don't use outlined
    // function directly.
    getCurCapturedRegion()->TheCapturedDecl->addAttr(
        AlwaysInlineAttr::CreateImplicit(
            Context, AlwaysInlineAttr::Keyword_forceinline));
    Sema::CapturedParamNameType ParamsTarget[] = {
        std::make_pair(StringRef(), QualType()) // __context with shared vars
    };
    // Start a captured region for 'target' with no implicit parameters.
    ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
                             ParamsTarget);

    Sema::CapturedParamNameType ParamsTeams[] = {
        std::make_pair(".global_tid.", KmpInt32PtrTy),
        std::make_pair(".bound_tid.", KmpInt32PtrTy),
        std::make_pair(StringRef(), QualType()) // __context with shared vars
    };
    // Start a captured region for 'teams'.
    ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
                             ParamsTeams);

    Sema::CapturedParamNameType ParamsParallel[] = {
        std::make_pair(".global_tid.", KmpInt32PtrTy),
        std::make_pair(".bound_tid.", KmpInt32PtrTy),
        std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
        std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
        std::make_pair(StringRef(), QualType()) // __context with shared vars
    };
    // Start a captured region for 'parallel', which also receives the
    // previous (distribute) chunk bounds.
    ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
                             ParamsParallel);
    break;
  }

  case OMPD_teams_distribute_parallel_for:
  case OMPD_teams_distribute_parallel_for_simd: {
    QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
    QualType KmpInt32PtrTy =
        Context.getPointerType(KmpInt32Ty).withConst().withRestrict();

    Sema::CapturedParamNameType ParamsTeams[] = {
        std::make_pair(".global_tid.", KmpInt32PtrTy),
        std::make_pair(".bound_tid.", KmpInt32PtrTy),
        std::make_pair(StringRef(), QualType()) // __context with shared vars
    };
    // Start a captured region for 'teams'.
    ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
                             ParamsTeams);

    Sema::CapturedParamNameType ParamsParallel[] = {
        std::make_pair(".global_tid.", KmpInt32PtrTy),
        std::make_pair(".bound_tid.", KmpInt32PtrTy),
        std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
        std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
        std::make_pair(StringRef(), QualType()) // __context with shared vars
    };
    // Start a captured region for 'parallel', which also receives the
    // previous (distribute) chunk bounds.
    ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
                             ParamsParallel);
    break;
  }
  case OMPD_target_update:
  case OMPD_target_enter_data:
  case OMPD_target_exit_data: {
    QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
    QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
    QualType KmpInt32PtrTy =
        Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
    QualType Args[] = {VoidPtrTy};
    EPI.Variadic = true;
    QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
    Sema::CapturedParamNameType Params[] = {
        std::make_pair(".global_tid.", KmpInt32Ty),
        std::make_pair(".part_id.", KmpInt32PtrTy),
        std::make_pair(".privates.", VoidPtrTy),
        std::make_pair(
            ".copy_fn.",
            Context.getPointerType(CopyFnType).withConst().withRestrict()),
        std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
        std::make_pair(StringRef(), QualType()) // __context with shared vars
    };
    ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
                             Params);
    // Mark this captured region as inlined, because we don't use outlined
    // function directly.
    getCurCapturedRegion()->TheCapturedDecl->addAttr(
        AlwaysInlineAttr::CreateImplicit(
            Context, AlwaysInlineAttr::Keyword_forceinline));
    break;
  }
  case OMPD_threadprivate:
  case OMPD_taskyield:
  case OMPD_barrier:
  case OMPD_taskwait:
  case OMPD_cancellation_point:
  case OMPD_cancel:
  case OMPD_flush:
  case OMPD_declare_reduction:
  case OMPD_declare_mapper:
  case OMPD_declare_simd:
  case OMPD_declare_target:
  case OMPD_end_declare_target:
  case OMPD_requires:
    // These directives have no associated captured region at all.
    llvm_unreachable("OpenMP Directive is not allowed");
  case OMPD_unknown:
    llvm_unreachable("Unknown OpenMP directive");
  }
}
2871 
  // Combined directives expand to several nested captured regions; the
  // number of capture levels equals the number of capture regions.
  SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
  getOpenMPCaptureRegions(CaptureRegions, DKind);
  return CaptureRegions.size();
}
2877 
                                           Expr *CaptureExpr, bool WithInit,
                                           bool AsExpression) {
  assert(CaptureExpr);
  ASTContext &C = S.getASTContext();
  // When capturing as an expression keep the implicit casts of the original
  // expression; otherwise capture the underlying lvalue.
  Expr *Init = AsExpression ? CaptureExpr : CaptureExpr->IgnoreImpCasts();
  QualType Ty = Init->getType();
  if (CaptureExpr->getObjectKind() == OK_Ordinary && CaptureExpr->isGLValue()) {
    if (S.getLangOpts().CPlusPlus) {
      // C++: capture glvalues by lvalue reference.
      Ty = C.getLValueReferenceType(Ty);
    } else {
      // C has no references: capture the glvalue through a pointer and
      // initialize the capture with its address.
      Ty = C.getPointerType(Ty);
      ExprResult Res =
          S.CreateBuiltinUnaryOp(CaptureExpr->getExprLoc(), UO_AddrOf, Init);
      if (!Res.isUsable())
        return nullptr;
      Init = Res.get();
    }
    WithInit = true;
  }
  auto *CED = OMPCapturedExprDecl::Create(C, S.CurContext, Id, Ty,
                                          CaptureExpr->getBeginLoc());
  if (!WithInit)
    // Tell later phases not to emit the initializer for this capture.
    CED->addAttr(OMPCaptureNoInitAttr::CreateImplicit(C));
  S.CurContext->addHiddenDecl(CED);
  S.AddInitializerToDecl(CED, Init, /*DirectInit=*/false);
  return CED;
}
2906 
2907 static DeclRefExpr *buildCapture(Sema &S, ValueDecl *D, Expr *CaptureExpr,
2908  bool WithInit) {
2909  OMPCapturedExprDecl *CD;
2910  if (VarDecl *VD = S.isOpenMPCapturedDecl(D))
2911  CD = cast<OMPCapturedExprDecl>(VD);
2912  else
2913  CD = buildCaptureDecl(S, D->getIdentifier(), CaptureExpr, WithInit,
2914  /*AsExpression=*/false);
2915  return buildDeclRefExpr(S, CD, CD->getType().getNonReferenceType(),
2916  CaptureExpr->getExprLoc());
2917 }
2918 
static ExprResult buildCapture(Sema &S, Expr *CaptureExpr, DeclRefExpr *&Ref) {
  CaptureExpr = S.DefaultLvalueConversion(CaptureExpr).get();
  if (!Ref) {
    // Build a fresh '.capture_expr.' declaration for the expression.
    // NOTE(review): the first line of this call (declaring CD via
    // buildCaptureDecl) is elided in this excerpt.
        S, &S.getASTContext().Idents.get(".capture_expr."), CaptureExpr,
        /*WithInit=*/true, /*AsExpression=*/true);
    Ref = buildDeclRefExpr(S, CD, CD->getType().getNonReferenceType(),
                           CaptureExpr->getExprLoc());
  }
  ExprResult Res = Ref;
  if (!S.getLangOpts().CPlusPlus &&
      CaptureExpr->getObjectKind() == OK_Ordinary && CaptureExpr->isGLValue() &&
      Ref->getType()->isPointerType()) {
    // In C glvalues are captured through a pointer; dereference it to get
    // the value of the captured expression back.
    Res = S.CreateBuiltinUnaryOp(CaptureExpr->getExprLoc(), UO_Deref, Ref);
    if (!Res.isUsable())
      return ExprError();
  }
  return S.DefaultLvalueConversion(Res.get());
}
2938 
2939 namespace {
2940 // OpenMP directives parsed in this section are represented as a
2941 // CapturedStatement with an associated statement. If a syntax error
2942 // is detected during the parsing of the associated statement, the
2943 // compiler must abort processing and close the CapturedStatement.
2944 //
2945 // Combined directives such as 'target parallel' have more than one
2946 // nested CapturedStatements. This RAII ensures that we unwind out
2947 // of all the nested CapturedStatements when an error is found.
class CaptureRegionUnwinderRAII {
private:
  Sema &S;
  // Bound by reference: the surrounding code sets this to true when a
  // semantic error is found; the destructor reacts to it.
  bool &ErrorFound;
  // NOTE(review): the OpenMPDirectiveKind member declaration (DKind) is
  // elided in this excerpt; it is initialized below and read in the dtor.

public:
  CaptureRegionUnwinderRAII(Sema &S, bool &ErrorFound,
                            OpenMPDirectiveKind DKind)
      : S(S), ErrorFound(ErrorFound), DKind(DKind) {}
  ~CaptureRegionUnwinderRAII() {
    if (ErrorFound) {
      // Combined directives open one captured region per capture level;
      // close each of them on error. NOTE(review): the loop body (the
      // per-level region-error action) is elided in this excerpt.
      int ThisCaptureLevel = S.getOpenMPCaptureLevels(DKind);
      while (--ThisCaptureLevel >= 0)
    }
  }
};
2966 } // namespace
2967 
                                      ArrayRef<OMPClause *> Clauses) {
  bool ErrorFound = false;
  // Make sure all captured regions opened for this directive are unwound if
  // we bail out with an error below.
  CaptureRegionUnwinderRAII CaptureRegionUnwinder(
      *this, ErrorFound, DSAStack->getCurrentDirective());
  if (!S.isUsable()) {
    ErrorFound = true;
    return StmtError();
  }

  SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
  getOpenMPCaptureRegions(CaptureRegions, DSAStack->getCurrentDirective());
  OMPOrderedClause *OC = nullptr;
  OMPScheduleClause *SC = nullptr;
  // NOTE(review): declarations of PICs (clauses with pre-init statements)
  // and LCs (linear clauses) are elided in this excerpt.
  // This is required for proper codegen.
  for (OMPClause *Clause : Clauses) {
    if (isOpenMPTaskingDirective(DSAStack->getCurrentDirective()) &&
        Clause->getClauseKind() == OMPC_in_reduction) {
      // Capture taskgroup task_reduction descriptors inside the tasking regions
      // with the corresponding in_reduction items.
      auto *IRC = cast<OMPInReductionClause>(Clause);
      for (Expr *E : IRC->taskgroup_descriptors())
        if (E)
          MarkDeclarationsReferencedInExpr(E);
    }
    if (isOpenMPPrivate(Clause->getClauseKind()) ||
        Clause->getClauseKind() == OMPC_copyprivate ||
        (getLangOpts().OpenMPUseTLS &&
         getASTContext().getTargetInfo().isTLSSupported() &&
         Clause->getClauseKind() == OMPC_copyin)) {
      DSAStack->setForceVarCapturing(Clause->getClauseKind() == OMPC_copyin);
      // Mark all variables in private list clauses as used in inner region.
      for (Stmt *VarRef : Clause->children()) {
        if (auto *E = cast_or_null<Expr>(VarRef)) {
          MarkDeclarationsReferencedInExpr(E);
        }
      }
      DSAStack->setForceVarCapturing(/*V=*/false);
    } else if (CaptureRegions.size() > 1 ||
               CaptureRegions.back() != OMPD_unknown) {
      // Collect pre-init/post-update expressions so they can be referenced
      // in the proper capture region below.
      if (auto *C = OMPClauseWithPreInit::get(Clause))
        PICs.push_back(C);
      if (auto *C = OMPClauseWithPostUpdate::get(Clause)) {
        if (Expr *E = C->getPostUpdateExpr())
          MarkDeclarationsReferencedInExpr(E);
      }
    }
    if (Clause->getClauseKind() == OMPC_schedule)
      SC = cast<OMPScheduleClause>(Clause);
    else if (Clause->getClauseKind() == OMPC_ordered)
      OC = cast<OMPOrderedClause>(Clause);
    else if (Clause->getClauseKind() == OMPC_linear)
      LCs.push_back(cast<OMPLinearClause>(Clause));
  }
  // OpenMP, 2.7.1 Loop Construct, Restrictions
  // The nonmonotonic modifier cannot be specified if an ordered clause is
  // specified.
  if (SC &&
      (SC->getFirstScheduleModifier() == OMPC_SCHEDULE_MODIFIER_nonmonotonic ||
       SC->getSecondScheduleModifier() ==
           OMPC_SCHEDULE_MODIFIER_nonmonotonic) &&
      OC) {
    // NOTE(review): the diagnostic-location arguments (the '?:' selecting the
    // first/second modifier location) are elided in this excerpt.
    Diag(SC->getFirstScheduleModifier() == OMPC_SCHEDULE_MODIFIER_nonmonotonic
         diag::err_omp_schedule_nonmonotonic_ordered)
        << SourceRange(OC->getBeginLoc(), OC->getEndLoc());
    ErrorFound = true;
  }
  // A linear clause cannot appear together with an ordered clause that has a
  // parameter.
  if (!LCs.empty() && OC && OC->getNumForLoops()) {
    for (const OMPLinearClause *C : LCs) {
      Diag(C->getBeginLoc(), diag::err_omp_linear_ordered)
          << SourceRange(OC->getBeginLoc(), OC->getEndLoc());
    }
    ErrorFound = true;
  }
  // 'ordered' with a parameter is not allowed on worksharing simd constructs.
  if (isOpenMPWorksharingDirective(DSAStack->getCurrentDirective()) &&
      isOpenMPSimdDirective(DSAStack->getCurrentDirective()) && OC &&
      OC->getNumForLoops()) {
    Diag(OC->getBeginLoc(), diag::err_omp_ordered_simd)
        << getOpenMPDirectiveName(DSAStack->getCurrentDirective());
    ErrorFound = true;
  }
  if (ErrorFound) {
    return StmtError();
  }
  StmtResult SR = S;
  // Close the captured regions from innermost to outermost.
  for (OpenMPDirectiveKind ThisCaptureRegion : llvm::reverse(CaptureRegions)) {
    // Mark all variables in private list clauses as used in inner region.
    // Required for proper codegen of combined directives.
    // TODO: add processing for other clauses.
    if (ThisCaptureRegion != OMPD_unknown) {
      for (const clang::OMPClauseWithPreInit *C : PICs) {
        OpenMPDirectiveKind CaptureRegion = C->getCaptureRegion();
        // Find the particular capture region for the clause if the
        // directive is a combined one with multiple capture regions.
        // If the directive is not a combined one, the capture region
        // associated with the clause is OMPD_unknown and is generated
        // only once.
        if (CaptureRegion == ThisCaptureRegion ||
            CaptureRegion == OMPD_unknown) {
          if (auto *DS = cast_or_null<DeclStmt>(C->getPreInitStmt())) {
            for (Decl *D : DS->decls())
              MarkVariableReferenced(D->getLocation(), cast<VarDecl>(D));
          }
        }
      }
    }
    SR = ActOnCapturedRegionEnd(SR.get());
  }
  return SR;
}
3082 
3083 static bool checkCancelRegion(Sema &SemaRef, OpenMPDirectiveKind CurrentRegion,
3084  OpenMPDirectiveKind CancelRegion,
3085  SourceLocation StartLoc) {
3086  // CancelRegion is only needed for cancel and cancellation_point.
3087  if (CurrentRegion != OMPD_cancel && CurrentRegion != OMPD_cancellation_point)
3088  return false;
3089 
3090  if (CancelRegion == OMPD_parallel || CancelRegion == OMPD_for ||
3091  CancelRegion == OMPD_sections || CancelRegion == OMPD_taskgroup)
3092  return false;
3093 
3094  SemaRef.Diag(StartLoc, diag::err_omp_wrong_cancel_region)
3095  << getOpenMPDirectiveName(CancelRegion);
3096  return true;
3097 }
3098 
/// Check the nesting of \p CurrentRegion inside the region currently on top
/// of \p Stack against the OpenMP "Nesting of Regions" rules. Emits a
/// diagnostic and returns true when the nesting is prohibited; returns false
/// when the nesting is allowed (or there is no enclosing scope to check).
static bool checkNestingOfRegions(Sema &SemaRef, const DSAStackTy *Stack,
                                  OpenMPDirectiveKind CurrentRegion,
                                  const DeclarationNameInfo &CurrentName,
                                  OpenMPDirectiveKind CancelRegion,
                                  SourceLocation StartLoc) {
  if (Stack->getCurScope()) {
    OpenMPDirectiveKind ParentRegion = Stack->getParentDirective();
    OpenMPDirectiveKind OffendingRegion = ParentRegion;
    bool NestingProhibited = false;
    bool CloseNesting = true;
    bool OrphanSeen = false;
    // Suggestion attached to the "prohibited region" diagnostics below.
    enum {
      NoRecommend,
      ShouldBeInParallelRegion,
      ShouldBeInOrderedRegion,
      ShouldBeInTargetRegion,
      ShouldBeInTeamsRegion
    } Recommend = NoRecommend;
    if (isOpenMPSimdDirective(ParentRegion) && CurrentRegion != OMPD_ordered) {
      // OpenMP [2.16, Nesting of Regions]
      // OpenMP constructs may not be nested inside a simd region.
      // OpenMP [2.8.1,simd Construct, Restrictions]
      // An ordered construct with the simd clause is the only OpenMP
      // construct that can appear in the simd region.
      // Allowing a SIMD construct nested in another SIMD construct is an
      // extension. The OpenMP 4.5 spec does not allow it. Issue a warning
      // message.
      SemaRef.Diag(StartLoc, (CurrentRegion != OMPD_simd)
                                 ? diag::err_omp_prohibited_region_simd
                                 : diag::warn_omp_nesting_simd);
      return CurrentRegion != OMPD_simd;
    }
    if (ParentRegion == OMPD_atomic) {
      // OpenMP [2.16, Nesting of Regions]
      // OpenMP constructs may not be nested inside an atomic region.
      SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region_atomic);
      return true;
    }
    if (CurrentRegion == OMPD_section) {
      // OpenMP [2.7.2, sections Construct, Restrictions]
      // Orphaned section directives are prohibited. That is, the section
      // directives must appear within the sections construct and must not be
      // encountered elsewhere in the sections region.
      if (ParentRegion != OMPD_sections &&
          ParentRegion != OMPD_parallel_sections) {
        SemaRef.Diag(StartLoc, diag::err_omp_orphaned_section_directive)
            << (ParentRegion != OMPD_unknown)
            << getOpenMPDirectiveName(ParentRegion);
        return true;
      }
      return false;
    }
    // Allow some constructs (except teams and cancellation constructs) to be
    // orphaned (they could be used in functions, called from OpenMP regions
    // with the required preconditions).
    if (ParentRegion == OMPD_unknown &&
        !isOpenMPNestingTeamsDirective(CurrentRegion) &&
        CurrentRegion != OMPD_cancellation_point &&
        CurrentRegion != OMPD_cancel)
      return false;
    if (CurrentRegion == OMPD_cancellation_point ||
        CurrentRegion == OMPD_cancel) {
      // OpenMP [2.16, Nesting of Regions]
      // A cancellation point construct for which construct-type-clause is
      // taskgroup must be nested inside a task construct. A cancellation
      // point construct for which construct-type-clause is not taskgroup must
      // be closely nested inside an OpenMP construct that matches the type
      // specified in construct-type-clause.
      // A cancel construct for which construct-type-clause is taskgroup must be
      // nested inside a task construct. A cancel construct for which
      // construct-type-clause is not taskgroup must be closely nested inside an
      // OpenMP construct that matches the type specified in
      // construct-type-clause.
      NestingProhibited =
          !((CancelRegion == OMPD_parallel &&
             (ParentRegion == OMPD_parallel ||
              ParentRegion == OMPD_target_parallel)) ||
            (CancelRegion == OMPD_for &&
             (ParentRegion == OMPD_for || ParentRegion == OMPD_parallel_for ||
              ParentRegion == OMPD_target_parallel_for ||
              ParentRegion == OMPD_distribute_parallel_for ||
              ParentRegion == OMPD_teams_distribute_parallel_for ||
              ParentRegion == OMPD_target_teams_distribute_parallel_for)) ||
            (CancelRegion == OMPD_taskgroup && ParentRegion == OMPD_task) ||
            (CancelRegion == OMPD_sections &&
             (ParentRegion == OMPD_section || ParentRegion == OMPD_sections ||
              ParentRegion == OMPD_parallel_sections)));
      OrphanSeen = ParentRegion == OMPD_unknown;
    } else if (CurrentRegion == OMPD_master) {
      // OpenMP [2.16, Nesting of Regions]
      // A master region may not be closely nested inside a worksharing,
      // atomic, or explicit task region.
      NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
                          isOpenMPTaskingDirective(ParentRegion);
    } else if (CurrentRegion == OMPD_critical && CurrentName.getName()) {
      // OpenMP [2.16, Nesting of Regions]
      // A critical region may not be nested (closely or otherwise) inside a
      // critical region with the same name. Note that this restriction is not
      // sufficient to prevent deadlock.
      SourceLocation PreviousCriticalLoc;
      bool DeadLock = Stack->hasDirective(
          [CurrentName, &PreviousCriticalLoc](OpenMPDirectiveKind K,
                                              const DeclarationNameInfo &DNI,
                                              SourceLocation Loc) {
            if (K == OMPD_critical && DNI.getName() == CurrentName.getName()) {
              PreviousCriticalLoc = Loc;
              return true;
            }
            return false;
          },
          false /* skip top directive */);
      if (DeadLock) {
        SemaRef.Diag(StartLoc,
                     diag::err_omp_prohibited_region_critical_same_name)
            << CurrentName.getName();
        if (PreviousCriticalLoc.isValid())
          SemaRef.Diag(PreviousCriticalLoc,
                       diag::note_omp_previous_critical_region);
        return true;
      }
    } else if (CurrentRegion == OMPD_barrier) {
      // OpenMP [2.16, Nesting of Regions]
      // A barrier region may not be closely nested inside a worksharing,
      // explicit task, critical, ordered, atomic, or master region.
      NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
                          isOpenMPTaskingDirective(ParentRegion) ||
                          ParentRegion == OMPD_master ||
                          ParentRegion == OMPD_critical ||
                          ParentRegion == OMPD_ordered;
    } else if (isOpenMPWorksharingDirective(CurrentRegion) &&
               !isOpenMPParallelDirective(CurrentRegion) &&
               !isOpenMPTeamsDirective(CurrentRegion)) {
      // OpenMP [2.16, Nesting of Regions]
      // A worksharing region may not be closely nested inside a worksharing,
      // explicit task, critical, ordered, atomic, or master region.
      NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
                          isOpenMPTaskingDirective(ParentRegion) ||
                          ParentRegion == OMPD_master ||
                          ParentRegion == OMPD_critical ||
                          ParentRegion == OMPD_ordered;
      Recommend = ShouldBeInParallelRegion;
    } else if (CurrentRegion == OMPD_ordered) {
      // OpenMP [2.16, Nesting of Regions]
      // An ordered region may not be closely nested inside a critical,
      // atomic, or explicit task region.
      // An ordered region must be closely nested inside a loop region (or
      // parallel loop region) with an ordered clause.
      // OpenMP [2.8.1,simd Construct, Restrictions]
      // An ordered construct with the simd clause is the only OpenMP construct
      // that can appear in the simd region.
      NestingProhibited = ParentRegion == OMPD_critical ||
                          isOpenMPTaskingDirective(ParentRegion) ||
                          !(isOpenMPSimdDirective(ParentRegion) ||
                            Stack->isParentOrderedRegion());
      Recommend = ShouldBeInOrderedRegion;
    } else if (isOpenMPNestingTeamsDirective(CurrentRegion)) {
      // OpenMP [2.16, Nesting of Regions]
      // If specified, a teams construct must be contained within a target
      // construct.
      NestingProhibited = ParentRegion != OMPD_target;
      OrphanSeen = ParentRegion == OMPD_unknown;
      Recommend = ShouldBeInTargetRegion;
    }
    if (!NestingProhibited &&
        !isOpenMPTargetExecutionDirective(CurrentRegion) &&
        !isOpenMPTargetDataManagementDirective(CurrentRegion) &&
        (ParentRegion == OMPD_teams || ParentRegion == OMPD_target_teams)) {
      // OpenMP [2.16, Nesting of Regions]
      // distribute, parallel, parallel sections, parallel workshare, and the
      // parallel loop and parallel loop SIMD constructs are the only OpenMP
      // constructs that can be closely nested in the teams region.
      NestingProhibited = !isOpenMPParallelDirective(CurrentRegion) &&
                          !isOpenMPDistributeDirective(CurrentRegion);
      Recommend = ShouldBeInParallelRegion;
    }
    if (!NestingProhibited &&
        isOpenMPNestingDistributeDirective(CurrentRegion)) {
      // OpenMP 4.5 [2.17 Nesting of Regions]
      // The region associated with the distribute construct must be strictly
      // nested inside a teams region
      NestingProhibited =
          (ParentRegion != OMPD_teams && ParentRegion != OMPD_target_teams);
      Recommend = ShouldBeInTeamsRegion;
    }
    if (!NestingProhibited &&
        (isOpenMPTargetExecutionDirective(CurrentRegion) ||
         isOpenMPTargetDataManagementDirective(CurrentRegion))) {
      // OpenMP 4.5 [2.17 Nesting of Regions]
      // If a target, target update, target data, target enter data, or
      // target exit data construct is encountered during execution of a
      // target region, the behavior is unspecified.
      NestingProhibited = Stack->hasDirective(
          [&OffendingRegion](OpenMPDirectiveKind K, const DeclarationNameInfo &,
                             SourceLocation) {
            // NOTE(review): the condition on K (testing for an enclosing
            // target directive) is elided in this excerpt.
              OffendingRegion = K;
              return true;
            }
            return false;
          },
          false /* don't skip top directive */);
      CloseNesting = false;
    }
    if (NestingProhibited) {
      if (OrphanSeen) {
        SemaRef.Diag(StartLoc, diag::err_omp_orphaned_device_directive)
            << getOpenMPDirectiveName(CurrentRegion) << Recommend;
      } else {
        SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region)
            << CloseNesting << getOpenMPDirectiveName(OffendingRegion)
            << Recommend << getOpenMPDirectiveName(CurrentRegion);
      }
      return true;
    }
  }
  return false;
}
3316 
3318  ArrayRef<OMPClause *> Clauses,
3319  ArrayRef<OpenMPDirectiveKind> AllowedNameModifiers) {
3320  bool ErrorFound = false;
3321  unsigned NamedModifiersNumber = 0;
3323  OMPD_unknown + 1);
3324  SmallVector<SourceLocation, 4> NameModifierLoc;
3325  for (const OMPClause *C : Clauses) {
3326  if (const auto *IC = dyn_cast_or_null<OMPIfClause>(C)) {
3327  // At most one if clause without a directive-name-modifier can appear on
3328  // the directive.
3329  OpenMPDirectiveKind CurNM = IC->getNameModifier();
3330  if (FoundNameModifiers[CurNM]) {
3331  S.Diag(C->getBeginLoc(), diag::err_omp_more_one_clause)
3332  << getOpenMPDirectiveName(Kind) << getOpenMPClauseName(OMPC_if)
3333  << (CurNM != OMPD_unknown) << getOpenMPDirectiveName(CurNM);
3334  ErrorFound = true;
3335  } else if (CurNM != OMPD_unknown) {
3336  NameModifierLoc.push_back(IC->getNameModifierLoc());
3337  ++NamedModifiersNumber;
3338  }
3339  FoundNameModifiers[CurNM] = IC;
3340  if (CurNM == OMPD_unknown)
3341  continue;
3342  // Check if the specified name modifier is allowed for the current
3343  // directive.
3344  // At most one if clause with the particular directive-name-modifier can
3345  // appear on the directive.
3346  bool MatchFound = false;
3347  for (auto NM : AllowedNameModifiers) {
3348  if (CurNM == NM) {
3349  MatchFound = true;
3350  break;
3351  }
3352  }
3353  if (!MatchFound) {
3354  S.Diag(IC->getNameModifierLoc(),
3355  diag::err_omp_wrong_if_directive_name_modifier)
3357  ErrorFound = true;
3358  }
3359  }
3360  }
3361  // If any if clause on the directive includes a directive-name-modifier then
3362  // all if clauses on the directive must include a directive-name-modifier.
3363  if (FoundNameModifiers[OMPD_unknown] && NamedModifiersNumber > 0) {
3364  if (NamedModifiersNumber == AllowedNameModifiers.size()) {
3365  S.Diag(FoundNameModifiers[OMPD_unknown]->getBeginLoc(),
3366  diag::err_omp_no_more_if_clause);
3367  } else {
3368  std::string Values;
3369  std::string Sep(", ");
3370  unsigned AllowedCnt = 0;
3371  unsigned TotalAllowedNum =
3372  AllowedNameModifiers.size() - NamedModifiersNumber;
3373  for (unsigned Cnt = 0, End = AllowedNameModifiers.size(); Cnt < End;
3374  ++Cnt) {
3375  OpenMPDirectiveKind NM = AllowedNameModifiers[Cnt];
3376  if (!FoundNameModifiers[NM]) {
3377  Values += "'";
3378  Values += getOpenMPDirectiveName(NM);
3379  Values += "'";
3380  if (AllowedCnt + 2 == TotalAllowedNum)
3381  Values += " or ";
3382  else if (AllowedCnt + 1 != TotalAllowedNum)
3383  Values += Sep;
3384  ++AllowedCnt;
3385  }
3386  }
3387  S.Diag(FoundNameModifiers[OMPD_unknown]->getCondition()->getBeginLoc(),
3388  diag::err_omp_unnamed_if_clause)
3389  << (TotalAllowedNum > 1) << Values;
3390  }
3391  for (SourceLocation Loc : NameModifierLoc) {
3392  S.Diag(Loc, diag::note_omp_previous_named_if_clause);
3393  }
3394  ErrorFound = true;
3395  }
3396  return ErrorFound;
3397 }
3398 
3401  OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
3402  Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc) {
3403  StmtResult Res = StmtError();
3404  // First check CancelRegion which is then used in checkNestingOfRegions.
3405  if (checkCancelRegion(*this, Kind, CancelRegion, StartLoc) ||
3406  checkNestingOfRegions(*this, DSAStack, Kind, DirName, CancelRegion,
3407  StartLoc))
3408  return StmtError();
3409 
3410  llvm::SmallVector<OMPClause *, 8> ClausesWithImplicit;
3411  VarsWithInheritedDSAType VarsWithInheritedDSA;
3412  bool ErrorFound = false;
3413  ClausesWithImplicit.append(Clauses.begin(), Clauses.end());
3414  if (AStmt && !CurContext->isDependentContext()) {
3415  assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
3416 
3417  // Check default data sharing attributes for referenced variables.
3418  DSAAttrChecker DSAChecker(DSAStack, *this, cast<CapturedStmt>(AStmt));
3419  int ThisCaptureLevel = getOpenMPCaptureLevels(Kind);
3420  Stmt *S = AStmt;
3421  while (--ThisCaptureLevel >= 0)
3422  S = cast<CapturedStmt>(S)->getCapturedStmt();
3423  DSAChecker.Visit(S);
3424  if (DSAChecker.isErrorFound())
3425  return StmtError();
3426  // Generate list of implicitly defined firstprivate variables.
3427  VarsWithInheritedDSA = DSAChecker.getVarsWithInheritedDSA();
3428 
3429  SmallVector<Expr *, 4> ImplicitFirstprivates(
3430  DSAChecker.getImplicitFirstprivate().begin(),
3431  DSAChecker.getImplicitFirstprivate().end());
3432  SmallVector<Expr *, 4> ImplicitMaps(DSAChecker.getImplicitMap().begin(),
3433  DSAChecker.getImplicitMap().end());
3434  // Mark taskgroup task_reduction descriptors as implicitly firstprivate.
3435  for (OMPClause *C : Clauses) {
3436  if (auto *IRC = dyn_cast<OMPInReductionClause>(C)) {
3437  for (Expr *E : IRC->taskgroup_descriptors())
3438  if (E)
3439  ImplicitFirstprivates.emplace_back(E);
3440  }
3441  }
3442  if (!ImplicitFirstprivates.empty()) {
3443  if (OMPClause *Implicit = ActOnOpenMPFirstprivateClause(
3444  ImplicitFirstprivates, SourceLocation(), SourceLocation(),
3445  SourceLocation())) {
3446  ClausesWithImplicit.push_back(Implicit);
3447  ErrorFound = cast<OMPFirstprivateClause>(Implicit)->varlist_size() !=
3448  ImplicitFirstprivates.size();
3449  } else {
3450  ErrorFound = true;
3451  }
3452  }
3453  if (!ImplicitMaps.empty()) {
3454  if (OMPClause *Implicit = ActOnOpenMPMapClause(
3455  llvm::None, llvm::None, OMPC_MAP_tofrom,
3456  /*IsMapTypeImplicit=*/true, SourceLocation(), SourceLocation(),
3457  ImplicitMaps, SourceLocation(), SourceLocation(),
3458  SourceLocation())) {
3459  ClausesWithImplicit.emplace_back(Implicit);
3460  ErrorFound |=
3461  cast<OMPMapClause>(Implicit)->varlist_size() != ImplicitMaps.size();
3462  } else {
3463  ErrorFound = true;
3464  }
3465  }
3466  }
3467 
3468  llvm::SmallVector<OpenMPDirectiveKind, 4> AllowedNameModifiers;
3469  switch (Kind) {
3470  case OMPD_parallel:
3471  Res = ActOnOpenMPParallelDirective(ClausesWithImplicit, AStmt, StartLoc,
3472  EndLoc);
3473  AllowedNameModifiers.push_back(OMPD_parallel);
3474  break;
3475  case OMPD_simd:
3476  Res = ActOnOpenMPSimdDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc,
3477  VarsWithInheritedDSA);
3478  break;
3479  case OMPD_for:
3480  Res = ActOnOpenMPForDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc,
3481  VarsWithInheritedDSA);
3482  break;
3483  case OMPD_for_simd:
3484  Res = ActOnOpenMPForSimdDirective(ClausesWithImplicit, AStmt, StartLoc,
3485  EndLoc, VarsWithInheritedDSA);
3486  break;
3487  case OMPD_sections:
3488  Res = ActOnOpenMPSectionsDirective(ClausesWithImplicit, AStmt, StartLoc,
3489  EndLoc);
3490  break;
3491  case OMPD_section:
3492  assert(ClausesWithImplicit.empty() &&
3493  "No clauses are allowed for 'omp section' directive");
3494  Res = ActOnOpenMPSectionDirective(AStmt, StartLoc, EndLoc);
3495  break;
3496  case OMPD_single:
3497  Res = ActOnOpenMPSingleDirective(ClausesWithImplicit, AStmt, StartLoc,
3498  EndLoc);
3499  break;
3500  case OMPD_master:
3501  assert(ClausesWithImplicit.empty() &&
3502  "No clauses are allowed for 'omp master' directive");
3503  Res = ActOnOpenMPMasterDirective(AStmt, StartLoc, EndLoc);
3504  break;
3505  case OMPD_critical:
3506  Res = ActOnOpenMPCriticalDirective(DirName, ClausesWithImplicit, AStmt,
3507  StartLoc, EndLoc);
3508  break;
3509  case OMPD_parallel_for:
3510  Res = ActOnOpenMPParallelForDirective(ClausesWithImplicit, AStmt, StartLoc,
3511  EndLoc, VarsWithInheritedDSA);
3512  AllowedNameModifiers.push_back(OMPD_parallel);
3513  break;
3514  case OMPD_parallel_for_simd:
3515  Res = ActOnOpenMPParallelForSimdDirective(
3516  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3517  AllowedNameModifiers.push_back(OMPD_parallel);
3518  break;
3519  case OMPD_parallel_sections:
3520  Res = ActOnOpenMPParallelSectionsDirective(ClausesWithImplicit, AStmt,
3521  StartLoc, EndLoc);
3522  AllowedNameModifiers.push_back(OMPD_parallel);
3523  break;
3524  case OMPD_task:
3525  Res =
3526  ActOnOpenMPTaskDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc);
3527  AllowedNameModifiers.push_back(OMPD_task);
3528  break;
3529  case OMPD_taskyield:
3530  assert(ClausesWithImplicit.empty() &&
3531  "No clauses are allowed for 'omp taskyield' directive");
3532  assert(AStmt == nullptr &&
3533  "No associated statement allowed for 'omp taskyield' directive");
3534  Res = ActOnOpenMPTaskyieldDirective(StartLoc, EndLoc);
3535  break;
3536  case OMPD_barrier:
3537  assert(ClausesWithImplicit.empty() &&
3538  "No clauses are allowed for 'omp barrier' directive");
3539  assert(AStmt == nullptr &&
3540  "No associated statement allowed for 'omp barrier' directive");
3541  Res = ActOnOpenMPBarrierDirective(StartLoc, EndLoc);
3542  break;
3543  case OMPD_taskwait:
3544  assert(ClausesWithImplicit.empty() &&
3545  "No clauses are allowed for 'omp taskwait' directive");
3546  assert(AStmt == nullptr &&
3547  "No associated statement allowed for 'omp taskwait' directive");
3548  Res = ActOnOpenMPTaskwaitDirective(StartLoc, EndLoc);
3549  break;
3550  case OMPD_taskgroup:
3551  Res = ActOnOpenMPTaskgroupDirective(ClausesWithImplicit, AStmt, StartLoc,
3552  EndLoc);
3553  break;
3554  case OMPD_flush:
3555  assert(AStmt == nullptr &&
3556  "No associated statement allowed for 'omp flush' directive");
3557  Res = ActOnOpenMPFlushDirective(ClausesWithImplicit, StartLoc, EndLoc);
3558  break;
3559  case OMPD_ordered:
3560  Res = ActOnOpenMPOrderedDirective(ClausesWithImplicit, AStmt, StartLoc,
3561  EndLoc);
3562  break;
3563  case OMPD_atomic:
3564  Res = ActOnOpenMPAtomicDirective(ClausesWithImplicit, AStmt, StartLoc,
3565  EndLoc);
3566  break;
3567  case OMPD_teams:
3568  Res =
3569  ActOnOpenMPTeamsDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc);
3570  break;
3571  case OMPD_target:
3572  Res = ActOnOpenMPTargetDirective(ClausesWithImplicit, AStmt, StartLoc,
3573  EndLoc);
3574  AllowedNameModifiers.push_back(OMPD_target);
3575  break;
3576  case OMPD_target_parallel:
3577  Res = ActOnOpenMPTargetParallelDirective(ClausesWithImplicit, AStmt,
3578  StartLoc, EndLoc);
3579  AllowedNameModifiers.push_back(OMPD_target);
3580  AllowedNameModifiers.push_back(OMPD_parallel);
3581  break;
3582  case OMPD_target_parallel_for:
3583  Res = ActOnOpenMPTargetParallelForDirective(
3584  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3585  AllowedNameModifiers.push_back(OMPD_target);
3586  AllowedNameModifiers.push_back(OMPD_parallel);
3587  break;
3588  case OMPD_cancellation_point:
3589  assert(ClausesWithImplicit.empty() &&
3590  "No clauses are allowed for 'omp cancellation point' directive");
3591  assert(AStmt == nullptr && "No associated statement allowed for 'omp "
3592  "cancellation point' directive");
3593  Res = ActOnOpenMPCancellationPointDirective(StartLoc, EndLoc, CancelRegion);
3594  break;
3595  case OMPD_cancel:
3596  assert(AStmt == nullptr &&
3597  "No associated statement allowed for 'omp cancel' directive");
3598  Res = ActOnOpenMPCancelDirective(ClausesWithImplicit, StartLoc, EndLoc,
3599  CancelRegion);
3600  AllowedNameModifiers.push_back(OMPD_cancel);
3601  break;
3602  case OMPD_target_data:
3603  Res = ActOnOpenMPTargetDataDirective(ClausesWithImplicit, AStmt, StartLoc,
3604  EndLoc);
3605  AllowedNameModifiers.push_back(OMPD_target_data);
3606  break;
3607  case OMPD_target_enter_data:
3608  Res = ActOnOpenMPTargetEnterDataDirective(ClausesWithImplicit, StartLoc,
3609  EndLoc, AStmt);
3610  AllowedNameModifiers.push_back(OMPD_target_enter_data);
3611  break;
3612  case OMPD_target_exit_data:
3613  Res = ActOnOpenMPTargetExitDataDirective(ClausesWithImplicit, StartLoc,
3614  EndLoc, AStmt);
3615  AllowedNameModifiers.push_back(OMPD_target_exit_data);
3616  break;
3617  case OMPD_taskloop:
3618  Res = ActOnOpenMPTaskLoopDirective(ClausesWithImplicit, AStmt, StartLoc,
3619  EndLoc, VarsWithInheritedDSA);
3620  AllowedNameModifiers.push_back(OMPD_taskloop);
3621  break;
3622  case OMPD_taskloop_simd:
3623  Res = ActOnOpenMPTaskLoopSimdDirective(ClausesWithImplicit, AStmt, StartLoc,
3624  EndLoc, VarsWithInheritedDSA);
3625  AllowedNameModifiers.push_back(OMPD_taskloop);
3626  break;
3627  case OMPD_distribute:
3628  Res = ActOnOpenMPDistributeDirective(ClausesWithImplicit, AStmt, StartLoc,
3629  EndLoc, VarsWithInheritedDSA);
3630  break;
3631  case OMPD_target_update:
3632  Res = ActOnOpenMPTargetUpdateDirective(ClausesWithImplicit, StartLoc,
3633  EndLoc, AStmt);
3634  AllowedNameModifiers.push_back(OMPD_target_update);
3635  break;
3636  case OMPD_distribute_parallel_for:
3637  Res = ActOnOpenMPDistributeParallelForDirective(
3638  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3639  AllowedNameModifiers.push_back(OMPD_parallel);
3640  break;
3641  case OMPD_distribute_parallel_for_simd:
3642  Res = ActOnOpenMPDistributeParallelForSimdDirective(
3643  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3644  AllowedNameModifiers.push_back(OMPD_parallel);
3645  break;
3646  case OMPD_distribute_simd:
3647  Res = ActOnOpenMPDistributeSimdDirective(
3648  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3649  break;
3650  case OMPD_target_parallel_for_simd:
3651  Res = ActOnOpenMPTargetParallelForSimdDirective(
3652  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3653  AllowedNameModifiers.push_back(OMPD_target);
3654  AllowedNameModifiers.push_back(OMPD_parallel);
3655  break;
3656  case OMPD_target_simd:
3657  Res = ActOnOpenMPTargetSimdDirective(ClausesWithImplicit, AStmt, StartLoc,
3658  EndLoc, VarsWithInheritedDSA);
3659  AllowedNameModifiers.push_back(OMPD_target);
3660  break;
3661  case OMPD_teams_distribute:
3662  Res = ActOnOpenMPTeamsDistributeDirective(
3663  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3664  break;
3665  case OMPD_teams_distribute_simd:
3666  Res = ActOnOpenMPTeamsDistributeSimdDirective(
3667  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3668  break;
3669  case OMPD_teams_distribute_parallel_for_simd:
3670  Res = ActOnOpenMPTeamsDistributeParallelForSimdDirective(
3671  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3672  AllowedNameModifiers.push_back(OMPD_parallel);
3673  break;
3674  case OMPD_teams_distribute_parallel_for:
3675  Res = ActOnOpenMPTeamsDistributeParallelForDirective(
3676  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3677  AllowedNameModifiers.push_back(OMPD_parallel);
3678  break;
3679  case OMPD_target_teams:
3680  Res = ActOnOpenMPTargetTeamsDirective(ClausesWithImplicit, AStmt, StartLoc,
3681  EndLoc);
3682  AllowedNameModifiers.push_back(OMPD_target);
3683  break;
3684  case OMPD_target_teams_distribute:
3685  Res = ActOnOpenMPTargetTeamsDistributeDirective(
3686  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3687  AllowedNameModifiers.push_back(OMPD_target);
3688  break;
3689  case OMPD_target_teams_distribute_parallel_for:
3690  Res = ActOnOpenMPTargetTeamsDistributeParallelForDirective(
3691  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3692  AllowedNameModifiers.push_back(OMPD_target);
3693  AllowedNameModifiers.push_back(OMPD_parallel);
3694  break;
3695  case OMPD_target_teams_distribute_parallel_for_simd:
3696  Res = ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
3697  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3698  AllowedNameModifiers.push_back(OMPD_target);
3699  AllowedNameModifiers.push_back(OMPD_parallel);
3700  break;
3701  case OMPD_target_teams_distribute_simd:
3702  Res = ActOnOpenMPTargetTeamsDistributeSimdDirective(
3703  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
3704  AllowedNameModifiers.push_back(OMPD_target);
3705  break;
3706  case OMPD_declare_target:
3707  case OMPD_end_declare_target:
3708  case OMPD_threadprivate:
3709  case OMPD_declare_reduction:
3710  case OMPD_declare_mapper:
3711  case OMPD_declare_simd:
3712  case OMPD_requires:
3713  llvm_unreachable("OpenMP Directive is not allowed");
3714  case OMPD_unknown:
3715  llvm_unreachable("Unknown OpenMP directive");
3716  }
3717 
3718  for (const auto &P : VarsWithInheritedDSA) {
3719  Diag(P.second->getExprLoc(), diag::err_omp_no_dsa_for_variable)
3720  << P.first << P.second->getSourceRange();
3721  }
3722  ErrorFound = !VarsWithInheritedDSA.empty() || ErrorFound;
3723 
3724  if (!AllowedNameModifiers.empty())
3725  ErrorFound = checkIfClauses(*this, Kind, Clauses, AllowedNameModifiers) ||
3726  ErrorFound;
3727 
3728  if (ErrorFound)
3729  return StmtError();
3730  return Res;
3731 }
3732 
3734  DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen,
3735  ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
3736  ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
3737  ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR) {
3738  assert(Aligneds.size() == Alignments.size());
3739  assert(Linears.size() == LinModifiers.size());
3740  assert(Linears.size() == Steps.size());
3741  if (!DG || DG.get().isNull())
3742  return DeclGroupPtrTy();
3743 
3744  if (!DG.get().isSingleDecl()) {
3745  Diag(SR.getBegin(), diag::err_omp_single_decl_in_declare_simd);
3746  return DG;
3747  }
3748  Decl *ADecl = DG.get().getSingleDecl();
3749  if (auto *FTD = dyn_cast<FunctionTemplateDecl>(ADecl))
3750  ADecl = FTD->getTemplatedDecl();
3751 
3752  auto *FD = dyn_cast<FunctionDecl>(ADecl);
3753  if (!FD) {
3754  Diag(ADecl->getLocation(), diag::err_omp_function_expected);
3755  return DeclGroupPtrTy();
3756  }
3757 
3758  // OpenMP [2.8.2, declare simd construct, Description]
3759  // The parameter of the simdlen clause must be a constant positive integer
3760  // expression.
3761  ExprResult SL;
3762  if (Simdlen)
3763  SL = VerifyPositiveIntegerConstantInClause(Simdlen, OMPC_simdlen);
3764  // OpenMP [2.8.2, declare simd construct, Description]
3765  // The special this pointer can be used as if was one of the arguments to the
3766  // function in any of the linear, aligned, or uniform clauses.
3767  // The uniform clause declares one or more arguments to have an invariant
3768  // value for all concurrent invocations of the function in the execution of a
3769  // single SIMD loop.
3770  llvm::DenseMap<const Decl *, const Expr *> UniformedArgs;
3771  const Expr *UniformedLinearThis = nullptr;
3772  for (const Expr *E : Uniforms) {
3773  E = E->IgnoreParenImpCasts();
3774  if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
3775  if (const auto *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl()))
3776  if (FD->getNumParams() > PVD->getFunctionScopeIndex() &&
3777  FD->getParamDecl(PVD->getFunctionScopeIndex())
3778  ->getCanonicalDecl() == PVD->getCanonicalDecl()) {
3779  UniformedArgs.try_emplace(PVD->getCanonicalDecl(), E);
3780  continue;
3781  }
3782  if (isa<CXXThisExpr>(E)) {
3783  UniformedLinearThis = E;
3784  continue;
3785  }
3786  Diag(E->getExprLoc(), diag::err_omp_param_or_this_in_clause)
3787  << FD->getDeclName() << (isa<CXXMethodDecl>(ADecl) ? 1 : 0);
3788  }
3789  // OpenMP [2.8.2, declare simd construct, Description]
3790  // The aligned clause declares that the object to which each list item points
3791  // is aligned to the number of bytes expressed in the optional parameter of
3792  // the aligned clause.
3793  // The special this pointer can be used as if was one of the arguments to the
3794  // function in any of the linear, aligned, or uniform clauses.
3795  // The type of list items appearing in the aligned clause must be array,
3796  // pointer, reference to array, or reference to pointer.
3797  llvm::DenseMap<const Decl *, const Expr *> AlignedArgs;
3798  const Expr *AlignedThis = nullptr;
3799  for (const Expr *E : Aligneds) {
3800  E = E->IgnoreParenImpCasts();
3801  if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
3802  if (const auto *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl())) {
3803  const VarDecl *CanonPVD = PVD->getCanonicalDecl();
3804  if (FD->getNumParams() > PVD->getFunctionScopeIndex() &&
3805  FD->getParamDecl(PVD->getFunctionScopeIndex())
3806  ->getCanonicalDecl() == CanonPVD) {
3807  // OpenMP [2.8.1, simd construct, Restrictions]
3808  // A list-item cannot appear in more than one aligned clause.
3809  if (AlignedArgs.count(CanonPVD) > 0) {
3810  Diag(E->getExprLoc(), diag::err_omp_aligned_twice)
3811  << 1 << E->getSourceRange();
3812  Diag(AlignedArgs[CanonPVD]->getExprLoc(),
3813  diag::note_omp_explicit_dsa)
3814  << getOpenMPClauseName(OMPC_aligned);
3815  continue;
3816  }
3817  AlignedArgs[CanonPVD] = E;
3818  QualType QTy = PVD->getType()
3819  .getNonReferenceType()
3820  .getUnqualifiedType()
3821  .getCanonicalType();
3822  const Type *Ty = QTy.getTypePtrOrNull();
3823  if (!Ty || (!Ty->isArrayType() && !Ty->isPointerType())) {
3824  Diag(E->getExprLoc(), diag::err_omp_aligned_expected_array_or_ptr)
3825  << QTy << getLangOpts().CPlusPlus << E->getSourceRange();
3826  Diag(PVD->getLocation(), diag::note_previous_decl) << PVD;
3827  }
3828  continue;
3829  }
3830  }
3831  if (isa<CXXThisExpr>(E)) {
3832  if (AlignedThis) {
3833  Diag(E->getExprLoc(), diag::err_omp_aligned_twice)
3834  << 2 << E->getSourceRange();
3835  Diag(AlignedThis->getExprLoc(), diag::note_omp_explicit_dsa)
3836  << getOpenMPClauseName(OMPC_aligned);
3837  }
3838  AlignedThis = E;
3839  continue;
3840  }
3841  Diag(E->getExprLoc(), diag::err_omp_param_or_this_in_clause)
3842  << FD->getDeclName() << (isa<CXXMethodDecl>(ADecl) ? 1 : 0);
3843  }
3844  // The optional parameter of the aligned clause, alignment, must be a constant
3845  // positive integer expression. If no optional parameter is specified,
3846  // implementation-defined default alignments for SIMD instructions on the
3847  // target platforms are assumed.
3848  SmallVector<const Expr *, 4> NewAligns;
3849  for (Expr *E : Alignments) {
3850  ExprResult Align;
3851  if (E)
3852  Align = VerifyPositiveIntegerConstantInClause(E, OMPC_aligned);
3853  NewAligns.push_back(Align.get());
3854  }
3855  // OpenMP [2.8.2, declare simd construct, Description]
3856  // The linear clause declares one or more list items to be private to a SIMD
3857  // lane and to have a linear relationship with respect to the iteration space
3858  // of a loop.
3859  // The special this pointer can be used as if was one of the arguments to the
3860  // function in any of the linear, aligned, or uniform clauses.
3861  // When a linear-step expression is specified in a linear clause it must be
3862  // either a constant integer expression or an integer-typed parameter that is
3863  // specified in a uniform clause on the directive.
3864  llvm::DenseMap<const Decl *, const Expr *> LinearArgs;
3865  const bool IsUniformedThis = UniformedLinearThis != nullptr;
3866  auto MI = LinModifiers.begin();
3867  for (const Expr *E : Linears) {
3868  auto LinKind = static_cast<OpenMPLinearClauseKind>(*MI);
3869  ++MI;
3870  E = E->IgnoreParenImpCasts();
3871  if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
3872  if (const auto *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl())) {
3873  const VarDecl *CanonPVD = PVD->getCanonicalDecl();
3874  if (FD->getNumParams() > PVD->getFunctionScopeIndex() &&
3875  FD->getParamDecl(PVD->getFunctionScopeIndex())
3876  ->getCanonicalDecl() == CanonPVD) {
3877  // OpenMP [2.15.3.7, linear Clause, Restrictions]
3878  // A list-item cannot appear in more than one linear clause.
3879  if (LinearArgs.count(CanonPVD) > 0) {
3880  Diag(E->getExprLoc(), diag::err_omp_wrong_dsa)
3881  << getOpenMPClauseName(OMPC_linear)
3882  << getOpenMPClauseName(OMPC_linear) << E->getSourceRange();
3883  Diag(LinearArgs[CanonPVD]->getExprLoc(),
3884  diag::note_omp_explicit_dsa)
3885  << getOpenMPClauseName(OMPC_linear);
3886  continue;
3887  }
3888  // Each argument can appear in at most one uniform or linear clause.
3889  if (UniformedArgs.count(CanonPVD) > 0) {
3890  Diag(E->getExprLoc(), diag::err_omp_wrong_dsa)
3891  << getOpenMPClauseName(OMPC_linear)
3893  Diag(UniformedArgs[CanonPVD]->getExprLoc(),
3894  diag::note_omp_explicit_dsa)
3896  continue;
3897  }
3898  LinearArgs[CanonPVD] = E;
3899  if (E->isValueDependent() || E->isTypeDependent() ||
3900  E->isInstantiationDependent() ||
3902  continue;
3903  (void)CheckOpenMPLinearDecl(CanonPVD, E->getExprLoc(), LinKind,
3904  PVD->getOriginalType());
3905  continue;
3906  }
3907  }
3908  if (isa<CXXThisExpr>(E)) {
3909  if (UniformedLinearThis) {
3910  Diag(E->getExprLoc(), diag::err_omp_wrong_dsa)
3911  << getOpenMPClauseName(OMPC_linear)
3912  << getOpenMPClauseName(IsUniformedThis ? OMPC_uniform : OMPC_linear)
3913  << E->getSourceRange();
3914  Diag(UniformedLinearThis->getExprLoc(), diag::note_omp_explicit_dsa)
3915  << getOpenMPClauseName(IsUniformedThis ? OMPC_uniform
3916  : OMPC_linear);
3917  continue;
3918  }
3919  UniformedLinearThis = E;
3920  if (E->isValueDependent() || E->isTypeDependent() ||
3922  continue;
3923  (void)CheckOpenMPLinearDecl(/*D=*/nullptr, E->getExprLoc(), LinKind,
3924  E->getType());
3925  continue;
3926  }
3927  Diag(E->getExprLoc(), diag::err_omp_param_or_this_in_clause)
3928  << FD->getDeclName() << (isa<CXXMethodDecl>(ADecl) ? 1 : 0);
3929  }
3930  Expr *Step = nullptr;
3931  Expr *NewStep = nullptr;
3932  SmallVector<Expr *, 4> NewSteps;
3933  for (Expr *E : Steps) {
3934  // Skip the same step expression, it was checked already.
3935  if (Step == E || !E) {
3936  NewSteps.push_back(E ? NewStep : nullptr);
3937  continue;
3938  }
3939  Step = E;
3940  if (const auto *DRE = dyn_cast<DeclRefExpr>(Step))
3941  if (const auto *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl())) {
3942  const VarDecl *CanonPVD = PVD->getCanonicalDecl();
3943  if (UniformedArgs.count(CanonPVD) == 0) {
3944  Diag(Step->getExprLoc(), diag::err_omp_expected_uniform_param)
3945  << Step->getSourceRange();
3946  } else if (E->isValueDependent() || E->isTypeDependent() ||
3947  E->isInstantiationDependent() ||
3949  CanonPVD->getType()->hasIntegerRepresentation()) {
3950  NewSteps.push_back(Step);
3951  } else {
3952  Diag(Step->getExprLoc(), diag::err_omp_expected_int_param)
3953  << Step->getSourceRange();
3954  }
3955  continue;
3956  }
3957  NewStep = Step;
3958  if (Step && !Step->isValueDependent() && !Step->isTypeDependent() &&
3959  !Step->isInstantiationDependent() &&
3961  NewStep = PerformOpenMPImplicitIntegerConversion(Step->getExprLoc(), Step)
3962  .get();
3963  if (NewStep)
3964  NewStep = VerifyIntegerConstantExpression(NewStep).get();
3965  }
3966  NewSteps.push_back(NewStep);
3967  }
3968  auto *NewAttr = OMPDeclareSimdDeclAttr::CreateImplicit(
3969  Context, BS, SL.get(), const_cast<Expr **>(Uniforms.data()),
3970  Uniforms.size(), const_cast<Expr **>(Aligneds.data()), Aligneds.size(),
3971  const_cast<Expr **>(NewAligns.data()), NewAligns.size(),
3972  const_cast<Expr **>(Linears.data()), Linears.size(),
3973  const_cast<unsigned *>(LinModifiers.data()), LinModifiers.size(),
3974  NewSteps.data(), NewSteps.size(), SR);
3975  ADecl->addAttr(NewAttr);
3976  return ConvertDeclToDeclGroup(ADecl);
3977 }
3978 
3980  Stmt *AStmt,
3981  SourceLocation StartLoc,
3982  SourceLocation EndLoc) {
3983  if (!AStmt)
3984  return StmtError();
3985 
3986  auto *CS = cast<CapturedStmt>(AStmt);
3987  // 1.2.2 OpenMP Language Terminology
3988  // Structured block - An executable statement with a single entry at the
3989  // top and a single exit at the bottom.
3990  // The point of exit cannot be a branch out of the structured block.
3991  // longjmp() and throw() must not violate the entry/exit criteria.
3992  CS->getCapturedDecl()->setNothrow();
3993 
3994  setFunctionHasBranchProtectedScope();
3995 
3996  return OMPParallelDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt,
3997  DSAStack->isCancelRegion());
3998 }
3999 
4000 namespace {
4001 /// Helper class for checking canonical form of the OpenMP loops and
4002 /// extracting iteration space of each loop in the loop nest, that will be used
4003 /// for IR generation.
4004 class OpenMPIterationSpaceChecker {
4005  /// Reference to Sema.
4006  Sema &SemaRef;
4007  /// A location for diagnostics (when there is no some better location).
4008  SourceLocation DefaultLoc;
4009  /// A location for diagnostics (when increment is not compatible).
4010  SourceLocation ConditionLoc;
4011  /// A source location for referring to loop init later.
4012  SourceRange InitSrcRange;
4013  /// A source location for referring to condition later.
4014  SourceRange ConditionSrcRange;
4015  /// A source location for referring to increment later.
4016  SourceRange IncrementSrcRange;
4017  /// Loop variable.
4018  ValueDecl *LCDecl = nullptr;
4019  /// Reference to loop variable.
4020  Expr *LCRef = nullptr;
4021  /// Lower bound (initializer for the var).
4022  Expr *LB = nullptr;
4023  /// Upper bound.
4024  Expr *UB = nullptr;
4025  /// Loop step (increment).
4026  Expr *Step = nullptr;
4027  /// This flag is true when condition is one of:
4028  /// Var < UB
4029  /// Var <= UB
4030  /// UB > Var
4031  /// UB >= Var
4032  /// This will have no value when the condition is !=
4033  llvm::Optional<bool> TestIsLessOp;
4034  /// This flag is true when condition is strict ( < or > ).
4035  bool TestIsStrictOp = false;
4036  /// This flag is true when step is subtracted on each iteration.
4037  bool SubtractStep = false;
4038 
4039 public:
4040  OpenMPIterationSpaceChecker(Sema &SemaRef, SourceLocation DefaultLoc)
4041  : SemaRef(SemaRef), DefaultLoc(DefaultLoc), ConditionLoc(DefaultLoc) {}
4042  /// Check init-expr for canonical loop form and save loop counter
4043  /// variable - #Var and its initialization value - #LB.
4044  bool checkAndSetInit(Stmt *S, bool EmitDiags = true);
4045  /// Check test-expr for canonical form, save upper-bound (#UB), flags
4046  /// for less/greater and for strict/non-strict comparison.
4047  bool checkAndSetCond(Expr *S);
4048  /// Check incr-expr for canonical loop form and return true if it
4049  /// does not conform, otherwise save loop step (#Step).
4050  bool checkAndSetInc(Expr *S);
4051  /// Return the loop counter variable.
4052  ValueDecl *getLoopDecl() const { return LCDecl; }
4053  /// Return the reference expression to loop counter variable.
4054  Expr *getLoopDeclRefExpr() const { return LCRef; }
4055  /// Source range of the loop init.
4056  SourceRange getInitSrcRange() const { return InitSrcRange; }
4057  /// Source range of the loop condition.
4058  SourceRange getConditionSrcRange() const { return ConditionSrcRange; }
4059  /// Source range of the loop increment.
4060  SourceRange getIncrementSrcRange() const { return IncrementSrcRange; }
4061  /// True if the step should be subtracted.
4062  bool shouldSubtractStep() const { return SubtractStep; }
4063  /// True, if the compare operator is strict (<, > or !=).
4064  bool isStrictTestOp() const { return TestIsStrictOp; }
4065  /// Build the expression to calculate the number of iterations.
4066  Expr *buildNumIterations(
4067  Scope *S, const bool LimitedType,
4068  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) const;
4069  /// Build the precondition expression for the loops.
4070  Expr *
4071  buildPreCond(Scope *S, Expr *Cond,
4072  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) const;
4073  /// Build reference expression to the counter be used for codegen.
4074  DeclRefExpr *
4075  buildCounterVar(llvm::MapVector<const Expr *, DeclRefExpr *> &Captures,
4076  DSAStackTy &DSA) const;
4077  /// Build reference expression to the private counter be used for
4078  /// codegen.
4079  Expr *buildPrivateCounterVar() const;
4080  /// Build initialization of the counter be used for codegen.
4081  Expr *buildCounterInit() const;
4082  /// Build step of the counter be used for codegen.
4083  Expr *buildCounterStep() const;
4084  /// Build loop data with counter value for depend clauses in ordered
4085  /// directives.
4086  Expr *
4087  buildOrderedLoopData(Scope *S, Expr *Counter,
4088  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures,
4089  SourceLocation Loc, Expr *Inc = nullptr,
4090  OverloadedOperatorKind OOK = OO_Amp);
4091  /// Return true if any expression is dependent.
4092  bool dependent() const;
4093 
4094 private:
4095  /// Check the right-hand side of an assignment in the increment
4096  /// expression.
4097  bool checkAndSetIncRHS(Expr *RHS);
4098  /// Helper to set loop counter variable and its initializer.
4099  bool setLCDeclAndLB(ValueDecl *NewLCDecl, Expr *NewDeclRefExpr, Expr *NewLB);
4100  /// Helper to set upper bound.
4101  bool setUB(Expr *NewUB, llvm::Optional<bool> LessOp, bool StrictOp,
4102  SourceRange SR, SourceLocation SL);
4103  /// Helper to set loop increment.
4104  bool setStep(Expr *NewStep, bool Subtract);
4105 };
4106 
4107 bool OpenMPIterationSpaceChecker::dependent() const {
4108  if (!LCDecl) {
4109  assert(!LB && !UB && !Step);
4110  return false;
4111  }
4112  return LCDecl->getType()->isDependentType() ||
4113  (LB && LB->isValueDependent()) || (UB && UB->isValueDependent()) ||
4114  (Step && Step->isValueDependent());
4115 }
4116 
4117 bool OpenMPIterationSpaceChecker::setLCDeclAndLB(ValueDecl *NewLCDecl,
4118  Expr *NewLCRefExpr,
4119  Expr *NewLB) {
4120  // State consistency checking to ensure correct usage.
4121  assert(LCDecl == nullptr && LB == nullptr && LCRef == nullptr &&
4122  UB == nullptr && Step == nullptr && !TestIsLessOp && !TestIsStrictOp);
4123  if (!NewLCDecl || !NewLB)
4124  return true;
4125  LCDecl = getCanonicalDecl(NewLCDecl);
4126  LCRef = NewLCRefExpr;
4127  if (auto *CE = dyn_cast_or_null<CXXConstructExpr>(NewLB))
4128  if (const CXXConstructorDecl *Ctor = CE->getConstructor())
4129  if ((Ctor->isCopyOrMoveConstructor() ||
4130  Ctor->isConvertingConstructor(/*AllowExplicit=*/false)) &&
4131  CE->getNumArgs() > 0 && CE->getArg(0) != nullptr)
4132  NewLB = CE->getArg(0)->IgnoreParenImpCasts();
4133  LB = NewLB;
4134  return false;
4135 }
4136 
/// Save the upper bound and the shape of the loop test expression.
/// \param NewUB   Upper-bound expression taken from the loop condition.
/// \param LessOp  True for '<'/'<=' tests, false for '>'/'>=',
///                llvm::None for '!=' (direction is deduced later in
///                setStep() from the sign of the increment).
/// \param StrictOp True for strict comparisons ('<', '>', '!=').
/// \param SR      Source range of the whole condition (for diagnostics).
/// \param SL      Location of the comparison operator (for diagnostics).
/// \returns true on error (no upper bound), false on success.
bool OpenMPIterationSpaceChecker::setUB(Expr *NewUB,
                                        llvm::Optional<bool> LessOp,
                                        bool StrictOp, SourceRange SR,
                                        SourceLocation SL) {
  // State consistency checking to ensure correct usage: init must already be
  // processed, the condition must not have been seen yet.
  assert(LCDecl != nullptr && LB != nullptr && UB == nullptr &&
         Step == nullptr && !TestIsLessOp && !TestIsStrictOp);
  if (!NewUB)
    return true;
  UB = NewUB;
  // Leave TestIsLessOp unset for '!=' conditions.
  if (LessOp)
    TestIsLessOp = LessOp;
  TestIsStrictOp = StrictOp;
  ConditionSrcRange = SR;
  ConditionLoc = SL;
  return false;
}
4154 
4155 bool OpenMPIterationSpaceChecker::setStep(Expr *NewStep, bool Subtract) {
4156  // State consistency checking to ensure correct usage.
4157  assert(LCDecl != nullptr && LB != nullptr && Step == nullptr);
4158  if (!NewStep)
4159  return true;
4160  if (!NewStep->isValueDependent()) {
4161  // Check that the step is integer expression.
4162  SourceLocation StepLoc = NewStep->getBeginLoc();
4164  StepLoc, getExprAsWritten(NewStep));
4165  if (Val.isInvalid())
4166  return true;
4167  NewStep = Val.get();
4168 
4169  // OpenMP [2.6, Canonical Loop Form, Restrictions]
4170  // If test-expr is of form var relational-op b and relational-op is < or
4171  // <= then incr-expr must cause var to increase on each iteration of the
4172  // loop. If test-expr is of form var relational-op b and relational-op is
4173  // > or >= then incr-expr must cause var to decrease on each iteration of
4174  // the loop.
4175  // If test-expr is of form b relational-op var and relational-op is < or
4176  // <= then incr-expr must cause var to decrease on each iteration of the
4177  // loop. If test-expr is of form b relational-op var and relational-op is
4178  // > or >= then incr-expr must cause var to increase on each iteration of
4179  // the loop.
4180  llvm::APSInt Result;
4181  bool IsConstant = NewStep->isIntegerConstantExpr(Result, SemaRef.Context);
4182  bool IsUnsigned = !NewStep->getType()->hasSignedIntegerRepresentation();
4183  bool IsConstNeg =
4184  IsConstant && Result.isSigned() && (Subtract != Result.isNegative());
4185  bool IsConstPos =
4186  IsConstant && Result.isSigned() && (Subtract == Result.isNegative());
4187  bool IsConstZero = IsConstant && !Result.getBoolValue();
4188 
4189  // != with increment is treated as <; != with decrement is treated as >
4190  if (!TestIsLessOp.hasValue())
4191  TestIsLessOp = IsConstPos || (IsUnsigned && !Subtract);
4192  if (UB && (IsConstZero ||
4193  (TestIsLessOp.getValue() ?
4194  (IsConstNeg || (IsUnsigned && Subtract)) :
4195  (IsConstPos || (IsUnsigned && !Subtract))))) {
4196  SemaRef.Diag(NewStep->getExprLoc(),
4197  diag::err_omp_loop_incr_not_compatible)
4198  << LCDecl << TestIsLessOp.getValue() << NewStep->getSourceRange();
4199  SemaRef.Diag(ConditionLoc,
4200  diag::note_omp_loop_cond_requres_compatible_incr)
4201  << TestIsLessOp.getValue() << ConditionSrcRange;
4202  return true;
4203  }
4204  if (TestIsLessOp.getValue() == Subtract) {
4205  NewStep =
4206  SemaRef.CreateBuiltinUnaryOp(NewStep->getExprLoc(), UO_Minus, NewStep)
4207  .get();
4208  Subtract = !Subtract;
4209  }
4210  }
4211 
4212  Step = NewStep;
4213  SubtractStep = Subtract;
4214  return false;
4215 }
4216 
4217 bool OpenMPIterationSpaceChecker::checkAndSetInit(Stmt *S, bool EmitDiags) {
4218  // Check init-expr for canonical loop form and save loop counter
4219  // variable - #Var and its initialization value - #LB.
4220  // OpenMP [2.6] Canonical loop form. init-expr may be one of the following:
4221  // var = lb
4222  // integer-type var = lb
4223  // random-access-iterator-type var = lb
4224  // pointer-type var = lb
4225  //
4226  if (!S) {
4227  if (EmitDiags) {
4228  SemaRef.Diag(DefaultLoc, diag::err_omp_loop_not_canonical_init);
4229  }
4230  return true;
4231  }
4232  if (auto *ExprTemp = dyn_cast<ExprWithCleanups>(S))
4233  if (!ExprTemp->cleanupsHaveSideEffects())
4234  S = ExprTemp->getSubExpr();
4235 
4236  InitSrcRange = S->getSourceRange();
4237  if (Expr *E = dyn_cast<Expr>(S))
4238  S = E->IgnoreParens();
4239  if (auto *BO = dyn_cast<BinaryOperator>(S)) {
4240  if (BO->getOpcode() == BO_Assign) {
4241  Expr *LHS = BO->getLHS()->IgnoreParens();
4242  if (auto *DRE = dyn_cast<DeclRefExpr>(LHS)) {
4243  if (auto *CED = dyn_cast<OMPCapturedExprDecl>(DRE->getDecl()))
4244  if (auto *ME = dyn_cast<MemberExpr>(getExprAsWritten(CED->getInit())))
4245  return setLCDeclAndLB(ME->getMemberDecl(), ME, BO->getRHS());
4246  return setLCDeclAndLB(DRE->getDecl(), DRE, BO->getRHS());
4247  }
4248  if (auto *ME = dyn_cast<MemberExpr>(LHS)) {
4249  if (ME->isArrow() &&
4250  isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts()))
4251  return setLCDeclAndLB(ME->getMemberDecl(), ME, BO->getRHS());
4252  }
4253  }
4254  } else if (auto *DS = dyn_cast<DeclStmt>(S)) {
4255  if (DS->isSingleDecl()) {
4256  if (auto *Var = dyn_cast_or_null<VarDecl>(DS->getSingleDecl())) {
4257  if (Var->hasInit() && !Var->getType()->isReferenceType()) {
4258  // Accept non-canonical init form here but emit ext. warning.
4259  if (Var->getInitStyle() != VarDecl::CInit && EmitDiags)
4260  SemaRef.Diag(S->getBeginLoc(),
4261  diag::ext_omp_loop_not_canonical_init)
4262  << S->getSourceRange();
4263  return setLCDeclAndLB(
4264  Var,
4265  buildDeclRefExpr(SemaRef, Var,
4266  Var->getType().getNonReferenceType(),
4267  DS->getBeginLoc()),
4268  Var->getInit());
4269  }
4270  }
4271  }
4272  } else if (auto *CE = dyn_cast<CXXOperatorCallExpr>(S)) {
4273  if (CE->getOperator() == OO_Equal) {
4274  Expr *LHS = CE->getArg(0);
4275  if (auto *DRE = dyn_cast<DeclRefExpr>(LHS)) {
4276  if (auto *CED = dyn_cast<OMPCapturedExprDecl>(DRE->getDecl()))
4277  if (auto *ME = dyn_cast<MemberExpr>(getExprAsWritten(CED->getInit())))
4278  return setLCDeclAndLB(ME->getMemberDecl(), ME, BO->getRHS());
4279  return setLCDeclAndLB(DRE->getDecl(), DRE, CE->getArg(1));
4280  }
4281  if (auto *ME = dyn_cast<MemberExpr>(LHS)) {
4282  if (ME->isArrow() &&
4283  isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts()))
4284  return setLCDeclAndLB(ME->getMemberDecl(), ME, BO->getRHS());
4285  }
4286  }
4287  }
4288 
4289  if (dependent() || SemaRef.CurContext->isDependentContext())
4290  return false;
4291  if (EmitDiags) {
4292  SemaRef.Diag(S->getBeginLoc(), diag::err_omp_loop_not_canonical_init)
4293  << S->getSourceRange();
4294  }
4295  return true;
4296 }
4297 
4298 /// Ignore parenthesizes, implicit casts, copy constructor and return the
4299 /// variable (which may be the loop variable) if possible.
4300 static const ValueDecl *getInitLCDecl(const Expr *E) {
4301  if (!E)
4302  return nullptr;
4303  E = getExprAsWritten(E);
4304  if (const auto *CE = dyn_cast_or_null<CXXConstructExpr>(E))
4305  if (const CXXConstructorDecl *Ctor = CE->getConstructor())
4306  if ((Ctor->isCopyOrMoveConstructor() ||
4307  Ctor->isConvertingConstructor(/*AllowExplicit=*/false)) &&
4308  CE->getNumArgs() > 0 && CE->getArg(0) != nullptr)
4309  E = CE->getArg(0)->IgnoreParenImpCasts();
4310  if (const auto *DRE = dyn_cast_or_null<DeclRefExpr>(E)) {
4311  if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()))
4312  return getCanonicalDecl(VD);
4313  }
4314  if (const auto *ME = dyn_cast_or_null<MemberExpr>(E))
4315  if (ME->isArrow() && isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts()))
4316  return getCanonicalDecl(ME->getMemberDecl());
4317  return nullptr;
4318 }
4319 
/// Check test-expr for canonical form, save upper-bound UB, flags for
/// less/greater and for strict/non-strict comparison.
/// Returns true if the condition is not in canonical form.
bool OpenMPIterationSpaceChecker::checkAndSetCond(Expr *S) {
  // OpenMP [2.6] Canonical loop form. Test-expr may be one of the following:
  //   var relational-op b
  //   b relational-op var
  //
  if (!S) {
    SemaRef.Diag(DefaultLoc, diag::err_omp_loop_not_canonical_cond) << LCDecl;
    return true;
  }
  S = getExprAsWritten(S);
  SourceLocation CondLoc = S->getBeginLoc();
  if (auto *BO = dyn_cast<BinaryOperator>(S)) {
    if (BO->isRelationalOp()) {
      // 'var <op> b': LessOp reflects the operator directly.
      if (getInitLCDecl(BO->getLHS()) == LCDecl)
        return setUB(BO->getRHS(),
                     (BO->getOpcode() == BO_LT || BO->getOpcode() == BO_LE),
                     (BO->getOpcode() == BO_LT || BO->getOpcode() == BO_GT),
                     BO->getSourceRange(), BO->getOperatorLoc());
      // 'b <op> var': the counter is on the right, so LessOp is inverted
      // ('b > var' means the counter increases towards b).
      if (getInitLCDecl(BO->getRHS()) == LCDecl)
        return setUB(BO->getLHS(),
                     (BO->getOpcode() == BO_GT || BO->getOpcode() == BO_GE),
                     (BO->getOpcode() == BO_LT || BO->getOpcode() == BO_GT),
                     BO->getSourceRange(), BO->getOperatorLoc());
    } else if (BO->getOpcode() == BO_NE)
      // '!=': direction is unknown until the step's sign is seen, hence
      // LessOp is llvm::None; whichever operand is not the counter is UB.
      return setUB(getInitLCDecl(BO->getLHS()) == LCDecl ?
                     BO->getRHS() : BO->getLHS(),
                   /*LessOp=*/llvm::None,
                   /*StrictOp=*/true,
                   BO->getSourceRange(), BO->getOperatorLoc());
  } else if (auto *CE = dyn_cast<CXXOperatorCallExpr>(S)) {
    // Same analysis for overloaded comparison operators.
    if (CE->getNumArgs() == 2) {
      auto Op = CE->getOperator();
      switch (Op) {
      case OO_Greater:
      case OO_GreaterEqual:
      case OO_Less:
      case OO_LessEqual:
        if (getInitLCDecl(CE->getArg(0)) == LCDecl)
          return setUB(CE->getArg(1), Op == OO_Less || Op == OO_LessEqual,
                       Op == OO_Less || Op == OO_Greater, CE->getSourceRange(),
                       CE->getOperatorLoc());
        if (getInitLCDecl(CE->getArg(1)) == LCDecl)
          return setUB(CE->getArg(0), Op == OO_Greater || Op == OO_GreaterEqual,
                       Op == OO_Less || Op == OO_Greater, CE->getSourceRange(),
                       CE->getOperatorLoc());
        break;
      case OO_ExclaimEqual:
        return setUB(getInitLCDecl(CE->getArg(0)) == LCDecl ?
                       CE->getArg(1) : CE->getArg(0),
                     /*LessOp=*/llvm::None,
                     /*StrictOp=*/true,
                     CE->getSourceRange(),
                     CE->getOperatorLoc());
        break;
      default:
        break;
      }
    }
  }
  // Dependent conditions are re-checked at instantiation time.
  if (dependent() || SemaRef.CurContext->isDependentContext())
    return false;
  SemaRef.Diag(CondLoc, diag::err_omp_loop_not_canonical_cond)
      << S->getSourceRange() << LCDecl;
  return true;
}
4387 
4388 bool OpenMPIterationSpaceChecker::checkAndSetIncRHS(Expr *RHS) {
4389  // RHS of canonical loop form increment can be:
4390  // var + incr
4391  // incr + var
4392  // var - incr
4393  //
4394  RHS = RHS->IgnoreParenImpCasts();
4395  if (auto *BO = dyn_cast<BinaryOperator>(RHS)) {
4396  if (BO->isAdditiveOp()) {
4397  bool IsAdd = BO->getOpcode() == BO_Add;
4398  if (getInitLCDecl(BO->getLHS()) == LCDecl)
4399  return setStep(BO->getRHS(), !IsAdd);
4400  if (IsAdd && getInitLCDecl(BO->getRHS()) == LCDecl)
4401  return setStep(BO->getLHS(), /*Subtract=*/false);
4402  }
4403  } else if (auto *CE = dyn_cast<CXXOperatorCallExpr>(RHS)) {
4404  bool IsAdd = CE->getOperator() == OO_Plus;
4405  if ((IsAdd || CE->getOperator() == OO_Minus) && CE->getNumArgs() == 2) {
4406  if (getInitLCDecl(CE->getArg(0)) == LCDecl)
4407  return setStep(CE->getArg(1), !IsAdd);
4408  if (IsAdd && getInitLCDecl(CE->getArg(1)) == LCDecl)
4409  return setStep(CE->getArg(0), /*Subtract=*/false);
4410  }
4411  }
4412  if (dependent() || SemaRef.CurContext->isDependentContext())
4413  return false;
4414  SemaRef.Diag(RHS->getBeginLoc(), diag::err_omp_loop_not_canonical_incr)
4415  << RHS->getSourceRange() << LCDecl;
4416  return true;
4417 }
4418 
/// Check incr-expr for canonical loop form and return true if it
/// does not conform; otherwise save the loop step (#Step).
bool OpenMPIterationSpaceChecker::checkAndSetInc(Expr *S) {
  // OpenMP [2.6] Canonical loop form. Incr-expr may be one of the following:
  //   ++var
  //   var++
  //   --var
  //   var--
  //   var += incr
  //   var -= incr
  //   var = var + incr
  //   var = incr + var
  //   var = var - incr
  //
  if (!S) {
    SemaRef.Diag(DefaultLoc, diag::err_omp_loop_not_canonical_incr) << LCDecl;
    return true;
  }
  // Strip a temporary-cleanup wrapper that has no side effects.
  if (auto *ExprTemp = dyn_cast<ExprWithCleanups>(S))
    if (!ExprTemp->cleanupsHaveSideEffects())
      S = ExprTemp->getSubExpr();

  IncrementSrcRange = S->getSourceRange();
  S = S->IgnoreParens();
  if (auto *UO = dyn_cast<UnaryOperator>(S)) {
    // ++var / var++ / --var / var--: synthesize a step of +1 or -1.
    if (UO->isIncrementDecrementOp() &&
        getInitLCDecl(UO->getSubExpr()) == LCDecl)
      return setStep(SemaRef
                         .ActOnIntegerConstant(UO->getBeginLoc(),
                                               (UO->isDecrementOp() ? -1 : 1))
                         .get(),
                     /*Subtract=*/false);
  } else if (auto *BO = dyn_cast<BinaryOperator>(S)) {
    switch (BO->getOpcode()) {
    case BO_AddAssign:
    case BO_SubAssign:
      // var += incr / var -= incr.
      if (getInitLCDecl(BO->getLHS()) == LCDecl)
        return setStep(BO->getRHS(), BO->getOpcode() == BO_SubAssign);
      break;
    case BO_Assign:
      // var = <rhs>: delegate the right-hand-side analysis.
      if (getInitLCDecl(BO->getLHS()) == LCDecl)
        return checkAndSetIncRHS(BO->getRHS());
      break;
    default:
      break;
    }
  } else if (auto *CE = dyn_cast<CXXOperatorCallExpr>(S)) {
    // The same forms written with overloaded operators.
    switch (CE->getOperator()) {
    case OO_PlusPlus:
    case OO_MinusMinus:
      if (getInitLCDecl(CE->getArg(0)) == LCDecl)
        return setStep(SemaRef
                           .ActOnIntegerConstant(
                               CE->getBeginLoc(),
                               ((CE->getOperator() == OO_MinusMinus) ? -1 : 1))
                           .get(),
                       /*Subtract=*/false);
      break;
    case OO_PlusEqual:
    case OO_MinusEqual:
      if (getInitLCDecl(CE->getArg(0)) == LCDecl)
        return setStep(CE->getArg(1), CE->getOperator() == OO_MinusEqual);
      break;
    case OO_Equal:
      if (getInitLCDecl(CE->getArg(0)) == LCDecl)
        return checkAndSetIncRHS(CE->getArg(1));
      break;
    default:
      break;
    }
  }
  // Dependent code is re-checked at instantiation time.
  if (dependent() || SemaRef.CurContext->isDependentContext())
    return false;
  SemaRef.Diag(S->getBeginLoc(), diag::err_omp_loop_not_canonical_incr)
      << S->getSourceRange() << LCDecl;
  return true;
}
4496 
4497 static ExprResult
4498 tryBuildCapture(Sema &SemaRef, Expr *Capture,
4499  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) {
4500  if (SemaRef.CurContext->isDependentContext())
4501  return ExprResult(Capture);
4502  if (Capture->isEvaluatable(SemaRef.Context, Expr::SE_AllowSideEffects))
4503  return SemaRef.PerformImplicitConversion(
4504  Capture->IgnoreImpCasts(), Capture->getType(), Sema::AA_Converting,
4505  /*AllowExplicit=*/true);
4506  auto I = Captures.find(Capture);
4507  if (I != Captures.end())
4508  return buildCapture(SemaRef, Capture, I->second);
4509  DeclRefExpr *Ref = nullptr;
4510  ExprResult Res = buildCapture(SemaRef, Capture, Ref);
4511  Captures[Capture] = Ref;
4512  return Res;
4513 }
4514 
4515 /// Build the expression to calculate the number of iterations.
4516 Expr *OpenMPIterationSpaceChecker::buildNumIterations(
4517  Scope *S, const bool LimitedType,
4518  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) const {
4519  ExprResult Diff;
4520  QualType VarType = LCDecl->getType().getNonReferenceType();
4521  if (VarType->isIntegerType() || VarType->isPointerType() ||
4522  SemaRef.getLangOpts().CPlusPlus) {
4523  // Upper - Lower
4524  Expr *UBExpr = TestIsLessOp.getValue() ? UB : LB;
4525  Expr *LBExpr = TestIsLessOp.getValue() ? LB : UB;
4526  Expr *Upper = tryBuildCapture(SemaRef, UBExpr, Captures).get();
4527  Expr *Lower = tryBuildCapture(SemaRef, LBExpr, Captures).get();
4528  if (!Upper || !Lower)
4529  return nullptr;
4530 
4531  Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Sub, Upper, Lower);
4532 
4533  if (!Diff.isUsable() && VarType->getAsCXXRecordDecl()) {
4534  // BuildBinOp already emitted error, this one is to point user to upper
4535  // and lower bound, and to tell what is passed to 'operator-'.
4536  SemaRef.Diag(Upper->getBeginLoc(), diag::err_omp_loop_diff_cxx)
4537  << Upper->getSourceRange() << Lower->getSourceRange();
4538  return nullptr;
4539  }
4540  }
4541 
4542  if (!Diff.isUsable())
4543  return nullptr;
4544 
4545  // Upper - Lower [- 1]
4546  if (TestIsStrictOp)
4547  Diff = SemaRef.BuildBinOp(
4548  S, DefaultLoc, BO_Sub, Diff.get(),
4549  SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get());
4550  if (!Diff.isUsable())
4551  return nullptr;
4552 
4553  // Upper - Lower [- 1] + Step
4554  ExprResult NewStep = tryBuildCapture(SemaRef, Step, Captures);
4555  if (!NewStep.isUsable())
4556  return nullptr;
4557  Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Add, Diff.get(), NewStep.get());
4558  if (!Diff.isUsable())
4559  return nullptr;
4560 
4561  // Parentheses (for dumping/debugging purposes only).
4562  Diff = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, Diff.get());
4563  if (!Diff.isUsable())
4564  return nullptr;
4565 
4566  // (Upper - Lower [- 1] + Step) / Step
4567  Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Div, Diff.get(), NewStep.get());
4568  if (!Diff.isUsable())
4569  return nullptr;
4570 
4571  // OpenMP runtime requires 32-bit or 64-bit loop variables.
4572  QualType Type = Diff.get()->getType();
4573  ASTContext &C = SemaRef.Context;
4574  bool UseVarType = VarType->hasIntegerRepresentation() &&
4575  C.getTypeSize(Type) > C.getTypeSize(VarType);
4576  if (!Type->isIntegerType() || UseVarType) {
4577  unsigned NewSize =
4578  UseVarType ? C.getTypeSize(VarType) : C.getTypeSize(Type);
4579  bool IsSigned = UseVarType ? VarType->hasSignedIntegerRepresentation()
4581  Type = C.getIntTypeForBitwidth(NewSize, IsSigned);
4582  if (!SemaRef.Context.hasSameType(Diff.get()->getType(), Type)) {
4583  Diff = SemaRef.PerformImplicitConversion(
4584  Diff.get(), Type, Sema::AA_Converting, /*AllowExplicit=*/true);
4585  if (!Diff.isUsable())
4586  return nullptr;
4587  }
4588  }
4589  if (LimitedType) {
4590  unsigned NewSize = (C.getTypeSize(Type) > 32) ? 64 : 32;
4591  if (NewSize != C.getTypeSize(Type)) {
4592  if (NewSize < C.getTypeSize(Type)) {
4593  assert(NewSize == 64 && "incorrect loop var size");
4594  SemaRef.Diag(DefaultLoc, diag::warn_omp_loop_64_bit_var)
4595  << InitSrcRange << ConditionSrcRange;
4596  }
4597  QualType NewType = C.getIntTypeForBitwidth(
4598  NewSize, Type->hasSignedIntegerRepresentation() ||
4599  C.getTypeSize(Type) < NewSize);
4600  if (!SemaRef.Context.hasSameType(Diff.get()->getType(), NewType)) {
4601  Diff = SemaRef.PerformImplicitConversion(Diff.get(), NewType,
4602  Sema::AA_Converting, true);
4603  if (!Diff.isUsable())
4604  return nullptr;
4605  }
4606  }
4607  }
4608 
4609  return Diff.get();
4610 }
4611 
4612 Expr *OpenMPIterationSpaceChecker::buildPreCond(
4613  Scope *S, Expr *Cond,
4614  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) const {
4615  // Try to build LB <op> UB, where <op> is <, >, <=, or >=.
4616  bool Suppress = SemaRef.getDiagnostics().getSuppressAllDiagnostics();
4617  SemaRef.getDiagnostics().setSuppressAllDiagnostics(/*Val=*/true);
4618 
4619  ExprResult NewLB = tryBuildCapture(SemaRef, LB, Captures);
4620  ExprResult NewUB = tryBuildCapture(SemaRef, UB, Captures);
4621  if (!NewLB.isUsable() || !NewUB.isUsable())
4622  return nullptr;
4623 
4624  ExprResult CondExpr =
4625  SemaRef.BuildBinOp(S, DefaultLoc,
4626  TestIsLessOp.getValue() ?
4627  (TestIsStrictOp ? BO_LT : BO_LE) :
4628  (TestIsStrictOp ? BO_GT : BO_GE),
4629  NewLB.get(), NewUB.get());
4630  if (CondExpr.isUsable()) {
4631  if (!SemaRef.Context.hasSameUnqualifiedType(CondExpr.get()->getType(),
4632  SemaRef.Context.BoolTy))
4633  CondExpr = SemaRef.PerformImplicitConversion(
4634  CondExpr.get(), SemaRef.Context.BoolTy, /*Action=*/Sema::AA_Casting,
4635  /*AllowExplicit=*/true);
4636  }
4637  SemaRef.getDiagnostics().setSuppressAllDiagnostics(Suppress);
4638  // Otherwise use original loop condition and evaluate it in runtime.
4639  return CondExpr.isUsable() ? CondExpr.get() : Cond;
4640 }
4641 
4642 /// Build reference expression to the counter be used for codegen.
4643 DeclRefExpr *OpenMPIterationSpaceChecker::buildCounterVar(
4644  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures,
4645  DSAStackTy &DSA) const {
4646  auto *VD = dyn_cast<VarDecl>(LCDecl);
4647  if (!VD) {
4648  VD = SemaRef.isOpenMPCapturedDecl(LCDecl);
4650  SemaRef, VD, VD->getType().getNonReferenceType(), DefaultLoc);
4651  const DSAStackTy::DSAVarData Data =
4652  DSA.getTopDSA(LCDecl, /*FromParent=*/false);
4653  // If the loop control decl is explicitly marked as private, do not mark it
4654  // as captured again.
4655  if (!isOpenMPPrivate(Data.CKind) || !Data.RefExpr)
4656  Captures.insert(std::make_pair(LCRef, Ref));
4657  return Ref;
4658  }
4659  return buildDeclRefExpr(SemaRef, VD, VD->getType().getNonReferenceType(),
4660  DefaultLoc);
4661 }
4662 
4663 Expr *OpenMPIterationSpaceChecker::buildPrivateCounterVar() const {
4664  if (LCDecl && !LCDecl->isInvalidDecl()) {
4665  QualType Type = LCDecl->getType().getNonReferenceType();
4666  VarDecl *PrivateVar = buildVarDecl(
4667  SemaRef, DefaultLoc, Type, LCDecl->getName(),
4668  LCDecl->hasAttrs() ? &LCDecl->getAttrs() : nullptr,
4669  isa<VarDecl>(LCDecl)
4670  ? buildDeclRefExpr(SemaRef, cast<VarDecl>(LCDecl), Type, DefaultLoc)
4671  : nullptr);
4672  if (PrivateVar->isInvalidDecl())
4673  return nullptr;
4674  return buildDeclRefExpr(SemaRef, PrivateVar, Type, DefaultLoc);
4675  }
4676  return nullptr;
4677 }
4678 
4679 /// Build initialization of the counter to be used for codegen.
4681 
/// Build step of the counter to be used for codegen (the saved, normalized
/// step expression).
Expr *OpenMPIterationSpaceChecker::buildCounterStep() const { return Step; }
4684 
/// Build the loop-data expression ((Counter [+/- Inc]) - Lower) / Step (or
/// (Upper - Counter) / Step for descending loops) used for 'depend' clauses
/// on 'ordered' directives.
Expr *OpenMPIterationSpaceChecker::buildOrderedLoopData(
    Scope *S, Expr *Counter,
    llvm::MapVector<const Expr *, DeclRefExpr *> &Captures, SourceLocation Loc,
    Expr *Inc, OverloadedOperatorKind OOK) {
  Expr *Cnt = SemaRef.DefaultLvalueConversion(Counter).get();
  if (!Cnt)
    return nullptr;
  if (Inc) {
    assert((OOK == OO_Plus || OOK == OO_Minus) &&
           "Expected only + or - operations for depend clauses.");
    // Apply the explicit offset from the depend clause: Cnt = Counter +/- Inc.
    BinaryOperatorKind BOK = (OOK == OO_Plus) ? BO_Add : BO_Sub;
    Cnt = SemaRef.BuildBinOp(S, Loc, BOK, Cnt, Inc).get();
    if (!Cnt)
      return nullptr;
  }
  ExprResult Diff;
  QualType VarType = LCDecl->getType().getNonReferenceType();
  if (VarType->isIntegerType() || VarType->isPointerType() ||
      SemaRef.getLangOpts().CPlusPlus) {
    // Upper - Lower: for ascending loops the counter is the 'upper' operand,
    // for descending loops it is the 'lower' one.
    Expr *Upper = TestIsLessOp.getValue()
                      ? Cnt
                      : tryBuildCapture(SemaRef, UB, Captures).get();
    Expr *Lower = TestIsLessOp.getValue()
                      ? tryBuildCapture(SemaRef, LB, Captures).get()
                      : Cnt;
    if (!Upper || !Lower)
      return nullptr;

    Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Sub, Upper, Lower);

    if (!Diff.isUsable() && VarType->getAsCXXRecordDecl()) {
      // BuildBinOp already emitted error, this one is to point user to upper
      // and lower bound, and to tell what is passed to 'operator-'.
      SemaRef.Diag(Upper->getBeginLoc(), diag::err_omp_loop_diff_cxx)
          << Upper->getSourceRange() << Lower->getSourceRange();
      return nullptr;
    }
  }

  if (!Diff.isUsable())
    return nullptr;

  // Parentheses (for dumping/debugging purposes only).
  Diff = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, Diff.get());
  if (!Diff.isUsable())
    return nullptr;

  ExprResult NewStep = tryBuildCapture(SemaRef, Step, Captures);
  if (!NewStep.isUsable())
    return nullptr;
  // (Upper - Lower) / Step
  Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Div, Diff.get(), NewStep.get());
  if (!Diff.isUsable())
    return nullptr;

  return Diff.get();
}
4743 
/// Iteration space of a single for loop, collected by
/// OpenMPIterationSpaceChecker for use during directive codegen.
struct LoopIterationSpace final {
  /// True if the condition operator is the strict compare operator (<, > or
  /// !=).
  bool IsStrictCompare = false;
  /// Condition of the loop (evaluated before entering the loop nest).
  Expr *PreCond = nullptr;
  /// This expression calculates the number of iterations in the loop.
  /// It is always possible to calculate it before starting the loop.
  Expr *NumIterations = nullptr;
  /// The loop counter variable.
  Expr *CounterVar = nullptr;
  /// Private loop counter variable.
  Expr *PrivateCounterVar = nullptr;
  /// This is initializer for the initial value of #CounterVar.
  Expr *CounterInit = nullptr;
  /// This is step for the #CounterVar used to generate its update:
  /// #CounterVar = #CounterInit + #CounterStep * CurrentIteration.
  Expr *CounterStep = nullptr;
  /// Should step be subtracted?
  bool Subtract = false;
  /// Source range of the loop init.
  SourceRange InitSrcRange;
  /// Source range of the loop condition.
  SourceRange CondSrcRange;
  /// Source range of the loop increment.
  SourceRange IncSrcRange;
};
4772 
4773 } // namespace
4774 
4776  assert(getLangOpts().OpenMP && "OpenMP is not active.");
4777  assert(Init && "Expected loop in canonical form.");
4778  unsigned AssociatedLoops = DSAStack->getAssociatedLoops();
4779  if (AssociatedLoops > 0 &&
4780  isOpenMPLoopDirective(DSAStack->getCurrentDirective())) {
4781  DSAStack->loopStart();
4782  OpenMPIterationSpaceChecker ISC(*this, ForLoc);
4783  if (!ISC.checkAndSetInit(Init, /*EmitDiags=*/false)) {
4784  if (ValueDecl *D = ISC.getLoopDecl()) {
4785  auto *VD = dyn_cast<VarDecl>(D);
4786  if (!VD) {
4787  if (VarDecl *Private = isOpenMPCapturedDecl(D)) {
4788  VD = Private;
4789  } else {
4790  DeclRefExpr *Ref = buildCapture(*this, D, ISC.getLoopDeclRefExpr(),
4791  /*WithInit=*/false);
4792  VD = cast<VarDecl>(Ref->getDecl());
4793  }
4794  }
4795  DSAStack->addLoopControlVariable(D, VD);
4796  const Decl *LD = DSAStack->getPossiblyLoopCunter();
4797  if (LD != D->getCanonicalDecl()) {
4798  DSAStack->resetPossibleLoopCounter();
4799  if (auto *Var = dyn_cast_or_null<VarDecl>(LD))
4800  MarkDeclarationsReferencedInExpr(
4801  buildDeclRefExpr(*this, const_cast<VarDecl *>(Var),
4802  Var->getType().getNonLValueExprType(Context),
4803  ForLoc, /*RefersToCapture=*/true));
4804  }
4805  }
4806  }
4807  DSAStack->setAssociatedLoops(AssociatedLoops - 1);
4808  }
4809 }
4810 
4811 /// Called on a for stmt to check and extract its iteration space
4812 /// for further processing (such as collapsing).
4814  OpenMPDirectiveKind DKind, Stmt *S, Sema &SemaRef, DSAStackTy &DSA,
4815  unsigned CurrentNestedLoopCount, unsigned NestedLoopCount,
4816  unsigned TotalNestedLoopCount, Expr *CollapseLoopCountExpr,
4817  Expr *OrderedLoopCountExpr,
4818  Sema::VarsWithInheritedDSAType &VarsWithImplicitDSA,
4819  LoopIterationSpace &ResultIterSpace,
4820  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) {
4821  // OpenMP [2.6, Canonical Loop Form]
4822  // for (init-expr; test-expr; incr-expr) structured-block
4823  auto *For = dyn_cast_or_null<ForStmt>(S);
4824  if (!For) {
4825  SemaRef.Diag(S->getBeginLoc(), diag::err_omp_not_for)
4826  << (CollapseLoopCountExpr != nullptr || OrderedLoopCountExpr != nullptr)
4827  << getOpenMPDirectiveName(DKind) << TotalNestedLoopCount
4828  << (CurrentNestedLoopCount > 0) << CurrentNestedLoopCount;
4829  if (TotalNestedLoopCount > 1) {
4830  if (CollapseLoopCountExpr && OrderedLoopCountExpr)
4831  SemaRef.Diag(DSA.getConstructLoc(),
4832  diag::note_omp_collapse_ordered_expr)
4833  << 2 << CollapseLoopCountExpr->getSourceRange()
4834  << OrderedLoopCountExpr->getSourceRange();
4835  else if (CollapseLoopCountExpr)
4836  SemaRef.Diag(CollapseLoopCountExpr->getExprLoc(),
4837  diag::note_omp_collapse_ordered_expr)
4838  << 0 << CollapseLoopCountExpr->getSourceRange();
4839  else
4840  SemaRef.Diag(OrderedLoopCountExpr->getExprLoc(),
4841  diag::note_omp_collapse_ordered_expr)
4842  << 1 << OrderedLoopCountExpr->getSourceRange();
4843  }
4844  return true;
4845  }
4846  assert(For->getBody());
4847 
4848  OpenMPIterationSpaceChecker ISC(SemaRef, For->getForLoc());
4849 
4850  // Check init.
4851  Stmt *Init = For->getInit();
4852  if (ISC.checkAndSetInit(Init))
4853  return true;
4854 
4855  bool HasErrors = false;
4856 
4857  // Check loop variable's type.
4858  if (ValueDecl *LCDecl = ISC.getLoopDecl()) {
4859  Expr *LoopDeclRefExpr = ISC.getLoopDeclRefExpr();
4860 
4861  // OpenMP [2.6, Canonical Loop Form]
4862  // Var is one of the following:
4863  // A variable of signed or unsigned integer type.
4864  // For C++, a variable of a random access iterator type.
4865  // For C, a variable of a pointer type.
4866  QualType VarType = LCDecl->getType().getNonReferenceType();
4867  if (!VarType->isDependentType() && !VarType->isIntegerType() &&
4868  !VarType->isPointerType() &&
4869  !(SemaRef.getLangOpts().CPlusPlus && VarType->isOverloadableType())) {
4870  SemaRef.Diag(Init->getBeginLoc(), diag::err_omp_loop_variable_type)
4871  << SemaRef.getLangOpts().CPlusPlus;
4872  HasErrors = true;
4873  }
4874 
4875  // OpenMP, 2.14.1.1 Data-sharing Attribute Rules for Variables Referenced in
4876  // a Construct
4877  // The loop iteration variable(s) in the associated for-loop(s) of a for or
4878  // parallel for construct is (are) private.
4879  // The loop iteration variable in the associated for-loop of a simd
4880  // construct with just one associated for-loop is linear with a
4881  // constant-linear-step that is the increment of the associated for-loop.
4882  // Exclude loop var from the list of variables with implicitly defined data
4883  // sharing attributes.
4884  VarsWithImplicitDSA.erase(LCDecl);
4885 
4886  // OpenMP [2.14.1.1, Data-sharing Attribute Rules for Variables Referenced
4887  // in a Construct, C/C++].
4888  // The loop iteration variable in the associated for-loop of a simd
4889  // construct with just one associated for-loop may be listed in a linear
4890  // clause with a constant-linear-step that is the increment of the
4891  // associated for-loop.
4892  // The loop iteration variable(s) in the associated for-loop(s) of a for or
4893  // parallel for construct may be listed in a private or lastprivate clause.
4894  DSAStackTy::DSAVarData DVar = DSA.getTopDSA(LCDecl, false);
4895  // If LoopVarRefExpr is nullptr it means the corresponding loop variable is
4896  // declared in the loop and it is predetermined as a private.
4897  OpenMPClauseKind PredeterminedCKind =
4898  isOpenMPSimdDirective(DKind)
4899  ? ((NestedLoopCount == 1) ? OMPC_linear : OMPC_lastprivate)
4900  : OMPC_private;
4901  if (((isOpenMPSimdDirective(DKind) && DVar.CKind != OMPC_unknown &&
4902  DVar.CKind != PredeterminedCKind) ||
4903  ((isOpenMPWorksharingDirective(DKind) || DKind == OMPD_taskloop ||
4904  isOpenMPDistributeDirective(DKind)) &&
4905  !isOpenMPSimdDirective(DKind) && DVar.CKind != OMPC_unknown &&
4906  DVar.CKind != OMPC_private && DVar.CKind != OMPC_lastprivate)) &&
4907  (DVar.CKind != OMPC_private || DVar.RefExpr != nullptr)) {
4908  SemaRef.Diag(Init->getBeginLoc(), diag::err_omp_loop_var_dsa)
4909  << getOpenMPClauseName(DVar.CKind) << getOpenMPDirectiveName(DKind)
4910  << getOpenMPClauseName(PredeterminedCKind);
4911  if (DVar.RefExpr == nullptr)
4912  DVar.CKind = PredeterminedCKind;
4913  reportOriginalDsa(SemaRef, &DSA, LCDecl, DVar, /*IsLoopIterVar=*/true);
4914  HasErrors = true;
4915  } else if (LoopDeclRefExpr != nullptr) {
4916  // Make the loop iteration variable private (for worksharing constructs),
4917  // linear (for simd directives with the only one associated loop) or
4918  // lastprivate (for simd directives with several collapsed or ordered
4919  // loops).
4920  if (DVar.CKind == OMPC_unknown)
4921  DSA.addDSA(LCDecl, LoopDeclRefExpr, PredeterminedCKind);
4922  }
4923 
4924  assert(isOpenMPLoopDirective(DKind) && "DSA for non-loop vars");
4925 
4926  // Check test-expr.
4927  HasErrors |= ISC.checkAndSetCond(For->getCond());
4928 
4929  // Check incr-expr.
4930  HasErrors |= ISC.checkAndSetInc(For->getInc());
4931  }
4932 
4933  if (ISC.dependent() || SemaRef.CurContext->isDependentContext() || HasErrors)
4934  return HasErrors;
4935 
4936  // Build the loop's iteration space representation.
4937  ResultIterSpace.PreCond =
4938  ISC.buildPreCond(DSA.getCurScope(), For->getCond(), Captures);
4939  ResultIterSpace.NumIterations = ISC.buildNumIterations(
4940  DSA.getCurScope(),
4941  (isOpenMPWorksharingDirective(DKind) ||
4943  Captures);
4944  ResultIterSpace.CounterVar = ISC.buildCounterVar(Captures, DSA);
4945  ResultIterSpace.PrivateCounterVar = ISC.buildPrivateCounterVar();
4946  ResultIterSpace.CounterInit = ISC.buildCounterInit();
4947  ResultIterSpace.CounterStep = ISC.buildCounterStep();
4948  ResultIterSpace.InitSrcRange = ISC.getInitSrcRange();
4949  ResultIterSpace.CondSrcRange = ISC.getConditionSrcRange();
4950  ResultIterSpace.IncSrcRange = ISC.getIncrementSrcRange();
4951  ResultIterSpace.Subtract = ISC.shouldSubtractStep();
4952  ResultIterSpace.IsStrictCompare = ISC.isStrictTestOp();
4953 
4954  HasErrors |= (ResultIterSpace.PreCond == nullptr ||
4955  ResultIterSpace.NumIterations == nullptr ||
4956  ResultIterSpace.CounterVar == nullptr ||
4957  ResultIterSpace.PrivateCounterVar == nullptr ||
4958  ResultIterSpace.CounterInit == nullptr ||
4959  ResultIterSpace.CounterStep == nullptr);
4960  if (!HasErrors && DSA.isOrderedRegion()) {
4961  if (DSA.getOrderedRegionParam().second->getNumForLoops()) {
4962  if (CurrentNestedLoopCount <
4963  DSA.getOrderedRegionParam().second->getLoopNumIterations().size()) {
4964  DSA.getOrderedRegionParam().second->setLoopNumIterations(
4965  CurrentNestedLoopCount, ResultIterSpace.NumIterations);
4966  DSA.getOrderedRegionParam().second->setLoopCounter(
4967  CurrentNestedLoopCount, ResultIterSpace.CounterVar);
4968  }
4969  }
4970  for (auto &Pair : DSA.getDoacrossDependClauses()) {
4971  if (CurrentNestedLoopCount >= Pair.first->getNumLoops()) {
4972  // Erroneous case - clause has some problems.
4973  continue;
4974  }
4975  if (Pair.first->getDependencyKind() == OMPC_DEPEND_sink &&
4976  Pair.second.size() <= CurrentNestedLoopCount) {
4977  // Erroneous case - clause has some problems.
4978  Pair.first->setLoopData(CurrentNestedLoopCount, nullptr);
4979  continue;
4980  }
4981  Expr *CntValue;
4982  if (Pair.first->getDependencyKind() == OMPC_DEPEND_source)
4983  CntValue = ISC.buildOrderedLoopData(
4984  DSA.getCurScope(), ResultIterSpace.CounterVar, Captures,
4985  Pair.first->getDependencyLoc());
4986  else
4987  CntValue = ISC.buildOrderedLoopData(
4988  DSA.getCurScope(), ResultIterSpace.CounterVar, Captures,
4989  Pair.first->getDependencyLoc(),
4990  Pair.second[CurrentNestedLoopCount].first,
4991  Pair.second[CurrentNestedLoopCount].second);
4992  Pair.first->setLoopData(CurrentNestedLoopCount, CntValue);
4993  }
4994  }
4995 
4996  return HasErrors;
4997 }
4998 
4999 /// Build 'VarRef = Start.
5000 static ExprResult
5002  ExprResult Start,
5003  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) {
5004  // Build 'VarRef = Start.
5005  ExprResult NewStart = tryBuildCapture(SemaRef, Start.get(), Captures);
5006  if (!NewStart.isUsable())
5007  return ExprError();
5008  if (!SemaRef.Context.hasSameType(NewStart.get()->getType(),
5009  VarRef.get()->getType())) {
5010  NewStart = SemaRef.PerformImplicitConversion(
5011  NewStart.get(), VarRef.get()->getType(), Sema::AA_Converting,
5012  /*AllowExplicit=*/true);
5013  if (!NewStart.isUsable())
5014  return ExprError();
5015  }
5016 
5017  ExprResult Init =
5018  SemaRef.BuildBinOp(S, Loc, BO_Assign, VarRef.get(), NewStart.get());
5019  return Init;
5020 }
5021 
5022 /// Build 'VarRef = Start + Iter * Step'.
5024  Sema &SemaRef, Scope *S, SourceLocation Loc, ExprResult VarRef,
5025  ExprResult Start, ExprResult Iter, ExprResult Step, bool Subtract,
5026  llvm::MapVector<const Expr *, DeclRefExpr *> *Captures = nullptr) {
5027  // Add parentheses (for debugging purposes only).
5028  Iter = SemaRef.ActOnParenExpr(Loc, Loc, Iter.get());
5029  if (!VarRef.isUsable() || !Start.isUsable() || !Iter.isUsable() ||
5030  !Step.isUsable())
5031  return ExprError();
5032 
5033  ExprResult NewStep = Step;
5034  if (Captures)
5035  NewStep = tryBuildCapture(SemaRef, Step.get(), *Captures);
5036  if (NewStep.isInvalid())
5037  return ExprError();
5038  ExprResult Update =
5039  SemaRef.BuildBinOp(S, Loc, BO_Mul, Iter.get(), NewStep.get());
5040  if (!Update.isUsable())
5041  return ExprError();
5042 
5043  // Try to build 'VarRef = Start, VarRef (+|-)= Iter * Step' or
5044  // 'VarRef = Start (+|-) Iter * Step'.
5045  ExprResult NewStart = Start;
5046  if (Captures)
5047  NewStart = tryBuildCapture(SemaRef, Start.get(), *Captures);
5048  if (NewStart.isInvalid())
5049  return ExprError();
5050 
5051  // First attempt: try to build 'VarRef = Start, VarRef += Iter * Step'.
5052  ExprResult SavedUpdate = Update;
5053  ExprResult UpdateVal;
5054  if (VarRef.get()->getType()->isOverloadableType() ||
5055  NewStart.get()->getType()->isOverloadableType() ||
5056  Update.get()->getType()->isOverloadableType()) {
5057  bool Suppress = SemaRef.getDiagnostics().getSuppressAllDiagnostics();
5058  SemaRef.getDiagnostics().setSuppressAllDiagnostics(/*Val=*/true);
5059  Update =
5060  SemaRef.BuildBinOp(S, Loc, BO_Assign, VarRef.get(), NewStart.get());
5061  if (Update.isUsable()) {
5062  UpdateVal =
5063  SemaRef.BuildBinOp(S, Loc, Subtract ? BO_SubAssign : BO_AddAssign,
5064  VarRef.get(), SavedUpdate.get());
5065  if (UpdateVal.isUsable()) {
5066  Update = SemaRef.CreateBuiltinBinOp(Loc, BO_Comma, Update.get(),
5067  UpdateVal.get());
5068  }
5069  }
5070  SemaRef.getDiagnostics().setSuppressAllDiagnostics(Suppress);
5071  }
5072 
5073  // Second attempt: try to build 'VarRef = Start (+|-) Iter * Step'.
5074  if (!Update.isUsable() || !UpdateVal.isUsable()) {
5075  Update = SemaRef.BuildBinOp(S, Loc, Subtract ? BO_Sub : BO_Add,
5076  NewStart.get(), SavedUpdate.get());
5077  if (!Update.isUsable())
5078  return ExprError();
5079 
5080  if (!SemaRef.Context.hasSameType(Update.get()->getType(),
5081  VarRef.get()->getType())) {
5082  Update = SemaRef.PerformImplicitConversion(
5083  Update.get(), VarRef.get()->getType(), Sema::AA_Converting, true);
5084  if (!Update.isUsable())
5085  return ExprError();
5086  }
5087 
5088  Update = SemaRef.BuildBinOp(S, Loc, BO_Assign, VarRef.get(), Update.get());
5089  }
5090  return Update;
5091 }
5092 
5093 /// Convert integer expression \a E to make it have at least \a Bits
5094 /// bits.
5095 static ExprResult widenIterationCount(unsigned Bits, Expr *E, Sema &SemaRef) {
5096  if (E == nullptr)
5097  return ExprError();
5098  ASTContext &C = SemaRef.Context;
5099  QualType OldType = E->getType();
5100  unsigned HasBits = C.getTypeSize(OldType);
5101  if (HasBits >= Bits)
5102  return ExprResult(E);
5103  // OK to convert to signed, because new type has more bits than old.
5104  QualType NewType = C.getIntTypeForBitwidth(Bits, /* Signed */ true);
5105  return SemaRef.PerformImplicitConversion(E, NewType, Sema::AA_Converting,
5106  true);
5107 }
5108 
5109 /// Check if the given expression \a E is a constant integer that fits
5110 /// into \a Bits bits.
5111 static bool fitsInto(unsigned Bits, bool Signed, const Expr *E, Sema &SemaRef) {
5112  if (E == nullptr)
5113  return false;
5114  llvm::APSInt Result;
5115  if (E->isIntegerConstantExpr(Result, SemaRef.Context))
5116  return Signed ? Result.isSignedIntN(Bits) : Result.isIntN(Bits);
5117  return false;
5118 }
5119 
5120 /// Build preinits statement for the given declarations.
5121 static Stmt *buildPreInits(ASTContext &Context,
5122  MutableArrayRef<Decl *> PreInits) {
5123  if (!PreInits.empty()) {
5124  return new (Context) DeclStmt(
5125  DeclGroupRef::Create(Context, PreInits.begin(), PreInits.size()),
5127  }
5128  return nullptr;
5129 }
5130 
5131 /// Build preinits statement for the given declarations.
5132 static Stmt *
5134  const llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) {
5135  if (!Captures.empty()) {
5136  SmallVector<Decl *, 16> PreInits;
5137  for (const auto &Pair : Captures)
5138  PreInits.push_back(Pair.second->getDecl());
5139  return buildPreInits(Context, PreInits);
5140  }
5141  return nullptr;
5142 }
5143 
5144 /// Build postupdate expression for the given list of postupdates expressions.
5145 static Expr *buildPostUpdate(Sema &S, ArrayRef<Expr *> PostUpdates) {
5146  Expr *PostUpdate = nullptr;
5147  if (!PostUpdates.empty()) {
5148  for (Expr *E : PostUpdates) {
5149  Expr *ConvE = S.BuildCStyleCastExpr(
5150  E->getExprLoc(),
5152  E->getExprLoc(), E)
5153  .get();
5154  PostUpdate = PostUpdate
5155  ? S.CreateBuiltinBinOp(ConvE->getExprLoc(), BO_Comma,
5156  PostUpdate, ConvE)
5157  .get()
5158  : ConvE;
5159  }
5160  }
5161  return PostUpdate;
5162 }
5163 
5164 /// Called on a for stmt to check itself and nested loops (if any).
5165 /// \return Returns 0 if one of the collapsed stmts is not canonical for loop,
5166 /// number of collapsed loops otherwise.
5167 static unsigned
5168 checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
5169  Expr *OrderedLoopCountExpr, Stmt *AStmt, Sema &SemaRef,
5170  DSAStackTy &DSA,
5171  Sema::VarsWithInheritedDSAType &VarsWithImplicitDSA,
5173  unsigned NestedLoopCount = 1;
5174  if (CollapseLoopCountExpr) {
5175  // Found 'collapse' clause - calculate collapse number.
5177  if (CollapseLoopCountExpr->EvaluateAsInt(Result, SemaRef.getASTContext()))
5178  NestedLoopCount = Result.Val.getInt().getLimitedValue();
5179  }
5180  unsigned OrderedLoopCount = 1;
5181  if (OrderedLoopCountExpr) {
5182  // Found 'ordered' clause - calculate collapse number.
5183  Expr::EvalResult EVResult;
5184  if (OrderedLoopCountExpr->EvaluateAsInt(EVResult, SemaRef.getASTContext())) {
5185  llvm::APSInt Result = EVResult.Val.getInt();
5186  if (Result.getLimitedValue() < NestedLoopCount) {
5187  SemaRef.Diag(OrderedLoopCountExpr->getExprLoc(),
5188  diag::err_omp_wrong_ordered_loop_count)
5189  << OrderedLoopCountExpr->getSourceRange();
5190  SemaRef.Diag(CollapseLoopCountExpr->getExprLoc(),
5191  diag::note_collapse_loop_count)
5192  << CollapseLoopCountExpr->getSourceRange();
5193  }
5194  OrderedLoopCount = Result.getLimitedValue();
5195  }
5196  }
5197  // This is helper routine for loop directives (e.g., 'for', 'simd',
5198  // 'for simd', etc.).
5199  llvm::MapVector<const Expr *, DeclRefExpr *> Captures;
5201  std::max(OrderedLoopCount, NestedLoopCount));
5202  Stmt *CurStmt = AStmt->IgnoreContainers(/* IgnoreCaptured */ true);
5203  for (unsigned Cnt = 0; Cnt < NestedLoopCount; ++Cnt) {
5205  DKind, CurStmt, SemaRef, DSA, Cnt, NestedLoopCount,
5206  std::max(OrderedLoopCount, NestedLoopCount), CollapseLoopCountExpr,
5207  OrderedLoopCountExpr, VarsWithImplicitDSA, IterSpaces[Cnt],
5208  Captures))
5209  return 0;
5210  // Move on to the next nested for loop, or to the loop body.
5211  // OpenMP [2.8.1, simd construct, Restrictions]
5212  // All loops associated with the construct must be perfectly nested; that
5213  // is, there must be no intervening code nor any OpenMP directive between
5214  // any two loops.
5215  CurStmt = cast<ForStmt>(CurStmt)->getBody()->IgnoreContainers();
5216  }
5217  for (unsigned Cnt = NestedLoopCount; Cnt < OrderedLoopCount; ++Cnt) {
5219  DKind, CurStmt, SemaRef, DSA, Cnt, NestedLoopCount,
5220  std::max(OrderedLoopCount, NestedLoopCount), CollapseLoopCountExpr,
5221  OrderedLoopCountExpr, VarsWithImplicitDSA, IterSpaces[Cnt],
5222  Captures))
5223  return 0;
5224  if (Cnt > 0 && IterSpaces[Cnt].CounterVar) {
5225  // Handle initialization of captured loop iterator variables.
5226  auto *DRE = cast<DeclRefExpr>(IterSpaces[Cnt].CounterVar);
5227  if (isa<OMPCapturedExprDecl>(DRE->getDecl())) {
5228  Captures[DRE] = DRE;
5229  }
5230  }
5231  // Move on to the next nested for loop, or to the loop body.
5232  // OpenMP [2.8.1, simd construct, Restrictions]
5233  // All loops associated with the construct must be perfectly nested; that
5234  // is, there must be no intervening code nor any OpenMP directive between
5235  // any two loops.
5236  CurStmt = cast<ForStmt>(CurStmt)->getBody()->IgnoreContainers();
5237  }
5238 
5239  Built.clear(/* size */ NestedLoopCount);
5240 
5241  if (SemaRef.CurContext->isDependentContext())
5242  return NestedLoopCount;
5243 
5244  // An example of what is generated for the following code:
5245  //
5246  // #pragma omp simd collapse(2) ordered(2)
5247  // for (i = 0; i < NI; ++i)
5248  // for (k = 0; k < NK; ++k)
5249  // for (j = J0; j < NJ; j+=2) {
5250  // <loop body>
5251  // }
5252  //
5253  // We generate the code below.
5254  // Note: the loop body may be outlined in CodeGen.
5255  // Note: some counters may be C++ classes, operator- is used to find number of
5256  // iterations and operator+= to calculate counter value.
5257  // Note: decltype(NumIterations) must be integer type (in 'omp for', only i32
5258  // or i64 is currently supported).
5259  //
5260  // #define NumIterations (NI * ((NJ - J0 - 1 + 2) / 2))
5261  // for (int[32|64]_t IV = 0; IV < NumIterations; ++IV ) {
5262  // .local.i = IV / ((NJ - J0 - 1 + 2) / 2);
5263  // .local.j = J0 + (IV % ((NJ - J0 - 1 + 2) / 2)) * 2;
5264  // // similar updates for vars in clauses (e.g. 'linear')
5265  // <loop body (using local i and j)>
5266  // }
5267  // i = NI; // assign final values of counters
5268  // j = NJ;
5269  //
5270 
5271  // Last iteration number is (I1 * I2 * ... In) - 1, where I1, I2 ... In are
5272  // the iteration counts of the collapsed for loops.
5273  // Precondition tests if there is at least one iteration (all conditions are
5274  // true).
5275  auto PreCond = ExprResult(IterSpaces[0].PreCond);
5276  Expr *N0 = IterSpaces[0].NumIterations;
5277  ExprResult LastIteration32 =
5278  widenIterationCount(/*Bits=*/32,
5279  SemaRef
5280  .PerformImplicitConversion(
5281  N0->IgnoreImpCasts(), N0->getType(),
5282  Sema::AA_Converting, /*AllowExplicit=*/true)
5283  .get(),
5284  SemaRef);
5285  ExprResult LastIteration64 = widenIterationCount(
5286  /*Bits=*/64,
5287  SemaRef
5288  .PerformImplicitConversion(N0->IgnoreImpCasts(), N0->getType(),
5290  /*AllowExplicit=*/true)
5291  .get(),
5292  SemaRef);
5293 
5294  if (!LastIteration32.isUsable() || !LastIteration64.isUsable())
5295  return NestedLoopCount;
5296 
5297  ASTContext &C = SemaRef.Context;
5298  bool AllCountsNeedLessThan32Bits = C.getTypeSize(N0->getType()) < 32;
5299 
5300  Scope *CurScope = DSA.getCurScope();
5301  for (unsigned Cnt = 1; Cnt < NestedLoopCount; ++Cnt) {
5302  if (PreCond.isUsable()) {
5303  PreCond =
5304  SemaRef.BuildBinOp(CurScope, PreCond.get()->getExprLoc(), BO_LAnd,
5305  PreCond.get(), IterSpaces[Cnt].PreCond);
5306  }
5307  Expr *N = IterSpaces[Cnt].NumIterations;
5308  SourceLocation Loc = N->getExprLoc();
5309  AllCountsNeedLessThan32Bits &= C.getTypeSize(N->getType()) < 32;
5310  if (LastIteration32.isUsable())
5311  LastIteration32 = SemaRef.BuildBinOp(
5312  CurScope, Loc, BO_Mul, LastIteration32.get(),
5313  SemaRef
5316  /*AllowExplicit=*/true)
5317  .get());
5318  if (LastIteration64.isUsable())
5319  LastIteration64 = SemaRef.BuildBinOp(
5320  CurScope, Loc, BO_Mul, LastIteration64.get(),
5321  SemaRef
5324  /*AllowExplicit=*/true)
5325  .get());
5326  }
5327 
5328  // Choose either the 32-bit or 64-bit version.
5329  ExprResult LastIteration = LastIteration64;
5330  if (SemaRef.getLangOpts().OpenMPOptimisticCollapse ||
5331  (LastIteration32.isUsable() &&
5332  C.getTypeSize(LastIteration32.get()->getType()) == 32 &&
5333  (AllCountsNeedLessThan32Bits || NestedLoopCount == 1 ||
5334  fitsInto(
5335  /*Bits=*/32,
5336  LastIteration32.get()->getType()->hasSignedIntegerRepresentation(),
5337  LastIteration64.get(), SemaRef))))
5338  LastIteration = LastIteration32;
5339  QualType VType = LastIteration.get()->getType();
5340  QualType RealVType = VType;
5341  QualType StrideVType = VType;
5342  if (isOpenMPTaskLoopDirective(DKind)) {
5343  VType =
5344  SemaRef.Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0);
5345  StrideVType =
5346  SemaRef.Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
5347  }
5348 
5349  if (!LastIteration.isUsable())
5350  return 0;
5351 
5352  // Save the number of iterations.
5353  ExprResult NumIterations = LastIteration;
5354  {
5355  LastIteration = SemaRef.BuildBinOp(
5356  CurScope, LastIteration.get()->getExprLoc(), BO_Sub,
5357  LastIteration.get(),
5358  SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get());
5359  if (!LastIteration.isUsable())
5360  return 0;
5361  }
5362 
5363  // Calculate the last iteration number beforehand instead of doing this on
5364  // each iteration. Do not do this if the number of iterations may be kfold-ed.
5365  llvm::APSInt Result;
5366  bool IsConstant =
5367  LastIteration.get()->isIntegerConstantExpr(Result, SemaRef.Context);
5368  ExprResult CalcLastIteration;
5369  if (!IsConstant) {
5370  ExprResult SaveRef =
5371  tryBuildCapture(SemaRef, LastIteration.get(), Captures);
5372  LastIteration = SaveRef;
5373 
5374  // Prepare SaveRef + 1.
5375  NumIterations = SemaRef.BuildBinOp(
5376  CurScope, SaveRef.get()->getExprLoc(), BO_Add, SaveRef.get(),
5377  SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get());
5378  if (!NumIterations.isUsable())
5379  return 0;
5380  }
5381 
5382  SourceLocation InitLoc = IterSpaces[0].InitSrcRange.getBegin();
5383 
5384  // Build variables passed into runtime, necessary for worksharing directives.
5385  ExprResult LB, UB, IL, ST, EUB, CombLB, CombUB, PrevLB, PrevUB, CombEUB;
5387  isOpenMPDistributeDirective(DKind)) {
5388  // Lower bound variable, initialized with zero.
5389  VarDecl *LBDecl = buildVarDecl(SemaRef, InitLoc, VType, ".omp.lb");
5390  LB = buildDeclRefExpr(SemaRef, LBDecl, VType, InitLoc);
5391  SemaRef.AddInitializerToDecl(LBDecl,
5392  SemaRef.ActOnIntegerConstant(InitLoc, 0).get(),
5393  /*DirectInit*/ false);
5394 
5395  // Upper bound variable, initialized with last iteration number.
5396  VarDecl *UBDecl = buildVarDecl(SemaRef, InitLoc, VType, ".omp.ub");
5397  UB = buildDeclRefExpr(SemaRef, UBDecl, VType, InitLoc);
5398  SemaRef.AddInitializerToDecl(UBDecl, LastIteration.get(),
5399  /*DirectInit*/ false);
5400 
5401  // A 32-bit variable-flag where runtime returns 1 for the last iteration.
5402  // This will be used to implement clause 'lastprivate'.
5403  QualType Int32Ty = SemaRef.Context.getIntTypeForBitwidth(32, true);
5404  VarDecl *ILDecl = buildVarDecl(SemaRef, InitLoc, Int32Ty, ".omp.is_last");
5405  IL = buildDeclRefExpr(SemaRef, ILDecl, Int32Ty, InitLoc);
5406  SemaRef.AddInitializerToDecl(ILDecl,
5407  SemaRef.ActOnIntegerConstant(InitLoc, 0).get(),
5408  /*DirectInit*/ false);
5409 
5410  // Stride variable returned by runtime (we initialize it to 1 by default).
5411  VarDecl *STDecl =
5412  buildVarDecl(SemaRef, InitLoc, StrideVType, ".omp.stride");
5413  ST = buildDeclRefExpr(SemaRef, STDecl, StrideVType, InitLoc);
5414  SemaRef.AddInitializerToDecl(STDecl,
5415  SemaRef.ActOnIntegerConstant(InitLoc, 1).get(),
5416  /*DirectInit*/ false);
5417 
5418  // Build expression: UB = min(UB, LastIteration)
5419  // It is necessary for CodeGen of directives with static scheduling.
5420  ExprResult IsUBGreater = SemaRef.BuildBinOp(CurScope, InitLoc, BO_GT,
5421  UB.get(), LastIteration.get());
5422  ExprResult CondOp = SemaRef.ActOnConditionalOp(
5423  LastIteration.get()->getExprLoc(), InitLoc, IsUBGreater.get(),
5424  LastIteration.get(), UB.get());
5425  EUB = SemaRef.BuildBinOp(CurScope, InitLoc, BO_Assign, UB.get(),
5426  CondOp.get());
5427  EUB = SemaRef.ActOnFinishFullExpr(EUB.get(), /*DiscardedValue*/ false);
5428 
5429  // If we have a combined directive that combines 'distribute', 'for' or
5430  // 'simd' we need to be able to access the bounds of the schedule of the
5431  // enclosing region. E.g. in 'distribute parallel for' the bounds obtained
5432  // by scheduling 'distribute' have to be passed to the schedule of 'for'.
5433  if (isOpenMPLoopBoundSharingDirective(DKind)) {
5434  // Lower bound variable, initialized with zero.
5435  VarDecl *CombLBDecl =
5436  buildVarDecl(SemaRef, InitLoc, VType, ".omp.comb.lb");
5437  CombLB = buildDeclRefExpr(SemaRef, CombLBDecl, VType, InitLoc);
5438  SemaRef.AddInitializerToDecl(
5439  CombLBDecl, SemaRef.ActOnIntegerConstant(InitLoc, 0).get(),
5440  /*DirectInit*/ false);
5441 
5442  // Upper bound variable, initialized with last iteration number.
5443  VarDecl *CombUBDecl =
5444  buildVarDecl(SemaRef, InitLoc, VType, ".omp.comb.ub");
5445  CombUB = buildDeclRefExpr(SemaRef, CombUBDecl, VType, InitLoc);
5446  SemaRef.AddInitializerToDecl(CombUBDecl, LastIteration.get(),
5447  /*DirectInit*/ false);
5448 
5449  ExprResult CombIsUBGreater = SemaRef.BuildBinOp(
5450  CurScope, InitLoc, BO_GT, CombUB.get(), LastIteration.get());
5451  ExprResult CombCondOp =
5452  SemaRef.ActOnConditionalOp(InitLoc, InitLoc, CombIsUBGreater.get(),
5453  LastIteration.get(), CombUB.get());
5454  CombEUB = SemaRef.BuildBinOp(CurScope, InitLoc, BO_Assign, CombUB.get(),
5455  CombCondOp.get());
5456  CombEUB =
5457  SemaRef.ActOnFinishFullExpr(CombEUB.get(), /*DiscardedValue*/ false);
5458 
5459  const CapturedDecl *CD = cast<CapturedStmt>(AStmt)->getCapturedDecl();
5460  // We expect to have at least 2 more parameters than the 'parallel'
5461  // directive does - the lower and upper bounds of the previous schedule.
5462  assert(CD->getNumParams() >= 4 &&
5463  "Unexpected number of parameters in loop combined directive");
5464 
5465  // Set the proper type for the bounds given what we learned from the
5466  // enclosed loops.
5467  ImplicitParamDecl *PrevLBDecl = CD->getParam(/*PrevLB=*/2);
5468  ImplicitParamDecl *PrevUBDecl = CD->getParam(/*PrevUB=*/3);
5469 
5470  // Previous lower and upper bounds are obtained from the region
5471  // parameters.
5472  PrevLB =
5473  buildDeclRefExpr(SemaRef, PrevLBDecl, PrevLBDecl->getType(), InitLoc);
5474  PrevUB =
5475  buildDeclRefExpr(SemaRef, PrevUBDecl, PrevUBDecl->getType(), InitLoc);
5476  }
5477  }
5478 
5479  // Build the iteration variable and its initialization before loop.
5480  ExprResult IV;
5481  ExprResult Init, CombInit;
5482  {
5483  VarDecl *IVDecl = buildVarDecl(SemaRef, InitLoc, RealVType, ".omp.iv");
5484  IV = buildDeclRefExpr(SemaRef, IVDecl, RealVType, InitLoc);
5485  Expr *RHS =
5486  (isOpenMPWorksharingDirective(DKind) ||
5488  ? LB.get()
5489  : SemaRef.ActOnIntegerConstant(SourceLocation(), 0).get();
5490  Init = SemaRef.BuildBinOp(CurScope, InitLoc, BO_Assign, IV.get(), RHS);
5491  Init = SemaRef.ActOnFinishFullExpr(Init.get(), /*DiscardedValue*/ false);
5492 
5493  if (isOpenMPLoopBoundSharingDirective(DKind)) {
5494  Expr *CombRHS =
5495  (isOpenMPWorksharingDirective(DKind) ||
5496  isOpenMPTaskLoopDirective(DKind) ||
5498  ? CombLB.get()
5499  : SemaRef.ActOnIntegerConstant(SourceLocation(), 0).get();
5500  CombInit =
5501  SemaRef.BuildBinOp(CurScope, InitLoc, BO_Assign, IV.get(), CombRHS);
5502  CombInit =
5503  SemaRef.ActOnFinishFullExpr(CombInit.get(), /*DiscardedValue*/ false);
5504  }
5505  }
5506 
5507  bool UseStrictCompare =
5508  RealVType->hasUnsignedIntegerRepresentation() &&
5509  llvm::all_of(IterSpaces, [](const LoopIterationSpace &LIS) {
5510  return LIS.IsStrictCompare;
5511  });
5512  // Loop condition (IV < NumIterations) or (IV <= UB or IV < UB + 1 (for
5513  // unsigned IV)) for worksharing loops.
5514  SourceLocation CondLoc = AStmt->getBeginLoc();
5515  Expr *BoundUB = UB.get();
5516  if (UseStrictCompare) {
5517  BoundUB =
5518  SemaRef
5519  .BuildBinOp(CurScope, CondLoc, BO_Add, BoundUB,
5520  SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get())
5521  .get();
5522  BoundUB =
5523  SemaRef.ActOnFinishFullExpr(BoundUB, /*DiscardedValue*/ false).get();
5524  }
5525  ExprResult Cond =
5526  (isOpenMPWorksharingDirective(DKind) ||
5528  ? SemaRef.BuildBinOp(CurScope, CondLoc,
5529  UseStrictCompare ? BO_LT : BO_LE, IV.get(),
5530  BoundUB)
5531  : SemaRef.BuildBinOp(CurScope, CondLoc, BO_LT, IV.get(),
5532  NumIterations.get());
5533  ExprResult CombDistCond;
5534  if (isOpenMPLoopBoundSharingDirective(DKind)) {
5535  CombDistCond = SemaRef.BuildBinOp(CurScope, CondLoc, BO_LT, IV.get(),
5536  NumIterations.get());
5537  }
5538 
5539  ExprResult CombCond;
5540  if (isOpenMPLoopBoundSharingDirective(DKind)) {
5541  Expr *BoundCombUB = CombUB.get();
5542  if (UseStrictCompare) {
5543  BoundCombUB =
5544  SemaRef
5545  .BuildBinOp(
5546  CurScope, CondLoc, BO_Add, BoundCombUB,
5547  SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get())
5548  .get();
5549  BoundCombUB =
5550  SemaRef.ActOnFinishFullExpr(BoundCombUB, /*DiscardedValue*/ false)
5551  .get();
5552  }
5553  CombCond =
5554  SemaRef.BuildBinOp(CurScope, CondLoc, UseStrictCompare ? BO_LT : BO_LE,
5555  IV.get(), BoundCombUB);
5556  }
5557  // Loop increment (IV = IV + 1)
5558  SourceLocation IncLoc = AStmt->getBeginLoc();
5559  ExprResult Inc =
5560  SemaRef.BuildBinOp(CurScope, IncLoc, BO_Add, IV.get(),
5561  SemaRef.ActOnIntegerConstant(IncLoc, 1).get());
5562  if (!Inc.isUsable())
5563  return 0;
5564  Inc = SemaRef.BuildBinOp(CurScope, IncLoc, BO_Assign, IV.get(), Inc.get());
5565  Inc = SemaRef.ActOnFinishFullExpr(Inc.get(), /*DiscardedValue*/ false);
5566  if (!Inc.isUsable())
5567  return 0;
5568 
5569  // Increments for worksharing loops (LB = LB + ST; UB = UB + ST).
5570  // Used for directives with static scheduling.
5571  // In combined construct, add combined version that use CombLB and CombUB
5572  // base variables for the update
5573  ExprResult NextLB, NextUB, CombNextLB, CombNextUB;
5575  isOpenMPDistributeDirective(DKind)) {
5576  // LB + ST
5577  NextLB = SemaRef.BuildBinOp(CurScope, IncLoc, BO_Add, LB.get(), ST.get());
5578  if (!NextLB.isUsable())
5579  return 0;
5580  // LB = LB + ST
5581  NextLB =
5582  SemaRef.BuildBinOp(CurScope, IncLoc, BO_Assign, LB.get(), NextLB.get());
5583  NextLB =
5584  SemaRef.ActOnFinishFullExpr(NextLB.get(), /*DiscardedValue*/ false);
5585  if (!NextLB.isUsable())
5586  return 0;
5587  // UB + ST
5588  NextUB = SemaRef.BuildBinOp(CurScope, IncLoc, BO_Add, UB.get(), ST.get());
5589  if (!NextUB.isUsable())
5590  return 0;
5591  // UB = UB + ST
5592  NextUB =
5593  SemaRef.BuildBinOp(CurScope, IncLoc, BO_Assign, UB.get(), NextUB.get());
5594  NextUB =
5595  SemaRef.ActOnFinishFullExpr(NextUB.get(), /*DiscardedValue*/ false);
5596  if (!NextUB.isUsable())
5597  return 0;
5598  if (isOpenMPLoopBoundSharingDirective(DKind)) {
5599  CombNextLB =
5600  SemaRef.BuildBinOp(CurScope, IncLoc, BO_Add, CombLB.get(), ST.get());
5601  if (!NextLB.isUsable())
5602  return 0;
5603  // LB = LB + ST
5604  CombNextLB = SemaRef.BuildBinOp(CurScope, IncLoc, BO_Assign, CombLB.get(),
5605  CombNextLB.get());
5606  CombNextLB = SemaRef.ActOnFinishFullExpr(CombNextLB.get(),
5607  /*DiscardedValue*/ false);
5608  if (!CombNextLB.isUsable())
5609  return 0;
5610  // UB + ST
5611  CombNextUB =
5612  SemaRef.BuildBinOp(CurScope, IncLoc, BO_Add, CombUB.get(), ST.get());
5613  if (!CombNextUB.isUsable())
5614  return 0;
5615  // UB = UB + ST
5616  CombNextUB = SemaRef.BuildBinOp(CurScope, IncLoc, BO_Assign, CombUB.get(),
5617  CombNextUB.get());
5618  CombNextUB = SemaRef.ActOnFinishFullExpr(CombNextUB.get(),
5619  /*DiscardedValue*/ false);
5620  if (!CombNextUB.isUsable())
5621  return 0;
5622  }
5623  }
5624 
5625  // Create increment expression for distribute loop when combined in a same
5626  // directive with for as IV = IV + ST; ensure upper bound expression based
5627  // on PrevUB instead of NumIterations - used to implement 'for' when found
5628  // in combination with 'distribute', like in 'distribute parallel for'
5629  SourceLocation DistIncLoc = AStmt->getBeginLoc();
5630  ExprResult DistCond, DistInc, PrevEUB, ParForInDistCond;
5631  if (isOpenMPLoopBoundSharingDirective(DKind)) {
      // Distribute-level condition: IV < BoundUB (strict when the bound was
      // adjusted with +1 above) or IV <= BoundUB.
5632  DistCond = SemaRef.BuildBinOp(
5633  CurScope, CondLoc, UseStrictCompare ? BO_LT : BO_LE, IV.get(), BoundUB);
5634  assert(DistCond.isUsable() && "distribute cond expr was not built");
5635 
      // Distribute-level increment: IV = IV + ST, finished as a full expr.
5636  DistInc =
5637  SemaRef.BuildBinOp(CurScope, DistIncLoc, BO_Add, IV.get(), ST.get());
5638  assert(DistInc.isUsable() && "distribute inc expr was not built");
5639  DistInc = SemaRef.BuildBinOp(CurScope, DistIncLoc, BO_Assign, IV.get(),
5640  DistInc.get());
5641  DistInc =
5642  SemaRef.ActOnFinishFullExpr(DistInc.get(), /*DiscardedValue*/ false);
5643  assert(DistInc.isUsable() && "distribute inc expr was not built");
5644 
5645  // Build expression: UB = min(UB, prevUB) for #for in composite or combined
5646  // construct
      // min() is expressed via the conditional operator:
      //   UB = (UB > PrevUB) ? PrevUB : UB
      // NOTE(review): unlike the Comb* path above, IsUBGreater/CondOp/PrevEUB
      // are used via .get() without isUsable() checks — presumably these
      // cannot fail for already-validated operands; confirm before relying
      // on it.
5647  SourceLocation DistEUBLoc = AStmt->getBeginLoc();
5648  ExprResult IsUBGreater =
5649  SemaRef.BuildBinOp(CurScope, DistEUBLoc, BO_GT, UB.get(), PrevUB.get());
5650  ExprResult CondOp = SemaRef.ActOnConditionalOp(
5651  DistEUBLoc, DistEUBLoc, IsUBGreater.get(), PrevUB.get(), UB.get());
5652  PrevEUB = SemaRef.BuildBinOp(CurScope, DistIncLoc, BO_Assign, UB.get(),
5653  CondOp.get());
5654  PrevEUB =
5655  SemaRef.ActOnFinishFullExpr(PrevEUB.get(), /*DiscardedValue*/ false);
5656 
5657  // Build IV <= PrevUB or IV < PrevUB + 1 for unsigned IV to be used in
5658  // parallel for is in combination with a distribute directive with
5659  // schedule(static, 1)
5660  Expr *BoundPrevUB = PrevUB.get();
5661  if (UseStrictCompare) {
      // Strict comparison: rewrite the bound as PrevUB + 1 so 'IV < bound'
      // is safe for unsigned induction variables.
5662  BoundPrevUB =
5663  SemaRef
5664  .BuildBinOp(
5665  CurScope, CondLoc, BO_Add, BoundPrevUB,
5666  SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get())
5667  .get();
5668  BoundPrevUB =
5669  SemaRef.ActOnFinishFullExpr(BoundPrevUB, /*DiscardedValue*/ false)
5670  .get();
5671  }
5672  ParForInDistCond =
5673  SemaRef.BuildBinOp(CurScope, CondLoc, UseStrictCompare ? BO_LT : BO_LE,
5674  IV.get(), BoundPrevUB);
5675  }
5676 
5677  // Build updates and final values of the loop counters.
5678  bool HasErrors = false;
      // One slot per collapsed loop in the nest; filled in the loop below.
5679  Built.Counters.resize(NestedLoopCount);
5680  Built.Inits.resize(NestedLoopCount);
5681  Built.Updates.resize(NestedLoopCount);
5682  Built.Finals.resize(NestedLoopCount);
5683  {
5684  // We implement the following algorithm for obtaining the
5685  // original loop iteration variable values based on the
5686  // value of the collapsed loop iteration variable IV.
5687  //
5688  // Let n+1 be the number of collapsed loops in the nest.
5689  // Iteration variables (I0, I1, .... In)
5690  // Iteration counts (N0, N1, ... Nn)
5691  //
5692  // Acc = IV;
5693  //
5694  // To compute Ik for loop k, 0 <= k <= n, generate:
5695  // Prod = N(k+1) * N(k+2) * ... * Nn;
5696  // Ik = Acc / Prod;
5697  // Acc -= Ik * Prod;
5698  //
      // Acc starts as the collapsed IV and is successively reduced per the
      // algorithm above as each outer counter is peeled off.
5699  ExprResult Acc = IV;
5700  for (unsigned int Cnt = 0; Cnt < NestedLoopCount; ++Cnt) {
5701  LoopIterationSpace &IS = IterSpaces[Cnt];
5702  SourceLocation UpdLoc = IS.IncSrcRange.getBegin();
5703  ExprResult Iter;
5704 
5705  // Compute prod
5706  ExprResult Prod =
5707  SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get();
5708  for (unsigned int K = Cnt+1; K < NestedLoopCount; ++K)
5709  Prod = SemaRef.BuildBinOp(CurScope, UpdLoc, BO_Mul, Prod.