clang 10.0.0svn
SemaOpenMP.cpp
1 //===--- SemaOpenMP.cpp - Semantic Analysis for OpenMP constructs ---------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements semantic analysis for OpenMP directives and
10 /// clauses.
11 ///
12 //===----------------------------------------------------------------------===//
13 
14 #include "TreeTransform.h"
15 #include "clang/AST/ASTContext.h"
18 #include "clang/AST/Decl.h"
19 #include "clang/AST/DeclCXX.h"
20 #include "clang/AST/DeclOpenMP.h"
21 #include "clang/AST/StmtCXX.h"
22 #include "clang/AST/StmtOpenMP.h"
23 #include "clang/AST/StmtVisitor.h"
24 #include "clang/AST/TypeOrdering.h"
27 #include "clang/Sema/Lookup.h"
28 #include "clang/Sema/Scope.h"
29 #include "clang/Sema/ScopeInfo.h"
31 #include "llvm/ADT/PointerEmbeddedInt.h"
32 using namespace clang;
33 
34 //===----------------------------------------------------------------------===//
35 // Stack of data-sharing attributes for variables
36 //===----------------------------------------------------------------------===//
37 
38 static const Expr *checkMapClauseExpressionBase(
39     Sema &SemaRef, Expr *E,
40     OMPClauseMappableExprCommon::MappableExprComponentListRef CurComponents,
41     OpenMPClauseKind CKind, bool NoDiagnose);
42 
43 namespace {
44 /// Default data sharing attributes, which can be applied to a directive.
45 enum DefaultDataSharingAttributes {
46   DSA_unspecified = 0, /// Data sharing attribute not specified.
47   DSA_none = 1 << 0,   /// Default data sharing attribute 'none'.
48   DSA_shared = 1 << 1, /// Default data sharing attribute 'shared'.
49 };
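 // The default attribute for a region is set from an explicit 'default'
 // clause, e.g. '#pragma omp parallel default(none)' requires every variable
 // referenced in the region to have an explicit data-sharing attribute.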
50 
51 /// Attributes of the defaultmap clause.
52 enum DefaultMapAttributes {
53   DMA_unspecified,   /// Default mapping is not specified.
54   DMA_tofrom_scalar, /// Default mapping is 'tofrom:scalar'.
55 };
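 // Set from a 'defaultmap' clause on a target construct, e.g.
 //   #pragma omp target defaultmap(tofrom:scalar)
 // which makes scalar variables default to a 'tofrom' map instead of being
 // implicitly firstprivate.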
56 
57 /// Stack for tracking declarations used in OpenMP directives and
58 /// clauses and their data-sharing attributes.
59 class DSAStackTy {
60 public:
 61  struct DSAVarData {
 62    OpenMPDirectiveKind DKind = OMPD_unknown;
 63    OpenMPClauseKind CKind = OMPC_unknown;
 64    const Expr *RefExpr = nullptr;
65  DeclRefExpr *PrivateCopy = nullptr;
66  SourceLocation ImplicitDSALoc;
67  DSAVarData() = default;
68  DSAVarData(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind,
69  const Expr *RefExpr, DeclRefExpr *PrivateCopy,
70  SourceLocation ImplicitDSALoc)
71  : DKind(DKind), CKind(CKind), RefExpr(RefExpr),
72  PrivateCopy(PrivateCopy), ImplicitDSALoc(ImplicitDSALoc) {}
73  };
 74  using OperatorOffsetTy =
 75      llvm::SmallVector<std::pair<Expr *, OverloadedOperatorKind>, 4>;
76  using DoacrossDependMapTy =
77  llvm::DenseMap<OMPDependClause *, OperatorOffsetTy>;
78 
79 private:
80  struct DSAInfo {
81  OpenMPClauseKind Attributes = OMPC_unknown;
82  /// Pointer to a reference expression and a flag which shows that the
83  /// variable is marked as lastprivate(true) or not (false).
84  llvm::PointerIntPair<const Expr *, 1, bool> RefExpr;
85  DeclRefExpr *PrivateCopy = nullptr;
86  };
87  using DeclSAMapTy = llvm::SmallDenseMap<const ValueDecl *, DSAInfo, 8>;
88  using AlignedMapTy = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 8>;
89  using LCDeclInfo = std::pair<unsigned, VarDecl *>;
90  using LoopControlVariablesMapTy =
91  llvm::SmallDenseMap<const ValueDecl *, LCDeclInfo, 8>;
 92  /// Struct that associates mappable expression components with the clause
 93  /// kind where they are found.
 94  struct MappedExprComponentTy {
 95    OMPClauseMappableExprCommon::MappableExprComponentLists Components;
 96    OpenMPClauseKind Kind = OMPC_unknown;
 97  };
98  using MappedExprComponentsTy =
99  llvm::DenseMap<const ValueDecl *, MappedExprComponentTy>;
100  using CriticalsWithHintsTy =
101  llvm::StringMap<std::pair<const OMPCriticalDirective *, llvm::APSInt>>;
102  struct ReductionData {
103  using BOKPtrType = llvm::PointerEmbeddedInt<BinaryOperatorKind, 16>;
104  SourceRange ReductionRange;
105  llvm::PointerUnion<const Expr *, BOKPtrType> ReductionOp;
106  ReductionData() = default;
107  void set(BinaryOperatorKind BO, SourceRange RR) {
108  ReductionRange = RR;
109  ReductionOp = BO;
110  }
111  void set(const Expr *RefExpr, SourceRange RR) {
112  ReductionRange = RR;
113  ReductionOp = RefExpr;
114  }
115  };
116  using DeclReductionMapTy =
117  llvm::SmallDenseMap<const ValueDecl *, ReductionData, 4>;
118 
119  struct SharingMapTy {
120  DeclSAMapTy SharingMap;
121  DeclReductionMapTy ReductionMap;
122  AlignedMapTy AlignedMap;
123  MappedExprComponentsTy MappedExprComponents;
124  LoopControlVariablesMapTy LCVMap;
125  DefaultDataSharingAttributes DefaultAttr = DSA_unspecified;
126  SourceLocation DefaultAttrLoc;
127  DefaultMapAttributes DefaultMapAttr = DMA_unspecified;
 128  SourceLocation DefaultMapAttrLoc;
 129  OpenMPDirectiveKind Directive = OMPD_unknown;
 130  DeclarationNameInfo DirectiveName;
131  Scope *CurScope = nullptr;
132  SourceLocation ConstructLoc;
133  /// Set of 'depend' clauses with 'sink|source' dependence kind. Required to
134  /// get the data (loop counters etc.) about enclosing loop-based construct.
135  /// This data is required during codegen.
136  DoacrossDependMapTy DoacrossDepends;
 137  /// The optional argument of the 'ordered' clause (if any) together with
 138  /// the 'ordered' clause itself; it is unset if the region has no
 139  /// 'ordered' clause.
 140  llvm::Optional<std::pair<const Expr *, OMPOrderedClause *>> OrderedRegion;
141  unsigned AssociatedLoops = 1;
142  bool HasMutipleLoops = false;
143  const Decl *PossiblyLoopCounter = nullptr;
144  bool NowaitRegion = false;
145  bool CancelRegion = false;
146  bool LoopStart = false;
147  bool BodyComplete = false;
148  SourceLocation InnerTeamsRegionLoc;
149  /// Reference to the taskgroup task_reduction reference expression.
150  Expr *TaskgroupReductionRef = nullptr;
151  llvm::DenseSet<QualType> MappedClassesQualTypes;
152  /// List of globals marked as declare target link in this target region
153  /// (isOpenMPTargetExecutionDirective(Directive) == true).
154  llvm::SmallVector<DeclRefExpr *, 4> DeclareTargetLinkVarDecls;
155  SharingMapTy(OpenMPDirectiveKind DKind, DeclarationNameInfo Name,
156  Scope *CurScope, SourceLocation Loc)
157  : Directive(DKind), DirectiveName(Name), CurScope(CurScope),
158  ConstructLoc(Loc) {}
159  SharingMapTy() = default;
160  };
161 
162  using StackTy = SmallVector<SharingMapTy, 4>;
163 
 164  /// Stack of used declarations and their data-sharing attributes.
 165  DeclSAMapTy Threadprivates;
 166  const FunctionScopeInfo *CurrentNonCapturingFunctionScope = nullptr;
 167  SmallVector<std::pair<StackTy, const FunctionScopeInfo *>, 4> Stack;
 168  /// Kind of the clause currently being parsed; OMPC_unknown when no clause
 169  /// is being parsed (see isClauseParsingMode()).
 170  OpenMPClauseKind ClauseKindMode = OMPC_unknown;
171  Sema &SemaRef;
172  bool ForceCapturing = false;
173  /// true if all the variables in the target executable directives must be
174  /// captured by reference.
175  bool ForceCaptureByReferenceInTargetExecutable = false;
176  CriticalsWithHintsTy Criticals;
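  /// Number of innermost stack entries to skip; raised temporarily by
  /// ParentDirectiveScope so that queries resolve against the parent
  /// directive instead of the current one.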
177  unsigned IgnoredStackElements = 0;
178 
179  /// Iterators over the stack iterate in order from innermost to outermost
180  /// directive.
181  using const_iterator = StackTy::const_reverse_iterator;
182  const_iterator begin() const {
183  return Stack.empty() ? const_iterator()
184  : Stack.back().first.rbegin() + IgnoredStackElements;
185  }
186  const_iterator end() const {
187  return Stack.empty() ? const_iterator() : Stack.back().first.rend();
188  }
189  using iterator = StackTy::reverse_iterator;
190  iterator begin() {
191  return Stack.empty() ? iterator()
192  : Stack.back().first.rbegin() + IgnoredStackElements;
193  }
194  iterator end() {
195  return Stack.empty() ? iterator() : Stack.back().first.rend();
196  }
197 
198  // Convenience operations to get at the elements of the stack.
199 
200  bool isStackEmpty() const {
201  return Stack.empty() ||
202  Stack.back().second != CurrentNonCapturingFunctionScope ||
203  Stack.back().first.size() <= IgnoredStackElements;
204  }
205  size_t getStackSize() const {
206  return isStackEmpty() ? 0
207  : Stack.back().first.size() - IgnoredStackElements;
208  }
209 
210  SharingMapTy *getTopOfStackOrNull() {
211  size_t Size = getStackSize();
212  if (Size == 0)
213  return nullptr;
214  return &Stack.back().first[Size - 1];
215  }
216  const SharingMapTy *getTopOfStackOrNull() const {
217  return const_cast<DSAStackTy&>(*this).getTopOfStackOrNull();
218  }
219  SharingMapTy &getTopOfStack() {
220  assert(!isStackEmpty() && "no current directive");
221  return *getTopOfStackOrNull();
222  }
223  const SharingMapTy &getTopOfStack() const {
224  return const_cast<DSAStackTy&>(*this).getTopOfStack();
225  }
226 
227  SharingMapTy *getSecondOnStackOrNull() {
228  size_t Size = getStackSize();
229  if (Size <= 1)
230  return nullptr;
231  return &Stack.back().first[Size - 2];
232  }
233  const SharingMapTy *getSecondOnStackOrNull() const {
234  return const_cast<DSAStackTy&>(*this).getSecondOnStackOrNull();
235  }
236 
237  /// Get the stack element at a certain level (previously returned by
238  /// \c getNestingLevel).
239  ///
240  /// Note that nesting levels count from outermost to innermost, and this is
241  /// the reverse of our iteration order where new inner levels are pushed at
242  /// the front of the stack.
243  SharingMapTy &getStackElemAtLevel(unsigned Level) {
244  assert(Level < getStackSize() && "no such stack element");
245  return Stack.back().first[Level];
246  }
247  const SharingMapTy &getStackElemAtLevel(unsigned Level) const {
248  return const_cast<DSAStackTy&>(*this).getStackElemAtLevel(Level);
249  }
250 
251  DSAVarData getDSA(const_iterator &Iter, ValueDecl *D) const;
252 
253  /// Checks if the variable is a local for OpenMP region.
254  bool isOpenMPLocal(VarDecl *D, const_iterator Iter) const;
255 
 256  /// Vector of previously declared requires directives.
 257  SmallVector<const OMPRequiresDecl *, 2> RequiresDecls;
 258  /// omp_allocator_handle_t type.
259  QualType OMPAllocatorHandleT;
260  /// Expression for the predefined allocators.
261  Expr *OMPPredefinedAllocators[OMPAllocateDeclAttr::OMPUserDefinedMemAlloc] = {
262  nullptr};
263  /// Vector of previously encountered target directives
264  SmallVector<SourceLocation, 2> TargetLocations;
265 
266 public:
267  explicit DSAStackTy(Sema &S) : SemaRef(S) {}
268 
269  /// Sets omp_allocator_handle_t type.
270  void setOMPAllocatorHandleT(QualType Ty) { OMPAllocatorHandleT = Ty; }
271  /// Gets omp_allocator_handle_t type.
272  QualType getOMPAllocatorHandleT() const { return OMPAllocatorHandleT; }
273  /// Sets the given default allocator.
274  void setAllocator(OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind,
275  Expr *Allocator) {
276  OMPPredefinedAllocators[AllocatorKind] = Allocator;
277  }
278  /// Returns the specified default allocator.
279  Expr *getAllocator(OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind) const {
280  return OMPPredefinedAllocators[AllocatorKind];
281  }
282 
283  bool isClauseParsingMode() const { return ClauseKindMode != OMPC_unknown; }
284  OpenMPClauseKind getClauseParsingMode() const {
285  assert(isClauseParsingMode() && "Must be in clause parsing mode.");
286  return ClauseKindMode;
287  }
288  void setClauseParsingMode(OpenMPClauseKind K) { ClauseKindMode = K; }
289 
290  bool isBodyComplete() const {
291  const SharingMapTy *Top = getTopOfStackOrNull();
292  return Top && Top->BodyComplete;
293  }
294  void setBodyComplete() {
295  getTopOfStack().BodyComplete = true;
296  }
297 
298  bool isForceVarCapturing() const { return ForceCapturing; }
299  void setForceVarCapturing(bool V) { ForceCapturing = V; }
300 
301  void setForceCaptureByReferenceInTargetExecutable(bool V) {
302  ForceCaptureByReferenceInTargetExecutable = V;
303  }
304  bool isForceCaptureByReferenceInTargetExecutable() const {
305  return ForceCaptureByReferenceInTargetExecutable;
306  }
307 
308  void push(OpenMPDirectiveKind DKind, const DeclarationNameInfo &DirName,
309  Scope *CurScope, SourceLocation Loc) {
310  assert(!IgnoredStackElements &&
311  "cannot change stack while ignoring elements");
312  if (Stack.empty() ||
313  Stack.back().second != CurrentNonCapturingFunctionScope)
314  Stack.emplace_back(StackTy(), CurrentNonCapturingFunctionScope);
315  Stack.back().first.emplace_back(DKind, DirName, CurScope, Loc);
316  Stack.back().first.back().DefaultAttrLoc = Loc;
317  }
318 
319  void pop() {
320  assert(!IgnoredStackElements &&
321  "cannot change stack while ignoring elements");
322  assert(!Stack.back().first.empty() &&
323  "Data-sharing attributes stack is empty!");
324  Stack.back().first.pop_back();
325  }
326 
327  /// RAII object to temporarily leave the scope of a directive when we want to
328  /// logically operate in its parent.
329  class ParentDirectiveScope {
330  DSAStackTy &Self;
331  bool Active;
332  public:
333  ParentDirectiveScope(DSAStackTy &Self, bool Activate)
334  : Self(Self), Active(false) {
335  if (Activate)
336  enable();
337  }
338  ~ParentDirectiveScope() { disable(); }
339  void disable() {
340  if (Active) {
341  --Self.IgnoredStackElements;
342  Active = false;
343  }
344  }
345  void enable() {
346  if (!Active) {
347  ++Self.IgnoredStackElements;
348  Active = true;
349  }
350  }
351  };
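  // Illustrative use (names are hypothetical):
  //   {
  //     DSAStackTy::ParentDirectiveScope PDS(*DSAStack, /*Activate=*/true);
  //     // Queries such as getTopOfStack() now resolve to the parent
  //     // directive.
  //   } // the destructor decrements IgnoredStackElements again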
352 
 353  /// Marks that we have started loop parsing.
354  void loopInit() {
355  assert(isOpenMPLoopDirective(getCurrentDirective()) &&
356  "Expected loop-based directive.");
357  getTopOfStack().LoopStart = true;
358  }
359  /// Start capturing of the variables in the loop context.
360  void loopStart() {
361  assert(isOpenMPLoopDirective(getCurrentDirective()) &&
362  "Expected loop-based directive.");
363  getTopOfStack().LoopStart = false;
364  }
365  /// true, if variables are captured, false otherwise.
366  bool isLoopStarted() const {
367  assert(isOpenMPLoopDirective(getCurrentDirective()) &&
368  "Expected loop-based directive.");
369  return !getTopOfStack().LoopStart;
370  }
371  /// Marks (or clears) declaration as possibly loop counter.
372  void resetPossibleLoopCounter(const Decl *D = nullptr) {
373  getTopOfStack().PossiblyLoopCounter =
374  D ? D->getCanonicalDecl() : D;
375  }
376  /// Gets the possible loop counter decl.
377  const Decl *getPossiblyLoopCunter() const {
378  return getTopOfStack().PossiblyLoopCounter;
379  }
380  /// Start new OpenMP region stack in new non-capturing function.
381  void pushFunction() {
382  assert(!IgnoredStackElements &&
383  "cannot change stack while ignoring elements");
384  const FunctionScopeInfo *CurFnScope = SemaRef.getCurFunction();
385  assert(!isa<CapturingScopeInfo>(CurFnScope));
386  CurrentNonCapturingFunctionScope = CurFnScope;
387  }
388  /// Pop region stack for non-capturing function.
389  void popFunction(const FunctionScopeInfo *OldFSI) {
390  assert(!IgnoredStackElements &&
391  "cannot change stack while ignoring elements");
392  if (!Stack.empty() && Stack.back().second == OldFSI) {
393  assert(Stack.back().first.empty());
394  Stack.pop_back();
395  }
396  CurrentNonCapturingFunctionScope = nullptr;
397  for (const FunctionScopeInfo *FSI : llvm::reverse(SemaRef.FunctionScopes)) {
398  if (!isa<CapturingScopeInfo>(FSI)) {
399  CurrentNonCapturingFunctionScope = FSI;
400  break;
401  }
402  }
403  }
404 
405  void addCriticalWithHint(const OMPCriticalDirective *D, llvm::APSInt Hint) {
406  Criticals.try_emplace(D->getDirectiveName().getAsString(), D, Hint);
407  }
408  const std::pair<const OMPCriticalDirective *, llvm::APSInt>
409  getCriticalWithHint(const DeclarationNameInfo &Name) const {
410  auto I = Criticals.find(Name.getAsString());
411  if (I != Criticals.end())
412  return I->second;
413  return std::make_pair(nullptr, llvm::APSInt());
414  }
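  // For example, after seeing
  //   #pragma omp critical (L) hint(omp_sync_hint_contended)
  // the directive is recorded under the name "L" so that a later
  // '#pragma omp critical (L)' with a missing or different hint can be
  // diagnosed.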
415  /// If 'aligned' declaration for given variable \a D was not seen yet,
416  /// add it and return NULL; otherwise return previous occurrence's expression
417  /// for diagnostics.
418  const Expr *addUniqueAligned(const ValueDecl *D, const Expr *NewDE);
419 
420  /// Register specified variable as loop control variable.
421  void addLoopControlVariable(const ValueDecl *D, VarDecl *Capture);
422  /// Check if the specified variable is a loop control variable for
423  /// current region.
424  /// \return The index of the loop control variable in the list of associated
425  /// for-loops (from outer to inner).
426  const LCDeclInfo isLoopControlVariable(const ValueDecl *D) const;
427  /// Check if the specified variable is a loop control variable for
428  /// parent region.
429  /// \return The index of the loop control variable in the list of associated
430  /// for-loops (from outer to inner).
431  const LCDeclInfo isParentLoopControlVariable(const ValueDecl *D) const;
432  /// Get the loop control variable for the I-th loop (or nullptr) in
433  /// parent directive.
434  const ValueDecl *getParentLoopControlVariable(unsigned I) const;
435 
436  /// Adds explicit data sharing attribute to the specified declaration.
437  void addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
438  DeclRefExpr *PrivateCopy = nullptr);
439 
440  /// Adds additional information for the reduction items with the reduction id
441  /// represented as an operator.
442  void addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
443  BinaryOperatorKind BOK);
444  /// Adds additional information for the reduction items with the reduction id
445  /// represented as reduction identifier.
446  void addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
447  const Expr *ReductionRef);
448  /// Returns the location and reduction operation from the innermost parent
449  /// region for the given \p D.
450  const DSAVarData
451  getTopMostTaskgroupReductionData(const ValueDecl *D, SourceRange &SR,
452  BinaryOperatorKind &BOK,
453  Expr *&TaskgroupDescriptor) const;
454  /// Returns the location and reduction operation from the innermost parent
455  /// region for the given \p D.
456  const DSAVarData
457  getTopMostTaskgroupReductionData(const ValueDecl *D, SourceRange &SR,
458  const Expr *&ReductionRef,
459  Expr *&TaskgroupDescriptor) const;
460  /// Return reduction reference expression for the current taskgroup.
461  Expr *getTaskgroupReductionRef() const {
462  assert(getTopOfStack().Directive == OMPD_taskgroup &&
463  "taskgroup reference expression requested for non taskgroup "
464  "directive.");
465  return getTopOfStack().TaskgroupReductionRef;
466  }
467  /// Checks if the given \p VD declaration is actually a taskgroup reduction
468  /// descriptor variable at the \p Level of OpenMP regions.
469  bool isTaskgroupReductionRef(const ValueDecl *VD, unsigned Level) const {
470  return getStackElemAtLevel(Level).TaskgroupReductionRef &&
471  cast<DeclRefExpr>(getStackElemAtLevel(Level).TaskgroupReductionRef)
472  ->getDecl() == VD;
473  }
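  // The taskgroup descriptor is the implicit variable created for a
  //   #pragma omp taskgroup task_reduction(+ : x)
  // region; 'in_reduction' clauses on tasks nested inside it refer back to
  // this descriptor.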
474 
475  /// Returns data sharing attributes from top of the stack for the
476  /// specified declaration.
477  const DSAVarData getTopDSA(ValueDecl *D, bool FromParent);
478  /// Returns data-sharing attributes for the specified declaration.
479  const DSAVarData getImplicitDSA(ValueDecl *D, bool FromParent) const;
 480  /// Checks if the specified variable has data-sharing attributes which
 481  /// match the specified \a CPred predicate in any directive which matches
 482  /// the \a DPred predicate.
483  const DSAVarData
484  hasDSA(ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
485  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
486  bool FromParent) const;
 487  /// Checks if the specified variable has data-sharing attributes which
 488  /// match the specified \a CPred predicate in any innermost directive which
 489  /// matches the \a DPred predicate.
490  const DSAVarData
491  hasInnermostDSA(ValueDecl *D,
492  const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
493  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
494  bool FromParent) const;
 495  /// Checks if the specified variable has explicit data-sharing
 496  /// attributes which match the specified \a CPred predicate at the
 497  /// specified OpenMP region.
498  bool hasExplicitDSA(const ValueDecl *D,
499  const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
500  unsigned Level, bool NotLastprivate = false) const;
501 
 502  /// Returns true if the directive at level \a Level matches the
 503  /// specified \a DPred predicate.
504  bool hasExplicitDirective(
505  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
506  unsigned Level) const;
507 
 508  /// Finds a directive which matches the specified \a DPred predicate.
 509  bool hasDirective(
 510      const llvm::function_ref<bool(
 511          OpenMPDirectiveKind, const DeclarationNameInfo &, SourceLocation)>
 512          DPred,
513  bool FromParent) const;
514 
515  /// Returns currently analyzed directive.
516  OpenMPDirectiveKind getCurrentDirective() const {
517  const SharingMapTy *Top = getTopOfStackOrNull();
518  return Top ? Top->Directive : OMPD_unknown;
519  }
520  /// Returns directive kind at specified level.
521  OpenMPDirectiveKind getDirective(unsigned Level) const {
522  assert(!isStackEmpty() && "No directive at specified level.");
523  return getStackElemAtLevel(Level).Directive;
524  }
525  /// Returns the capture region at the specified level.
526  OpenMPDirectiveKind getCaptureRegion(unsigned Level,
 527                                       unsigned OpenMPCaptureLevel) const {
 528    SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
 529    getOpenMPCaptureRegions(CaptureRegions, getDirective(Level));
530  return CaptureRegions[OpenMPCaptureLevel];
531  }
532  /// Returns parent directive.
533  OpenMPDirectiveKind getParentDirective() const {
534  const SharingMapTy *Parent = getSecondOnStackOrNull();
535  return Parent ? Parent->Directive : OMPD_unknown;
536  }
537 
538  /// Add requires decl to internal vector
539  void addRequiresDecl(OMPRequiresDecl *RD) {
540  RequiresDecls.push_back(RD);
541  }
542 
543  /// Checks if the defined 'requires' directive has specified type of clause.
544  template <typename ClauseType>
545  bool hasRequiresDeclWithClause() {
546  return llvm::any_of(RequiresDecls, [](const OMPRequiresDecl *D) {
547  return llvm::any_of(D->clauselists(), [](const OMPClause *C) {
548  return isa<ClauseType>(C);
549  });
550  });
551  }
552 
553  /// Checks for a duplicate clause amongst previously declared requires
554  /// directives
555  bool hasDuplicateRequiresClause(ArrayRef<OMPClause *> ClauseList) const {
556  bool IsDuplicate = false;
557  for (OMPClause *CNew : ClauseList) {
558  for (const OMPRequiresDecl *D : RequiresDecls) {
559  for (const OMPClause *CPrev : D->clauselists()) {
560  if (CNew->getClauseKind() == CPrev->getClauseKind()) {
561  SemaRef.Diag(CNew->getBeginLoc(),
562  diag::err_omp_requires_clause_redeclaration)
563  << getOpenMPClauseName(CNew->getClauseKind());
564  SemaRef.Diag(CPrev->getBeginLoc(),
565  diag::note_omp_requires_previous_clause)
566  << getOpenMPClauseName(CPrev->getClauseKind());
567  IsDuplicate = true;
568  }
569  }
570  }
571  }
572  return IsDuplicate;
573  }
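  // For example, the second of
  //   #pragma omp requires unified_shared_memory
  //   #pragma omp requires unified_shared_memory
  // is rejected here with err_omp_requires_clause_redeclaration.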
574 
575  /// Add location of previously encountered target to internal vector
576  void addTargetDirLocation(SourceLocation LocStart) {
577  TargetLocations.push_back(LocStart);
578  }
579 
580  // Return previously encountered target region locations.
581  ArrayRef<SourceLocation> getEncounteredTargetLocs() const {
582  return TargetLocations;
583  }
584 
585  /// Set default data sharing attribute to none.
586  void setDefaultDSANone(SourceLocation Loc) {
587  getTopOfStack().DefaultAttr = DSA_none;
588  getTopOfStack().DefaultAttrLoc = Loc;
589  }
590  /// Set default data sharing attribute to shared.
591  void setDefaultDSAShared(SourceLocation Loc) {
592  getTopOfStack().DefaultAttr = DSA_shared;
593  getTopOfStack().DefaultAttrLoc = Loc;
594  }
595  /// Set default data mapping attribute to 'tofrom:scalar'.
596  void setDefaultDMAToFromScalar(SourceLocation Loc) {
597  getTopOfStack().DefaultMapAttr = DMA_tofrom_scalar;
598  getTopOfStack().DefaultMapAttrLoc = Loc;
599  }
600 
601  DefaultDataSharingAttributes getDefaultDSA() const {
602  return isStackEmpty() ? DSA_unspecified
603  : getTopOfStack().DefaultAttr;
604  }
605  SourceLocation getDefaultDSALocation() const {
606  return isStackEmpty() ? SourceLocation()
607  : getTopOfStack().DefaultAttrLoc;
608  }
609  DefaultMapAttributes getDefaultDMA() const {
610  return isStackEmpty() ? DMA_unspecified
611  : getTopOfStack().DefaultMapAttr;
612  }
613  DefaultMapAttributes getDefaultDMAAtLevel(unsigned Level) const {
614  return getStackElemAtLevel(Level).DefaultMapAttr;
615  }
616  SourceLocation getDefaultDMALocation() const {
617  return isStackEmpty() ? SourceLocation()
618  : getTopOfStack().DefaultMapAttrLoc;
619  }
620 
621  /// Checks if the specified variable is a threadprivate.
622  bool isThreadPrivate(VarDecl *D) {
623  const DSAVarData DVar = getTopDSA(D, false);
624  return isOpenMPThreadPrivate(DVar.CKind);
625  }
626 
627  /// Marks current region as ordered (it has an 'ordered' clause).
628  void setOrderedRegion(bool IsOrdered, const Expr *Param,
629  OMPOrderedClause *Clause) {
630  if (IsOrdered)
631  getTopOfStack().OrderedRegion.emplace(Param, Clause);
632  else
633  getTopOfStack().OrderedRegion.reset();
634  }
 635  /// Returns true if the region is ordered (has an associated 'ordered'
 636  /// clause), false otherwise.
637  bool isOrderedRegion() const {
638  if (const SharingMapTy *Top = getTopOfStackOrNull())
639  return Top->OrderedRegion.hasValue();
640  return false;
641  }
642  /// Returns optional parameter for the ordered region.
643  std::pair<const Expr *, OMPOrderedClause *> getOrderedRegionParam() const {
644  if (const SharingMapTy *Top = getTopOfStackOrNull())
645  if (Top->OrderedRegion.hasValue())
646  return Top->OrderedRegion.getValue();
647  return std::make_pair(nullptr, nullptr);
648  }
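  // Example: for '#pragma omp for ordered(2)' the stored parameter is the
  // expression '2'; 'depend(sink : ...)' and 'depend(source)' clauses on the
  // nested '#pragma omp ordered' directives are checked against it.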
 649  /// Returns true if the parent region is ordered (has an associated
 650  /// 'ordered' clause), false otherwise.
651  bool isParentOrderedRegion() const {
652  if (const SharingMapTy *Parent = getSecondOnStackOrNull())
653  return Parent->OrderedRegion.hasValue();
654  return false;
655  }
656  /// Returns optional parameter for the ordered region.
657  std::pair<const Expr *, OMPOrderedClause *>
658  getParentOrderedRegionParam() const {
659  if (const SharingMapTy *Parent = getSecondOnStackOrNull())
660  if (Parent->OrderedRegion.hasValue())
661  return Parent->OrderedRegion.getValue();
662  return std::make_pair(nullptr, nullptr);
663  }
664  /// Marks current region as nowait (it has a 'nowait' clause).
665  void setNowaitRegion(bool IsNowait = true) {
666  getTopOfStack().NowaitRegion = IsNowait;
667  }
 668  /// Returns true if the parent region is nowait (has an associated
 669  /// 'nowait' clause), false otherwise.
670  bool isParentNowaitRegion() const {
671  if (const SharingMapTy *Parent = getSecondOnStackOrNull())
672  return Parent->NowaitRegion;
673  return false;
674  }
675  /// Marks parent region as cancel region.
676  void setParentCancelRegion(bool Cancel = true) {
677  if (SharingMapTy *Parent = getSecondOnStackOrNull())
678  Parent->CancelRegion |= Cancel;
679  }
680  /// Return true if current region has inner cancel construct.
681  bool isCancelRegion() const {
682  const SharingMapTy *Top = getTopOfStackOrNull();
683  return Top ? Top->CancelRegion : false;
684  }
685 
686  /// Set collapse value for the region.
687  void setAssociatedLoops(unsigned Val) {
688  getTopOfStack().AssociatedLoops = Val;
689  if (Val > 1)
690  getTopOfStack().HasMutipleLoops = true;
691  }
692  /// Return collapse value for region.
693  unsigned getAssociatedLoops() const {
694  const SharingMapTy *Top = getTopOfStackOrNull();
695  return Top ? Top->AssociatedLoops : 0;
696  }
697  /// Returns true if the construct is associated with multiple loops.
698  bool hasMutipleLoops() const {
699  const SharingMapTy *Top = getTopOfStackOrNull();
700  return Top ? Top->HasMutipleLoops : false;
701  }
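  // The number of associated loops mirrors the 'collapse'/'ordered' argument,
  // e.g. '#pragma omp parallel for collapse(2)' associates the two outermost
  // perfectly nested loops with the construct.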
702 
703  /// Marks current target region as one with closely nested teams
704  /// region.
705  void setParentTeamsRegionLoc(SourceLocation TeamsRegionLoc) {
706  if (SharingMapTy *Parent = getSecondOnStackOrNull())
707  Parent->InnerTeamsRegionLoc = TeamsRegionLoc;
708  }
709  /// Returns true, if current region has closely nested teams region.
710  bool hasInnerTeamsRegion() const {
711  return getInnerTeamsRegionLoc().isValid();
712  }
713  /// Returns location of the nested teams region (if any).
714  SourceLocation getInnerTeamsRegionLoc() const {
715  const SharingMapTy *Top = getTopOfStackOrNull();
716  return Top ? Top->InnerTeamsRegionLoc : SourceLocation();
717  }
718 
719  Scope *getCurScope() const {
720  const SharingMapTy *Top = getTopOfStackOrNull();
721  return Top ? Top->CurScope : nullptr;
722  }
723  SourceLocation getConstructLoc() const {
724  const SharingMapTy *Top = getTopOfStackOrNull();
725  return Top ? Top->ConstructLoc : SourceLocation();
726  }
727 
728  /// Do the check specified in \a Check to all component lists and return true
729  /// if any issue is found.
730  bool checkMappableExprComponentListsForDecl(
731  const ValueDecl *VD, bool CurrentRegionOnly,
 732      const llvm::function_ref<
 733          bool(OMPClauseMappableExprCommon::MappableExprComponentListRef,
 734               OpenMPClauseKind)>
 735          Check) const {
736  if (isStackEmpty())
737  return false;
738  auto SI = begin();
739  auto SE = end();
740 
741  if (SI == SE)
742  return false;
743 
744  if (CurrentRegionOnly)
745  SE = std::next(SI);
746  else
747  std::advance(SI, 1);
748 
749  for (; SI != SE; ++SI) {
750  auto MI = SI->MappedExprComponents.find(VD);
 751    if (MI != SI->MappedExprComponents.end())
 752      for (OMPClauseMappableExprCommon::MappableExprComponentListRef L :
 753           MI->second.Components)
754  if (Check(L, MI->second.Kind))
755  return true;
756  }
757  return false;
758  }
759 
760  /// Do the check specified in \a Check to all component lists at a given level
761  /// and return true if any issue is found.
762  bool checkMappableExprComponentListsForDeclAtLevel(
763  const ValueDecl *VD, unsigned Level,
 764      const llvm::function_ref<
 765          bool(OMPClauseMappableExprCommon::MappableExprComponentListRef,
 766               OpenMPClauseKind)>
 767          Check) const {
768  if (getStackSize() <= Level)
769  return false;
770 
771  const SharingMapTy &StackElem = getStackElemAtLevel(Level);
772  auto MI = StackElem.MappedExprComponents.find(VD);
 773    if (MI != StackElem.MappedExprComponents.end())
 774      for (OMPClauseMappableExprCommon::MappableExprComponentListRef L :
 775           MI->second.Components)
776  if (Check(L, MI->second.Kind))
777  return true;
778  return false;
779  }
780 
781  /// Create a new mappable expression component list associated with a given
782  /// declaration and initialize it with the provided list of components.
783  void addMappableExpressionComponents(
 784      const ValueDecl *VD,
 785      OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
 786      OpenMPClauseKind WhereFoundClauseKind) {
787  MappedExprComponentTy &MEC = getTopOfStack().MappedExprComponents[VD];
788  // Create new entry and append the new components there.
789  MEC.Components.resize(MEC.Components.size() + 1);
790  MEC.Components.back().append(Components.begin(), Components.end());
791  MEC.Kind = WhereFoundClauseKind;
792  }
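  // A component list describes one map-clause list item as a chain of member
  // accesses and array sections, e.g. for
  //   #pragma omp target map(tofrom : s.p[0:n])
  // the components of 's.p[0:n]' are recorded under the base declaration 's'.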
793 
794  unsigned getNestingLevel() const {
795  assert(!isStackEmpty());
796  return getStackSize() - 1;
797  }
798  void addDoacrossDependClause(OMPDependClause *C,
799  const OperatorOffsetTy &OpsOffs) {
800  SharingMapTy *Parent = getSecondOnStackOrNull();
801  assert(Parent && isOpenMPWorksharingDirective(Parent->Directive));
802  Parent->DoacrossDepends.try_emplace(C, OpsOffs);
803  }
804  llvm::iterator_range<DoacrossDependMapTy::const_iterator>
805  getDoacrossDependClauses() const {
806  const SharingMapTy &StackElem = getTopOfStack();
807  if (isOpenMPWorksharingDirective(StackElem.Directive)) {
808  const DoacrossDependMapTy &Ref = StackElem.DoacrossDepends;
809  return llvm::make_range(Ref.begin(), Ref.end());
810  }
811  return llvm::make_range(StackElem.DoacrossDepends.end(),
812  StackElem.DoacrossDepends.end());
813  }
814 
815  // Store types of classes which have been explicitly mapped
816  void addMappedClassesQualTypes(QualType QT) {
817  SharingMapTy &StackElem = getTopOfStack();
818  StackElem.MappedClassesQualTypes.insert(QT);
819  }
820 
 821  // Check whether a class type was previously explicitly mapped.
822  bool isClassPreviouslyMapped(QualType QT) const {
823  const SharingMapTy &StackElem = getTopOfStack();
824  return StackElem.MappedClassesQualTypes.count(QT) != 0;
825  }
826 
827  /// Adds global declare target to the parent target region.
828  void addToParentTargetRegionLinkGlobals(DeclRefExpr *E) {
829  assert(*OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(
830  E->getDecl()) == OMPDeclareTargetDeclAttr::MT_Link &&
831  "Expected declare target link global.");
832  for (auto &Elem : *this) {
833  if (isOpenMPTargetExecutionDirective(Elem.Directive)) {
834  Elem.DeclareTargetLinkVarDecls.push_back(E);
835  return;
836  }
837  }
838  }
839 
840  /// Returns the list of globals with declare target link if current directive
841  /// is target.
842  ArrayRef<DeclRefExpr *> getLinkGlobals() const {
843  assert(isOpenMPTargetExecutionDirective(getCurrentDirective()) &&
844  "Expected target executable directive.");
845  return getTopOfStack().DeclareTargetLinkVarDecls;
846  }
847 };
848 
849 bool isImplicitTaskingRegion(OpenMPDirectiveKind DKind) {
850  return isOpenMPParallelDirective(DKind) || isOpenMPTeamsDirective(DKind);
851 }
852 
853 bool isImplicitOrExplicitTaskingRegion(OpenMPDirectiveKind DKind) {
854  return isImplicitTaskingRegion(DKind) || isOpenMPTaskingDirective(DKind) ||
855  DKind == OMPD_unknown;
856 }
857 
858 } // namespace
859 
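// Strips full-expressions, materialized/bound temporaries, implicit casts and
// parentheses so callers can reason about the expression as the user wrote it.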
860 static const Expr *getExprAsWritten(const Expr *E) {
861  if (const auto *FE = dyn_cast<FullExpr>(E))
862  E = FE->getSubExpr();
863 
864  if (const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
865  E = MTE->GetTemporaryExpr();
866 
867  while (const auto *Binder = dyn_cast<CXXBindTemporaryExpr>(E))
868  E = Binder->getSubExpr();
869 
870  if (const auto *ICE = dyn_cast<ImplicitCastExpr>(E))
871  E = ICE->getSubExprAsWritten();
872  return E->IgnoreParens();
873 }
874 
 875 static Expr *getExprAsWritten(Expr *E) {
 876  return const_cast<Expr *>(getExprAsWritten(const_cast<const Expr *>(E)));
877 }
878 
879 static const ValueDecl *getCanonicalDecl(const ValueDecl *D) {
880  if (const auto *CED = dyn_cast<OMPCapturedExprDecl>(D))
881  if (const auto *ME = dyn_cast<MemberExpr>(getExprAsWritten(CED->getInit())))
882  D = ME->getMemberDecl();
883  const auto *VD = dyn_cast<VarDecl>(D);
884  const auto *FD = dyn_cast<FieldDecl>(D);
885  if (VD != nullptr) {
886  VD = VD->getCanonicalDecl();
887  D = VD;
888  } else {
889  assert(FD);
890  FD = FD->getCanonicalDecl();
891  D = FD;
892  }
893  return D;
894 }
895 
 896 static ValueDecl *getCanonicalDecl(ValueDecl *D) {
 897  return const_cast<ValueDecl *>(
898  getCanonicalDecl(const_cast<const ValueDecl *>(D)));
899 }
900 
901 DSAStackTy::DSAVarData DSAStackTy::getDSA(const_iterator &Iter,
902  ValueDecl *D) const {
903  D = getCanonicalDecl(D);
904  auto *VD = dyn_cast<VarDecl>(D);
905  const auto *FD = dyn_cast<FieldDecl>(D);
906  DSAVarData DVar;
907  if (Iter == end()) {
908  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
909  // in a region but not in construct]
910  // File-scope or namespace-scope variables referenced in called routines
911  // in the region are shared unless they appear in a threadprivate
912  // directive.
913  if (VD && !VD->isFunctionOrMethodVarDecl() && !isa<ParmVarDecl>(VD))
914  DVar.CKind = OMPC_shared;
915 
916  // OpenMP [2.9.1.2, Data-sharing Attribute Rules for Variables Referenced
917  // in a region but not in construct]
918  // Variables with static storage duration that are declared in called
919  // routines in the region are shared.
920  if (VD && VD->hasGlobalStorage())
921  DVar.CKind = OMPC_shared;
922 
923  // Non-static data members are shared by default.
924  if (FD)
925  DVar.CKind = OMPC_shared;
926 
927  return DVar;
928  }
929 
930  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
931  // in a Construct, C/C++, predetermined, p.1]
932  // Variables with automatic storage duration that are declared in a scope
933  // inside the construct are private.
934  if (VD && isOpenMPLocal(VD, Iter) && VD->isLocalVarDecl() &&
935  (VD->getStorageClass() == SC_Auto || VD->getStorageClass() == SC_None)) {
936  DVar.CKind = OMPC_private;
937  return DVar;
938  }
939 
940  DVar.DKind = Iter->Directive;
941  // Explicitly specified attributes and local variables with predetermined
942  // attributes.
943  if (Iter->SharingMap.count(D)) {
944  const DSAInfo &Data = Iter->SharingMap.lookup(D);
945  DVar.RefExpr = Data.RefExpr.getPointer();
946  DVar.PrivateCopy = Data.PrivateCopy;
947  DVar.CKind = Data.Attributes;
948  DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
949  return DVar;
950  }
951 
952  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
953  // in a Construct, C/C++, implicitly determined, p.1]
954  // In a parallel or task construct, the data-sharing attributes of these
955  // variables are determined by the default clause, if present.
956  switch (Iter->DefaultAttr) {
957  case DSA_shared:
958  DVar.CKind = OMPC_shared;
959  DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
960  return DVar;
961  case DSA_none:
962  return DVar;
963  case DSA_unspecified:
964  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
965  // in a Construct, implicitly determined, p.2]
966  // In a parallel construct, if no default clause is present, these
967  // variables are shared.
968  DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
969  if ((isOpenMPParallelDirective(DVar.DKind) &&
970  !isOpenMPTaskLoopDirective(DVar.DKind)) ||
971  isOpenMPTeamsDirective(DVar.DKind)) {
972  DVar.CKind = OMPC_shared;
973  return DVar;
974  }
975 
976  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
977  // in a Construct, implicitly determined, p.4]
978  // In a task construct, if no default clause is present, a variable that in
979  // the enclosing context is determined to be shared by all implicit tasks
980  // bound to the current team is shared.
981  if (isOpenMPTaskingDirective(DVar.DKind)) {
982  DSAVarData DVarTemp;
983  const_iterator I = Iter, E = end();
984  do {
985  ++I;
986  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables
987  // Referenced in a Construct, implicitly determined, p.6]
988  // In a task construct, if no default clause is present, a variable
989  // whose data-sharing attribute is not determined by the rules above is
990  // firstprivate.
991  DVarTemp = getDSA(I, D);
992  if (DVarTemp.CKind != OMPC_shared) {
993  DVar.RefExpr = nullptr;
994  DVar.CKind = OMPC_firstprivate;
995  return DVar;
996  }
997  } while (I != E && !isImplicitTaskingRegion(I->Directive));
998  DVar.CKind =
999  (DVarTemp.CKind == OMPC_unknown) ? OMPC_firstprivate : OMPC_shared;
1000  return DVar;
1001  }
1002  }
1003  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1004  // in a Construct, implicitly determined, p.3]
1005  // For constructs other than task, if no default clause is present, these
1006  // variables inherit their data-sharing attributes from the enclosing
1007  // context.
1008  return getDSA(++Iter, D);
1009 }
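// Worked example of the implicit rules above:
//   int x;
//   #pragma omp parallel   // no 'default' clause: x is implicitly shared
//   {
//     #pragma omp task     // x is shared in the enclosing context of all
//     x++;                 // implicit tasks of the team, so it stays shared;
//   }                      // a variable private to the parallel region would
//                          // instead become firstprivate on the task.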
1010 
1011 const Expr *DSAStackTy::addUniqueAligned(const ValueDecl *D,
1012  const Expr *NewDE) {
1013  assert(!isStackEmpty() && "Data sharing attributes stack is empty");
1014  D = getCanonicalDecl(D);
1015  SharingMapTy &StackElem = getTopOfStack();
1016  auto It = StackElem.AlignedMap.find(D);
1017  if (It == StackElem.AlignedMap.end()) {
1018  assert(NewDE && "Unexpected nullptr expr to be added into aligned map");
1019  StackElem.AlignedMap[D] = NewDE;
1020  return nullptr;
1021  }
1022  assert(It->second && "Unexpected nullptr expr in the aligned map");
1023  return It->second;
1024 }
1025 
1026 void DSAStackTy::addLoopControlVariable(const ValueDecl *D, VarDecl *Capture) {
1027  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
1028  D = getCanonicalDecl(D);
1029  SharingMapTy &StackElem = getTopOfStack();
1030  StackElem.LCVMap.try_emplace(
1031  D, LCDeclInfo(StackElem.LCVMap.size() + 1, Capture));
1032 }
1033 
1034 const DSAStackTy::LCDeclInfo
1035 DSAStackTy::isLoopControlVariable(const ValueDecl *D) const {
1036  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
1037  D = getCanonicalDecl(D);
1038  const SharingMapTy &StackElem = getTopOfStack();
1039  auto It = StackElem.LCVMap.find(D);
1040  if (It != StackElem.LCVMap.end())
1041  return It->second;
1042  return {0, nullptr};
1043 }
1044 
1045 const DSAStackTy::LCDeclInfo
1046 DSAStackTy::isParentLoopControlVariable(const ValueDecl *D) const {
1047  const SharingMapTy *Parent = getSecondOnStackOrNull();
1048  assert(Parent && "Data-sharing attributes stack is empty");
1049  D = getCanonicalDecl(D);
1050  auto It = Parent->LCVMap.find(D);
1051  if (It != Parent->LCVMap.end())
1052  return It->second;
1053  return {0, nullptr};
1054 }
1055 
1056 const ValueDecl *DSAStackTy::getParentLoopControlVariable(unsigned I) const {
1057  const SharingMapTy *Parent = getSecondOnStackOrNull();
1058  assert(Parent && "Data-sharing attributes stack is empty");
1059  if (Parent->LCVMap.size() < I)
1060  return nullptr;
1061  for (const auto &Pair : Parent->LCVMap)
1062  if (Pair.second.first == I)
1063  return Pair.first;
1064  return nullptr;
1065 }
1066 
1067 void DSAStackTy::addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
1068  DeclRefExpr *PrivateCopy) {
1069  D = getCanonicalDecl(D);
1070  if (A == OMPC_threadprivate) {
1071  DSAInfo &Data = Threadprivates[D];
1072  Data.Attributes = A;
1073  Data.RefExpr.setPointer(E);
1074  Data.PrivateCopy = nullptr;
1075  } else {
1076  DSAInfo &Data = getTopOfStack().SharingMap[D];
1077  assert(Data.Attributes == OMPC_unknown || (A == Data.Attributes) ||
1078  (A == OMPC_firstprivate && Data.Attributes == OMPC_lastprivate) ||
1079  (A == OMPC_lastprivate && Data.Attributes == OMPC_firstprivate) ||
1080  (isLoopControlVariable(D).first && A == OMPC_private));
1081  if (A == OMPC_lastprivate && Data.Attributes == OMPC_firstprivate) {
1082  Data.RefExpr.setInt(/*IntVal=*/true);
1083  return;
1084  }
1085  const bool IsLastprivate =
1086  A == OMPC_lastprivate || Data.Attributes == OMPC_lastprivate;
1087  Data.Attributes = A;
1088  Data.RefExpr.setPointerAndInt(E, IsLastprivate);
1089  Data.PrivateCopy = PrivateCopy;
1090  if (PrivateCopy) {
1091  DSAInfo &Data = getTopOfStack().SharingMap[PrivateCopy->getDecl()];
1092  Data.Attributes = A;
1093  Data.RefExpr.setPointerAndInt(PrivateCopy, IsLastprivate);
1094  Data.PrivateCopy = nullptr;
1095  }
1096  }
1097 }
1098 
1099 /// Build a variable declaration for an OpenMP loop iteration variable.
1100 static VarDecl *buildVarDecl(Sema &SemaRef, SourceLocation Loc, QualType Type,
1101                              StringRef Name, const AttrVec *Attrs = nullptr,
1102  DeclRefExpr *OrigRef = nullptr) {
1103  DeclContext *DC = SemaRef.CurContext;
1104  IdentifierInfo *II = &SemaRef.PP.getIdentifierTable().get(Name);
1105  TypeSourceInfo *TInfo = SemaRef.Context.getTrivialTypeSourceInfo(Type, Loc);
1106  auto *Decl =
1107  VarDecl::Create(SemaRef.Context, DC, Loc, Loc, II, Type, TInfo, SC_None);
1108  if (Attrs) {
1109  for (specific_attr_iterator<AlignedAttr> I(Attrs->begin()), E(Attrs->end());
1110  I != E; ++I)
1111  Decl->addAttr(*I);
1112  }
1113  Decl->setImplicit();
1114  if (OrigRef) {
1115  Decl->addAttr(
1116  OMPReferencedVarAttr::CreateImplicit(SemaRef.Context, OrigRef));
1117  }
1118  return Decl;
1119 }
1120 
1121 static DeclRefExpr *buildDeclRefExpr(Sema &S, VarDecl *D, QualType Ty,
1122                                      SourceLocation Loc,
1123  bool RefersToCapture = false) {
1124  D->setReferenced();
1125  D->markUsed(S.Context);
1126  return DeclRefExpr::Create(S.getASTContext(), NestedNameSpecifierLoc(),
1127                             SourceLocation(), D, RefersToCapture, Loc, Ty,
1128  VK_LValue);
1129 }
1130 
1131 void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
1132  BinaryOperatorKind BOK) {
1133  D = getCanonicalDecl(D);
1134  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
1135  assert(
1136  getTopOfStack().SharingMap[D].Attributes == OMPC_reduction &&
1137  "Additional reduction info may be specified only for reduction items.");
1138  ReductionData &ReductionData = getTopOfStack().ReductionMap[D];
1139  assert(ReductionData.ReductionRange.isInvalid() &&
1140  getTopOfStack().Directive == OMPD_taskgroup &&
1141  "Additional reduction info may be specified only once for reduction "
1142  "items.");
1143  ReductionData.set(BOK, SR);
1144  Expr *&TaskgroupReductionRef =
1145  getTopOfStack().TaskgroupReductionRef;
1146  if (!TaskgroupReductionRef) {
1147  VarDecl *VD = buildVarDecl(SemaRef, SR.getBegin(),
1148  SemaRef.Context.VoidPtrTy, ".task_red.");
1149  TaskgroupReductionRef =
1150  buildDeclRefExpr(SemaRef, VD, SemaRef.Context.VoidPtrTy, SR.getBegin());
1151  }
1152 }
1153 
1154 void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
1155  const Expr *ReductionRef) {
1156  D = getCanonicalDecl(D);
1157  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
1158  assert(
1159  getTopOfStack().SharingMap[D].Attributes == OMPC_reduction &&
1160  "Additional reduction info may be specified only for reduction items.");
1161  ReductionData &ReductionData = getTopOfStack().ReductionMap[D];
1162  assert(ReductionData.ReductionRange.isInvalid() &&
1163  getTopOfStack().Directive == OMPD_taskgroup &&
1164  "Additional reduction info may be specified only once for reduction "
1165  "items.");
1166  ReductionData.set(ReductionRef, SR);
1167  Expr *&TaskgroupReductionRef =
1168  getTopOfStack().TaskgroupReductionRef;
1169  if (!TaskgroupReductionRef) {
1170  VarDecl *VD = buildVarDecl(SemaRef, SR.getBegin(),
1171  SemaRef.Context.VoidPtrTy, ".task_red.");
1172  TaskgroupReductionRef =
1173  buildDeclRefExpr(SemaRef, VD, SemaRef.Context.VoidPtrTy, SR.getBegin());
1174  }
1175 }
1176 
1177 const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
1178  const ValueDecl *D, SourceRange &SR, BinaryOperatorKind &BOK,
1179  Expr *&TaskgroupDescriptor) const {
1180  D = getCanonicalDecl(D);
1181  assert(!isStackEmpty() && "Data-sharing attributes stack is empty.");
1182  for (const_iterator I = begin() + 1, E = end(); I != E; ++I) {
1183  const DSAInfo &Data = I->SharingMap.lookup(D);
1184  if (Data.Attributes != OMPC_reduction || I->Directive != OMPD_taskgroup)
1185  continue;
1186  const ReductionData &ReductionData = I->ReductionMap.lookup(D);
1187  if (!ReductionData.ReductionOp ||
1188  ReductionData.ReductionOp.is<const Expr *>())
1189  return DSAVarData();
1190  SR = ReductionData.ReductionRange;
1191  BOK = ReductionData.ReductionOp.get<ReductionData::BOKPtrType>();
1192  assert(I->TaskgroupReductionRef && "taskgroup reduction reference "
1193  "expression for the descriptor is not "
1194  "set.");
1195  TaskgroupDescriptor = I->TaskgroupReductionRef;
1196  return DSAVarData(OMPD_taskgroup, OMPC_reduction, Data.RefExpr.getPointer(),
1197  Data.PrivateCopy, I->DefaultAttrLoc);
1198  }
1199  return DSAVarData();
1200 }
1201 
1202 const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
1203  const ValueDecl *D, SourceRange &SR, const Expr *&ReductionRef,
1204  Expr *&TaskgroupDescriptor) const {
1205  D = getCanonicalDecl(D);
1206  assert(!isStackEmpty() && "Data-sharing attributes stack is empty.");
1207  for (const_iterator I = begin() + 1, E = end(); I != E; ++I) {
1208  const DSAInfo &Data = I->SharingMap.lookup(D);
1209  if (Data.Attributes != OMPC_reduction || I->Directive != OMPD_taskgroup)
1210  continue;
1211  const ReductionData &ReductionData = I->ReductionMap.lookup(D);
1212  if (!ReductionData.ReductionOp ||
1213  !ReductionData.ReductionOp.is<const Expr *>())
1214  return DSAVarData();
1215  SR = ReductionData.ReductionRange;
1216  ReductionRef = ReductionData.ReductionOp.get<const Expr *>();
1217  assert(I->TaskgroupReductionRef && "taskgroup reduction reference "
1218  "expression for the descriptor is not "
1219  "set.");
1220  TaskgroupDescriptor = I->TaskgroupReductionRef;
1221  return DSAVarData(OMPD_taskgroup, OMPC_reduction, Data.RefExpr.getPointer(),
1222  Data.PrivateCopy, I->DefaultAttrLoc);
1223  }
1224  return DSAVarData();
1225 }
1226 
1227 bool DSAStackTy::isOpenMPLocal(VarDecl *D, const_iterator I) const {
1228  D = D->getCanonicalDecl();
1229  for (const_iterator E = end(); I != E; ++I) {
1230  if (isImplicitOrExplicitTaskingRegion(I->Directive) ||
1231  isOpenMPTargetExecutionDirective(I->Directive)) {
1232  Scope *TopScope = I->CurScope ? I->CurScope->getParent() : nullptr;
1233  Scope *CurScope = getCurScope();
1234  while (CurScope && CurScope != TopScope && !CurScope->isDeclScope(D))
1235  CurScope = CurScope->getParent();
1236  return CurScope != TopScope;
1237  }
1238  }
1239  return false;
1240 }
1241 
1242 static bool isConstNotMutableType(Sema &SemaRef, QualType Type,
1243  bool AcceptIfMutable = true,
1244  bool *IsClassType = nullptr) {
1245  ASTContext &Context = SemaRef.getASTContext();
1246  Type = Type.getNonReferenceType().getCanonicalType();
1247  bool IsConstant = Type.isConstant(Context);
1248  Type = Context.getBaseElementType(Type);
1249  const CXXRecordDecl *RD = AcceptIfMutable && SemaRef.getLangOpts().CPlusPlus
1250  ? Type->getAsCXXRecordDecl()
1251  : nullptr;
1252  if (const auto *CTSD = dyn_cast_or_null<ClassTemplateSpecializationDecl>(RD))
1253  if (const ClassTemplateDecl *CTD = CTSD->getSpecializedTemplate())
1254  RD = CTD->getTemplatedDecl();
1255  if (IsClassType)
1256  *IsClassType = RD;
1257  return IsConstant && !(SemaRef.getLangOpts().CPlusPlus && RD &&
1258  RD->hasDefinition() && RD->hasMutableFields());
1259 }
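// For example, 'const int' is treated as const-not-mutable and rejected for
// most privatization clauses, while a const object of a class with a 'mutable'
// member is accepted when AcceptIfMutable is true.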
1260 
1261 static bool rejectConstNotMutableType(Sema &SemaRef, const ValueDecl *D,
1262                                       QualType Type, OpenMPClauseKind CKind,
1263                                       SourceLocation ELoc,
1264  bool AcceptIfMutable = true,
1265  bool ListItemNotVar = false) {
1266  ASTContext &Context = SemaRef.getASTContext();
1267  bool IsClassType;
1268  if (isConstNotMutableType(SemaRef, Type, AcceptIfMutable, &IsClassType)) {
1269  unsigned Diag = ListItemNotVar
1270  ? diag::err_omp_const_list_item
1271  : IsClassType ? diag::err_omp_const_not_mutable_variable
1272  : diag::err_omp_const_variable;
1273  SemaRef.Diag(ELoc, Diag) << getOpenMPClauseName(CKind);
1274  if (!ListItemNotVar && D) {
1275  const VarDecl *VD = dyn_cast<VarDecl>(D);
1276  bool IsDecl = !VD || VD->isThisDeclarationADefinition(Context) ==
1277                           VarDecl::DeclarationOnly;
1278  SemaRef.Diag(D->getLocation(),
1279  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
1280  << D;
1281  }
1282  return true;
1283  }
1284  return false;
1285 }
1286 
1287 const DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D,
1288  bool FromParent) {
1289  D = getCanonicalDecl(D);
1290  DSAVarData DVar;
1291 
1292  auto *VD = dyn_cast<VarDecl>(D);
1293  auto TI = Threadprivates.find(D);
1294  if (TI != Threadprivates.end()) {
1295  DVar.RefExpr = TI->getSecond().RefExpr.getPointer();
1296  DVar.CKind = OMPC_threadprivate;
1297  return DVar;
1298  }
1299  if (VD && VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
1300  DVar.RefExpr = buildDeclRefExpr(
1301  SemaRef, VD, D->getType().getNonReferenceType(),
1302  VD->getAttr<OMPThreadPrivateDeclAttr>()->getLocation());
1303  DVar.CKind = OMPC_threadprivate;
1304  addDSA(D, DVar.RefExpr, OMPC_threadprivate);
1305  return DVar;
1306  }
1307  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1308  // in a Construct, C/C++, predetermined, p.1]
1309  // Variables appearing in threadprivate directives are threadprivate.
1310  if ((VD && VD->getTLSKind() != VarDecl::TLS_None &&
1311  !(VD->hasAttr<OMPThreadPrivateDeclAttr>() &&
1312  SemaRef.getLangOpts().OpenMPUseTLS &&
1313  SemaRef.getASTContext().getTargetInfo().isTLSSupported())) ||
1314  (VD && VD->getStorageClass() == SC_Register &&
1315  VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())) {
1316  DVar.RefExpr = buildDeclRefExpr(
1317  SemaRef, VD, D->getType().getNonReferenceType(), D->getLocation());
1318  DVar.CKind = OMPC_threadprivate;
1319  addDSA(D, DVar.RefExpr, OMPC_threadprivate);
1320  return DVar;
1321  }
1322  if (SemaRef.getLangOpts().OpenMPCUDAMode && VD &&
1323  VD->isLocalVarDeclOrParm() && !isStackEmpty() &&
1324  !isLoopControlVariable(D).first) {
1325  const_iterator IterTarget =
1326  std::find_if(begin(), end(), [](const SharingMapTy &Data) {
1327  return isOpenMPTargetExecutionDirective(Data.Directive);
1328  });
1329  if (IterTarget != end()) {
1330  const_iterator ParentIterTarget = IterTarget + 1;
1331  for (const_iterator Iter = begin();
1332  Iter != ParentIterTarget; ++Iter) {
1333  if (isOpenMPLocal(VD, Iter)) {
1334  DVar.RefExpr =
1335  buildDeclRefExpr(SemaRef, VD, D->getType().getNonReferenceType(),
1336  D->getLocation());
1337  DVar.CKind = OMPC_threadprivate;
1338  return DVar;
1339  }
1340  }
1341  if (!isClauseParsingMode() || IterTarget != begin()) {
1342  auto DSAIter = IterTarget->SharingMap.find(D);
1343  if (DSAIter != IterTarget->SharingMap.end() &&
1344  isOpenMPPrivate(DSAIter->getSecond().Attributes)) {
1345  DVar.RefExpr = DSAIter->getSecond().RefExpr.getPointer();
1346  DVar.CKind = OMPC_threadprivate;
1347  return DVar;
1348  }
1349  const_iterator End = end();
1350  if (!SemaRef.isOpenMPCapturedByRef(
1351  D, std::distance(ParentIterTarget, End),
1352  /*OpenMPCaptureLevel=*/0)) {
1353  DVar.RefExpr =
1354  buildDeclRefExpr(SemaRef, VD, D->getType().getNonReferenceType(),
1355  IterTarget->ConstructLoc);
1356  DVar.CKind = OMPC_threadprivate;
1357  return DVar;
1358  }
1359  }
1360  }
1361  }
1362 
1363  if (isStackEmpty())
1364  // Not in OpenMP execution region and top scope was already checked.
1365  return DVar;
1366 
1367  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1368  // in a Construct, C/C++, predetermined, p.4]
1369  // Static data members are shared.
1370  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1371  // in a Construct, C/C++, predetermined, p.7]
1372  // Variables with static storage duration that are declared in a scope
1373  // inside the construct are shared.
1374  if (VD && VD->isStaticDataMember()) {
1375  // Check for explicitly specified attributes.
1376  const_iterator I = begin();
1377  const_iterator EndI = end();
1378  if (FromParent && I != EndI)
1379  ++I;
1380  auto It = I->SharingMap.find(D);
1381  if (It != I->SharingMap.end()) {
1382  const DSAInfo &Data = It->getSecond();
1383  DVar.RefExpr = Data.RefExpr.getPointer();
1384  DVar.PrivateCopy = Data.PrivateCopy;
1385  DVar.CKind = Data.Attributes;
1386  DVar.ImplicitDSALoc = I->DefaultAttrLoc;
1387  DVar.DKind = I->Directive;
1388  return DVar;
1389  }
1390 
1391  DVar.CKind = OMPC_shared;
1392  return DVar;
1393  }
1394 
1395  auto &&MatchesAlways = [](OpenMPDirectiveKind) { return true; };
1396  // The predetermined shared attribute for const-qualified types having no
1397  // mutable members was removed after OpenMP 3.1.
1398  if (SemaRef.LangOpts.OpenMP <= 31) {
1399  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1400  // in a Construct, C/C++, predetermined, p.6]
1401  // Variables with const qualified type having no mutable member are
1402  // shared.
1403  if (isConstNotMutableType(SemaRef, D->getType())) {
1404  // Variables with const-qualified type having no mutable member may be
1405  // listed in a firstprivate clause, even if they are static data members.
1406  DSAVarData DVarTemp = hasInnermostDSA(
1407  D,
1408  [](OpenMPClauseKind C) {
1409  return C == OMPC_firstprivate || C == OMPC_shared;
1410  },
1411  MatchesAlways, FromParent);
1412  if (DVarTemp.CKind != OMPC_unknown && DVarTemp.RefExpr)
1413  return DVarTemp;
1414 
1415  DVar.CKind = OMPC_shared;
1416  return DVar;
1417  }
1418  }
1419 
1420  // Explicitly specified attributes and local variables with predetermined
1421  // attributes.
1422  const_iterator I = begin();
1423  const_iterator EndI = end();
1424  if (FromParent && I != EndI)
1425  ++I;
1426  auto It = I->SharingMap.find(D);
1427  if (It != I->SharingMap.end()) {
1428  const DSAInfo &Data = It->getSecond();
1429  DVar.RefExpr = Data.RefExpr.getPointer();
1430  DVar.PrivateCopy = Data.PrivateCopy;
1431  DVar.CKind = Data.Attributes;
1432  DVar.ImplicitDSALoc = I->DefaultAttrLoc;
1433  DVar.DKind = I->Directive;
1434  }
1435 
1436  return DVar;
1437 }
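// getTopDSA reports the attribute already recorded (or predetermined) for D on
// the innermost directive, while getImplicitDSA below walks outward and
// applies the implicitly-determined rules implemented in getDSA().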
1438 
1439 const DSAStackTy::DSAVarData DSAStackTy::getImplicitDSA(ValueDecl *D,
1440  bool FromParent) const {
1441  if (isStackEmpty()) {
1442  const_iterator I;
1443  return getDSA(I, D);
1444  }
1445  D = getCanonicalDecl(D);
1446  const_iterator StartI = begin();
1447  const_iterator EndI = end();
1448  if (FromParent && StartI != EndI)
1449  ++StartI;
1450  return getDSA(StartI, D);
1451 }
1452 
1453 const DSAStackTy::DSAVarData
1454 DSAStackTy::hasDSA(ValueDecl *D,
1455  const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
1456  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
1457  bool FromParent) const {
1458  if (isStackEmpty())
1459  return {};
1460  D = getCanonicalDecl(D);
1461  const_iterator I = begin();
1462  const_iterator EndI = end();
1463  if (FromParent && I != EndI)
1464  ++I;
1465  for (; I != EndI; ++I) {
1466  if (!DPred(I->Directive) &&
1467  !isImplicitOrExplicitTaskingRegion(I->Directive))
1468  continue;
1469  const_iterator NewI = I;
1470  DSAVarData DVar = getDSA(NewI, D);
1471  if (I == NewI && CPred(DVar.CKind))
1472  return DVar;
1473  }
1474  return {};
1475 }
1476 
1477 const DSAStackTy::DSAVarData DSAStackTy::hasInnermostDSA(
1478  ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
1479  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
1480  bool FromParent) const {
1481  if (isStackEmpty())
1482  return {};
1483  D = getCanonicalDecl(D);
1484  const_iterator StartI = begin();
1485  const_iterator EndI = end();
1486  if (FromParent && StartI != EndI)
1487  ++StartI;
1488  if (StartI == EndI || !DPred(StartI->Directive))
1489  return {};
1490  const_iterator NewI = StartI;
1491  DSAVarData DVar = getDSA(NewI, D);
1492  return (NewI == StartI && CPred(DVar.CKind)) ? DVar : DSAVarData();
1493 }
1494 
1495 bool DSAStackTy::hasExplicitDSA(
1496  const ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
1497  unsigned Level, bool NotLastprivate) const {
1498  if (getStackSize() <= Level)
1499  return false;
1500  D = getCanonicalDecl(D);
1501  const SharingMapTy &StackElem = getStackElemAtLevel(Level);
1502  auto I = StackElem.SharingMap.find(D);
1503  if (I != StackElem.SharingMap.end() &&
1504  I->getSecond().RefExpr.getPointer() &&
1505  CPred(I->getSecond().Attributes) &&
1506  (!NotLastprivate || !I->getSecond().RefExpr.getInt()))
1507  return true;
1508  // Check predetermined rules for the loop control variables.
1509  auto LI = StackElem.LCVMap.find(D);
1510  if (LI != StackElem.LCVMap.end())
1511  return CPred(OMPC_private);
1512  return false;
1513 }
1514 
1515 bool DSAStackTy::hasExplicitDirective(
1516  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
1517  unsigned Level) const {
1518  if (getStackSize() <= Level)
1519  return false;
1520  const SharingMapTy &StackElem = getStackElemAtLevel(Level);
1521  return DPred(StackElem.Directive);
1522 }
1523 
1524 bool DSAStackTy::hasDirective(
1525  const llvm::function_ref<bool(OpenMPDirectiveKind,
1526  const DeclarationNameInfo &, SourceLocation)>
1527  DPred,
1528  bool FromParent) const {
1529  // We look only in the enclosing region.
1530  size_t Skip = FromParent ? 2 : 1;
1531  for (const_iterator I = begin() + std::min(Skip, getStackSize()), E = end();
1532  I != E; ++I) {
1533  if (DPred(I->Directive, I->DirectiveName, I->ConstructLoc))
1534  return true;
1535  }
1536  return false;
1537 }
1538 
1539 void Sema::InitDataSharingAttributesStack() {
1540  VarDataSharingAttributesStack = new DSAStackTy(*this);
1541 }
1542 
1543 #define DSAStack static_cast<DSAStackTy *>(VarDataSharingAttributesStack)
1544 
1545 void Sema::pushOpenMPFunctionRegion() {
1546  DSAStack->pushFunction();
1547 }
1548 
1549 void Sema::popOpenMPFunctionRegion(const FunctionScopeInfo *OldFSI) {
1550  DSAStack->popFunction(OldFSI);
1551 }
1552 
1553 static bool isOpenMPDeviceDelayedContext(Sema &S) {
1554  assert(S.LangOpts.OpenMP && S.LangOpts.OpenMPIsDevice &&
1555  "Expected OpenMP device compilation.");
1556  return !S.isInOpenMPTargetExecutionDirective() &&
1557  !S.isInOpenMPDeclareTargetContext();
1558 }
1559 
1560 namespace {
1561 /// Status of the function emission on the host/device.
1563  Emitted,
1564  Discarded,
1565  Unknown,
1566 };
1567 } // anonymous namespace
1568 
1569 Sema::DeviceDiagBuilder Sema::diagIfOpenMPDeviceCode(SourceLocation Loc,
1570  unsigned DiagID) {
1571  assert(LangOpts.OpenMP && LangOpts.OpenMPIsDevice &&
1572  "Expected OpenMP device compilation.");
1573  FunctionEmissionStatus FES = getEmissionStatus(getCurFunctionDecl());
1574  DeviceDiagBuilder::Kind Kind = DeviceDiagBuilder::K_Nop;
1575  switch (FES) {
1576  case FunctionEmissionStatus::Emitted:
1577  Kind = DeviceDiagBuilder::K_Immediate;
1578  break;
1579  case FunctionEmissionStatus::Unknown:
1580  Kind = isOpenMPDeviceDelayedContext(*this) ? DeviceDiagBuilder::K_Deferred
1581  : DeviceDiagBuilder::K_Immediate;
1582  break;
1583  case FunctionEmissionStatus::TemplateDiscarded:
1584  case FunctionEmissionStatus::OMPDiscarded:
1585  Kind = DeviceDiagBuilder::K_Nop;
1586  break;
1587  case FunctionEmissionStatus::CUDADiscarded:
1588  llvm_unreachable("CUDADiscarded unexpected in OpenMP device compilation");
1589  break;
1590  }
1591 
1592  return DeviceDiagBuilder(Kind, Loc, DiagID, getCurFunctionDecl(), *this);
1593 }
1594 
1595 Sema::DeviceDiagBuilder Sema::diagIfOpenMPHostCode(SourceLocation Loc,
1596  unsigned DiagID) {
1597  assert(LangOpts.OpenMP && !LangOpts.OpenMPIsDevice &&
1598  "Expected OpenMP host compilation.");
1599  FunctionEmissionStatus FES = getEmissionStatus(getCurFunctionDecl());
1600  DeviceDiagBuilder::Kind Kind = DeviceDiagBuilder::K_Nop;
1601  switch (FES) {
1602  case FunctionEmissionStatus::Emitted:
1603  Kind = DeviceDiagBuilder::K_Immediate;
1604  break;
1605  case FunctionEmissionStatus::Unknown:
1606  Kind = DeviceDiagBuilder::K_Deferred;
1607  break;
1608  case FunctionEmissionStatus::TemplateDiscarded:
1609  case FunctionEmissionStatus::OMPDiscarded:
1610  case FunctionEmissionStatus::CUDADiscarded:
1611  Kind = DeviceDiagBuilder::K_Nop;
1612  break;
1613  }
1614 
1615  return DeviceDiagBuilder(Kind, Loc, DiagID, getCurFunctionDecl(), *this);
1616 }
1617 
1618 void Sema::checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee,
1619  bool CheckForDelayedContext) {
1620  assert(LangOpts.OpenMP && LangOpts.OpenMPIsDevice &&
1621  "Expected OpenMP device compilation.");
1622  assert(Callee && "Callee may not be null.");
1623  Callee = Callee->getMostRecentDecl();
1624  FunctionDecl *Caller = getCurFunctionDecl();
1625 
1626  // Host-only functions are not available on the device.
1627  if (Caller) {
1628  FunctionEmissionStatus CallerS = getEmissionStatus(Caller);
1629  FunctionEmissionStatus CalleeS = getEmissionStatus(Callee);
1630  assert(CallerS != FunctionEmissionStatus::CUDADiscarded &&
1631  CalleeS != FunctionEmissionStatus::CUDADiscarded &&
1632  "CUDADiscarded unexpected in OpenMP device function check");
1633  if ((CallerS == FunctionEmissionStatus::Emitted ||
1634  (!isOpenMPDeviceDelayedContext(*this) &&
1635  CallerS == FunctionEmissionStatus::Unknown)) &&
1636  CalleeS == FunctionEmissionStatus::OMPDiscarded) {
1637  StringRef HostDevTy = getOpenMPSimpleClauseTypeName(
1638  OMPC_device_type, OMPC_DEVICE_TYPE_host);
1639  Diag(Loc, diag::err_omp_wrong_device_function_call) << HostDevTy << 0;
1640  Diag(Callee->getAttr<OMPDeclareTargetDeclAttr>()->getLocation(),
1641  diag::note_omp_marked_device_type_here)
1642  << HostDevTy;
1643  return;
1644  }
1645  }
1646  // If the caller is known-emitted, mark the callee as known-emitted.
1647  // Otherwise, mark the call in our call graph so we can traverse it later.
1648  if ((CheckForDelayedContext && !isOpenMPDeviceDelayedContext(*this)) ||
1649  (!Caller && !CheckForDelayedContext) ||
1650  (Caller && getEmissionStatus(Caller) == FunctionEmissionStatus::Emitted))
1651  markKnownEmitted(*this, Caller, Callee, Loc,
1652  [CheckForDelayedContext](Sema &S, FunctionDecl *FD) {
1653  return CheckForDelayedContext &&
1654  S.getEmissionStatus(FD) ==
1655  FunctionEmissionStatus::Emitted;
1656  });
1657  else if (Caller)
1658  DeviceCallGraph[Caller].insert({Callee, Loc});
1659 }
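// NOTE (hedged illustration, not part of the original file): the
// err_omp_wrong_device_function_call diagnostic above corresponds to user
// code roughly like
//
//   void host_only();
//   #pragma omp declare target to(host_only) device_type(host)
//
//   #pragma omp declare target
//   void on_device() { host_only(); }   // error during device compilation:
//   #pragma omp end declare target      // callee is marked device_type(host)
//
// where a function restricted to the host is reached from code emitted for
// the device.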
1660 
1661 void Sema::checkOpenMPHostFunction(SourceLocation Loc, FunctionDecl *Callee,
1662  bool CheckCaller) {
1663  assert(LangOpts.OpenMP && !LangOpts.OpenMPIsDevice &&
1664  "Expected OpenMP host compilation.");
1665  assert(Callee && "Callee may not be null.");
1666  Callee = Callee->getMostRecentDecl();
1667  FunctionDecl *Caller = getCurFunctionDecl();
1668 
1669  // Device-only functions are not available on the host.
1670  if (Caller) {
1671  FunctionEmissionStatus CallerS = getEmissionStatus(Caller);
1672  FunctionEmissionStatus CalleeS = getEmissionStatus(Callee);
1673  assert(
1674  (LangOpts.CUDA || (CallerS != FunctionEmissionStatus::CUDADiscarded &&
1675  CalleeS != FunctionEmissionStatus::CUDADiscarded)) &&
1676  "CUDADiscarded unexpected in OpenMP host function check");
1677  if (CallerS == FunctionEmissionStatus::Emitted &&
1678  CalleeS == FunctionEmissionStatus::OMPDiscarded) {
1679  StringRef NoHostDevTy = getOpenMPSimpleClauseTypeName(
1680  OMPC_device_type, OMPC_DEVICE_TYPE_nohost);
1681  Diag(Loc, diag::err_omp_wrong_device_function_call) << NoHostDevTy << 1;
1682  Diag(Callee->getAttr<OMPDeclareTargetDeclAttr>()->getLocation(),
1683  diag::note_omp_marked_device_type_here)
1684  << NoHostDevTy;
1685  return;
1686  }
1687  }
1688  // If the caller is known-emitted, mark the callee as known-emitted.
1689  // Otherwise, mark the call in our call graph so we can traverse it later.
1690  if (!shouldIgnoreInHostDeviceCheck(Callee)) {
1691  if ((!CheckCaller && !Caller) ||
1692  (Caller &&
1693  getEmissionStatus(Caller) == FunctionEmissionStatus::Emitted))
1694  markKnownEmitted(
1695  *this, Caller, Callee, Loc, [CheckCaller](Sema &S, FunctionDecl *FD) {
1696  return CheckCaller &&
1697  S.getEmissionStatus(FD) == FunctionEmissionStatus::Emitted;
1698  });
1699  else if (Caller)
1700  DeviceCallGraph[Caller].insert({Callee, Loc});
1701  }
1702 }
1703 
1704 void Sema::checkOpenMPDeviceExpr(const Expr *E) {
1705  assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
1706  "OpenMP device compilation mode is expected.");
1707  QualType Ty = E->getType();
1708  if ((Ty->isFloat16Type() && !Context.getTargetInfo().hasFloat16Type()) ||
1709  ((Ty->isFloat128Type() ||
1710  (Ty->isRealFloatingType() && Context.getTypeSize(Ty) == 128)) &&
1711  !Context.getTargetInfo().hasFloat128Type()) ||
1712  (Ty->isIntegerType() && Context.getTypeSize(Ty) == 128 &&
1713  !Context.getTargetInfo().hasInt128Type()))
1714  targetDiag(E->getExprLoc(), diag::err_omp_unsupported_type)
1715  << static_cast<unsigned>(Context.getTypeSize(Ty)) << Ty
1716  << Context.getTargetInfo().getTriple().str() << E->getSourceRange();
1717 }
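// NOTE (illustrative sketch, not part of the original file): the type check
// above rejects, during device compilation for offload targets without the
// corresponding support, expressions involving _Float16 or 128-bit types, e.g.
//
//   __float128 q = 1;
//   #pragma omp target map(tofrom: q)
//   { q += 1; }   // err_omp_unsupported_type if the offload target has no
//                 // native 128-bit floating-point support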
1718 
1719 bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
1720  unsigned OpenMPCaptureLevel) const {
1721  assert(LangOpts.OpenMP && "OpenMP is not allowed");
1722 
1723  ASTContext &Ctx = getASTContext();
1724  bool IsByRef = true;
1725 
1726  // Find the directive that is associated with the provided scope.
1727  D = cast<ValueDecl>(D->getCanonicalDecl());
1728  QualType Ty = D->getType();
1729 
1730  bool IsVariableUsedInMapClause = false;
1731  if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective, Level)) {
1732  // This table summarizes how a given variable should be passed to the device
1733  // given its type and the clauses where it appears. This table is based on
1734  // the description in OpenMP 4.5 [2.10.4, target Construct] and
1735  // OpenMP 4.5 [2.15.5, Data-mapping Attribute Rules and Clauses].
1736  //
1737  // =========================================================================
1738  // | type | defaultmap | pvt | first | is_device_ptr | map | res. |
1739  // | |(tofrom:scalar)| | pvt | | | |
1740  // =========================================================================
1741  // | scl | | | | - | | bycopy|
1742  // | scl | | - | x | - | - | bycopy|
1743  // | scl | | x | - | - | - | null |
1744  // | scl | x | | | - | | byref |
1745  // | scl | x | - | x | - | - | bycopy|
1746  // | scl | x | x | - | - | - | null |
1747  // | scl | | - | - | - | x | byref |
1748  // | scl | x | - | - | - | x | byref |
1749  //
1750  // | agg | n.a. | | | - | | byref |
1751  // | agg | n.a. | - | x | - | - | byref |
1752  // | agg | n.a. | x | - | - | - | null |
1753  // | agg | n.a. | - | - | - | x | byref |
1754  // | agg | n.a. | - | - | - | x[] | byref |
1755  //
1756  // | ptr | n.a. | | | - | | bycopy|
1757  // | ptr | n.a. | - | x | - | - | bycopy|
1758  // | ptr | n.a. | x | - | - | - | null |
1759  // | ptr | n.a. | - | - | - | x | byref |
1760  // | ptr | n.a. | - | - | - | x[] | bycopy|
1761  // | ptr | n.a. | - | - | x | | bycopy|
1762  // | ptr | n.a. | - | - | x | x | bycopy|
1763  // | ptr | n.a. | - | - | x | x[] | bycopy|
1764  // =========================================================================
1765  // Legend:
1766  // scl - scalar
1767  // ptr - pointer
1768  // agg - aggregate
1769  // x - applies
1770  // - - invalid in this combination
1771  // [] - mapped with an array section
1772  // byref - should be mapped by reference
1773  // bycopy - should be mapped by copy (i.e. by value)
1774  // null - initialize a local variable to null on the device
1775  //
1776  // Observations:
1777  // - All scalar declarations that show up in a map clause have to be passed
1778  // by reference, because they may have been mapped in the enclosing data
1779  // environment.
1780  // - If the scalar value does not fit the size of uintptr, it has to be
1781  // passed by reference, regardless of the result in the table above.
1782  // - For pointers mapped by value that have either an implicit map or an
1783  // array section, the runtime library may pass the NULL value to the
1784  // device instead of the value passed to it by the compiler.
1785 
1786  if (Ty->isReferenceType())
1787  Ty = Ty->castAs<ReferenceType>()->getPointeeType();
1788 
1789  // Locate map clauses and see if the variable being captured is referred to
1790  // in any of those clauses. Here we only care about variables, not fields,
1791  // because fields are part of aggregates.
1792  bool IsVariableAssociatedWithSection = false;
1793 
1794  DSAStack->checkMappableExprComponentListsForDeclAtLevel(
1795  D, Level,
1796  [&IsVariableUsedInMapClause, &IsVariableAssociatedWithSection, D](
1797  OMPClauseMappableExprCommon::MappableExprComponentListRef
1798  MapExprComponents,
1799  OpenMPClauseKind WhereFoundClauseKind) {
1800  // Only the map clause information influences how a variable is
1801  // captured. E.g. is_device_ptr does not require changing the default
1802  // behavior.
1803  if (WhereFoundClauseKind != OMPC_map)
1804  return false;
1805 
1806  auto EI = MapExprComponents.rbegin();
1807  auto EE = MapExprComponents.rend();
1808 
1809  assert(EI != EE && "Invalid map expression!");
1810 
1811  if (isa<DeclRefExpr>(EI->getAssociatedExpression()))
1812  IsVariableUsedInMapClause |= EI->getAssociatedDeclaration() == D;
1813 
1814  ++EI;
1815  if (EI == EE)
1816  return false;
1817 
1818  if (isa<ArraySubscriptExpr>(EI->getAssociatedExpression()) ||
1819  isa<OMPArraySectionExpr>(EI->getAssociatedExpression()) ||
1820  isa<MemberExpr>(EI->getAssociatedExpression())) {
1821  IsVariableAssociatedWithSection = true;
1822  // There is nothing more we need to know about this variable.
1823  return true;
1824  }
1825 
1826  // Keep looking for more map info.
1827  return false;
1828  });
1829 
1830  if (IsVariableUsedInMapClause) {
1831  // If the variable is identified in a map clause it is always captured by
1832  // reference, except if it is a pointer that is dereferenced somehow.
1833  IsByRef = !(Ty->isPointerType() && IsVariableAssociatedWithSection);
1834  } else {
1835  // By default, all the data that has a scalar type is mapped by copy
1836  // (except for reduction variables).
1837  IsByRef =
1838  (DSAStack->isForceCaptureByReferenceInTargetExecutable() &&
1839  !Ty->isAnyPointerType()) ||
1840  !Ty->isScalarType() ||
1841  DSAStack->getDefaultDMAAtLevel(Level) == DMA_tofrom_scalar ||
1842  DSAStack->hasExplicitDSA(
1843  D, [](OpenMPClauseKind K) { return K == OMPC_reduction; }, Level);
1844  }
1845  }
1846 
1847  if (IsByRef && Ty.getNonReferenceType()->isScalarType()) {
1848  IsByRef =
1849  ((IsVariableUsedInMapClause &&
1850  DSAStack->getCaptureRegion(Level, OpenMPCaptureLevel) ==
1851  OMPD_target) ||
1852  !DSAStack->hasExplicitDSA(
1853  D,
1854  [](OpenMPClauseKind K) -> bool { return K == OMPC_firstprivate; },
1855  Level, /*NotLastprivate=*/true)) &&
1856  // If the variable is artificial and must be captured by value - try to
1857  // capture by value.
1858  !(isa<OMPCapturedExprDecl>(D) && !D->hasAttr<OMPCaptureNoInitAttr>() &&
1859  !cast<OMPCapturedExprDecl>(D)->getInit()->isGLValue());
1860  }
1861 
1862  // When passing data by copy, we need to make sure it fits the uintptr size
1863  // and alignment, because the runtime library only deals with uintptr types.
1864  // If it does not fit the uintptr size, we need to pass the data by reference
1865  // instead.
1866  if (!IsByRef &&
1867  (Ctx.getTypeSizeInChars(Ty) >
1868  Ctx.getTypeSizeInChars(Ctx.getUIntPtrType()) ||
1869  Ctx.getDeclAlign(D) > Ctx.getTypeAlignInChars(Ctx.getUIntPtrType()))) {
1870  IsByRef = true;
1871  }
1872 
1873  return IsByRef;
1874 }
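// NOTE (hedged sketch of the table above, assuming a 64-bit host where 'int'
// fits into uintptr_t; not part of the original file):
//
//   int scl = 0;
//   #pragma omp target map(tofrom: scl)   // scalar listed in a map clause:
//   { scl++; }                            // captured byref
//
//   #pragma omp target firstprivate(scl)  // scalar firstprivate, no
//   { scl++; }                            // defaultmap(tofrom:scalar): bycopy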
1875 
1876 unsigned Sema::getOpenMPNestingLevel() const {
1877  assert(getLangOpts().OpenMP);
1878  return DSAStack->getNestingLevel();
1879 }
1880 
1881 bool Sema::isInOpenMPTargetExecutionDirective() const {
1882  return (isOpenMPTargetExecutionDirective(DSAStack->getCurrentDirective()) &&
1883  !DSAStack->isClauseParsingMode()) ||
1884  DSAStack->hasDirective(
1885  [](OpenMPDirectiveKind K, const DeclarationNameInfo &,
1886  SourceLocation) -> bool {
1887  return isOpenMPTargetExecutionDirective(K);
1888  },
1889  false);
1890 }
1891 
1892 VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo,
1893  unsigned StopAt) {
1894  assert(LangOpts.OpenMP && "OpenMP is not allowed");
1895  D = getCanonicalDecl(D);
1896 
1897  // If we want to determine whether the variable should be captured from the
1898  // perspective of the current capturing scope, and we've already left all the
1899  // capturing scopes of the top directive on the stack, check from the
1900  // perspective of its parent directive (if any) instead.
1901  DSAStackTy::ParentDirectiveScope InParentDirectiveRAII(
1902  *DSAStack, CheckScopeInfo && DSAStack->isBodyComplete());
1903 
1904  // If we are attempting to capture a global variable in a directive with
1905  // 'target' we return true so that this global is also mapped to the device.
1906  //
1907  auto *VD = dyn_cast<VarDecl>(D);
1908  if (VD && !VD->hasLocalStorage() &&
1909  (getCurCapturedRegion() || getCurBlock() || getCurLambda())) {
1910  if (isInOpenMPDeclareTargetContext()) {
1911  // Try to mark variable as declare target if it is used in capturing
1912  // regions.
1913  if (LangOpts.OpenMP <= 45 &&
1914  !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
1915  checkDeclIsAllowedInOpenMPTarget(nullptr, VD);
1916  return nullptr;
1917  } else if (isInOpenMPTargetExecutionDirective()) {
1918  // If the declaration is enclosed in a 'declare target' directive,
1919  // then it should not be captured.
1920  //
1921  if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
1922  return nullptr;
1923  return VD;
1924  }
1925  }
1926 
1927  if (CheckScopeInfo) {
1928  bool OpenMPFound = false;
1929  for (unsigned I = StopAt + 1; I > 0; --I) {
1930  FunctionScopeInfo *FSI = FunctionScopes[I - 1];
1931  if (!isa<CapturingScopeInfo>(FSI))
1932  return nullptr;
1933  if (auto *RSI = dyn_cast<CapturedRegionScopeInfo>(FSI))
1934  if (RSI->CapRegionKind == CR_OpenMP) {
1935  OpenMPFound = true;
1936  break;
1937  }
1938  }
1939  if (!OpenMPFound)
1940  return nullptr;
1941  }
1942 
1943  if (DSAStack->getCurrentDirective() != OMPD_unknown &&
1944  (!DSAStack->isClauseParsingMode() ||
1945  DSAStack->getParentDirective() != OMPD_unknown)) {
1946  auto &&Info = DSAStack->isLoopControlVariable(D);
1947  if (Info.first ||
1948  (VD && VD->hasLocalStorage() &&
1949  isImplicitOrExplicitTaskingRegion(DSAStack->getCurrentDirective())) ||
1950  (VD && DSAStack->isForceVarCapturing()))
1951  return VD ? VD : Info.second;
1952  DSAStackTy::DSAVarData DVarPrivate =
1953  DSAStack->getTopDSA(D, DSAStack->isClauseParsingMode());
1954  if (DVarPrivate.CKind != OMPC_unknown && isOpenMPPrivate(DVarPrivate.CKind))
1955  return VD ? VD : cast<VarDecl>(DVarPrivate.PrivateCopy->getDecl());
1956  // Threadprivate variables must not be captured.
1957  if (isOpenMPThreadPrivate(DVarPrivate.CKind))
1958  return nullptr;
1959  // The variable is not private, or it is a variable in a directive with a
1960  // default(none) clause that is not used in any clause.
1961  DVarPrivate = DSAStack->hasDSA(D, isOpenMPPrivate,
1962  [](OpenMPDirectiveKind) { return true; },
1963  DSAStack->isClauseParsingMode());
1964  if (DVarPrivate.CKind != OMPC_unknown ||
1965  (VD && DSAStack->getDefaultDSA() == DSA_none))
1966  return VD ? VD : cast<VarDecl>(DVarPrivate.PrivateCopy->getDecl());
1967  }
1968  return nullptr;
1969 }
1970 
1971 void Sema::adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
1972  unsigned Level) const {
1973  SmallVector<OpenMPDirectiveKind, 4> Regions;
1974  getOpenMPCaptureRegions(Regions, DSAStack->getDirective(Level));
1975  FunctionScopesIndex -= Regions.size();
1976 }
1977 
1978 void Sema::startOpenMPLoop() {
1979  assert(LangOpts.OpenMP && "OpenMP must be enabled.");
1980  if (isOpenMPLoopDirective(DSAStack->getCurrentDirective()))
1981  DSAStack->loopInit();
1982 }
1983 
1984 void Sema::startOpenMPCXXRangeFor() {
1985  assert(LangOpts.OpenMP && "OpenMP must be enabled.");
1986  if (isOpenMPLoopDirective(DSAStack->getCurrentDirective())) {
1987  DSAStack->resetPossibleLoopCounter();
1988  DSAStack->loopStart();
1989  }
1990 }
1991 
1992 bool Sema::isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const {
1993  assert(LangOpts.OpenMP && "OpenMP is not allowed");
1994  if (isOpenMPLoopDirective(DSAStack->getCurrentDirective())) {
1995  if (DSAStack->getAssociatedLoops() > 0 &&
1996  !DSAStack->isLoopStarted()) {
1997  DSAStack->resetPossibleLoopCounter(D);
1998  DSAStack->loopStart();
1999  return true;
2000  }
2001  if ((DSAStack->getPossiblyLoopCunter() == D->getCanonicalDecl() ||
2002  DSAStack->isLoopControlVariable(D).first) &&
2003  !DSAStack->hasExplicitDSA(
2004  D, [](OpenMPClauseKind K) { return K != OMPC_private; }, Level) &&
2005  !isOpenMPSimdDirective(DSAStack->getCurrentDirective()))
2006  return true;
2007  }
2008  if (const auto *VD = dyn_cast<VarDecl>(D)) {
2009  if (DSAStack->isThreadPrivate(const_cast<VarDecl *>(VD)) &&
2010  DSAStack->isForceVarCapturing() &&
2011  !DSAStack->hasExplicitDSA(
2012  D, [](OpenMPClauseKind K) { return K == OMPC_copyin; }, Level))
2013  return true;
2014  }
2015  return DSAStack->hasExplicitDSA(
2016  D, [](OpenMPClauseKind K) { return K == OMPC_private; }, Level) ||
2017  (DSAStack->isClauseParsingMode() &&
2018  DSAStack->getClauseParsingMode() == OMPC_private) ||
2019  // Consider taskgroup reduction descriptor variable a private to avoid
2020  // possible capture in the region.
2021  (DSAStack->hasExplicitDirective(
2022  [](OpenMPDirectiveKind K) { return K == OMPD_taskgroup; },
2023  Level) &&
2024  DSAStack->isTaskgroupReductionRef(D, Level));
2025 }
2026 
2027 void Sema::setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D,
2028  unsigned Level) {
2029  assert(LangOpts.OpenMP && "OpenMP is not allowed");
2030  D = getCanonicalDecl(D);
2031  OpenMPClauseKind OMPC = OMPC_unknown;
2032  for (unsigned I = DSAStack->getNestingLevel() + 1; I > Level; --I) {
2033  const unsigned NewLevel = I - 1;
2034  if (DSAStack->hasExplicitDSA(D,
2035  [&OMPC](const OpenMPClauseKind K) {
2036  if (isOpenMPPrivate(K)) {
2037  OMPC = K;
2038  return true;
2039  }
2040  return false;
2041  },
2042  NewLevel))
2043  break;
2044  if (DSAStack->checkMappableExprComponentListsForDeclAtLevel(
2045  D, NewLevel,
2046  [](OMPClauseMappableExprCommon::MappableExprComponentListRef,
2047  OpenMPClauseKind) { return true; })) {
2048  OMPC = OMPC_map;
2049  break;
2050  }
2051  if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective,
2052  NewLevel)) {
2053  OMPC = OMPC_map;
2054  if (D->getType()->isScalarType() &&
2055  DSAStack->getDefaultDMAAtLevel(NewLevel) !=
2056  DefaultMapAttributes::DMA_tofrom_scalar)
2057  OMPC = OMPC_firstprivate;
2058  break;
2059  }
2060  }
2061  if (OMPC != OMPC_unknown)
2062  FD->addAttr(OMPCaptureKindAttr::CreateImplicit(Context, OMPC));
2063 }
2064 
2065 bool Sema::isOpenMPTargetCapturedDecl(const ValueDecl *D,
2066  unsigned Level) const {
2067  assert(LangOpts.OpenMP && "OpenMP is not allowed");
2068  // Return true if the current level is no longer enclosed in a target region.
2069 
2070  const auto *VD = dyn_cast<VarDecl>(D);
2071  return VD && !VD->hasLocalStorage() &&
2072  DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective,
2073  Level);
2074 }
2075 
2076 void Sema::DestroyDataSharingAttributesStack() { delete DSAStack; }
2077 
2078 void Sema::finalizeOpenMPDelayedAnalysis() {
2079  assert(LangOpts.OpenMP && "Expected OpenMP compilation mode.");
2080  // Diagnose implicit declare target functions and their callees.
2081  for (const auto &CallerCallees : DeviceCallGraph) {
2082  Optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
2083  OMPDeclareTargetDeclAttr::getDeviceType(
2084  CallerCallees.getFirst()->getMostRecentDecl());
2085  // Ignore host functions during device analysis.
2086  if (LangOpts.OpenMPIsDevice && DevTy &&
2087  *DevTy == OMPDeclareTargetDeclAttr::DT_Host)
2088  continue;
2089  // Ignore nohost functions during host analysis.
2090  if (!LangOpts.OpenMPIsDevice && DevTy &&
2091  *DevTy == OMPDeclareTargetDeclAttr::DT_NoHost)
2092  continue;
2093  for (const std::pair<CanonicalDeclPtr<FunctionDecl>, SourceLocation>
2094  &Callee : CallerCallees.getSecond()) {
2095  const FunctionDecl *FD = Callee.first->getMostRecentDecl();
2096  Optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
2097  OMPDeclareTargetDeclAttr::getDeviceType(FD);
2098  if (LangOpts.OpenMPIsDevice && DevTy &&
2099  *DevTy == OMPDeclareTargetDeclAttr::DT_Host) {
2100  // Diagnose host function called during device codegen.
2101  StringRef HostDevTy = getOpenMPSimpleClauseTypeName(
2102  OMPC_device_type, OMPC_DEVICE_TYPE_host);
2103  Diag(Callee.second, diag::err_omp_wrong_device_function_call)
2104  << HostDevTy << 0;
2105  Diag(FD->getAttr<OMPDeclareTargetDeclAttr>()->getLocation(),
2106  diag::note_omp_marked_device_type_here)
2107  << HostDevTy;
2108  continue;
2109  }
2110  if (!LangOpts.OpenMPIsDevice && DevTy &&
2111  *DevTy == OMPDeclareTargetDeclAttr::DT_NoHost) {
2112  // Diagnose nohost function called during host codegen.
2113  StringRef NoHostDevTy = getOpenMPSimpleClauseTypeName(
2114  OMPC_device_type, OMPC_DEVICE_TYPE_nohost);
2115  Diag(Callee.second, diag::err_omp_wrong_device_function_call)
2116  << NoHostDevTy << 1;
2117  Diag(FD->getAttr<OMPDeclareTargetDeclAttr>()->getLocation(),
2118  diag::note_omp_marked_device_type_here)
2119  << NoHostDevTy;
2120  continue;
2121  }
2122  }
2123  }
2124 }
2125 
2126 void Sema::StartOpenMPDSABlock(OpenMPDirectiveKind DKind,
2127  const DeclarationNameInfo &DirName,
2128  Scope *CurScope, SourceLocation Loc) {
2129  DSAStack->push(DKind, DirName, CurScope, Loc);
2130  PushExpressionEvaluationContext(
2131  ExpressionEvaluationContext::PotentiallyEvaluated);
2132 }
2133 
2134 void Sema::StartOpenMPClause(OpenMPClauseKind K) {
2135  DSAStack->setClauseParsingMode(K);
2136 }
2137 
2138 void Sema::EndOpenMPClause() {
2139  DSAStack->setClauseParsingMode(/*K=*/OMPC_unknown);
2140 }
2141 
2142 static void checkAllocateClauses(Sema &S, DSAStackTy *Stack,
2143  ArrayRef<OMPClause *> Clauses);
2144 
2145 void Sema::EndOpenMPDSABlock(Stmt *CurDirective) {
2146  // OpenMP [2.14.3.5, Restrictions, C/C++, p.1]
2147  // A variable of class type (or array thereof) that appears in a lastprivate
2148  // clause requires an accessible, unambiguous default constructor for the
2149  // class type, unless the list item is also specified in a firstprivate
2150  // clause.
2151  if (const auto *D = dyn_cast_or_null<OMPExecutableDirective>(CurDirective)) {
2152  for (OMPClause *C : D->clauses()) {
2153  if (auto *Clause = dyn_cast<OMPLastprivateClause>(C)) {
2154  SmallVector<Expr *, 8> PrivateCopies;
2155  for (Expr *DE : Clause->varlists()) {
2156  if (DE->isValueDependent() || DE->isTypeDependent()) {
2157  PrivateCopies.push_back(nullptr);
2158  continue;
2159  }
2160  auto *DRE = cast<DeclRefExpr>(DE->IgnoreParens());
2161  auto *VD = cast<VarDecl>(DRE->getDecl());
2162  QualType Type = VD->getType().getNonReferenceType();
2163  const DSAStackTy::DSAVarData DVar =
2164  DSAStack->getTopDSA(VD, /*FromParent=*/false);
2165  if (DVar.CKind == OMPC_lastprivate) {
2166  // Generate helper private variable and initialize it with the
2167  // default value. The address of the original variable is replaced
2168  // by the address of the new private variable in CodeGen. This new
2169  // variable is not added to IdResolver, so the code in the OpenMP
2170  // region uses the original variable for proper diagnostics.
2171  VarDecl *VDPrivate = buildVarDecl(
2172  *this, DE->getExprLoc(), Type.getUnqualifiedType(),
2173  VD->getName(), VD->hasAttrs() ? &VD->getAttrs() : nullptr, DRE);
2174  ActOnUninitializedDecl(VDPrivate);
2175  if (VDPrivate->isInvalidDecl()) {
2176  PrivateCopies.push_back(nullptr);
2177  continue;
2178  }
2179  PrivateCopies.push_back(buildDeclRefExpr(
2180  *this, VDPrivate, DE->getType(), DE->getExprLoc()));
2181  } else {
2182  // The variable is also a firstprivate, so initialization sequence
2183  // for private copy is generated already.
2184  PrivateCopies.push_back(nullptr);
2185  }
2186  }
2187  Clause->setPrivateCopies(PrivateCopies);
2188  }
2189  }
2190  // Check allocate clauses.
2191  if (!CurContext->isDependentContext())
2192  checkAllocateClauses(*this, DSAStack, D->clauses());
2193  }
2194 
2195  DSAStack->pop();
2196  DiscardCleanupsInEvaluationContext();
2197  PopExpressionEvaluationContext();
2198 }
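// NOTE (illustrative sketch of the lastprivate restriction handled above, not
// part of the original file): the helper private copy built here is
// default-initialized, so a class-type list item needs an accessible default
// constructor unless it is also firstprivate, e.g.
//
//   struct S { S(int); };                      // no default constructor
//   S s(0);
//   #pragma omp parallel for lastprivate(s)    // error: the private copy
//   for (int i = 0; i < 10; ++i) {}            // cannot be default-constructed
//
//   #pragma omp parallel for firstprivate(s) lastprivate(s)  // OK: copy-init
//   for (int i = 0; i < 10; ++i) {}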
2199 
2200 static bool FinishOpenMPLinearClause(OMPLinearClause &Clause, DeclRefExpr *IV,
2201  Expr *NumIterations, Sema &SemaRef,
2202  Scope *S, DSAStackTy *Stack);
2203 
2204 namespace {
2205 
2206 class VarDeclFilterCCC final : public CorrectionCandidateCallback {
2207 private:
2208  Sema &SemaRef;
2209 
2210 public:
2211  explicit VarDeclFilterCCC(Sema &S) : SemaRef(S) {}
2212  bool ValidateCandidate(const TypoCorrection &Candidate) override {
2213  NamedDecl *ND = Candidate.getCorrectionDecl();
2214  if (const auto *VD = dyn_cast_or_null<VarDecl>(ND)) {
2215  return VD->hasGlobalStorage() &&
2216  SemaRef.isDeclInScope(ND, SemaRef.getCurLexicalContext(),
2217  SemaRef.getCurScope());
2218  }
2219  return false;
2220  }
2221 
2222  std::unique_ptr<CorrectionCandidateCallback> clone() override {
2223  return std::make_unique<VarDeclFilterCCC>(*this);
2224  }
2225 
2226 };
2227 
2228 class VarOrFuncDeclFilterCCC final : public CorrectionCandidateCallback {
2229 private:
2230  Sema &SemaRef;
2231 
2232 public:
2233  explicit VarOrFuncDeclFilterCCC(Sema &S) : SemaRef(S) {}
2234  bool ValidateCandidate(const TypoCorrection &Candidate) override {
2235  NamedDecl *ND = Candidate.getCorrectionDecl();
2236  if (ND && ((isa<VarDecl>(ND) && ND->getKind() == Decl::Var) ||
2237  isa<FunctionDecl>(ND))) {
2238  return SemaRef.isDeclInScope(ND, SemaRef.getCurLexicalContext(),
2239  SemaRef.getCurScope());
2240  }
2241  return false;
2242  }
2243 
2244  std::unique_ptr<CorrectionCandidateCallback> clone() override {
2245  return std::make_unique<VarOrFuncDeclFilterCCC>(*this);
2246  }
2247 };
2248 
2249 } // namespace
2250 
2251 ExprResult Sema::ActOnOpenMPIdExpression(Scope *CurScope,
2252  CXXScopeSpec &ScopeSpec,
2253  const DeclarationNameInfo &Id,
2254  OpenMPDirectiveKind Kind) {
2255  LookupResult Lookup(*this, Id, LookupOrdinaryName);
2256  LookupParsedName(Lookup, CurScope, &ScopeSpec, true);
2257 
2258  if (Lookup.isAmbiguous())
2259  return ExprError();
2260 
2261  VarDecl *VD;
2262  if (!Lookup.isSingleResult()) {
2263  VarDeclFilterCCC CCC(*this);
2264  if (TypoCorrection Corrected =
2265  CorrectTypo(Id, LookupOrdinaryName, CurScope, nullptr, CCC,
2266  CTK_ErrorRecovery)) {
2267  diagnoseTypo(Corrected,
2268  PDiag(Lookup.empty()
2269  ? diag::err_undeclared_var_use_suggest
2270  : diag::err_omp_expected_var_arg_suggest)
2271  << Id.getName());
2272  VD = Corrected.getCorrectionDeclAs<VarDecl>();
2273  } else {
2274  Diag(Id.getLoc(), Lookup.empty() ? diag::err_undeclared_var_use
2275  : diag::err_omp_expected_var_arg)
2276  << Id.getName();
2277  return ExprError();
2278  }
2279  } else if (!(VD = Lookup.getAsSingle<VarDecl>())) {
2280  Diag(Id.getLoc(), diag::err_omp_expected_var_arg) << Id.getName();
2281  Diag(Lookup.getFoundDecl()->getLocation(), diag::note_declared_at);
2282  return ExprError();
2283  }
2284  Lookup.suppressDiagnostics();
2285 
2286  // OpenMP [2.9.2, Syntax, C/C++]
2287  // Variables must be file-scope, namespace-scope, or static block-scope.
2288  if (Kind == OMPD_threadprivate && !VD->hasGlobalStorage()) {
2289  Diag(Id.getLoc(), diag::err_omp_global_var_arg)
2290  << getOpenMPDirectiveName(Kind) << !VD->isStaticLocal();
2291  bool IsDecl =
2292  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
2293  Diag(VD->getLocation(),
2294  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2295  << VD;
2296  return ExprError();
2297  }
2298 
2299  VarDecl *CanonicalVD = VD->getCanonicalDecl();
2300  NamedDecl *ND = CanonicalVD;
2301  // OpenMP [2.9.2, Restrictions, C/C++, p.2]
2302  // A threadprivate directive for file-scope variables must appear outside
2303  // any definition or declaration.
2304  if (CanonicalVD->getDeclContext()->isTranslationUnit() &&
2305  !getCurLexicalContext()->isTranslationUnit()) {
2306  Diag(Id.getLoc(), diag::err_omp_var_scope)
2307  << getOpenMPDirectiveName(Kind) << VD;
2308  bool IsDecl =
2309  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
2310  Diag(VD->getLocation(),
2311  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2312  << VD;
2313  return ExprError();
2314  }
2315  // OpenMP [2.9.2, Restrictions, C/C++, p.3]
2316  // A threadprivate directive for static class member variables must appear
2317  // in the class definition, in the same scope in which the member
2318  // variables are declared.
2319  if (CanonicalVD->isStaticDataMember() &&
2320  !CanonicalVD->getDeclContext()->Equals(getCurLexicalContext())) {
2321  Diag(Id.getLoc(), diag::err_omp_var_scope)
2322  << getOpenMPDirectiveName(Kind) << VD;
2323  bool IsDecl =
2324  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
2325  Diag(VD->getLocation(),
2326  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2327  << VD;
2328  return ExprError();
2329  }
2330  // OpenMP [2.9.2, Restrictions, C/C++, p.4]
2331  // A threadprivate directive for namespace-scope variables must appear
2332  // outside any definition or declaration other than the namespace
2333  // definition itself.
2334  if (CanonicalVD->getDeclContext()->isNamespace() &&
2335  (!getCurLexicalContext()->isFileContext() ||
2336  !getCurLexicalContext()->Encloses(CanonicalVD->getDeclContext()))) {
2337  Diag(Id.getLoc(), diag::err_omp_var_scope)
2338  << getOpenMPDirectiveName(Kind) << VD;
2339  bool IsDecl =
2340  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
2341  Diag(VD->getLocation(),
2342  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2343  << VD;
2344  return ExprError();
2345  }
2346  // OpenMP [2.9.2, Restrictions, C/C++, p.6]
2347  // A threadprivate directive for static block-scope variables must appear
2348  // in the scope of the variable and not in a nested scope.
2349  if (CanonicalVD->isLocalVarDecl() && CurScope &&
2350  !isDeclInScope(ND, getCurLexicalContext(), CurScope)) {
2351  Diag(Id.getLoc(), diag::err_omp_var_scope)
2352  << getOpenMPDirectiveName(Kind) << VD;
2353  bool IsDecl =
2354  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
2355  Diag(VD->getLocation(),
2356  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2357  << VD;
2358  return ExprError();
2359  }
2360 
2361  // OpenMP [2.9.2, Restrictions, C/C++, p.2-6]
2362  // A threadprivate directive must lexically precede all references to any
2363  // of the variables in its list.
2364  if (Kind == OMPD_threadprivate && VD->isUsed() &&
2365  !DSAStack->isThreadPrivate(VD)) {
2366  Diag(Id.getLoc(), diag::err_omp_var_used)
2367  << getOpenMPDirectiveName(Kind) << VD;
2368  return ExprError();
2369  }
2370 
2371  QualType ExprType = VD->getType().getNonReferenceType();
2372  return DeclRefExpr::Create(Context, NestedNameSpecifierLoc(),
2373  SourceLocation(), VD,
2374  /*RefersToEnclosingVariableOrCapture=*/false,
2375  Id.getLoc(), ExprType, VK_LValue);
2376 }
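// NOTE (illustrative sketch of the scope rules checked above, not part of the
// original file):
//
//   int gv;
//   #pragma omp threadprivate(gv)        // OK: file scope, precedes any use
//
//   void f() {
//     static int sv;
//     {
//       #pragma omp threadprivate(sv)    // error: directive appears in a
//     }                                  // scope nested inside that of 'sv'
//   }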
2377 
2378 Sema::DeclGroupPtrTy
2379 Sema::ActOnOpenMPThreadprivateDirective(SourceLocation Loc,
2380  ArrayRef<Expr *> VarList) {
2381  if (OMPThreadPrivateDecl *D = CheckOMPThreadPrivateDecl(Loc, VarList)) {
2382  CurContext->addDecl(D);
2383  return DeclGroupPtrTy::make(DeclGroupRef(D));
2384  }
2385  return nullptr;
2386 }
2387 
2388 namespace {
2389 class LocalVarRefChecker final
2390  : public ConstStmtVisitor<LocalVarRefChecker, bool> {
2391  Sema &SemaRef;
2392 
2393 public:
2394  bool VisitDeclRefExpr(const DeclRefExpr *E) {
2395  if (const auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
2396  if (VD->hasLocalStorage()) {
2397  SemaRef.Diag(E->getBeginLoc(),
2398  diag::err_omp_local_var_in_threadprivate_init)
2399  << E->getSourceRange();
2400  SemaRef.Diag(VD->getLocation(), diag::note_defined_here)
2401  << VD << VD->getSourceRange();
2402  return true;
2403  }
2404  }
2405  return false;
2406  }
2407  bool VisitStmt(const Stmt *S) {
2408  for (const Stmt *Child : S->children()) {
2409  if (Child && Visit(Child))
2410  return true;
2411  }
2412  return false;
2413  }
2414  explicit LocalVarRefChecker(Sema &SemaRef) : SemaRef(SemaRef) {}
2415 };
2416 } // namespace
2417 
2418 OMPThreadPrivateDecl *
2419 Sema::CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList) {
2420  SmallVector<Expr *, 8> Vars;
2421  for (Expr *RefExpr : VarList) {
2422  auto *DE = cast<DeclRefExpr>(RefExpr);
2423  auto *VD = cast<VarDecl>(DE->getDecl());
2424  SourceLocation ILoc = DE->getExprLoc();
2425 
2426  // Mark variable as used.
2427  VD->setReferenced();
2428  VD->markUsed(Context);
2429 
2430  QualType QType = VD->getType();
2431  if (QType->isDependentType() || QType->isInstantiationDependentType()) {
2432  // It will be analyzed later.
2433  Vars.push_back(DE);
2434  continue;
2435  }
2436 
2437  // OpenMP [2.9.2, Restrictions, C/C++, p.10]
2438  // A threadprivate variable must not have an incomplete type.
2439  if (RequireCompleteType(ILoc, VD->getType(),
2440  diag::err_omp_threadprivate_incomplete_type)) {
2441  continue;
2442  }
2443 
2444  // OpenMP [2.9.2, Restrictions, C/C++, p.10]
2445  // A threadprivate variable must not have a reference type.
2446  if (VD->getType()->isReferenceType()) {
2447  Diag(ILoc, diag::err_omp_ref_type_arg)
2448  << getOpenMPDirectiveName(OMPD_threadprivate) << VD->getType();
2449  bool IsDecl =
2450  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
2451  Diag(VD->getLocation(),
2452  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2453  << VD;
2454  continue;
2455  }
2456 
2457  // Check if this is a TLS variable. If TLS is not being supported, produce
2458  // the corresponding diagnostic.
2459  if ((VD->getTLSKind() != VarDecl::TLS_None &&
2460  !(VD->hasAttr<OMPThreadPrivateDeclAttr>() &&
2461  getLangOpts().OpenMPUseTLS &&
2462  getASTContext().getTargetInfo().isTLSSupported())) ||
2463  (VD->getStorageClass() == SC_Register && VD->hasAttr<AsmLabelAttr>() &&
2464  !VD->isLocalVarDecl())) {
2465  Diag(ILoc, diag::err_omp_var_thread_local)
2466  << VD << ((VD->getTLSKind() != VarDecl::TLS_None) ? 0 : 1);
2467  bool IsDecl =
2468  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
2469  Diag(VD->getLocation(),
2470  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2471  << VD;
2472  continue;
2473  }
2474 
2475  // Check if the initial value of the threadprivate variable references a
2476  // variable with local storage (this is not supported by the runtime).
2477  if (const Expr *Init = VD->getAnyInitializer()) {
2478  LocalVarRefChecker Checker(*this);
2479  if (Checker.Visit(Init))
2480  continue;
2481  }
2482 
2483  Vars.push_back(RefExpr);
2484  DSAStack->addDSA(VD, DE, OMPC_threadprivate);
2485  VD->addAttr(OMPThreadPrivateDeclAttr::CreateImplicit(
2486  Context, SourceRange(Loc, Loc)));
2487  if (ASTMutationListener *ML = Context.getASTMutationListener())
2488  ML->DeclarationMarkedOpenMPThreadPrivate(VD);
2489  }
2490  OMPThreadPrivateDecl *D = nullptr;
2491  if (!Vars.empty()) {
2492  D = OMPThreadPrivateDecl::Create(Context, getCurLexicalContext(), Loc,
2493  Vars);
2494  D->setAccess(AS_public);
2495  }
2496  return D;
2497 }
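// NOTE (illustrative sketch of the restrictions enforced above, not part of
// the original file):
//
//   struct Incomplete;
//   extern Incomplete inc;
//   #pragma omp threadprivate(inc)   // error: incomplete type
//
//   int x;
//   int &ref = x;
//   #pragma omp threadprivate(ref)   // error: reference type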
2498 
2499 static OMPAllocateDeclAttr::AllocatorTypeTy
2500 getAllocatorKind(Sema &S, DSAStackTy *Stack, Expr *Allocator) {
2501  if (!Allocator)
2502  return OMPAllocateDeclAttr::OMPDefaultMemAlloc;
2503  if (Allocator->isTypeDependent() || Allocator->isValueDependent() ||
2504  Allocator->isInstantiationDependent() ||
2505  Allocator->containsUnexpandedParameterPack())
2506  return OMPAllocateDeclAttr::OMPUserDefinedMemAlloc;
2507  auto AllocatorKindRes = OMPAllocateDeclAttr::OMPUserDefinedMemAlloc;
2508  const Expr *AE = Allocator->IgnoreParenImpCasts();
2509  for (int I = OMPAllocateDeclAttr::OMPDefaultMemAlloc;
2510  I < OMPAllocateDeclAttr::OMPUserDefinedMemAlloc; ++I) {
2511  auto AllocatorKind = static_cast<OMPAllocateDeclAttr::AllocatorTypeTy>(I);
2512  const Expr *DefAllocator = Stack->getAllocator(AllocatorKind);
2513  llvm::FoldingSetNodeID AEId, DAEId;
2514  AE->Profile(AEId, S.getASTContext(), /*Canonical=*/true);
2515  DefAllocator->Profile(DAEId, S.getASTContext(), /*Canonical=*/true);
2516  if (AEId == DAEId) {
2517  AllocatorKindRes = AllocatorKind;
2518  break;
2519  }
2520  }
2521  return AllocatorKindRes;
2522 }
2523 
2524 static bool checkPreviousOMPAllocateAttribute(
2525  Sema &S, DSAStackTy *Stack, Expr *RefExpr, VarDecl *VD,
2526  OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind, Expr *Allocator) {
2527  if (!VD->hasAttr<OMPAllocateDeclAttr>())
2528  return false;
2529  const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
2530  Expr *PrevAllocator = A->getAllocator();
2531  OMPAllocateDeclAttr::AllocatorTypeTy PrevAllocatorKind =
2532  getAllocatorKind(S, Stack, PrevAllocator);
2533  bool AllocatorsMatch = AllocatorKind == PrevAllocatorKind;
2534  if (AllocatorsMatch &&
2535  AllocatorKind == OMPAllocateDeclAttr::OMPUserDefinedMemAlloc &&
2536  Allocator && PrevAllocator) {
2537  const Expr *AE = Allocator->IgnoreParenImpCasts();
2538  const Expr *PAE = PrevAllocator->IgnoreParenImpCasts();
2539  llvm::FoldingSetNodeID AEId, PAEId;
2540  AE->Profile(AEId, S.Context, /*Canonical=*/true);
2541  PAE->Profile(PAEId, S.Context, /*Canonical=*/true);
2542  AllocatorsMatch = AEId == PAEId;
2543  }
2544  if (!AllocatorsMatch) {
2545  SmallString<256> AllocatorBuffer;
2546  llvm::raw_svector_ostream AllocatorStream(AllocatorBuffer);
2547  if (Allocator)
2548  Allocator->printPretty(AllocatorStream, nullptr, S.getPrintingPolicy());
2549  SmallString<256> PrevAllocatorBuffer;
2550  llvm::raw_svector_ostream PrevAllocatorStream(PrevAllocatorBuffer);
2551  if (PrevAllocator)
2552  PrevAllocator->printPretty(PrevAllocatorStream, nullptr,
2553  S.getPrintingPolicy());
2554 
2555  SourceLocation AllocatorLoc =
2556  Allocator ? Allocator->getExprLoc() : RefExpr->getExprLoc();
2557  SourceRange AllocatorRange =
2558  Allocator ? Allocator->getSourceRange() : RefExpr->getSourceRange();
2559  SourceLocation PrevAllocatorLoc =
2560  PrevAllocator ? PrevAllocator->getExprLoc() : A->getLocation();
2561  SourceRange PrevAllocatorRange =
2562  PrevAllocator ? PrevAllocator->getSourceRange() : A->getRange();
2563  S.Diag(AllocatorLoc, diag::warn_omp_used_different_allocator)
2564  << (Allocator ? 1 : 0) << AllocatorStream.str()
2565  << (PrevAllocator ? 1 : 0) << PrevAllocatorStream.str()
2566  << AllocatorRange;
2567  S.Diag(PrevAllocatorLoc, diag::note_omp_previous_allocator)
2568  << PrevAllocatorRange;
2569  return true;
2570  }
2571  return false;
2572 }
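// NOTE (illustrative sketch of the consistency check above, not part of the
// original file): listing the same variable under two different allocators
// triggers the warn_omp_used_different_allocator warning, e.g.
//
//   int a;
//   #pragma omp allocate(a) allocator(omp_default_mem_alloc)
//   #pragma omp allocate(a) allocator(omp_large_cap_mem_alloc)  // warning:
//                                     // different allocator used than before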
2573 
2574 static void
2575 applyOMPAllocateAttribute(Sema &S, VarDecl *VD,
2576  OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind,
2577  Expr *Allocator, SourceRange SR) {
2578  if (VD->hasAttr<OMPAllocateDeclAttr>())
2579  return;
2580  if (Allocator &&
2581  (Allocator->isTypeDependent() || Allocator->isValueDependent() ||
2582  Allocator->isInstantiationDependent() ||
2583  Allocator->containsUnexpandedParameterPack()))
2584  return;
2585  auto *A = OMPAllocateDeclAttr::CreateImplicit(S.Context, AllocatorKind,
2586  Allocator, SR);
2587  VD->addAttr(A);
2588  if (ASTMutationListener *ML = S.Context.getASTMutationListener())
2589  ML->DeclarationMarkedOpenMPAllocate(VD, A);
2590 }
2591 
2592 Sema::DeclGroupPtrTy Sema::ActOnOpenMPAllocateDirective(
2593  SourceLocation Loc, ArrayRef<Expr *> VarList,
2594  ArrayRef<OMPClause *> Clauses, DeclContext *Owner) {
2595  assert(Clauses.size() <= 1 && "Expected at most one clause.");
2596  Expr *Allocator = nullptr;
2597  if (Clauses.empty()) {
2598  // OpenMP 5.0, 2.11.3 allocate Directive, Restrictions.
2599  // allocate directives that appear in a target region must specify an
2600  // allocator clause unless a requires directive with the dynamic_allocators
2601  // clause is present in the same compilation unit.
2602  if (LangOpts.OpenMPIsDevice &&
2603  !DSAStack->hasRequiresDeclWithClause<OMPDynamicAllocatorsClause>())
2604  targetDiag(Loc, diag::err_expected_allocator_clause);
2605  } else {
2606  Allocator = cast<OMPAllocatorClause>(Clauses.back())->getAllocator();
2607  }
2608  OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind =
2609  getAllocatorKind(*this, DSAStack, Allocator);
2610  SmallVector<Expr *, 8> Vars;
2611  for (Expr *RefExpr : VarList) {
2612  auto *DE = cast<DeclRefExpr>(RefExpr);
2613  auto *VD = cast<VarDecl>(DE->getDecl());
2614 
2615  // Check if this is a TLS variable or global register.
2616  if (VD->getTLSKind() != VarDecl::TLS_None ||
2617  VD->hasAttr<OMPThreadPrivateDeclAttr>() ||
2618  (VD->getStorageClass() == SC_Register && VD->hasAttr<AsmLabelAttr>() &&
2619  !VD->isLocalVarDecl()))
2620  continue;
2621 
2622  // If the variable is used several times in the allocate directive, the
2623  // same allocator must be used.
2624  if (checkPreviousOMPAllocateAttribute(*this, DSAStack, RefExpr, VD,
2625  AllocatorKind, Allocator))
2626  continue;
2627 
2628  // OpenMP, 2.11.3 allocate Directive, Restrictions, C / C++
2629  // If a list item has static storage duration, the allocator expression in the
2630  // allocator clause must be a constant expression that evaluates to one of
2631  // the predefined memory allocator values.
2632  if (Allocator && VD->hasGlobalStorage()) {
2633  if (AllocatorKind == OMPAllocateDeclAttr::OMPUserDefinedMemAlloc) {
2634  Diag(Allocator->getExprLoc(),
2635  diag::err_omp_expected_predefined_allocator)
2636  << Allocator->getSourceRange();
2637  bool IsDecl = VD->isThisDeclarationADefinition(Context) ==
2638  VarDecl::DeclarationOnly;
2639  Diag(VD->getLocation(),
2640  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2641  << VD;
2642  continue;
2643  }
2644  }
2645 
2646  Vars.push_back(RefExpr);
2647  applyOMPAllocateAttribute(*this, VD, AllocatorKind, Allocator,
2648  DE->getSourceRange());
2649  }
2650  if (Vars.empty())
2651  return nullptr;
2652  if (!Owner)
2653  Owner = getCurLexicalContext();
2654  auto *D = OMPAllocateDecl::Create(Context, Owner, Loc, Vars, Clauses);
2655  D->setAccess(AS_public);
2656  Owner->addDecl(D);
2657  return DeclGroupPtrTy::make(DeclGroupRef(D));
2658 }
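// NOTE (illustrative sketch of the static-storage restriction handled above,
// not part of the original file; 'my_alloc' is a hypothetical name and the
// example assumes <omp.h> is included):
//
//   omp_allocator_handle_t my_alloc =
//       omp_init_allocator(omp_default_mem_space, 0, nullptr);
//   static int s;
//   #pragma omp allocate(s) allocator(my_alloc)              // error: a
//   #pragma omp allocate(s) allocator(omp_cgroup_mem_alloc)  // predefined
//                                                 // allocator is required; OK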
2659 
2660 Sema::DeclGroupPtrTy
2661 Sema::ActOnOpenMPRequiresDirective(SourceLocation Loc,
2662  ArrayRef<OMPClause *> ClauseList) {
2663  OMPRequiresDecl *D = nullptr;
2664  if (!CurContext->isFileContext()) {
2665  Diag(Loc, diag::err_omp_invalid_scope) << "requires";
2666  } else {
2667  D = CheckOMPRequiresDecl(Loc, ClauseList);
2668  if (D) {
2669  CurContext->addDecl(D);
2670  DSAStack->addRequiresDecl(D);
2671  }
2672  }
2673  return DeclGroupPtrTy::make(DeclGroupRef(D));
2674 }
2675 
2676 OMPRequiresDecl *Sema::CheckOMPRequiresDecl(SourceLocation Loc,
2677  ArrayRef<OMPClause *> ClauseList) {
2678  /// For target specific clauses, the requires directive cannot be
2679  /// specified after the handling of any of the target regions in the
2680  /// current compilation unit.
2681  ArrayRef<SourceLocation> TargetLocations =
2682  DSAStack->getEncounteredTargetLocs();
2683  if (!TargetLocations.empty()) {
2684  for (const OMPClause *CNew : ClauseList) {
2685  // Check if any of the requires clauses affect target regions.
2686  if (isa<OMPUnifiedSharedMemoryClause>(CNew) ||
2687  isa<OMPUnifiedAddressClause>(CNew) ||
2688  isa<OMPReverseOffloadClause>(CNew) ||
2689  isa<OMPDynamicAllocatorsClause>(CNew)) {
2690  Diag(Loc, diag::err_omp_target_before_requires)
2691  << getOpenMPClauseName(CNew->getClauseKind());
2692  for (SourceLocation TargetLoc : TargetLocations) {
2693  Diag(TargetLoc, diag::note_omp_requires_encountered_target);
2694  }
2695  }
2696  }
2697  }
2698 
2699  if (!DSAStack->hasDuplicateRequiresClause(ClauseList))
2700  return OMPRequiresDecl::Create(Context, getCurLexicalContext(), Loc,
2701  ClauseList);
2702  return nullptr;
2703 }
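// NOTE (illustrative sketch of the ordering rule checked above, not part of
// the original file): a requires directive with a clause that affects target
// code generation must come before any target region in the compilation unit:
//
//   void f() {
//     #pragma omp target
//     { }
//   }
//   #pragma omp requires unified_shared_memory   // error: a target region
//                                                // was already encountered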
2704 
2705 static void reportOriginalDsa(Sema &SemaRef, const DSAStackTy *Stack,
2706  const ValueDecl *D,
2707  const DSAStackTy::DSAVarData &DVar,
2708  bool IsLoopIterVar = false) {
2709  if (DVar.RefExpr) {
2710  SemaRef.Diag(DVar.RefExpr->getExprLoc(), diag::note_omp_explicit_dsa)
2711  << getOpenMPClauseName(DVar.CKind);
2712  return;
2713  }
2714  enum {
2715  PDSA_StaticMemberShared,
2716  PDSA_StaticLocalVarShared,
2717  PDSA_LoopIterVarPrivate,
2718  PDSA_LoopIterVarLinear,
2719  PDSA_LoopIterVarLastprivate,
2720  PDSA_ConstVarShared,
2721  PDSA_GlobalVarShared,
2722  PDSA_TaskVarFirstprivate,
2723  PDSA_LocalVarPrivate,
2724  PDSA_Implicit
2725  } Reason = PDSA_Implicit;
2726  bool ReportHint = false;
2727  auto ReportLoc = D->getLocation();
2728  auto *VD = dyn_cast<VarDecl>(D);
2729  if (IsLoopIterVar) {
2730  if (DVar.CKind == OMPC_private)
2731  Reason = PDSA_LoopIterVarPrivate;
2732  else if (DVar.CKind == OMPC_lastprivate)
2733  Reason = PDSA_LoopIterVarLastprivate;
2734  else
2735  Reason = PDSA_LoopIterVarLinear;
2736  } else if (isOpenMPTaskingDirective(DVar.DKind) &&
2737  DVar.CKind == OMPC_firstprivate) {
2738  Reason = PDSA_TaskVarFirstprivate;
2739  ReportLoc = DVar.ImplicitDSALoc;
2740  } else if (VD && VD->isStaticLocal())
2741  Reason = PDSA_StaticLocalVarShared;
2742  else if (VD && VD->isStaticDataMember())
2743  Reason = PDSA_StaticMemberShared;
2744  else if (VD && VD->isFileVarDecl())
2745  Reason = PDSA_GlobalVarShared;
2746  else if (D->getType().isConstant(SemaRef.getASTContext()))
2747  Reason = PDSA_ConstVarShared;
2748  else if (VD && VD->isLocalVarDecl() && DVar.CKind == OMPC_private) {
2749  ReportHint = true;
2750  Reason = PDSA_LocalVarPrivate;
2751  }
2752  if (Reason != PDSA_Implicit) {
2753  SemaRef.Diag(ReportLoc, diag::note_omp_predetermined_dsa)
2754  << Reason << ReportHint
2755  << getOpenMPDirectiveName(Stack->getCurrentDirective());
2756  } else if (DVar.ImplicitDSALoc.isValid()) {
2757  SemaRef.Diag(DVar.ImplicitDSALoc, diag::note_omp_implicit_dsa)
2758  << getOpenMPClauseName(DVar.CKind);
2759  }
2760 }
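// NOTE (illustrative sketch of one of the notes emitted above, not part of
// the original file): when a clause conflicts with a predetermined
// data-sharing attribute, the note explains where that attribute comes from:
//
//   int i;
//   #pragma omp parallel for firstprivate(i)   // error on 'i'; note: loop
//   for (i = 0; i < 10; ++i) {}                // iteration variables are
//                                              // predetermined private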
2761 
2762 namespace {
2763 class DSAAttrChecker final : public StmtVisitor<DSAAttrChecker, void> {
2764  DSAStackTy *Stack;
2765  Sema &SemaRef;
2766  bool ErrorFound = false;
2767  CapturedStmt *CS = nullptr;
2768  llvm::SmallVector<Expr *, 4> ImplicitFirstprivate;
2769  llvm::SmallVector<Expr *, 4> ImplicitMap;
2770  Sema::VarsWithInheritedDSAType VarsWithInheritedDSA;
2771  llvm::SmallDenseSet<const ValueDecl *, 4> ImplicitDeclarations;
2772 
2773  void VisitSubCaptures(OMPExecutableDirective *S) {
2774  // Check implicitly captured variables.
2775  if (!S->hasAssociatedStmt() || !S->getAssociatedStmt())
2776  return;
2777  visitSubCaptures(S->getInnermostCapturedStmt());
2778  }
2779 
2780 public:
2781  void VisitDeclRefExpr(DeclRefExpr *E) {
2782  if (E->isTypeDependent() || E->isValueDependent() ||
2783  E->containsUnexpandedParameterPack() || E->isInstantiationDependent())
2784  return;
2785  if (auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
2786  // Check the data-sharing rules for the expressions in the clauses.
2787  if (!CS) {
2788  if (auto *CED = dyn_cast<OMPCapturedExprDecl>(VD))
2789  if (!CED->hasAttr<OMPCaptureNoInitAttr>()) {
2790  Visit(CED->getInit());
2791  return;
2792  }
2793  } else if (VD->isImplicit() || isa<OMPCapturedExprDecl>(VD))
2794  // Do not analyze internal variables and do not enclose them into
2795  // implicit clauses.
2796  return;
2797  VD = VD->getCanonicalDecl();
2798  // Skip internally declared variables.
2799  if (VD->hasLocalStorage() && CS && !CS->capturesVariable(VD))
2800  return;
2801 
2802  DSAStackTy::DSAVarData DVar = Stack->getTopDSA(VD, /*FromParent=*/false);
2803  // Check if the variable has an explicit DSA set and stop the analysis if so.
2804  if (DVar.RefExpr || !ImplicitDeclarations.insert(VD).second)
2805  return;
2806 
2807  // Skip internally declared static variables.
2808  llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
2809  OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
2810  if (VD->hasGlobalStorage() && CS && !CS->capturesVariable(VD) &&
2811  (Stack->hasRequiresDeclWithClause<OMPUnifiedSharedMemoryClause>() ||
2812  !Res || *Res != OMPDeclareTargetDeclAttr::MT_Link))
2813  return;
2814 
2815  SourceLocation ELoc = E->getExprLoc();
2816  OpenMPDirectiveKind DKind = Stack->getCurrentDirective();
2817  // The default(none) clause requires that each variable that is referenced
2818  // in the construct, and does not have a predetermined data-sharing
2819  // attribute, must have its data-sharing attribute explicitly determined
2820  // by being listed in a data-sharing attribute clause.
2821  if (DVar.CKind == OMPC_unknown && Stack->getDefaultDSA() == DSA_none &&
2822  isImplicitOrExplicitTaskingRegion(DKind) &&
2823  VarsWithInheritedDSA.count(VD) == 0) {
2824  VarsWithInheritedDSA[VD] = E;
2825  return;
2826  }
2827 
2828  if (isOpenMPTargetExecutionDirective(DKind) &&
2829  !Stack->isLoopControlVariable(VD).first) {
2830  if (!Stack->checkMappableExprComponentListsForDecl(
2831  VD, /*CurrentRegionOnly=*/true,
2832  [](OMPClauseMappableExprCommon::MappableExprComponentListRef
2833  StackComponents,
2834  OpenMPClauseKind) {
2835  // The variable is used if it has been marked as an array, an array
2836  // section, or the variable itself.
2837  return StackComponents.size() == 1 ||
2838  std::all_of(
2839  std::next(StackComponents.rbegin()),
2840  StackComponents.rend(),
2841  [](const OMPClauseMappableExprCommon::
2842  MappableComponent &MC) {
2843  return MC.getAssociatedDeclaration() ==
2844  nullptr &&
2845  (isa<OMPArraySectionExpr>(
2846  MC.getAssociatedExpression()) ||
2847  isa<ArraySubscriptExpr>(
2848  MC.getAssociatedExpression()));
2849  });
2850  })) {
2851  bool IsFirstprivate = false;
2852  // By default lambdas are captured as firstprivates.
2853  if (const auto *RD =
2854  VD->getType().getNonReferenceType()->getAsCXXRecordDecl())
2855  IsFirstprivate = RD->isLambda();
2856  IsFirstprivate =
2857  IsFirstprivate ||
2858  (VD->getType().getNonReferenceType()->isScalarType() &&
2859  Stack->getDefaultDMA() != DMA_tofrom_scalar && !Res);
2860  if (IsFirstprivate)
2861  ImplicitFirstprivate.emplace_back(E);
2862  else
2863  ImplicitMap.emplace_back(E);
2864  return;
2865  }
2866  }
2867 
2868  // OpenMP [2.9.3.6, Restrictions, p.2]
2869  // A list item that appears in a reduction clause of the innermost
2870  // enclosing worksharing or parallel construct may not be accessed in an
2871  // explicit task.
2872  DVar = Stack->hasInnermostDSA(
2873  VD, [](OpenMPClauseKind C) { return C == OMPC_reduction; },
2874  [](OpenMPDirectiveKind K) {
2875  return isOpenMPParallelDirective(K) ||
2877  },
2878  /*FromParent=*/true);
2879  if (isOpenMPTaskingDirective(DKind) && DVar.CKind == OMPC_reduction) {
2880  ErrorFound = true;
2881  SemaRef.Diag(ELoc, diag::err_omp_reduction_in_task);
2882  reportOriginalDsa(SemaRef, Stack, VD, DVar);
2883  return;
2884  }
2885 
2886  // Define implicit data-sharing attributes for task.
2887  DVar = Stack->getImplicitDSA(VD, /*FromParent=*/false);
2888  if (isOpenMPTaskingDirective(DKind) && DVar.CKind != OMPC_shared &&
2889  !Stack->isLoopControlVariable(VD).first) {
2890  ImplicitFirstprivate.push_back(E);
2891  return;
2892  }
2893 
2894  // Store implicitly used globals with declare target link for parent
2895  // target.
2896  if (!isOpenMPTargetExecutionDirective(DKind) && Res &&
2897  *Res == OMPDeclareTargetDeclAttr::MT_Link) {
2898  Stack->addToParentTargetRegionLinkGlobals(E);
2899  return;
2900  }
2901  }
2902  }
2903  void VisitMemberExpr(MemberExpr *E) {
2904  if (E->isTypeDependent() || E->isValueDependent() ||
2905  E->containsUnexpandedParameterPack() || E->isInstantiationDependent())
2906  return;
2907  auto *FD = dyn_cast<FieldDecl>(E->getMemberDecl());
2908  OpenMPDirectiveKind DKind = Stack->getCurrentDirective();
2909  if (auto *TE = dyn_cast<CXXThisExpr>(E->getBase()->IgnoreParens())) {
2910  if (!FD)
2911  return;
2912  DSAStackTy::DSAVarData DVar = Stack->getTopDSA(FD, /*FromParent=*/false);
2913  // Check if the variable has an explicit DSA set and stop the analysis
2914  // if so.
2915  if (DVar.RefExpr || !ImplicitDeclarations.insert(FD).second)
2916  return;
2917 
2918  if (isOpenMPTargetExecutionDirective(DKind) &&
2919  !Stack->isLoopControlVariable(FD).first &&
2920  !Stack->checkMappableExprComponentListsForDecl(
2921  FD, /*CurrentRegionOnly=*/true,
2922  [](OMPClauseMappableExprCommon::MappableExprComponentListRef
2923  StackComponents,
2924  OpenMPClauseKind) {
2925  return isa<CXXThisExpr>(
2926  cast<MemberExpr>(
2927  StackComponents.back().getAssociatedExpression())
2928  ->getBase()
2929  ->IgnoreParens());
2930  })) {
2931  // OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, C/C++, p.3]
2932  // A bit-field cannot appear in a map clause.
2933  //
2934  if (FD->isBitField())
2935  return;
2936 
2937  // Check to see if the member expression is referencing a class that
2938  // has already been explicitly mapped
2939  if (Stack->isClassPreviouslyMapped(TE->getType()))
2940  return;
2941 
2942  ImplicitMap.emplace_back(E);
2943  return;
2944  }
2945 
2946  SourceLocation ELoc = E->getExprLoc();
2947  // OpenMP [2.9.3.6, Restrictions, p.2]
2948  // A list item that appears in a reduction clause of the innermost
2949  // enclosing worksharing or parallel construct may not be accessed in
2950  // an explicit task.
2951  DVar = Stack->hasInnermostDSA(
2952  FD, [](OpenMPClauseKind C) { return C == OMPC_reduction; },
2953  [](OpenMPDirectiveKind K) {
2954  return isOpenMPParallelDirective(K) ||
2956  },
2957  /*FromParent=*/true);
2958  if (isOpenMPTaskingDirective(DKind) && DVar.CKind == OMPC_reduction) {
2959  ErrorFound = true;
2960  SemaRef.Diag(ELoc, diag::err_omp_reduction_in_task);
2961  reportOriginalDsa(SemaRef, Stack, FD, DVar);
2962  return;
2963  }
2964 
2965  // Define implicit data-sharing attributes for task.
2966  DVar = Stack->getImplicitDSA(FD, /*FromParent=*/false);
2967  if (isOpenMPTaskingDirective(DKind) && DVar.CKind != OMPC_shared &&
2968  !Stack->isLoopControlVariable(FD).first) {
2969  // Check if there is a captured expression for the current field in the
2970  // region. Do not mark it as firstprivate unless there is no captured
2971  // expression.
2972  // TODO: try to make it firstprivate.
2973  if (DVar.CKind != OMPC_unknown)
2974  ImplicitFirstprivate.push_back(E);
2975  }
2976  return;
2977  }
2978  if (isOpenMPTargetExecutionDirective(DKind)) {
2979  OMPClauseMappableExprCommon::MappableExprComponentList CurComponents;
2980  if (!checkMapClauseExpressionBase(SemaRef, E, CurComponents, OMPC_map,
2981  /*NoDiagnose=*/true))
2982  return;
2983  const auto *VD = cast<ValueDecl>(
2984  CurComponents.back().getAssociatedDeclaration()->getCanonicalDecl());
2985  if (!Stack->checkMappableExprComponentListsForDecl(
2986  VD, /*CurrentRegionOnly=*/true,
2987  [&CurComponents](
2988  OMPClauseMappableExprCommon::MappableExprComponentListRef
2989  StackComponents,
2990  OpenMPClauseKind) {
2991  auto CCI = CurComponents.rbegin();
2992  auto CCE = CurComponents.rend();
2993  for (const auto &SC : llvm::reverse(StackComponents)) {
2994  // Do both expressions have the same kind?
2995  if (CCI->getAssociatedExpression()->getStmtClass() !=
2996  SC.getAssociatedExpression()->getStmtClass())
2997  if (!(isa<OMPArraySectionExpr>(
2998  SC.getAssociatedExpression()) &&
2999  isa<ArraySubscriptExpr>(
3000  CCI->getAssociatedExpression())))
3001  return false;
3002 
3003  const Decl *CCD = CCI->getAssociatedDeclaration();
3004  const Decl *SCD = SC.getAssociatedDeclaration();
3005  CCD = CCD ? CCD->getCanonicalDecl() : nullptr;
3006  SCD = SCD ? SCD->getCanonicalDecl() : nullptr;
3007  if (SCD != CCD)
3008  return false;
3009  std::advance(CCI, 1);
3010  if (CCI == CCE)
3011  break;
3012  }
3013  return true;
3014  })) {
3015  Visit(E->getBase());
3016  }
3017  } else {
3018  Visit(E->getBase());
3019  }
3020  }
3021  void VisitOMPExecutableDirective(OMPExecutableDirective *S) {
3022  for (OMPClause *C : S->clauses()) {
3023  // Skip analysis of arguments of implicitly defined firstprivate clause
3024  // for task|target directives.
3025  // Skip analysis of arguments of implicitly defined map clause for target
3026  // directives.
3027  if (C && !((isa<OMPFirstprivateClause>(C) || isa<OMPMapClause>(C)) &&
3028  C->isImplicit())) {
3029  for (Stmt *CC : C->children()) {
3030  if (CC)
3031  Visit(CC);
3032  }
3033  }
3034  }
3035  // Check implicitly captured variables.
3036  VisitSubCaptures(S);
3037  }
3038  void VisitStmt(Stmt *S) {
3039  for (Stmt *C : S->children()) {
3040  if (C) {
3041  // Check implicitly captured variables in the task-based directives to
3042  // check if they must be firstprivatized.
3043  Visit(C);
3044  }
3045  }
3046  }
3047 
3048  void visitSubCaptures(CapturedStmt *S) {
3049  for (const CapturedStmt::Capture &Cap : S->captures()) {
3050  if (!Cap.capturesVariable() && !Cap.capturesVariableByCopy())
3051  continue;
3052  VarDecl *VD = Cap.getCapturedVar();
3053  // Do not try to map the variable if it or its sub-component was mapped
3054  // already.
3055  if (isOpenMPTargetExecutionDirective(Stack->getCurrentDirective()) &&
3056  Stack->checkMappableExprComponentListsForDecl(
3057  VD, /*CurrentRegionOnly=*/true,
3058  [](OMPClauseMappableExprCommon::MappableExprComponentListRef,
3059  OpenMPClauseKind) { return true; }))
3060  continue;
3061  DeclRefExpr *DRE = buildDeclRefExpr(
3062  SemaRef, VD, VD->getType().getNonLValueExprType(SemaRef.Context),
3063  Cap.getLocation(), /*RefersToCapture=*/true);
3064  Visit(DRE);
3065  }
3066  }
3067  bool isErrorFound() const { return ErrorFound; }
3068  ArrayRef<Expr *> getImplicitFirstprivate() const {
3069  return ImplicitFirstprivate;
3070  }
3071  ArrayRef<Expr *> getImplicitMap() const { return ImplicitMap; }
3072  const Sema::VarsWithInheritedDSAType &getVarsWithInheritedDSA() const {
3073  return VarsWithInheritedDSA;
3074  }
3075 
3076  DSAAttrChecker(DSAStackTy *S, Sema &SemaRef, CapturedStmt *CS)
3077  : Stack(S), SemaRef(SemaRef), ErrorFound(false), CS(CS) {
3078  // Process declare target link variables for the target directives.
3079  if (isOpenMPTargetExecutionDirective(S->getCurrentDirective())) {
3080  for (DeclRefExpr *E : Stack->getLinkGlobals())
3081  Visit(E);
3082  }
3083  }
3084 };
3085 } // namespace
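The checker above only collects candidates; the implicit clauses themselves are materialized later in ActOnOpenMPExecutableDirective. As a rough illustration of the inputs it classifies (hypothetical example, not taken from a test):

  void example(int n) {
    int a[64];
  #pragma omp task        // 'n' is referenced with no explicit DSA: recorded as implicit firstprivate
    { int x = n; (void)x; }
  #pragma omp target      // 'a' is recorded for an implicit map(tofrom: a); the scalar 'n'
    { a[0] = n; }         // defaults to an implicit firstprivate on the target
  }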
3086 
3087 void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
3088  switch (DKind) {
3089  case OMPD_parallel:
3090  case OMPD_parallel_for:
3091  case OMPD_parallel_for_simd:
3092  case OMPD_parallel_sections:
3093  case OMPD_teams:
3094  case OMPD_teams_distribute:
3095  case OMPD_teams_distribute_simd: {
3096  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
3097  QualType KmpInt32PtrTy =
3098  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
3099  Sema::CapturedParamNameType Params[] = {
3100  std::make_pair(".global_tid.", KmpInt32PtrTy),
3101  std::make_pair(".bound_tid.", KmpInt32PtrTy),
3102  std::make_pair(StringRef(), QualType()) // __context with shared vars
3103  };
3104  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3105  Params);
3106  break;
3107  }
3108  case OMPD_target_teams:
3109  case OMPD_target_parallel:
3110  case OMPD_target_parallel_for:
3111  case OMPD_target_parallel_for_simd:
3112  case OMPD_target_teams_distribute:
3113  case OMPD_target_teams_distribute_simd: {
3114  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
3115  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
3116  QualType KmpInt32PtrTy =
3117  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
3118  QualType Args[] = {VoidPtrTy};
3119  FunctionProtoType::ExtProtoInfo EPI;
3120  EPI.Variadic = true;
3121  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
3122  Sema::CapturedParamNameType Params[] = {
3123  std::make_pair(".global_tid.", KmpInt32Ty),
3124  std::make_pair(".part_id.", KmpInt32PtrTy),
3125  std::make_pair(".privates.", VoidPtrTy),
3126  std::make_pair(
3127  ".copy_fn.",
3128  Context.getPointerType(CopyFnType).withConst().withRestrict()),
3129  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
3130  std::make_pair(StringRef(), QualType()) // __context with shared vars
3131  };
3132  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3133  Params, /*OpenMPCaptureLevel=*/0);
3134  // Mark this captured region as inlined, because we don't use the
3135  // outlined function directly.
3136  getCurCapturedRegion()->TheCapturedDecl->addAttr(
3137  AlwaysInlineAttr::CreateImplicit(
3138  Context, {}, AttributeCommonInfo::AS_Keyword,
3139  AlwaysInlineAttr::Keyword_forceinline));
3140  Sema::CapturedParamNameType ParamsTarget[] = {
3141  std::make_pair(StringRef(), QualType()) // __context with shared vars
3142  };
3143  // Start a captured region for 'target' with no implicit parameters.
3144  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3145  ParamsTarget, /*OpenMPCaptureLevel=*/1);
3146  Sema::CapturedParamNameType ParamsTeamsOrParallel[] = {
3147  std::make_pair(".global_tid.", KmpInt32PtrTy),
3148  std::make_pair(".bound_tid.", KmpInt32PtrTy),
3149  std::make_pair(StringRef(), QualType()) // __context with shared vars
3150  };
3151  // Start a captured region for 'teams' or 'parallel'. Both regions have
3152  // the same implicit parameters.
3153  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3154  ParamsTeamsOrParallel, /*OpenMPCaptureLevel=*/2);
3155  break;
3156  }
3157  case OMPD_target:
3158  case OMPD_target_simd: {
3159  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
3160  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
3161  QualType KmpInt32PtrTy =
3162  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
3163  QualType Args[] = {VoidPtrTy};
3164  FunctionProtoType::ExtProtoInfo EPI;
3165  EPI.Variadic = true;
3166  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
3167  Sema::CapturedParamNameType Params[] = {
3168  std::make_pair(".global_tid.", KmpInt32Ty),
3169  std::make_pair(".part_id.", KmpInt32PtrTy),
3170  std::make_pair(".privates.", VoidPtrTy),
3171  std::make_pair(
3172  ".copy_fn.",
3173  Context.getPointerType(CopyFnType).withConst().withRestrict()),
3174  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
3175  std::make_pair(StringRef(), QualType()) // __context with shared vars
3176  };
3177  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3178  Params, /*OpenMPCaptureLevel=*/0);
3179  // Mark this captured region as inlined, because we don't use the
3180  // outlined function directly.
3181  getCurCapturedRegion()->TheCapturedDecl->addAttr(
3182  AlwaysInlineAttr::CreateImplicit(
3183  Context, {}, AttributeCommonInfo::AS_Keyword,
3184  AlwaysInlineAttr::Keyword_forceinline));
3185  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3186  std::make_pair(StringRef(), QualType()),
3187  /*OpenMPCaptureLevel=*/1);
3188  break;
3189  }
3190  case OMPD_simd:
3191  case OMPD_for:
3192  case OMPD_for_simd:
3193  case OMPD_sections:
3194  case OMPD_section:
3195  case OMPD_single:
3196  case OMPD_master:
3197  case OMPD_critical:
3198  case OMPD_taskgroup:
3199  case OMPD_distribute:
3200  case OMPD_distribute_simd:
3201  case OMPD_ordered:
3202  case OMPD_atomic:
3203  case OMPD_target_data: {
3204  Sema::CapturedParamNameType Params[] = {
3205  std::make_pair(StringRef(), QualType()) // __context with shared vars
3206  };
3207  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3208  Params);
3209  break;
3210  }
3211  case OMPD_task: {
3212  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
3213  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
3214  QualType KmpInt32PtrTy =
3215  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
3216  QualType Args[] = {VoidPtrTy};
3217  FunctionProtoType::ExtProtoInfo EPI;
3218  EPI.Variadic = true;
3219  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
3220  Sema::CapturedParamNameType Params[] = {
3221  std::make_pair(".global_tid.", KmpInt32Ty),
3222  std::make_pair(".part_id.", KmpInt32PtrTy),
3223  std::make_pair(".privates.", VoidPtrTy),
3224  std::make_pair(
3225  ".copy_fn.",
3226  Context.getPointerType(CopyFnType).withConst().withRestrict()),
3227  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
3228  std::make_pair(StringRef(), QualType()) // __context with shared vars
3229  };
3230  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3231  Params);
3232  // Mark this captured region as inlined, because we don't use the
3233  // outlined function directly.
3234  getCurCapturedRegion()->TheCapturedDecl->addAttr(
3235  AlwaysInlineAttr::CreateImplicit(
3236  Context, {}, AttributeCommonInfo::AS_Keyword,
3237  AlwaysInlineAttr::Keyword_forceinline));
3238  break;
3239  }
3240  case OMPD_taskloop:
3241  case OMPD_taskloop_simd:
3242  case OMPD_master_taskloop: {
3243  QualType KmpInt32Ty =
3244  Context.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1)
3245  .withConst();
3246  QualType KmpUInt64Ty =
3247  Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0)
3248  .withConst();
3249  QualType KmpInt64Ty =
3250  Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1)
3251  .withConst();
3252  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
3253  QualType KmpInt32PtrTy =
3254  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
3255  QualType Args[] = {VoidPtrTy};
3256  FunctionProtoType::ExtProtoInfo EPI;
3257  EPI.Variadic = true;
3258  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
3259  Sema::CapturedParamNameType Params[] = {
3260  std::make_pair(".global_tid.", KmpInt32Ty),
3261  std::make_pair(".part_id.", KmpInt32PtrTy),
3262  std::make_pair(".privates.", VoidPtrTy),
3263  std::make_pair(
3264  ".copy_fn.",
3265  Context.getPointerType(CopyFnType).withConst().withRestrict()),
3266  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
3267  std::make_pair(".lb.", KmpUInt64Ty),
3268  std::make_pair(".ub.", KmpUInt64Ty),
3269  std::make_pair(".st.", KmpInt64Ty),
3270  std::make_pair(".liter.", KmpInt32Ty),
3271  std::make_pair(".reductions.", VoidPtrTy),
3272  std::make_pair(StringRef(), QualType()) // __context with shared vars
3273  };
3274  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3275  Params);
3276  // Mark this captured region as inlined, because we don't use the
3277  // outlined function directly.
3278  getCurCapturedRegion()->TheCapturedDecl->addAttr(
3279  AlwaysInlineAttr::CreateImplicit(
3280  Context, {}, AttributeCommonInfo::AS_Keyword,
3281  AlwaysInlineAttr::Keyword_forceinline));
3282  break;
3283  }
3284  case OMPD_parallel_master_taskloop: {
3285  QualType KmpInt32Ty =
3286  Context.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1)
3287  .withConst();
3288  QualType KmpUInt64Ty =
3289  Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0)
3290  .withConst();
3291  QualType KmpInt64Ty =
3292  Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1)
3293  .withConst();
3294  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
3295  QualType KmpInt32PtrTy =
3296  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
3297  Sema::CapturedParamNameType ParamsParallel[] = {
3298  std::make_pair(".global_tid.", KmpInt32PtrTy),
3299  std::make_pair(".bound_tid.", KmpInt32PtrTy),
3300  std::make_pair(StringRef(), QualType()) // __context with shared vars
3301  };
3302  // Start a captured region for 'parallel'.
3303  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3304  ParamsParallel, /*OpenMPCaptureLevel=*/1);
3305  QualType Args[] = {VoidPtrTy};
3306  FunctionProtoType::ExtProtoInfo EPI;
3307  EPI.Variadic = true;
3308  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
3309  Sema::CapturedParamNameType Params[] = {
3310  std::make_pair(".global_tid.", KmpInt32Ty),
3311  std::make_pair(".part_id.", KmpInt32PtrTy),
3312  std::make_pair(".privates.", VoidPtrTy),
3313  std::make_pair(
3314  ".copy_fn.",
3315  Context.getPointerType(CopyFnType).withConst().withRestrict()),
3316  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
3317  std::make_pair(".lb.", KmpUInt64Ty),
3318  std::make_pair(".ub.", KmpUInt64Ty),
3319  std::make_pair(".st.", KmpInt64Ty),
3320  std::make_pair(".liter.", KmpInt32Ty),
3321  std::make_pair(".reductions.", VoidPtrTy),
3322  std::make_pair(StringRef(), QualType()) // __context with shared vars
3323  };
3324  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3325  Params, /*OpenMPCaptureLevel=*/2);
3326  // Mark this captured region as inlined, because we don't use the
3327  // outlined function directly.
3328  getCurCapturedRegion()->TheCapturedDecl->addAttr(
3329  AlwaysInlineAttr::CreateImplicit(
3330  Context, {}, AttributeCommonInfo::AS_Keyword,
3331  AlwaysInlineAttr::Keyword_forceinline));
3332  break;
3333  }
3334  case OMPD_distribute_parallel_for_simd:
3335  case OMPD_distribute_parallel_for: {
3336  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
3337  QualType KmpInt32PtrTy =
3338  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
3339  Sema::CapturedParamNameType Params[] = {
3340  std::make_pair(".global_tid.", KmpInt32PtrTy),
3341  std::make_pair(".bound_tid.", KmpInt32PtrTy),
3342  std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
3343  std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
3344  std::make_pair(StringRef(), QualType()) // __context with shared vars
3345  };
3346  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3347  Params);
3348  break;
3349  }
3350  case OMPD_target_teams_distribute_parallel_for:
3351  case OMPD_target_teams_distribute_parallel_for_simd: {
3352  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
3353  QualType KmpInt32PtrTy =
3354  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
3355  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
3356 
3357  QualType Args[] = {VoidPtrTy};
3358  FunctionProtoType::ExtProtoInfo EPI;
3359  EPI.Variadic = true;
3360  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
3361  Sema::CapturedParamNameType Params[] = {
3362  std::make_pair(".global_tid.", KmpInt32Ty),
3363  std::make_pair(".part_id.", KmpInt32PtrTy),
3364  std::make_pair(".privates.", VoidPtrTy),
3365  std::make_pair(
3366  ".copy_fn.",
3367  Context.getPointerType(CopyFnType).withConst().withRestrict()),
3368  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
3369  std::make_pair(StringRef(), QualType()) // __context with shared vars
3370  };
3371  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3372  Params, /*OpenMPCaptureLevel=*/0);
3373  // Mark this captured region as inlined, because we don't use the
3374  // outlined function directly.
3375  getCurCapturedRegion()->TheCapturedDecl->addAttr(
3376  AlwaysInlineAttr::CreateImplicit(
3377  Context, {}, AttributeCommonInfo::AS_Keyword,
3378  AlwaysInlineAttr::Keyword_forceinline));
3379  Sema::CapturedParamNameType ParamsTarget[] = {
3380  std::make_pair(StringRef(), QualType()) // __context with shared vars
3381  };
3382  // Start a captured region for 'target' with no implicit parameters.
3383  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3384  ParamsTarget, /*OpenMPCaptureLevel=*/1);
3385 
3386  Sema::CapturedParamNameType ParamsTeams[] = {
3387  std::make_pair(".global_tid.", KmpInt32PtrTy),
3388  std::make_pair(".bound_tid.", KmpInt32PtrTy),
3389  std::make_pair(StringRef(), QualType()) // __context with shared vars
3390  };
3391  // Start a captured region for 'teams'.
3392  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3393  ParamsTeams, /*OpenMPCaptureLevel=*/2);
3394 
3395  Sema::CapturedParamNameType ParamsParallel[] = {
3396  std::make_pair(".global_tid.", KmpInt32PtrTy),
3397  std::make_pair(".bound_tid.", KmpInt32PtrTy),
3398  std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
3399  std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
3400  std::make_pair(StringRef(), QualType()) // __context with shared vars
3401  };
3402  // Start a captured region for 'parallel'; unlike the teams region it
3403  // also receives the previous lower and upper loop bounds.
3404  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3405  ParamsParallel, /*OpenMPCaptureLevel=*/3);
3406  break;
3407  }
3408 
3409  case OMPD_teams_distribute_parallel_for:
3410  case OMPD_teams_distribute_parallel_for_simd: {
3411  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
3412  QualType KmpInt32PtrTy =
3413  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
3414 
3415  Sema::CapturedParamNameType ParamsTeams[] = {
3416  std::make_pair(".global_tid.", KmpInt32PtrTy),
3417  std::make_pair(".bound_tid.", KmpInt32PtrTy),
3418  std::make_pair(StringRef(), QualType()) // __context with shared vars
3419  };
3420  // Start a captured region for 'teams'.
3421  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3422  ParamsTeams, /*OpenMPCaptureLevel=*/0);
3423 
3424  Sema::CapturedParamNameType ParamsParallel[] = {
3425  std::make_pair(".global_tid.", KmpInt32PtrTy),
3426  std::make_pair(".bound_tid.", KmpInt32PtrTy),
3427  std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
3428  std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
3429  std::make_pair(StringRef(), QualType()) // __context with shared vars
3430  };
3431  // Start a captured region for 'parallel'; unlike the teams region it
3432  // also receives the previous lower and upper loop bounds.
3433  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3434  ParamsParallel, /*OpenMPCaptureLevel=*/1);
3435  break;
3436  }
3437  case OMPD_target_update:
3438  case OMPD_target_enter_data:
3439  case OMPD_target_exit_data: {
3440  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
3441  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
3442  QualType KmpInt32PtrTy =
3443  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
3444  QualType Args[] = {VoidPtrTy};
3445  FunctionProtoType::ExtProtoInfo EPI;
3446  EPI.Variadic = true;
3447  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
3448  Sema::CapturedParamNameType Params[] = {
3449  std::make_pair(".global_tid.", KmpInt32Ty),
3450  std::make_pair(".part_id.", KmpInt32PtrTy),
3451  std::make_pair(".privates.", VoidPtrTy),
3452  std::make_pair(
3453  ".copy_fn.",
3454  Context.getPointerType(CopyFnType).withConst().withRestrict()),
3455  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
3456  std::make_pair(StringRef(), QualType()) // __context with shared vars
3457  };
3458  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3459  Params);
3460  // Mark this captured region as inlined, because we don't use the
3461  // outlined function directly.
3462  getCurCapturedRegion()->TheCapturedDecl->addAttr(
3463  AlwaysInlineAttr::CreateImplicit(
3464  Context, {}, AttributeCommonInfo::AS_Keyword,
3465  AlwaysInlineAttr::Keyword_forceinline));
3466  break;
3467  }
3468  case OMPD_threadprivate:
3469  case OMPD_allocate:
3470  case OMPD_taskyield:
3471  case OMPD_barrier:
3472  case OMPD_taskwait:
3473  case OMPD_cancellation_point:
3474  case OMPD_cancel:
3475  case OMPD_flush:
3476  case OMPD_declare_reduction:
3477  case OMPD_declare_mapper:
3478  case OMPD_declare_simd:
3479  case OMPD_declare_target:
3480  case OMPD_end_declare_target:
3481  case OMPD_requires:
3482  case OMPD_declare_variant:
3483  llvm_unreachable("OpenMP Directive is not allowed");
3484  case OMPD_unknown:
3485  llvm_unreachable("Unknown OpenMP directive");
3486  }
3487 }
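Each case above starts one nested captured region per constituent construct of the directive. For a combined construct such as the following (illustrative snippet, hypothetical names), three regions are started: an outer task-like region at capture level 0, the 'target' region at level 1, and the 'parallel' region at level 2, matching the OMPD_target_parallel_for case above.

  void combined(int n, int *a) {
  #pragma omp target parallel for
    for (int i = 0; i < n; ++i)
      a[i] = i;
  }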
3488 
3489 int Sema::getNumberOfConstructScopes(unsigned Level) const {
3490  return getOpenMPCaptureLevels(DSAStack->getDirective(Level));
3491 }
3492 
3493 int Sema::getOpenMPCaptureLevels(OpenMPDirectiveKind DKind) {
3494  SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
3495  getOpenMPCaptureRegions(CaptureRegions, DKind);
3496  return CaptureRegions.size();
3497 }
3498 
3499 static OMPCapturedExprDecl *buildCaptureDecl(Sema &S, IdentifierInfo *Id,
3500  Expr *CaptureExpr, bool WithInit,
3501  bool AsExpression) {
3502  assert(CaptureExpr);
3503  ASTContext &C = S.getASTContext();
3504  Expr *Init = AsExpression ? CaptureExpr : CaptureExpr->IgnoreImpCasts();
3505  QualType Ty = Init->getType();
3506  if (CaptureExpr->getObjectKind() == OK_Ordinary && CaptureExpr->isGLValue()) {
3507  if (S.getLangOpts().CPlusPlus) {
3508  Ty = C.getLValueReferenceType(Ty);
3509  } else {
3510  Ty = C.getPointerType(Ty);
3511  ExprResult Res =
3512  S.CreateBuiltinUnaryOp(CaptureExpr->getExprLoc(), UO_AddrOf, Init);
3513  if (!Res.isUsable())
3514  return nullptr;
3515  Init = Res.get();
3516  }
3517  WithInit = true;
3518  }
3519  auto *CED = OMPCapturedExprDecl::Create(C, S.CurContext, Id, Ty,
3520  CaptureExpr->getBeginLoc());
3521  if (!WithInit)
3522  CED->addAttr(OMPCaptureNoInitAttr::CreateImplicit(C));
3523  S.CurContext->addHiddenDecl(CED);
3524  S.AddInitializerToDecl(CED, Init, /*DirectInit=*/false);
3525  return CED;
3526 }
3527 
3528 static DeclRefExpr *buildCapture(Sema &S, ValueDecl *D, Expr *CaptureExpr,
3529  bool WithInit) {
3530  OMPCapturedExprDecl *CD;
3531  if (VarDecl *VD = S.isOpenMPCapturedDecl(D))
3532  CD = cast<OMPCapturedExprDecl>(VD);
3533  else
3534  CD = buildCaptureDecl(S, D->getIdentifier(), CaptureExpr, WithInit,
3535  /*AsExpression=*/false);
3536  return buildDeclRefExpr(S, CD, CD->getType().getNonReferenceType(),
3537  CaptureExpr->getExprLoc());
3538 }
3539 
3540 static ExprResult buildCapture(Sema &S, Expr *CaptureExpr, DeclRefExpr *&Ref) {
3541  CaptureExpr = S.DefaultLvalueConversion(CaptureExpr).get();
3542  if (!Ref) {
3543  OMPCapturedExprDecl *CD = buildCaptureDecl(
3544  S, &S.getASTContext().Idents.get(".capture_expr."), CaptureExpr,
3545  /*WithInit=*/true, /*AsExpression=*/true);
3546  Ref = buildDeclRefExpr(S, CD, CD->getType().getNonReferenceType(),
3547  CaptureExpr->getExprLoc());
3548  }
3549  ExprResult Res = Ref;
3550  if (!S.getLangOpts().CPlusPlus &&
3551  CaptureExpr->getObjectKind() == OK_Ordinary && CaptureExpr->isGLValue() &&
3552  Ref->getType()->isPointerType()) {
3553  Res = S.CreateBuiltinUnaryOp(CaptureExpr->getExprLoc(), UO_Deref, Ref);
3554  if (!Res.isUsable())
3555  return ExprError();
3556  }
3557  return S.DefaultLvalueConversion(Res.get());
3558 }
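These capture helpers wrap an expression in an OMPCapturedExprDecl named ".capture_expr." so its value is captured once and can be re-read from the appropriate region. Clause expressions on combined target directives appear to be one typical client; a hedged illustration (threads() is a hypothetical function):

  int threads();
  void clause_capture(int n, int *a) {
  #pragma omp target parallel for num_threads(threads())
    for (int i = 0; i < n; ++i)
      a[i] = i;
  }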
3559 
3560 namespace {
3561 // OpenMP directives parsed in this section are represented as a
3562 // CapturedStatement with an associated statement. If a syntax error
3563 // is detected during the parsing of the associated statement, the
3564 // compiler must abort processing and close the CapturedStatement.
3565 //
3566 // Combined directives such as 'target parallel' have more than one
3567 // nested CapturedStatements. This RAII ensures that we unwind out
3568 // of all the nested CapturedStatements when an error is found.
3569 class CaptureRegionUnwinderRAII {
3570 private:
3571  Sema &S;
3572  bool &ErrorFound;
3573  OpenMPDirectiveKind DKind = OMPD_unknown;
3574 
3575 public:
3576  CaptureRegionUnwinderRAII(Sema &S, bool &ErrorFound,
3577  OpenMPDirectiveKind DKind)
3578  : S(S), ErrorFound(ErrorFound), DKind(DKind) {}
3579  ~CaptureRegionUnwinderRAII() {
3580  if (ErrorFound) {
3581  int ThisCaptureLevel = S.getOpenMPCaptureLevels(DKind);
3582  while (--ThisCaptureLevel >= 0)
3583  S.ActOnCapturedRegionError();
3584  }
3585  }
3586 };
3587 } // namespace
3588 
3589 void Sema::tryCaptureOpenMPLambdas(ValueDecl *V) {
3590  // Capture variables captured by reference in lambdas for target-based
3591  // directives.
3592  if (!CurContext->isDependentContext() &&
3593  (isOpenMPTargetExecutionDirective(DSAStack->getCurrentDirective()) ||
3594  isOpenMPTargetDataManagementDirective(
3595  DSAStack->getCurrentDirective()))) {
3596  QualType Type = V->getType();
3597  if (const auto *RD = Type.getCanonicalType()
3598  .getNonReferenceType()
3599  ->getAsCXXRecordDecl()) {
3600  bool SavedForceCaptureByReferenceInTargetExecutable =
3601  DSAStack->isForceCaptureByReferenceInTargetExecutable();
3602  DSAStack->setForceCaptureByReferenceInTargetExecutable(
3603  /*V=*/true);
3604  if (RD->isLambda()) {
3605  llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
3606  FieldDecl *ThisCapture;
3607  RD->getCaptureFields(Captures, ThisCapture);
3608  for (const LambdaCapture &LC : RD->captures()) {
3609  if (LC.getCaptureKind() == LCK_ByRef) {
3610  VarDecl *VD = LC.getCapturedVar();
3611  DeclContext *VDC = VD->getDeclContext();
3612  if (!VDC->Encloses(CurContext))
3613  continue;
3614  MarkVariableReferenced(LC.getLocation(), VD);
3615  } else if (LC.getCaptureKind() == LCK_This) {
3616  QualType ThisTy = getCurrentThisType();
3617  if (!ThisTy.isNull() &&
3618  Context.typesAreCompatible(ThisTy, ThisCapture->getType()))
3619  CheckCXXThisCapture(LC.getLocation());
3620  }
3621  }
3622  }
3623  DSAStack->setForceCaptureByReferenceInTargetExecutable(
3624  SavedForceCaptureByReferenceInTargetExecutable);
3625  }
3626  }
3627 }
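A hedged sketch of what the lambda handling above is for: when a by-reference lambda is invoked inside a target-based region, the variables it captures must themselves be marked referenced so they are captured (and mapped) for the region. Names below are illustrative only.

  void lambda_in_target(int n, int *a) {
    auto body = [&](int i) { a[i] = n; };  // captures 'a' and 'n' by reference
  #pragma omp target
    body(0);
  }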
3628 
3629 StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
3630  ArrayRef<OMPClause *> Clauses) {
3631  bool ErrorFound = false;
3632  CaptureRegionUnwinderRAII CaptureRegionUnwinder(
3633  *this, ErrorFound, DSAStack->getCurrentDirective());
3634  if (!S.isUsable()) {
3635  ErrorFound = true;
3636  return StmtError();
3637  }
3638 
3639  SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
3640  getOpenMPCaptureRegions(CaptureRegions, DSAStack->getCurrentDirective());
3641  OMPOrderedClause *OC = nullptr;
3642  OMPScheduleClause *SC = nullptr;
3643  SmallVector<const OMPLinearClause *, 4> LCs;
3644  SmallVector<const OMPClauseWithPreInit *, 4> PICs;
3645  // This is required for proper codegen.
3646  for (OMPClause *Clause : Clauses) {
3647  if (isOpenMPTaskingDirective(DSAStack->getCurrentDirective()) &&
3648  Clause->getClauseKind() == OMPC_in_reduction) {
3649  // Capture taskgroup task_reduction descriptors inside the tasking regions
3650  // with the corresponding in_reduction items.
3651  auto *IRC = cast<OMPInReductionClause>(Clause);
3652  for (Expr *E : IRC->taskgroup_descriptors())
3653  if (E)
3654  MarkDeclarationsReferencedInExpr(E);
3655  }
3656  if (isOpenMPPrivate(Clause->getClauseKind()) ||
3657  Clause->getClauseKind() == OMPC_copyprivate ||
3658  (getLangOpts().OpenMPUseTLS &&
3659  getASTContext().getTargetInfo().isTLSSupported() &&
3660  Clause->getClauseKind() == OMPC_copyin)) {
3661  DSAStack->setForceVarCapturing(Clause->getClauseKind() == OMPC_copyin);
3662  // Mark all variables in private list clauses as used in the inner region.
3663  for (Stmt *VarRef : Clause->children()) {
3664  if (auto *E = cast_or_null<Expr>(VarRef)) {
3665  MarkDeclarationsReferencedInExpr(E);
3666  }
3667  }
3668  DSAStack->setForceVarCapturing(/*V=*/false);
3669  } else if (CaptureRegions.size() > 1 ||
3670  CaptureRegions.back() != OMPD_unknown) {
3671  if (auto *C = OMPClauseWithPreInit::get(Clause))
3672  PICs.push_back(C);
3673  if (auto *C = OMPClauseWithPostUpdate::get(Clause)) {
3674  if (Expr *E = C->getPostUpdateExpr())
3675  MarkDeclarationsReferencedInExpr(E);
3676  }
3677  }
3678  if (Clause->getClauseKind() == OMPC_schedule)
3679  SC = cast<OMPScheduleClause>(Clause);
3680  else if (Clause->getClauseKind() == OMPC_ordered)
3681  OC = cast<OMPOrderedClause>(Clause);
3682  else if (Clause->getClauseKind() == OMPC_linear)
3683  LCs.push_back(cast<OMPLinearClause>(Clause));
3684  }
3685  // OpenMP, 2.7.1 Loop Construct, Restrictions
3686  // The nonmonotonic modifier cannot be specified if an ordered clause is
3687  // specified.
3688  if (SC &&
3689  (SC->getFirstScheduleModifier() == OMPC_SCHEDULE_MODIFIER_nonmonotonic ||
3690  SC->getSecondScheduleModifier() ==
3691  OMPC_SCHEDULE_MODIFIER_nonmonotonic) &&
3692  OC) {
3693  Diag(SC->getFirstScheduleModifier() == OMPC_SCHEDULE_MODIFIER_nonmonotonic
3694  ? SC->getFirstScheduleModifierLoc()
3695  : SC->getSecondScheduleModifierLoc(),
3696  diag::err_omp_schedule_nonmonotonic_ordered)
3697  << SourceRange(OC->getBeginLoc(), OC->getEndLoc());
3698  ErrorFound = true;
3699  }
3700  if (!LCs.empty() && OC && OC->getNumForLoops()) {
3701  for (const OMPLinearClause *C : LCs) {
3702  Diag(C->getBeginLoc(), diag::err_omp_linear_ordered)
3703  << SourceRange(OC->getBeginLoc(), OC->getEndLoc());
3704  }
3705  ErrorFound = true;
3706  }
3707  if (isOpenMPWorksharingDirective(DSAStack->getCurrentDirective()) &&
3708  isOpenMPSimdDirective(DSAStack->getCurrentDirective()) && OC &&
3709  OC->getNumForLoops()) {
3710  Diag(OC->getBeginLoc(), diag::err_omp_ordered_simd)
3711  << getOpenMPDirectiveName(DSAStack->getCurrentDirective());
3712  ErrorFound = true;
3713  }
3714  if (ErrorFound) {
3715  return StmtError();
3716  }
3717  StmtResult SR = S;
3718  unsigned CompletedRegions = 0;
3719  for (OpenMPDirectiveKind ThisCaptureRegion : llvm::reverse(CaptureRegions)) {
3720  // Mark all variables in private list clauses as used in the inner region.
3721  // Required for proper codegen of combined directives.
3722  // TODO: add processing for other clauses.
3723  if (ThisCaptureRegion != OMPD_unknown) {
3724  for (const clang::OMPClauseWithPreInit *C : PICs) {
3725  OpenMPDirectiveKind CaptureRegion = C->getCaptureRegion();
3726  // Find the particular capture region for the clause if the
3727  // directive is a combined one with multiple capture regions.
3728  // If the directive is not a combined one, the capture region
3729  // associated with the clause is OMPD_unknown and is generated
3730  // only once.
3731  if (CaptureRegion == ThisCaptureRegion ||
3732  CaptureRegion == OMPD_unknown) {
3733  if (auto *DS = cast_or_null<DeclStmt>(C->getPreInitStmt())) {
3734  for (Decl *D : DS->decls())
3735  MarkVariableReferenced(D->getLocation(), cast<VarDecl>(D));
3736  }
3737  }
3738  }
3739  }
3740  if (++CompletedRegions == CaptureRegions.size())
3741  DSAStack->setBodyComplete();
3742  SR = ActOnCapturedRegionEnd(SR.get());
3743  }
3744  return SR;
3745 }
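Among the checks above, the schedule/ordered restriction rejects code like the following (illustrative function name): the nonmonotonic modifier is diagnosed with err_omp_schedule_nonmonotonic_ordered because an ordered clause is present.

  void bad_schedule(int n) {
  #pragma omp parallel for schedule(nonmonotonic : dynamic) ordered
    for (int i = 0; i < n; ++i) {
  #pragma omp ordered
      { }
    }
  }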
3746 
3747 static bool checkCancelRegion(Sema &SemaRef, OpenMPDirectiveKind CurrentRegion,
3748  OpenMPDirectiveKind CancelRegion,
3749  SourceLocation StartLoc) {
3750  // CancelRegion is only needed for cancel and cancellation_point.
3751  if (CurrentRegion != OMPD_cancel && CurrentRegion != OMPD_cancellation_point)
3752  return false;
3753 
3754  if (CancelRegion == OMPD_parallel || CancelRegion == OMPD_for ||
3755  CancelRegion == OMPD_sections || CancelRegion == OMPD_taskgroup)
3756  return false;
3757 
3758  SemaRef.Diag(StartLoc, diag::err_omp_wrong_cancel_region)
3759  << getOpenMPDirectiveName(CancelRegion);
3760  return true;
3761 }
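For illustration (hypothetical function): the construct-type-clause of a cancel or cancellation point directive must be one of parallel, for, sections, or taskgroup, and anything else is diagnosed by the check above.

  void cancel_example(void) {
  #pragma omp parallel
    {
  #pragma omp cancel parallel     // accepted: a valid construct-type-clause
  // #pragma omp cancel simd      // would be rejected (err_omp_wrong_cancel_region)
    }
  }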
3762 
3763 static bool checkNestingOfRegions(Sema &SemaRef, const DSAStackTy *Stack,
3764  OpenMPDirectiveKind CurrentRegion,
3765  const DeclarationNameInfo &CurrentName,
3766  OpenMPDirectiveKind CancelRegion,
3767  SourceLocation StartLoc) {
3768  if (Stack->getCurScope()) {
3769  OpenMPDirectiveKind ParentRegion = Stack->getParentDirective();
3770  OpenMPDirectiveKind OffendingRegion = ParentRegion;
3771  bool NestingProhibited = false;
3772  bool CloseNesting = true;
3773  bool OrphanSeen = false;
3774  enum {
3775  NoRecommend,
3776  ShouldBeInParallelRegion,
3777  ShouldBeInOrderedRegion,
3778  ShouldBeInTargetRegion,
3779  ShouldBeInTeamsRegion
3780  } Recommend = NoRecommend;
3781  if (isOpenMPSimdDirective(ParentRegion) && CurrentRegion != OMPD_ordered) {
3782  // OpenMP [2.16, Nesting of Regions]
3783  // OpenMP constructs may not be nested inside a simd region.
3784  // OpenMP [2.8.1,simd Construct, Restrictions]
3785  // An ordered construct with the simd clause is the only OpenMP
3786  // construct that can appear in the simd region.
3787  // Allowing a SIMD construct nested in another SIMD construct is an
3788  // extension. The OpenMP 4.5 spec does not allow it. Issue a warning
3789  // message.
3790  SemaRef.Diag(StartLoc, (CurrentRegion != OMPD_simd)
3791  ? diag::err_omp_prohibited_region_simd
3792  : diag::warn_omp_nesting_simd);
3793  return CurrentRegion != OMPD_simd;
3794  }
3795  if (ParentRegion == OMPD_atomic) {
3796  // OpenMP [2.16, Nesting of Regions]
3797  // OpenMP constructs may not be nested inside an atomic region.
3798  SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region_atomic);
3799  return true;
3800  }
3801  if (CurrentRegion == OMPD_section) {
3802  // OpenMP [2.7.2, sections Construct, Restrictions]
3803  // Orphaned section directives are prohibited. That is, the section
3804  // directives must appear within the sections construct and must not be
3805  // encountered elsewhere in the sections region.
3806  if (ParentRegion != OMPD_sections &&
3807  ParentRegion != OMPD_parallel_sections) {
3808  SemaRef.Diag(StartLoc, diag::err_omp_orphaned_section_directive)
3809  << (ParentRegion != OMPD_unknown)
3810  << getOpenMPDirectiveName(ParentRegion);
3811  return true;
3812  }
3813  return false;
3814  }
3815  // Allow some constructs (except teams and cancellation constructs) to be
3816  // orphaned (they could be used in functions, called from OpenMP regions
3817  // with the required preconditions).
3818  if (ParentRegion == OMPD_unknown &&
3819  !isOpenMPNestingTeamsDirective(CurrentRegion) &&
3820  CurrentRegion != OMPD_cancellation_point &&
3821  CurrentRegion != OMPD_cancel)
3822  return false;
3823  if (CurrentRegion == OMPD_cancellation_point ||
3824  CurrentRegion == OMPD_cancel) {
3825  // OpenMP [2.16, Nesting of Regions]
3826  // A cancellation point construct for which construct-type-clause is
3827  // taskgroup must be nested inside a task construct. A cancellation
3828  // point construct for which construct-type-clause is not taskgroup must
3829  // be closely nested inside an OpenMP construct that matches the type
3830  // specified in construct-type-clause.
3831  // A cancel construct for which construct-type-clause is taskgroup must be
3832  // nested inside a task construct. A cancel construct for which
3833  // construct-type-clause is not taskgroup must be closely nested inside an
3834  // OpenMP construct that matches the type specified in
3835  // construct-type-clause.
3836  NestingProhibited =
3837  !((CancelRegion == OMPD_parallel &&
3838  (ParentRegion == OMPD_parallel ||
3839  ParentRegion == OMPD_target_parallel)) ||
3840  (CancelRegion == OMPD_for &&
3841  (ParentRegion == OMPD_for || ParentRegion == OMPD_parallel_for ||
3842  ParentRegion == OMPD_target_parallel_for ||
3843  ParentRegion == OMPD_distribute_parallel_for ||
3844  ParentRegion == OMPD_teams_distribute_parallel_for ||
3845  ParentRegion == OMPD_target_teams_distribute_parallel_for)) ||
3846  (CancelRegion == OMPD_taskgroup && ParentRegion == OMPD_task) ||
3847  (CancelRegion == OMPD_sections &&
3848  (ParentRegion == OMPD_section || ParentRegion == OMPD_sections ||
3849  ParentRegion == OMPD_parallel_sections)));
3850  OrphanSeen = ParentRegion == OMPD_unknown;
3851  } else if (CurrentRegion == OMPD_master) {
3852  // OpenMP [2.16, Nesting of Regions]
3853  // A master region may not be closely nested inside a worksharing,
3854  // atomic, or explicit task region.
3855  NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
3856  isOpenMPTaskingDirective(ParentRegion);
3857  } else if (CurrentRegion == OMPD_critical && CurrentName.getName()) {
3858  // OpenMP [2.16, Nesting of Regions]
3859  // A critical region may not be nested (closely or otherwise) inside a
3860  // critical region with the same name. Note that this restriction is not
3861  // sufficient to prevent deadlock.
3862  SourceLocation PreviousCriticalLoc;
3863  bool DeadLock = Stack->hasDirective(
3864  [CurrentName, &PreviousCriticalLoc](OpenMPDirectiveKind K,
3865  const DeclarationNameInfo &DNI,
3866  SourceLocation Loc) {
3867  if (K == OMPD_critical && DNI.getName() == CurrentName.getName()) {
3868  PreviousCriticalLoc = Loc;
3869  return true;
3870  }
3871  return false;
3872  },
3873  false /* skip top directive */);
3874  if (DeadLock) {
3875  SemaRef.Diag(StartLoc,
3876  diag::err_omp_prohibited_region_critical_same_name)
3877  << CurrentName.getName();
3878  if (PreviousCriticalLoc.isValid())
3879  SemaRef.Diag(PreviousCriticalLoc,
3880  diag::note_omp_previous_critical_region);
3881  return true;
3882  }
3883  } else if (CurrentRegion == OMPD_barrier) {
3884  // OpenMP [2.16, Nesting of Regions]
3885  // A barrier region may not be closely nested inside a worksharing,
3886  // explicit task, critical, ordered, atomic, or master region.
3887  NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
3888  isOpenMPTaskingDirective(ParentRegion) ||
3889  ParentRegion == OMPD_master ||
3890  ParentRegion == OMPD_critical ||
3891  ParentRegion == OMPD_ordered;
3892  } else if (isOpenMPWorksharingDirective(CurrentRegion) &&
3893  !isOpenMPParallelDirective(CurrentRegion) &&
3894  !isOpenMPTeamsDirective(CurrentRegion)) {
3895  // OpenMP [2.16, Nesting of Regions]
3896  // A worksharing region may not be closely nested inside a worksharing,
3897  // explicit task, critical, ordered, atomic, or master region.
3898  NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
3899  isOpenMPTaskingDirective(ParentRegion) ||
3900  ParentRegion == OMPD_master ||
3901  ParentRegion == OMPD_critical ||
3902  ParentRegion == OMPD_ordered;
3903  Recommend = ShouldBeInParallelRegion;
3904  } else if (CurrentRegion == OMPD_ordered) {
3905  // OpenMP [2.16, Nesting of Regions]
3906  // An ordered region may not be closely nested inside a critical,
3907  // atomic, or explicit task region.
3908  // An ordered region must be closely nested inside a loop region (or
3909  // parallel loop region) with an ordered clause.
3910  // OpenMP [2.8.1,simd Construct, Restrictions]
3911  // An ordered construct with the simd clause is the only OpenMP construct
3912  // that can appear in the simd region.
3913  NestingProhibited = ParentRegion == OMPD_critical ||
3914  isOpenMPTaskingDirective(ParentRegion) ||
3915  !(isOpenMPSimdDirective(ParentRegion) ||
3916  Stack->isParentOrderedRegion());
3917  Recommend = ShouldBeInOrderedRegion;
3918  } else if (isOpenMPNestingTeamsDirective(CurrentRegion)) {
3919  // OpenMP [2.16, Nesting of Regions]
3920  // If specified, a teams construct must be contained within a target
3921  // construct.
3922  NestingProhibited =
3923  (SemaRef.LangOpts.OpenMP <= 45 && ParentRegion != OMPD_target) ||
3924  (SemaRef.LangOpts.OpenMP >= 50 && ParentRegion != OMPD_unknown &&
3925  ParentRegion != OMPD_target);
3926  OrphanSeen = ParentRegion == OMPD_unknown;
3927  Recommend = ShouldBeInTargetRegion;
3928  }
3929  if (!NestingProhibited &&
3930  !isOpenMPTargetExecutionDirective(CurrentRegion) &&
3931  !isOpenMPTargetDataManagementDirective(CurrentRegion) &&
3932  (ParentRegion == OMPD_teams || ParentRegion == OMPD_target_teams)) {
3933  // OpenMP [2.16, Nesting of Regions]
3934  // distribute, parallel, parallel sections, parallel workshare, and the
3935  // parallel loop and parallel loop SIMD constructs are the only OpenMP
3936  // constructs that can be closely nested in the teams region.
3937  NestingProhibited = !isOpenMPParallelDirective(CurrentRegion) &&
3938  !isOpenMPDistributeDirective(CurrentRegion);
3939  Recommend = ShouldBeInParallelRegion;
3940  }
3941  if (!NestingProhibited &&
3942  isOpenMPNestingDistributeDirective(CurrentRegion)) {
3943  // OpenMP 4.5 [2.17 Nesting of Regions]
3944  // The region associated with the distribute construct must be strictly
3945  // nested inside a teams region
3946  NestingProhibited =
3947  (ParentRegion != OMPD_teams && ParentRegion != OMPD_target_teams);
3948  Recommend = ShouldBeInTeamsRegion;
3949  }
3950  if (!NestingProhibited &&
3951  (isOpenMPTargetExecutionDirective(CurrentRegion) ||
3952  isOpenMPTargetDataManagementDirective(CurrentRegion))) {
3953  // OpenMP 4.5 [2.17 Nesting of Regions]
3954  // If a target, target update, target data, target enter data, or
3955  // target exit data construct is encountered during execution of a
3956  // target region, the behavior is unspecified.
3957  NestingProhibited = Stack->hasDirective(
3958  [&OffendingRegion](OpenMPDirectiveKind K, const DeclarationNameInfo &,
3959  SourceLocation) {
3960  if (isOpenMPTargetExecutionDirective(K)) {
3961  OffendingRegion = K;
3962  return true;
3963  }
3964  return false;
3965  },
3966  false /* don't skip top directive */);
3967  CloseNesting = false;
3968  }
3969  if (NestingProhibited) {
3970  if (OrphanSeen) {
3971  SemaRef.Diag(StartLoc, diag::err_omp_orphaned_device_directive)
3972  << getOpenMPDirectiveName(CurrentRegion) << Recommend;
3973  } else {
3974  SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region)
3975  << CloseNesting << getOpenMPDirectiveName(OffendingRegion)
3976  << Recommend << getOpenMPDirectiveName(CurrentRegion);
3977  }
3978  return true;
3979  }
3980  }
3981  return false;
3982 }
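A typical violation of the nesting rules enforced above is a worksharing region closely nested inside another worksharing region without an intervening parallel region (illustrative code):

  void bad_nesting(int n) {
  #pragma omp for
    for (int i = 0; i < n; ++i) {
  #pragma omp for                  // err_omp_prohibited_region
      for (int j = 0; j < n; ++j)
        ;
    }
  }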
3983 
3984 static bool checkIfClauses(Sema &S, OpenMPDirectiveKind Kind,
3985  ArrayRef<OMPClause *> Clauses,
3986  ArrayRef<OpenMPDirectiveKind> AllowedNameModifiers) {
3987  bool ErrorFound = false;
3988  unsigned NamedModifiersNumber = 0;
3989  SmallVector<const OMPIfClause *, OMPD_unknown + 1> FoundNameModifiers(
3990  OMPD_unknown + 1);
3991  SmallVector<SourceLocation, 4> NameModifierLoc;
3992  for (const OMPClause *C : Clauses) {
3993  if (const auto *IC = dyn_cast_or_null<OMPIfClause>(C)) {
3994  // At most one if clause without a directive-name-modifier can appear on
3995  // the directive.
3996  OpenMPDirectiveKind CurNM = IC->getNameModifier();
3997  if (FoundNameModifiers[CurNM]) {
3998  S.Diag(C->getBeginLoc(), diag::err_omp_more_one_clause)
3999  << getOpenMPDirectiveName(Kind) << getOpenMPClauseName(OMPC_if)
4000  << (CurNM != OMPD_unknown) << getOpenMPDirectiveName(CurNM);
4001  ErrorFound = true;
4002  } else if (CurNM != OMPD_unknown) {
4003  NameModifierLoc.push_back(IC->getNameModifierLoc());
4004  ++NamedModifiersNumber;
4005  }
4006  FoundNameModifiers[CurNM] = IC;
4007  if (CurNM == OMPD_unknown)
4008  continue;
4009  // Check if the specified name modifier is allowed for the current
4010  // directive.
4011  // At most one if clause with the particular directive-name-modifier can
4012  // appear on the directive.
4013  bool MatchFound = false;
4014  for (auto NM : AllowedNameModifiers) {
4015  if (CurNM == NM) {
4016  MatchFound = true;
4017  break;
4018  }
4019  }
4020  if (!MatchFound) {
4021  S.Diag(IC->getNameModifierLoc(),
4022  diag::err_omp_wrong_if_directive_name_modifier)
4023  << getOpenMPDirectiveName(CurNM) << getOpenMPDirectiveName(Kind);
4024  ErrorFound = true;
4025  }
4026  }
4027  }
4028  // If any if clause on the directive includes a directive-name-modifier then
4029  // all if clauses on the directive must include a directive-name-modifier.
4030  if (FoundNameModifiers[OMPD_unknown] && NamedModifiersNumber > 0) {
4031  if (NamedModifiersNumber == AllowedNameModifiers.size()) {
4032  S.Diag(FoundNameModifiers[OMPD_unknown]->getBeginLoc(),
4033  diag::err_omp_no_more_if_clause);
4034  } else {
4035  std::string Values;
4036  std::string Sep(", ");
4037  unsigned AllowedCnt = 0;
4038  unsigned TotalAllowedNum =
4039  AllowedNameModifiers.size() - NamedModifiersNumber;
4040  for (unsigned Cnt = 0, End = AllowedNameModifiers.size(); Cnt < End;
4041  ++Cnt) {
4042  OpenMPDirectiveKind NM = AllowedNameModifiers[Cnt];
4043  if (!FoundNameModifiers[NM]) {
4044  Values += "'";
4045  Values += getOpenMPDirectiveName(NM);
4046  Values += "'";
4047  if (AllowedCnt + 2 == TotalAllowedNum)
4048  Values += " or ";
4049  else if (AllowedCnt + 1 != TotalAllowedNum)
4050  Values += Sep;
4051  ++AllowedCnt;
4052  }
4053  }
4054  S.Diag(FoundNameModifiers[OMPD_unknown]->getCondition()->getBeginLoc(),
4055  diag::err_omp_unnamed_if_clause)
4056  << (TotalAllowedNum > 1) << Values;
4057  }
4058  for (SourceLocation Loc : NameModifierLoc) {
4059  S.Diag(Loc, diag::note_omp_previous_named_if_clause);
4060  }
4061  ErrorFound = true;
4062  }
4063  return ErrorFound;
4064 }
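The rule implemented above: if any if clause on a combined directive carries a directive-name-modifier, then all of its if clauses must (illustrative code, hypothetical names):

  void if_clauses(int c1, int c2, int n, int *a) {
  #pragma omp target parallel for if(target : c1) if(parallel : c2)   // OK
    for (int i = 0; i < n; ++i) a[i] = i;
  #pragma omp target parallel for if(c1) if(parallel : c2)            // diagnosed
    for (int i = 0; i < n; ++i) a[i] = i;
  }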
4065 
4066 static std::pair<ValueDecl *, bool>
4067 getPrivateItem(Sema &S, Expr *&RefExpr, SourceLocation &ELoc,
4068  SourceRange &ERange, bool AllowArraySection = false) {
4069  if (RefExpr->isTypeDependent() || RefExpr->isValueDependent() ||
4070  RefExpr->containsUnexpandedParameterPack())
4071  return std::make_pair(nullptr, true);
4072 
4073  // OpenMP [3.1, C/C++]
4074  // A list item is a variable name.
4075  // OpenMP [2.9.3.3, Restrictions, p.1]
4076  // A variable that is part of another variable (as an array or
4077  // structure element) cannot appear in a private clause.
4078  RefExpr = RefExpr->IgnoreParens();
4079  enum {
4080  NoArrayExpr = -1,
4081  ArraySubscript = 0,
4082  OMPArraySection = 1
4083  } IsArrayExpr = NoArrayExpr;
4084  if (AllowArraySection) {
4085  if (auto *ASE = dyn_cast_or_null<ArraySubscriptExpr>(RefExpr)) {
4086  Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
4087  while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
4088  Base = TempASE->getBase()->IgnoreParenImpCasts();
4089  RefExpr = Base;
4090  IsArrayExpr = ArraySubscript;
4091  } else if (auto *OASE = dyn_cast_or_null<OMPArraySectionExpr>(RefExpr)) {
4092  Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
4093  while (auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
4094  Base = TempOASE->getBase()->IgnoreParenImpCasts();
4095  while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
4096  Base = TempASE->getBase()->IgnoreParenImpCasts();
4097  RefExpr = Base;
4098  IsArrayExpr = OMPArraySection;
4099  }
4100  }
4101  ELoc = RefExpr->getExprLoc();
4102  ERange = RefExpr->getSourceRange();
4103  RefExpr = RefExpr->IgnoreParenImpCasts();
4104  auto *DE = dyn_cast_or_null<DeclRefExpr>(RefExpr);
4105  auto *ME = dyn_cast_or_null<MemberExpr>(RefExpr);
4106  if ((!DE || !isa<VarDecl>(DE->getDecl())) &&
4107  (S.getCurrentThisType().isNull() || !ME ||
4108  !isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts()) ||
4109  !isa<FieldDecl>(ME->getMemberDecl()))) {
4110  if (IsArrayExpr != NoArrayExpr) {
4111  S.Diag(ELoc, diag::err_omp_expected_base_var_name) << IsArrayExpr
4112  << ERange;
4113  } else {
4114  S.Diag(ELoc,
4115  AllowArraySection
4116  ? diag::err_omp_expected_var_name_member_expr_or_array_item
4117  : diag::err_omp_expected_var_name_member_expr)
4118  << (S.getCurrentThisType().isNull() ? 0 : 1) << ERange;
4119  }
4120  return std::make_pair(nullptr, false);
4121  }
4122  return std::make_pair(
4123  getCanonicalDecl(DE ? DE->getDecl() : ME->getMemberDecl()), false);
4124 }
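getPrivateItem accepts a plain variable name (or, where allowed, the base of an array section) and rejects list items that are parts of other variables; an illustrative example:

  struct S { int x; };
  void private_items(int n, S s, int b[10]) {
  #pragma omp parallel private(n)          // OK: a variable name
    { }
  // #pragma omp parallel private(b[2])    // rejected: array element
  // #pragma omp parallel private(s.x)     // rejected: structure element
  }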
4125 
4126 static void checkAllocateClauses(Sema &S, DSAStackTy *Stack,
4127  ArrayRef<OMPClause *> Clauses) {
4128  assert(!S.CurContext->isDependentContext() &&
4129  "Expected non-dependent context.");
4130  auto AllocateRange =
4131  llvm::make_filter_range(Clauses, OMPAllocateClause::classof);
4132  llvm::DenseMap<CanonicalDeclPtr<Decl>, CanonicalDeclPtr<VarDecl>>
4133  DeclToCopy;
4134  auto PrivateRange = llvm::make_filter_range(Clauses, [](const OMPClause *C) {
4135  return isOpenMPPrivate(C->getClauseKind());
4136  });
4137  for (OMPClause *Cl : PrivateRange) {
4138  MutableArrayRef<Expr *>::iterator I, It, Et;
4139  if (Cl->getClauseKind() == OMPC_private) {
4140  auto *PC = cast<OMPPrivateClause>(Cl);
4141  I = PC->private_copies().begin();
4142  It = PC->varlist_begin();
4143  Et = PC->varlist_end();
4144  } else if (Cl->getClauseKind() == OMPC_firstprivate) {
4145  auto *PC = cast<OMPFirstprivateClause>(Cl);
4146  I = PC->private_copies().begin();
4147  It = PC->varlist_begin();
4148  Et = PC->varlist_end();
4149  } else if (Cl->getClauseKind() == OMPC_lastprivate) {
4150  auto *PC = cast<OMPLastprivateClause>(Cl);
4151  I = PC->private_copies().begin();
4152  It = PC->varlist_begin();
4153  Et = PC->varlist_end();
4154  } else if (Cl->getClauseKind() == OMPC_linear) {
4155  auto *PC = cast<OMPLinearClause>(Cl);
4156  I = PC->privates().begin();
4157  It = PC->varlist_begin();
4158  Et = PC->varlist_end();
4159  } else if (Cl->getClauseKind() == OMPC_reduction) {
4160  auto *PC = cast<OMPReductionClause>(Cl);
4161  I = PC->privates().begin();
4162  It = PC->varlist_begin();
4163  Et = PC->varlist_end();
4164  } else if (Cl->getClauseKind() == OMPC_task_reduction) {
4165  auto *PC = cast<OMPTaskReductionClause>(Cl);
4166  I = PC->privates().begin();
4167  It = PC->varlist_begin();
4168  Et = PC->varlist_end();
4169  } else if (Cl->getClauseKind() == OMPC_in_reduction) {
4170  auto *PC = cast<OMPInReductionClause>(Cl);
4171  I = PC->privates().begin();
4172  It = PC->varlist_begin();
4173  Et = PC->varlist_end();
4174  } else {
4175  llvm_unreachable("Expected private clause.");
4176  }
4177  for (Expr *E : llvm::make_range(It, Et)) {
4178  if (!*I) {
4179  ++I;
4180  continue;
4181  }
4182  SourceLocation ELoc;
4183  SourceRange ERange;
4184  Expr *SimpleRefExpr = E;
4185  auto Res = getPrivateItem(S, SimpleRefExpr, ELoc, ERange,
4186  /*AllowArraySection=*/true);
4187  DeclToCopy.try_emplace(Res.first,
4188  cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()));
4189  ++I;
4190  }
4191  }
4192  for (OMPClause *C : AllocateRange) {
4193  auto *AC = cast<OMPAllocateClause>(C);
4194  OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind =
4195  getAllocatorKind(S, Stack, AC->getAllocator());
4196  // OpenMP, 2.11.4 allocate Clause, Restrictions.
4197  // For task, taskloop or target directives, allocation requests to memory
4198  // allocators with the trait access set to thread result in unspecified
4199  // behavior.
4200  if (AllocatorKind == OMPAllocateDeclAttr::OMPThreadMemAlloc &&
4201  (isOpenMPTaskingDirective(Stack->getCurrentDirective()) ||
4202  isOpenMPTargetExecutionDirective(Stack->getCurrentDirective()))) {
4203  S.Diag(AC->getAllocator()->getExprLoc(),
4204  diag::warn_omp_allocate_thread_on_task_target_directive)
4205  << getOpenMPDirectiveName(Stack->getCurrentDirective());
4206  }
4207  for (Expr *E : AC->varlists()) {
4208  SourceLocation ELoc;
4209  SourceRange ERange;
4210  Expr *SimpleRefExpr = E;
4211  auto Res = getPrivateItem(S, SimpleRefExpr, ELoc, ERange);
4212  ValueDecl *VD = Res.first;
4213  DSAStackTy::DSAVarData Data = Stack->getTopDSA(VD, /*FromParent=*/false);
4214  if (!isOpenMPPrivate(Data.CKind)) {
4215  S.Diag(E->getExprLoc(),
4216  diag::err_omp_expected_private_copy_for_allocate);
4217  continue;
4218  }
4219  VarDecl *PrivateVD = DeclToCopy[VD];
4220  if (checkPreviousOMPAllocateAttribute(S, Stack, E, PrivateVD,
4221  AllocatorKind, AC->getAllocator()))
4222  continue;
4223  applyOMPAllocateAttribute(S, PrivateVD, AllocatorKind, AC->getAllocator(),
4224  E->getSourceRange());
4225  }
4226  }
4227 }
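The allocate-clause check above requires every list item of an allocate clause to also have a privatizing clause on the same directive; a hedged example using the predefined omp_high_bw_mem_alloc allocator from <omp.h>:

  #include <omp.h>
  void allocate_example(int n) {
    int x = 0;
  #pragma omp parallel private(x) allocate(omp_high_bw_mem_alloc : x)  // OK
    { x = n; }
    // allocate(x) without private(x)/firstprivate(x)/... on the same directive
    // is diagnosed (err_omp_expected_private_copy_for_allocate).
  }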
4228 
4229 StmtResult Sema::ActOnOpenMPExecutableDirective(
4230  OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
4231  OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
4232  Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc) {
4233  StmtResult Res = StmtError();
4234  // First check CancelRegion which is then used in checkNestingOfRegions.
4235  if (checkCancelRegion(*this, Kind, CancelRegion, StartLoc) ||
4236  checkNestingOfRegions(*this, DSAStack, Kind, DirName, CancelRegion,
4237  StartLoc))
4238  return StmtError();
4239 
4240  llvm::SmallVector<OMPClause *, 8> ClausesWithImplicit;
4241  VarsWithInheritedDSAType VarsWithInheritedDSA;
4242  bool ErrorFound = false;
4243  ClausesWithImplicit.append(Clauses.begin(), Clauses.end());
4244  if (AStmt && !CurContext->isDependentContext()) {
4245  assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
4246 
4247  // Check default data sharing attributes for referenced variables.
4248  DSAAttrChecker DSAChecker(DSAStack, *this, cast<CapturedStmt>(AStmt));
4249  int ThisCaptureLevel = getOpenMPCaptureLevels(Kind);
4250  Stmt *S = AStmt;
4251  while (--ThisCaptureLevel >= 0)
4252  S = cast<CapturedStmt>(S)->getCapturedStmt();
4253  DSAChecker.Visit(S);
4255  !isOpenMPTaskingDirective(Kind)) {
4256  // Visit subcaptures to generate implicit clauses for captured vars.
4257  auto *CS = cast<CapturedStmt>(AStmt);
4258  SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
4259  getOpenMPCaptureRegions(CaptureRegions, Kind);
4260  // Ignore outer tasking regions for target directives.
4261  if (CaptureRegions.size() > 1 && CaptureRegions.front() == OMPD_task)
4262  CS = cast<CapturedStmt>(CS->getCapturedStmt());
4263  DSAChecker.visitSubCaptures(CS);
4264  }
4265  if (DSAChecker.isErrorFound())
4266  return StmtError();
4267  // Generate list of implicitly defined firstprivate variables.
4268  VarsWithInheritedDSA = DSAChecker.getVarsWithInheritedDSA();
4269 
4270  SmallVector<Expr *, 4> ImplicitFirstprivates(
4271  DSAChecker.getImplicitFirstprivate().begin(),
4272  DSAChecker.getImplicitFirstprivate().end());
4273  SmallVector<Expr *, 4> ImplicitMaps(DSAChecker.getImplicitMap().begin(),
4274  DSAChecker.getImplicitMap().end());
4275  // Mark taskgroup task_reduction descriptors as implicitly firstprivate.
4276  for (OMPClause *C : Clauses) {
4277  if (auto *IRC = dyn_cast<OMPInReductionClause>(C)) {
4278  for (Expr *E : IRC->taskgroup_descriptors())
4279  if (E)
4280  ImplicitFirstprivates.emplace_back(E);
4281  }
4282  }
4283  if (!ImplicitFirstprivates.empty()) {
4284  if (OMPClause *Implicit = ActOnOpenMPFirstprivateClause(
4285  ImplicitFirstprivates, SourceLocation(), SourceLocation(),
4286  SourceLocation())) {
4287  ClausesWithImplicit.push_back(Implicit);
4288  ErrorFound = cast<OMPFirstprivateClause>(Implicit)->varlist_size() !=
4289  ImplicitFirstprivates.size();
4290  } else {
4291  ErrorFound = true;
4292  }
4293  }
4294  if (!ImplicitMaps.empty()) {
4295  CXXScopeSpec MapperIdScopeSpec;
4296  DeclarationNameInfo MapperId;
4297  if (OMPClause *Implicit = ActOnOpenMPMapClause(
4298  llvm::None, llvm::None, MapperIdScopeSpec, MapperId,
4299  OMPC_MAP_tofrom, /*IsMapTypeImplicit=*/true, SourceLocation(),
4300  SourceLocation(), ImplicitMaps, OMPVarListLocTy())) {
4301  ClausesWithImplicit.emplace_back(Implicit);
4302  ErrorFound |=
4303  cast<OMPMapClause>(Implicit)->varlist_size() != ImplicitMaps.size();
4304  } else {
4305  ErrorFound = true;
4306  }
4307  }
4308  }
4309 
4310  llvm::SmallVector<OpenMPDirectiveKind, 4> AllowedNameModifiers;
4311  switch (Kind) {
4312  case OMPD_parallel:
4313  Res = ActOnOpenMPParallelDirective(ClausesWithImplicit, AStmt, StartLoc,
4314  EndLoc);
4315  AllowedNameModifiers.push_back(OMPD_parallel);
4316  break;
4317  case OMPD_simd:
4318  Res = ActOnOpenMPSimdDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc,
4319  VarsWithInheritedDSA);
4320  break;
4321  case OMPD_for:
4322  Res = ActOnOpenMPForDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc,
4323  VarsWithInheritedDSA);
4324  break;
4325  case OMPD_for_simd:
4326  Res = ActOnOpenMPForSimdDirective(ClausesWithImplicit, AStmt, StartLoc,
4327  EndLoc, VarsWithInheritedDSA);
4328  break;
4329  case OMPD_sections:
4330  Res = ActOnOpenMPSectionsDirective(ClausesWithImplicit, AStmt, StartLoc,
4331  EndLoc);
4332  break;
4333  case OMPD_section:
4334  assert(ClausesWithImplicit.empty() &&
4335  "No clauses are allowed for 'omp section' directive");
4336  Res = ActOnOpenMPSectionDirective(AStmt, StartLoc, EndLoc);
4337  break;
4338  case OMPD_single:
4339  Res = ActOnOpenMPSingleDirective(ClausesWithImplicit, AStmt, StartLoc,
4340  EndLoc);
4341  break;
4342  case OMPD_master:
4343  assert(ClausesWithImplicit.empty() &&
4344  "No clauses are allowed for 'omp master' directive");
4345  Res = ActOnOpenMPMasterDirective(AStmt, StartLoc, EndLoc);
4346  break;
4347  case OMPD_critical:
4348  Res = ActOnOpenMPCriticalDirective(DirName, ClausesWithImplicit, AStmt,
4349  StartLoc, EndLoc);
4350  break;
4351  case OMPD_parallel_for:
4352  Res = ActOnOpenMPParallelForDirective(ClausesWithImplicit, AStmt, StartLoc,
4353  EndLoc, VarsWithInheritedDSA);
4354  AllowedNameModifiers.push_back(OMPD_parallel);
4355  break;
4356  case OMPD_parallel_for_simd:
4357  Res = ActOnOpenMPParallelForSimdDirective(
4358  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4359  AllowedNameModifiers.push_back(OMPD_parallel);
4360  break;
4361  case OMPD_parallel_sections:
4362  Res = ActOnOpenMPParallelSectionsDirective(ClausesWithImplicit, AStmt,
4363  StartLoc, EndLoc);
4364  AllowedNameModifiers.push_back(OMPD_parallel);
4365  break;
4366  case OMPD_task:
4367  Res =
4368  ActOnOpenMPTaskDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc);
4369  AllowedNameModifiers.push_back(OMPD_task);
4370  break;
4371  case OMPD_taskyield:
4372  assert(ClausesWithImplicit.empty() &&
4373  "No clauses are allowed for 'omp taskyield' directive");
4374  assert(AStmt == nullptr &&
4375  "No associated statement allowed for 'omp taskyield' directive");
4376  Res = ActOnOpenMPTaskyieldDirective(StartLoc, EndLoc);
4377  break;
4378  case OMPD_barrier:
4379  assert(ClausesWithImplicit.empty() &&
4380  "No clauses are allowed for 'omp barrier' directive");
4381  assert(AStmt == nullptr &&
4382  "No associated statement allowed for 'omp barrier' directive");
4383  Res = ActOnOpenMPBarrierDirective(StartLoc, EndLoc);
4384  break;
4385  case OMPD_taskwait:
4386  assert(ClausesWithImplicit.empty() &&
4387  "No clauses are allowed for 'omp taskwait' directive");
4388  assert(AStmt == nullptr &&
4389  "No associated statement allowed for 'omp taskwait' directive");
4390  Res = ActOnOpenMPTaskwaitDirective(StartLoc, EndLoc);
4391  break;
4392  case OMPD_taskgroup:
4393  Res = ActOnOpenMPTaskgroupDirective(ClausesWithImplicit, AStmt, StartLoc,
4394  EndLoc);
4395  break;
4396  case OMPD_flush:
4397  assert(AStmt == nullptr &&
4398  "No associated statement allowed for 'omp flush' directive");
4399  Res = ActOnOpenMPFlushDirective(ClausesWithImplicit, StartLoc, EndLoc);
4400  break;
4401  case OMPD_ordered:
4402  Res = ActOnOpenMPOrderedDirective(ClausesWithImplicit, AStmt, StartLoc,
4403  EndLoc);
4404  break;
4405  case OMPD_atomic:
4406  Res = ActOnOpenMPAtomicDirective(ClausesWithImplicit, AStmt, StartLoc,
4407  EndLoc);
4408  break;
4409  case OMPD_teams:
4410  Res =
4411  ActOnOpenMPTeamsDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc);
4412  break;
4413  case OMPD_target:
4414  Res = ActOnOpenMPTargetDirective(ClausesWithImplicit, AStmt, StartLoc,
4415  EndLoc);
4416  AllowedNameModifiers.push_back(OMPD_target);
4417  break;
4418  case OMPD_target_parallel:
4419  Res = ActOnOpenMPTargetParallelDirective(ClausesWithImplicit, AStmt,
4420  StartLoc, EndLoc);
4421  AllowedNameModifiers.push_back(OMPD_target);
4422  AllowedNameModifiers.push_back(OMPD_parallel);
4423  break;
4424  case OMPD_target_parallel_for:
4425  Res = ActOnOpenMPTargetParallelForDirective(
4426  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4427  AllowedNameModifiers.push_back(OMPD_target);
4428  AllowedNameModifiers.push_back(OMPD_parallel);
4429  break;
4430  case OMPD_cancellation_point:
4431  assert(ClausesWithImplicit.empty() &&
4432  "No clauses are allowed for 'omp cancellation point' directive");
4433  assert(AStmt == nullptr && "No associated statement allowed for 'omp "
4434  "cancellation point' directive");
4435  Res = ActOnOpenMPCancellationPointDirective(StartLoc, EndLoc, CancelRegion);
4436  break;
4437  case OMPD_cancel:
4438  assert(AStmt == nullptr &&
4439  "No associated statement allowed for 'omp cancel' directive");
4440  Res = ActOnOpenMPCancelDirective(ClausesWithImplicit, StartLoc, EndLoc,
4441  CancelRegion);
4442  AllowedNameModifiers.push_back(OMPD_cancel);
4443  break;
4444  case OMPD_target_data:
4445  Res = ActOnOpenMPTargetDataDirective(ClausesWithImplicit, AStmt, StartLoc,
4446  EndLoc);
4447  AllowedNameModifiers.push_back(OMPD_target_data);
4448  break;
4449  case OMPD_target_enter_data:
4450  Res = ActOnOpenMPTargetEnterDataDirective(ClausesWithImplicit, StartLoc,
4451  EndLoc, AStmt);
4452  AllowedNameModifiers.push_back(OMPD_target_enter_data);
4453  break;
4454  case OMPD_target_exit_data:
4455  Res = ActOnOpenMPTargetExitDataDirective(ClausesWithImplicit, StartLoc,
4456  EndLoc, AStmt);
4457  AllowedNameModifiers.push_back(OMPD_target_exit_data);
4458  break;
4459  case OMPD_taskloop:
4460  Res = ActOnOpenMPTaskLoopDirective(ClausesWithImplicit, AStmt, StartLoc,
4461  EndLoc, VarsWithInheritedDSA);
4462  AllowedNameModifiers.push_back(OMPD_taskloop);
4463  break;
4464  case OMPD_taskloop_simd:
4465  Res = ActOnOpenMPTaskLoopSimdDirective(ClausesWithImplicit, AStmt, StartLoc,
4466  EndLoc, VarsWithInheritedDSA);
4467  AllowedNameModifiers.push_back(OMPD_taskloop);
4468  break;
4469  case OMPD_master_taskloop:
4470  Res = ActOnOpenMPMasterTaskLoopDirective(
4471  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4472  AllowedNameModifiers.push_back(OMPD_taskloop);
4473  break;
4474  case OMPD_parallel_master_taskloop:
4475  Res = ActOnOpenMPParallelMasterTaskLoopDirective(
4476  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4477  AllowedNameModifiers.push_back(OMPD_taskloop);
4478  AllowedNameModifiers.push_back(OMPD_parallel);
4479  break;
4480  case OMPD_distribute:
4481  Res = ActOnOpenMPDistributeDirective(ClausesWithImplicit, AStmt, StartLoc,
4482  EndLoc, VarsWithInheritedDSA);
4483  break;
4484  case OMPD_target_update:
4485  Res = ActOnOpenMPTargetUpdateDirective(ClausesWithImplicit, StartLoc,
4486  EndLoc, AStmt);
4487  AllowedNameModifiers.push_back(OMPD_target_update);
4488  break;
4489  case OMPD_distribute_parallel_for:
4490  Res = ActOnOpenMPDistributeParallelForDirective(
4491  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4492  AllowedNameModifiers.push_back(OMPD_parallel);
4493  break;
4494  case OMPD_distribute_parallel_for_simd:
4495  Res = ActOnOpenMPDistributeParallelForSimdDirective(
4496  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4497  AllowedNameModifiers.push_back(OMPD_parallel);
4498  break;
4499  case OMPD_distribute_simd:
4500  Res = ActOnOpenMPDistributeSimdDirective(
4501  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4502  break;
4503  case OMPD_target_parallel_for_simd:
4504  Res = ActOnOpenMPTargetParallelForSimdDirective(
4505  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4506  AllowedNameModifiers.push_back(OMPD_target);
4507  AllowedNameModifiers.push_back(OMPD_parallel);
4508  break;
4509  case OMPD_target_simd:
4510  Res = ActOnOpenMPTargetSimdDirective(ClausesWithImplicit, AStmt, StartLoc,
4511  EndLoc, VarsWithInheritedDSA);
4512  AllowedNameModifiers.push_back(OMPD_target);
4513  break;
4514  case OMPD_teams_distribute:
4515  Res = ActOnOpenMPTeamsDistributeDirective(
4516  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4517  break;
4518  case OMPD_teams_distribute_simd:
4519  Res = ActOnOpenMPTeamsDistributeSimdDirective(
4520  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4521  break;
4522  case OMPD_teams_distribute_parallel_for_simd:
4523  Res = ActOnOpenMPTeamsDistributeParallelForSimdDirective(
4524  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4525  AllowedNameModifiers.push_back(OMPD_parallel);
4526  break;
4527  case OMPD_teams_distribute_parallel_for:
4528  Res = ActOnOpenMPTeamsDistributeParallelForDirective(
4529  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4530  AllowedNameModifiers.push_back(OMPD_parallel);
4531  break;
4532  case OMPD_target_teams:
4533  Res = ActOnOpenMPTargetTeamsDirective(ClausesWithImplicit, AStmt, StartLoc,
4534  EndLoc);
4535  AllowedNameModifiers.push_back(OMPD_target);
4536  break;
4537  case OMPD_target_teams_distribute:
4538  Res = ActOnOpenMPTargetTeamsDistributeDirective(
4539  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4540  AllowedNameModifiers.push_back(OMPD_target);
4541  break;
4542  case OMPD_target_teams_distribute_parallel_for:
4543  Res = ActOnOpenMPTargetTeamsDistributeParallelForDirective(
4544  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4545  AllowedNameModifiers.push_back(OMPD_target);
4546  AllowedNameModifiers.push_back(OMPD_parallel);
4547  break;
4548  case OMPD_target_teams_distribute_parallel_for_simd:
4549  Res = ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
4550  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4551  AllowedNameModifiers.push_back(OMPD_target);
4552  AllowedNameModifiers.push_back(OMPD_parallel);
4553  break;
4554  case OMPD_target_teams_distribute_simd:
4555  Res = ActOnOpenMPTargetTeamsDistributeSimdDirective(
4556  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4557  AllowedNameModifiers.push_back(OMPD_target);
4558  break;
4559  case OMPD_declare_target:
4560  case OMPD_end_declare_target:
4561  case OMPD_threadprivate:
4562  case OMPD_allocate:
4563  case OMPD_declare_reduction:
4564  case OMPD_declare_mapper:
4565  case OMPD_declare_simd:
4566  case OMPD_requires:
4567  case OMPD_declare_variant:
4568  llvm_unreachable("OpenMP Directive is not allowed");
4569  case OMPD_unknown:
4570  llvm_unreachable("Unknown OpenMP directive");
4571  }
4572 
4573  ErrorFound = Res.isInvalid() || ErrorFound;
4574 
4575  // Check variables in the clauses if default(none) was specified.
4576  if (DSAStack->getDefaultDSA() == DSA_none) {
4577  DSAAttrChecker DSAChecker(DSAStack, *this, nullptr);
4578  for (OMPClause *C : Clauses) {
4579  switch (C->getClauseKind()) {
4580  case OMPC_num_threads:
4581  case OMPC_dist_schedule:
4582  // Do not analyze if no parent teams directive.
4583  if (isOpenMPTeamsDirective(DSAStack->getCurrentDirective()))
4584  break;
4585  continue;
4586  case OMPC_if:
4587  if (isOpenMPTeamsDirective(DSAStack->getCurrentDirective()) &&
4588  cast<OMPIfClause>(C)->getNameModifier() != OMPD_target)
4589  break;
4590  continue;
4591  case OMPC_schedule:
4592  break;
4593  case OMPC_grainsize:
4594  case OMPC_num_tasks:
4595  case OMPC_final:
4596  case OMPC_priority:
4597  // Do not analyze if no parent parallel directive.
4598  if (isOpenMPParallelDirective(DSAStack->getCurrentDirective()))
4599  break;
4600  continue;
4601  case OMPC_ordered:
4602  case OMPC_device:
4603  case OMPC_num_teams:
4604  case OMPC_thread_limit:
4605  case OMPC_hint:
4606  case OMPC_collapse:
4607  case OMPC_safelen:
4608  case OMPC_simdlen:
4609  case OMPC_default:
4610  case OMPC_proc_bind:
4611  case OMPC_private:
4612  case OMPC_firstprivate:
4613  case OMPC_lastprivate:
4614  case OMPC_shared:
4615  case OMPC_reduction:
4616  case OMPC_task_reduction:
4617  case OMPC_in_reduction:
4618  case OMPC_linear:
4619  case OMPC_aligned:
4620  case OMPC_copyin:
4621  case OMPC_copyprivate:
4622  case OMPC_nowait:
4623  case OMPC_untied:
4624  case OMPC_mergeable:
4625  case OMPC_allocate:
4626  case OMPC_read:
4627  case OMPC_write:
4628  case OMPC_update:
4629  case OMPC_capture:
4630  case OMPC_seq_cst:
4631  case OMPC_depend:
4632  case OMPC_threads:
4633  case OMPC_simd:
4634  case OMPC_map:
4635  case OMPC_nogroup:
4636  case OMPC_defaultmap:
4637  case OMPC_to:
4638  case OMPC_from:
4639  case OMPC_use_device_ptr:
4640  case OMPC_is_device_ptr:
4641  continue;
4642  case OMPC_allocator:
4643  case OMPC_flush:
4644  case OMPC_threadprivate:
4645  case OMPC_uniform:
4646  case OMPC_unknown:
4647  case OMPC_unified_address:
4648  case OMPC_unified_shared_memory:
4649  case OMPC_reverse_offload:
4650  case OMPC_dynamic_allocators:
4651  case OMPC_atomic_default_mem_order:
4652  case OMPC_device_type:
4653  case OMPC_match:
4654  llvm_unreachable("Unexpected clause");
4655  }
4656  for (Stmt *CC : C->children()) {
4657  if (CC)
4658  DSAChecker.Visit(CC);
4659  }
4660  }
4661  for (auto &P : DSAChecker.getVarsWithInheritedDSA())
4662  VarsWithInheritedDSA[P.getFirst()] = P.getSecond();
4663  }
4664  for (const auto &P : VarsWithInheritedDSA) {
4665  if (P.getFirst()->isImplicit() || isa<OMPCapturedExprDecl>(P.getFirst()))
4666  continue;
4667  ErrorFound = true;
4668  Diag(P.second->getExprLoc(), diag::err_omp_no_dsa_for_variable)
4669  << P.first << P.second->getSourceRange();
4670  Diag(DSAStack->getDefaultDSALocation(), diag::note_omp_default_dsa_none);
4671  }
4672 
4673  if (!AllowedNameModifiers.empty())
4674  ErrorFound = checkIfClauses(*this, Kind, Clauses, AllowedNameModifiers) ||
4675  ErrorFound;
4676 
4677  if (ErrorFound)
4678  return StmtError();
4679 
4680  if (!(Res.getAs<OMPExecutableDirective>()->isStandaloneDirective())) {
4681  Res.getAs<OMPExecutableDirective>()
4682  ->getStructuredBlock()
4683  ->setIsOMPStructuredBlock(true);
4684  }
4685 
4686  if (!CurContext->isDependentContext() &&
4687  isOpenMPTargetExecutionDirective(Kind) &&
4688  !(DSAStack->hasRequiresDeclWithClause<OMPUnifiedSharedMemoryClause>() ||
4689  DSAStack->hasRequiresDeclWithClause<OMPUnifiedAddressClause>() ||
4690  DSAStack->hasRequiresDeclWithClause<OMPReverseOffloadClause>() ||
4691  DSAStack->hasRequiresDeclWithClause<OMPDynamicAllocatorsClause>())) {
4692  // Register target to DSA Stack.
4693  DSAStack->addTargetDirLocation(StartLoc);
4694  }
4695 
4696  return Res;
4697 }
4698 
4699 Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareSimdDirective(
4700  DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen,
4701  ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
4702  ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
4703  ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR) {
4704  assert(Aligneds.size() == Alignments.size());
4705  assert(Linears.size() == LinModifiers.size());
4706  assert(Linears.size() == Steps.size());
4707  if (!DG || DG.get().isNull())
4708  return DeclGroupPtrTy();
4709 
4710  const int SimdId = 0;
4711  if (!DG.get().isSingleDecl()) {
4712  Diag(SR.getBegin(), diag::err_omp_single_decl_in_declare_simd_variant)
4713  << SimdId;
4714  return DG;
4715  }
4716  Decl *ADecl = DG.get().getSingleDecl();
4717  if (auto *FTD = dyn_cast<FunctionTemplateDecl>(ADecl))
4718  ADecl = FTD->getTemplatedDecl();
4719 
4720  auto *FD = dyn_cast<FunctionDecl>(ADecl);
4721  if (!FD) {
4722  Diag(ADecl->getLocation(), diag::err_omp_function_expected) << SimdId;
4723  return DeclGroupPtrTy();
4724  }
4725 
4726  // OpenMP [2.8.2, declare simd construct, Description]
4727  // The parameter of the simdlen clause must be a constant positive integer
4728  // expression.
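  // Illustrative example (editor's addition, not in the original source): a
  // directive such as '#pragma omp declare simd simdlen(8)' passes this check,
  // while 'simdlen(0)' or a non-constant argument is diagnosed below.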
4729  ExprResult SL;
4730  if (Simdlen)
4731  SL = VerifyPositiveIntegerConstantInClause(Simdlen, OMPC_simdlen);
4732  // OpenMP [2.8.2, declare simd construct, Description]
4733  // The special this pointer can be used as if it were one of the arguments to the
4734  // function in any of the linear, aligned, or uniform clauses.
4735  // The uniform clause declares one or more arguments to have an invariant
4736  // value for all concurrent invocations of the function in the execution of a
4737  // single SIMD loop.
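  // Illustrative example (editor's addition, hypothetical function): in
  //   #pragma omp declare simd uniform(n)
  //   float add_n(float *p, int n);
  // the parameter 'n' is declared invariant across all concurrent invocations.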
4738  llvm::DenseMap<const Decl *, const Expr *> UniformedArgs;
4739  const Expr *UniformedLinearThis = nullptr;
4740  for (const Expr *E : Uniforms) {
4741  E = E->IgnoreParenImpCasts();
4742  if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
4743  if (const auto *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl()))
4744  if (FD->getNumParams() > PVD->getFunctionScopeIndex() &&
4745  FD->getParamDecl(PVD->getFunctionScopeIndex())
4746  ->getCanonicalDecl() == PVD->getCanonicalDecl()) {
4747  UniformedArgs.try_emplace(PVD->getCanonicalDecl(), E);
4748  continue;
4749  }
4750  if (isa<CXXThisExpr>(E)) {
4751  UniformedLinearThis = E;
4752  continue;
4753  }
4754  Diag(E->getExprLoc(), diag::err_omp_param_or_this_in_clause)
4755  << FD->getDeclName() << (isa<CXXMethodDecl>(ADecl) ? 1 : 0);
4756  }
4757  // OpenMP [2.8.2, declare simd construct, Description]
4758  // The aligned clause declares that the object to which each list item points
4759  // is aligned to the number of bytes expressed in the optional parameter of
4760  // the aligned clause.
4761  // The special this pointer can be used as if it were one of the arguments to the
4762  // function in any of the linear, aligned, or uniform clauses.
4763  // The type of list items appearing in the aligned clause must be array,
4764  // pointer, reference to array, or reference to pointer.
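  // Illustrative example (editor's addition, hypothetical function): in
  //   #pragma omp declare simd aligned(p : 64)
  //   void scale(float *p);
  // 'p' satisfies the pointer-type requirement checked below.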
4765  llvm::DenseMap<const Decl *, const Expr *> AlignedArgs;
4766  const Expr *AlignedThis = nullptr;
4767  for (const Expr *E : Aligneds) {
4768  E = E->IgnoreParenImpCasts();
4769  if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
4770  if (const auto *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl())) {
4771  const VarDecl *CanonPVD = PVD->getCanonicalDecl();
4772  if (FD->getNumParams() > PVD->getFunctionScopeIndex() &&
4773  FD->getParamDecl(PVD->getFunctionScopeIndex())
4774  ->getCanonicalDecl() == CanonPVD) {
4775  // OpenMP [2.8.1, simd construct, Restrictions]
4776  // A list-item cannot appear in more than one aligned clause.
4777  if (AlignedArgs.count(CanonPVD) > 0) {
4778  Diag(E->getExprLoc(), diag::err_omp_aligned_twice)
4779  << 1 << E->getSourceRange();
4780  Diag(AlignedArgs[CanonPVD]->getExprLoc(),
4781  diag::note_omp_explicit_dsa)
4782  << getOpenMPClauseName(OMPC_aligned);
4783  continue;
4784  }
4785  AlignedArgs[CanonPVD] = E;
4786  QualType QTy = PVD->getType()
4787  .getNonReferenceType()
4788  .getUnqualifiedType()
4789  .getCanonicalType();
4790  const Type *Ty = QTy.getTypePtrOrNull();
4791  if (!Ty || (!Ty->isArrayType() && !Ty->isPointerType())) {
4792  Diag(E->getExprLoc(), diag::err_omp_aligned_expected_array_or_ptr)
4793  << QTy << getLangOpts().CPlusPlus << E->getSourceRange();
4794  Diag(PVD->getLocation(), diag::note_previous_decl) << PVD;
4795  }
4796  continue;
4797  }
4798  }
4799  if (isa<CXXThisExpr>(E)) {
4800  if (AlignedThis) {
4801  Diag(E->getExprLoc(), diag::err_omp_aligned_twice)
4802  << 2 << E->getSourceRange();
4803  Diag(AlignedThis->getExprLoc(), diag::note_omp_explicit_dsa)
4804  << getOpenMPClauseName(OMPC_aligned);
4805  }
4806  AlignedThis = E;
4807  continue;
4808  }
4809  Diag(E->getExprLoc(), diag::err_omp_param_or_this_in_clause)
4810  << FD->getDeclName() << (isa<CXXMethodDecl>(ADecl) ? 1 : 0);
4811  }
4812  // The optional parameter of the aligned clause, alignment, must be a constant
4813  // positive integer expression. If no optional parameter is specified,
4814  // implementation-defined default alignments for SIMD instructions on the
4815  // target platforms are assumed.
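  // Illustrative example (editor's addition): 'aligned(p)' relies on the
  // implementation-defined default alignment, while 'aligned(p : 32)' must
  // pass the constant positive integer check performed here.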
4816  SmallVector<const Expr *, 4> NewAligns;
4817  for (Expr *E : Alignments) {
4818  ExprResult Align;
4819  if (E)
4820  Align = VerifyPositiveIntegerConstantInClause(E, OMPC_aligned);
4821  NewAligns.push_back(Align.get());
4822  }
4823  // OpenMP [2.8.2, declare simd construct, Description]
4824  // The linear clause declares one or more list items to be private to a SIMD
4825  // lane and to have a linear relationship with respect to the iteration space
4826  // of a loop.
4827  // The special this pointer can be used as if it were one of the arguments to the
4828  // function in any of the linear, aligned, or uniform clauses.
4829  // When a linear-step expression is specified in a linear clause it must be
4830  // either a constant integer expression or an integer-typed parameter that is
4831  // specified in a uniform clause on the directive.
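  // Illustrative example (editor's addition, hypothetical function): in
  //   #pragma omp declare simd linear(i : s) uniform(s)
  //   void fill(float *a, int i, int s);
  // the linear step 's' is valid because it is an integer uniform parameter.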
4832  llvm::DenseMap<const Decl *, const Expr *> LinearArgs;
4833  const bool IsUniformedThis = UniformedLinearThis != nullptr;
4834  auto MI = LinModifiers.begin();
4835  for (const Expr *E : Linears) {
4836  auto LinKind = static_cast<OpenMPLinearClauseKind>(*MI);
4837  ++MI;
4838  E = E->IgnoreParenImpCasts();
4839  if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
4840  if (const auto *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl())) {
4841  const VarDecl *CanonPVD = PVD->getCanonicalDecl();
4842  if (FD->getNumParams() > PVD->getFunctionScopeIndex() &&
4843  FD->getParamDecl(PVD->getFunctionScopeIndex())
4844  ->getCanonicalDecl() == CanonPVD) {
4845  // OpenMP [2.15.3.7, linear Clause, Restrictions]
4846  // A list-item cannot appear in more than one linear clause.
4847  if (LinearArgs.count(CanonPVD) > 0) {
4848  Diag(E->getExprLoc(), diag::err_omp_wrong_dsa)
4849  << getOpenMPClauseName(OMPC_linear)
4850  << getOpenMPClauseName(OMPC_linear) << E->getSourceRange();
4851  Diag(LinearArgs[CanonPVD]->getExprLoc(),
4852  diag::note_omp_explicit_dsa)
4853  << getOpenMPClauseName(OMPC_linear);
4854  continue;
4855  }
4856  // Each argument can appear in at most one uniform or linear clause.
4857  if (UniformedArgs.count(CanonPVD) > 0) {
4858  Diag(E->getExprLoc(), diag::err_omp_wrong_dsa)
4859  << getOpenMPClauseName(OMPC_linear)
4860  << getOpenMPClauseName(OMPC_uniform) << E->getSourceRange();
4861  Diag(UniformedArgs[CanonPVD]->getExprLoc(),
4862  diag::note_omp_explicit_dsa)
4863  << getOpenMPClauseName(OMPC_uniform);
4864  continue;
4865  }
4866  LinearArgs[CanonPVD] = E;
4867  if (E->isValueDependent() || E->isTypeDependent() ||
4868  E->isInstantiationDependent() ||
4869  E->containsUnexpandedParameterPack())
4870  continue;
4871  (void)CheckOpenMPLinearDecl(CanonPVD, E->getExprLoc(), LinKind,
4872  PVD->getOriginalType());
4873  continue;
4874  }
4875  }
4876  if (isa<CXXThisExpr>(E)) {
4877  if (UniformedLinearThis) {
4878  Diag(E->getExprLoc(), diag::err_omp_wrong_dsa)
4879  << getOpenMPClauseName(OMPC_linear)
4880  << getOpenMPClauseName(IsUniformedThis ? OMPC_uniform : OMPC_linear)
4881  << E->getSourceRange();
4882  Diag(UniformedLinearThis->getExprLoc(), diag::note_omp_explicit_dsa)
4883  << getOpenMPClauseName(IsUniformedThis ? OMPC_uniform
4884  : OMPC_linear);
4885  continue;
4886  }
4887  UniformedLinearThis = E;
4888  if (E->isValueDependent() || E->isTypeDependent() ||
4889  E->isInstantiationDependent() || E->containsUnexpandedParameterPack())
4890  continue;
4891  (void)CheckOpenMPLinearDecl(/*D=*/nullptr, E->getExprLoc(), LinKind,
4892  E->getType());
4893  continue;
4894  }
4895  Diag(E->getExprLoc(), diag::err_omp_param_or_this_in_clause)
4896  << FD->getDeclName() << (isa<CXXMethodDecl>(ADecl) ? 1 : 0);
4897  }
4898  Expr *Step = nullptr;
4899  Expr *NewStep = nullptr;
4900  SmallVector<Expr *, 4> NewSteps;
4901  for (Expr *E : Steps) {
4902  // Skip the same step expression; it was checked already.
4903  if (Step == E || !E) {
4904  NewSteps.push_back(E ? NewStep : nullptr);
4905  continue;
4906  }
4907  Step = E;
4908  if (const auto *DRE = dyn_cast<DeclRefExpr>(Step))
4909  if (const auto *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl())) {
4910  const VarDecl *CanonPVD = PVD->getCanonicalDecl();
4911  if (UniformedArgs.count(CanonPVD) == 0) {
4912  Diag(Step->getExprLoc(), diag::err_omp_expected_uniform_param)
4913  << Step->getSourceRange();
4914  } else if (E->isValueDependent() || E->isTypeDependent() ||
4915  E->isInstantiationDependent() ||
4916  E->containsUnexpandedParameterPack() ||
4917  CanonPVD->getType()->hasIntegerRepresentation()) {
4918  NewSteps.push_back(Step);
4919  } else {
4920  Diag(Step->getExprLoc(), diag::err_omp_expected_int_param)
4921  << Step->getSourceRange();
4922  }
4923  continue;
4924  }
4925  NewStep = Step;
4926  if (Step && !Step->isValueDependent() && !Step->isTypeDependent() &&
4927  !Step->isInstantiationDependent() &&
4928  !Step->containsUnexpandedParameterPack()) {
4929  NewStep = PerformOpenMPImplicitIntegerConversion(Step->getExprLoc(), Step)
4930  .get();
4931  if (NewStep)
4932  NewStep = VerifyIntegerConstantExpression(NewStep).get();
4933  }
4934  NewSteps.push_back(NewStep);
4935  }
4936  auto *NewAttr = OMPDeclareSimdDeclAttr::CreateImplicit(
4937  Context, BS, SL.get(), const_cast<Expr **>(Uniforms.data()),
4938  Uniforms.size(), const_cast<Expr **>(Aligneds.data()), Aligneds.size(),
4939  const_cast<Expr **>(NewAligns.data()), NewAligns.size(),
4940  const_cast<Expr **>(Linears.data()), Linears.size(),
4941  const_cast<unsigned *>(LinModifiers.data()), LinModifiers.size(),
4942  NewSteps.data(), NewSteps.size(), SR);
4943  ADecl->addAttr(NewAttr);
4944  return DG;
4945 }
4946 
4947 Optional<std::pair<FunctionDecl *, Expr *>>
4948 Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
4949  Expr *VariantRef, SourceRange SR) {
4950  if (!DG || DG.get().isNull())
4951  return None;
4952 
4953  const int VariantId = 1;
4954  // Must be applied only to a single decl.
4955  if (!DG.get().isSingleDecl()) {
4956  Diag(SR.getBegin(), diag::err_omp_single_decl_in_declare_simd_variant)
4957  << VariantId << SR;
4958  return None;
4959  }
4960  Decl *ADecl = DG.get().getSingleDecl();
4961  if (auto *FTD = dyn_cast<FunctionTemplateDecl>(ADecl))
4962  ADecl = FTD->getTemplatedDecl();
4963 
4964  // Decl must be a function.
4965  auto *FD = dyn_cast<FunctionDecl>(ADecl);
4966  if (!FD) {
4967  Diag(ADecl->getLocation(), diag::err_omp_function_expected)
4968  << VariantId << SR;
4969  return None;
4970  }
4971 
4972  auto &&HasMultiVersionAttributes = [](const FunctionDecl *FD) {
4973  return FD->hasAttrs() &&
4974  (FD->hasAttr<CPUDispatchAttr>() || FD->hasAttr<CPUSpecificAttr>() ||
4975  FD->hasAttr<TargetAttr>());
4976  };
4977  // OpenMP is not compatible with CPU-specific attributes.
4978  if (HasMultiVersionAttributes(FD)) {
4979  Diag(FD->getLocation(), diag::err_omp_declare_variant_incompat_attributes)
4980  << SR;
4981  return None;
4982  }
4983 
4984  // Allow #pragma omp declare variant only if the function is not used.
4985  if (FD->isUsed(false))
4986  Diag(SR.getBegin(), diag::warn_omp_declare_variant_after_used)
4987  << FD->getLocation();
4988 
4989  // Check if the function was emitted already.
4990  const FunctionDecl *Definition;
4991  if (!FD->isThisDeclarationADefinition() && FD->isDefined(Definition) &&
4992  (LangOpts.EmitAllDecls || Context.DeclMustBeEmitted(Definition)))
4993  Diag(SR.getBegin(), diag::warn_omp_declare_variant_after_emitted)
4994  << FD->getLocation();
4995 
4996  // The VariantRef must point to a function.
4997  if (!VariantRef) {
4998  Diag(SR.getBegin(), diag::err_omp_function_expected) << VariantId;
4999  return None;
5000  }
5001 
5002  // Do not check templates; wait until instantiation.
5003  if (VariantRef->isTypeDependent() || VariantRef->isValueDependent() ||
5004  VariantRef->containsUnexpandedParameterPack() ||
5005  VariantRef->isInstantiationDependent() || FD->isDependentContext())
5006  return std::make_pair(FD, VariantRef);
5007 
5008  // Convert VariantRef expression to the type of the original function to
5009  // resolve possible conflicts.
5010  ExprResult VariantRefCast;
5011  if (LangOpts.CPlusPlus) {
5012  QualType FnPtrType;
5013  auto *Method = dyn_cast<CXXMethodDecl>(FD);
5014  if (Method && !Method->isStatic()) {
5015  const Type *ClassType =
5016  Context.getTypeDeclType(Method->getParent()).getTypePtr();
5017  FnPtrType = Context.getMemberPointerType(FD->getType(), ClassType);
5018  ExprResult ER;
5019  {
5020  // Build addr_of unary op to correctly handle type checks for member
5021  // functions.
5022  Sema::TentativeAnalysisScope Trap(*this);
5023  ER = CreateBuiltinUnaryOp(VariantRef->getBeginLoc(), UO_AddrOf,
5024  VariantRef);
5025  }
5026  if (!ER.isUsable()) {
5027  Diag(VariantRef->getExprLoc(), diag::err_omp_function_expected)
5028  << VariantId << VariantRef->getSourceRange();
5029  return None;
5030  }
5031  VariantRef = ER.get();
5032  } else {
5033  FnPtrType = Context.getPointerType(FD->getType());
5034  }
5035  ImplicitConversionSequence ICS =
5036  TryImplicitConversion(VariantRef, FnPtrType.getUnqualifiedType(),
5037  /*SuppressUserConversions=*/false,
5038  /*AllowExplicit=*/false,
5039  /*InOverloadResolution=*/false,
5040  /*CStyle=*/false,
5041  /*AllowObjCWritebackConversion=*/false);
5042  if (ICS.isFailure()) {
5043  Diag(VariantRef->getExprLoc(),
5044  diag::err_omp_declare_variant_incompat_types)
5045  << VariantRef->getType() << FnPtrType << VariantRef->getSourceRange();
5046  return None;
5047  }
5048  VariantRefCast = PerformImplicitConversion(
5049  VariantRef, FnPtrType.getUnqualifiedType(), AA_Converting);
5050  if (!VariantRefCast.isUsable())
5051  return None;
5052  // Drop previously built artificial addr_of unary op for member functions.
5053  if (Method && !Method->isStatic()) {
5054  Expr *PossibleAddrOfVariantRef = VariantRefCast.get();
5055  if (auto *UO = dyn_cast<UnaryOperator>(
5056  PossibleAddrOfVariantRef->IgnoreImplicit()))
5057  VariantRefCast = UO->getSubExpr();
5058  }
5059  } else {
5060  VariantRefCast = VariantRef;
5061  }
5062 
5063  ExprResult ER = CheckPlaceholderExpr(VariantRefCast.get());
5064  if (!ER.isUsable() ||
5065  !ER.get()->IgnoreParenImpCasts()->getType()->isFunctionType()) {
5066  Diag(VariantRef->getExprLoc(), diag::err_omp_function_expected)
5067  << VariantId << VariantRef->getSourceRange();
5068  return None;
5069  }
5070 
5071  // The VariantRef must point to a function.
5072  auto *DRE = dyn_cast<DeclRefExpr>(ER.get()->IgnoreParenImpCasts());
5073  if (!DRE) {
5074  Diag(VariantRef->getExprLoc(), diag::err_omp_function_expected)
5075  << VariantId << VariantRef->getSourceRange();
5076  return None;
5077  }
5078  auto *NewFD = dyn_cast_or_null<FunctionDecl>(DRE->getDecl());
5079  if (!NewFD) {
5080  Diag(VariantRef->getExprLoc(), diag::err_omp_function_expected)
5081  << VariantId << VariantRef->getSourceRange();
5082  return None;
5083  }
5084 
5085  // Check that the variant function is not itself marked with a declare variant directive.
5086  if (NewFD->hasAttrs() && NewFD->hasAttr<OMPDeclareVariantAttr>()) {
5087  Diag(VariantRef->getExprLoc(),
5088  diag::warn_omp_declare_variant_marked_as_declare_variant)
5089  << VariantRef->getSourceRange();
5090  SourceRange SR =
5091  NewFD->specific_attr_begin<OMPDeclareVariantAttr>()->getRange();
5092  Diag(SR.getBegin(), diag::note_omp_marked_declare_variant_here) << SR;
5093  return None;
5094  }
5095 
5096  enum DoesntSupport {
5097  VirtFuncs = 1,
5098  Constructors = 3,
5099  Destructors = 4,
5100  DeletedFuncs = 5,
5101  DefaultedFuncs = 6,
5102  ConstexprFuncs = 7,
5103  ConstevalFuncs = 8,
5104  };
5105  if (const auto *CXXFD = dyn_cast<CXXMethodDecl>(FD)) {
5106  if (CXXFD->isVirtual()) {
5107  Diag(FD->getLocation(), diag::err_omp_declare_variant_doesnt_support)
5108  << VirtFuncs;
5109  return None;
5110  }
5111 
5112  if (isa<CXXConstructorDecl>(FD)) {
5113  Diag(FD->getLocation(), diag::err_omp_declare_variant_doesnt_support)
5114  << Constructors;
5115  return None;
5116  }
5117 
5118  if (isa<CXXDestructorDecl>(FD)) {
5119  Diag(FD->getLocation(), diag::err_omp_declare_variant_doesnt_support)
5120  << Destructors;
5121  return None;
5122  }
5123  }
5124 
5125  if (FD->isDeleted()) {
5126  Diag(FD->getLocation(), diag::err_omp_declare_variant_doesnt_support)
5127  << DeletedFuncs;
5128  return None;
5129  }
5130 
5131  if (FD->isDefaulted()) {
5132  Diag(FD->getLocation(), diag::err_omp_declare_variant_doesnt_support)
5133  << DefaultedFuncs;
5134  return None;
5135  }
5136 
5137  if (FD->isConstexpr()) {
5138  Diag(FD->getLocation(), diag::err_omp_declare_variant_doesnt_support)
5139  << (NewFD->isConsteval() ? ConstevalFuncs : ConstexprFuncs);
5140  return None;
5141  }
5142 
5143  // Check general compatibility.
5144  if (areMultiversionVariantFunctionsCompatible(
5145  FD, NewFD, PDiag(diag::err_omp_declare_variant_noproto),
5146  PartialDiagnosticAt(
5147  SR.getBegin(),
5148  PDiag(diag::note_omp_declare_variant_specified_here) << SR),
5149  PartialDiagnosticAt(
5150  VariantRef->getExprLoc(),
5151  PDiag(diag::err_omp_declare_variant_doesnt_support)),
5152  PartialDiagnosticAt(VariantRef->getExprLoc(),
5153  PDiag(diag::err_omp_declare_variant_diff)
5154  << FD->getLocation()),
5155  /*TemplatesSupported=*/true, /*ConstexprSupported=*/false,
5156  /*CLinkageMayDiffer=*/true))
5157  return None;
5158  return std::make_pair(FD, cast<Expr>(DRE));
5159 }
5160 
5161 void Sema::ActOnOpenMPDeclareVariantDirective(
5162  FunctionDecl *FD, Expr *VariantRef, SourceRange SR,
5163  const Sema::OpenMPDeclareVariantCtsSelectorData &Data) {
5164  if (Data.CtxSet == OMPDeclareVariantAttr::CtxSetUnknown ||
5165  Data.Ctx == OMPDeclareVariantAttr::CtxUnknown)
5166  return;
5167  Expr *Score = nullptr;
5168  OMPDeclareVariantAttr::ScoreType ST = OMPDeclareVariantAttr::ScoreUnknown;
5169  if (Data.CtxScore.isUsable()) {
5170  ST = OMPDeclareVariantAttr::ScoreSpecified;
5171  Score = Data.CtxScore.get();
5172  if (!Score->isTypeDependent() && !Score->isValueDependent() &&
5173  !Score->isInstantiationDependent() &&
5174  !Score->containsUnexpandedParameterPack()) {
5175  llvm::APSInt Result;
5176  ExprResult ICE = VerifyIntegerConstantExpression(Score, &Result);
5177  if (ICE.isInvalid())
5178  return;
5179  }
5180  }
5181  auto *NewAttr = OMPDeclareVariantAttr::CreateImplicit(
5182  Context, VariantRef, Score, Data.CtxSet, ST, Data.Ctx,
5183  Data.ImplVendors.begin(), Data.ImplVendors.size(), SR);
5184  FD->addAttr(NewAttr);
5185 }
5186 
5187 void Sema::markOpenMPDeclareVariantFuncsReferenced(SourceLocation Loc,
5188  FunctionDecl *Func,
5189  bool MightBeOdrUse) {
5190  assert(LangOpts.OpenMP && "Expected OpenMP mode.");
5191 
5192  if (!Func->isDependentContext() && Func->hasAttrs()) {
5193  for (OMPDeclareVariantAttr *A :
5194  Func->specific_attrs<OMPDeclareVariantAttr>()) {
5195  // TODO: add checks for active OpenMP context where possible.
5196  Expr *VariantRef = A->getVariantFuncRef();
5197  auto *DRE = dyn_cast<DeclRefExpr>(VariantRef->IgnoreParenImpCasts());
5198  auto *F = cast<FunctionDecl>(DRE->getDecl());
5199  if (!F->isDefined() && F->isTemplateInstantiation())
5200  InstantiateFunctionDefinition(Loc, F->getFirstDecl());
5201  MarkFunctionReferenced(Loc, F, MightBeOdrUse);
5202  }
5203  }
5204 }
5205 
5206 StmtResult Sema::ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
5207  Stmt *AStmt,
5208  SourceLocation StartLoc,
5209  SourceLocation EndLoc) {
5210  if (!AStmt)
5211  return StmtError();
5212 
5213  auto *CS = cast<CapturedStmt>(AStmt);
5214  // 1.2.2 OpenMP Language Terminology
5215  // Structured block - An executable statement with a single entry at the
5216  // top and a single exit at the bottom.
5217  // The point of exit cannot be a branch out of the structured block.
5218  // longjmp() and throw() must not violate the entry/exit criteria.
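  // Illustrative example (editor's addition, hypothetical code): for
  //   #pragma omp parallel
  //   { work(); }
  // the compound statement is the structured block captured here, and no
  // branch may leave it.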
5219  CS->getCapturedDecl()->setNothrow();
5220 
5221  setFunctionHasBranchProtectedScope();
5222 
5223  return OMPParallelDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt,
5224  DSAStack->isCancelRegion());
5225 }
5226 
5227 namespace {
5228 /// Iteration space of a single for loop.
5229 struct LoopIterationSpace final {
5230  /// True if the condition operator is the strict compare operator (<, > or
5231  /// !=).
5232  bool IsStrictCompare = false;
5233  /// Condition of the loop.
5234  Expr *PreCond = nullptr;
5235  /// This expression calculates the number of iterations in the loop.
5236  /// It is always possible to calculate it before starting the loop.
5237  Expr *NumIterations = nullptr;
5238  /// The loop counter variable.
5239  Expr *CounterVar = nullptr;
5240  /// Private loop counter variable.
5241  Expr *PrivateCounterVar = nullptr;
5242  /// This is the initializer for the initial value of #CounterVar.
5243  Expr *CounterInit = nullptr;
5244  /// This is the step for #CounterVar, used to generate its update:
5245  /// #CounterVar = #CounterInit + #CounterStep * CurrentIteration.
5246  Expr *CounterStep = nullptr;
5247  /// Should step be subtracted?
5248  bool Subtract = false;
5249  /// Source range of the loop init.
5250  SourceRange InitSrcRange;
5251  /// Source range of the loop condition.
5252  SourceRange CondSrcRange;
5253  /// Source range of the loop increment.
5254  SourceRange IncSrcRange;
5255  /// Minimum value that the loop control variable can have. Used to support
5256  /// non-rectangular loops. Applied only to LCVs with non-iterator types,
5257  /// since only such variables can be used in non-loop-invariant expressions.
5258  Expr *MinValue = nullptr;
5259  /// Maximum value that the loop control variable can have. Used to support
5260  /// non-rectangular loops. Applied only to LCVs with non-iterator types,
5261  /// since only such variables can be used in non-loop-invariant expressions.
5262  Expr *MaxValue = nullptr;
5263  /// true, if the lower bound depends on the outer loop control var.
5264  bool IsNonRectangularLB = false;
5265  /// true, if the upper bound depends on the outer loop control var.
5266  bool IsNonRectangularUB = false;
5267  /// Index of the loop this loop depends on and forms non-rectangular loop
5268  /// nest.
5269  unsigned LoopDependentIdx = 0;
5270  /// Final condition for the non-rectangular loop nest support. It is used
5271  /// to check whether the iterations for this particular counter have
5272  /// finished.
5273  Expr *FinalCondition = nullptr;
5274 };
5275 
5276 /// Helper class for checking canonical form of the OpenMP loops and
5277 /// extracting the iteration space of each loop in the loop nest, which will be
5278 /// used for IR generation.
5279 class OpenMPIterationSpaceChecker {
5280  /// Reference to Sema.
5281  Sema &SemaRef;
5282  /// Data-sharing stack.
5283  DSAStackTy &Stack;
5284  /// A location for diagnostics (when there is no better location).
5285  SourceLocation DefaultLoc;
5286  /// A location for diagnostics (when increment is not compatible).
5287  SourceLocation ConditionLoc;
5288  /// A source location for referring to loop init later.
5289  SourceRange InitSrcRange;
5290  /// A source location for referring to condition later.
5291  SourceRange ConditionSrcRange;
5292  /// A source location for referring to increment later.
5293  SourceRange IncrementSrcRange;
5294  /// Loop variable.
5295  ValueDecl *LCDecl = nullptr;
5296  /// Reference to loop variable.
5297  Expr *LCRef = nullptr;
5298  /// Lower bound (initializer for the var).
5299  Expr *LB = nullptr;
5300  /// Upper bound.
5301  Expr *UB = nullptr;
5302  /// Loop step (increment).
5303  Expr *Step = nullptr;
5304  /// This flag is true when condition is one of:
5305  /// Var < UB
5306  /// Var <= UB
5307  /// UB > Var
5308  /// UB >= Var
5309  /// This will have no value when the condition is !=
5310  llvm::Optional<bool> TestIsLessOp;
5311  /// This flag is true when condition is strict ( < or > ).
5312  bool TestIsStrictOp = false;
5313  /// This flag is true when step is subtracted on each iteration.
5314  bool SubtractStep = false;
5315  /// The outer loop counter this loop depends on (if any).
5316  const ValueDecl *DepDecl = nullptr;
5317  /// Contains the number of the loop (starting from 1) on which the loop
5318  /// counter init expression of this loop depends.
5319  Optional<unsigned> InitDependOnLC;
5320  /// Contains the number of the loop (starting from 1) on which the loop
5321  /// counter condition expression of this loop depends.
5322  Optional<unsigned> CondDependOnLC;
5323  /// Checks if the provided statement depends on the loop counter.
5324  Optional<unsigned> doesDependOnLoopCounter(const Stmt *S, bool IsInitializer);
5325  /// Original condition required for checking the exit condition of a
5326  /// non-rectangular loop.
5327  Expr *Condition = nullptr;
5328 
5329 public:
5330  OpenMPIterationSpaceChecker(Sema &SemaRef, DSAStackTy &Stack,
5331  SourceLocation DefaultLoc)
5332  : SemaRef(SemaRef), Stack(Stack), DefaultLoc(DefaultLoc),
5333  ConditionLoc(DefaultLoc) {}
5334  /// Check init-expr for canonical loop form and save loop counter
5335  /// variable - #Var and its initialization value - #LB.
5336  bool checkAndSetInit(Stmt *S, bool EmitDiags = true);
5337  /// Check test-expr for canonical form, save upper-bound (#UB), flags
5338  /// for less/greater and for strict/non-strict comparison.
5339  bool checkAndSetCond(Expr *S);
5340  /// Check incr-expr for canonical loop form and return true if it
5341  /// does not conform, otherwise save loop step (#Step).
5342  bool checkAndSetInc(Expr *S);
5343  /// Return the loop counter variable.
5344  ValueDecl *getLoopDecl() const { return LCDecl; }
5345  /// Return the reference expression to loop counter variable.
5346  Expr *getLoopDeclRefExpr() const { return LCRef; }
5347  /// Source range of the loop init.
5348  SourceRange getInitSrcRange() const { return InitSrcRange; }
5349  /// Source range of the loop condition.
5350  SourceRange getConditionSrcRange() const { return ConditionSrcRange; }
5351  /// Source range of the loop increment.
5352  SourceRange getIncrementSrcRange() const { return IncrementSrcRange; }
5353  /// True if the step should be subtracted.
5354  bool shouldSubtractStep() const { return SubtractStep; }
5355  /// True, if the compare operator is strict (<, > or !=).
5356  bool isStrictTestOp() const { return TestIsStrictOp; }
5357  /// Build the expression to calculate the number of iterations.
5358  Expr *buildNumIterations(
5359  Scope *S, ArrayRef<LoopIterationSpace> ResultIterSpaces, bool LimitedType,
5360  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) const;
5361  /// Build the precondition expression for the loops.
5362  Expr *
5363  buildPreCond(Scope *S, Expr *Cond,
5364  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) const;
5365  /// Build a reference expression to the counter to be used for codegen.
5366  DeclRefExpr *
5367  buildCounterVar(llvm::MapVector<const Expr *, DeclRefExpr *> &Captures,
5368  DSAStackTy &DSA) const;
5369  /// Build a reference expression to the private counter to be used for
5370  /// codegen.
5371  Expr *buildPrivateCounterVar() const;
5372  /// Build the initialization of the counter to be used for codegen.
5373  Expr *buildCounterInit() const;
5374  /// Build the step of the counter to be used for codegen.
5375  Expr *buildCounterStep() const;
5376  /// Build loop data with counter value for depend clauses in ordered
5377  /// directives.
5378  Expr *
5379  buildOrderedLoopData(Scope *S, Expr *Counter,
5380  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures,
5381  SourceLocation Loc, Expr *Inc = nullptr,
5382  OverloadedOperatorKind OOK = OO_Amp);
5383  /// Builds the minimum and maximum values for the loop counter.
5384  std::pair<Expr *, Expr *> buildMinMaxValues(
5385  Scope *S, llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) const;
5386  /// Builds final condition for the non-rectangular loops.
5387  Expr *buildFinalCondition(Scope *S) const;
5388  /// Return true if any expression is dependent.
5389  bool dependent() const;
5390  /// Returns true if the initializer forms non-rectangular loop.
5391  bool doesInitDependOnLC() const { return InitDependOnLC.hasValue(); }
5392  /// Returns true if the condition forms non-rectangular loop.
5393  bool doesCondDependOnLC() const { return CondDependOnLC.hasValue(); }
5394  /// Returns index of the loop we depend on (starting from 1), or 0 otherwise.
5395  unsigned getLoopDependentIdx() const {
5396  return InitDependOnLC.getValueOr(CondDependOnLC.getValueOr(0));
5397  }
5398 
5399 private:
5400  /// Check the right-hand side of an assignment in the increment
5401  /// expression.
5402  bool checkAndSetIncRHS(Expr *RHS);
5403  /// Helper to set loop counter variable and its initializer.
5404  bool setLCDeclAndLB(ValueDecl *NewLCDecl, Expr *NewDeclRefExpr, Expr *NewLB,
5405  bool EmitDiags);
5406  /// Helper to set upper bound.
5407  bool setUB(Expr *NewUB, llvm::Optional<bool> LessOp, bool StrictOp,
5408  SourceRange SR, SourceLocation SL);
5409  /// Helper to set loop increment.
5410  bool setStep(Expr *NewStep, bool Subtract);
5411 };
5412 
5413 bool OpenMPIterationSpaceChecker::dependent() const {
5414  if (!LCDecl) {
5415  assert(!LB && !UB && !Step);
5416  return false;
5417  }
5418  return LCDecl->getType()->isDependentType() ||
5419  (LB && LB->isValueDependent()) || (UB && UB->isValueDependent()) ||
5420  (Step && Step->isValueDependent());
5421 }
5422 
5423 bool OpenMPIterationSpaceChecker::setLCDeclAndLB(ValueDecl *NewLCDecl,
5424  Expr *NewLCRefExpr,
5425  Expr *NewLB, bool EmitDiags) {
5426  // State consistency checking to ensure correct usage.
5427  assert(LCDecl == nullptr && LB == nullptr && LCRef == nullptr &&
5428  UB == nullptr && Step == nullptr && !TestIsLessOp && !TestIsStrictOp);
5429  if (!NewLCDecl || !NewLB)
5430  return true;
5431  LCDecl = getCanonicalDecl(NewLCDecl);
5432  LCRef = NewLCRefExpr;
5433  if (auto *CE = dyn_cast_or_null<CXXConstructExpr>(NewLB))
5434  if (const CXXConstructorDecl *Ctor = CE->getConstructor())
5435  if ((Ctor->isCopyOrMoveConstructor() ||
5436  Ctor->isConvertingConstructor(/*AllowExplicit=*/false)) &&
5437  CE->getNumArgs() > 0 && CE->getArg(0) != nullptr)
5438  NewLB = CE->getArg(0)->IgnoreParenImpCasts();
5439  LB = NewLB;
5440  if (EmitDiags)
5441  InitDependOnLC = doesDependOnLoopCounter(LB, /*IsInitializer=*/true);
5442  return false;
5443 }
5444 
5445 bool OpenMPIterationSpaceChecker::setUB(Expr *NewUB,
5446  llvm::Optional<bool> LessOp,
5447  bool StrictOp, SourceRange SR,
5448  SourceLocation SL) {
5449  // State consistency checking to ensure correct usage.
5450  assert(LCDecl != nullptr && LB != nullptr && UB == nullptr &&
5451  Step == nullptr && !TestIsLessOp && !TestIsStrictOp);
5452  if (!NewUB)
5453  return true;
5454  UB = NewUB;
5455  if (LessOp)
5456  TestIsLessOp = LessOp;
5457  TestIsStrictOp = StrictOp;
5458  ConditionSrcRange = SR;
5459  ConditionLoc = SL;
5460  CondDependOnLC = doesDependOnLoopCounter(UB, /*IsInitializer=*/false);
5461  return false;
5462 }
5463 
5464 bool OpenMPIterationSpaceChecker::setStep(Expr *NewStep, bool Subtract) {
5465  // State consistency checking to ensure correct usage.
5466  assert(LCDecl != nullptr && LB != nullptr && Step == nullptr);
5467  if (!NewStep)
5468  return true;
5469  if (!NewStep->isValueDependent()) {
5470  // Check that the step is integer expression.
5471  SourceLocation StepLoc = NewStep->getBeginLoc();
5472  ExprResult Val = SemaRef.PerformOpenMPImplicitIntegerConversion(
5473  StepLoc, getExprAsWritten(NewStep));
5474  if (Val.isInvalid())
5475  return true;
5476  NewStep = Val.get();
5477 
5478  // OpenMP [2.6, Canonical Loop Form, Restrictions]
5479  // If test-expr is of form var relational-op b and relational-op is < or
5480  // <= then incr-expr must cause var to increase on each iteration of the
5481  // loop. If test-expr is of form var relational-op b and relational-op is
5482  // > or >= then incr-expr must cause var to decrease on each iteration of
5483  // the loop.
5484  // If test-expr is of form b relational-op var and relational-op is < or
5485  // <= then incr-expr must cause var to decrease on each iteration of the
5486  // loop. If test-expr is of form b relational-op var and relational-op is
5487  // > or >= then incr-expr must cause var to increase on each iteration of
5488  // the loop.
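  // Illustrative example (editor's addition): for 'i < N' the step must
  // increase 'i' (e.g. 'i += 2'); for 'i > 0' it must decrease 'i'
  // (e.g. 'i -= 1'). A zero or wrongly signed constant step is diagnosed below.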
5489  llvm::APSInt Result;
5490  bool IsConstant = NewStep->isIntegerConstantExpr(Result, SemaRef.Context);
5491  bool IsUnsigned = !NewStep->getType()->hasSignedIntegerRepresentation();
5492  bool IsConstNeg =
5493  IsConstant && Result.isSigned() && (Subtract != Result.isNegative());
5494  bool IsConstPos =
5495  IsConstant && Result.isSigned() && (Subtract == Result.isNegative());
5496  bool IsConstZero = IsConstant && !Result.getBoolValue();
5497 
5498  // != with increment is treated as <; != with decrement is treated as >
5499  if (!TestIsLessOp.hasValue())
5500  TestIsLessOp = IsConstPos || (IsUnsigned && !Subtract);
5501  if (UB && (IsConstZero ||
5502  (TestIsLessOp.getValue() ?
5503  (IsConstNeg || (IsUnsigned && Subtract)) :
5504  (IsConstPos || (IsUnsigned && !Subtract))))) {
5505  SemaRef.Diag(NewStep->getExprLoc(),
5506  diag::err_omp_loop_incr_not_compatible)
5507  << LCDecl << TestIsLessOp.getValue() << NewStep->getSourceRange();
5508  SemaRef.Diag(ConditionLoc,
5509  diag::note_omp_loop_cond_requres_compatible_incr)
5510  << TestIsLessOp.getValue() << ConditionSrcRange;
5511  return true;
5512  }
5513  if (TestIsLessOp.getValue() == Subtract) {
5514  NewStep =
5515  SemaRef.CreateBuiltinUnaryOp(NewStep->getExprLoc(), UO_Minus, NewStep)
5516  .get();
5517  Subtract = !Subtract;
5518  }
5519  }
5520 
5521  Step = NewStep;
5522  SubtractStep = Subtract;
5523  return false;
5524 }
5525 
5526 namespace {
5527 /// Checker for the non-rectangular loops. Checks if the initializer or
5528 /// condition expression references loop counter variable.
5529 class LoopCounterRefChecker final
5530  : public ConstStmtVisitor<LoopCounterRefChecker, bool> {
5531  Sema &SemaRef;
5532  DSAStackTy &Stack;
5533  const ValueDecl *CurLCDecl = nullptr;
5534  const ValueDecl *DepDecl = nullptr;
5535  const ValueDecl *PrevDepDecl = nullptr;
5536  bool IsInitializer = true;
5537  unsigned BaseLoopId = 0;
5538  bool checkDecl(const Expr *E, const ValueDecl *VD) {
5539  if (getCanonicalDecl(VD) == getCanonicalDecl(CurLCDecl)) {
5540  SemaRef.Diag(E->getExprLoc(), diag::err_omp_stmt_depends_on_loop_counter)
5541  << (IsInitializer ? 0 : 1);
5542  return false;
5543  }
5544  const auto &&Data = Stack.isLoopControlVariable(VD);
5545  // OpenMP, 2.9.1 Canonical Loop Form, Restrictions.
5546  // The type of the loop iterator on which we depend may not have a random
5547  // access iterator type.
5548  if (Data.first && VD->getType()->isRecordType()) {
5549  SmallString<128> Name;
5550  llvm::raw_svector_ostream OS(Name);
5551  VD->getNameForDiagnostic(OS, SemaRef.getPrintingPolicy(),
5552  /*Qualified=*/true);
5553  SemaRef.Diag(E->getExprLoc(),
5554  diag::err_omp_wrong_dependency_iterator_type)
5555  << OS.str();
5556  SemaRef.Diag(VD->getLocation(), diag::note_previous_decl) << VD;
5557  return false;
5558  }
5559  if (Data.first &&
5560  (DepDecl || (PrevDepDecl &&
5561  getCanonicalDecl(VD) != getCanonicalDecl(PrevDepDecl)))) {
5562  if (!DepDecl && PrevDepDecl)
5563  DepDecl = PrevDepDecl;
5564  SmallString<128> Name;
5565  llvm::raw_svector_ostream OS(Name);
5566  DepDecl->getNameForDiagnostic(OS, SemaRef.getPrintingPolicy(),
5567  /*Qualified=*/true);
5568  SemaRef.Diag(E->getExprLoc(),
5569  diag::err_omp_invariant_or_linear_dependency)
5570  << OS.str();
5571  return false;
5572  }
5573  if (Data.first) {
5574  DepDecl = VD;
5575  BaseLoopId = Data.first;
5576  }
5577  return Data.first;
5578  }
5579 
5580 public:
5581  bool VisitDeclRefExpr(const DeclRefExpr *E) {
5582  const ValueDecl *VD = E->getDecl();
5583  if (isa<VarDecl>(VD))
5584  return checkDecl(E, VD);
5585  return false;
5586  }
5587  bool VisitMemberExpr(const MemberExpr *E) {
5588  if (isa<CXXThisExpr>(E->getBase()->IgnoreParens())) {
5589  const ValueDecl *VD = E->getMemberDecl();
5590  if (isa<VarDecl>(VD) || isa<FieldDecl>(VD))
5591  return checkDecl(E, VD);
5592  }
5593  return false;
5594  }
5595  bool VisitStmt(const Stmt *S) {
5596  bool Res = false;
5597  for (const Stmt *Child : S->children())
5598  Res = (Child && Visit(Child)) || Res;
5599  return Res;
5600  }
5601  explicit LoopCounterRefChecker(Sema &SemaRef, DSAStackTy &Stack,
5602  const ValueDecl *CurLCDecl, bool IsInitializer,
5603  const ValueDecl *PrevDepDecl = nullptr)
5604  : SemaRef(SemaRef), Stack(Stack), CurLCDecl(CurLCDecl),
5605  PrevDepDecl(PrevDepDecl), IsInitializer(IsInitializer) {}
5606  unsigned getBaseLoopId() const {
5607  assert(CurLCDecl && "Expected loop dependency.");
5608  return BaseLoopId;
5609  }
5610  const ValueDecl *getDepDecl() const {
5611  assert(CurLCDecl && "Expected loop dependency.");
5612  return DepDecl;
5613  }
5614 };
5615 } // namespace
5616 
5617 Optional<unsigned>
5618 OpenMPIterationSpaceChecker::doesDependOnLoopCounter(const Stmt *S,
5619  bool IsInitializer) {
5620  // Check for the non-rectangular loops.
5621  LoopCounterRefChecker LoopStmtChecker(SemaRef, Stack, LCDecl, IsInitializer,
5622  DepDecl);
5623  if (LoopStmtChecker.Visit(S)) {
5624  DepDecl = LoopStmtChecker.getDepDecl();
5625  return LoopStmtChecker.getBaseLoopId();
5626  }
5627  return llvm::None;
5628 }
5629 
5630 bool OpenMPIterationSpaceChecker::checkAndSetInit(Stmt *S, bool EmitDiags) {
5631  // Check init-expr for canonical loop form and save loop counter
5632  // variable - #Var and its initialization value - #LB.
5633  // OpenMP [2.6] Canonical loop form. init-expr may be one of the following:
5634  // var = lb
5635  // integer-type var = lb
5636  // random-access-iterator-type var = lb
5637  // pointer-type var = lb
5638  //
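  // Illustrative example (editor's addition): 'int i = 0', 'i = lb', and
  // 'it = v.begin()' all match the canonical init forms listed above.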
5639  if (!S) {
5640  if (EmitDiags) {
5641  SemaRef.Diag(DefaultLoc, diag::err_omp_loop_not_canonical_init);
5642  }
5643  return true;
5644  }
5645  if (auto *ExprTemp = dyn_cast<ExprWithCleanups>(S))
5646  if (!ExprTemp->cleanupsHaveSideEffects())
5647  S = ExprTemp->getSubExpr();
5648 
5649  InitSrcRange = S->getSourceRange();
5650  if (Expr *E = dyn_cast<Expr>(S))
5651  S = E->IgnoreParens();
5652  if (auto *BO = dyn_cast<BinaryOperator>(S)) {
5653  if (BO->getOpcode() == BO_Assign) {
5654  Expr *LHS = BO->getLHS()->IgnoreParens();
5655  if (auto *DRE = dyn_cast<DeclRefExpr>(LHS)) {
5656  if (auto *CED = dyn_cast<OMPCapturedExprDecl>(DRE->getDecl()))
5657  if (auto *ME = dyn_cast<MemberExpr>(getExprAsWritten(CED->getInit())))
5658  return setLCDeclAndLB(ME->getMemberDecl(), ME, BO->getRHS(),
5659  EmitDiags);
5660  return setLCDeclAndLB(DRE->getDecl(), DRE, BO->getRHS(), EmitDiags);
5661  }
5662  if (auto *ME = dyn_cast<MemberExpr>(LHS)) {
5663  if (ME->isArrow() &&
5664  isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts()))
5665  return setLCDeclAndLB(ME->getMemberDecl(), ME, BO->getRHS(),
5666  EmitDiags);
5667  }
5668  }
5669  } else if (auto *DS = dyn_cast<DeclStmt>(S)) {
5670  if (DS->isSingleDecl()) {
5671  if (auto *Var = dyn_cast_or_null<VarDecl>(DS->getSingleDecl())) {
5672  if (Var->hasInit() && !Var->getType()->isReferenceType()) {
5673  // Accept non-canonical init form here but emit ext. warning.
5674  if (Var->getInitStyle() != VarDecl::CInit && EmitDiags)
5675  SemaRef.Diag(S->getBeginLoc(),
5676  diag::ext_omp_loop_not_canonical_init)
5677  << S->getSourceRange();
5678  return setLCDeclAndLB(
5679  Var,
5680  buildDeclRefExpr(SemaRef, Var,
5681  Var->getType().getNonReferenceType(),
5682  DS->getBeginLoc()),
5683  Var->getInit(), EmitDiags);
5684  }
5685  }
5686  }
5687  } else if (auto *CE = dyn_cast<CXXOperatorCallExpr>(S)) {
5688  if (CE->getOperator() == OO_Equal) {
5689  Expr *LHS = CE->getArg(0);
5690  if (auto *DRE = dyn_cast<DeclRefExpr>(LHS)) {
5691  if (auto *CED = dyn_cast<OMPCapturedExprDecl>(DRE->getDecl()))
5692  if (auto *ME = dyn_cast<MemberExpr>(getExprAsWritten(CED->getInit())))
5693  return setLCDeclAndLB(ME->getMemberDecl(), ME, CE->getArg(1),
5694  EmitDiags);
5695  return setLCDeclAndLB(DRE->getDecl(), DRE, CE->getArg(1), EmitDiags);
5696  }
5697  if (auto *ME = dyn_cast<MemberExpr>(LHS)) {
5698  if (ME->isArrow() &&
5699  isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts()))
5700  return setLCDeclAndLB(ME->getMemberDecl(), ME, CE->getArg(1),
5701  EmitDiags);
5702  }
5703  }
5704  }
5705 
5706  if (dependent() || SemaRef.CurContext->isDependentContext())
5707  return false;
5708  if (EmitDiags) {
5709  SemaRef.Diag(S->getBeginLoc(), diag::err_omp_loop_not_canonical_init)
5710  << S->getSourceRange();
5711  }
5712  return true;
5713 }
5714 
5715 /// Ignore parentheses, implicit casts, and copy constructors, and return the
5716 /// variable (which may be the loop variable) if possible.
5717 static const ValueDecl *getInitLCDecl(const Expr *E) {
5718  if (!E)
5719  return nullptr;
5720  E = getExprAsWritten(E);
5721  if (const auto *CE = dyn_cast_or_null<CXXConstructExpr>(E))
5722  if (const CXXConstructorDecl *Ctor = CE->getConstructor())
5723  if ((Ctor->isCopyOrMoveConstructor() ||
5724  Ctor->isConvertingConstructor(/*AllowExplicit=*/false)) &&
5725  CE->getNumArgs() > 0 && CE->getArg(0) != nullptr)
5726  E = CE->getArg(0)->IgnoreParenImpCasts();
5727  if (const auto *DRE = dyn_cast_or_null<DeclRefExpr>(E)) {
5728  if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()))
5729  return getCanonicalDecl(VD);
5730  }
5731  if (const auto *ME = dyn_cast_or_null<MemberExpr>(E))
5732  if (ME->isArrow() && isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts()))
5733  return getCanonicalDecl(ME->getMemberDecl());
5734  return nullptr;
5735 }
5736 
5737 bool OpenMPIterationSpaceChecker::checkAndSetCond(Expr *S) {
5738  // Check test-expr for canonical form, save upper-bound UB, flags for
5739  // less/greater and for strict/non-strict comparison.
5740  // OpenMP [2.9] Canonical loop form. Test-expr may be one of the following:
5741  // var relational-op b
5742  // b relational-op var
5743  //
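  // Illustrative example (editor's addition): 'i < N', 'N > i', 'i <= N - 1',
  // and (from OpenMP 5.0) 'i != N' are accepted as canonical test expressions.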
5744  bool IneqCondIsCanonical = SemaRef.getLangOpts().OpenMP >= 50;
5745  if (!S) {
5746  SemaRef.Diag(DefaultLoc, diag::err_omp_loop_not_canonical_cond)
5747  << (IneqCondIsCanonical ? 1 : 0) << LCDecl;
5748  return true;
5749  }
5750  Condition = S;
5751  S = getExprAsWritten(S);
5752  SourceLocation CondLoc = S->getBeginLoc();
5753  if (auto *BO = dyn_cast<BinaryOperator>(S)) {
5754  if (BO->isRelationalOp()) {
5755  if (getInitLCDecl(BO->getLHS()) == LCDecl)
5756  return setUB(BO->getRHS(),
5757  (BO->getOpcode() == BO_LT || BO->getOpcode() == BO_LE),
5758  (BO->getOpcode() == BO_LT || BO->getOpcode() == BO_GT),
5759  BO->getSourceRange(), BO->getOperatorLoc());
5760  if (getInitLCDecl(BO->getRHS()) == LCDecl)
5761  return setUB(BO->getLHS(),
5762  (BO->getOpcode() == BO_GT || BO->getOpcode() == BO_GE),
5763  (BO->getOpcode() == BO_LT || BO->getOpcode() == BO_GT),
5764  BO->getSourceRange(), BO->getOperatorLoc());
5765  } else if (IneqCondIsCanonical && BO->getOpcode() == BO_NE)
5766  return setUB(
5767  getInitLCDecl(BO->getLHS()) == LCDecl ? BO->getRHS() : BO->getLHS(),
5768  /*LessOp=*/llvm::None,
5769  /*StrictOp=*/true, BO->getSourceRange(), BO->getOperatorLoc());
5770  } else if (auto *CE = dyn_cast<CXXOperatorCallExpr>(S)) {
5771  if (CE->getNumArgs() == 2) {
5772  auto Op = CE->getOperator();
5773  switch (Op) {
5774  case OO_Greater:
5775  case OO_GreaterEqual:
5776  case OO_Less:
5777  case OO_LessEqual:
5778  if (getInitLCDecl(CE->getArg(0)) == LCDecl)
5779  return setUB(CE->getArg(1), Op == OO_Less || Op == OO_LessEqual,
5780  Op == OO_Less || Op == OO_Greater, CE->getSourceRange(),
5781  CE->getOperatorLoc());
5782  if (getInitLCDecl(CE->getArg(1)) == LCDecl)
5783  return setUB(CE->getArg(0), Op == OO_Greater || Op == OO_GreaterEqual,
5784  Op == OO_Less || Op == OO_Greater, CE->getSourceRange(),
5785  CE->getOperatorLoc());
5786  break;
5787  case OO_ExclaimEqual:
5788  if (IneqCondIsCanonical)
5789  return setUB(getInitLCDecl(CE->getArg(0)) == LCDecl ? CE->getArg(1)
5790  : CE->getArg(0),
5791  /*LessOp=*/llvm::None,
5792  /*StrictOp=*/true, CE->getSourceRange(),
5793  CE->getOperatorLoc());
5794  break;
5795  default:
5796  break;
5797  }
5798  }
5799  }
5800  if (dependent() || SemaRef.CurContext->isDependentContext())
5801  return false;
5802  SemaRef.Diag(CondLoc, diag::err_omp_loop_not_canonical_cond)
5803  << (IneqCondIsCanonical ? 1 : 0) << S->getSourceRange() << LCDecl;
5804  return true;
5805 }
5806 
5807 bool OpenMPIterationSpaceChecker::checkAndSetIncRHS(Expr *RHS) {
5808  // RHS of canonical loop form increment can be:
5809  // var + incr
5810  // incr + var
5811  // var - incr
5812  //
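  // Illustrative example (editor's addition): in 'i = i + 2' or 'i = 2 + i'
  // the RHS matches 'var + incr'; in 'i = i - 1' it matches 'var - incr'.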
5813