clang  10.0.0svn
SemaOpenMP.cpp
1 //===--- SemaOpenMP.cpp - Semantic Analysis for OpenMP constructs ---------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements semantic analysis for OpenMP directives and
10 /// clauses.
11 ///
12 //===----------------------------------------------------------------------===//
13 
14 #include "TreeTransform.h"
15 #include "clang/AST/ASTContext.h"
18 #include "clang/AST/Decl.h"
19 #include "clang/AST/DeclCXX.h"
20 #include "clang/AST/DeclOpenMP.h"
21 #include "clang/AST/StmtCXX.h"
22 #include "clang/AST/StmtOpenMP.h"
23 #include "clang/AST/StmtVisitor.h"
24 #include "clang/AST/TypeOrdering.h"
27 #include "clang/Sema/Lookup.h"
28 #include "clang/Sema/Scope.h"
29 #include "clang/Sema/ScopeInfo.h"
31 #include "llvm/ADT/PointerEmbeddedInt.h"
32 using namespace clang;
33 
34 //===----------------------------------------------------------------------===//
35 // Stack of data-sharing attributes for variables
36 //===----------------------------------------------------------------------===//
37 
38 static const Expr *checkMapClauseExpressionBase(
39     Sema &SemaRef, Expr *E,
40     OMPClauseMappableExprCommon::MappableExprComponentListRef CurComponents,
41     OpenMPClauseKind CKind, bool NoDiagnose);
42 
43 namespace {
44 /// Default data-sharing attributes that can be applied to a directive.
45 enum DefaultDataSharingAttributes {
46  DSA_unspecified = 0, /// Data sharing attribute not specified.
47  DSA_none = 1 << 0, /// Default data sharing attribute 'none'.
48  DSA_shared = 1 << 1, /// Default data sharing attribute 'shared'.
49 };
50 
51 /// Attributes of the 'defaultmap' clause.
52 enum DefaultMapAttributes {
53  DMA_unspecified, /// Default mapping is not specified.
54  DMA_tofrom_scalar, /// Default mapping is 'tofrom:scalar'.
55 };
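// A minimal sketch of the user-level clauses the two enums above model
// (illustrative only, not part of the original source):
//
//   #pragma omp parallel default(none)            // DefaultAttr == DSA_none
//   #pragma omp parallel default(shared)          // DefaultAttr == DSA_shared
//   #pragma omp target defaultmap(tofrom:scalar)  // DefaultMapAttr == DMA_tofrom_scalar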
56 
57 /// Stack for tracking declarations used in OpenMP directives and
58 /// clauses and their data-sharing attributes.
59 class DSAStackTy {
60 public:
61  struct DSAVarData {
62   OpenMPDirectiveKind DKind = OMPD_unknown;
63   OpenMPClauseKind CKind = OMPC_unknown;
64  const Expr *RefExpr = nullptr;
65  DeclRefExpr *PrivateCopy = nullptr;
66  SourceLocation ImplicitDSALoc;
67  DSAVarData() = default;
68  DSAVarData(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind,
69  const Expr *RefExpr, DeclRefExpr *PrivateCopy,
70  SourceLocation ImplicitDSALoc)
71  : DKind(DKind), CKind(CKind), RefExpr(RefExpr),
72  PrivateCopy(PrivateCopy), ImplicitDSALoc(ImplicitDSALoc) {}
73  };
74  using OperatorOffsetTy =
75      llvm::SmallVector<std::pair<Expr *, OverloadedOperatorKind>, 4>;
76  using DoacrossDependMapTy =
77  llvm::DenseMap<OMPDependClause *, OperatorOffsetTy>;
78 
79 private:
80  struct DSAInfo {
81  OpenMPClauseKind Attributes = OMPC_unknown;
82  /// Pointer to a reference expression and a flag which shows that the
83  /// variable is marked as lastprivate(true) or not (false).
84  llvm::PointerIntPair<const Expr *, 1, bool> RefExpr;
85  DeclRefExpr *PrivateCopy = nullptr;
86  };
87  using DeclSAMapTy = llvm::SmallDenseMap<const ValueDecl *, DSAInfo, 8>;
88  using AlignedMapTy = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 8>;
89  using LCDeclInfo = std::pair<unsigned, VarDecl *>;
90  using LoopControlVariablesMapTy =
91  llvm::SmallDenseMap<const ValueDecl *, LCDeclInfo, 8>;
92  /// Struct that associates mappable expression components with the clause
93  /// kind where they were found.
94  struct MappedExprComponentTy {
95   OMPClauseMappableExprCommon::MappableExprComponentLists Components;
96   OpenMPClauseKind Kind = OMPC_unknown;
97  };
98  using MappedExprComponentsTy =
99  llvm::DenseMap<const ValueDecl *, MappedExprComponentTy>;
100  using CriticalsWithHintsTy =
101  llvm::StringMap<std::pair<const OMPCriticalDirective *, llvm::APSInt>>;
102  struct ReductionData {
103  using BOKPtrType = llvm::PointerEmbeddedInt<BinaryOperatorKind, 16>;
104  SourceRange ReductionRange;
105  llvm::PointerUnion<const Expr *, BOKPtrType> ReductionOp;
106  ReductionData() = default;
107  void set(BinaryOperatorKind BO, SourceRange RR) {
108  ReductionRange = RR;
109  ReductionOp = BO;
110  }
111  void set(const Expr *RefExpr, SourceRange RR) {
112  ReductionRange = RR;
113  ReductionOp = RefExpr;
114  }
115  };
116  using DeclReductionMapTy =
117  llvm::SmallDenseMap<const ValueDecl *, ReductionData, 4>;
118 
119  struct SharingMapTy {
120  DeclSAMapTy SharingMap;
121  DeclReductionMapTy ReductionMap;
122  AlignedMapTy AlignedMap;
123  MappedExprComponentsTy MappedExprComponents;
124  LoopControlVariablesMapTy LCVMap;
125  DefaultDataSharingAttributes DefaultAttr = DSA_unspecified;
126  SourceLocation DefaultAttrLoc;
127  DefaultMapAttributes DefaultMapAttr = DMA_unspecified;
128  SourceLocation DefaultMapAttrLoc;
129  OpenMPDirectiveKind Directive = OMPD_unknown;
130  DeclarationNameInfo DirectiveName;
131  Scope *CurScope = nullptr;
132  SourceLocation ConstructLoc;
133  /// Set of 'depend' clauses with 'sink|source' dependence kind. Required to
134  /// get the data (loop counters etc.) about enclosing loop-based construct.
135  /// This data is required during codegen.
136  DoacrossDependMapTy DoacrossDepends;
137  /// The optional argument of the 'ordered' clause (if any) together with
138  /// the 'ordered' clause itself; empty if the region has no 'ordered'
139  /// clause.
140  llvm::Optional<std::pair<const Expr *, OMPOrderedClause *>> OrderedRegion;
141  unsigned AssociatedLoops = 1;
142  bool HasMutipleLoops = false;
143  const Decl *PossiblyLoopCounter = nullptr;
144  bool NowaitRegion = false;
145  bool CancelRegion = false;
146  bool LoopStart = false;
147  bool BodyComplete = false;
148  SourceLocation InnerTeamsRegionLoc;
149  /// Reference to the taskgroup task_reduction reference expression.
150  Expr *TaskgroupReductionRef = nullptr;
151  llvm::DenseSet<QualType> MappedClassesQualTypes;
152  /// List of globals marked as declare target link in this target region
153  /// (isOpenMPTargetExecutionDirective(Directive) == true).
154  llvm::SmallVector<DeclRefExpr *, 4> DeclareTargetLinkVarDecls;
155  SharingMapTy(OpenMPDirectiveKind DKind, DeclarationNameInfo Name,
156  Scope *CurScope, SourceLocation Loc)
157  : Directive(DKind), DirectiveName(Name), CurScope(CurScope),
158  ConstructLoc(Loc) {}
159  SharingMapTy() = default;
160  };
161 
162  using StackTy = SmallVector<SharingMapTy, 4>;
163 
164  /// Stack of used declarations and their data-sharing attributes.
165  DeclSAMapTy Threadprivates;
166  const FunctionScopeInfo *CurrentNonCapturingFunctionScope = nullptr;
167  SmallVector<std::pair<StackTy, const FunctionScopeInfo *>, 4> Stack;
168  /// true, if the check for DSA must be performed against the parent
169  /// directive; false, if against the current directive.
170  OpenMPClauseKind ClauseKindMode = OMPC_unknown;
171  Sema &SemaRef;
172  bool ForceCapturing = false;
173  /// true if all the variables in the target executable directives must be
174  /// captured by reference.
175  bool ForceCaptureByReferenceInTargetExecutable = false;
176  CriticalsWithHintsTy Criticals;
177  unsigned IgnoredStackElements = 0;
178 
179  /// Iterators over the stack iterate in order from innermost to outermost
180  /// directive.
181  using const_iterator = StackTy::const_reverse_iterator;
182  const_iterator begin() const {
183  return Stack.empty() ? const_iterator()
184  : Stack.back().first.rbegin() + IgnoredStackElements;
185  }
186  const_iterator end() const {
187  return Stack.empty() ? const_iterator() : Stack.back().first.rend();
188  }
189  using iterator = StackTy::reverse_iterator;
190  iterator begin() {
191  return Stack.empty() ? iterator()
192  : Stack.back().first.rbegin() + IgnoredStackElements;
193  }
194  iterator end() {
195  return Stack.empty() ? iterator() : Stack.back().first.rend();
196  }
197 
198  // Convenience operations to get at the elements of the stack.
199 
200  bool isStackEmpty() const {
201  return Stack.empty() ||
202  Stack.back().second != CurrentNonCapturingFunctionScope ||
203  Stack.back().first.size() <= IgnoredStackElements;
204  }
205  size_t getStackSize() const {
206  return isStackEmpty() ? 0
207  : Stack.back().first.size() - IgnoredStackElements;
208  }
209 
210  SharingMapTy *getTopOfStackOrNull() {
211  size_t Size = getStackSize();
212  if (Size == 0)
213  return nullptr;
214  return &Stack.back().first[Size - 1];
215  }
216  const SharingMapTy *getTopOfStackOrNull() const {
217  return const_cast<DSAStackTy&>(*this).getTopOfStackOrNull();
218  }
219  SharingMapTy &getTopOfStack() {
220  assert(!isStackEmpty() && "no current directive");
221  return *getTopOfStackOrNull();
222  }
223  const SharingMapTy &getTopOfStack() const {
224  return const_cast<DSAStackTy&>(*this).getTopOfStack();
225  }
226 
227  SharingMapTy *getSecondOnStackOrNull() {
228  size_t Size = getStackSize();
229  if (Size <= 1)
230  return nullptr;
231  return &Stack.back().first[Size - 2];
232  }
233  const SharingMapTy *getSecondOnStackOrNull() const {
234  return const_cast<DSAStackTy&>(*this).getSecondOnStackOrNull();
235  }
236 
237  /// Get the stack element at a certain level (previously returned by
238  /// \c getNestingLevel).
239  ///
240  /// Note that nesting levels count from outermost to innermost, and this is
241  /// the reverse of our iteration order where new inner levels are pushed at
242  /// the front of the stack.
243  SharingMapTy &getStackElemAtLevel(unsigned Level) {
244  assert(Level < getStackSize() && "no such stack element");
245  return Stack.back().first[Level];
246  }
247  const SharingMapTy &getStackElemAtLevel(unsigned Level) const {
248  return const_cast<DSAStackTy&>(*this).getStackElemAtLevel(Level);
249  }
250 
251  DSAVarData getDSA(const_iterator &Iter, ValueDecl *D) const;
252 
253  /// Checks if the variable is local to the OpenMP region.
254  bool isOpenMPLocal(VarDecl *D, const_iterator Iter) const;
255 
256  /// Vector of previously declared 'requires' directives.
257  SmallVector<const OMPRequiresDecl *, 2> RequiresDecls;
258  /// omp_allocator_handle_t type.
259  QualType OMPAllocatorHandleT;
260  /// Expression for the predefined allocators.
261  Expr *OMPPredefinedAllocators[OMPAllocateDeclAttr::OMPUserDefinedMemAlloc] = {
262  nullptr};
263  /// Vector of previously encountered target directives
264  SmallVector<SourceLocation, 2> TargetLocations;
265 
266 public:
267  explicit DSAStackTy(Sema &S) : SemaRef(S) {}
268 
269  /// Sets omp_allocator_handle_t type.
270  void setOMPAllocatorHandleT(QualType Ty) { OMPAllocatorHandleT = Ty; }
271  /// Gets omp_allocator_handle_t type.
272  QualType getOMPAllocatorHandleT() const { return OMPAllocatorHandleT; }
273  /// Sets the given default allocator.
274  void setAllocator(OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind,
275  Expr *Allocator) {
276  OMPPredefinedAllocators[AllocatorKind] = Allocator;
277  }
278  /// Returns the specified default allocator.
279  Expr *getAllocator(OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind) const {
280  return OMPPredefinedAllocators[AllocatorKind];
281  }
282 
283  bool isClauseParsingMode() const { return ClauseKindMode != OMPC_unknown; }
284  OpenMPClauseKind getClauseParsingMode() const {
285  assert(isClauseParsingMode() && "Must be in clause parsing mode.");
286  return ClauseKindMode;
287  }
288  void setClauseParsingMode(OpenMPClauseKind K) { ClauseKindMode = K; }
289 
290  bool isBodyComplete() const {
291  const SharingMapTy *Top = getTopOfStackOrNull();
292  return Top && Top->BodyComplete;
293  }
294  void setBodyComplete() {
295  getTopOfStack().BodyComplete = true;
296  }
297 
298  bool isForceVarCapturing() const { return ForceCapturing; }
299  void setForceVarCapturing(bool V) { ForceCapturing = V; }
300 
301  void setForceCaptureByReferenceInTargetExecutable(bool V) {
302  ForceCaptureByReferenceInTargetExecutable = V;
303  }
304  bool isForceCaptureByReferenceInTargetExecutable() const {
305  return ForceCaptureByReferenceInTargetExecutable;
306  }
307 
308  void push(OpenMPDirectiveKind DKind, const DeclarationNameInfo &DirName,
309  Scope *CurScope, SourceLocation Loc) {
310  assert(!IgnoredStackElements &&
311  "cannot change stack while ignoring elements");
312  if (Stack.empty() ||
313  Stack.back().second != CurrentNonCapturingFunctionScope)
314  Stack.emplace_back(StackTy(), CurrentNonCapturingFunctionScope);
315  Stack.back().first.emplace_back(DKind, DirName, CurScope, Loc);
316  Stack.back().first.back().DefaultAttrLoc = Loc;
317  }
318 
319  void pop() {
320  assert(!IgnoredStackElements &&
321  "cannot change stack while ignoring elements");
322  assert(!Stack.back().first.empty() &&
323  "Data-sharing attributes stack is empty!");
324  Stack.back().first.pop_back();
325  }
326 
327  /// RAII object to temporarily leave the scope of a directive when we want to
328  /// logically operate in its parent.
329  class ParentDirectiveScope {
330  DSAStackTy &Self;
331  bool Active;
332  public:
333  ParentDirectiveScope(DSAStackTy &Self, bool Activate)
334  : Self(Self), Active(false) {
335  if (Activate)
336  enable();
337  }
338  ~ParentDirectiveScope() { disable(); }
339  void disable() {
340  if (Active) {
341  --Self.IgnoredStackElements;
342  Active = false;
343  }
344  }
345  void enable() {
346  if (!Active) {
347  ++Self.IgnoredStackElements;
348  Active = true;
349  }
350  }
351  };
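// A minimal usage sketch for the RAII helper above, assuming a DSAStackTy
// instance named Stack (illustrative only; real call sites may differ):
//
//   {
//     DSAStackTy::ParentDirectiveScope InParent(Stack, /*Activate=*/true);
//     // Queries such as Stack.getCurrentDirective() now observe the parent.
//   } // The destructor re-enters the current directive.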
352 
353  /// Marks that we have started loop parsing.
354  void loopInit() {
355  assert(isOpenMPLoopDirective(getCurrentDirective()) &&
356  "Expected loop-based directive.");
357  getTopOfStack().LoopStart = true;
358  }
359  /// Start capturing of the variables in the loop context.
360  void loopStart() {
361  assert(isOpenMPLoopDirective(getCurrentDirective()) &&
362  "Expected loop-based directive.");
363  getTopOfStack().LoopStart = false;
364  }
365  /// Returns true if the variables are captured, false otherwise.
366  bool isLoopStarted() const {
367  assert(isOpenMPLoopDirective(getCurrentDirective()) &&
368  "Expected loop-based directive.");
369  return !getTopOfStack().LoopStart;
370  }
371  /// Marks (or clears) the declaration as a possible loop counter.
372  void resetPossibleLoopCounter(const Decl *D = nullptr) {
373  getTopOfStack().PossiblyLoopCounter =
374  D ? D->getCanonicalDecl() : D;
375  }
376  /// Gets the possible loop counter decl.
377  const Decl *getPossiblyLoopCunter() const {
378  return getTopOfStack().PossiblyLoopCounter;
379  }
380  /// Start new OpenMP region stack in new non-capturing function.
381  void pushFunction() {
382  assert(!IgnoredStackElements &&
383  "cannot change stack while ignoring elements");
384  const FunctionScopeInfo *CurFnScope = SemaRef.getCurFunction();
385  assert(!isa<CapturingScopeInfo>(CurFnScope));
386  CurrentNonCapturingFunctionScope = CurFnScope;
387  }
388  /// Pop region stack for non-capturing function.
389  void popFunction(const FunctionScopeInfo *OldFSI) {
390  assert(!IgnoredStackElements &&
391  "cannot change stack while ignoring elements");
392  if (!Stack.empty() && Stack.back().second == OldFSI) {
393  assert(Stack.back().first.empty());
394  Stack.pop_back();
395  }
396  CurrentNonCapturingFunctionScope = nullptr;
397  for (const FunctionScopeInfo *FSI : llvm::reverse(SemaRef.FunctionScopes)) {
398  if (!isa<CapturingScopeInfo>(FSI)) {
399  CurrentNonCapturingFunctionScope = FSI;
400  break;
401  }
402  }
403  }
404 
405  void addCriticalWithHint(const OMPCriticalDirective *D, llvm::APSInt Hint) {
406  Criticals.try_emplace(D->getDirectiveName().getAsString(), D, Hint);
407  }
408  const std::pair<const OMPCriticalDirective *, llvm::APSInt>
409  getCriticalWithHint(const DeclarationNameInfo &Name) const {
410  auto I = Criticals.find(Name.getAsString());
411  if (I != Criticals.end())
412  return I->second;
413  return std::make_pair(nullptr, llvm::APSInt());
414  }
415  /// If an 'aligned' declaration for the given variable \a D was not seen
416  /// yet, add it and return nullptr; otherwise return the previous
417  /// occurrence's expression for diagnostics.
418  const Expr *addUniqueAligned(const ValueDecl *D, const Expr *NewDE);
419 
420  /// Register specified variable as loop control variable.
421  void addLoopControlVariable(const ValueDecl *D, VarDecl *Capture);
422  /// Check if the specified variable is a loop control variable for
423  /// current region.
424  /// \return The index of the loop control variable in the list of associated
425  /// for-loops (from outer to inner).
426  const LCDeclInfo isLoopControlVariable(const ValueDecl *D) const;
427  /// Check if the specified variable is a loop control variable for
428  /// parent region.
429  /// \return The index of the loop control variable in the list of associated
430  /// for-loops (from outer to inner).
431  const LCDeclInfo isParentLoopControlVariable(const ValueDecl *D) const;
432  /// Get the loop control variable for the I-th loop (or nullptr) in
433  /// parent directive.
434  const ValueDecl *getParentLoopControlVariable(unsigned I) const;
435 
436  /// Adds explicit data sharing attribute to the specified declaration.
437  void addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
438  DeclRefExpr *PrivateCopy = nullptr);
439 
440  /// Adds additional information for the reduction items with the reduction id
441  /// represented as an operator.
442  void addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
443  BinaryOperatorKind BOK);
444  /// Adds additional information for the reduction items with the reduction id
445  /// represented as reduction identifier.
446  void addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
447  const Expr *ReductionRef);
448  /// Returns the location and reduction operation from the innermost parent
449  /// region for the given \p D.
450  const DSAVarData
451  getTopMostTaskgroupReductionData(const ValueDecl *D, SourceRange &SR,
452  BinaryOperatorKind &BOK,
453  Expr *&TaskgroupDescriptor) const;
454  /// Returns the location and reduction operation from the innermost parent
455  /// region for the given \p D.
456  const DSAVarData
457  getTopMostTaskgroupReductionData(const ValueDecl *D, SourceRange &SR,
458  const Expr *&ReductionRef,
459  Expr *&TaskgroupDescriptor) const;
460  /// Return reduction reference expression for the current taskgroup.
461  Expr *getTaskgroupReductionRef() const {
462  assert(getTopOfStack().Directive == OMPD_taskgroup &&
463  "taskgroup reference expression requested for non taskgroup "
464  "directive.");
465  return getTopOfStack().TaskgroupReductionRef;
466  }
467  /// Checks if the given \p VD declaration is actually a taskgroup reduction
468  /// descriptor variable at the \p Level of OpenMP regions.
469  bool isTaskgroupReductionRef(const ValueDecl *VD, unsigned Level) const {
470  return getStackElemAtLevel(Level).TaskgroupReductionRef &&
471  cast<DeclRefExpr>(getStackElemAtLevel(Level).TaskgroupReductionRef)
472  ->getDecl() == VD;
473  }
474 
475  /// Returns data sharing attributes from top of the stack for the
476  /// specified declaration.
477  const DSAVarData getTopDSA(ValueDecl *D, bool FromParent);
478  /// Returns data-sharing attributes for the specified declaration.
479  const DSAVarData getImplicitDSA(ValueDecl *D, bool FromParent) const;
480  /// Checks if the specified variable has data-sharing attributes which
481  /// match the specified \a CPred predicate in any directive which matches
482  /// the \a DPred predicate.
483  const DSAVarData
484  hasDSA(ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
485  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
486  bool FromParent) const;
487  /// Checks if the specified variable has data-sharing attributes which
488  /// match the specified \a CPred predicate in any innermost directive which
489  /// matches the \a DPred predicate.
490  const DSAVarData
491  hasInnermostDSA(ValueDecl *D,
492  const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
493  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
494  bool FromParent) const;
495  /// Checks if the specified variable has explicit data-sharing attributes
496  /// which match the specified \a CPred predicate at the specified OpenMP
497  /// region.
498  bool hasExplicitDSA(const ValueDecl *D,
499  const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
500  unsigned Level, bool NotLastprivate = false) const;
501 
502  /// Returns true if the directive at level \a Level matches the specified
503  /// \a DPred predicate.
504  bool hasExplicitDirective(
505  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
506  unsigned Level) const;
507 
508  /// Finds a directive which matches the specified \a DPred predicate.
509  bool hasDirective(
510      const llvm::function_ref<bool(
511          OpenMPDirectiveKind, const DeclarationNameInfo &, SourceLocation)>
512          DPred,
513      bool FromParent) const;
514 
515  /// Returns currently analyzed directive.
516  OpenMPDirectiveKind getCurrentDirective() const {
517  const SharingMapTy *Top = getTopOfStackOrNull();
518  return Top ? Top->Directive : OMPD_unknown;
519  }
520  /// Returns directive kind at specified level.
521  OpenMPDirectiveKind getDirective(unsigned Level) const {
522  assert(!isStackEmpty() && "No directive at specified level.");
523  return getStackElemAtLevel(Level).Directive;
524  }
525  /// Returns the capture region at the specified level.
526  OpenMPDirectiveKind getCaptureRegion(unsigned Level,
527  unsigned OpenMPCaptureLevel) const {
528    SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
529    getOpenMPCaptureRegions(CaptureRegions, getDirective(Level));
530  return CaptureRegions[OpenMPCaptureLevel];
531  }
532  /// Returns parent directive.
533  OpenMPDirectiveKind getParentDirective() const {
534  const SharingMapTy *Parent = getSecondOnStackOrNull();
535  return Parent ? Parent->Directive : OMPD_unknown;
536  }
537 
538  /// Add requires decl to internal vector
539  void addRequiresDecl(OMPRequiresDecl *RD) {
540  RequiresDecls.push_back(RD);
541  }
542 
543  /// Checks if the defined 'requires' directive has specified type of clause.
544  template <typename ClauseType>
545  bool hasRequiresDeclWithClause() {
546  return llvm::any_of(RequiresDecls, [](const OMPRequiresDecl *D) {
547  return llvm::any_of(D->clauselists(), [](const OMPClause *C) {
548  return isa<ClauseType>(C);
549  });
550  });
551  }
552 
553  /// Checks for a duplicate clause amongst previously declared requires
554  /// directives
555  bool hasDuplicateRequiresClause(ArrayRef<OMPClause *> ClauseList) const {
556  bool IsDuplicate = false;
557  for (OMPClause *CNew : ClauseList) {
558  for (const OMPRequiresDecl *D : RequiresDecls) {
559  for (const OMPClause *CPrev : D->clauselists()) {
560  if (CNew->getClauseKind() == CPrev->getClauseKind()) {
561  SemaRef.Diag(CNew->getBeginLoc(),
562  diag::err_omp_requires_clause_redeclaration)
563  << getOpenMPClauseName(CNew->getClauseKind());
564  SemaRef.Diag(CPrev->getBeginLoc(),
565  diag::note_omp_requires_previous_clause)
566  << getOpenMPClauseName(CPrev->getClauseKind());
567  IsDuplicate = true;
568  }
569  }
570  }
571  }
572  return IsDuplicate;
573  }
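// An illustrative example of the situation diagnosed above (sketch, not in
// the original source):
//
//   #pragma omp requires unified_shared_memory
//   #pragma omp requires unified_shared_memory  // error: clause redeclared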
574 
575  /// Add location of previously encountered target to internal vector
576  void addTargetDirLocation(SourceLocation LocStart) {
577  TargetLocations.push_back(LocStart);
578  }
579 
580  // Return previously encountered target region locations.
581  ArrayRef<SourceLocation> getEncounteredTargetLocs() const {
582  return TargetLocations;
583  }
584 
585  /// Set default data sharing attribute to none.
586  void setDefaultDSANone(SourceLocation Loc) {
587  getTopOfStack().DefaultAttr = DSA_none;
588  getTopOfStack().DefaultAttrLoc = Loc;
589  }
590  /// Set default data sharing attribute to shared.
591  void setDefaultDSAShared(SourceLocation Loc) {
592  getTopOfStack().DefaultAttr = DSA_shared;
593  getTopOfStack().DefaultAttrLoc = Loc;
594  }
595  /// Set default data mapping attribute to 'tofrom:scalar'.
596  void setDefaultDMAToFromScalar(SourceLocation Loc) {
597  getTopOfStack().DefaultMapAttr = DMA_tofrom_scalar;
598  getTopOfStack().DefaultMapAttrLoc = Loc;
599  }
600 
601  DefaultDataSharingAttributes getDefaultDSA() const {
602  return isStackEmpty() ? DSA_unspecified
603  : getTopOfStack().DefaultAttr;
604  }
605  SourceLocation getDefaultDSALocation() const {
606  return isStackEmpty() ? SourceLocation()
607  : getTopOfStack().DefaultAttrLoc;
608  }
609  DefaultMapAttributes getDefaultDMA() const {
610  return isStackEmpty() ? DMA_unspecified
611  : getTopOfStack().DefaultMapAttr;
612  }
613  DefaultMapAttributes getDefaultDMAAtLevel(unsigned Level) const {
614  return getStackElemAtLevel(Level).DefaultMapAttr;
615  }
616  SourceLocation getDefaultDMALocation() const {
617  return isStackEmpty() ? SourceLocation()
618  : getTopOfStack().DefaultMapAttrLoc;
619  }
620 
621  /// Checks if the specified variable is a threadprivate.
622  bool isThreadPrivate(VarDecl *D) {
623  const DSAVarData DVar = getTopDSA(D, false);
624  return isOpenMPThreadPrivate(DVar.CKind);
625  }
626 
627  /// Marks current region as ordered (it has an 'ordered' clause).
628  void setOrderedRegion(bool IsOrdered, const Expr *Param,
629  OMPOrderedClause *Clause) {
630  if (IsOrdered)
631  getTopOfStack().OrderedRegion.emplace(Param, Clause);
632  else
633  getTopOfStack().OrderedRegion.reset();
634  }
635  /// Returns true if the region is ordered (has an associated 'ordered'
636  /// clause), false otherwise.
637  bool isOrderedRegion() const {
638  if (const SharingMapTy *Top = getTopOfStackOrNull())
639  return Top->OrderedRegion.hasValue();
640  return false;
641  }
642  /// Returns optional parameter for the ordered region.
643  std::pair<const Expr *, OMPOrderedClause *> getOrderedRegionParam() const {
644  if (const SharingMapTy *Top = getTopOfStackOrNull())
645  if (Top->OrderedRegion.hasValue())
646  return Top->OrderedRegion.getValue();
647  return std::make_pair(nullptr, nullptr);
648  }
649  /// Returns true if the parent region is ordered (has an associated
650  /// 'ordered' clause), false otherwise.
651  bool isParentOrderedRegion() const {
652  if (const SharingMapTy *Parent = getSecondOnStackOrNull())
653  return Parent->OrderedRegion.hasValue();
654  return false;
655  }
656  /// Returns optional parameter for the ordered region.
657  std::pair<const Expr *, OMPOrderedClause *>
658  getParentOrderedRegionParam() const {
659  if (const SharingMapTy *Parent = getSecondOnStackOrNull())
660  if (Parent->OrderedRegion.hasValue())
661  return Parent->OrderedRegion.getValue();
662  return std::make_pair(nullptr, nullptr);
663  }
664  /// Marks current region as nowait (it has a 'nowait' clause).
665  void setNowaitRegion(bool IsNowait = true) {
666  getTopOfStack().NowaitRegion = IsNowait;
667  }
668  /// Returns true if the parent region is nowait (has an associated
669  /// 'nowait' clause), false otherwise.
670  bool isParentNowaitRegion() const {
671  if (const SharingMapTy *Parent = getSecondOnStackOrNull())
672  return Parent->NowaitRegion;
673  return false;
674  }
675  /// Marks parent region as cancel region.
676  void setParentCancelRegion(bool Cancel = true) {
677  if (SharingMapTy *Parent = getSecondOnStackOrNull())
678  Parent->CancelRegion |= Cancel;
679  }
680  /// Return true if current region has inner cancel construct.
681  bool isCancelRegion() const {
682  const SharingMapTy *Top = getTopOfStackOrNull();
683  return Top ? Top->CancelRegion : false;
684  }
685 
686  /// Set collapse value for the region.
687  void setAssociatedLoops(unsigned Val) {
688  getTopOfStack().AssociatedLoops = Val;
689  if (Val > 1)
690  getTopOfStack().HasMutipleLoops = true;
691  }
692  /// Return collapse value for region.
693  unsigned getAssociatedLoops() const {
694  const SharingMapTy *Top = getTopOfStackOrNull();
695  return Top ? Top->AssociatedLoops : 0;
696  }
697  /// Returns true if the construct is associated with multiple loops.
698  bool hasMutipleLoops() const {
699  const SharingMapTy *Top = getTopOfStackOrNull();
700  return Top ? Top->HasMutipleLoops : false;
701  }
702 
703  /// Marks current target region as one with closely nested teams
704  /// region.
705  void setParentTeamsRegionLoc(SourceLocation TeamsRegionLoc) {
706  if (SharingMapTy *Parent = getSecondOnStackOrNull())
707  Parent->InnerTeamsRegionLoc = TeamsRegionLoc;
708  }
709  /// Returns true, if current region has closely nested teams region.
710  bool hasInnerTeamsRegion() const {
711  return getInnerTeamsRegionLoc().isValid();
712  }
713  /// Returns location of the nested teams region (if any).
714  SourceLocation getInnerTeamsRegionLoc() const {
715  const SharingMapTy *Top = getTopOfStackOrNull();
716  return Top ? Top->InnerTeamsRegionLoc : SourceLocation();
717  }
718 
719  Scope *getCurScope() const {
720  const SharingMapTy *Top = getTopOfStackOrNull();
721  return Top ? Top->CurScope : nullptr;
722  }
723  SourceLocation getConstructLoc() const {
724  const SharingMapTy *Top = getTopOfStackOrNull();
725  return Top ? Top->ConstructLoc : SourceLocation();
726  }
727 
728  /// Applies the check specified in \a Check to all component lists and
729  /// returns true if any issue is found.
730  bool checkMappableExprComponentListsForDecl(
731      const ValueDecl *VD, bool CurrentRegionOnly,
732      const llvm::function_ref<
733          bool(OMPClauseMappableExprCommon::MappableExprComponentListRef,
734               OpenMPClauseKind)>
735          Check) const {
736  if (isStackEmpty())
737  return false;
738  auto SI = begin();
739  auto SE = end();
740 
741  if (SI == SE)
742  return false;
743 
744  if (CurrentRegionOnly)
745  SE = std::next(SI);
746  else
747  std::advance(SI, 1);
748 
749  for (; SI != SE; ++SI) {
750  auto MI = SI->MappedExprComponents.find(VD);
751    if (MI != SI->MappedExprComponents.end())
752      for (OMPClauseMappableExprCommon::MappableExprComponentListRef L :
753           MI->second.Components)
754  if (Check(L, MI->second.Kind))
755  return true;
756  }
757  return false;
758  }
759 
760  /// Applies the check specified in \a Check to all component lists at a
761  /// given level and returns true if any issue is found.
762  bool checkMappableExprComponentListsForDeclAtLevel(
763      const ValueDecl *VD, unsigned Level,
764      const llvm::function_ref<
765          bool(OMPClauseMappableExprCommon::MappableExprComponentListRef,
766               OpenMPClauseKind)>
767          Check) const {
768  if (getStackSize() <= Level)
769  return false;
770 
771  const SharingMapTy &StackElem = getStackElemAtLevel(Level);
772  auto MI = StackElem.MappedExprComponents.find(VD);
773    if (MI != StackElem.MappedExprComponents.end())
774      for (OMPClauseMappableExprCommon::MappableExprComponentListRef L :
775           MI->second.Components)
776  if (Check(L, MI->second.Kind))
777  return true;
778  return false;
779  }
780 
781  /// Create a new mappable expression component list associated with a given
782  /// declaration and initialize it with the provided list of components.
783  void addMappableExpressionComponents(
784      const ValueDecl *VD,
785      OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
786      OpenMPClauseKind WhereFoundClauseKind) {
787  MappedExprComponentTy &MEC = getTopOfStack().MappedExprComponents[VD];
788  // Create new entry and append the new components there.
789  MEC.Components.resize(MEC.Components.size() + 1);
790  MEC.Components.back().append(Components.begin(), Components.end());
791  MEC.Kind = WhereFoundClauseKind;
792  }
793 
794  unsigned getNestingLevel() const {
795  assert(!isStackEmpty());
796  return getStackSize() - 1;
797  }
798  void addDoacrossDependClause(OMPDependClause *C,
799  const OperatorOffsetTy &OpsOffs) {
800  SharingMapTy *Parent = getSecondOnStackOrNull();
801  assert(Parent && isOpenMPWorksharingDirective(Parent->Directive));
802  Parent->DoacrossDepends.try_emplace(C, OpsOffs);
803  }
804  llvm::iterator_range<DoacrossDependMapTy::const_iterator>
805  getDoacrossDependClauses() const {
806  const SharingMapTy &StackElem = getTopOfStack();
807  if (isOpenMPWorksharingDirective(StackElem.Directive)) {
808  const DoacrossDependMapTy &Ref = StackElem.DoacrossDepends;
809  return llvm::make_range(Ref.begin(), Ref.end());
810  }
811  return llvm::make_range(StackElem.DoacrossDepends.end(),
812  StackElem.DoacrossDepends.end());
813  }
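// For illustration, the doacross bookkeeping above corresponds roughly to
// OpenMP source of this shape (sketch only, not in the original source):
//
//   #pragma omp for ordered(2)
//   for (int i = 0; i < N; ++i)
//     for (int j = 0; j < M; ++j) {
//       #pragma omp ordered depend(sink: i - 1, j)
//       work(i, j);
//       #pragma omp ordered depend(source)
//     }
//
// Each 'depend(sink/source)' clause is recorded in the parent loop
// directive's DoacrossDepends map, together with the per-dimension
// operator/offset pairs, so that codegen can recover the loop counters.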
814 
815  // Store types of classes which have been explicitly mapped
816  void addMappedClassesQualTypes(QualType QT) {
817  SharingMapTy &StackElem = getTopOfStack();
818  StackElem.MappedClassesQualTypes.insert(QT);
819  }
820 
821  // Check if the class type has already been mapped.
822  bool isClassPreviouslyMapped(QualType QT) const {
823  const SharingMapTy &StackElem = getTopOfStack();
824  return StackElem.MappedClassesQualTypes.count(QT) != 0;
825  }
826 
827  /// Adds global declare target to the parent target region.
828  void addToParentTargetRegionLinkGlobals(DeclRefExpr *E) {
829  assert(*OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(
830  E->getDecl()) == OMPDeclareTargetDeclAttr::MT_Link &&
831  "Expected declare target link global.");
832  for (auto &Elem : *this) {
833  if (isOpenMPTargetExecutionDirective(Elem.Directive)) {
834  Elem.DeclareTargetLinkVarDecls.push_back(E);
835  return;
836  }
837  }
838  }
839 
840  /// Returns the list of globals with declare target link if current directive
841  /// is target.
842  ArrayRef<DeclRefExpr *> getLinkGlobals() const {
843  assert(isOpenMPTargetExecutionDirective(getCurrentDirective()) &&
844  "Expected target executable directive.");
845  return getTopOfStack().DeclareTargetLinkVarDecls;
846  }
847 };
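// A rough sketch of how Sema drives this stack while analyzing a directive
// (illustrative only; the real call sites are spread across this file and
// may differ in detail):
//
//   Stack->push(OMPD_parallel, DirName, CurScope, Loc);  // directive start
//   Stack->setClauseParsingMode(OMPC_shared);            // while parsing a clause
//   Stack->addDSA(VD, RefExpr, OMPC_shared);             // record explicit DSA
//   Stack->setClauseParsingMode(OMPC_unknown);
//   DSAStackTy::DSAVarData DVar = Stack->getTopDSA(VD, /*FromParent=*/false);
//   Stack->pop();                                        // directive end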
848 
849 bool isImplicitTaskingRegion(OpenMPDirectiveKind DKind) {
850  return isOpenMPParallelDirective(DKind) || isOpenMPTeamsDirective(DKind);
851 }
852 
853 bool isImplicitOrExplicitTaskingRegion(OpenMPDirectiveKind DKind) {
854  return isImplicitTaskingRegion(DKind) || isOpenMPTaskingDirective(DKind) ||
855  DKind == OMPD_unknown;
856 }
857 
858 } // namespace
859 
860 static const Expr *getExprAsWritten(const Expr *E) {
861  if (const auto *FE = dyn_cast<FullExpr>(E))
862  E = FE->getSubExpr();
863 
864  if (const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
865  E = MTE->GetTemporaryExpr();
866 
867  while (const auto *Binder = dyn_cast<CXXBindTemporaryExpr>(E))
868  E = Binder->getSubExpr();
869 
870  if (const auto *ICE = dyn_cast<ImplicitCastExpr>(E))
871  E = ICE->getSubExprAsWritten();
872  return E->IgnoreParens();
873 }
874 
875 static Expr *getExprAsWritten(Expr *E) {
876  return const_cast<Expr *>(getExprAsWritten(const_cast<const Expr *>(E)));
877 }
878 
879 static const ValueDecl *getCanonicalDecl(const ValueDecl *D) {
880  if (const auto *CED = dyn_cast<OMPCapturedExprDecl>(D))
881  if (const auto *ME = dyn_cast<MemberExpr>(getExprAsWritten(CED->getInit())))
882  D = ME->getMemberDecl();
883  const auto *VD = dyn_cast<VarDecl>(D);
884  const auto *FD = dyn_cast<FieldDecl>(D);
885  if (VD != nullptr) {
886  VD = VD->getCanonicalDecl();
887  D = VD;
888  } else {
889  assert(FD);
890  FD = FD->getCanonicalDecl();
891  D = FD;
892  }
893  return D;
894 }
895 
896 static ValueDecl *getCanonicalDecl(ValueDecl *D) {
897  return const_cast<ValueDecl *>(
898  getCanonicalDecl(const_cast<const ValueDecl *>(D)));
899 }
900 
901 DSAStackTy::DSAVarData DSAStackTy::getDSA(const_iterator &Iter,
902  ValueDecl *D) const {
903  D = getCanonicalDecl(D);
904  auto *VD = dyn_cast<VarDecl>(D);
905  const auto *FD = dyn_cast<FieldDecl>(D);
906  DSAVarData DVar;
907  if (Iter == end()) {
908  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
909  // in a region but not in construct]
910  // File-scope or namespace-scope variables referenced in called routines
911  // in the region are shared unless they appear in a threadprivate
912  // directive.
913  if (VD && !VD->isFunctionOrMethodVarDecl() && !isa<ParmVarDecl>(VD))
914  DVar.CKind = OMPC_shared;
915 
916  // OpenMP [2.9.1.2, Data-sharing Attribute Rules for Variables Referenced
917  // in a region but not in construct]
918  // Variables with static storage duration that are declared in called
919  // routines in the region are shared.
920  if (VD && VD->hasGlobalStorage())
921  DVar.CKind = OMPC_shared;
922 
923  // Non-static data members are shared by default.
924  if (FD)
925  DVar.CKind = OMPC_shared;
926 
927  return DVar;
928  }
929 
930  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
931  // in a Construct, C/C++, predetermined, p.1]
932  // Variables with automatic storage duration that are declared in a scope
933  // inside the construct are private.
934  if (VD && isOpenMPLocal(VD, Iter) && VD->isLocalVarDecl() &&
935  (VD->getStorageClass() == SC_Auto || VD->getStorageClass() == SC_None)) {
936  DVar.CKind = OMPC_private;
937  return DVar;
938  }
939 
940  DVar.DKind = Iter->Directive;
941  // Explicitly specified attributes and local variables with predetermined
942  // attributes.
943  if (Iter->SharingMap.count(D)) {
944  const DSAInfo &Data = Iter->SharingMap.lookup(D);
945  DVar.RefExpr = Data.RefExpr.getPointer();
946  DVar.PrivateCopy = Data.PrivateCopy;
947  DVar.CKind = Data.Attributes;
948  DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
949  return DVar;
950  }
951 
952  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
953  // in a Construct, C/C++, implicitly determined, p.1]
954  // In a parallel or task construct, the data-sharing attributes of these
955  // variables are determined by the default clause, if present.
956  switch (Iter->DefaultAttr) {
957  case DSA_shared:
958  DVar.CKind = OMPC_shared;
959  DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
960  return DVar;
961  case DSA_none:
962  return DVar;
963  case DSA_unspecified:
964  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
965  // in a Construct, implicitly determined, p.2]
966  // In a parallel construct, if no default clause is present, these
967  // variables are shared.
968  DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
969  if ((isOpenMPParallelDirective(DVar.DKind) &&
970  !isOpenMPTaskLoopDirective(DVar.DKind)) ||
971  isOpenMPTeamsDirective(DVar.DKind)) {
972  DVar.CKind = OMPC_shared;
973  return DVar;
974  }
975 
976  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
977  // in a Construct, implicitly determined, p.4]
978  // In a task construct, if no default clause is present, a variable that in
979  // the enclosing context is determined to be shared by all implicit tasks
980  // bound to the current team is shared.
981  if (isOpenMPTaskingDirective(DVar.DKind)) {
982  DSAVarData DVarTemp;
983  const_iterator I = Iter, E = end();
984  do {
985  ++I;
986  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables
987  // Referenced in a Construct, implicitly determined, p.6]
988  // In a task construct, if no default clause is present, a variable
989  // whose data-sharing attribute is not determined by the rules above is
990  // firstprivate.
991  DVarTemp = getDSA(I, D);
992  if (DVarTemp.CKind != OMPC_shared) {
993  DVar.RefExpr = nullptr;
994  DVar.CKind = OMPC_firstprivate;
995  return DVar;
996  }
997  } while (I != E && !isImplicitTaskingRegion(I->Directive));
998  DVar.CKind =
999  (DVarTemp.CKind == OMPC_unknown) ? OMPC_firstprivate : OMPC_shared;
1000  return DVar;
1001  }
1002  }
1003  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1004  // in a Construct, implicitly determined, p.3]
1005  // For constructs other than task, if no default clause is present, these
1006  // variables inherit their data-sharing attributes from the enclosing
1007  // context.
1008  return getDSA(++Iter, D);
1009 }
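// For illustration (a simplified example, not in the original source), the
// implicit rules implemented above resolve as follows:
//
//   int g;                  // namespace-scope variable
//   void foo() {
//     int a = 0;
//     #pragma omp parallel  // no 'default' clause: DSA_unspecified
//     {
//       int b = a + g;      // 'a' and 'g' are implicitly shared
//       #pragma omp task
//       b += a;             // 'a' stays shared; 'b' is not shared in the
//                           // enclosing context, so it is firstprivate
//                           // in the task
//     }
//   }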
1010 
1011 const Expr *DSAStackTy::addUniqueAligned(const ValueDecl *D,
1012  const Expr *NewDE) {
1013  assert(!isStackEmpty() && "Data sharing attributes stack is empty");
1014  D = getCanonicalDecl(D);
1015  SharingMapTy &StackElem = getTopOfStack();
1016  auto It = StackElem.AlignedMap.find(D);
1017  if (It == StackElem.AlignedMap.end()) {
1018  assert(NewDE && "Unexpected nullptr expr to be added into aligned map");
1019  StackElem.AlignedMap[D] = NewDE;
1020  return nullptr;
1021  }
1022  assert(It->second && "Unexpected nullptr expr in the aligned map");
1023  return It->second;
1024 }
1025 
1026 void DSAStackTy::addLoopControlVariable(const ValueDecl *D, VarDecl *Capture) {
1027  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
1028  D = getCanonicalDecl(D);
1029  SharingMapTy &StackElem = getTopOfStack();
1030  StackElem.LCVMap.try_emplace(
1031  D, LCDeclInfo(StackElem.LCVMap.size() + 1, Capture));
1032 }
1033 
1034 const DSAStackTy::LCDeclInfo
1035 DSAStackTy::isLoopControlVariable(const ValueDecl *D) const {
1036  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
1037  D = getCanonicalDecl(D);
1038  const SharingMapTy &StackElem = getTopOfStack();
1039  auto It = StackElem.LCVMap.find(D);
1040  if (It != StackElem.LCVMap.end())
1041  return It->second;
1042  return {0, nullptr};
1043 }
1044 
1045 const DSAStackTy::LCDeclInfo
1046 DSAStackTy::isParentLoopControlVariable(const ValueDecl *D) const {
1047  const SharingMapTy *Parent = getSecondOnStackOrNull();
1048  assert(Parent && "Data-sharing attributes stack is empty");
1049  D = getCanonicalDecl(D);
1050  auto It = Parent->LCVMap.find(D);
1051  if (It != Parent->LCVMap.end())
1052  return It->second;
1053  return {0, nullptr};
1054 }
1055 
1056 const ValueDecl *DSAStackTy::getParentLoopControlVariable(unsigned I) const {
1057  const SharingMapTy *Parent = getSecondOnStackOrNull();
1058  assert(Parent && "Data-sharing attributes stack is empty");
1059  if (Parent->LCVMap.size() < I)
1060  return nullptr;
1061  for (const auto &Pair : Parent->LCVMap)
1062  if (Pair.second.first == I)
1063  return Pair.first;
1064  return nullptr;
1065 }
1066 
1067 void DSAStackTy::addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
1068  DeclRefExpr *PrivateCopy) {
1069  D = getCanonicalDecl(D);
1070  if (A == OMPC_threadprivate) {
1071  DSAInfo &Data = Threadprivates[D];
1072  Data.Attributes = A;
1073  Data.RefExpr.setPointer(E);
1074  Data.PrivateCopy = nullptr;
1075  } else {
1076  DSAInfo &Data = getTopOfStack().SharingMap[D];
1077  assert(Data.Attributes == OMPC_unknown || (A == Data.Attributes) ||
1078  (A == OMPC_firstprivate && Data.Attributes == OMPC_lastprivate) ||
1079  (A == OMPC_lastprivate && Data.Attributes == OMPC_firstprivate) ||
1080  (isLoopControlVariable(D).first && A == OMPC_private));
1081  if (A == OMPC_lastprivate && Data.Attributes == OMPC_firstprivate) {
1082  Data.RefExpr.setInt(/*IntVal=*/true);
1083  return;
1084  }
1085  const bool IsLastprivate =
1086  A == OMPC_lastprivate || Data.Attributes == OMPC_lastprivate;
1087  Data.Attributes = A;
1088  Data.RefExpr.setPointerAndInt(E, IsLastprivate);
1089  Data.PrivateCopy = PrivateCopy;
1090  if (PrivateCopy) {
1091  DSAInfo &Data = getTopOfStack().SharingMap[PrivateCopy->getDecl()];
1092  Data.Attributes = A;
1093  Data.RefExpr.setPointerAndInt(PrivateCopy, IsLastprivate);
1094  Data.PrivateCopy = nullptr;
1095  }
1096  }
1097 }
1098 
1099 /// Build a variable declaration for an OpenMP loop iteration variable.
1100 static VarDecl *buildVarDecl(Sema &SemaRef, SourceLocation Loc, QualType Type,
1101                              StringRef Name, const AttrVec *Attrs = nullptr,
1102  DeclRefExpr *OrigRef = nullptr) {
1103  DeclContext *DC = SemaRef.CurContext;
1104  IdentifierInfo *II = &SemaRef.PP.getIdentifierTable().get(Name);
1105  TypeSourceInfo *TInfo = SemaRef.Context.getTrivialTypeSourceInfo(Type, Loc);
1106  auto *Decl =
1107  VarDecl::Create(SemaRef.Context, DC, Loc, Loc, II, Type, TInfo, SC_None);
1108  if (Attrs) {
1109  for (specific_attr_iterator<AlignedAttr> I(Attrs->begin()), E(Attrs->end());
1110  I != E; ++I)
1111  Decl->addAttr(*I);
1112  }
1113  Decl->setImplicit();
1114  if (OrigRef) {
1115  Decl->addAttr(
1116  OMPReferencedVarAttr::CreateImplicit(SemaRef.Context, OrigRef));
1117  }
1118  return Decl;
1119 }
1120 
1121 static DeclRefExpr *buildDeclRefExpr(Sema &S, VarDecl *D, QualType Ty,
1122                                      SourceLocation Loc,
1123  bool RefersToCapture = false) {
1124  D->setReferenced();
1125  D->markUsed(S.Context);
1126  return DeclRefExpr::Create(S.getASTContext(), NestedNameSpecifierLoc(),
1127                             SourceLocation(), D, RefersToCapture, Loc, Ty,
1128  VK_LValue);
1129 }
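// A minimal sketch of how the two helpers above are typically combined
// (adapted from the taskgroup reduction code below; illustrative only):
//
//   VarDecl *VD = buildVarDecl(SemaRef, Loc, SemaRef.Context.VoidPtrTy,
//                              ".task_red.");
//   DeclRefExpr *Ref =
//       buildDeclRefExpr(SemaRef, VD, SemaRef.Context.VoidPtrTy, Loc);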
1130 
1131 void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
1132  BinaryOperatorKind BOK) {
1133  D = getCanonicalDecl(D);
1134  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
1135  assert(
1136  getTopOfStack().SharingMap[D].Attributes == OMPC_reduction &&
1137  "Additional reduction info may be specified only for reduction items.");
1138  ReductionData &ReductionData = getTopOfStack().ReductionMap[D];
1139  assert(ReductionData.ReductionRange.isInvalid() &&
1140  getTopOfStack().Directive == OMPD_taskgroup &&
1141  "Additional reduction info may be specified only once for reduction "
1142  "items.");
1143  ReductionData.set(BOK, SR);
1144  Expr *&TaskgroupReductionRef =
1145  getTopOfStack().TaskgroupReductionRef;
1146  if (!TaskgroupReductionRef) {
1147  VarDecl *VD = buildVarDecl(SemaRef, SR.getBegin(),
1148  SemaRef.Context.VoidPtrTy, ".task_red.");
1149  TaskgroupReductionRef =
1150  buildDeclRefExpr(SemaRef, VD, SemaRef.Context.VoidPtrTy, SR.getBegin());
1151  }
1152 }
1153 
1154 void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
1155  const Expr *ReductionRef) {
1156  D = getCanonicalDecl(D);
1157  assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
1158  assert(
1159  getTopOfStack().SharingMap[D].Attributes == OMPC_reduction &&
1160  "Additional reduction info may be specified only for reduction items.");
1161  ReductionData &ReductionData = getTopOfStack().ReductionMap[D];
1162  assert(ReductionData.ReductionRange.isInvalid() &&
1163  getTopOfStack().Directive == OMPD_taskgroup &&
1164  "Additional reduction info may be specified only once for reduction "
1165  "items.");
1166  ReductionData.set(ReductionRef, SR);
1167  Expr *&TaskgroupReductionRef =
1168  getTopOfStack().TaskgroupReductionRef;
1169  if (!TaskgroupReductionRef) {
1170  VarDecl *VD = buildVarDecl(SemaRef, SR.getBegin(),
1171  SemaRef.Context.VoidPtrTy, ".task_red.");
1172  TaskgroupReductionRef =
1173  buildDeclRefExpr(SemaRef, VD, SemaRef.Context.VoidPtrTy, SR.getBegin());
1174  }
1175 }
1176 
1177 const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
1178  const ValueDecl *D, SourceRange &SR, BinaryOperatorKind &BOK,
1179  Expr *&TaskgroupDescriptor) const {
1180  D = getCanonicalDecl(D);
1181  assert(!isStackEmpty() && "Data-sharing attributes stack is empty.");
1182  for (const_iterator I = begin() + 1, E = end(); I != E; ++I) {
1183  const DSAInfo &Data = I->SharingMap.lookup(D);
1184  if (Data.Attributes != OMPC_reduction || I->Directive != OMPD_taskgroup)
1185  continue;
1186  const ReductionData &ReductionData = I->ReductionMap.lookup(D);
1187  if (!ReductionData.ReductionOp ||
1188  ReductionData.ReductionOp.is<const Expr *>())
1189  return DSAVarData();
1190  SR = ReductionData.ReductionRange;
1191  BOK = ReductionData.ReductionOp.get<ReductionData::BOKPtrType>();
1192  assert(I->TaskgroupReductionRef && "taskgroup reduction reference "
1193  "expression for the descriptor is not "
1194  "set.");
1195  TaskgroupDescriptor = I->TaskgroupReductionRef;
1196  return DSAVarData(OMPD_taskgroup, OMPC_reduction, Data.RefExpr.getPointer(),
1197  Data.PrivateCopy, I->DefaultAttrLoc);
1198  }
1199  return DSAVarData();
1200 }
1201 
1202 const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
1203  const ValueDecl *D, SourceRange &SR, const Expr *&ReductionRef,
1204  Expr *&TaskgroupDescriptor) const {
1205  D = getCanonicalDecl(D);
1206  assert(!isStackEmpty() && "Data-sharing attributes stack is empty.");
1207  for (const_iterator I = begin() + 1, E = end(); I != E; ++I) {
1208  const DSAInfo &Data = I->SharingMap.lookup(D);
1209  if (Data.Attributes != OMPC_reduction || I->Directive != OMPD_taskgroup)
1210  continue;
1211  const ReductionData &ReductionData = I->ReductionMap.lookup(D);
1212  if (!ReductionData.ReductionOp ||
1213  !ReductionData.ReductionOp.is<const Expr *>())
1214  return DSAVarData();
1215  SR = ReductionData.ReductionRange;
1216  ReductionRef = ReductionData.ReductionOp.get<const Expr *>();
1217  assert(I->TaskgroupReductionRef && "taskgroup reduction reference "
1218  "expression for the descriptor is not "
1219  "set.");
1220  TaskgroupDescriptor = I->TaskgroupReductionRef;
1221  return DSAVarData(OMPD_taskgroup, OMPC_reduction, Data.RefExpr.getPointer(),
1222  Data.PrivateCopy, I->DefaultAttrLoc);
1223  }
1224  return DSAVarData();
1225 }
1226 
1227 bool DSAStackTy::isOpenMPLocal(VarDecl *D, const_iterator I) const {
1228  D = D->getCanonicalDecl();
1229  for (const_iterator E = end(); I != E; ++I) {
1230  if (isImplicitOrExplicitTaskingRegion(I->Directive) ||
1231  isOpenMPTargetExecutionDirective(I->Directive)) {
1232  Scope *TopScope = I->CurScope ? I->CurScope->getParent() : nullptr;
1233  Scope *CurScope = getCurScope();
1234  while (CurScope && CurScope != TopScope && !CurScope->isDeclScope(D))
1235  CurScope = CurScope->getParent();
1236  return CurScope != TopScope;
1237  }
1238  }
1239  return false;
1240 }
1241 
1242 static bool isConstNotMutableType(Sema &SemaRef, QualType Type,
1243  bool AcceptIfMutable = true,
1244  bool *IsClassType = nullptr) {
1245  ASTContext &Context = SemaRef.getASTContext();
1246  Type = Type.getNonReferenceType().getCanonicalType();
1247  bool IsConstant = Type.isConstant(Context);
1248  Type = Context.getBaseElementType(Type);
1249  const CXXRecordDecl *RD = AcceptIfMutable && SemaRef.getLangOpts().CPlusPlus
1250  ? Type->getAsCXXRecordDecl()
1251  : nullptr;
1252  if (const auto *CTSD = dyn_cast_or_null<ClassTemplateSpecializationDecl>(RD))
1253  if (const ClassTemplateDecl *CTD = CTSD->getSpecializedTemplate())
1254  RD = CTD->getTemplatedDecl();
1255  if (IsClassType)
1256  *IsClassType = RD;
1257  return IsConstant && !(SemaRef.getLangOpts().CPlusPlus && RD &&
1258  RD->hasDefinition() && RD->hasMutableFields());
1259 }
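// For illustration (not in the original source): a const-qualified class type
// with a mutable member is still modifiable, so it is not reported as "const
// and not mutable" when AcceptIfMutable is true:
//
//   struct S { mutable int Counter = 0; };
//   const S SObj;      // not rejected: S has a mutable field
//   const int N = 0;   // rejected where a modifiable list item is required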
1260 
1261 static bool rejectConstNotMutableType(Sema &SemaRef, const ValueDecl *D,
1262                                       QualType Type, OpenMPClauseKind CKind,
1263                                       SourceLocation ELoc,
1264  bool AcceptIfMutable = true,
1265  bool ListItemNotVar = false) {
1266  ASTContext &Context = SemaRef.getASTContext();
1267  bool IsClassType;
1268  if (isConstNotMutableType(SemaRef, Type, AcceptIfMutable, &IsClassType)) {
1269  unsigned Diag = ListItemNotVar
1270  ? diag::err_omp_const_list_item
1271  : IsClassType ? diag::err_omp_const_not_mutable_variable
1272  : diag::err_omp_const_variable;
1273  SemaRef.Diag(ELoc, Diag) << getOpenMPClauseName(CKind);
1274  if (!ListItemNotVar && D) {
1275  const VarDecl *VD = dyn_cast<VarDecl>(D);
1276     bool IsDecl = !VD || VD->isThisDeclarationADefinition(Context) ==
1277                              VarDecl::DeclarationOnly;
1278     SemaRef.Diag(D->getLocation(),
1279  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
1280  << D;
1281  }
1282  return true;
1283  }
1284  return false;
1285 }
1286 
1287 const DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D,
1288  bool FromParent) {
1289  D = getCanonicalDecl(D);
1290  DSAVarData DVar;
1291 
1292  auto *VD = dyn_cast<VarDecl>(D);
1293  auto TI = Threadprivates.find(D);
1294  if (TI != Threadprivates.end()) {
1295  DVar.RefExpr = TI->getSecond().RefExpr.getPointer();
1296  DVar.CKind = OMPC_threadprivate;
1297  return DVar;
1298  }
1299  if (VD && VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
1300  DVar.RefExpr = buildDeclRefExpr(
1301  SemaRef, VD, D->getType().getNonReferenceType(),
1302  VD->getAttr<OMPThreadPrivateDeclAttr>()->getLocation());
1303  DVar.CKind = OMPC_threadprivate;
1304  addDSA(D, DVar.RefExpr, OMPC_threadprivate);
1305  return DVar;
1306  }
1307  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1308  // in a Construct, C/C++, predetermined, p.1]
1309  // Variables appearing in threadprivate directives are threadprivate.
1310  if ((VD && VD->getTLSKind() != VarDecl::TLS_None &&
1311  !(VD->hasAttr<OMPThreadPrivateDeclAttr>() &&
1312  SemaRef.getLangOpts().OpenMPUseTLS &&
1313  SemaRef.getASTContext().getTargetInfo().isTLSSupported())) ||
1314  (VD && VD->getStorageClass() == SC_Register &&
1315  VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())) {
1316  DVar.RefExpr = buildDeclRefExpr(
1317  SemaRef, VD, D->getType().getNonReferenceType(), D->getLocation());
1318  DVar.CKind = OMPC_threadprivate;
1319  addDSA(D, DVar.RefExpr, OMPC_threadprivate);
1320  return DVar;
1321  }
1322  if (SemaRef.getLangOpts().OpenMPCUDAMode && VD &&
1323  VD->isLocalVarDeclOrParm() && !isStackEmpty() &&
1324  !isLoopControlVariable(D).first) {
1325  const_iterator IterTarget =
1326  std::find_if(begin(), end(), [](const SharingMapTy &Data) {
1327  return isOpenMPTargetExecutionDirective(Data.Directive);
1328  });
1329  if (IterTarget != end()) {
1330  const_iterator ParentIterTarget = IterTarget + 1;
1331  for (const_iterator Iter = begin();
1332  Iter != ParentIterTarget; ++Iter) {
1333  if (isOpenMPLocal(VD, Iter)) {
1334  DVar.RefExpr =
1335  buildDeclRefExpr(SemaRef, VD, D->getType().getNonReferenceType(),
1336  D->getLocation());
1337  DVar.CKind = OMPC_threadprivate;
1338  return DVar;
1339  }
1340  }
1341  if (!isClauseParsingMode() || IterTarget != begin()) {
1342  auto DSAIter = IterTarget->SharingMap.find(D);
1343  if (DSAIter != IterTarget->SharingMap.end() &&
1344  isOpenMPPrivate(DSAIter->getSecond().Attributes)) {
1345  DVar.RefExpr = DSAIter->getSecond().RefExpr.getPointer();
1346  DVar.CKind = OMPC_threadprivate;
1347  return DVar;
1348  }
1349  const_iterator End = end();
1350  if (!SemaRef.isOpenMPCapturedByRef(
1351  D, std::distance(ParentIterTarget, End),
1352  /*OpenMPCaptureLevel=*/0)) {
1353  DVar.RefExpr =
1354  buildDeclRefExpr(SemaRef, VD, D->getType().getNonReferenceType(),
1355  IterTarget->ConstructLoc);
1356  DVar.CKind = OMPC_threadprivate;
1357  return DVar;
1358  }
1359  }
1360  }
1361  }
1362 
1363  if (isStackEmpty())
1364  // Not in OpenMP execution region and top scope was already checked.
1365  return DVar;
1366 
1367  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1368  // in a Construct, C/C++, predetermined, p.4]
1369  // Static data members are shared.
1370  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1371  // in a Construct, C/C++, predetermined, p.7]
1372  // Variables with static storage duration that are declared in a scope
1373  // inside the construct are shared.
1374  if (VD && VD->isStaticDataMember()) {
1375  // Check for explicitly specified attributes.
1376  const_iterator I = begin();
1377  const_iterator EndI = end();
1378  if (FromParent && I != EndI)
1379  ++I;
1380  auto It = I->SharingMap.find(D);
1381  if (It != I->SharingMap.end()) {
1382  const DSAInfo &Data = It->getSecond();
1383  DVar.RefExpr = Data.RefExpr.getPointer();
1384  DVar.PrivateCopy = Data.PrivateCopy;
1385  DVar.CKind = Data.Attributes;
1386  DVar.ImplicitDSALoc = I->DefaultAttrLoc;
1387  DVar.DKind = I->Directive;
1388  return DVar;
1389  }
1390 
1391  DVar.CKind = OMPC_shared;
1392  return DVar;
1393  }
1394 
1395  auto &&MatchesAlways = [](OpenMPDirectiveKind) { return true; };
1396  // The predetermined shared attribute for const-qualified types having no
1397  // mutable members was removed after OpenMP 3.1.
1398  if (SemaRef.LangOpts.OpenMP <= 31) {
1399  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
1400  // in a Construct, C/C++, predetermined, p.6]
1401  // Variables with const qualified type having no mutable member are
1402  // shared.
1403  if (isConstNotMutableType(SemaRef, D->getType())) {
1404  // Variables with const-qualified type having no mutable member may be
1405  // listed in a firstprivate clause, even if they are static data members.
1406  DSAVarData DVarTemp = hasInnermostDSA(
1407  D,
1408  [](OpenMPClauseKind C) {
1409  return C == OMPC_firstprivate || C == OMPC_shared;
1410  },
1411  MatchesAlways, FromParent);
1412  if (DVarTemp.CKind != OMPC_unknown && DVarTemp.RefExpr)
1413  return DVarTemp;
1414 
1415  DVar.CKind = OMPC_shared;
1416  return DVar;
1417  }
1418  }
1419 
1420  // Explicitly specified attributes and local variables with predetermined
1421  // attributes.
1422  const_iterator I = begin();
1423  const_iterator EndI = end();
1424  if (FromParent && I != EndI)
1425  ++I;
1426  auto It = I->SharingMap.find(D);
1427  if (It != I->SharingMap.end()) {
1428  const DSAInfo &Data = It->getSecond();
1429  DVar.RefExpr = Data.RefExpr.getPointer();
1430  DVar.PrivateCopy = Data.PrivateCopy;
1431  DVar.CKind = Data.Attributes;
1432  DVar.ImplicitDSALoc = I->DefaultAttrLoc;
1433  DVar.DKind = I->Directive;
1434  }
1435 
1436  return DVar;
1437 }
1438 
1439 const DSAStackTy::DSAVarData DSAStackTy::getImplicitDSA(ValueDecl *D,
1440  bool FromParent) const {
1441  if (isStackEmpty()) {
1442  const_iterator I;
1443  return getDSA(I, D);
1444  }
1445  D = getCanonicalDecl(D);
1446  const_iterator StartI = begin();
1447  const_iterator EndI = end();
1448  if (FromParent && StartI != EndI)
1449  ++StartI;
1450  return getDSA(StartI, D);
1451 }
1452 
1453 const DSAStackTy::DSAVarData
1454 DSAStackTy::hasDSA(ValueDecl *D,
1455  const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
1456  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
1457  bool FromParent) const {
1458  if (isStackEmpty())
1459  return {};
1460  D = getCanonicalDecl(D);
1461  const_iterator I = begin();
1462  const_iterator EndI = end();
1463  if (FromParent && I != EndI)
1464  ++I;
1465  for (; I != EndI; ++I) {
1466  if (!DPred(I->Directive) &&
1467  !isImplicitOrExplicitTaskingRegion(I->Directive))
1468  continue;
1469  const_iterator NewI = I;
1470  DSAVarData DVar = getDSA(NewI, D);
1471  if (I == NewI && CPred(DVar.CKind))
1472  return DVar;
1473  }
1474  return {};
1475 }
1476 
1477 const DSAStackTy::DSAVarData DSAStackTy::hasInnermostDSA(
1478  ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
1479  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
1480  bool FromParent) const {
1481  if (isStackEmpty())
1482  return {};
1483  D = getCanonicalDecl(D);
1484  const_iterator StartI = begin();
1485  const_iterator EndI = end();
1486  if (FromParent && StartI != EndI)
1487  ++StartI;
1488  if (StartI == EndI || !DPred(StartI->Directive))
1489  return {};
1490  const_iterator NewI = StartI;
1491  DSAVarData DVar = getDSA(NewI, D);
1492  return (NewI == StartI && CPred(DVar.CKind)) ? DVar : DSAVarData();
1493 }
1494 
1495 bool DSAStackTy::hasExplicitDSA(
1496  const ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
1497  unsigned Level, bool NotLastprivate) const {
1498  if (getStackSize() <= Level)
1499  return false;
1500  D = getCanonicalDecl(D);
1501  const SharingMapTy &StackElem = getStackElemAtLevel(Level);
1502  auto I = StackElem.SharingMap.find(D);
1503  if (I != StackElem.SharingMap.end() &&
1504  I->getSecond().RefExpr.getPointer() &&
1505  CPred(I->getSecond().Attributes) &&
1506  (!NotLastprivate || !I->getSecond().RefExpr.getInt()))
1507  return true;
1508  // Check predetermined rules for the loop control variables.
1509  auto LI = StackElem.LCVMap.find(D);
1510  if (LI != StackElem.LCVMap.end())
1511  return CPred(OMPC_private);
1512  return false;
1513 }
1514 
1515 bool DSAStackTy::hasExplicitDirective(
1516  const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
1517  unsigned Level) const {
1518  if (getStackSize() <= Level)
1519  return false;
1520  const SharingMapTy &StackElem = getStackElemAtLevel(Level);
1521  return DPred(StackElem.Directive);
1522 }
1523 
1524 bool DSAStackTy::hasDirective(
1525  const llvm::function_ref<bool(OpenMPDirectiveKind,
1526  const DeclarationNameInfo &, SourceLocation)>
1527  DPred,
1528  bool FromParent) const {
1529  // We look only in the enclosing region.
1530  size_t Skip = FromParent ? 2 : 1;
1531  for (const_iterator I = begin() + std::min(Skip, getStackSize()), E = end();
1532  I != E; ++I) {
1533  if (DPred(I->Directive, I->DirectiveName, I->ConstructLoc))
1534  return true;
1535  }
1536  return false;
1537 }
1538 
1539 void Sema::InitDataSharingAttributesStack() {
1540  VarDataSharingAttributesStack = new DSAStackTy(*this);
1541 }
1542 
1543 #define DSAStack static_cast<DSAStackTy *>(VarDataSharingAttributesStack)
1544 
1545 void Sema::pushOpenMPFunctionRegion() {
1546  DSAStack->pushFunction();
1547 }
1548 
1549 void Sema::popOpenMPFunctionRegion(const FunctionScopeInfo *OldFSI) {
1550  DSAStack->popFunction(OldFSI);
1551 }
1552 
1553 static bool isOpenMPDeviceDelayedContext(Sema &S) {
1554  assert(S.LangOpts.OpenMP && S.LangOpts.OpenMPIsDevice &&
1555  "Expected OpenMP device compilation.");
1556  return !S.isInOpenMPTargetExecutionDirective() &&
1557  !S.isInOpenMPDeclareTargetContext();
1558 }
1559 
1560 namespace {
1561 /// Status of the function emission on the host/device.
1562 enum class FunctionEmissionStatus {
1563  Emitted,
1564  Discarded,
1565  Unknown,
1566 };
1567 } // anonymous namespace
1568 
1569 Sema::DeviceDiagBuilder Sema::diagIfOpenMPDeviceCode(SourceLocation Loc,
1570  unsigned DiagID) {
1571  assert(LangOpts.OpenMP && LangOpts.OpenMPIsDevice &&
1572  "Expected OpenMP device compilation.");
1573  FunctionEmissionStatus FES = getEmissionStatus(getCurFunctionDecl());
1574  DeviceDiagBuilder::Kind Kind = DeviceDiagBuilder::K_Nop;
1575  switch (FES) {
1576  case FunctionEmissionStatus::Emitted:
1577  Kind = DeviceDiagBuilder::K_Immediate;
1578  break;
1579  case FunctionEmissionStatus::Unknown:
1580  Kind = isOpenMPDeviceDelayedContext(*this) ? DeviceDiagBuilder::K_Deferred
1581  : DeviceDiagBuilder::K_Immediate;
1582  break;
1583  case FunctionEmissionStatus::TemplateDiscarded:
1584  case FunctionEmissionStatus::OMPDiscarded:
1585  Kind = DeviceDiagBuilder::K_Nop;
1586  break;
1587  case FunctionEmissionStatus::CUDADiscarded:
1588  llvm_unreachable("CUDADiscarded unexpected in OpenMP device compilation");
1589  break;
1590  }
1591 
1592  return DeviceDiagBuilder(Kind, Loc, DiagID, getCurFunctionDecl(), *this);
1593 }
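// Illustrative sketch (hypothetical user code, not from this file): with this
// builder, a diagnostic in a function whose device emission status is still
// unknown is deferred and only surfaces if the function is eventually emitted
// for the device, e.g. because it is reached from a target region:
//
//   void MaybeDevice() { /* construct diagnosed via targetDiag(...) */ }
//   void OnlyHostPath() { MaybeDevice(); }   // alone: diagnostic stays deferred
//   void F() {
//   #pragma omp target
//     MaybeDevice();                         // now the deferred diag is emitted
//   }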
1594 
1595 Sema::DeviceDiagBuilder Sema::diagIfOpenMPHostCode(SourceLocation Loc,
1596  unsigned DiagID) {
1597  assert(LangOpts.OpenMP && !LangOpts.OpenMPIsDevice &&
1598  "Expected OpenMP host compilation.");
1599  FunctionEmissionStatus FES = getEmissionStatus(getCurFunctionDecl());
1600  DeviceDiagBuilder::Kind Kind = DeviceDiagBuilder::K_Nop;
1601  switch (FES) {
1602  case FunctionEmissionStatus::Emitted:
1603  Kind = DeviceDiagBuilder::K_Immediate;
1604  break;
1605  case FunctionEmissionStatus::Unknown:
1606  Kind = DeviceDiagBuilder::K_Deferred;
1607  break;
1608  case FunctionEmissionStatus::TemplateDiscarded:
1609  case FunctionEmissionStatus::OMPDiscarded:
1610  case FunctionEmissionStatus::CUDADiscarded:
1611  Kind = DeviceDiagBuilder::K_Nop;
1612  break;
1613  }
1614 
1615  return DeviceDiagBuilder(Kind, Loc, DiagID, getCurFunctionDecl(), *this);
1616 }
1617 
1618 void Sema::checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee,
1619  bool CheckForDelayedContext) {
1620  assert(LangOpts.OpenMP && LangOpts.OpenMPIsDevice &&
1621  "Expected OpenMP device compilation.");
1622  assert(Callee && "Callee may not be null.");
1623  Callee = Callee->getMostRecentDecl();
1624  FunctionDecl *Caller = getCurFunctionDecl();
1625 
1626  // Host-only functions are not available on the device.
1627  if (Caller) {
1628  FunctionEmissionStatus CallerS = getEmissionStatus(Caller);
1629  FunctionEmissionStatus CalleeS = getEmissionStatus(Callee);
1630  assert(CallerS != FunctionEmissionStatus::CUDADiscarded &&
1631  CalleeS != FunctionEmissionStatus::CUDADiscarded &&
1632  "CUDADiscarded unexpected in OpenMP device function check");
1633  if ((CallerS == FunctionEmissionStatus::Emitted ||
1634  (!isOpenMPDeviceDelayedContext(*this) &&
1635  CallerS == FunctionEmissionStatus::Unknown)) &&
1636  CalleeS == FunctionEmissionStatus::OMPDiscarded) {
1637  StringRef HostDevTy = getOpenMPSimpleClauseTypeName(
1638  OMPC_device_type, OMPC_DEVICE_TYPE_host);
1639  Diag(Loc, diag::err_omp_wrong_device_function_call) << HostDevTy << 0;
1640  Diag(Callee->getAttr<OMPDeclareTargetDeclAttr>()->getLocation(),
1641  diag::note_omp_marked_device_type_here)
1642  << HostDevTy;
1643  return;
1644  }
1645  }
1646  // If the caller is known-emitted, mark the callee as known-emitted.
1647  // Otherwise, mark the call in our call graph so we can traverse it later.
1648  if ((CheckForDelayedContext && !isOpenMPDeviceDelayedContext(*this)) ||
1649  (!Caller && !CheckForDelayedContext) ||
1650  (Caller && getEmissionStatus(Caller) == FunctionEmissionStatus::Emitted))
1651  markKnownEmitted(*this, Caller, Callee, Loc,
1652  [CheckForDelayedContext](Sema &S, FunctionDecl *FD) {
1653  return CheckForDelayedContext &&
1654  S.getEmissionStatus(FD) ==
1655  FunctionEmissionStatus::Emitted;
1656  });
1657  else if (Caller)
1658  DeviceCallGraph[Caller].insert({Callee, Loc});
1659 }
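// Illustrative example (hypothetical user code, not from this file) of the
// device_type(host) check above:
//
//   void HostOnly();
//   #pragma omp declare target to(HostOnly) device_type(host)
//
//   void Caller() {
//   #pragma omp target
//     HostOnly();   // err_omp_wrong_device_function_call during device compile
//   }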
1660 
1661 void Sema::checkOpenMPHostFunction(SourceLocation Loc, FunctionDecl *Callee,
1662  bool CheckCaller) {
1663  assert(LangOpts.OpenMP && !LangOpts.OpenMPIsDevice &&
1664  "Expected OpenMP host compilation.");
1665  assert(Callee && "Callee may not be null.");
1666  Callee = Callee->getMostRecentDecl();
1667  FunctionDecl *Caller = getCurFunctionDecl();
1668 
1669  // Device-only functions are not available on the host.
1670  if (Caller) {
1671  FunctionEmissionStatus CallerS = getEmissionStatus(Caller);
1672  FunctionEmissionStatus CalleeS = getEmissionStatus(Callee);
1673  assert(
1674  (LangOpts.CUDA || (CallerS != FunctionEmissionStatus::CUDADiscarded &&
1675  CalleeS != FunctionEmissionStatus::CUDADiscarded)) &&
1676  "CUDADiscarded unexpected in OpenMP host function check");
1677  if (CallerS == FunctionEmissionStatus::Emitted &&
1678  CalleeS == FunctionEmissionStatus::OMPDiscarded) {
1679  StringRef NoHostDevTy = getOpenMPSimpleClauseTypeName(
1680  OMPC_device_type, OMPC_DEVICE_TYPE_nohost);
1681  Diag(Loc, diag::err_omp_wrong_device_function_call) << NoHostDevTy << 1;
1682  Diag(Callee->getAttr<OMPDeclareTargetDeclAttr>()->getLocation(),
1683  diag::note_omp_marked_device_type_here)
1684  << NoHostDevTy;
1685  return;
1686  }
1687  }
1688  // If the caller is known-emitted, mark the callee as known-emitted.
1689  // Otherwise, mark the call in our call graph so we can traverse it later.
1690  if (!shouldIgnoreInHostDeviceCheck(Callee)) {
1691  if ((!CheckCaller && !Caller) ||
1692  (Caller &&
1693  getEmissionStatus(Caller) == FunctionEmissionStatus::Emitted))
1694  markKnownEmitted(
1695  *this, Caller, Callee, Loc, [CheckCaller](Sema &S, FunctionDecl *FD) {
1696  return CheckCaller &&
1697  S.getEmissionStatus(FD) == FunctionEmissionStatus::Emitted;
1698  });
1699  else if (Caller)
1700  DeviceCallGraph[Caller].insert({Callee, Loc});
1701  }
1702 }
1703 
1704 void Sema::checkOpenMPDeviceExpr(const Expr *E) {
1705  assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
1706  "OpenMP device compilation mode is expected.");
1707  QualType Ty = E->getType();
1708  if ((Ty->isFloat16Type() && !Context.getTargetInfo().hasFloat16Type()) ||
1709  ((Ty->isFloat128Type() ||
1710  (Ty->isRealFloatingType() && Context.getTypeSize(Ty) == 128)) &&
1711  !Context.getTargetInfo().hasFloat128Type()) ||
1712  (Ty->isIntegerType() && Context.getTypeSize(Ty) == 128 &&
1713  !Context.getTargetInfo().hasInt128Type()))
1714  targetDiag(E->getExprLoc(), diag::err_omp_unsupported_type)
1715  << static_cast<unsigned>(Context.getTypeSize(Ty)) << Ty
1716  << Context.getTargetInfo().getTriple().str() << E->getSourceRange();
1717 }
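// Illustrative example (hypothetical user code, not from this file), assuming
// an offload target without 128-bit floating-point support:
//
//   __float128 Q = 0;
//   void G() {
//   #pragma omp target map(tofrom : Q)
//     Q += 1;   // err_omp_unsupported_type on such targets
//   }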
1718 
1719 bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
1720  unsigned OpenMPCaptureLevel) const {
1721  assert(LangOpts.OpenMP && "OpenMP is not allowed");
1722 
1723  ASTContext &Ctx = getASTContext();
1724  bool IsByRef = true;
1725 
1726  // Find the directive that is associated with the provided scope.
1727  D = cast<ValueDecl>(D->getCanonicalDecl());
1728  QualType Ty = D->getType();
1729 
1730  bool IsVariableUsedInMapClause = false;
1731  if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective, Level)) {
1732  // This table summarizes how a given variable should be passed to the device
1733  // depending on its type and the clauses where it appears. The table is based on
1734  // the description in OpenMP 4.5 [2.10.4, target Construct] and
1735  // OpenMP 4.5 [2.15.5, Data-mapping Attribute Rules and Clauses].
1736  //
1737  // =========================================================================
1738  // | type | defaultmap | pvt | first | is_device_ptr | map | res. |
1739  // | |(tofrom:scalar)| | pvt | | | |
1740  // =========================================================================
1741  // | scl | | | | - | | bycopy|
1742  // | scl | | - | x | - | - | bycopy|
1743  // | scl | | x | - | - | - | null |
1744  // | scl | x | | | - | | byref |
1745  // | scl | x | - | x | - | - | bycopy|
1746  // | scl | x | x | - | - | - | null |
1747  // | scl | | - | - | - | x | byref |
1748  // | scl | x | - | - | - | x | byref |
1749  //
1750  // | agg | n.a. | | | - | | byref |
1751  // | agg | n.a. | - | x | - | - | byref |
1752  // | agg | n.a. | x | - | - | - | null |
1753  // | agg | n.a. | - | - | - | x | byref |
1754  // | agg | n.a. | - | - | - | x[] | byref |
1755  //
1756  // | ptr | n.a. | | | - | | bycopy|
1757  // | ptr | n.a. | - | x | - | - | bycopy|
1758  // | ptr | n.a. | x | - | - | - | null |
1759  // | ptr | n.a. | - | - | - | x | byref |
1760  // | ptr | n.a. | - | - | - | x[] | bycopy|
1761  // | ptr | n.a. | - | - | x | | bycopy|
1762  // | ptr | n.a. | - | - | x | x | bycopy|
1763  // | ptr | n.a. | - | - | x | x[] | bycopy|
1764  // =========================================================================
1765  // Legend:
1766  // scl - scalar
1767  // ptr - pointer
1768  // agg - aggregate
1769  // x - applies
1770  // - - invalid in this combination
1771  // [] - mapped with an array section
1772  // byref - should be mapped by reference
1773  // bycopy - should be mapped by value (by copy)
1774  // null - initialize a local variable to null on the device
1775  //
1776  // Observations:
1777  // - All scalar declarations that show up in a map clause have to be passed
1778  // by reference, because they may have been mapped in the enclosing data
1779  // environment.
1780  // - If the scalar value does not fit the size of uintptr, it has to be
1781  // passed by reference, regardless of the result in the table above.
1782  // - For pointers mapped by value that have either an implicit map or an
1783  // array section, the runtime library may pass the NULL value to the
1784  // device instead of the value passed to it by the compiler.
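  // Illustrative examples (hypothetical user code, not from this file) of a
  // few rows of the table above:
  //
  //   int S = 0; int *P = &S;
  //   #pragma omp target map(S)              // scalar in a map clause   -> byref
  //   S += 1;
  //   #pragma omp target firstprivate(S)     // scalar firstprivate      -> bycopy
  //   S += 1;
  //   #pragma omp target map(P[0:1])         // pointer + array section  -> bycopy
  //   P[0] += 1;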
1785 
1786  if (Ty->isReferenceType())
1787  Ty = Ty->castAs<ReferenceType>()->getPointeeType();
1788 
1789  // Locate map clauses and see if the variable being captured is referred to
1790  // in any of those clauses. Here we only care about variables, not fields,
1791  // because fields are part of aggregates.
1792  bool IsVariableAssociatedWithSection = false;
1793 
1794  DSAStack->checkMappableExprComponentListsForDeclAtLevel(
1795  D, Level,
1796  [&IsVariableUsedInMapClause, &IsVariableAssociatedWithSection, D](
1797  OMPClauseMappableExprCommon::MappableExprComponentListRef
1798  MapExprComponents,
1799  OpenMPClauseKind WhereFoundClauseKind) {
1800  // Only the map clause information influences how a variable is
1801  // captured. E.g. is_device_ptr does not require changing the default
1802  // behavior.
1803  if (WhereFoundClauseKind != OMPC_map)
1804  return false;
1805 
1806  auto EI = MapExprComponents.rbegin();
1807  auto EE = MapExprComponents.rend();
1808 
1809  assert(EI != EE && "Invalid map expression!");
1810 
1811  if (isa<DeclRefExpr>(EI->getAssociatedExpression()))
1812  IsVariableUsedInMapClause |= EI->getAssociatedDeclaration() == D;
1813 
1814  ++EI;
1815  if (EI == EE)
1816  return false;
1817 
1818  if (isa<ArraySubscriptExpr>(EI->getAssociatedExpression()) ||
1819  isa<OMPArraySectionExpr>(EI->getAssociatedExpression()) ||
1820  isa<MemberExpr>(EI->getAssociatedExpression())) {
1821  IsVariableAssociatedWithSection = true;
1822  // There is nothing more we need to know about this variable.
1823  return true;
1824  }
1825 
1826  // Keep looking for more map info.
1827  return false;
1828  });
1829 
1830  if (IsVariableUsedInMapClause) {
1831  // If the variable is identified in a map clause, it is always captured by
1832  // reference, except if it is a pointer that is dereferenced somehow.
1833  IsByRef = !(Ty->isPointerType() && IsVariableAssociatedWithSection);
1834  } else {
1835  // By default, all the data that has a scalar type is mapped by copy
1836  // (except for reduction variables).
1837  IsByRef =
1838  (DSAStack->isForceCaptureByReferenceInTargetExecutable() &&
1839  !Ty->isAnyPointerType()) ||
1840  !Ty->isScalarType() ||
1841  DSAStack->getDefaultDMAAtLevel(Level) == DMA_tofrom_scalar ||
1842  DSAStack->hasExplicitDSA(
1843  D, [](OpenMPClauseKind K) { return K == OMPC_reduction; }, Level);
1844  }
1845  }
1846 
1847  if (IsByRef && Ty.getNonReferenceType()->isScalarType()) {
1848  IsByRef =
1849  ((IsVariableUsedInMapClause &&
1850  DSAStack->getCaptureRegion(Level, OpenMPCaptureLevel) ==
1851  OMPD_target) ||
1852  !DSAStack->hasExplicitDSA(
1853  D,
1854  [](OpenMPClauseKind K) -> bool { return K == OMPC_firstprivate; },
1855  Level, /*NotLastprivate=*/true)) &&
1856  // If the variable is artificial and must be captured by value - try to
1857  // capture by value.
1858  !(isa<OMPCapturedExprDecl>(D) && !D->hasAttr<OMPCaptureNoInitAttr>() &&
1859  !cast<OMPCapturedExprDecl>(D)->getInit()->isGLValue());
1860  }
1861 
1862  // When passing data by copy, we need to make sure it fits the uintptr size
1863  // and alignment, because the runtime library only deals with uintptr types.
1864  // If it does not fit the uintptr size, we need to pass the data by reference
1865  // instead.
1866  if (!IsByRef &&
1867  (Ctx.getTypeSizeInChars(Ty) >
1868  Ctx.getTypeSizeInChars(Ctx.getUIntPtrType()) ||
1869  Ctx.getDeclAlign(D) > Ctx.getTypeAlignInChars(Ctx.getUIntPtrType()))) {
1870  IsByRef = true;
1871  }
1872 
1873  return IsByRef;
1874 }
1875 
1876 unsigned Sema::getOpenMPNestingLevel() const {
1877  assert(getLangOpts().OpenMP);
1878  return DSAStack->getNestingLevel();
1879 }
1880 
1881 bool Sema::isInOpenMPTargetExecutionDirective() const {
1882  return (isOpenMPTargetExecutionDirective(DSAStack->getCurrentDirective()) &&
1883  !DSAStack->isClauseParsingMode()) ||
1884  DSAStack->hasDirective(
1885  [](OpenMPDirectiveKind K, const DeclarationNameInfo &,
1886  SourceLocation) -> bool {
1887  return isOpenMPTargetExecutionDirective(K);
1888  },
1889  false);
1890 }
1891 
1892 VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo,
1893  unsigned StopAt) {
1894  assert(LangOpts.OpenMP && "OpenMP is not allowed");
1895  D = getCanonicalDecl(D);
1896 
1897  // If we want to determine whether the variable should be captured from the
1898  // perspective of the current capturing scope, and we've already left all the
1899  // capturing scopes of the top directive on the stack, check from the
1900  // perspective of its parent directive (if any) instead.
1901  DSAStackTy::ParentDirectiveScope InParentDirectiveRAII(
1902  *DSAStack, CheckScopeInfo && DSAStack->isBodyComplete());
1903 
1904  // If we are attempting to capture a global variable in a directive with
1905  // 'target' we return the declaration so that the global is also mapped to the device.
1906  //
1907  auto *VD = dyn_cast<VarDecl>(D);
1908  if (VD && !VD->hasLocalStorage() &&
1909  (getCurCapturedRegion() || getCurBlock() || getCurLambda())) {
1910  if (isInOpenMPDeclareTargetContext()) {
1911  // Try to mark variable as declare target if it is used in capturing
1912  // regions.
1913  if (LangOpts.OpenMP <= 45 &&
1914  !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
1915  checkDeclIsAllowedInOpenMPTarget(nullptr, VD);
1916  return nullptr;
1917  } else if (isInOpenMPTargetExecutionDirective()) {
1918  // If the declaration is enclosed in a 'declare target' directive,
1919  // then it should not be captured.
1920  //
1921  if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
1922  return nullptr;
1923  return VD;
1924  }
1925  }
1926 
1927  if (CheckScopeInfo) {
1928  bool OpenMPFound = false;
1929  for (unsigned I = StopAt + 1; I > 0; --I) {
1930  FunctionScopeInfo *FSI = FunctionScopes[I - 1];
1931  if (!isa<CapturingScopeInfo>(FSI))
1932  return nullptr;
1933  if (auto *RSI = dyn_cast<CapturedRegionScopeInfo>(FSI))
1934  if (RSI->CapRegionKind == CR_OpenMP) {
1935  OpenMPFound = true;
1936  break;
1937  }
1938  }
1939  if (!OpenMPFound)
1940  return nullptr;
1941  }
1942 
1943  if (DSAStack->getCurrentDirective() != OMPD_unknown &&
1944  (!DSAStack->isClauseParsingMode() ||
1945  DSAStack->getParentDirective() != OMPD_unknown)) {
1946  auto &&Info = DSAStack->isLoopControlVariable(D);
1947  if (Info.first ||
1948  (VD && VD->hasLocalStorage() &&
1949  isImplicitOrExplicitTaskingRegion(DSAStack->getCurrentDirective())) ||
1950  (VD && DSAStack->isForceVarCapturing()))
1951  return VD ? VD : Info.second;
1952  DSAStackTy::DSAVarData DVarPrivate =
1953  DSAStack->getTopDSA(D, DSAStack->isClauseParsingMode());
1954  if (DVarPrivate.CKind != OMPC_unknown && isOpenMPPrivate(DVarPrivate.CKind))
1955  return VD ? VD : cast<VarDecl>(DVarPrivate.PrivateCopy->getDecl());
1956  // Threadprivate variables must not be captured.
1957  if (isOpenMPThreadPrivate(DVarPrivate.CKind))
1958  return nullptr;
1959  // The variable is not private or it is the variable in the directive with
1960  // default(none) clause and not used in any clause.
1961  DVarPrivate = DSAStack->hasDSA(D, isOpenMPPrivate,
1962  [](OpenMPDirectiveKind) { return true; },
1963  DSAStack->isClauseParsingMode());
1964  if (DVarPrivate.CKind != OMPC_unknown ||
1965  (VD && DSAStack->getDefaultDSA() == DSA_none))
1966  return VD ? VD : cast<VarDecl>(DVarPrivate.PrivateCopy->getDecl());
1967  }
1968  return nullptr;
1969 }
1970 
1971 void Sema::adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
1972  unsigned Level) const {
1973  SmallVector<OpenMPDirectiveKind, 4> Regions;
1974  getOpenMPCaptureRegions(Regions, DSAStack->getDirective(Level));
1975  FunctionScopesIndex -= Regions.size();
1976 }
1977 
1978 void Sema::startOpenMPLoop() {
1979  assert(LangOpts.OpenMP && "OpenMP must be enabled.");
1980  if (isOpenMPLoopDirective(DSAStack->getCurrentDirective()))
1981  DSAStack->loopInit();
1982 }
1983 
1984 void Sema::startOpenMPCXXRangeFor() {
1985  assert(LangOpts.OpenMP && "OpenMP must be enabled.");
1986  if (isOpenMPLoopDirective(DSAStack->getCurrentDirective())) {
1987  DSAStack->resetPossibleLoopCounter();
1988  DSAStack->loopStart();
1989  }
1990 }
1991 
1992 bool Sema::isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const {
1993  assert(LangOpts.OpenMP && "OpenMP is not allowed");
1994  if (isOpenMPLoopDirective(DSAStack->getCurrentDirective())) {
1995  if (DSAStack->getAssociatedLoops() > 0 &&
1996  !DSAStack->isLoopStarted()) {
1997  DSAStack->resetPossibleLoopCounter(D);
1998  DSAStack->loopStart();
1999  return true;
2000  }
2001  if ((DSAStack->getPossiblyLoopCunter() == D->getCanonicalDecl() ||
2002  DSAStack->isLoopControlVariable(D).first) &&
2003  !DSAStack->hasExplicitDSA(
2004  D, [](OpenMPClauseKind K) { return K != OMPC_private; }, Level) &&
2005  !isOpenMPSimdDirective(DSAStack->getCurrentDirective()))
2006  return true;
2007  }
2008  if (const auto *VD = dyn_cast<VarDecl>(D)) {
2009  if (DSAStack->isThreadPrivate(const_cast<VarDecl *>(VD)) &&
2010  DSAStack->isForceVarCapturing() &&
2011  !DSAStack->hasExplicitDSA(
2012  D, [](OpenMPClauseKind K) { return K == OMPC_copyin; }, Level))
2013  return true;
2014  }
2015  return DSAStack->hasExplicitDSA(
2016  D, [](OpenMPClauseKind K) { return K == OMPC_private; }, Level) ||
2017  (DSAStack->isClauseParsingMode() &&
2018  DSAStack->getClauseParsingMode() == OMPC_private) ||
2019  // Consider taskgroup reduction descriptor variable a private to avoid
2020  // possible capture in the region.
2021  (DSAStack->hasExplicitDirective(
2022  [](OpenMPDirectiveKind K) { return K == OMPD_taskgroup; },
2023  Level) &&
2024  DSAStack->isTaskgroupReductionRef(D, Level));
2025 }
2026 
2027 void Sema::setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D,
2028  unsigned Level) {
2029  assert(LangOpts.OpenMP && "OpenMP is not allowed");
2030  D = getCanonicalDecl(D);
2031  OpenMPClauseKind OMPC = OMPC_unknown;
2032  for (unsigned I = DSAStack->getNestingLevel() + 1; I > Level; --I) {
2033  const unsigned NewLevel = I - 1;
2034  if (DSAStack->hasExplicitDSA(D,
2035  [&OMPC](const OpenMPClauseKind K) {
2036  if (isOpenMPPrivate(K)) {
2037  OMPC = K;
2038  return true;
2039  }
2040  return false;
2041  },
2042  NewLevel))
2043  break;
2044  if (DSAStack->checkMappableExprComponentListsForDeclAtLevel(
2045  D, NewLevel,
2046  [](OMPClauseMappableExprCommon::MappableExprComponentListRef,
2047  OpenMPClauseKind) { return true; })) {
2048  OMPC = OMPC_map;
2049  break;
2050  }
2051  if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective,
2052  NewLevel)) {
2053  OMPC = OMPC_map;
2054  if (D->getType()->isScalarType() &&
2055  DSAStack->getDefaultDMAAtLevel(NewLevel) !=
2056  DefaultMapAttributes::DMA_tofrom_scalar)
2057  OMPC = OMPC_firstprivate;
2058  break;
2059  }
2060  }
2061  if (OMPC != OMPC_unknown)
2062  FD->addAttr(OMPCaptureKindAttr::CreateImplicit(Context, OMPC));
2063 }
2064 
2065 bool Sema::isOpenMPTargetCapturedDecl(const ValueDecl *D,
2066  unsigned Level) const {
2067  assert(LangOpts.OpenMP && "OpenMP is not allowed");
2068  // Return true if the current level is no longer enclosed in a target region.
2069 
2070  const auto *VD = dyn_cast<VarDecl>(D);
2071  return VD && !VD->hasLocalStorage() &&
2072  DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective,
2073  Level);
2074 }
2075 
2076 void Sema::DestroyDataSharingAttributesStack() { delete DSAStack; }
2077 
2078 void Sema::finalizeOpenMPDelayedAnalysis() {
2079  assert(LangOpts.OpenMP && "Expected OpenMP compilation mode.");
2080  // Diagnose implicit declare target functions and their callees.
2081  for (const auto &CallerCallees : DeviceCallGraph) {
2082  Optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
2083  OMPDeclareTargetDeclAttr::getDeviceType(
2084  CallerCallees.getFirst()->getMostRecentDecl());
2085  // Ignore host functions during device analysis.
2086  if (LangOpts.OpenMPIsDevice && DevTy &&
2087  *DevTy == OMPDeclareTargetDeclAttr::DT_Host)
2088  continue;
2089  // Ignore nohost functions during host analysis.
2090  if (!LangOpts.OpenMPIsDevice && DevTy &&
2091  *DevTy == OMPDeclareTargetDeclAttr::DT_NoHost)
2092  continue;
2093  for (const std::pair<CanonicalDeclPtr<FunctionDecl>, SourceLocation>
2094  &Callee : CallerCallees.getSecond()) {
2095  const FunctionDecl *FD = Callee.first->getMostRecentDecl();
2096  Optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
2097  OMPDeclareTargetDeclAttr::getDeviceType(FD);
2098  if (LangOpts.OpenMPIsDevice && DevTy &&
2099  *DevTy == OMPDeclareTargetDeclAttr::DT_Host) {
2100  // Diagnose host function called during device codegen.
2101  StringRef HostDevTy = getOpenMPSimpleClauseTypeName(
2102  OMPC_device_type, OMPC_DEVICE_TYPE_host);
2103  Diag(Callee.second, diag::err_omp_wrong_device_function_call)
2104  << HostDevTy << 0;
2105  Diag(FD->getAttr<OMPDeclareTargetDeclAttr>()->getLocation(),
2106  diag::note_omp_marked_device_type_here)
2107  << HostDevTy;
2108  continue;
2109  }
2110  if (!LangOpts.OpenMPIsDevice && DevTy &&
2111  *DevTy == OMPDeclareTargetDeclAttr::DT_NoHost) {
2112  // Diagnose nohost function called during host codegen.
2113  StringRef NoHostDevTy = getOpenMPSimpleClauseTypeName(
2114  OMPC_device_type, OMPC_DEVICE_TYPE_nohost);
2115  Diag(Callee.second, diag::err_omp_wrong_device_function_call)
2116  << NoHostDevTy << 1;
2117  Diag(FD->getAttr<OMPDeclareTargetDeclAttr>()->getLocation(),
2118  diag::note_omp_marked_device_type_here)
2119  << NoHostDevTy;
2120  continue;
2121  }
2122  }
2123  }
2124 }
2125 
2126 void Sema::StartOpenMPDSABlock(OpenMPDirectiveKind DKind,
2127  const DeclarationNameInfo &DirName,
2128  Scope *CurScope, SourceLocation Loc) {
2129  DSAStack->push(DKind, DirName, CurScope, Loc);
2130  PushExpressionEvaluationContext(
2131  ExpressionEvaluationContext::PotentiallyEvaluated);
2132 }
2133 
2134 void Sema::StartOpenMPClause(OpenMPClauseKind K) {
2135  DSAStack->setClauseParsingMode(K);
2136 }
2137 
2138 void Sema::EndOpenMPClause() {
2139  DSAStack->setClauseParsingMode(/*K=*/OMPC_unknown);
2140 }
2141 
2142 static void checkAllocateClauses(Sema &S, DSAStackTy *Stack,
2143  ArrayRef<OMPClause *> Clauses);
2144 
2145 void Sema::EndOpenMPDSABlock(Stmt *CurDirective) {
2146  // OpenMP [2.14.3.5, Restrictions, C/C++, p.1]
2147  // A variable of class type (or array thereof) that appears in a lastprivate
2148  // clause requires an accessible, unambiguous default constructor for the
2149  // class type, unless the list item is also specified in a firstprivate
2150  // clause.
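  // Illustrative example (hypothetical user code, not from this file):
  //
  //   struct S { S(); S(const S &); S &operator=(const S &); };
  //   S Obj;
  //   #pragma omp parallel for lastprivate(Obj)
  //   for (int I = 0; I < 10; ++I)
  //     Obj = S();
  //
  // The default constructor of 'S' is required because the private copy of
  // 'Obj' is default-initialized; if 'Obj' were also firstprivate, the copy
  // constructor would be used for the initialization instead.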
2151  if (const auto *D = dyn_cast_or_null<OMPExecutableDirective>(CurDirective)) {
2152  for (OMPClause *C : D->clauses()) {
2153  if (auto *Clause = dyn_cast<OMPLastprivateClause>(C)) {
2154  SmallVector<Expr *, 8> PrivateCopies;
2155  for (Expr *DE : Clause->varlists()) {
2156  if (DE->isValueDependent() || DE->isTypeDependent()) {
2157  PrivateCopies.push_back(nullptr);
2158  continue;
2159  }
2160  auto *DRE = cast<DeclRefExpr>(DE->IgnoreParens());
2161  auto *VD = cast<VarDecl>(DRE->getDecl());
2162  QualType Type = VD->getType().getNonReferenceType();
2163  const DSAStackTy::DSAVarData DVar =
2164  DSAStack->getTopDSA(VD, /*FromParent=*/false);
2165  if (DVar.CKind == OMPC_lastprivate) {
2166  // Generate helper private variable and initialize it with the
2167  // default value. The address of the original variable is replaced
2168  // by the address of the new private variable in CodeGen. This new
2169  // variable is not added to IdResolver, so the code in the OpenMP
2170  // region uses the original variable for proper diagnostics.
2171  VarDecl *VDPrivate = buildVarDecl(
2172  *this, DE->getExprLoc(), Type.getUnqualifiedType(),
2173  VD->getName(), VD->hasAttrs() ? &VD->getAttrs() : nullptr, DRE);
2174  ActOnUninitializedDecl(VDPrivate);
2175  if (VDPrivate->isInvalidDecl()) {
2176  PrivateCopies.push_back(nullptr);
2177  continue;
2178  }
2179  PrivateCopies.push_back(buildDeclRefExpr(
2180  *this, VDPrivate, DE->getType(), DE->getExprLoc()));
2181  } else {
2182  // The variable is also a firstprivate, so the initialization sequence
2183  // for the private copy has already been generated.
2184  PrivateCopies.push_back(nullptr);
2185  }
2186  }
2187  Clause->setPrivateCopies(PrivateCopies);
2188  }
2189  }
2190  // Check allocate clauses.
2191  if (!CurContext->isDependentContext())
2192  checkAllocateClauses(*this, DSAStack, D->clauses());
2193  }
2194 
2195  DSAStack->pop();
2196  DiscardCleanupsInEvaluationContext();
2197  PopExpressionEvaluationContext();
2198 }
2199 
2200 static bool FinishOpenMPLinearClause(OMPLinearClause &Clause, DeclRefExpr *IV,
2201  Expr *NumIterations, Sema &SemaRef,
2202  Scope *S, DSAStackTy *Stack);
2203 
2204 namespace {
2205 
2206 class VarDeclFilterCCC final : public CorrectionCandidateCallback {
2207 private:
2208  Sema &SemaRef;
2209 
2210 public:
2211  explicit VarDeclFilterCCC(Sema &S) : SemaRef(S) {}
2212  bool ValidateCandidate(const TypoCorrection &Candidate) override {
2213  NamedDecl *ND = Candidate.getCorrectionDecl();
2214  if (const auto *VD = dyn_cast_or_null<VarDecl>(ND)) {
2215  return VD->hasGlobalStorage() &&
2216  SemaRef.isDeclInScope(ND, SemaRef.getCurLexicalContext(),
2217  SemaRef.getCurScope());
2218  }
2219  return false;
2220  }
2221 
2222  std::unique_ptr<CorrectionCandidateCallback> clone() override {
2223  return std::make_unique<VarDeclFilterCCC>(*this);
2224  }
2225 
2226 };
2227 
2228 class VarOrFuncDeclFilterCCC final : public CorrectionCandidateCallback {
2229 private:
2230  Sema &SemaRef;
2231 
2232 public:
2233  explicit VarOrFuncDeclFilterCCC(Sema &S) : SemaRef(S) {}
2234  bool ValidateCandidate(const TypoCorrection &Candidate) override {
2235  NamedDecl *ND = Candidate.getCorrectionDecl();
2236  if (ND && ((isa<VarDecl>(ND) && ND->getKind() == Decl::Var) ||
2237  isa<FunctionDecl>(ND))) {
2238  return SemaRef.isDeclInScope(ND, SemaRef.getCurLexicalContext(),
2239  SemaRef.getCurScope());
2240  }
2241  return false;
2242  }
2243 
2244  std::unique_ptr<CorrectionCandidateCallback> clone() override {
2245  return std::make_unique<VarOrFuncDeclFilterCCC>(*this);
2246  }
2247 };
2248 
2249 } // namespace
2250 
2251 ExprResult Sema::ActOnOpenMPIdExpression(Scope *CurScope,
2252  CXXScopeSpec &ScopeSpec,
2253  const DeclarationNameInfo &Id,
2254  OpenMPDirectiveKind Kind) {
2255  LookupResult Lookup(*this, Id, LookupOrdinaryName);
2256  LookupParsedName(Lookup, CurScope, &ScopeSpec, true);
2257 
2258  if (Lookup.isAmbiguous())
2259  return ExprError();
2260 
2261  VarDecl *VD;
2262  if (!Lookup.isSingleResult()) {
2263  VarDeclFilterCCC CCC(*this);
2264  if (TypoCorrection Corrected =
2265  CorrectTypo(Id, LookupOrdinaryName, CurScope, nullptr, CCC,
2266  CTK_ErrorRecovery)) {
2267  diagnoseTypo(Corrected,
2268  PDiag(Lookup.empty()
2269  ? diag::err_undeclared_var_use_suggest
2270  : diag::err_omp_expected_var_arg_suggest)
2271  << Id.getName());
2272  VD = Corrected.getCorrectionDeclAs<VarDecl>();
2273  } else {
2274  Diag(Id.getLoc(), Lookup.empty() ? diag::err_undeclared_var_use
2275  : diag::err_omp_expected_var_arg)
2276  << Id.getName();
2277  return ExprError();
2278  }
2279  } else if (!(VD = Lookup.getAsSingle<VarDecl>())) {
2280  Diag(Id.getLoc(), diag::err_omp_expected_var_arg) << Id.getName();
2281  Diag(Lookup.getFoundDecl()->getLocation(), diag::note_declared_at);
2282  return ExprError();
2283  }
2284  Lookup.suppressDiagnostics();
2285 
2286  // OpenMP [2.9.2, Syntax, C/C++]
2287  // Variables must be file-scope, namespace-scope, or static block-scope.
2288  if (Kind == OMPD_threadprivate && !VD->hasGlobalStorage()) {
2289  Diag(Id.getLoc(), diag::err_omp_global_var_arg)
2290  << getOpenMPDirectiveName(Kind) << !VD->isStaticLocal();
2291  bool IsDecl =
2292  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
2293  Diag(VD->getLocation(),
2294  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2295  << VD;
2296  return ExprError();
2297  }
2298 
2299  VarDecl *CanonicalVD = VD->getCanonicalDecl();
2300  NamedDecl *ND = CanonicalVD;
2301  // OpenMP [2.9.2, Restrictions, C/C++, p.2]
2302  // A threadprivate directive for file-scope variables must appear outside
2303  // any definition or declaration.
2304  if (CanonicalVD->getDeclContext()->isTranslationUnit() &&
2305  !getCurLexicalContext()->isTranslationUnit()) {
2306  Diag(Id.getLoc(), diag::err_omp_var_scope)
2307  << getOpenMPDirectiveName(Kind) << VD;
2308  bool IsDecl =
2309  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
2310  Diag(VD->getLocation(),
2311  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2312  << VD;
2313  return ExprError();
2314  }
2315  // OpenMP [2.9.2, Restrictions, C/C++, p.3]
2316  // A threadprivate directive for static class member variables must appear
2317  // in the class definition, in the same scope in which the member
2318  // variables are declared.
2319  if (CanonicalVD->isStaticDataMember() &&
2320  !CanonicalVD->getDeclContext()->Equals(getCurLexicalContext())) {
2321  Diag(Id.getLoc(), diag::err_omp_var_scope)
2322  << getOpenMPDirectiveName(Kind) << VD;
2323  bool IsDecl =
2324  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
2325  Diag(VD->getLocation(),
2326  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2327  << VD;
2328  return ExprError();
2329  }
2330  // OpenMP [2.9.2, Restrictions, C/C++, p.4]
2331  // A threadprivate directive for namespace-scope variables must appear
2332  // outside any definition or declaration other than the namespace
2333  // definition itself.
2334  if (CanonicalVD->getDeclContext()->isNamespace() &&
2335  (!getCurLexicalContext()->isFileContext() ||
2336  !getCurLexicalContext()->Encloses(CanonicalVD->getDeclContext()))) {
2337  Diag(Id.getLoc(), diag::err_omp_var_scope)
2338  << getOpenMPDirectiveName(Kind) << VD;
2339  bool IsDecl =
2340  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
2341  Diag(VD->getLocation(),
2342  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2343  << VD;
2344  return ExprError();
2345  }
2346  // OpenMP [2.9.2, Restrictions, C/C++, p.6]
2347  // A threadprivate directive for static block-scope variables must appear
2348  // in the scope of the variable and not in a nested scope.
2349  if (CanonicalVD->isLocalVarDecl() && CurScope &&
2350  !isDeclInScope(ND, getCurLexicalContext(), CurScope)) {
2351  Diag(Id.getLoc(), diag::err_omp_var_scope)
2352  << getOpenMPDirectiveName(Kind) << VD;
2353  bool IsDecl =
2354  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
2355  Diag(VD->getLocation(),
2356  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2357  << VD;
2358  return ExprError();
2359  }
2360 
2361  // OpenMP [2.9.2, Restrictions, C/C++, p.2-6]
2362  // A threadprivate directive must lexically precede all references to any
2363  // of the variables in its list.
2364  if (Kind == OMPD_threadprivate && VD->isUsed() &&
2365  !DSAStack->isThreadPrivate(VD)) {
2366  Diag(Id.getLoc(), diag::err_omp_var_used)
2367  << getOpenMPDirectiveName(Kind) << VD;
2368  return ExprError();
2369  }
2370 
2371  QualType ExprType = VD->getType().getNonReferenceType();
2372  return DeclRefExpr::Create(Context, NestedNameSpecifierLoc(),
2373  SourceLocation(), VD,
2374  /*RefersToEnclosingVariableOrCapture=*/false,
2375  Id.getLoc(), ExprType, VK_LValue);
2376 }
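// Illustrative examples (hypothetical user code, not from this file) of the
// scope restrictions checked above:
//
//   int GlobalVar;
//   #pragma omp threadprivate(GlobalVar)      // OK: same (file) scope
//
//   void F() {
//     static int StaticLocal;
//     {
//   #pragma omp threadprivate(StaticLocal)    // error: not in the scope of
//     }                                       // 'StaticLocal' itself
//   }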
2377 
2378 Sema::DeclGroupPtrTy
2379 Sema::ActOnOpenMPThreadprivateDirective(SourceLocation Loc,
2380  ArrayRef<Expr *> VarList) {
2381  if (OMPThreadPrivateDecl *D = CheckOMPThreadPrivateDecl(Loc, VarList)) {
2382  CurContext->addDecl(D);
2383  return DeclGroupPtrTy::make(DeclGroupRef(D));
2384  }
2385  return nullptr;
2386 }
2387 
2388 namespace {
2389 class LocalVarRefChecker final
2390  : public ConstStmtVisitor<LocalVarRefChecker, bool> {
2391  Sema &SemaRef;
2392 
2393 public:
2394  bool VisitDeclRefExpr(const DeclRefExpr *E) {
2395  if (const auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
2396  if (VD->hasLocalStorage()) {
2397  SemaRef.Diag(E->getBeginLoc(),
2398  diag::err_omp_local_var_in_threadprivate_init)
2399  << E->getSourceRange();
2400  SemaRef.Diag(VD->getLocation(), diag::note_defined_here)
2401  << VD << VD->getSourceRange();
2402  return true;
2403  }
2404  }
2405  return false;
2406  }
2407  bool VisitStmt(const Stmt *S) {
2408  for (const Stmt *Child : S->children()) {
2409  if (Child && Visit(Child))
2410  return true;
2411  }
2412  return false;
2413  }
2414  explicit LocalVarRefChecker(Sema &SemaRef) : SemaRef(SemaRef) {}
2415 };
2416 } // namespace
2417 
2418 OMPThreadPrivateDecl *
2419 Sema::CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList) {
2420  SmallVector<Expr *, 8> Vars;
2421  for (Expr *RefExpr : VarList) {
2422  auto *DE = cast<DeclRefExpr>(RefExpr);
2423  auto *VD = cast<VarDecl>(DE->getDecl());
2424  SourceLocation ILoc = DE->getExprLoc();
2425 
2426  // Mark variable as used.
2427  VD->setReferenced();
2428  VD->markUsed(Context);
2429 
2430  QualType QType = VD->getType();
2431  if (QType->isDependentType() || QType->isInstantiationDependentType()) {
2432  // It will be analyzed later.
2433  Vars.push_back(DE);
2434  continue;
2435  }
2436 
2437  // OpenMP [2.9.2, Restrictions, C/C++, p.10]
2438  // A threadprivate variable must not have an incomplete type.
2439  if (RequireCompleteType(ILoc, VD->getType(),
2440  diag::err_omp_threadprivate_incomplete_type)) {
2441  continue;
2442  }
2443 
2444  // OpenMP [2.9.2, Restrictions, C/C++, p.10]
2445  // A threadprivate variable must not have a reference type.
2446  if (VD->getType()->isReferenceType()) {
2447  Diag(ILoc, diag::err_omp_ref_type_arg)
2448  << getOpenMPDirectiveName(OMPD_threadprivate) << VD->getType();
2449  bool IsDecl =
2450  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
2451  Diag(VD->getLocation(),
2452  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2453  << VD;
2454  continue;
2455  }
2456 
2457  // Check if this is a TLS variable. If TLS is not being supported, produce
2458  // the corresponding diagnostic.
2459  if ((VD->getTLSKind() != VarDecl::TLS_None &&
2460  !(VD->hasAttr<OMPThreadPrivateDeclAttr>() &&
2461  getLangOpts().OpenMPUseTLS &&
2462  getASTContext().getTargetInfo().isTLSSupported())) ||
2463  (VD->getStorageClass() == SC_Register && VD->hasAttr<AsmLabelAttr>() &&
2464  !VD->isLocalVarDecl())) {
2465  Diag(ILoc, diag::err_omp_var_thread_local)
2466  << VD << ((VD->getTLSKind() != VarDecl::TLS_None) ? 0 : 1);
2467  bool IsDecl =
2468  VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
2469  Diag(VD->getLocation(),
2470  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2471  << VD;
2472  continue;
2473  }
2474 
2475  // Check if the initial value of the threadprivate variable references a
2476  // variable with local storage (this is not supported by the runtime).
2477  if (const Expr *Init = VD->getAnyInitializer()) {
2478  LocalVarRefChecker Checker(*this);
2479  if (Checker.Visit(Init))
2480  continue;
2481  }
2482 
2483  Vars.push_back(RefExpr);
2484  DSAStack->addDSA(VD, DE, OMPC_threadprivate);
2485  VD->addAttr(OMPThreadPrivateDeclAttr::CreateImplicit(
2486  Context, SourceRange(Loc, Loc)));
2487  if (ASTMutationListener *ML = Context.getASTMutationListener())
2488  ML->DeclarationMarkedOpenMPThreadPrivate(VD);
2489  }
2490  OMPThreadPrivateDecl *D = nullptr;
2491  if (!Vars.empty()) {
2492  D = OMPThreadPrivateDecl::Create(Context, getCurLexicalContext(), Loc,
2493  Vars);
2494  D->setAccess(AS_public);
2495  }
2496  return D;
2497 }
2498 
2499 static OMPAllocateDeclAttr::AllocatorTypeTy
2500 getAllocatorKind(Sema &S, DSAStackTy *Stack, Expr *Allocator) {
2501  if (!Allocator)
2502  return OMPAllocateDeclAttr::OMPDefaultMemAlloc;
2503  if (Allocator->isTypeDependent() || Allocator->isValueDependent() ||
2504  Allocator->isInstantiationDependent() ||
2505  Allocator->containsUnexpandedParameterPack())
2506  return OMPAllocateDeclAttr::OMPUserDefinedMemAlloc;
2507  auto AllocatorKindRes = OMPAllocateDeclAttr::OMPUserDefinedMemAlloc;
2508  const Expr *AE = Allocator->IgnoreParenImpCasts();
2509  for (int I = OMPAllocateDeclAttr::OMPDefaultMemAlloc;
2510  I < OMPAllocateDeclAttr::OMPUserDefinedMemAlloc; ++I) {
2511  auto AllocatorKind = static_cast<OMPAllocateDeclAttr::AllocatorTypeTy>(I);
2512  const Expr *DefAllocator = Stack->getAllocator(AllocatorKind);
2513  llvm::FoldingSetNodeID AEId, DAEId;
2514  AE->Profile(AEId, S.getASTContext(), /*Canonical=*/true);
2515  DefAllocator->Profile(DAEId, S.getASTContext(), /*Canonical=*/true);
2516  if (AEId == DAEId) {
2517  AllocatorKindRes = AllocatorKind;
2518  break;
2519  }
2520  }
2521  return AllocatorKindRes;
2522 }
2523 
2524 static bool checkPreviousOMPAllocateAttribute(
2525  Sema &S, DSAStackTy *Stack, Expr *RefExpr, VarDecl *VD,
2526  OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind, Expr *Allocator) {
2527  if (!VD->hasAttr<OMPAllocateDeclAttr>())
2528  return false;
2529  const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
2530  Expr *PrevAllocator = A->getAllocator();
2531  OMPAllocateDeclAttr::AllocatorTypeTy PrevAllocatorKind =
2532  getAllocatorKind(S, Stack, PrevAllocator);
2533  bool AllocatorsMatch = AllocatorKind == PrevAllocatorKind;
2534  if (AllocatorsMatch &&
2535  AllocatorKind == OMPAllocateDeclAttr::OMPUserDefinedMemAlloc &&
2536  Allocator && PrevAllocator) {
2537  const Expr *AE = Allocator->IgnoreParenImpCasts();
2538  const Expr *PAE = PrevAllocator->IgnoreParenImpCasts();
2539  llvm::FoldingSetNodeID AEId, PAEId;
2540  AE->Profile(AEId, S.Context, /*Canonical=*/true);
2541  PAE->Profile(PAEId, S.Context, /*Canonical=*/true);
2542  AllocatorsMatch = AEId == PAEId;
2543  }
2544  if (!AllocatorsMatch) {
2545  SmallString<256> AllocatorBuffer;
2546  llvm::raw_svector_ostream AllocatorStream(AllocatorBuffer);
2547  if (Allocator)
2548  Allocator->printPretty(AllocatorStream, nullptr, S.getPrintingPolicy());
2549  SmallString<256> PrevAllocatorBuffer;
2550  llvm::raw_svector_ostream PrevAllocatorStream(PrevAllocatorBuffer);
2551  if (PrevAllocator)
2552  PrevAllocator->printPretty(PrevAllocatorStream, nullptr,
2553  S.getPrintingPolicy());
2554 
2555  SourceLocation AllocatorLoc =
2556  Allocator ? Allocator->getExprLoc() : RefExpr->getExprLoc();
2557  SourceRange AllocatorRange =
2558  Allocator ? Allocator->getSourceRange() : RefExpr->getSourceRange();
2559  SourceLocation PrevAllocatorLoc =
2560  PrevAllocator ? PrevAllocator->getExprLoc() : A->getLocation();
2561  SourceRange PrevAllocatorRange =
2562  PrevAllocator ? PrevAllocator->getSourceRange() : A->getRange();
2563  S.Diag(AllocatorLoc, diag::warn_omp_used_different_allocator)
2564  << (Allocator ? 1 : 0) << AllocatorStream.str()
2565  << (PrevAllocator ? 1 : 0) << PrevAllocatorStream.str()
2566  << AllocatorRange;
2567  S.Diag(PrevAllocatorLoc, diag::note_omp_previous_allocator)
2568  << PrevAllocatorRange;
2569  return true;
2570  }
2571  return false;
2572 }
2573 
2574 static void
2575 applyOMPAllocateAttribute(Sema &S, VarDecl *VD,
2576  OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind,
2577  Expr *Allocator, SourceRange SR) {
2578  if (VD->hasAttr<OMPAllocateDeclAttr>())
2579  return;
2580  if (Allocator &&
2581  (Allocator->isTypeDependent() || Allocator->isValueDependent() ||
2582  Allocator->isInstantiationDependent() ||
2583  Allocator->containsUnexpandedParameterPack()))
2584  return;
2585  auto *A = OMPAllocateDeclAttr::CreateImplicit(S.Context, AllocatorKind,
2586  Allocator, SR);
2587  VD->addAttr(A);
2588  if (ASTMutationListener *ML = S.Context.getASTMutationListener())
2589  ML->DeclarationMarkedOpenMPAllocate(VD, A);
2590 }
2591 
2592 Sema::DeclGroupPtrTy Sema::ActOnOpenMPAllocateDirective(
2593  SourceLocation Loc, ArrayRef<Expr *> VarList,
2594  ArrayRef<OMPClause *> Clauses, DeclContext *Owner) {
2595  assert(Clauses.size() <= 1 && "Expected at most one clause.");
2596  Expr *Allocator = nullptr;
2597  if (Clauses.empty()) {
2598  // OpenMP 5.0, 2.11.3 allocate Directive, Restrictions.
2599  // allocate directives that appear in a target region must specify an
2600  // allocator clause unless a requires directive with the dynamic_allocators
2601  // clause is present in the same compilation unit.
2602  if (LangOpts.OpenMPIsDevice &&
2603  !DSAStack->hasRequiresDeclWithClause<OMPDynamicAllocatorsClause>())
2604  targetDiag(Loc, diag::err_expected_allocator_clause);
2605  } else {
2606  Allocator = cast<OMPAllocatorClause>(Clauses.back())->getAllocator();
2607  }
2608  OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind =
2609  getAllocatorKind(*this, DSAStack, Allocator);
2610  SmallVector<Expr *, 8> Vars;
2611  for (Expr *RefExpr : VarList) {
2612  auto *DE = cast<DeclRefExpr>(RefExpr);
2613  auto *VD = cast<VarDecl>(DE->getDecl());
2614 
2615  // Check if this is a TLS variable or global register.
2616  if (VD->getTLSKind() != VarDecl::TLS_None ||
2617  VD->hasAttr<OMPThreadPrivateDeclAttr>() ||
2618  (VD->getStorageClass() == SC_Register && VD->hasAttr<AsmLabelAttr>() &&
2619  !VD->isLocalVarDecl()))
2620  continue;
2621 
2622  // If the variable is used several times in allocate directives, the same
2623  // allocator must be used.
2624  if (checkPreviousOMPAllocateAttribute(*this, DSAStack, RefExpr, VD,
2625  AllocatorKind, Allocator))
2626  continue;
2627 
2628  // OpenMP, 2.11.3 allocate Directive, Restrictions, C / C++
2629  // If a list item has static storage duration, the allocator expression in the
2630  // allocator clause must be a constant expression that evaluates to one of
2631  // the predefined memory allocator values.
2632  if (Allocator && VD->hasGlobalStorage()) {
2633  if (AllocatorKind == OMPAllocateDeclAttr::OMPUserDefinedMemAlloc) {
2634  Diag(Allocator->getExprLoc(),
2635  diag::err_omp_expected_predefined_allocator)
2636  << Allocator->getSourceRange();
2637  bool IsDecl = VD->isThisDeclarationADefinition(Context) ==
2638  VarDecl::DeclarationOnly;
2639  Diag(VD->getLocation(),
2640  IsDecl ? diag::note_previous_decl : diag::note_defined_here)
2641  << VD;
2642  continue;
2643  }
2644  }
2645 
2646  Vars.push_back(RefExpr);
2647  applyOMPAllocateAttribute(*this, VD, AllocatorKind, Allocator,
2648  DE->getSourceRange());
2649  }
2650  if (Vars.empty())
2651  return nullptr;
2652  if (!Owner)
2653  Owner = getCurLexicalContext();
2654  auto *D = OMPAllocateDecl::Create(Context, Owner, Loc, Vars, Clauses);
2655  D->setAccess(AS_public);
2656  Owner->addDecl(D);
2657  return DeclGroupPtrTy::make(DeclGroupRef(D));
2658 }
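// Illustrative example (hypothetical user code, not from this file), assuming
// <omp.h> has been included:
//
//   int GlobalVar;
//   #pragma omp allocate(GlobalVar) allocator(omp_cgroup_mem_alloc)  // OK
//
//   extern omp_allocator_handle_t MyAlloc;
//   int OtherGlobal;
//   #pragma omp allocate(OtherGlobal) allocator(MyAlloc)
//   // error: a variable with static storage requires a predefined allocator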
2659 
2660 Sema::DeclGroupPtrTy
2661 Sema::ActOnOpenMPRequiresDirective(SourceLocation Loc,
2662  ArrayRef<OMPClause *> ClauseList) {
2663  OMPRequiresDecl *D = nullptr;
2664  if (!CurContext->isFileContext()) {
2665  Diag(Loc, diag::err_omp_invalid_scope) << "requires";
2666  } else {
2667  D = CheckOMPRequiresDecl(Loc, ClauseList);
2668  if (D) {
2669  CurContext->addDecl(D);
2670  DSAStack->addRequiresDecl(D);
2671  }
2672  }
2673  return DeclGroupPtrTy::make(DeclGroupRef(D));
2674 }
2675 
2676 OMPRequiresDecl *Sema::CheckOMPRequiresDecl(SourceLocation Loc,
2677  ArrayRef<OMPClause *> ClauseList) {
2678  /// For target specific clauses, the requires directive cannot be
2679  /// specified after the handling of any of the target regions in the
2680  /// current compilation unit.
2681  ArrayRef<SourceLocation> TargetLocations =
2682  DSAStack->getEncounteredTargetLocs();
2683  if (!TargetLocations.empty()) {
2684  for (const OMPClause *CNew : ClauseList) {
2685  // Check if any of the requires clauses affect target regions.
2686  if (isa<OMPUnifiedSharedMemoryClause>(CNew) ||
2687  isa<OMPUnifiedAddressClause>(CNew) ||
2688  isa<OMPReverseOffloadClause>(CNew) ||
2689  isa<OMPDynamicAllocatorsClause>(CNew)) {
2690  Diag(Loc, diag::err_omp_target_before_requires)
2691  << getOpenMPClauseName(CNew->getClauseKind());
2692  for (SourceLocation TargetLoc : TargetLocations) {
2693  Diag(TargetLoc, diag::note_omp_requires_encountered_target);
2694  }
2695  }
2696  }
2697  }
2698 
2699  if (!DSAStack->hasDuplicateRequiresClause(ClauseList))
2700  return OMPRequiresDecl::Create(Context, getCurLexicalContext(), Loc,
2701  ClauseList);
2702  return nullptr;
2703 }
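// Illustrative example (hypothetical user code, not from this file):
//
//   void F() {
//   #pragma omp target
//     {}
//   }
//   #pragma omp requires unified_shared_memory
//   // err_omp_target_before_requires: the clause affects the target region
//   // above, which has already been processed.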
2704 
2705 static void reportOriginalDsa(Sema &SemaRef, const DSAStackTy *Stack,
2706  const ValueDecl *D,
2707  const DSAStackTy::DSAVarData &DVar,
2708  bool IsLoopIterVar = false) {
2709  if (DVar.RefExpr) {
2710  SemaRef.Diag(DVar.RefExpr->getExprLoc(), diag::note_omp_explicit_dsa)
2711  << getOpenMPClauseName(DVar.CKind);
2712  return;
2713  }
2714  enum {
2715  PDSA_StaticMemberShared,
2716  PDSA_StaticLocalVarShared,
2717  PDSA_LoopIterVarPrivate,
2718  PDSA_LoopIterVarLinear,
2719  PDSA_LoopIterVarLastprivate,
2720  PDSA_ConstVarShared,
2721  PDSA_GlobalVarShared,
2722  PDSA_TaskVarFirstprivate,
2723  PDSA_LocalVarPrivate,
2724  PDSA_Implicit
2725  } Reason = PDSA_Implicit;
2726  bool ReportHint = false;
2727  auto ReportLoc = D->getLocation();
2728  auto *VD = dyn_cast<VarDecl>(D);
2729  if (IsLoopIterVar) {
2730  if (DVar.CKind == OMPC_private)
2731  Reason = PDSA_LoopIterVarPrivate;
2732  else if (DVar.CKind == OMPC_lastprivate)
2733  Reason = PDSA_LoopIterVarLastprivate;
2734  else
2735  Reason = PDSA_LoopIterVarLinear;
2736  } else if (isOpenMPTaskingDirective(DVar.DKind) &&
2737  DVar.CKind == OMPC_firstprivate) {
2738  Reason = PDSA_TaskVarFirstprivate;
2739  ReportLoc = DVar.ImplicitDSALoc;
2740  } else if (VD && VD->isStaticLocal())
2741  Reason = PDSA_StaticLocalVarShared;
2742  else if (VD && VD->isStaticDataMember())
2743  Reason = PDSA_StaticMemberShared;
2744  else if (VD && VD->isFileVarDecl())
2745  Reason = PDSA_GlobalVarShared;
2746  else if (D->getType().isConstant(SemaRef.getASTContext()))
2747  Reason = PDSA_ConstVarShared;
2748  else if (VD && VD->isLocalVarDecl() && DVar.CKind == OMPC_private) {
2749  ReportHint = true;
2750  Reason = PDSA_LocalVarPrivate;
2751  }
2752  if (Reason != PDSA_Implicit) {
2753  SemaRef.Diag(ReportLoc, diag::note_omp_predetermined_dsa)
2754  << Reason << ReportHint
2755  << getOpenMPDirectiveName(Stack->getCurrentDirective());
2756  } else if (DVar.ImplicitDSALoc.isValid()) {
2757  SemaRef.Diag(DVar.ImplicitDSALoc, diag::note_omp_implicit_dsa)
2758  << getOpenMPClauseName(DVar.CKind);
2759  }
2760 }
2761 
2762 namespace {
2763 class DSAAttrChecker final : public StmtVisitor<DSAAttrChecker, void> {
2764  DSAStackTy *Stack;
2765  Sema &SemaRef;
2766  bool ErrorFound = false;
2767  CapturedStmt *CS = nullptr;
2768  llvm::SmallVector<Expr *, 4> ImplicitFirstprivate;
2769  llvm::SmallVector<Expr *, 4> ImplicitMap;
2770  Sema::VarsWithInheritedDSAType VarsWithInheritedDSA;
2771  llvm::SmallDenseSet<const ValueDecl *, 4> ImplicitDeclarations;
2772 
2773  void VisitSubCaptures(OMPExecutableDirective *S) {
2774  // Check implicitly captured variables.
2775  if (!S->hasAssociatedStmt() || !S->getAssociatedStmt())
2776  return;
2777  visitSubCaptures(S->getInnermostCapturedStmt());
2778  }
2779 
2780 public:
2781  void VisitDeclRefExpr(DeclRefExpr *E) {
2782  if (E->isTypeDependent() || E->isValueDependent() ||
2783  E->containsUnexpandedParameterPack() || E->isInstantiationDependent())
2784  return;
2785  if (auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
2786  // Check the datasharing rules for the expressions in the clauses.
2787  if (!CS) {
2788  if (auto *CED = dyn_cast<OMPCapturedExprDecl>(VD))
2789  if (!CED->hasAttr<OMPCaptureNoInitAttr>()) {
2790  Visit(CED->getInit());
2791  return;
2792  }
2793  } else if (VD->isImplicit() || isa<OMPCapturedExprDecl>(VD))
2794  // Do not analyze internal variables and do not enclose them into
2795  // implicit clauses.
2796  return;
2797  VD = VD->getCanonicalDecl();
2798  // Skip internally declared variables.
2799  if (VD->hasLocalStorage() && CS && !CS->capturesVariable(VD))
2800  return;
2801 
2802  DSAStackTy::DSAVarData DVar = Stack->getTopDSA(VD, /*FromParent=*/false);
2803  // Check if the variable has an explicit DSA set and stop the analysis if so.
2804  if (DVar.RefExpr || !ImplicitDeclarations.insert(VD).second)
2805  return;
2806 
2807  // Skip internally declared static variables.
2808  llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
2809  OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
2810  if (VD->hasGlobalStorage() && CS && !CS->capturesVariable(VD) &&
2811  (Stack->hasRequiresDeclWithClause<OMPUnifiedSharedMemoryClause>() ||
2812  !Res || *Res != OMPDeclareTargetDeclAttr::MT_Link))
2813  return;
2814 
2815  SourceLocation ELoc = E->getExprLoc();
2816  OpenMPDirectiveKind DKind = Stack->getCurrentDirective();
2817  // The default(none) clause requires that each variable that is referenced
2818  // in the construct, and does not have a predetermined data-sharing
2819  // attribute, must have its data-sharing attribute explicitly determined
2820  // by being listed in a data-sharing attribute clause.
2821  if (DVar.CKind == OMPC_unknown && Stack->getDefaultDSA() == DSA_none &&
2822  isImplicitOrExplicitTaskingRegion(DKind) &&
2823  VarsWithInheritedDSA.count(VD) == 0) {
2824  VarsWithInheritedDSA[VD] = E;
2825  return;
2826  }
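  // Illustrative example (hypothetical user code, not from this file):
  //
  //   int X = 0;
  //   #pragma omp parallel default(none)   // error: 'X' must have its
  //   X++;                                 // data-sharing attribute specified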
2827 
2828  if (isOpenMPTargetExecutionDirective(DKind) &&
2829  !Stack->isLoopControlVariable(VD).first) {
2830  if (!Stack->checkMappableExprComponentListsForDecl(
2831  VD, /*CurrentRegionOnly=*/true,
2832  [](OMPClauseMappableExprCommon::MappableExprComponentListRef
2833  StackComponents,
2834  OpenMPClauseKind) {
2835  // The variable is used if it has been marked as an array, an array
2836  // section, or the variable itself.
2837  return StackComponents.size() == 1 ||
2838  std::all_of(
2839  std::next(StackComponents.rbegin()),
2840  StackComponents.rend(),
2841  [](const OMPClauseMappableExprCommon::
2842  MappableComponent &MC) {
2843  return MC.getAssociatedDeclaration() ==
2844  nullptr &&
2845  (isa<OMPArraySectionExpr>(
2846  MC.getAssociatedExpression()) ||
2847  isa<ArraySubscriptExpr>(
2848  MC.getAssociatedExpression()));
2849  });
2850  })) {
2851  bool IsFirstprivate = false;
2852  // By default lambdas are captured as firstprivates.
2853  if (const auto *RD =
2854  VD->getType().getNonReferenceType()->getAsCXXRecordDecl())
2855  IsFirstprivate = RD->isLambda();
2856  IsFirstprivate =
2857  IsFirstprivate ||
2858  (VD->getType().getNonReferenceType()->isScalarType() &&
2859  Stack->getDefaultDMA() != DMA_tofrom_scalar && !Res);
2860  if (IsFirstprivate)
2861  ImplicitFirstprivate.emplace_back(E);
2862  else
2863  ImplicitMap.emplace_back(E);
2864  return;
2865  }
2866  }
2867 
2868  // OpenMP [2.9.3.6, Restrictions, p.2]
2869  // A list item that appears in a reduction clause of the innermost
2870  // enclosing worksharing or parallel construct may not be accessed in an
2871  // explicit task.
2872  DVar = Stack->hasInnermostDSA(
2873  VD, [](OpenMPClauseKind C) { return C == OMPC_reduction; },
2874  [](OpenMPDirectiveKind K) {
2875  return isOpenMPParallelDirective(K) ||
2876  isOpenMPWorksharingDirective(K) || isOpenMPTeamsDirective(K);
2877  },
2878  /*FromParent=*/true);
2879  if (isOpenMPTaskingDirective(DKind) && DVar.CKind == OMPC_reduction) {
2880  ErrorFound = true;
2881  SemaRef.Diag(ELoc, diag::err_omp_reduction_in_task);
2882  reportOriginalDsa(SemaRef, Stack, VD, DVar);
2883  return;
2884  }
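  // Illustrative example (hypothetical user code, not from this file):
  //
  //   int Red = 0;
  //   #pragma omp parallel reduction(+ : Red)
  //   {
  //   #pragma omp task
  //     Red++;   // err_omp_reduction_in_task
  //   }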
2885 
2886  // Define implicit data-sharing attributes for task.
2887  DVar = Stack->getImplicitDSA(VD, /*FromParent=*/false);
2888  if (isOpenMPTaskingDirective(DKind) && DVar.CKind != OMPC_shared &&
2889  !Stack->isLoopControlVariable(VD).first) {
2890  ImplicitFirstprivate.push_back(E);
2891  return;
2892  }
2893 
2894  // Store implicitly used globals with declare target link for parent
2895  // target.
2896  if (!isOpenMPTargetExecutionDirective(DKind) && Res &&
2897  *Res == OMPDeclareTargetDeclAttr::MT_Link) {
2898  Stack->addToParentTargetRegionLinkGlobals(E);
2899  return;
2900  }
2901  }
2902  }
2903  void VisitMemberExpr(MemberExpr *E) {
2904  if (E->isTypeDependent() || E->isValueDependent() ||
2905  E->containsUnexpandedParameterPack() || E->isInstantiationDependent())
2906  return;
2907  auto *FD = dyn_cast<FieldDecl>(E->getMemberDecl());
2908  OpenMPDirectiveKind DKind = Stack->getCurrentDirective();
2909  if (auto *TE = dyn_cast<CXXThisExpr>(E->getBase()->IgnoreParens())) {
2910  if (!FD)
2911  return;
2912  DSAStackTy::DSAVarData DVar = Stack->getTopDSA(FD, /*FromParent=*/false);
2913  // Check if the variable has an explicit DSA set and stop the analysis
2914  // if so.
2915  if (DVar.RefExpr || !ImplicitDeclarations.insert(FD).second)
2916  return;
2917 
2918  if (isOpenMPTargetExecutionDirective(DKind) &&
2919  !Stack->isLoopControlVariable(FD).first &&
2920  !Stack->checkMappableExprComponentListsForDecl(
2921  FD, /*CurrentRegionOnly=*/true,
2922  [](OMPClauseMappableExprCommon::MappableExprComponentListRef
2923  StackComponents,
2924  OpenMPClauseKind) {
2925  return isa<CXXThisExpr>(
2926  cast<MemberExpr>(
2927  StackComponents.back().getAssociatedExpression())
2928  ->getBase()
2929  ->IgnoreParens());
2930  })) {
2931  // OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, C/C++, p.3]
2932  // A bit-field cannot appear in a map clause.
2933  //
2934  if (FD->isBitField())
2935  return;
2936 
2937  // Check to see if the member expression is referencing a class that
2938  // has already been explicitly mapped
2939  if (Stack->isClassPreviouslyMapped(TE->getType()))
2940  return;
2941 
2942  ImplicitMap.emplace_back(E);
2943  return;
2944  }
2945 
2946  SourceLocation ELoc = E->getExprLoc();
2947  // OpenMP [2.9.3.6, Restrictions, p.2]
2948  // A list item that appears in a reduction clause of the innermost
2949  // enclosing worksharing or parallel construct may not be accessed in
2950  // an explicit task.
2951  DVar = Stack->hasInnermostDSA(
2952  FD, [](OpenMPClauseKind C) { return C == OMPC_reduction; },
2953  [](OpenMPDirectiveKind K) {
2954  return isOpenMPParallelDirective(K) ||
2955  isOpenMPWorksharingDirective(K) || isOpenMPTeamsDirective(K);
2956  },
2957  /*FromParent=*/true);
2958  if (isOpenMPTaskingDirective(DKind) && DVar.CKind == OMPC_reduction) {
2959  ErrorFound = true;
2960  SemaRef.Diag(ELoc, diag::err_omp_reduction_in_task);
2961  reportOriginalDsa(SemaRef, Stack, FD, DVar);
2962  return;
2963  }
2964 
2965  // Define implicit data-sharing attributes for task.
2966  DVar = Stack->getImplicitDSA(FD, /*FromParent=*/false);
2967  if (isOpenMPTaskingDirective(DKind) && DVar.CKind != OMPC_shared &&
2968  !Stack->isLoopControlVariable(FD).first) {
2969  // Check if there is a captured expression for the current field in the
2970  // region. Do not mark it as firstprivate unless there is no captured
2971  // expression.
2972  // TODO: try to make it firstprivate.
2973  if (DVar.CKind != OMPC_unknown)
2974  ImplicitFirstprivate.push_back(E);
2975  }
2976  return;
2977  }
2978  if (isOpenMPTargetExecutionDirective(DKind)) {
2979  OMPClauseMappableExprCommon::MappableExprComponentList CurComponents;
2980  if (!checkMapClauseExpressionBase(SemaRef, E, CurComponents, OMPC_map,
2981  /*NoDiagnose=*/true))
2982  return;
2983  const auto *VD = cast<ValueDecl>(
2984  CurComponents.back().getAssociatedDeclaration()->getCanonicalDecl());
2985  if (!Stack->checkMappableExprComponentListsForDecl(
2986  VD, /*CurrentRegionOnly=*/true,
2987  [&CurComponents](
2988  OMPClauseMappableExprCommon::MappableExprComponentListRef
2989  StackComponents,
2990  OpenMPClauseKind) {
2991  auto CCI = CurComponents.rbegin();
2992  auto CCE = CurComponents.rend();
2993  for (const auto &SC : llvm::reverse(StackComponents)) {
2994  // Do both expressions have the same kind?
2995  if (CCI->getAssociatedExpression()->getStmtClass() !=
2996  SC.getAssociatedExpression()->getStmtClass())
2997  if (!(isa<OMPArraySectionExpr>(
2998  SC.getAssociatedExpression()) &&
2999  isa<ArraySubscriptExpr>(
3000  CCI->getAssociatedExpression())))
3001  return false;
3002 
3003  const Decl *CCD = CCI->getAssociatedDeclaration();
3004  const Decl *SCD = SC.getAssociatedDeclaration();
3005  CCD = CCD ? CCD->getCanonicalDecl() : nullptr;
3006  SCD = SCD ? SCD->getCanonicalDecl() : nullptr;
3007  if (SCD != CCD)
3008  return false;
3009  std::advance(CCI, 1);
3010  if (CCI == CCE)
3011  break;
3012  }
3013  return true;
3014  })) {
3015  Visit(E->getBase());
3016  }
3017  } else {
3018  Visit(E->getBase());
3019  }
3020  }
3021  void VisitOMPExecutableDirective(OMPExecutableDirective *S) {
3022  for (OMPClause *C : S->clauses()) {
3023  // Skip analysis of arguments of implicitly defined firstprivate clause
3024  // for task|target directives.
3025  // Skip analysis of arguments of implicitly defined map clause for target
3026  // directives.
3027  if (C && !((isa<OMPFirstprivateClause>(C) || isa<OMPMapClause>(C)) &&
3028  C->isImplicit())) {
3029  for (Stmt *CC : C->children()) {
3030  if (CC)
3031  Visit(CC);
3032  }
3033  }
3034  }
3035  // Check implicitly captured variables.
3036  VisitSubCaptures(S);
3037  }
3038  void VisitStmt(Stmt *S) {
3039  for (Stmt *C : S->children()) {
3040  if (C) {
3041  // Check implicitly captured variables in the task-based directives to
3042  // see whether they must be firstprivatized.
3043  Visit(C);
3044  }
3045  }
3046  }
3047 
3048  void visitSubCaptures(CapturedStmt *S) {
3049  for (const CapturedStmt::Capture &Cap : S->captures()) {
3050  if (!Cap.capturesVariable() && !Cap.capturesVariableByCopy())
3051  continue;
3052  VarDecl *VD = Cap.getCapturedVar();
3053  // Do not try to map the variable if it or its sub-component was mapped
3054  // already.
3055  if (isOpenMPTargetExecutionDirective(Stack->getCurrentDirective()) &&
3056  Stack->checkMappableExprComponentListsForDecl(
3057  VD, /*CurrentRegionOnly=*/true,
3058  [](OMPClauseMappableExprCommon::MappableExprComponentListRef,
3059  OpenMPClauseKind) { return true; }))
3060  continue;
3061  DeclRefExpr *DRE = buildDeclRefExpr(
3062  SemaRef, VD, VD->getType().getNonLValueExprType(SemaRef.Context),
3063  Cap.getLocation(), /*RefersToCapture=*/true);
3064  Visit(DRE);
3065  }
3066  }
3067  bool isErrorFound() const { return ErrorFound; }
3068  ArrayRef<Expr *> getImplicitFirstprivate() const {
3069  return ImplicitFirstprivate;
3070  }
3071  ArrayRef<Expr *> getImplicitMap() const { return ImplicitMap; }
3072  const Sema::VarsWithInheritedDSAType &getVarsWithInheritedDSA() const {
3073  return VarsWithInheritedDSA;
3074  }
3075 
3076  DSAAttrChecker(DSAStackTy *S, Sema &SemaRef, CapturedStmt *CS)
3077  : Stack(S), SemaRef(SemaRef), ErrorFound(false), CS(CS) {
3078  // Process declare target link variables for the target directives.
3079  if (isOpenMPTargetExecutionDirective(S->getCurrentDirective())) {
3080  for (DeclRefExpr *E : Stack->getLinkGlobals())
3081  Visit(E);
3082  }
3083  }
3084 };
3085 } // namespace
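
// Illustrative example (not part of the upstream source): DSAAttrChecker
// records variables referenced in the associated statement that carry no
// explicit data-sharing attribute, e.g.
//   int a[8]; int x = 0;
//   #pragma omp target       // 'a' referenced with no clause
//   { a[0] = 1; }            //   -> collected in ImplicitMap (map(tofrom: a))
//   #pragma omp task         // 'x' referenced with no clause
//   { x++; }                 //   -> collected in ImplicitFirstprivate
// The collected expressions are turned into implicit clauses in
// ActOnOpenMPExecutableDirective further below.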
3086 
3087 void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
3088  switch (DKind) {
3089  case OMPD_parallel:
3090  case OMPD_parallel_for:
3091  case OMPD_parallel_for_simd:
3092  case OMPD_parallel_sections:
3093  case OMPD_teams:
3094  case OMPD_teams_distribute:
3095  case OMPD_teams_distribute_simd: {
3096  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
3097  QualType KmpInt32PtrTy =
3098  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
3099  Sema::CapturedParamNameType Params[] = {
3100  std::make_pair(".global_tid.", KmpInt32PtrTy),
3101  std::make_pair(".bound_tid.", KmpInt32PtrTy),
3102  std::make_pair(StringRef(), QualType()) // __context with shared vars
3103  };
3104  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3105  Params);
3106  break;
3107  }
3108  case OMPD_target_teams:
3109  case OMPD_target_parallel:
3110  case OMPD_target_parallel_for:
3111  case OMPD_target_parallel_for_simd:
3112  case OMPD_target_teams_distribute:
3113  case OMPD_target_teams_distribute_simd: {
3114  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
3115  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
3116  QualType KmpInt32PtrTy =
3117  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
3118  QualType Args[] = {VoidPtrTy};
3119  FunctionProtoType::ExtProtoInfo EPI;
3120  EPI.Variadic = true;
3121  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
3122  Sema::CapturedParamNameType Params[] = {
3123  std::make_pair(".global_tid.", KmpInt32Ty),
3124  std::make_pair(".part_id.", KmpInt32PtrTy),
3125  std::make_pair(".privates.", VoidPtrTy),
3126  std::make_pair(
3127  ".copy_fn.",
3128  Context.getPointerType(CopyFnType).withConst().withRestrict()),
3129  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
3130  std::make_pair(StringRef(), QualType()) // __context with shared vars
3131  };
3132  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3133  Params, /*OpenMPCaptureLevel=*/0);
3134  // Mark this captured region as inlined, because we don't use the
3135  // outlined function directly.
3136  getCurCapturedRegion()->TheCapturedDecl->addAttr(
3137  AlwaysInlineAttr::CreateImplicit(
3138  Context, {}, AttributeCommonInfo::AS_Keyword,
3139  AlwaysInlineAttr::Keyword_forceinline));
3140  Sema::CapturedParamNameType ParamsTarget[] = {
3141  std::make_pair(StringRef(), QualType()) // __context with shared vars
3142  };
3143  // Start a captured region for 'target' with no implicit parameters.
3144  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3145  ParamsTarget, /*OpenMPCaptureLevel=*/1);
3146  Sema::CapturedParamNameType ParamsTeamsOrParallel[] = {
3147  std::make_pair(".global_tid.", KmpInt32PtrTy),
3148  std::make_pair(".bound_tid.", KmpInt32PtrTy),
3149  std::make_pair(StringRef(), QualType()) // __context with shared vars
3150  };
3151  // Start a captured region for 'teams' or 'parallel'. Both regions have
3152  // the same implicit parameters.
3153  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3154  ParamsTeamsOrParallel, /*OpenMPCaptureLevel=*/2);
3155  break;
3156  }
3157  case OMPD_target:
3158  case OMPD_target_simd: {
3159  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
3160  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
3161  QualType KmpInt32PtrTy =
3162  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
3163  QualType Args[] = {VoidPtrTy};
3164  FunctionProtoType::ExtProtoInfo EPI;
3165  EPI.Variadic = true;
3166  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
3167  Sema::CapturedParamNameType Params[] = {
3168  std::make_pair(".global_tid.", KmpInt32Ty),
3169  std::make_pair(".part_id.", KmpInt32PtrTy),
3170  std::make_pair(".privates.", VoidPtrTy),
3171  std::make_pair(
3172  ".copy_fn.",
3173  Context.getPointerType(CopyFnType).withConst().withRestrict()),
3174  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
3175  std::make_pair(StringRef(), QualType()) // __context with shared vars
3176  };
3177  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3178  Params, /*OpenMPCaptureLevel=*/0);
3179  // Mark this captured region as inlined, because we don't use the
3180  // outlined function directly.
3181  getCurCapturedRegion()->TheCapturedDecl->addAttr(
3182  AlwaysInlineAttr::CreateImplicit(
3183  Context, {}, AttributeCommonInfo::AS_Keyword,
3184  AlwaysInlineAttr::Keyword_forceinline));
3185  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3186  std::make_pair(StringRef(), QualType()),
3187  /*OpenMPCaptureLevel=*/1);
3188  break;
3189  }
3190  case OMPD_simd:
3191  case OMPD_for:
3192  case OMPD_for_simd:
3193  case OMPD_sections:
3194  case OMPD_section:
3195  case OMPD_single:
3196  case OMPD_master:
3197  case OMPD_critical:
3198  case OMPD_taskgroup:
3199  case OMPD_distribute:
3200  case OMPD_distribute_simd:
3201  case OMPD_ordered:
3202  case OMPD_atomic:
3203  case OMPD_target_data: {
3204  Sema::CapturedParamNameType Params[] = {
3205  std::make_pair(StringRef(), QualType()) // __context with shared vars
3206  };
3207  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3208  Params);
3209  break;
3210  }
3211  case OMPD_task: {
3212  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
3213  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
3214  QualType KmpInt32PtrTy =
3215  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
3216  QualType Args[] = {VoidPtrTy};
3217  FunctionProtoType::ExtProtoInfo EPI;
3218  EPI.Variadic = true;
3219  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
3220  Sema::CapturedParamNameType Params[] = {
3221  std::make_pair(".global_tid.", KmpInt32Ty),
3222  std::make_pair(".part_id.", KmpInt32PtrTy),
3223  std::make_pair(".privates.", VoidPtrTy),
3224  std::make_pair(
3225  ".copy_fn.",
3226  Context.getPointerType(CopyFnType).withConst().withRestrict()),
3227  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
3228  std::make_pair(StringRef(), QualType()) // __context with shared vars
3229  };
3230  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3231  Params);
3232  // Mark this captured region as inlined, because we don't use the
3233  // outlined function directly.
3234  getCurCapturedRegion()->TheCapturedDecl->addAttr(
3235  AlwaysInlineAttr::CreateImplicit(
3236  Context, {}, AttributeCommonInfo::AS_Keyword,
3237  AlwaysInlineAttr::Keyword_forceinline));
3238  break;
3239  }
3240  case OMPD_taskloop:
3241  case OMPD_taskloop_simd:
3242  case OMPD_master_taskloop:
3243  case OMPD_master_taskloop_simd: {
3244  QualType KmpInt32Ty =
3245  Context.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1)
3246  .withConst();
3247  QualType KmpUInt64Ty =
3248  Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0)
3249  .withConst();
3250  QualType KmpInt64Ty =
3251  Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1)
3252  .withConst();
3253  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
3254  QualType KmpInt32PtrTy =
3255  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
3256  QualType Args[] = {VoidPtrTy};
3257  FunctionProtoType::ExtProtoInfo EPI;
3258  EPI.Variadic = true;
3259  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
3260  Sema::CapturedParamNameType Params[] = {
3261  std::make_pair(".global_tid.", KmpInt32Ty),
3262  std::make_pair(".part_id.", KmpInt32PtrTy),
3263  std::make_pair(".privates.", VoidPtrTy),
3264  std::make_pair(
3265  ".copy_fn.",
3266  Context.getPointerType(CopyFnType).withConst().withRestrict()),
3267  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
3268  std::make_pair(".lb.", KmpUInt64Ty),
3269  std::make_pair(".ub.", KmpUInt64Ty),
3270  std::make_pair(".st.", KmpInt64Ty),
3271  std::make_pair(".liter.", KmpInt32Ty),
3272  std::make_pair(".reductions.", VoidPtrTy),
3273  std::make_pair(StringRef(), QualType()) // __context with shared vars
3274  };
3275  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3276  Params);
3277  // Mark this captured region as inlined, because we don't use the
3278  // outlined function directly.
3279  getCurCapturedRegion()->TheCapturedDecl->addAttr(
3280  AlwaysInlineAttr::CreateImplicit(
3281  Context, {}, AttributeCommonInfo::AS_Keyword,
3282  AlwaysInlineAttr::Keyword_forceinline));
3283  break;
3284  }
3285  case OMPD_parallel_master_taskloop: {
3286  QualType KmpInt32Ty =
3287  Context.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1)
3288  .withConst();
3289  QualType KmpUInt64Ty =
3290  Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0)
3291  .withConst();
3292  QualType KmpInt64Ty =
3293  Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1)
3294  .withConst();
3295  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
3296  QualType KmpInt32PtrTy =
3297  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
3298  Sema::CapturedParamNameType ParamsParallel[] = {
3299  std::make_pair(".global_tid.", KmpInt32PtrTy),
3300  std::make_pair(".bound_tid.", KmpInt32PtrTy),
3301  std::make_pair(StringRef(), QualType()) // __context with shared vars
3302  };
3303  // Start a captured region for 'parallel'.
3304  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3305  ParamsParallel, /*OpenMPCaptureLevel=*/1);
3306  QualType Args[] = {VoidPtrTy};
3307  FunctionProtoType::ExtProtoInfo EPI;
3308  EPI.Variadic = true;
3309  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
3310  Sema::CapturedParamNameType Params[] = {
3311  std::make_pair(".global_tid.", KmpInt32Ty),
3312  std::make_pair(".part_id.", KmpInt32PtrTy),
3313  std::make_pair(".privates.", VoidPtrTy),
3314  std::make_pair(
3315  ".copy_fn.",
3316  Context.getPointerType(CopyFnType).withConst().withRestrict()),
3317  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
3318  std::make_pair(".lb.", KmpUInt64Ty),
3319  std::make_pair(".ub.", KmpUInt64Ty),
3320  std::make_pair(".st.", KmpInt64Ty),
3321  std::make_pair(".liter.", KmpInt32Ty),
3322  std::make_pair(".reductions.", VoidPtrTy),
3323  std::make_pair(StringRef(), QualType()) // __context with shared vars
3324  };
3325  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3326  Params, /*OpenMPCaptureLevel=*/2);
3327  // Mark this captured region as inlined, because we don't use the
3328  // outlined function directly.
3329  getCurCapturedRegion()->TheCapturedDecl->addAttr(
3330  AlwaysInlineAttr::CreateImplicit(
3331  Context, {}, AttributeCommonInfo::AS_Keyword,
3332  AlwaysInlineAttr::Keyword_forceinline));
3333  break;
3334  }
3335  case OMPD_distribute_parallel_for_simd:
3336  case OMPD_distribute_parallel_for: {
3337  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
3338  QualType KmpInt32PtrTy =
3339  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
3340  Sema::CapturedParamNameType Params[] = {
3341  std::make_pair(".global_tid.", KmpInt32PtrTy),
3342  std::make_pair(".bound_tid.", KmpInt32PtrTy),
3343  std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
3344  std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
3345  std::make_pair(StringRef(), QualType()) // __context with shared vars
3346  };
3347  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3348  Params);
3349  break;
3350  }
3351  case OMPD_target_teams_distribute_parallel_for:
3352  case OMPD_target_teams_distribute_parallel_for_simd: {
3353  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
3354  QualType KmpInt32PtrTy =
3355  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
3356  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
3357 
3358  QualType Args[] = {VoidPtrTy};
3359  FunctionProtoType::ExtProtoInfo EPI;
3360  EPI.Variadic = true;
3361  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
3362  Sema::CapturedParamNameType Params[] = {
3363  std::make_pair(".global_tid.", KmpInt32Ty),
3364  std::make_pair(".part_id.", KmpInt32PtrTy),
3365  std::make_pair(".privates.", VoidPtrTy),
3366  std::make_pair(
3367  ".copy_fn.",
3368  Context.getPointerType(CopyFnType).withConst().withRestrict()),
3369  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
3370  std::make_pair(StringRef(), QualType()) // __context with shared vars
3371  };
3372  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3373  Params, /*OpenMPCaptureLevel=*/0);
3374  // Mark this captured region as inlined, because we don't use the
3375  // outlined function directly.
3376  getCurCapturedRegion()->TheCapturedDecl->addAttr(
3377  AlwaysInlineAttr::CreateImplicit(
3378  Context, {}, AttributeCommonInfo::AS_Keyword,
3379  AlwaysInlineAttr::Keyword_forceinline));
3380  Sema::CapturedParamNameType ParamsTarget[] = {
3381  std::make_pair(StringRef(), QualType()) // __context with shared vars
3382  };
3383  // Start a captured region for 'target' with no implicit parameters.
3384  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3385  ParamsTarget, /*OpenMPCaptureLevel=*/1);
3386 
3387  Sema::CapturedParamNameType ParamsTeams[] = {
3388  std::make_pair(".global_tid.", KmpInt32PtrTy),
3389  std::make_pair(".bound_tid.", KmpInt32PtrTy),
3390  std::make_pair(StringRef(), QualType()) // __context with shared vars
3391  };
3392  // Start a captured region for 'teams'.
3393  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3394  ParamsTeams, /*OpenMPCaptureLevel=*/2);
3395 
3396  Sema::CapturedParamNameType ParamsParallel[] = {
3397  std::make_pair(".global_tid.", KmpInt32PtrTy),
3398  std::make_pair(".bound_tid.", KmpInt32PtrTy),
3399  std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
3400  std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
3401  std::make_pair(StringRef(), QualType()) // __context with shared vars
3402  };
3403  // Start a captured region for 'parallel', which also receives the
3404  // previous lower/upper bounds of the enclosing 'distribute' loop.
3405  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3406  ParamsParallel, /*OpenMPCaptureLevel=*/3);
3407  break;
3408  }
3409 
3410  case OMPD_teams_distribute_parallel_for:
3411  case OMPD_teams_distribute_parallel_for_simd: {
3412  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
3413  QualType KmpInt32PtrTy =
3414  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
3415 
3416  Sema::CapturedParamNameType ParamsTeams[] = {
3417  std::make_pair(".global_tid.", KmpInt32PtrTy),
3418  std::make_pair(".bound_tid.", KmpInt32PtrTy),
3419  std::make_pair(StringRef(), QualType()) // __context with shared vars
3420  };
3421  // Start a captured region for 'teams'.
3422  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3423  ParamsTeams, /*OpenMPCaptureLevel=*/0);
3424 
3425  Sema::CapturedParamNameType ParamsParallel[] = {
3426  std::make_pair(".global_tid.", KmpInt32PtrTy),
3427  std::make_pair(".bound_tid.", KmpInt32PtrTy),
3428  std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
3429  std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
3430  std::make_pair(StringRef(), QualType()) // __context with shared vars
3431  };
3432  // Start a captured region for 'parallel', which also receives the
3433  // previous lower/upper bounds of the enclosing 'distribute' loop.
3434  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3435  ParamsParallel, /*OpenMPCaptureLevel=*/1);
3436  break;
3437  }
3438  case OMPD_target_update:
3439  case OMPD_target_enter_data:
3440  case OMPD_target_exit_data: {
3441  QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
3442  QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
3443  QualType KmpInt32PtrTy =
3444  Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
3445  QualType Args[] = {VoidPtrTy};
3446  FunctionProtoType::ExtProtoInfo EPI;
3447  EPI.Variadic = true;
3448  QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
3449  Sema::CapturedParamNameType Params[] = {
3450  std::make_pair(".global_tid.", KmpInt32Ty),
3451  std::make_pair(".part_id.", KmpInt32PtrTy),
3452  std::make_pair(".privates.", VoidPtrTy),
3453  std::make_pair(
3454  ".copy_fn.",
3455  Context.getPointerType(CopyFnType).withConst().withRestrict()),
3456  std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
3457  std::make_pair(StringRef(), QualType()) // __context with shared vars
3458  };
3459  ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
3460  Params);
3461  // Mark this captured region as inlined, because we don't use the
3462  // outlined function directly.
3463  getCurCapturedRegion()->TheCapturedDecl->addAttr(
3464  AlwaysInlineAttr::CreateImplicit(
3465  Context, {}, AttributeCommonInfo::AS_Keyword,
3466  AlwaysInlineAttr::Keyword_forceinline));
3467  break;
3468  }
3469  case OMPD_threadprivate:
3470  case OMPD_allocate:
3471  case OMPD_taskyield:
3472  case OMPD_barrier:
3473  case OMPD_taskwait:
3474  case OMPD_cancellation_point:
3475  case OMPD_cancel:
3476  case OMPD_flush:
3477  case OMPD_declare_reduction:
3478  case OMPD_declare_mapper:
3479  case OMPD_declare_simd:
3480  case OMPD_declare_target:
3481  case OMPD_end_declare_target:
3482  case OMPD_requires:
3483  case OMPD_declare_variant:
3484  llvm_unreachable("OpenMP Directive is not allowed");
3485  case OMPD_unknown:
3486  llvm_unreachable("Unknown OpenMP directive");
3487  }
3488 }
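
// Illustrative sketch (drawn from the switch above, not normative): for a
// combined construct such as
//   #pragma omp target parallel
//   { ... }
// three nested captured regions are opened before the associated statement is
// parsed: an outer task-like region (.global_tid., .part_id., .privates.,
// .copy_fn., .task_t.), a 'target' region carrying only the __context
// parameter, and an inner 'parallel' region (.global_tid., .bound_tid.).
// They are closed again in ActOnOpenMPRegionEnd, or unwound on error.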
3489 
3490 int Sema::getNumberOfConstructScopes(unsigned Level) const {
3491  return getOpenMPCaptureLevels(DSAStack->getDirective(Level));
3492 }
3493 
3494 int Sema::getOpenMPCaptureLevels(OpenMPDirectiveKind DKind) {
3495  SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
3496  getOpenMPCaptureRegions(CaptureRegions, DKind);
3497  return CaptureRegions.size();
3498 }
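
// Example (illustrative): the number of capture levels matches the number of
// ActOnCapturedRegionStart calls issued in ActOnOpenMPRegionStart, e.g. 1 for
// plain 'parallel' and 3 for the combined 'target parallel' (task, target,
// parallel).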
3499 
3500 static OMPCapturedExprDecl *buildCaptureDecl(Sema &S, IdentifierInfo *Id,
3501  Expr *CaptureExpr, bool WithInit,
3502  bool AsExpression) {
3503  assert(CaptureExpr);
3504  ASTContext &C = S.getASTContext();
3505  Expr *Init = AsExpression ? CaptureExpr : CaptureExpr->IgnoreImpCasts();
3506  QualType Ty = Init->getType();
3507  if (CaptureExpr->getObjectKind() == OK_Ordinary && CaptureExpr->isGLValue()) {
3508  if (S.getLangOpts().CPlusPlus) {
3509  Ty = C.getLValueReferenceType(Ty);
3510  } else {
3511  Ty = C.getPointerType(Ty);
3512  ExprResult Res =
3513  S.CreateBuiltinUnaryOp(CaptureExpr->getExprLoc(), UO_AddrOf, Init);
3514  if (!Res.isUsable())
3515  return nullptr;
3516  Init = Res.get();
3517  }
3518  WithInit = true;
3519  }
3520  auto *CED = OMPCapturedExprDecl::Create(C, S.CurContext, Id, Ty,
3521  CaptureExpr->getBeginLoc());
3522  if (!WithInit)
3523  CED->addAttr(OMPCaptureNoInitAttr::CreateImplicit(C));
3524  S.CurContext->addHiddenDecl(CED);
3525  S.AddInitializerToDecl(CED, Init, /*DirectInit=*/false);
3526  return CED;
3527 }
3528 
3529 static DeclRefExpr *buildCapture(Sema &S, ValueDecl *D, Expr *CaptureExpr,
3530  bool WithInit) {
3531  OMPCapturedExprDecl *CD;
3532  if (VarDecl *VD = S.isOpenMPCapturedDecl(D))
3533  CD = cast<OMPCapturedExprDecl>(VD);
3534  else
3535  CD = buildCaptureDecl(S, D->getIdentifier(), CaptureExpr, WithInit,
3536  /*AsExpression=*/false);
3537  return buildDeclRefExpr(S, CD, CD->getType().getNonReferenceType(),
3538  CaptureExpr->getExprLoc());
3539 }
3540 
3541 static ExprResult buildCapture(Sema &S, Expr *CaptureExpr, DeclRefExpr *&Ref) {
3542  CaptureExpr = S.DefaultLvalueConversion(CaptureExpr).get();
3543  if (!Ref) {
3544  OMPCapturedExprDecl *CD = buildCaptureDecl(
3545  S, &S.getASTContext().Idents.get(".capture_expr."), CaptureExpr,
3546  /*WithInit=*/true, /*AsExpression=*/true);
3547  Ref = buildDeclRefExpr(S, CD, CD->getType().getNonReferenceType(),
3548  CaptureExpr->getExprLoc());
3549  }
3550  ExprResult Res = Ref;
3551  if (!S.getLangOpts().CPlusPlus &&
3552  CaptureExpr->getObjectKind() == OK_Ordinary && CaptureExpr->isGLValue() &&
3553  Ref->getType()->isPointerType()) {
3554  Res = S.CreateBuiltinUnaryOp(CaptureExpr->getExprLoc(), UO_Deref, Ref);
3555  if (!Res.isUsable())
3556  return ExprError();
3557  }
3558  return S.DefaultLvalueConversion(Res.get());
3559 }
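
// Usage note (illustrative, not from the source): these helpers wrap a clause
// expression in a hidden OMPCapturedExprDecl so it is evaluated once and then
// referenced through a DeclRefExpr. For a clause expression such as
//   #pragma omp parallel num_threads(foo())
// a '.capture_expr.' declaration initialized with 'foo()' can be created, and
// the clause then refers to that capture instead of re-evaluating the call in
// every region of a combined directive.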
3560 
3561 namespace {
3562 // OpenMP directives parsed in this section are represented as a
3563 // CapturedStatement with an associated statement. If a syntax error
3564 // is detected during the parsing of the associated statement, the
3565 // compiler must abort processing and close the CapturedStatement.
3566 //
3567 // Combined directives such as 'target parallel' have more than one
3568 // nested CapturedStatements. This RAII ensures that we unwind out
3569 // of all the nested CapturedStatements when an error is found.
3570 class CaptureRegionUnwinderRAII {
3571 private:
3572  Sema &S;
3573  bool &ErrorFound;
3574  OpenMPDirectiveKind DKind = OMPD_unknown;
3575 
3576 public:
3577  CaptureRegionUnwinderRAII(Sema &S, bool &ErrorFound,
3578  OpenMPDirectiveKind DKind)
3579  : S(S), ErrorFound(ErrorFound), DKind(DKind) {}
3580  ~CaptureRegionUnwinderRAII() {
3581  if (ErrorFound) {
3582  int ThisCaptureLevel = S.getOpenMPCaptureLevels(DKind);
3583  while (--ThisCaptureLevel >= 0)
3584  S.ActOnCapturedRegionError();
3585  }
3586  }
3587 };
3588 } // namespace
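
// Illustrative note: this RAII mirrors the regions opened in
// ActOnOpenMPRegionStart. If, say, '#pragma omp target parallel' opened three
// captured regions and a syntax error is found in the associated statement,
// the destructor pops one region per capture level so the scope stack stays
// balanced.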
3589 
3590 void Sema::tryCaptureOpenMPLambdas(ValueDecl *V) {
3591  // Capture variables captured by reference in lambdas for target-based
3592  // directives.
3593  if (!CurContext->isDependentContext() &&
3594  (isOpenMPTargetExecutionDirective(DSAStack->getCurrentDirective()) ||
3595  isOpenMPTargetDataManagementDirective(
3596  DSAStack->getCurrentDirective()))) {
3597  QualType Type = V->getType();
3598  if (const auto *RD = Type.getCanonicalType()
3599  .getNonReferenceType()
3600  ->getAsCXXRecordDecl()) {
3601  bool SavedForceCaptureByReferenceInTargetExecutable =
3602  DSAStack->isForceCaptureByReferenceInTargetExecutable();
3603  DSAStack->setForceCaptureByReferenceInTargetExecutable(
3604  /*V=*/true);
3605  if (RD->isLambda()) {
3606  llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
3607  FieldDecl *ThisCapture;
3608  RD->getCaptureFields(Captures, ThisCapture);
3609  for (const LambdaCapture &LC : RD->captures()) {
3610  if (LC.getCaptureKind() == LCK_ByRef) {
3611  VarDecl *VD = LC.getCapturedVar();
3612  DeclContext *VDC = VD->getDeclContext();
3613  if (!VDC->Encloses(CurContext))
3614  continue;
3615  MarkVariableReferenced(LC.getLocation(), VD);
3616  } else if (LC.getCaptureKind() == LCK_This) {
3617  QualType ThisTy = getCurrentThisType();
3618  if (!ThisTy.isNull() &&
3619  Context.typesAreCompatible(ThisTy, ThisCapture->getType()))
3620  CheckCXXThisCapture(LC.getLocation());
3621  }
3622  }
3623  }
3624  DSAStack->setForceCaptureByReferenceInTargetExecutable(
3625  SavedForceCaptureByReferenceInTargetExecutable);
3626  }
3627  }
3628 }
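
// Example (illustrative): given
//   int x = 0;
//   auto l = [&x]() { return x; };
//   #pragma omp target map(to: l)
//   { l(); }
// the by-reference capture 'x' is marked as referenced here so that it is
// considered for mapping along with the lambda in the target region.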
3629 
3630 StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
3631  ArrayRef<OMPClause *> Clauses) {
3632  bool ErrorFound = false;
3633  CaptureRegionUnwinderRAII CaptureRegionUnwinder(
3634  *this, ErrorFound, DSAStack->getCurrentDirective());
3635  if (!S.isUsable()) {
3636  ErrorFound = true;
3637  return StmtError();
3638  }
3639 
3640  SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
3641  getOpenMPCaptureRegions(CaptureRegions, DSAStack->getCurrentDirective());
3642  OMPOrderedClause *OC = nullptr;
3643  OMPScheduleClause *SC = nullptr;
3644  SmallVector<const OMPLinearClause *, 4> LCs;
3645  SmallVector<const OMPClauseWithPreInit *, 4> PICs;
3646  // This is required for proper codegen.
3647  for (OMPClause *Clause : Clauses) {
3648  if (isOpenMPTaskingDirective(DSAStack->getCurrentDirective()) &&
3649  Clause->getClauseKind() == OMPC_in_reduction) {
3650  // Capture taskgroup task_reduction descriptors inside the tasking regions
3651  // with the corresponding in_reduction items.
3652  auto *IRC = cast<OMPInReductionClause>(Clause);
3653  for (Expr *E : IRC->taskgroup_descriptors())
3654  if (E)
3655  MarkDeclarationsReferencedInExpr(E);
3656  }
3657  if (isOpenMPPrivate(Clause->getClauseKind()) ||
3658  Clause->getClauseKind() == OMPC_copyprivate ||
3659  (getLangOpts().OpenMPUseTLS &&
3660  getASTContext().getTargetInfo().isTLSSupported() &&
3661  Clause->getClauseKind() == OMPC_copyin)) {
3662  DSAStack->setForceVarCapturing(Clause->getClauseKind() == OMPC_copyin);
3663  // Mark all variables in private list clauses as used in inner region.
3664  for (Stmt *VarRef : Clause->children()) {
3665  if (auto *E = cast_or_null<Expr>(VarRef)) {
3666  MarkDeclarationsReferencedInExpr(E);
3667  }
3668  }
3669  DSAStack->setForceVarCapturing(/*V=*/false);
3670  } else if (CaptureRegions.size() > 1 ||
3671  CaptureRegions.back() != OMPD_unknown) {
3672  if (auto *C = OMPClauseWithPreInit::get(Clause))
3673  PICs.push_back(C);
3674  if (auto *C = OMPClauseWithPostUpdate::get(Clause)) {
3675  if (Expr *E = C->getPostUpdateExpr())
3676  MarkDeclarationsReferencedInExpr(E);
3677  }
3678  }
3679  if (Clause->getClauseKind() == OMPC_schedule)
3680  SC = cast<OMPScheduleClause>(Clause);
3681  else if (Clause->getClauseKind() == OMPC_ordered)
3682  OC = cast<OMPOrderedClause>(Clause);
3683  else if (Clause->getClauseKind() == OMPC_linear)
3684  LCs.push_back(cast<OMPLinearClause>(Clause));
3685  }
3686  // OpenMP, 2.7.1 Loop Construct, Restrictions
3687  // The nonmonotonic modifier cannot be specified if an ordered clause is
3688  // specified.
3689  if (SC &&
3690  (SC->getFirstScheduleModifier() == OMPC_SCHEDULE_MODIFIER_nonmonotonic ||
3691  SC->getSecondScheduleModifier() ==
3692  OMPC_SCHEDULE_MODIFIER_nonmonotonic) &&
3693  OC) {
3694  Diag(SC->getFirstScheduleModifier() == OMPC_SCHEDULE_MODIFIER_nonmonotonic
3695  ? SC->getFirstScheduleModifierLoc()
3696  : SC->getSecondScheduleModifierLoc(),
3697  diag::err_omp_schedule_nonmonotonic_ordered)
3698  << SourceRange(OC->getBeginLoc(), OC->getEndLoc());
3699  ErrorFound = true;
3700  }
3701  if (!LCs.empty() && OC && OC->getNumForLoops()) {
3702  for (const OMPLinearClause *C : LCs) {
3703  Diag(C->getBeginLoc(), diag::err_omp_linear_ordered)
3704  << SourceRange(OC->getBeginLoc(), OC->getEndLoc());
3705  }
3706  ErrorFound = true;
3707  }
3708  if (isOpenMPWorksharingDirective(DSAStack->getCurrentDirective()) &&
3709  isOpenMPSimdDirective(DSAStack->getCurrentDirective()) && OC &&
3710  OC->getNumForLoops()) {
3711  Diag(OC->getBeginLoc(), diag::err_omp_ordered_simd)
3712  << getOpenMPDirectiveName(DSAStack->getCurrentDirective());
3713  ErrorFound = true;
3714  }
3715  if (ErrorFound) {
3716  return StmtError();
3717  }
3718  StmtResult SR = S;
3719  unsigned CompletedRegions = 0;
3720  for (OpenMPDirectiveKind ThisCaptureRegion : llvm::reverse(CaptureRegions)) {
3721  // Mark all variables in private list clauses as used in inner region.
3722  // Required for proper codegen of combined directives.
3723  // TODO: add processing for other clauses.
3724  if (ThisCaptureRegion != OMPD_unknown) {
3725  for (const clang::OMPClauseWithPreInit *C : PICs) {
3726  OpenMPDirectiveKind CaptureRegion = C->getCaptureRegion();
3727  // Find the particular capture region for the clause if the
3728  // directive is a combined one with multiple capture regions.
3729  // If the directive is not a combined one, the capture region
3730  // associated with the clause is OMPD_unknown and is generated
3731  // only once.
3732  if (CaptureRegion == ThisCaptureRegion ||
3733  CaptureRegion == OMPD_unknown) {
3734  if (auto *DS = cast_or_null<DeclStmt>(C->getPreInitStmt())) {
3735  for (Decl *D : DS->decls())
3736  MarkVariableReferenced(D->getLocation(), cast<VarDecl>(D));
3737  }
3738  }
3739  }
3740  }
3741  if (++CompletedRegions == CaptureRegions.size())
3742  DSAStack->setBodyComplete();
3743  SR = ActOnCapturedRegionEnd(SR.get());
3744  }
3745  return SR;
3746 }
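
// Illustrative examples of the clause checks above (sketches, not from the
// source):
//   #pragma omp for schedule(nonmonotonic: dynamic) ordered
//     // error: the nonmonotonic modifier cannot be combined with 'ordered'
//   #pragma omp for ordered(2) linear(i)
//     // error: 'linear' cannot be used together with 'ordered(n)'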
3747 
3748 static bool checkCancelRegion(Sema &SemaRef, OpenMPDirectiveKind CurrentRegion,
3749  OpenMPDirectiveKind CancelRegion,
3750  SourceLocation StartLoc) {
3751  // CancelRegion is only needed for cancel and cancellation_point.
3752  if (CurrentRegion != OMPD_cancel && CurrentRegion != OMPD_cancellation_point)
3753  return false;
3754 
3755  if (CancelRegion == OMPD_parallel || CancelRegion == OMPD_for ||
3756  CancelRegion == OMPD_sections || CancelRegion == OMPD_taskgroup)
3757  return false;
3758 
3759  SemaRef.Diag(StartLoc, diag::err_omp_wrong_cancel_region)
3760  << getOpenMPDirectiveName(CancelRegion);
3761  return true;
3762 }
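
// Example (illustrative): only 'parallel', 'for', 'sections' and 'taskgroup'
// are accepted as construct-type-clauses, so something like
//   #pragma omp cancel target
// is rejected here with err_omp_wrong_cancel_region.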
3763 
3764 static bool checkNestingOfRegions(Sema &SemaRef, const DSAStackTy *Stack,
3765  OpenMPDirectiveKind CurrentRegion,
3766  const DeclarationNameInfo &CurrentName,
3767  OpenMPDirectiveKind CancelRegion,
3768  SourceLocation StartLoc) {
3769  if (Stack->getCurScope()) {
3770  OpenMPDirectiveKind ParentRegion = Stack->getParentDirective();
3771  OpenMPDirectiveKind OffendingRegion = ParentRegion;
3772  bool NestingProhibited = false;
3773  bool CloseNesting = true;
3774  bool OrphanSeen = false;
3775  enum {
3776  NoRecommend,
3777  ShouldBeInParallelRegion,
3778  ShouldBeInOrderedRegion,
3779  ShouldBeInTargetRegion,
3780  ShouldBeInTeamsRegion
3781  } Recommend = NoRecommend;
3782  if (isOpenMPSimdDirective(ParentRegion) && CurrentRegion != OMPD_ordered) {
3783  // OpenMP [2.16, Nesting of Regions]
3784  // OpenMP constructs may not be nested inside a simd region.
3785  // OpenMP [2.8.1,simd Construct, Restrictions]
3786  // An ordered construct with the simd clause is the only OpenMP
3787  // construct that can appear in the simd region.
3788  // Allowing a SIMD construct nested in another SIMD construct is an
3789  // extension. The OpenMP 4.5 spec does not allow it. Issue a warning
3790  // message.
3791  SemaRef.Diag(StartLoc, (CurrentRegion != OMPD_simd)
3792  ? diag::err_omp_prohibited_region_simd
3793  : diag::warn_omp_nesting_simd);
3794  return CurrentRegion != OMPD_simd;
3795  }
3796  if (ParentRegion == OMPD_atomic) {
3797  // OpenMP [2.16, Nesting of Regions]
3798  // OpenMP constructs may not be nested inside an atomic region.
3799  SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region_atomic);
3800  return true;
3801  }
3802  if (CurrentRegion == OMPD_section) {
3803  // OpenMP [2.7.2, sections Construct, Restrictions]
3804  // Orphaned section directives are prohibited. That is, the section
3805  // directives must appear within the sections construct and must not be
3806  // encountered elsewhere in the sections region.
3807  if (ParentRegion != OMPD_sections &&
3808  ParentRegion != OMPD_parallel_sections) {
3809  SemaRef.Diag(StartLoc, diag::err_omp_orphaned_section_directive)
3810  << (ParentRegion != OMPD_unknown)
3811  << getOpenMPDirectiveName(ParentRegion);
3812  return true;
3813  }
3814  return false;
3815  }
3816  // Allow some constructs (except teams and cancellation constructs) to be
3817  // orphaned (they could be used in functions, called from OpenMP regions
3818  // with the required preconditions).
3819  if (ParentRegion == OMPD_unknown &&
3820  !isOpenMPNestingTeamsDirective(CurrentRegion) &&
3821  CurrentRegion != OMPD_cancellation_point &&
3822  CurrentRegion != OMPD_cancel)
3823  return false;
3824  if (CurrentRegion == OMPD_cancellation_point ||
3825  CurrentRegion == OMPD_cancel) {
3826  // OpenMP [2.16, Nesting of Regions]
3827  // A cancellation point construct for which construct-type-clause is
3828  // taskgroup must be nested inside a task construct. A cancellation
3829  // point construct for which construct-type-clause is not taskgroup must
3830  // be closely nested inside an OpenMP construct that matches the type
3831  // specified in construct-type-clause.
3832  // A cancel construct for which construct-type-clause is taskgroup must be
3833  // nested inside a task construct. A cancel construct for which
3834  // construct-type-clause is not taskgroup must be closely nested inside an
3835  // OpenMP construct that matches the type specified in
3836  // construct-type-clause.
3837  NestingProhibited =
3838  !((CancelRegion == OMPD_parallel &&
3839  (ParentRegion == OMPD_parallel ||
3840  ParentRegion == OMPD_target_parallel)) ||
3841  (CancelRegion == OMPD_for &&
3842  (ParentRegion == OMPD_for || ParentRegion == OMPD_parallel_for ||
3843  ParentRegion == OMPD_target_parallel_for ||
3844  ParentRegion == OMPD_distribute_parallel_for ||
3845  ParentRegion == OMPD_teams_distribute_parallel_for ||
3846  ParentRegion == OMPD_target_teams_distribute_parallel_for)) ||
3847  (CancelRegion == OMPD_taskgroup && ParentRegion == OMPD_task) ||
3848  (CancelRegion == OMPD_sections &&
3849  (ParentRegion == OMPD_section || ParentRegion == OMPD_sections ||
3850  ParentRegion == OMPD_parallel_sections)));
3851  OrphanSeen = ParentRegion == OMPD_unknown;
3852  } else if (CurrentRegion == OMPD_master) {
3853  // OpenMP [2.16, Nesting of Regions]
3854  // A master region may not be closely nested inside a worksharing,
3855  // atomic, or explicit task region.
3856  NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
3857  isOpenMPTaskingDirective(ParentRegion);
3858  } else if (CurrentRegion == OMPD_critical && CurrentName.getName()) {
3859  // OpenMP [2.16, Nesting of Regions]
3860  // A critical region may not be nested (closely or otherwise) inside a
3861  // critical region with the same name. Note that this restriction is not
3862  // sufficient to prevent deadlock.
3863  SourceLocation PreviousCriticalLoc;
3864  bool DeadLock = Stack->hasDirective(
3865  [CurrentName, &PreviousCriticalLoc](OpenMPDirectiveKind K,
3866  const DeclarationNameInfo &DNI,
3867  SourceLocation Loc) {
3868  if (K == OMPD_critical && DNI.getName() == CurrentName.getName()) {
3869  PreviousCriticalLoc = Loc;
3870  return true;
3871  }
3872  return false;
3873  },
3874  false /* skip top directive */);
3875  if (DeadLock) {
3876  SemaRef.Diag(StartLoc,
3877  diag::err_omp_prohibited_region_critical_same_name)
3878  << CurrentName.getName();
3879  if (PreviousCriticalLoc.isValid())
3880  SemaRef.Diag(PreviousCriticalLoc,
3881  diag::note_omp_previous_critical_region);
3882  return true;
3883  }
3884  } else if (CurrentRegion == OMPD_barrier) {
3885  // OpenMP [2.16, Nesting of Regions]
3886  // A barrier region may not be closely nested inside a worksharing,
3887  // explicit task, critical, ordered, atomic, or master region.
3888  NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
3889  isOpenMPTaskingDirective(ParentRegion) ||
3890  ParentRegion == OMPD_master ||
3891  ParentRegion == OMPD_critical ||
3892  ParentRegion == OMPD_ordered;
3893  } else if (isOpenMPWorksharingDirective(CurrentRegion) &&
3894  !isOpenMPParallelDirective(CurrentRegion) &&
3895  !isOpenMPTeamsDirective(CurrentRegion)) {
3896  // OpenMP [2.16, Nesting of Regions]
3897  // A worksharing region may not be closely nested inside a worksharing,
3898  // explicit task, critical, ordered, atomic, or master region.
3899  NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
3900  isOpenMPTaskingDirective(ParentRegion) ||
3901  ParentRegion == OMPD_master ||
3902  ParentRegion == OMPD_critical ||
3903  ParentRegion == OMPD_ordered;
3904  Recommend = ShouldBeInParallelRegion;
3905  } else if (CurrentRegion == OMPD_ordered) {
3906  // OpenMP [2.16, Nesting of Regions]
3907  // An ordered region may not be closely nested inside a critical,
3908  // atomic, or explicit task region.
3909  // An ordered region must be closely nested inside a loop region (or
3910  // parallel loop region) with an ordered clause.
3911  // OpenMP [2.8.1,simd Construct, Restrictions]
3912  // An ordered construct with the simd clause is the only OpenMP construct
3913  // that can appear in the simd region.
3914  NestingProhibited = ParentRegion == OMPD_critical ||
3915  isOpenMPTaskingDirective(ParentRegion) ||
3916  !(isOpenMPSimdDirective(ParentRegion) ||
3917  Stack->isParentOrderedRegion());
3918  Recommend = ShouldBeInOrderedRegion;
3919  } else if (isOpenMPNestingTeamsDirective(CurrentRegion)) {
3920  // OpenMP [2.16, Nesting of Regions]
3921  // If specified, a teams construct must be contained within a target
3922  // construct.
3923  NestingProhibited =
3924  (SemaRef.LangOpts.OpenMP <= 45 && ParentRegion != OMPD_target) ||
3925  (SemaRef.LangOpts.OpenMP >= 50 && ParentRegion != OMPD_unknown &&
3926  ParentRegion != OMPD_target);
3927  OrphanSeen = ParentRegion == OMPD_unknown;
3928  Recommend = ShouldBeInTargetRegion;
3929  }
3930  if (!NestingProhibited &&
3931  !isOpenMPTargetExecutionDirective(CurrentRegion) &&
3932  !isOpenMPTargetDataManagementDirective(CurrentRegion) &&
3933  (ParentRegion == OMPD_teams || ParentRegion == OMPD_target_teams)) {
3934  // OpenMP [2.16, Nesting of Regions]
3935  // distribute, parallel, parallel sections, parallel workshare, and the
3936  // parallel loop and parallel loop SIMD constructs are the only OpenMP
3937  // constructs that can be closely nested in the teams region.
3938  NestingProhibited = !isOpenMPParallelDirective(CurrentRegion) &&
3939  !isOpenMPDistributeDirective(CurrentRegion);
3940  Recommend = ShouldBeInParallelRegion;
3941  }
3942  if (!NestingProhibited &&
3943  isOpenMPNestingDistributeDirective(CurrentRegion)) {
3944  // OpenMP 4.5 [2.17 Nesting of Regions]
3945  // The region associated with the distribute construct must be strictly
3946  // nested inside a teams region
3947  NestingProhibited =
3948  (ParentRegion != OMPD_teams && ParentRegion != OMPD_target_teams);
3949  Recommend = ShouldBeInTeamsRegion;
3950  }
3951  if (!NestingProhibited &&
3952  (isOpenMPTargetExecutionDirective(CurrentRegion) ||
3953  isOpenMPTargetDataManagementDirective(CurrentRegion))) {
3954  // OpenMP 4.5 [2.17 Nesting of Regions]
3955  // If a target, target update, target data, target enter data, or
3956  // target exit data construct is encountered during execution of a
3957  // target region, the behavior is unspecified.
3958  NestingProhibited = Stack->hasDirective(
3959  [&OffendingRegion](OpenMPDirectiveKind K, const DeclarationNameInfo &,
3960  SourceLocation) {
3961  if (isOpenMPTargetExecutionDirective(K)) {
3962  OffendingRegion = K;
3963  return true;
3964  }
3965  return false;
3966  },
3967  false /* don't skip top directive */);
3968  CloseNesting = false;
3969  }
3970  if (NestingProhibited) {
3971  if (OrphanSeen) {
3972  SemaRef.Diag(StartLoc, diag::err_omp_orphaned_device_directive)
3973  << getOpenMPDirectiveName(CurrentRegion) << Recommend;
3974  } else {
3975  SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region)
3976  << CloseNesting << getOpenMPDirectiveName(OffendingRegion)
3977  << Recommend << getOpenMPDirectiveName(CurrentRegion);
3978  }
3979  return true;
3980  }
3981  }
3982  return false;
3983 }
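
// Illustrative examples of nestings rejected above (sketches, not from the
// source):
//   #pragma omp parallel for
//   for (int i = 0; i < n; ++i) {
//   #pragma omp for   // error: worksharing closely nested in a worksharing
//     ...             // region; an intervening parallel region is required
//   }
// and, for OpenMP <= 4.5, a 'teams' construct that is not closely nested
// inside a 'target' construct.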
3984 
3985 static bool checkIfClauses(Sema &S, OpenMPDirectiveKind Kind,
3986  ArrayRef<OMPClause *> Clauses,
3987  ArrayRef<OpenMPDirectiveKind> AllowedNameModifiers) {
3988  bool ErrorFound = false;
3989  unsigned NamedModifiersNumber = 0;
3990  SmallVector<const OMPIfClause *, OMPD_unknown + 1> FoundNameModifiers(
3991  OMPD_unknown + 1);
3992  SmallVector<SourceLocation, 4> NameModifierLoc;
3993  for (const OMPClause *C : Clauses) {
3994  if (const auto *IC = dyn_cast_or_null<OMPIfClause>(C)) {
3995  // At most one if clause without a directive-name-modifier can appear on
3996  // the directive.
3997  OpenMPDirectiveKind CurNM = IC->getNameModifier();
3998  if (FoundNameModifiers[CurNM]) {
3999  S.Diag(C->getBeginLoc(), diag::err_omp_more_one_clause)
4000  << getOpenMPDirectiveName(Kind) << getOpenMPClauseName(OMPC_if)
4001  << (CurNM != OMPD_unknown) << getOpenMPDirectiveName(CurNM);
4002  ErrorFound = true;
4003  } else if (CurNM != OMPD_unknown) {
4004  NameModifierLoc.push_back(IC->getNameModifierLoc());
4005  ++NamedModifiersNumber;
4006  }
4007  FoundNameModifiers[CurNM] = IC;
4008  if (CurNM == OMPD_unknown)
4009  continue;
4010  // Check if the specified name modifier is allowed for the current
4011  // directive.
4012  // At most one if clause with the particular directive-name-modifier can
4013  // appear on the directive.
4014  bool MatchFound = false;
4015  for (auto NM : AllowedNameModifiers) {
4016  if (CurNM == NM) {
4017  MatchFound = true;
4018  break;
4019  }
4020  }
4021  if (!MatchFound) {
4022  S.Diag(IC->getNameModifierLoc(),
4023  diag::err_omp_wrong_if_directive_name_modifier)
4024  << getOpenMPDirectiveName(CurNM) << getOpenMPDirectiveName(Kind);
4025  ErrorFound = true;
4026  }
4027  }
4028  }
4029  // If any if clause on the directive includes a directive-name-modifier then
4030  // all if clauses on the directive must include a directive-name-modifier.
4031  if (FoundNameModifiers[OMPD_unknown] && NamedModifiersNumber > 0) {
4032  if (NamedModifiersNumber == AllowedNameModifiers.size()) {
4033  S.Diag(FoundNameModifiers[OMPD_unknown]->getBeginLoc(),
4034  diag::err_omp_no_more_if_clause);
4035  } else {
4036  std::string Values;
4037  std::string Sep(", ");
4038  unsigned AllowedCnt = 0;
4039  unsigned TotalAllowedNum =
4040  AllowedNameModifiers.size() - NamedModifiersNumber;
4041  for (unsigned Cnt = 0, End = AllowedNameModifiers.size(); Cnt < End;
4042  ++Cnt) {
4043  OpenMPDirectiveKind NM = AllowedNameModifiers[Cnt];
4044  if (!FoundNameModifiers[NM]) {
4045  Values += "'";
4046  Values += getOpenMPDirectiveName(NM);
4047  Values += "'";
4048  if (AllowedCnt + 2 == TotalAllowedNum)
4049  Values += " or ";
4050  else if (AllowedCnt + 1 != TotalAllowedNum)
4051  Values += Sep;
4052  ++AllowedCnt;
4053  }
4054  }
4055  S.Diag(FoundNameModifiers[OMPD_unknown]->getCondition()->getBeginLoc(),
4056  diag::err_omp_unnamed_if_clause)
4057  << (TotalAllowedNum > 1) << Values;
4058  }
4059  for (SourceLocation Loc : NameModifierLoc) {
4060  S.Diag(Loc, diag::note_omp_previous_named_if_clause);
4061  }
4062  ErrorFound = true;
4063  }
4064  return ErrorFound;
4065 }
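
// Example (illustrative): on a combined directive the directive-name-modifier
// selects the region an 'if' clause applies to, e.g.
//   #pragma omp target parallel if(target: c1) if(parallel: c2)   // OK
//   #pragma omp target parallel if(target: c1) if(c2)             // error:
//     // once any 'if' clause carries a modifier, all of them must
// Duplicate or inapplicable modifiers are diagnosed above as well.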
4066 
4067 static std::pair<ValueDecl *, bool>
4068 getPrivateItem(Sema &S, Expr *&RefExpr, SourceLocation &ELoc,
4069  SourceRange &ERange, bool AllowArraySection = false) {
4070  if (RefExpr->isTypeDependent() || RefExpr->isValueDependent() ||
4071  RefExpr->containsUnexpandedParameterPack())
4072  return std::make_pair(nullptr, true);
4073 
4074  // OpenMP [3.1, C/C++]
4075  // A list item is a variable name.
4076  // OpenMP [2.9.3.3, Restrictions, p.1]
4077  // A variable that is part of another variable (as an array or
4078  // structure element) cannot appear in a private clause.
4079  RefExpr = RefExpr->IgnoreParens();
4080  enum {
4081  NoArrayExpr = -1,
4082  ArraySubscript = 0,
4083  OMPArraySection = 1
4084  } IsArrayExpr = NoArrayExpr;
4085  if (AllowArraySection) {
4086  if (auto *ASE = dyn_cast_or_null<ArraySubscriptExpr>(RefExpr)) {
4087  Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
4088  while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
4089  Base = TempASE->getBase()->IgnoreParenImpCasts();
4090  RefExpr = Base;
4091  IsArrayExpr = ArraySubscript;
4092  } else if (auto *OASE = dyn_cast_or_null<OMPArraySectionExpr>(RefExpr)) {
4093  Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
4094  while (auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
4095  Base = TempOASE->getBase()->IgnoreParenImpCasts();
4096  while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
4097  Base = TempASE->getBase()->IgnoreParenImpCasts();
4098  RefExpr = Base;
4099  IsArrayExpr = OMPArraySection;
4100  }
4101  }
4102  ELoc = RefExpr->getExprLoc();
4103  ERange = RefExpr->getSourceRange();
4104  RefExpr = RefExpr->IgnoreParenImpCasts();
4105  auto *DE = dyn_cast_or_null<DeclRefExpr>(RefExpr);
4106  auto *ME = dyn_cast_or_null<MemberExpr>(RefExpr);
4107  if ((!DE || !isa<VarDecl>(DE->getDecl())) &&
4108  (S.getCurrentThisType().isNull() || !ME ||
4109  !isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts()) ||
4110  !isa<FieldDecl>(ME->getMemberDecl()))) {
4111  if (IsArrayExpr != NoArrayExpr) {
4112  S.Diag(ELoc, diag::err_omp_expected_base_var_name) << IsArrayExpr
4113  << ERange;
4114  } else {
4115  S.Diag(ELoc,
4116  AllowArraySection
4117  ? diag::err_omp_expected_var_name_member_expr_or_array_item
4118  : diag::err_omp_expected_var_name_member_expr)
4119  << (S.getCurrentThisType().isNull() ? 0 : 1) << ERange;
4120  }
4121  return std::make_pair(nullptr, false);
4122  }
4123  return std::make_pair(
4124  getCanonicalDecl(DE ? DE->getDecl() : ME->getMemberDecl()), false);
4125 }
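
// Examples (illustrative) of list items rejected above:
//   struct T { int a; } t;
//   #pragma omp parallel private(t.a)   // error: a variable that is part of
//                                       // another variable cannot be private
//   int b[10];
//   #pragma omp parallel private(b[2])  // error: array subscripts are only
//                                       // accepted when AllowArraySection is
//                                       // set by the calling clause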
4126 
4127 static void checkAllocateClauses(Sema &S, DSAStackTy *Stack,
4128  ArrayRef<OMPClause *> Clauses) {
4129  assert(!S.CurContext->isDependentContext() &&
4130  "Expected non-dependent context.");
4131  auto AllocateRange =
4132  llvm::make_filter_range(Clauses, OMPAllocateClause::classof);
4133  llvm::DenseMap<CanonicalDeclPtr<Decl>, CanonicalDeclPtr<VarDecl>>
4134  DeclToCopy;
4135  auto PrivateRange = llvm::make_filter_range(Clauses, [](const OMPClause *C) {
4136  return isOpenMPPrivate(C->getClauseKind());
4137  });
4138  for (OMPClause *Cl : PrivateRange) {
4139  MutableArrayRef<Expr *>::iterator I, It, Et;
4140  if (Cl->getClauseKind() == OMPC_private) {
4141  auto *PC = cast<OMPPrivateClause>(Cl);
4142  I = PC->private_copies().begin();
4143  It = PC->varlist_begin();
4144  Et = PC->varlist_end();
4145  } else if (Cl->getClauseKind() == OMPC_firstprivate) {
4146  auto *PC = cast<OMPFirstprivateClause>(Cl);
4147  I = PC->private_copies().begin();
4148  It = PC->varlist_begin();
4149  Et = PC->varlist_end();
4150  } else if (Cl->getClauseKind() == OMPC_lastprivate) {
4151  auto *PC = cast<OMPLastprivateClause>(Cl);
4152  I = PC->private_copies().begin();
4153  It = PC->varlist_begin();
4154  Et = PC->varlist_end();
4155  } else if (Cl->getClauseKind() == OMPC_linear) {
4156  auto *PC = cast<OMPLinearClause>(Cl);
4157  I = PC->privates().begin();
4158  It = PC->varlist_begin();
4159  Et = PC->varlist_end();
4160  } else if (Cl->getClauseKind() == OMPC_reduction) {
4161  auto *PC = cast<OMPReductionClause>(Cl);
4162  I = PC->privates().begin();
4163  It = PC->varlist_begin();
4164  Et = PC->varlist_end();
4165  } else if (Cl->getClauseKind() == OMPC_task_reduction) {
4166  auto *PC = cast<OMPTaskReductionClause>(Cl);
4167  I = PC->privates().begin();
4168  It = PC->varlist_begin();
4169  Et = PC->varlist_end();
4170  } else if (Cl->getClauseKind() == OMPC_in_reduction) {
4171  auto *PC = cast<OMPInReductionClause>(Cl);
4172  I = PC->privates().begin();
4173  It = PC->varlist_begin();
4174  Et = PC->varlist_end();
4175  } else {
4176  llvm_unreachable("Expected private clause.");
4177  }
4178  for (Expr *E : llvm::make_range(It, Et)) {
4179  if (!*I) {
4180  ++I;
4181  continue;
4182  }
4183  SourceLocation ELoc;
4184  SourceRange ERange;
4185  Expr *SimpleRefExpr = E;
4186  auto Res = getPrivateItem(S, SimpleRefExpr, ELoc, ERange,
4187  /*AllowArraySection=*/true);
4188  DeclToCopy.try_emplace(Res.first,
4189  cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()));
4190  ++I;
4191  }
4192  }
4193  for (OMPClause *C : AllocateRange) {
4194  auto *AC = cast<OMPAllocateClause>(C);
4195  OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind =
4196  getAllocatorKind(S, Stack, AC->getAllocator());
4197  // OpenMP, 2.11.4 allocate Clause, Restrictions.
4198  // For task, taskloop or target directives, allocation requests to memory
4199  // allocators with the trait access set to thread result in unspecified
4200  // behavior.
4201  if (AllocatorKind == OMPAllocateDeclAttr::OMPThreadMemAlloc &&
4202  (isOpenMPTaskingDirective(Stack->getCurrentDirective()) ||
4203  isOpenMPTargetExecutionDirective(Stack->getCurrentDirective()))) {
4204  S.Diag(AC->getAllocator()->getExprLoc(),
4205  diag::warn_omp_allocate_thread_on_task_target_directive)
4206  << getOpenMPDirectiveName(Stack->getCurrentDirective());
4207  }
4208  for (Expr *E : AC->varlists()) {
4209  SourceLocation ELoc;
4210  SourceRange ERange;
4211  Expr *SimpleRefExpr = E;
4212  auto Res = getPrivateItem(S, SimpleRefExpr, ELoc, ERange);
4213  ValueDecl *VD = Res.first;
4214  DSAStackTy::DSAVarData Data = Stack->getTopDSA(VD, /*FromParent=*/false);
4215  if (!isOpenMPPrivate(Data.CKind)) {
4216  S.Diag(E->getExprLoc(),
4217  diag::err_omp_expected_private_copy_for_allocate);
4218  continue;
4219  }
4220  VarDecl *PrivateVD = DeclToCopy[VD];
4221  if (checkPreviousOMPAllocateAttribute(S, Stack, E, PrivateVD,
4222  AllocatorKind, AC->getAllocator()))
4223  continue;
4224  applyOMPAllocateAttribute(S, PrivateVD, AllocatorKind, AC->getAllocator(),
4225  E->getSourceRange());
4226  }
4227  }
4228 }
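
// Example (illustrative): each list item of an 'allocate' clause must have a
// private copy created by another clause on the same directive, e.g.
//   #pragma omp parallel private(a) allocate(omp_high_bw_mem_alloc: a)  // OK
//   #pragma omp parallel shared(a) allocate(a)   // error: expected a private
//                                                // copy for the allocated item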
4229 
4230 StmtResult Sema::ActOnOpenMPExecutableDirective(
4231  OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
4232  OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
4233  Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc) {
4234  StmtResult Res = StmtError();
4235  // First check CancelRegion which is then used in checkNestingOfRegions.
4236  if (checkCancelRegion(*this, Kind, CancelRegion, StartLoc) ||
4237  checkNestingOfRegions(*this, DSAStack, Kind, DirName, CancelRegion,
4238  StartLoc))
4239  return StmtError();
4240 
4241  llvm::SmallVector<OMPClause *, 8> ClausesWithImplicit;
4242  VarsWithInheritedDSAType VarsWithInheritedDSA;
4243  bool ErrorFound = false;
4244  ClausesWithImplicit.append(Clauses.begin(), Clauses.end());
4245  if (AStmt && !CurContext->isDependentContext()) {
4246  assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
4247 
4248  // Check default data sharing attributes for referenced variables.
4249  DSAAttrChecker DSAChecker(DSAStack, *this, cast<CapturedStmt>(AStmt));
4250  int ThisCaptureLevel = getOpenMPCaptureLevels(Kind);
4251  Stmt *S = AStmt;
4252  while (--ThisCaptureLevel >= 0)
4253  S = cast<CapturedStmt>(S)->getCapturedStmt();
4254  DSAChecker.Visit(S);
4255  if (!isOpenMPTargetExecutionDirective(Kind) &&
4256  !isOpenMPTaskingDirective(Kind)) {
4257  // Visit subcaptures to generate implicit clauses for captured vars.
4258  auto *CS = cast<CapturedStmt>(AStmt);
4259  SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
4260  getOpenMPCaptureRegions(CaptureRegions, Kind);
4261  // Ignore outer tasking regions for target directives.
4262  if (CaptureRegions.size() > 1 && CaptureRegions.front() == OMPD_task)
4263  CS = cast<CapturedStmt>(CS->getCapturedStmt());
4264  DSAChecker.visitSubCaptures(CS);
4265  }
4266  if (DSAChecker.isErrorFound())
4267  return StmtError();
4268  // Generate list of implicitly defined firstprivate variables.
4269  VarsWithInheritedDSA = DSAChecker.getVarsWithInheritedDSA();
4270 
4271  SmallVector<Expr *, 4> ImplicitFirstprivates(
4272  DSAChecker.getImplicitFirstprivate().begin(),
4273  DSAChecker.getImplicitFirstprivate().end());
4274  SmallVector<Expr *, 4> ImplicitMaps(DSAChecker.getImplicitMap().begin(),
4275  DSAChecker.getImplicitMap().end());
4276  // Mark taskgroup task_reduction descriptors as implicitly firstprivate.
4277  for (OMPClause *C : Clauses) {
4278  if (auto *IRC = dyn_cast<OMPInReductionClause>(C)) {
4279  for (Expr *E : IRC->taskgroup_descriptors())
4280  if (E)
4281  ImplicitFirstprivates.emplace_back(E);
4282  }
4283  }
4284  if (!ImplicitFirstprivates.empty()) {
4285  if (OMPClause *Implicit = ActOnOpenMPFirstprivateClause(
4286  ImplicitFirstprivates, SourceLocation(), SourceLocation(),
4287  SourceLocation())) {
4288  ClausesWithImplicit.push_back(Implicit);
4289  ErrorFound = cast<OMPFirstprivateClause>(Implicit)->varlist_size() !=
4290  ImplicitFirstprivates.size();
4291  } else {
4292  ErrorFound = true;
4293  }
4294  }
4295  if (!ImplicitMaps.empty()) {
4296  CXXScopeSpec MapperIdScopeSpec;
4297  DeclarationNameInfo MapperId;
4298  if (OMPClause *Implicit = ActOnOpenMPMapClause(
4299  llvm::None, llvm::None, MapperIdScopeSpec, MapperId,
4300  OMPC_MAP_tofrom, /*IsMapTypeImplicit=*/true, SourceLocation(),
4301  SourceLocation(), ImplicitMaps, OMPVarListLocTy())) {
4302  ClausesWithImplicit.emplace_back(Implicit);
4303  ErrorFound |=
4304  cast<OMPMapClause>(Implicit)->varlist_size() != ImplicitMaps.size();
4305  } else {
4306  ErrorFound = true;
4307  }
4308  }
4309  }
4310 
4311  llvm::SmallVector<OpenMPDirectiveKind, 4> AllowedNameModifiers;
4312  switch (Kind) {
4313  case OMPD_parallel:
4314  Res = ActOnOpenMPParallelDirective(ClausesWithImplicit, AStmt, StartLoc,
4315  EndLoc);
4316  AllowedNameModifiers.push_back(OMPD_parallel);
4317  break;
4318  case OMPD_simd:
4319  Res = ActOnOpenMPSimdDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc,
4320  VarsWithInheritedDSA);
4321  break;
4322  case OMPD_for:
4323  Res = ActOnOpenMPForDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc,
4324  VarsWithInheritedDSA);
4325  break;
4326  case OMPD_for_simd:
4327  Res = ActOnOpenMPForSimdDirective(ClausesWithImplicit, AStmt, StartLoc,
4328  EndLoc, VarsWithInheritedDSA);
4329  break;
4330  case OMPD_sections:
4331  Res = ActOnOpenMPSectionsDirective(ClausesWithImplicit, AStmt, StartLoc,
4332  EndLoc);
4333  break;
4334  case OMPD_section:
4335  assert(ClausesWithImplicit.empty() &&
4336  "No clauses are allowed for 'omp section' directive");
4337  Res = ActOnOpenMPSectionDirective(AStmt, StartLoc, EndLoc);
4338  break;
4339  case OMPD_single:
4340  Res = ActOnOpenMPSingleDirective(ClausesWithImplicit, AStmt, StartLoc,
4341  EndLoc);
4342  break;
4343  case OMPD_master:
4344  assert(ClausesWithImplicit.empty() &&
4345  "No clauses are allowed for 'omp master' directive");
4346  Res = ActOnOpenMPMasterDirective(AStmt, StartLoc, EndLoc);
4347  break;
4348  case OMPD_critical:
4349  Res = ActOnOpenMPCriticalDirective(DirName, ClausesWithImplicit, AStmt,
4350  StartLoc, EndLoc);
4351  break;
4352  case OMPD_parallel_for:
4353  Res = ActOnOpenMPParallelForDirective(ClausesWithImplicit, AStmt, StartLoc,
4354  EndLoc, VarsWithInheritedDSA);
4355  AllowedNameModifiers.push_back(OMPD_parallel);
4356  break;
4357  case OMPD_parallel_for_simd:
4358  Res = ActOnOpenMPParallelForSimdDirective(
4359  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4360  AllowedNameModifiers.push_back(OMPD_parallel);
4361  break;
4362  case OMPD_parallel_sections:
4363  Res = ActOnOpenMPParallelSectionsDirective(ClausesWithImplicit, AStmt,
4364  StartLoc, EndLoc);
4365  AllowedNameModifiers.push_back(OMPD_parallel);
4366  break;
4367  case OMPD_task:
4368  Res =
4369  ActOnOpenMPTaskDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc);
4370  AllowedNameModifiers.push_back(OMPD_task);
4371  break;
4372  case OMPD_taskyield:
4373  assert(ClausesWithImplicit.empty() &&
4374  "No clauses are allowed for 'omp taskyield' directive");
4375  assert(AStmt == nullptr &&
4376  "No associated statement allowed for 'omp taskyield' directive");
4377  Res = ActOnOpenMPTaskyieldDirective(StartLoc, EndLoc);
4378  break;
4379  case OMPD_barrier:
4380  assert(ClausesWithImplicit.empty() &&
4381  "No clauses are allowed for 'omp barrier' directive");
4382  assert(AStmt == nullptr &&
4383  "No associated statement allowed for 'omp barrier' directive");
4384  Res = ActOnOpenMPBarrierDirective(StartLoc, EndLoc);
4385  break;
4386  case OMPD_taskwait:
4387  assert(ClausesWithImplicit.empty() &&
4388  "No clauses are allowed for 'omp taskwait' directive");
4389  assert(AStmt == nullptr &&
4390  "No associated statement allowed for 'omp taskwait' directive");
4391  Res = ActOnOpenMPTaskwaitDirective(StartLoc, EndLoc);
4392  break;
4393  case OMPD_taskgroup:
4394  Res = ActOnOpenMPTaskgroupDirective(ClausesWithImplicit, AStmt, StartLoc,
4395  EndLoc);
4396  break;
4397  case OMPD_flush:
4398  assert(AStmt == nullptr &&
4399  "No associated statement allowed for 'omp flush' directive");
4400  Res = ActOnOpenMPFlushDirective(ClausesWithImplicit, StartLoc, EndLoc);
4401  break;
4402  case OMPD_ordered:
4403  Res = ActOnOpenMPOrderedDirective(ClausesWithImplicit, AStmt, StartLoc,
4404  EndLoc);
4405  break;
4406  case OMPD_atomic:
4407  Res = ActOnOpenMPAtomicDirective(ClausesWithImplicit, AStmt, StartLoc,
4408  EndLoc);
4409  break;
4410  case OMPD_teams:
4411  Res =
4412  ActOnOpenMPTeamsDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc);
4413  break;
4414  case OMPD_target:
4415  Res = ActOnOpenMPTargetDirective(ClausesWithImplicit, AStmt, StartLoc,
4416  EndLoc);
4417  AllowedNameModifiers.push_back(OMPD_target);
4418  break;
4419  case OMPD_target_parallel:
4420  Res = ActOnOpenMPTargetParallelDirective(ClausesWithImplicit, AStmt,
4421  StartLoc, EndLoc);
4422  AllowedNameModifiers.push_back(OMPD_target);
4423  AllowedNameModifiers.push_back(OMPD_parallel);
4424  break;
4425  case OMPD_target_parallel_for:
4426  Res = ActOnOpenMPTargetParallelForDirective(
4427  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4428  AllowedNameModifiers.push_back(OMPD_target);
4429  AllowedNameModifiers.push_back(OMPD_parallel);
4430  break;
4431  case OMPD_cancellation_point:
4432  assert(ClausesWithImplicit.empty() &&
4433  "No clauses are allowed for 'omp cancellation point' directive");
4434  assert(AStmt == nullptr && "No associated statement allowed for 'omp "
4435  "cancellation point' directive");
4436  Res = ActOnOpenMPCancellationPointDirective(StartLoc, EndLoc, CancelRegion);
4437  break;
4438  case OMPD_cancel:
4439  assert(AStmt == nullptr &&
4440  "No associated statement allowed for 'omp cancel' directive");
4441  Res = ActOnOpenMPCancelDirective(ClausesWithImplicit, StartLoc, EndLoc,
4442  CancelRegion);
4443  AllowedNameModifiers.push_back(OMPD_cancel);
4444  break;
4445  case OMPD_target_data:
4446  Res = ActOnOpenMPTargetDataDirective(ClausesWithImplicit, AStmt, StartLoc,
4447  EndLoc);
4448  AllowedNameModifiers.push_back(OMPD_target_data);
4449  break;
4450  case OMPD_target_enter_data:
4451  Res = ActOnOpenMPTargetEnterDataDirective(ClausesWithImplicit, StartLoc,
4452  EndLoc, AStmt);
4453  AllowedNameModifiers.push_back(OMPD_target_enter_data);
4454  break;
4455  case OMPD_target_exit_data:
4456  Res = ActOnOpenMPTargetExitDataDirective(ClausesWithImplicit, StartLoc,
4457  EndLoc, AStmt);
4458  AllowedNameModifiers.push_back(OMPD_target_exit_data);
4459  break;
4460  case OMPD_taskloop:
4461  Res = ActOnOpenMPTaskLoopDirective(ClausesWithImplicit, AStmt, StartLoc,
4462  EndLoc, VarsWithInheritedDSA);
4463  AllowedNameModifiers.push_back(OMPD_taskloop);
4464  break;
4465  case OMPD_taskloop_simd:
4466  Res = ActOnOpenMPTaskLoopSimdDirective(ClausesWithImplicit, AStmt, StartLoc,
4467  EndLoc, VarsWithInheritedDSA);
4468  AllowedNameModifiers.push_back(OMPD_taskloop);
4469  break;
4470  case OMPD_master_taskloop:
4471  Res = ActOnOpenMPMasterTaskLoopDirective(
4472  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4473  AllowedNameModifiers.push_back(OMPD_taskloop);
4474  break;
4475  case OMPD_master_taskloop_simd:
4476  Res = ActOnOpenMPMasterTaskLoopSimdDirective(
4477  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4478  AllowedNameModifiers.push_back(OMPD_taskloop);
4479  break;
4480  case OMPD_parallel_master_taskloop:
4481  Res = ActOnOpenMPParallelMasterTaskLoopDirective(
4482  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4483  AllowedNameModifiers.push_back(OMPD_taskloop);
4484  AllowedNameModifiers.push_back(OMPD_parallel);
4485  break;
4486  case OMPD_distribute:
4487  Res = ActOnOpenMPDistributeDirective(ClausesWithImplicit, AStmt, StartLoc,
4488  EndLoc, VarsWithInheritedDSA);
4489  break;
4490  case OMPD_target_update:
4491  Res = ActOnOpenMPTargetUpdateDirective(ClausesWithImplicit, StartLoc,
4492  EndLoc, AStmt);
4493  AllowedNameModifiers.push_back(OMPD_target_update);
4494  break;
4495  case OMPD_distribute_parallel_for:
4496  Res = ActOnOpenMPDistributeParallelForDirective(
4497  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4498  AllowedNameModifiers.push_back(OMPD_parallel);
4499  break;
4500  case OMPD_distribute_parallel_for_simd:
4501  Res = ActOnOpenMPDistributeParallelForSimdDirective(
4502  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4503  AllowedNameModifiers.push_back(OMPD_parallel);
4504  break;
4505  case OMPD_distribute_simd:
4506  Res = ActOnOpenMPDistributeSimdDirective(
4507  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4508  break;
4509  case OMPD_target_parallel_for_simd:
4510  Res = ActOnOpenMPTargetParallelForSimdDirective(
4511  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4512  AllowedNameModifiers.push_back(OMPD_target);
4513  AllowedNameModifiers.push_back(OMPD_parallel);
4514  break;
4515  case OMPD_target_simd:
4516  Res = ActOnOpenMPTargetSimdDirective(ClausesWithImplicit, AStmt, StartLoc,
4517  EndLoc, VarsWithInheritedDSA);
4518  AllowedNameModifiers.push_back(OMPD_target);
4519  break;
4520  case OMPD_teams_distribute:
4521  Res = ActOnOpenMPTeamsDistributeDirective(
4522  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4523  break;
4524  case OMPD_teams_distribute_simd:
4525  Res = ActOnOpenMPTeamsDistributeSimdDirective(
4526  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4527  break;
4528  case OMPD_teams_distribute_parallel_for_simd:
4529  Res = ActOnOpenMPTeamsDistributeParallelForSimdDirective(
4530  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4531  AllowedNameModifiers.push_back(OMPD_parallel);
4532  break;
4533  case OMPD_teams_distribute_parallel_for:
4534  Res = ActOnOpenMPTeamsDistributeParallelForDirective(
4535  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4536  AllowedNameModifiers.push_back(OMPD_parallel);
4537  break;
4538  case OMPD_target_teams:
4539  Res = ActOnOpenMPTargetTeamsDirective(ClausesWithImplicit, AStmt, StartLoc,
4540  EndLoc);
4541  AllowedNameModifiers.push_back(OMPD_target);
4542  break;
4543  case OMPD_target_teams_distribute:
4544  Res = ActOnOpenMPTargetTeamsDistributeDirective(
4545  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4546  AllowedNameModifiers.push_back(OMPD_target);
4547  break;
4548  case OMPD_target_teams_distribute_parallel_for:
4549  Res = ActOnOpenMPTargetTeamsDistributeParallelForDirective(
4550  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4551  AllowedNameModifiers.push_back(OMPD_target);
4552  AllowedNameModifiers.push_back(OMPD_parallel);
4553  break;
4554  case OMPD_target_teams_distribute_parallel_for_simd:
4555  Res = ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
4556  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4557  AllowedNameModifiers.push_back(OMPD_target);
4558  AllowedNameModifiers.push_back(OMPD_parallel);
4559  break;
4560  case OMPD_target_teams_distribute_simd:
4561  Res = ActOnOpenMPTargetTeamsDistributeSimdDirective(
4562  ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
4563  AllowedNameModifiers.push_back(OMPD_target);
4564  break;
4565  case OMPD_declare_target:
4566  case OMPD_end_declare_target:
4567  case OMPD_threadprivate:
4568  case OMPD_allocate:
4569  case OMPD_declare_reduction:
4570  case OMPD_declare_mapper:
4571  case OMPD_declare_simd:
4572  case OMPD_requires:
4573  case OMPD_declare_variant:
4574  llvm_unreachable("OpenMP Directive is not allowed");
4575  case OMPD_unknown:
4576  llvm_unreachable("Unknown OpenMP directive");
4577  }
4578 
4579  ErrorFound = Res.isInvalid() || ErrorFound;
4580 
4581  // Check variables in the clauses if default(none) was specified.
4582  if (DSAStack->getDefaultDSA() == DSA_none) {
4583  DSAAttrChecker DSAChecker(DSAStack, *this, nullptr);
4584  for (OMPClause *C : Clauses) {
4585  switch (C->getClauseKind()) {
4586  case OMPC_num_threads:
4587  case OMPC_dist_schedule:
4588  // Do not analyze if no parent teams directive.
4589  if (isOpenMPTeamsDirective(DSAStack->getCurrentDirective()))
4590  break;
4591  continue;
4592  case OMPC_if:
4593  if (isOpenMPTeamsDirective(DSAStack->getCurrentDirective()) &&
4594  cast<OMPIfClause>(C)->getNameModifier() != OMPD_target)
4595  break;
4596  continue;
4597  case OMPC_schedule:
4598  break;
4599  case OMPC_grainsize:
4600  case OMPC_num_tasks:
4601  case OMPC_final:
4602  case OMPC_priority:
4603  // Do not analyze if no parent parallel directive.
4604  if (isOpenMPParallelDirective(DSAStack->getCurrentDirective()))
4605  break;
4606  continue;
4607  case OMPC_ordered:
4608  case OMPC_device:
4609  case OMPC_num_teams:
4610  case OMPC_thread_limit:
4611  case OMPC_hint:
4612  case OMPC_collapse:
4613  case OMPC_safelen:
4614  case OMPC_simdlen:
4615  case OMPC_default:
4616  case OMPC_proc_bind:
4617  case OMPC_private:
4618  case OMPC_firstprivate:
4619  case OMPC_lastprivate:
4620  case OMPC_shared:
4621  case OMPC_reduction:
4622  case OMPC_task_reduction:
4623  case OMPC_in_reduction:
4624  case OMPC_linear:
4625  case OMPC_aligned:
4626  case OMPC_copyin:
4627  case OMPC_copyprivate:
4628  case OMPC_nowait:
4629  case OMPC_untied:
4630  case OMPC_mergeable:
4631  case OMPC_allocate:
4632  case OMPC_read:
4633  case OMPC_write:
4634  case OMPC_update:
4635  case OMPC_capture:
4636  case OMPC_seq_cst:
4637  case OMPC_depend:
4638  case OMPC_threads:
4639  case OMPC_simd:
4640  case OMPC_map:
4641  case OMPC_nogroup:
4642  case OMPC_defaultmap:
4643  case OMPC_to:
4644  case OMPC_from:
4645  case OMPC_use_device_ptr:
4646  case OMPC_is_device_ptr:
4647  continue;
4648  case OMPC_allocator:
4649  case OMPC_flush:
4650  case OMPC_threadprivate:
4651  case OMPC_uniform:
4652  case OMPC_unknown:
4653  case OMPC_unified_address:
4654  case OMPC_unified_shared_memory:
4655  case OMPC_reverse_offload:
4656  case OMPC_dynamic_allocators:
4657  case OMPC_atomic_default_mem_order:
4658  case OMPC_device_type:
4659  case OMPC_match:
4660  llvm_unreachable("Unexpected clause");
4661  }
4662  for (Stmt *CC : C->children()) {
4663  if (CC)
4664  DSAChecker.Visit(CC);
4665  }
4666  }
4667  for (auto &P : DSAChecker.getVarsWithInheritedDSA())
4668  VarsWithInheritedDSA[P.getFirst()] = P.getSecond();
4669  }
4670  for (const auto &P : VarsWithInheritedDSA) {
4671  if (P.getFirst()->isImplicit() || isa<OMPCapturedExprDecl>(P.getFirst()))
4672  continue;
4673  ErrorFound = true;
4674  Diag(P.second->getExprLoc(), diag::err_omp_no_dsa_for_variable)
4675  << P.first << P.second->getSourceRange();
4676  Diag(DSAStack->getDefaultDSALocation(), diag::note_omp_default_dsa_none);
4677  }
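// Illustrative only (user code, not from this file): the shape that reaches
// the err_omp_no_dsa_for_variable diagnostic emitted above.
//   int x = 0;
//   #pragma omp parallel default(none)  // error: 'x' must be given an
//   { x += 1; }                         // explicit data-sharing attribute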
4678 
4679  if (!AllowedNameModifiers.empty())
4680  ErrorFound = checkIfClauses(*this, Kind, Clauses, AllowedNameModifiers) ||
4681  ErrorFound;
4682 
4683  if (ErrorFound)
4684  return StmtError();
4685 
4686  if (!(Res.getAs<OMPExecutableDirective>()->isStandaloneDirective())) {
4687  Res.getAs<OMPExecutableDirective>()
4688  ->getStructuredBlock()
4689  ->setIsOMPStructuredBlock(true);
4690  }
4691 
4692  if (!CurContext->isDependentContext() &&
4693  isOpenMPTargetExecutionDirective(Kind) &&
4694  !(DSAStack->hasRequiresDeclWithClause<OMPUnifiedSharedMemoryClause>() ||
4695  DSAStack->hasRequiresDeclWithClause<OMPUnifiedAddressClause>() ||
4696  DSAStack->hasRequiresDeclWithClause<OMPReverseOffloadClause>() ||
4697  DSAStack->hasRequiresDeclWithClause<OMPDynamicAllocatorsClause>())) {
4698  // Register target to DSA Stack.
4699  DSAStack->addTargetDirLocation(StartLoc);
4700  }
4701 
4702  return Res;
4703 }
4704 
4705 Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareSimdDirective(
4706  DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen,
4707  ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
4708  ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
4709  ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR) {
4710  assert(Aligneds.size() == Alignments.size());
4711  assert(Linears.size() == LinModifiers.size());
4712  assert(Linears.size() == Steps.size());
4713  if (!DG || DG.get().isNull())
4714  return DeclGroupPtrTy();
4715 
4716  const int SimdId = 0;
4717  if (!DG.get().isSingleDecl()) {
4718  Diag(SR.getBegin(), diag::err_omp_single_decl_in_declare_simd_variant)
4719  << SimdId;
4720  return DG;
4721  }
4722  Decl *ADecl = DG.get().getSingleDecl();
4723  if (auto *FTD = dyn_cast<FunctionTemplateDecl>(ADecl))
4724  ADecl = FTD->getTemplatedDecl();
4725 
4726  auto *FD = dyn_cast<FunctionDecl>(ADecl);
4727  if (!FD) {
4728  Diag(ADecl->getLocation(), diag::err_omp_function_expected) << SimdId;
4729  return DeclGroupPtrTy();
4730  }
4731 
4732  // OpenMP [2.8.2, declare simd construct, Description]
4733  // The parameter of the simdlen clause must be a constant positive integer
4734  // expression.
4735  ExprResult SL;
4736  if (Simdlen)
4737  SL = VerifyPositiveIntegerConstantInClause(Simdlen, OMPC_simdlen);
4738  // OpenMP [2.8.2, declare simd construct, Description]
4739  // The special this pointer can be used as if it was one of the arguments to the
4740  // function in any of the linear, aligned, or uniform clauses.
4741  // The uniform clause declares one or more arguments to have an invariant
4742  // value for all concurrent invocations of the function in the execution of a
4743  // single SIMD loop.
4744  llvm::DenseMap<const Decl *, const Expr *> UniformedArgs;
4745  const Expr *UniformedLinearThis = nullptr;
4746  for (const Expr *E : Uniforms) {
4747  E = E->IgnoreParenImpCasts();
4748  if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
4749  if (const auto *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl()))
4750  if (FD->getNumParams() > PVD->getFunctionScopeIndex() &&
4751  FD->getParamDecl(PVD->getFunctionScopeIndex())
4752  ->getCanonicalDecl() == PVD->getCanonicalDecl()) {
4753  UniformedArgs.try_emplace(PVD->getCanonicalDecl(), E);
4754  continue;
4755  }
4756  if (isa<CXXThisExpr>(E)) {
4757  UniformedLinearThis = E;
4758  continue;
4759  }
4760  Diag(E->getExprLoc(), diag::err_omp_param_or_this_in_clause)
4761  << FD->getDeclName() << (isa<CXXMethodDecl>(ADecl) ? 1 : 0);
4762  }
4763  // OpenMP [2.8.2, declare simd construct, Description]
4764  // The aligned clause declares that the object to which each list item points
4765  // is aligned to the number of bytes expressed in the optional parameter of
4766  // the aligned clause.
4767  // The special this pointer can be used as if it was one of the arguments to the
4768  // function in any of the linear, aligned, or uniform clauses.
4769  // The type of list items appearing in the aligned clause must be array,
4770  // pointer, reference to array, or reference to pointer.
4771  llvm::DenseMap<const Decl *, const Expr *> AlignedArgs;
4772  const Expr *AlignedThis = nullptr;
4773  for (const Expr *E : Aligneds) {
4774  E = E->IgnoreParenImpCasts();
4775  if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
4776  if (const auto *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl())) {
4777  const VarDecl *CanonPVD = PVD->getCanonicalDecl();
4778  if (FD->getNumParams() > PVD->getFunctionScopeIndex() &&
4779  FD->getParamDecl(PVD->getFunctionScopeIndex())
4780  ->getCanonicalDecl() == CanonPVD) {
4781  // OpenMP [2.8.1, simd construct, Restrictions]
4782  // A list-item cannot appear in more than one aligned clause.
4783  if (AlignedArgs.count(CanonPVD) > 0) {
4784  Diag(E->getExprLoc(), diag::err_omp_aligned_twice)
4785  << 1 << E->getSourceRange();
4786  Diag(AlignedArgs[CanonPVD]->getExprLoc(),
4787  diag::note_omp_explicit_dsa)
4788  << getOpenMPClauseName(OMPC_aligned);
4789  continue;
4790  }
4791  AlignedArgs[CanonPVD] = E;
4792  QualType QTy = PVD->getType()
4793  .getNonReferenceType()
4794  .getUnqualifiedType()
4795  .getCanonicalType();
4796  const Type *Ty = QTy.getTypePtrOrNull();
4797  if (!Ty || (!Ty->isArrayType() && !Ty->isPointerType())) {
4798  Diag(E->getExprLoc(), diag::err_omp_aligned_expected_array_or_ptr)
4799  << QTy << getLangOpts().CPlusPlus << E->getSourceRange();
4800  Diag(PVD->getLocation(), diag::note_previous_decl) << PVD;
4801  }
4802  continue;
4803  }
4804  }
4805  if (isa<CXXThisExpr>(E)) {
4806  if (AlignedThis) {
4807  Diag(E->getExprLoc(), diag::err_omp_aligned_twice)
4808  << 2 << E->getSourceRange();
4809  Diag(AlignedThis->getExprLoc(), diag::note_omp_explicit_dsa)
4810  << getOpenMPClauseName(OMPC_aligned);
4811  }
4812  AlignedThis = E;
4813  continue;
4814  }
4815  Diag(E->getExprLoc(), diag::err_omp_param_or_this_in_clause)
4816  << FD->getDeclName() << (isa<CXXMethodDecl>(ADecl) ? 1 : 0);
4817  }
4818  // The optional parameter of the aligned clause, alignment, must be a constant
4819  // positive integer expression. If no optional parameter is specified,
4820  // implementation-defined default alignments for SIMD instructions on the
4821  // target platforms are assumed.
4822  SmallVector<const Expr *, 4> NewAligns;
4823  for (Expr *E : Alignments) {
4824  ExprResult Align;
4825  if (E)
4826  Align = VerifyPositiveIntegerConstantInClause(E, OMPC_aligned);
4827  NewAligns.push_back(Align.get());
4828  }
4829  // OpenMP [2.8.2, declare simd construct, Description]
4830  // The linear clause declares one or more list items to be private to a SIMD
4831  // lane and to have a linear relationship with respect to the iteration space
4832  // of a loop.
4833  // The special this pointer can be used as if it was one of the arguments to the
4834  // function in any of the linear, aligned, or uniform clauses.
4835  // When a linear-step expression is specified in a linear clause it must be
4836  // either a constant integer expression or an integer-typed parameter that is
4837  // specified in a uniform clause on the directive.
4838  llvm::DenseMap<const Decl *, const Expr *> LinearArgs;
4839  const bool IsUniformedThis = UniformedLinearThis != nullptr;
4840  auto MI = LinModifiers.begin();
4841  for (const Expr *E : Linears) {
4842  auto LinKind = static_cast<OpenMPLinearClauseKind>(*MI);
4843  ++MI;
4844  E = E->IgnoreParenImpCasts();
4845  if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
4846  if (const auto *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl())) {
4847  const VarDecl *CanonPVD = PVD->getCanonicalDecl();
4848  if (FD->getNumParams() > PVD->getFunctionScopeIndex() &&
4849  FD->getParamDecl(PVD->getFunctionScopeIndex())
4850  ->getCanonicalDecl() == CanonPVD) {
4851  // OpenMP [2.15.3.7, linear Clause, Restrictions]
4852  // A list-item cannot appear in more than one linear clause.
4853  if (LinearArgs.count(CanonPVD) > 0) {
4854  Diag(E->getExprLoc(), diag::err_omp_wrong_dsa)
4855  << getOpenMPClauseName(OMPC_linear)
4856  << getOpenMPClauseName(OMPC_linear) << E->getSourceRange();
4857  Diag(LinearArgs[CanonPVD]->getExprLoc(),
4858  diag::note_omp_explicit_dsa)
4859  << getOpenMPClauseName(OMPC_linear);
4860  continue;
4861  }
4862  // Each argument can appear in at most one uniform or linear clause.
4863  if (UniformedArgs.count(CanonPVD) > 0) {
4864  Diag(E->getExprLoc(), diag::err_omp_wrong_dsa)
4865  << getOpenMPClauseName(OMPC_linear)
4866  << getOpenMPClauseName(OMPC_uniform) << E->getSourceRange();
4867  Diag(UniformedArgs[CanonPVD]->getExprLoc(),
4868  diag::note_omp_explicit_dsa)
4869  << getOpenMPClauseName(OMPC_uniform);
4870  continue;
4871  }
4872  LinearArgs[CanonPVD] = E;
4873  if (E->isValueDependent() || E->isTypeDependent() ||
4874  E->isInstantiationDependent() ||
4875  E->containsUnexpandedParameterPack())
4876  continue;
4877  (void)CheckOpenMPLinearDecl(CanonPVD, E->getExprLoc(), LinKind,
4878  PVD->getOriginalType());
4879  continue;
4880  }
4881  }
4882  if (isa<CXXThisExpr>(E)) {
4883  if (UniformedLinearThis) {
4884  Diag(E->getExprLoc(), diag::err_omp_wrong_dsa)
4885  << getOpenMPClauseName(OMPC_linear)
4886  << getOpenMPClauseName(IsUniformedThis ? OMPC_uniform : OMPC_linear)
4887  << E->getSourceRange();
4888  Diag(UniformedLinearThis->getExprLoc(), diag::note_omp_explicit_dsa)
4889  << getOpenMPClauseName(IsUniformedThis ? OMPC_uniform
4890  : OMPC_linear);
4891  continue;
4892  }
4893  UniformedLinearThis = E;
4894  if (E->isValueDependent() || E->isTypeDependent() ||
4895  E->isInstantiationDependent() || E->containsUnexpandedParameterPack())
4896  continue;
4897  (void)CheckOpenMPLinearDecl(/*D=*/nullptr, E->getExprLoc(), LinKind,
4898  E->getType());
4899  continue;
4900  }
4901  Diag(E->getExprLoc(), diag::err_omp_param_or_this_in_clause)
4902  << FD->getDeclName() << (isa<CXXMethodDecl>(ADecl) ? 1 : 0);
4903  }
4904  Expr *Step = nullptr;
4905  Expr *NewStep = nullptr;
4906  SmallVector<Expr *, 4> NewSteps;
4907  for (Expr *E : Steps) {
4908  // Skip the same step expression, it was checked already.
4909  if (Step == E || !E) {
4910  NewSteps.push_back(E ? NewStep : nullptr);
4911  continue;
4912  }
4913  Step = E;
4914  if (const auto *DRE = dyn_cast<DeclRefExpr>(Step))
4915  if (const auto *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl())) {
4916  const VarDecl *CanonPVD = PVD->getCanonicalDecl();
4917  if (UniformedArgs.count(CanonPVD) == 0) {
4918  Diag(Step->getExprLoc(), diag::err_omp_expected_uniform_param)
4919  << Step->getSourceRange();
4920  } else if (E->isValueDependent() || E->isTypeDependent() ||
4921  E->isInstantiationDependent() ||
4922  E->containsUnexpandedParameterPack() ||
4923  CanonPVD->getType()->hasIntegerRepresentation()) {
4924  NewSteps.push_back(Step);
4925  } else {
4926  Diag(Step->getExprLoc(), diag::err_omp_expected_int_param)
4927  << Step->getSourceRange();
4928  }
4929  continue;
4930  }
4931  NewStep = Step;
4932  if (Step && !Step->isValueDependent() && !Step->isTypeDependent() &&
4933  !Step->isInstantiationDependent() &&
4934  !Step->containsUnexpandedParameterPack()) {
4935  NewStep = PerformOpenMPImplicitIntegerConversion(Step->getExprLoc(), Step)
4936  .get();
4937  if (NewStep)
4938  NewStep = VerifyIntegerConstantExpression(NewStep).get();
4939  }
4940  NewSteps.push_back(NewStep);
4941  }
4942  auto *NewAttr = OMPDeclareSimdDeclAttr::CreateImplicit(
4943  Context, BS, SL.get(), const_cast<Expr **>(Uniforms.data()),
4944  Uniforms.size(), const_cast<Expr **>(Aligneds.data()), Aligneds.size(),
4945  const_cast<Expr **>(NewAligns.data()), NewAligns.size(),
4946  const_cast<Expr **>(Linears.data()), Linears.size(),
4947  const_cast<unsigned *>(LinModifiers.data()), LinModifiers.size(),
4948  NewSteps.data(), NewSteps.size(), SR);
4949  ADecl->addAttr(NewAttr);
4950  return DG;
4951 }
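// Illustrative sketch of user code the checks above validate (hypothetical
// names, not part of this file): 'uniform', 'linear', and 'aligned' may only
// name parameters (or 'this'), 'aligned' needs a pointer or array type, and a
// non-constant linear step must be an integer 'uniform' parameter.
#pragma omp declare simd simdlen(8) uniform(scale) linear(i : 1) aligned(data : 32)
static float scaled_load(const float *data, int i, float scale) {
  return data[i] * scale;
}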
4952 
4953 Optional<std::pair<FunctionDecl *, Expr *>>
4954 Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
4955  Expr *VariantRef, SourceRange SR) {
4956  if (!DG || DG.get().isNull())
4957  return None;
4958 
4959  const int VariantId = 1;
4960  // Must be applied only to a single decl.
4961  if (!DG.get().isSingleDecl()) {
4962  Diag(SR.getBegin(), diag::err_omp_single_decl_in_declare_simd_variant)
4963  << VariantId << SR;
4964  return None;
4965  }
4966  Decl *ADecl = DG.get().getSingleDecl();
4967  if (auto *FTD = dyn_cast<FunctionTemplateDecl>(ADecl))
4968  ADecl = FTD->getTemplatedDecl();
4969 
4970  // Decl must be a function.
4971  auto *FD = dyn_cast<FunctionDecl>(ADecl);
4972  if (!FD) {
4973  Diag(ADecl->getLocation(), diag::err_omp_function_expected)
4974  << VariantId << SR;
4975  return None;
4976  }
4977 
4978  auto &&HasMultiVersionAttributes = [](const FunctionDecl *FD) {
4979  return FD->hasAttrs() &&
4980  (FD->hasAttr<CPUDispatchAttr>() || FD->hasAttr<CPUSpecificAttr>() ||
4981  FD->hasAttr<TargetAttr>());
4982  };
4983  // OpenMP is not compatible with CPU-specific attributes.
4984  if (HasMultiVersionAttributes(FD)) {
4985  Diag(FD->getLocation(), diag::err_omp_declare_variant_incompat_attributes)
4986  << SR;
4987  return None;
4988  }
4989 
4990  // Allow #pragma omp declare variant only if the function is not used.
4991  if (FD->isUsed(false))
4992  Diag(SR.getBegin(), diag::warn_omp_declare_variant_after_used)
4993  << FD->getLocation();
4994 
4995  // Check if the function was emitted already.
4996  const FunctionDecl *Definition;
4997  if (!FD->isThisDeclarationADefinition() && FD->isDefined(Definition) &&
4998  (LangOpts.EmitAllDecls || Context.DeclMustBeEmitted(Definition)))
4999  Diag(SR.getBegin(), diag::warn_omp_declare_variant_after_emitted)
5000  << FD->getLocation();
5001 
5002  // The VariantRef must point to a function.
5003  if (!VariantRef) {
5004  Diag(SR.getBegin(), diag::err_omp_function_expected) << VariantId;
5005  return None;
5006  }
5007 
5008  // Do not check templates, wait until instantiation.
5009  if (VariantRef->isTypeDependent() || VariantRef->isValueDependent() ||
5010  VariantRef->containsUnexpandedParameterPack() ||
5011  VariantRef->isInstantiationDependent() || FD->isDependentContext())
5012  return std::make_pair(FD, VariantRef);
5013 
5014  // Convert VariantRef expression to the type of the original function to
5015  // resolve possible conflicts.
5016  ExprResult VariantRefCast;
5017  if (LangOpts.CPlusPlus) {
5018  QualType FnPtrType;
5019  auto *Method = dyn_cast<CXXMethodDecl>(FD);
5020  if (Method && !Method->isStatic()) {
5021  const Type *ClassType =
5022  Context.getTypeDeclType(Method->getParent()).getTypePtr();
5023  FnPtrType = Context.getMemberPointerType(FD->getType(), ClassType);
5024  ExprResult ER;
5025  {
5026  // Build addr_of unary op to correctly handle type checks for member
5027  // functions.
5028  Sema::TentativeAnalysisScope Trap(*this);
5029  ER = CreateBuiltinUnaryOp(VariantRef->getBeginLoc(), UO_AddrOf,
5030  VariantRef);
5031  }
5032  if (!ER.isUsable()) {
5033  Diag(VariantRef->getExprLoc(), diag::err_omp_function_expected)
5034  << VariantId << VariantRef->getSourceRange();
5035  return None;
5036  }
5037  VariantRef = ER.get();
5038  } else {
5039  FnPtrType = Context.getPointerType(FD->getType());
5040  }
5041  ImplicitConversionSequence ICS =
5042  TryImplicitConversion(VariantRef, FnPtrType.getUnqualifiedType(),
5043  /*SuppressUserConversions=*/false,
5044  /*AllowExplicit=*/false,
5045  /*InOverloadResolution=*/false,
5046  /*CStyle=*/false,
5047  /*AllowObjCWritebackConversion=*/false);
5048  if (ICS.isFailure()) {
5049  Diag(VariantRef->getExprLoc(),
5050  diag::err_omp_declare_variant_incompat_types)
5051  << VariantRef->getType() << FnPtrType << VariantRef->getSourceRange();
5052  return None;
5053  }
5054  VariantRefCast = PerformImplicitConversion(
5055  VariantRef, FnPtrType.getUnqualifiedType(), AA_Converting);
5056  if (!VariantRefCast.isUsable())
5057  return None;
5058  // Drop previously built artificial addr_of unary op for member functions.
5059  if (Method && !Method->isStatic()) {
5060  Expr *PossibleAddrOfVariantRef = VariantRefCast.get();
5061  if (auto *UO = dyn_cast<UnaryOperator>(
5062  PossibleAddrOfVariantRef->IgnoreImplicit()))
5063  VariantRefCast = UO->getSubExpr();
5064  }
5065  } else {
5066  VariantRefCast = VariantRef;
5067  }
5068 
5069  ExprResult ER = CheckPlaceholderExpr(VariantRefCast.get());
5070  if (!ER.isUsable() ||
5071  !ER.get()->IgnoreParenImpCasts()->getType()->isFunctionType()) {
5072  Diag(VariantRef->getExprLoc(), diag::err_omp_function_expected)
5073  << VariantId << VariantRef->getSourceRange();
5074  return None;
5075  }
5076 
5077  // The VariantRef must point to a function.
5078  auto *DRE = dyn_cast<DeclRefExpr>(ER.get()->IgnoreParenImpCasts());
5079  if (!DRE) {
5080  Diag(VariantRef->getExprLoc(), diag::err_omp_function_expected)
5081  << VariantId << VariantRef->getSourceRange();
5082  return None;
5083  }
5084  auto *NewFD = dyn_cast_or_null<FunctionDecl>(DRE->getDecl());
5085  if (!NewFD) {
5086  Diag(VariantRef->getExprLoc(), diag::err_omp_function_expected)
5087  << VariantId << VariantRef->getSourceRange();
5088  return None;
5089  }
5090 
5091  // Check that the variant function is not itself marked with a declare variant directive.
5092  if (NewFD->hasAttrs() && NewFD->hasAttr<OMPDeclareVariantAttr>()) {
5093  Diag(VariantRef->getExprLoc(),
5094  diag::warn_omp_declare_variant_marked_as_declare_variant)
5095  << VariantRef->getSourceRange();
5096  SourceRange SR =
5097  NewFD->specific_attr_begin<OMPDeclareVariantAttr>()->getRange();
5098  Diag(SR.getBegin(), diag::note_omp_marked_declare_variant_here) << SR;
5099  return None;
5100  }
5101 
5102  enum DoesntSupport {
5103  VirtFuncs = 1,
5104  Constructors = 3,
5105  Destructors = 4,
5106  DeletedFuncs = 5,
5107  DefaultedFuncs = 6,
5108  ConstexprFuncs = 7,
5109  ConstevalFuncs = 8,
5110  };
5111  if (const auto *CXXFD = dyn_cast<CXXMethodDecl>(FD)) {
5112  if (CXXFD->isVirtual()) {
5113  Diag(FD->getLocation(), diag::err_omp_declare_variant_doesnt_support)
5114  << VirtFuncs;
5115  return None;
5116  }
5117 
5118  if (isa<CXXConstructorDecl>(FD)) {
5119  Diag(FD->getLocation(), diag::err_omp_declare_variant_doesnt_support)
5120  << Constructors;
5121  return None;
5122  }
5123 
5124  if (isa<CXXDestructorDecl>(FD)) {
5125  Diag(FD->getLocation(), diag::err_omp_declare_variant_doesnt_support)
5126  << Destructors;
5127  return None;
5128  }
5129  }
5130 
5131  if (FD->isDeleted()) {
5132  Diag(FD->getLocation(), diag::err_omp_declare_variant_doesnt_support)
5133  << DeletedFuncs;
5134  return None;
5135  }
5136 
5137  if (FD->isDefaulted()) {
5138  Diag(FD->getLocation(), diag::err_omp_declare_variant_doesnt_support)
5139  << DefaultedFuncs;
5140  return None;
5141  }
5142 
5143  if (FD->isConstexpr()) {
5144  Diag(FD->getLocation(), diag::err_omp_declare_variant_doesnt_support)
5145  << (NewFD->isConsteval() ? ConstevalFuncs : ConstexprFuncs);
5146  return None;
5147  }
5148 
5149  // Check general compatibility.
5150  if (areMultiversionVariantFunctionsCompatible(
5151  FD, NewFD, PDiag(diag::err_omp_declare_variant_noproto),
5152  PartialDiagnosticAt(
5153  SR.getBegin(),
5154  PDiag(diag::note_omp_declare_variant_specified_here) << SR),
5155  PartialDiagnosticAt(
5156  VariantRef->getExprLoc(),
5157  PDiag(diag::err_omp_declare_variant_doesnt_support)),
5158  PartialDiagnosticAt(VariantRef->getExprLoc(),
5159  PDiag(diag::err_omp_declare_variant_diff)
5160  << FD->getLocation()),
5161  /*TemplatesSupported=*/true, /*ConstexprSupported=*/false,
5162  /*CLinkageMayDiffer=*/true))
5163  return None;
5164  return std::make_pair(FD, cast<Expr>(DRE));
5165 }
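// Illustrative sketch of user code the checks above accept (hypothetical
// names; the exact 'match' selectors handled by this in-progress
// implementation are an assumption):
//   void base_axpy(int n, float a, const float *x, float *y);
//   void fast_axpy(int n, float a, const float *x, float *y);
//   #pragma omp declare variant(fast_axpy) match(implementation = {vendor(llvm)})
//   void base_axpy(int n, float a, const float *x, float *y);
// The variant must be a plain, non-virtual, non-constexpr function whose type
// is convertible to the type of the base function.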
5166 
5167 void Sema::ActOnOpenMPDeclareVariantDirective(
5168  FunctionDecl *FD, Expr *VariantRef, SourceRange SR,
5169  const Sema::OpenMPDeclareVariantCtsSelectorData &Data) {
5170  if (Data.CtxSet == OMPDeclareVariantAttr::CtxSetUnknown ||
5171  Data.Ctx == OMPDeclareVariantAttr::CtxUnknown)
5172  return;
5173  Expr *Score = nullptr;
5174  OMPDeclareVariantAttr::ScoreType ST = OMPDeclareVariantAttr::ScoreUnknown;
5175  if (Data.CtxScore.isUsable()) {
5176  ST = OMPDeclareVariantAttr::ScoreSpecified;
5177  Score = Data.CtxScore.get();
5178  if (!Score->isTypeDependent() && !Score->isValueDependent() &&
5179  !Score->isInstantiationDependent() &&
5180  !Score->containsUnexpandedParameterPack()) {
5181  llvm::APSInt Result;
5182  ExprResult ICE = VerifyIntegerConstantExpression(Score, &Result);
5183  if (ICE.isInvalid())
5184  return;
5185  }
5186  }
5187  auto *NewAttr = OMPDeclareVariantAttr::CreateImplicit(
5188  Context, VariantRef, Score, Data.CtxSet, ST, Data.Ctx,
5189  Data.ImplVendors.begin(), Data.ImplVendors.size(), SR);
5190  FD->addAttr(NewAttr);
5191 }
5192 
5193 void Sema::markOpenMPDeclareVariantFuncsReferenced(SourceLocation Loc,
5194  FunctionDecl *Func,
5195  bool MightBeOdrUse) {
5196  assert(LangOpts.OpenMP && "Expected OpenMP mode.");
5197 
5198  if (!Func->isDependentContext() && Func->hasAttrs()) {
5199  for (OMPDeclareVariantAttr *A :
5200  Func->specific_attrs<OMPDeclareVariantAttr>()) {
5201  // TODO: add checks for active OpenMP context where possible.
5202  Expr *VariantRef = A->getVariantFuncRef();
5203  auto *DRE = dyn_cast<DeclRefExpr>(VariantRef->IgnoreParenImpCasts());
5204  auto *F = cast<FunctionDecl>(DRE->getDecl());
5205  if (!F->isDefined() && F->isTemplateInstantiation())
5206  InstantiateFunctionDefinition(Loc, F->getFirstDecl());
5207  MarkFunctionReferenced(Loc, F, MightBeOdrUse);
5208  }
5209  }
5210 }
5211 
5212 StmtResult Sema::ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
5213  Stmt *AStmt,
5214  SourceLocation StartLoc,
5215  SourceLocation EndLoc) {
5216  if (!AStmt)
5217  return StmtError();
5218 
5219  auto *CS = cast<CapturedStmt>(AStmt);
5220  // 1.2.2 OpenMP Language Terminology
5221  // Structured block - An executable statement with a single entry at the
5222  // top and a single exit at the bottom.
5223  // The point of exit cannot be a branch out of the structured block.
5224  // longjmp() and throw() must not violate the entry/exit criteria.
5225  CS->getCapturedDecl()->setNothrow();
5226 
5227  setFunctionHasBranchProtectedScope();
5228 
5229  return OMPParallelDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt,
5230  DSAStack->isCancelRegion());
5231 }
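// Illustrative user code (not part of this file, hypothetical names): the
// compound statement below is the "structured block" referred to above --
// single entry at the top, single exit at the bottom, no branches out of it.
static void touch_private_copies(int n) {
  #pragma omp parallel firstprivate(n)
  {
    // No 'return', 'goto' out, or longjmp/throw across this block's boundary.
    n += 1; // each thread updates its own firstprivate copy
  }
}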
5232 
5233 namespace {
5234 /// Iteration space of a single for loop.
5235 struct LoopIterationSpace final {
5236  /// True if the condition operator is the strict compare operator (<, > or
5237  /// !=).
5238  bool IsStrictCompare = false;
5239  /// Condition of the loop.
5240  Expr *PreCond = nullptr;
5241  /// This expression calculates the number of iterations in the loop.
5242  /// It is always possible to calculate it before starting the loop.
5243  Expr *NumIterations = nullptr;
5244  /// The loop counter variable.
5245  Expr *CounterVar = nullptr;
5246  /// Private loop counter variable.
5247  Expr *PrivateCounterVar = nullptr;
5248  /// This is initializer for the initial value of #CounterVar.
5249  Expr *CounterInit = nullptr;
5250  /// This is step for the #CounterVar used to generate its update:
5251  /// #CounterVar = #CounterInit + #CounterStep * CurrentIteration.
5252  Expr *CounterStep = nullptr;
5253  /// Should step be subtracted?
5254  bool Subtract = false;
5255  /// Source range of the loop init.
5256  SourceRange InitSrcRange;
5257  /// Source range of the loop condition.
5258  SourceRange CondSrcRange;
5259  /// Source range of the loop increment.
5260  SourceRange IncSrcRange;
5261  /// Minimum value that the loop control variable can have. Used to support
5262  /// non-rectangular loops. Applied only to LCVs with non-iterator types,
5263  /// since only such variables can be used in non-loop-invariant expressions.
5264  Expr *MinValue = nullptr;
5265  /// Maximum value that the loop control variable can have. Used to support
5266  /// non-rectangular loops. Applied only to LCVs with non-iterator types,
5267  /// since only such variables can be used in non-loop-invariant expressions.
5268  Expr *MaxValue = nullptr;
5269  /// true, if the lower bound depends on the outer loop control var.
5270  bool IsNonRectangularLB = false;
5271  /// true, if the upper bound depends on the outer loop control var.
5272  bool IsNonRectangularUB = false;
5273  /// Index of the loop this loop depends on and forms non-rectangular loop
5274  /// nest.
5275  unsigned LoopDependentIdx = 0;
5276  /// Final condition for non-rectangular loop nest support. It is used to
5277  /// check that the iterations for this particular counter have
5278  /// finished.
5279  Expr *FinalCondition = nullptr;
5280 };
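// Rough sketch of the arithmetic this struct feeds (an assumption about the
// rounding; the exact expressions, including overflow handling, are built in
// buildNumIterations() below): for a canonical loop
//   for (i = LB; i < UB; i += Step)   with Step > 0
// NumIterations is (UB - LB + Step - 1) / Step, a '<=' test first widens the
// bound by 1, and '>'/'>=' mirror this with the step subtracted.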
5281 
5282 /// Helper class for checking canonical form of the OpenMP loops and
5283 /// extracting iteration space of each loop in the loop nest, that will be used
5284 /// for IR generation.
5285 class OpenMPIterationSpaceChecker {
5286  /// Reference to Sema.
5287  Sema &SemaRef;
5288  /// Data-sharing stack.
5289  DSAStackTy &Stack;
5290  /// A location for diagnostics (when there is no better location).
5291  SourceLocation DefaultLoc;
5292  /// A location for diagnostics (when increment is not compatible).
5293  SourceLocation ConditionLoc;
5294  /// A source location for referring to loop init later.
5295  SourceRange InitSrcRange;
5296  /// A source location for referring to condition later.
5297  SourceRange ConditionSrcRange;
5298  /// A source location for referring to increment later.
5299  SourceRange IncrementSrcRange;
5300  /// Loop variable.
5301  ValueDecl *LCDecl = nullptr;
5302  /// Reference to loop variable.
5303  Expr *LCRef = nullptr;
5304  /// Lower bound (initializer for the var).
5305  Expr *LB = nullptr;
5306  /// Upper bound.
5307  Expr *UB = nullptr;
5308  /// Loop step (increment).
5309  Expr *Step = nullptr;
5310  /// This flag is true when condition is one of:
5311  /// Var < UB
5312  /// Var <= UB
5313  /// UB > Var
5314  /// UB >= Var
5315  /// This will have no value when the condition is !=
5316  llvm::Optional<bool> TestIsLessOp;
5317  /// This flag is true when condition is strict ( < or > ).
5318  bool TestIsStrictOp = false;
5319  /// This flag is true when step is subtracted on each iteration.
5320  bool SubtractStep = false;
5321  /// The outer loop counter this loop depends on (if any).
5322  const ValueDecl *DepDecl = nullptr;
5323  /// Contains the number of the loop (starting from 1) on which this loop's
5324  /// counter init expression depends.
5325  Optional<unsigned> InitDependOnLC;
5326  /// Contains the number of the loop (starting from 1) on which this loop's
5327  /// counter condition expression depends.
5328  Optional<unsigned> CondDependOnLC;
5329  /// Checks if the provided statement depends on the loop counter.
5330  Optional<unsigned> doesDependOnLoopCounter(const Stmt *S, bool IsInitializer);
5331  /// Original condition required for checking of the exit condition for
5332  /// non-rectangular loop.
5333  Expr *Condition = nullptr;
5334 
5335 public:
5336  OpenMPIterationSpaceChecker(Sema &SemaRef, DSAStackTy &Stack,
5337  SourceLocation DefaultLoc)
5338  : SemaRef(SemaRef), Stack(Stack), DefaultLoc(DefaultLoc),
5339  ConditionLoc(DefaultLoc) {}
5340  /// Check init-expr for canonical loop form and save loop counter
5341  /// variable - #Var and its initialization value - #LB.
5342  bool checkAndSetInit(Stmt *S, bool EmitDiags = true);
5343  /// Check test-expr for canonical form, save upper-bound (#UB), flags
5344  /// for less/greater and for strict/non-strict comparison.
5345  bool checkAndSetCond(Expr *S);
5346  /// Check incr-expr for canonical loop form and return true if it
5347  /// does not conform, otherwise save loop step (#Step).
5348  bool checkAndSetInc(Expr *S);
5349  /// Return the loop counter variable.
5350  ValueDecl *getLoopDecl() const { return LCDecl; }
5351  /// Return the reference expression to loop counter variable.
5352  Expr *getLoopDeclRefExpr() const { return LCRef; }
5353  /// Source range of the loop init.
5354  SourceRange getInitSrcRange() const { return InitSrcRange; }
5355  /// Source range of the loop condition.
5356  SourceRange getConditionSrcRange() const { return ConditionSrcRange; }
5357  /// Source range of the loop increment.
5358  SourceRange getIncrementSrcRange() const { return IncrementSrcRange; }
5359  /// True if the step should be subtracted.
5360  bool shouldSubtractStep() const { return SubtractStep; }
5361  /// True, if the compare operator is strict (<, > or !=).
5362  bool isStrictTestOp() const { return TestIsStrictOp; }
5363  /// Build the expression to calculate the number of iterations.
5364  Expr *buildNumIterations(
5365  Scope *S, ArrayRef<LoopIterationSpace> ResultIterSpaces, bool LimitedType,
5366  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) const;
5367  /// Build the precondition expression for the loops.
5368  Expr *
5369  buildPreCond(Scope *S, Expr *Cond,
5370  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) const;
5371  /// Build reference expression to the counter to be used for codegen.
5372  DeclRefExpr *
5373  buildCounterVar(llvm::MapVector<const Expr *, DeclRefExpr *> &Captures,
5374  DSAStackTy &DSA) const;
5375  /// Build reference expression to the private counter to be used for
5376  /// codegen.
5377  Expr *buildPrivateCounterVar() const;
5378  /// Build initialization of the counter to be used for codegen.
5379  Expr *buildCounterInit() const;
5380  /// Build step of the counter to be used for codegen.
5381  Expr *buildCounterStep() const;
5382  /// Build loop data with counter value for depend clauses in ordered
5383  /// directives.
5384  Expr *
5385  buildOrderedLoopData(Scope *S, Expr *Counter,
5386  llvm::MapVector<const Expr *, DeclRefExpr *> &Captures,
5387  SourceLocation Loc, Expr *Inc = nullptr,
5388  OverloadedOperatorKind OOK = OO_Amp);
5389  /// Builds the minimum and maximum values for the loop counter.
5390  std::pair<Expr *, Expr *> buildMinMaxValues(
5391  Scope *S, llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) const;
5392  /// Builds final condition for the non-rectangular loops.
5393  Expr *buildFinalCondition(Scope *S) const;
5394  /// Return true if any expression is dependent.
5395  bool dependent() const;
5396  /// Returns true if the initializer forms non-rectangular loop.
5397  bool doesInitDependOnLC() const { return InitDependOnLC.hasValue(); }
5398  /// Returns true if the condition forms non-rectangular loop.
5399  bool doesCondDependOnLC() const { return CondDependOnLC.hasValue(); }
5400  /// Returns index of the loop we depend on (starting from 1), or 0 otherwise.
5401  unsigned getLoopDependentIdx() const {
5402  return InitDependOnLC.getValueOr(CondDependOnLC.getValueOr(0));
5403  }
5404 
5405 private:
5406  /// Check the right-hand side of an assignment in the increment
5407  /// expression.
5408  bool checkAndSetIncRHS(Expr *RHS);
5409  /// Helper to set loop counter variable and its initializer.
5410  bool setLCDeclAndLB(ValueDecl *NewLCDecl, Expr *NewDeclRefExpr, Expr *NewLB,
5411  bool EmitDiags);
5412  /// Helper to set upper bound.
5413  bool setUB(Expr *NewUB, llvm::Optional<bool> LessOp, bool StrictOp,
5414  SourceRange SR, SourceLocation SL);
5415  /// Helper to set loop increment.
5416  bool setStep(Expr *NewStep, bool Subtract);
5417 };
5418 
5419 bool OpenMPIterationSpaceChecker::dependent() const {
5420  if (!LCDecl) {
5421  assert(!LB && !UB && !Step);
5422  return false;
5423  }
5424  return LCDecl->getType()->isDependentType() ||
5425  (LB && LB->isValueDependent()) || (UB && UB->isValueDependent()) ||
5426  (Step && Step->isValueDependent());
5427 }
5428 
5429 bool OpenMPIterationSpaceChecker::setLCDeclAndLB(ValueDecl *NewLCDecl,
5430  Expr *NewLCRefExpr,
5431  Expr *NewLB, bool EmitDiags) {
5432  // State consistency checking to ensure correct usage.
5433  assert(LCDecl == nullptr && LB == nullptr && LCRef == nullptr &&
5434  UB == nullptr && Step == nullptr && !TestIsLessOp && !TestIsStrictOp);
5435  if (!NewLCDecl || !NewLB)
5436  return true;
5437  LCDecl = getCanonicalDecl(NewLCDecl);
5438  LCRef = NewLCRefExpr;
5439  if (auto *CE = dyn_cast_or_null<CXXConstructExpr>(NewLB))
5440  if (const CXXConstructorDecl *Ctor = CE->getConstructor())
5441  if ((Ctor->isCopyOrMoveConstructor() ||
5442  Ctor->isConvertingConstructor(/*AllowExplicit=*/false)) &&
5443  CE->getNumArgs() > 0 && CE->getArg(0) != nullptr)
5444  NewLB = CE->getArg(0)->IgnoreParenImpCasts();
5445  LB = NewLB;
5446  if (EmitDiags)
5447  InitDependOnLC = doesDependOnLoopCounter(LB, /*IsInitializer=*/true);
5448  return false;
5449 }
5450 
5451 bool OpenMPIterationSpaceChecker::setUB(Expr *NewUB,
5452  llvm::Optional<bool> LessOp,
5453  bool StrictOp, SourceRange SR,
5454  SourceLocation SL) {
5455  // State consistency checking to ensure correct usage.
5456  assert(LCDecl != nullptr && LB != nullptr && UB == nullptr &&
5457  Step == nullptr && !TestIsLessOp && !TestIsStrictOp);
5458  if (!NewUB)
5459  return true;
5460  UB = NewUB;
5461  if (LessOp)
5462  TestIsLessOp = LessOp;
5463  TestIsStrictOp = StrictOp;
5464  ConditionSrcRange = SR;
5465  ConditionLoc = SL;
5466  CondDependOnLC = doesDependOnLoopCounter(UB, /*IsInitializer=*/false);
5467  return false;
5468 }
5469 
5470 bool OpenMPIterationSpaceChecker::setStep(Expr *NewStep, bool Subtract) {
5471  // State consistency checking to ensure correct usage.
5472  assert(LCDecl != nullptr && LB != nullptr && Step == nullptr);
5473  if (!NewStep)
5474  return true;
5475  if (!NewStep->isValueDependent()) {
5476  // Check that the step is integer expression.
5477  SourceLocation StepLoc = NewStep->getBeginLoc();
5478  ExprResult Val = SemaRef.PerformOpenMPImplicitIntegerConversion(
5479  StepLoc, getExprAsWritten(NewStep));
5480  if (Val.isInvalid())
5481  return true;
5482  NewStep = Val.get();
5483 
5484  // OpenMP [2.6, Canonical Loop Form, Restrictions]
5485  // If test-expr is of form var relational-op b and relational-op is < or
5486  // <= then incr-expr must cause var to increase on each iteration of the
5487  // loop. If test-expr is of form var relational-op b and relational-op is
5488  // > or >= then incr-expr must cause var to decrease on each iteration of
5489  // the loop.
5490  // If test-expr is of form b relational-op var and relational-op is < or
5491  // <= then incr-expr must cause var to decrease on each iteration of the
5492  // loop. If test-expr is of form b relational-op var and relational-op is
5493  // > or >= then incr-expr must cause var to increase on each iteration of
5494  // the loop.
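// e.g. (illustrative): 'for (i = 0; i < n; i -= 1)' is diagnosed below because
// a '<' test requires an increment that increases 'i', while
// 'for (i = 0; i != n; ++i)' is accepted with '!=' treated as '<'.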
5495  llvm::APSInt Result;
5496  bool IsConstant = NewStep->isIntegerConstantExpr(Result, SemaRef.Context);
5497  bool IsUnsigned = !NewStep->getType()->hasSignedIntegerRepresentation();
5498  bool IsConstNeg =
5499  IsConstant && Result.isSigned() && (Subtract != Result.isNegative());
5500  bool IsConstPos =
5501  IsConstant && Result.isSigned() && (Subtract == Result.isNegative());
5502  bool IsConstZero = IsConstant && !Result.getBoolValue();
5503 
5504  // != with increment is treated as <; != with decrement is treated as >
5505  if (!TestIsLessOp.hasValue())
5506  TestIsLessOp = IsConstPos || (IsUnsigned && !Subtract);
5507  if (UB && (IsConstZero ||
5508  (TestIsLessOp.getValue() ?
5509  (IsConstNeg || (IsUnsigned && Subtract)) :
5510  (IsConstPos || (IsUnsigned && !Subtract))))) {
5511  SemaRef.Diag(NewStep->getExprLoc(),
5512  diag::err_omp_loop_incr_not_compatible)
5513  << LCDecl << TestIsLessOp.getValue() << NewStep->getSourceRange();
5514  SemaRef.Diag(ConditionLoc,
5515  diag::note_omp_loop_cond_requres_compatible_incr)
5516  << TestIsLessOp.getValue() << ConditionSrcRange;
5517  return true;
5518  }
5519  if (TestIsLessOp.getValue() == Subtract) {
5520  NewStep =
5521  SemaRef.CreateBuiltinUnaryOp(NewStep->getExprLoc(), UO_Minus, NewStep)
5522  .get();
5523  Subtract = !Subtract;
5524  }
5525  }
5526 
5527  Step = NewStep;
5528  SubtractStep = Subtract;
5529  return false;
5530 }
5531 
5532 namespace {
5533 /// Checker for the non-rectangular loops. Checks if the initializer or
5534 /// condition expression references loop counter variable.
5535 class LoopCounterRefChecker final
5536  : public ConstStmtVisitor<LoopCounterRefChecker, bool> {
5537  Sema &SemaRef;
5538  DSAStackTy &Stack;
5539  const ValueDecl *CurLCDecl = nullptr;
5540  const ValueDecl *DepDecl = nullptr;
5541  const ValueDecl *PrevDepDecl = nullptr;
5542  bool IsInitializer = true;
5543  unsigned BaseLoopId = 0;
5544  bool checkDecl(const Expr *E, const ValueDecl *VD) {
5545  if (getCanonicalDecl(VD) == getCanonicalDecl(CurLCDecl)) {
5546  SemaRef.Diag(E->getExprLoc(), diag::err_omp_stmt_depends_on_loop_counter)
5547  << (IsInitializer ? 0 : 1);
5548  return false;
5549  }
5550  const auto &&Data = Stack.isLoopControlVariable(VD);
5551  // OpenMP, 2.9.1 Canonical Loop Form, Restrictions.
5552  // The loop iterator on which we depend may not have a random-access
5553  // iterator type.
5554  if (Data.first && VD->getType()->isRecordType()) {
5555  SmallString<128> Name;
5556  llvm::raw_svector_ostream OS(Name);
5557  VD->getNameForDiagnostic(OS, SemaRef.getPrintingPolicy(),
5558  /*Qualified=*/true);
5559  SemaRef.Diag(E->getExprLoc(),
5560  diag::err_omp_wrong_dependency_iterator_type)
5561  << OS.str();
5562  SemaRef.Diag(VD->getLocation(), diag::note_previous_decl) << VD;
5563  return false;
5564  }
5565  if (Data.first &&
5566  (DepDecl || (PrevDepDecl &&
5567  getCanonicalDecl(VD) != getCanonicalDecl(PrevDepDecl)))) {
5568  if (!DepDecl && PrevDepDecl)
5569  DepDecl = PrevDepDecl;
5570  SmallString<128> Name;
5571  llvm::raw_svector_ostream OS(Name);
5572  DepDecl->getNameForDiagnostic(OS, SemaRef.getPrintingPolicy(),
5573  /*Qualified=*/true);
5574  SemaRef.Diag(E->getExprLoc(),
5575  diag::err_omp_invariant_or_linear_dependency)
5576  << OS.str();
5577  return false;
5578  }
5579  if (Data.first) {
5580  DepDecl = VD;
5581  BaseLoopId = Data.first;
5582  }
5583  return Data.first;
5584  }
5585 
5586 public:
5587  bool VisitDeclRefExpr(const DeclRefExpr *E) {
5588  const ValueDecl *VD = E->getDecl();
5589  if (isa<VarDecl>(VD))
5590  return checkDecl(E, VD);
5591  return false;
5592  }
5593  bool VisitMemberExpr(const MemberExpr *E) {
5594  if (isa<CXXThisExpr>(E->getBase()->IgnoreParens())) {
5595  const ValueDecl *VD = E->getMemberDecl();
5596  if (isa<VarDecl>(VD) || isa<FieldDecl>(VD))
5597  return checkDecl(E, VD);
5598  }
5599  return false;
5600  }
5601  bool VisitStmt(const Stmt *S) {
5602  bool Res = false;
5603  for (const Stmt *Child : S->children())
5604  Res = (Child && Visit(Child)) || Res;
5605  return Res;
5606  }
5607  explicit LoopCounterRefChecker(Sema &SemaRef, DSAStackTy &Stack,
5608  const ValueDecl *CurLCDecl, bool IsInitializer,
5609  const ValueDecl *PrevDepDecl = nullptr)
5610  : SemaRef(SemaRef), Stack(Stack), CurLCDecl(CurLCDecl),
5611  PrevDepDecl(PrevDepDecl), IsInitializer(IsInitializer) {}
5612  unsigned getBaseLoopId() const {
5613  assert(CurLCDecl && "Expected loop dependency.");
5614  return BaseLoopId;
5615  }
5616  const ValueDecl *getDepDecl() const {
5617  assert(CurLCDecl && "Expected loop dependency.");
5618  return DepDecl;
5619  }
5620 };
5621 } // namespace
5622 
5623 Optional<unsigned>
5624 OpenMPIterationSpaceChecker::doesDependOnLoopCounter(const Stmt *S,
5625  bool IsInitializer) {
5626  // Check for the non-rectangular loops.
5627  LoopCounterRefChecker LoopStmtChecker(SemaRef, Stack, LCDecl, IsInitializer,
5628  DepDecl);
5629  if (LoopStmtChecker.Visit(S)) {
5630  DepDecl = LoopStmtChecker.getDepDecl();
5631  return LoopStmtChecker.getBaseLoopId();
5632  }
5633  return llvm::None;
5634 }
5635 
5636 bool OpenMPIterationSpaceChecker::checkAndSetInit(Stmt *S, bool EmitDiags) {
5637  // Check init-expr for canonical loop form and save loop counter
5638  // variable - #Var and its initialization value - #LB.
5639  // OpenMP [2.6] Canonical loop form. init-expr may be one of the following:
5640  // var = lb
5641  // integer-type var = lb
5642  // random-access-iterator-type var = lb
5643  // pointer-type var = lb
5644  //
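  // e.g. (illustrative):
  //   for (i = 0; ...)                 // var = lb
  //   for (int i = 0; ...)             // integer-type var = lb
  //   for (auto it = v.begin(); ...)   // random-access-iterator-type var = lb
  //   for (char *p = buf; ...)         // pointer-type var = lb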
5645  if (!S) {
5646  if (EmitDiags) {
5647  SemaRef.Diag(DefaultLoc, diag::err_omp_loop_not_canonical_init);
5648  }
5649  return true;
5650  }
5651  if (auto *ExprTemp = dyn_cast<ExprWithCleanups>(S))
5652  if (!ExprTemp->cleanupsHaveSideEffects())
5653  S = ExprTemp->getSubExpr();
5654 
5655  InitSrcRange = S->getSourceRange();
5656  if (Expr *E = dyn_cast<Expr>(S))
5657  S = E->IgnoreParens();
5658  if (auto *BO = dyn_cast<BinaryOperator>(S)) {
5659  if (BO->getOpcode() == BO_Assign) {
5660  Expr *LHS = BO->getLHS()->IgnoreParens();
5661  if (auto *DRE = dyn_cast<DeclRefExpr>(LHS)) {
5662  if (auto *CED = dyn_cast<OMPCapturedExprDecl>(DRE->getDecl()))
5663  if (auto *ME = dyn_cast<MemberExpr>(getExprAsWritten(CED->getInit())))
5664  return setLCDeclAndLB(ME->getMemberDecl(), ME, BO->getRHS(),
5665  EmitDiags);
5666  return setLCDeclAndLB(DRE->getDecl(), DRE, BO->getRHS(), EmitDiags);
5667  }
5668  if (auto *ME = dyn_cast<MemberExpr>(LHS)) {
5669  if (ME->isArrow() &&
5670  isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts()))
5671  return setLCDeclAndLB(ME->getMemberDecl(), ME, BO->getRHS(),
5672  EmitDiags);
5673  }
5674  }
5675  } else if (auto *DS = dyn_cast<DeclStmt>(S)) {
5676  if (DS->isSingleDecl()) {
5677  if (auto *Var = dyn_cast_or_null<VarDecl>(DS->getSingleDecl())) {
5678  if (Var->hasInit() && !Var->getType()->isReferenceType()) {
5679  // Accept non-canonical init form here but emit ext. warning.
5680  if (Var->getInitStyle() != VarDecl::CInit && EmitDiags)
5681  SemaRef.Diag(S->getBeginLoc(),
5682  diag::ext_omp_loop_not_canonical_init)
5683  << S->getSourceRange();
5684  return setLCDeclAndLB(
5685  Var,
5686  buildDeclRefExpr(SemaRef, Var,
5687  Var->getType().getNonReferenceType(),
5688  DS->getBeginLoc()),
5689  Var->getInit(), EmitDiags);
5690  }
5691  }
5692  }
5693  } else if (auto *CE = dyn_cast<CXXOperatorCallExpr>(S)) {
5694  if (CE->getOperator() == OO_Equal) {
5695  Expr *LHS = CE->getArg(0);
5696  if (auto *DRE = dyn_cast<DeclRefExpr>(LHS)) {
5697  if (auto *CED = dyn_cast<OMPCapturedExprDecl>(DRE->getDecl()))
5698  if (auto *ME = dyn_cast<MemberExpr>(getExprAsWritten(CED->getInit())))
5699  return setLCDeclAndLB(ME->getMemberDecl(), ME, BO->getRHS(),
5700  EmitDiags);
5701  return setLCDeclAndLB(DRE->getDecl(), DRE, CE->getArg(1), EmitDiags);
5702  }
5703  if (auto *ME = dyn_cast<MemberExpr>(LHS)) {
5704  if (ME->isArrow() &&
5705  isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts()))
5706  return setLCDeclAndLB(ME->getMemberDecl(), ME, BO->getRHS(),
5707  EmitDiags);
5708  }
5709  }
5710  }
5711 
5712  if (dependent() || SemaRef.CurContext->isDependentContext())
5713  return false;
5714  if (EmitDiags) {
5715  SemaRef.Diag(S->getBeginLoc(), diag::err_omp_loop_not_canonical_init)
5716  << S->getSourceRange();
5717  }
5718  return true;
5719 }
5720 
5721 /// Ignore parentheses, implicit casts, and copy constructors, and return the
5722 /// variable (which may be the loop variable) if possible.
5723 static const ValueDecl *getInitLCDecl(const Expr *E) {
5724  if (!E)
5725  return nullptr;
5726  E = getExprAsWritten(E);
5727  if (const auto *CE = dyn_cast_or_null<CXXConstructExpr>(E))
5728  if (const CXXConstructorDecl *Ctor = CE->getConstructor())
5729  if ((Ctor->isCopyOrMoveConstructor() ||
5730  Ctor->isConvertingConstructor(/*AllowExplicit=*/false)) &&
5731  CE->getNumArgs() > 0 && CE->getArg(0) != nullptr)
5732  E = CE->getArg(0)->IgnoreParenImpCasts();
5733  if (const auto *DRE = dyn_cast_or_null<DeclRefExpr>(E)) {
5734  if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()))
5735  return getCanonicalDecl(VD);
5736  }
5737  if (const auto *ME = dyn_cast_or_null<MemberExpr>(E))
5738  if (ME->isArrow() && isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts()))
5739  return getCanonicalDecl(ME->getMemberDecl());
5740  return nullptr;
5741 }
5742 
5743 bool OpenMPIterationSpaceChecker::checkAndSetCond(Expr *S) {
5744  // Check test-expr for canonical form, save upper-bound UB, flags for
5745  // less/greater and for strict/non-strict comparison.
5746  // OpenMP [2.9] Canonical loop form. Test-expr may be one of the following:
5747  // var relational-op b
5748  // b relational-op var
5749  //
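  // e.g. (illustrative): 'i < n', 'i <= n', and 'n > i' are accepted below,
  // and from OpenMP 5.0 'i != n' as well; a test such as 'i % 2 == 0' is not
  // canonical and is diagnosed with err_omp_loop_not_canonical_cond.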
5750  bool IneqCondIsCanonical = SemaRef.getLangOpts().OpenMP >= 50;
5751  if (!S) {
5752  SemaRef.Diag(DefaultLoc, diag::err_omp_loop_not_canonical_cond)
5753  << (IneqCondIsCanonical ? 1 : 0) << LCDecl;
5754  return true;
5755  }
5756  Condition = S;
5757  S = getExprAsWritten(S);
5758  SourceLocation CondLoc = S->getBeginLoc();
5759  if (auto *BO = dyn_cast<BinaryOperator>(S)) {
5760  if (BO->isRelationalOp()) {
5761  if (getInitLCDecl(BO->getLHS()) == LCDecl)
5762  return setUB(BO->getRHS(),
5763  (BO->getOpcode() == BO_LT || BO->getOpcode() == BO_LE),
5764  (BO->getOpcode() == BO_LT || BO->getOpcode() == BO_GT),
5765  BO->getSourceRange(), BO->getOperatorLoc());
5766  if (getInitLCDecl(BO->getRHS()) == LCDecl)
5767  return setUB(BO->getLHS(),
5768  (BO->getOpcode() == BO_GT || BO->getOpcode() == BO_GE),
5769  (BO->getOpcode() == BO_LT || BO->getOpcode() == BO_GT),
5770  BO->getSourceRange(), BO->getOperatorLoc());
5771  } else if (IneqCondIsCanonical && BO->getOpcode() == BO_NE)
5772  return setUB(
5773  getInitLCDecl(BO->getLHS()) == LCDecl ? BO->getRHS() : BO->getLHS(),
5774  /*LessOp=*/llvm::None,
5775  /*StrictOp=*/true, BO->getSourceRange(), BO->getOperatorLoc());
5776  } else if (auto *CE = dyn_cast<CXXOperatorCallExpr>(S)) {
5777  if (CE->getNumArgs() == 2) {
5778  auto Op = CE->getOperator();
5779  switch (Op) {
5780  case OO_Greater:
5781  case OO_GreaterEqual:
5782  case OO_Less:
5783  case OO_LessEqual:
5784  if (getInitLCDecl(CE->getArg(0)) == LCDecl)
5785  return setUB(CE->getArg(1), Op == OO_Less || Op == OO_LessEqual,
5786  Op == OO_Less || Op == OO_Greater, CE->getSourceRange(),
5787  CE->getOperatorLoc());
5788  if (getInitLCDecl(CE->getArg(1)) == LCDecl)
5789  return setUB(CE->getArg(0), Op == OO_Greater || Op == OO_GreaterEqual,
5790  Op == OO_Less || Op == OO_Greater, CE->getSourceRange(),
5791  CE->getOperatorLoc());
5792  break;
5793  case OO_ExclaimEqual:
5794  if (IneqCondIsCanonical)
5795  return setUB(getInitLCDecl(CE->getArg(0)) == LCDecl ? CE->getArg(1)
5796  : CE->getArg(0),
5797  /*LessOp=*/llvm::None,
5798  /*StrictOp=*/true, CE->getSourceRange(),
5799  CE->getOperatorLoc());
5800  break;
5801  default:
5802  break;
5803  }
5804  }
5805  }
5806  if (dependent() || SemaRef.CurContext->isDependentContext())
5807  return false;
5808  SemaRef.Diag(CondLoc, diag::err_omp_loop_not_canonical_cond)
5809  << (IneqCondIsCanonical ? 1 : 0) << S->getSourceRange() << LCDecl;
5810  return true;
5811 }
5812 
5813 bool