//===--- CGStmtOpenMP.cpp - Emit LLVM Code from Statements ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit OpenMP nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCleanup.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Metadata.h"
#include "llvm/Support/AtomicOrdering.h"
using namespace clang;
using namespace CodeGen;
using namespace llvm::omp;

static const VarDecl *getBaseDecl(const Expr *Ref);

namespace {
/// Lexical scope for OpenMP executable constructs, that handles correct codegen
/// for captured expressions.
class OMPLexicalScope : public CodeGenFunction::LexicalScope {
  void emitPreInitStmt(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
    for (const auto *C : S.clauses()) {
      if (const auto *CPI = OMPClauseWithPreInit::get(C)) {
        if (const auto *PreInit =
                cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
          for (const auto *I : PreInit->decls()) {
            if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
              CGF.EmitVarDecl(cast<VarDecl>(*I));
            } else {
              CodeGenFunction::AutoVarEmission Emission =
                  CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
              CGF.EmitAutoVarCleanups(Emission);
            }
          }
        }
      }
    }
  }
  CodeGenFunction::OMPPrivateScope InlinedShareds;

  static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
    return CGF.LambdaCaptureFields.lookup(VD) ||
           (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
           (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl) &&
            cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD));
  }

public:
  OMPLexicalScope(
      CodeGenFunction &CGF, const OMPExecutableDirective &S,
      const llvm::Optional<OpenMPDirectiveKind> CapturedRegion = llvm::None,
      const bool EmitPreInitStmt = true)
      : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
        InlinedShareds(CGF) {
    if (EmitPreInitStmt)
      emitPreInitStmt(CGF, S);
    if (!CapturedRegion.hasValue())
      return;
    assert(S.hasAssociatedStmt() &&
           "Expected associated statement for inlined directive.");
    const CapturedStmt *CS = S.getCapturedStmt(*CapturedRegion);
    for (const auto &C : CS->captures()) {
      if (C.capturesVariable() || C.capturesVariableByCopy()) {
        auto *VD = C.getCapturedVar();
        assert(VD == VD->getCanonicalDecl() &&
               "Canonical decl must be captured.");
        DeclRefExpr DRE(
            CGF.getContext(), const_cast<VarDecl *>(VD),
            isCapturedVar(CGF, VD) || (CGF.CapturedStmtInfo &&
                                       InlinedShareds.isGlobalVarCaptured(VD)),
            VD->getType().getNonReferenceType(), VK_LValue, C.getLocation());
        InlinedShareds.addPrivate(VD, CGF.EmitLValue(&DRE).getAddress(CGF));
      }
    }
    (void)InlinedShareds.Privatize();
  }
};
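
// Illustrative example (hypothetical, not from a test case): for a directive
// such as
//   #pragma omp parallel num_threads(N + 1)
// the clause's pre-init statement declares a captured temporary (roughly an
// `int .capture_expr.` holding `N + 1`) that emitPreInitStmt() above emits
// before the region, so the outlined code and the runtime calls observe a
// stable value.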

/// Lexical scope for OpenMP parallel construct, that handles correct codegen
/// for captured expressions.
class OMPParallelScope final : public OMPLexicalScope {
  bool EmitPreInitStmt(const OMPExecutableDirective &S) {
    OpenMPDirectiveKind Kind = S.getDirectiveKind();
    return !(isOpenMPTargetExecutionDirective(Kind) ||
             isOpenMPLoopBoundSharingDirective(Kind)) &&
           isOpenMPParallelDirective(Kind);
  }

public:
  OMPParallelScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : OMPLexicalScope(CGF, S, /*CapturedRegion=*/llvm::None,
                        EmitPreInitStmt(S)) {}
};

/// Lexical scope for OpenMP teams construct, that handles correct codegen
/// for captured expressions.
class OMPTeamsScope final : public OMPLexicalScope {
  bool EmitPreInitStmt(const OMPExecutableDirective &S) {
    OpenMPDirectiveKind Kind = S.getDirectiveKind();
    return !isOpenMPTargetExecutionDirective(Kind) &&
           isOpenMPTeamsDirective(Kind);
  }

public:
  OMPTeamsScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : OMPLexicalScope(CGF, S, /*CapturedRegion=*/llvm::None,
                        EmitPreInitStmt(S)) {}
};

/// Private scope for OpenMP loop-based directives, that supports capturing
/// of used expression from loop statement.
class OMPLoopScope : public CodeGenFunction::RunCleanupsScope {
  void emitPreInitStmt(CodeGenFunction &CGF, const OMPLoopBasedDirective &S) {
    const DeclStmt *PreInits;
    CodeGenFunction::OMPMapVars PreCondVars;
    if (auto *LD = dyn_cast<OMPLoopDirective>(&S)) {
      llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
      for (const auto *E : LD->counters()) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
        EmittedAsPrivate.insert(VD->getCanonicalDecl());
        (void)PreCondVars.setVarAddr(
            CGF, VD, CGF.CreateMemTemp(VD->getType().getNonReferenceType()));
      }
      // Mark private vars as undefs.
      for (const auto *C : LD->getClausesOfKind<OMPPrivateClause>()) {
        for (const Expr *IRef : C->varlists()) {
          const auto *OrigVD =
              cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
          if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
            QualType OrigVDTy = OrigVD->getType().getNonReferenceType();
            (void)PreCondVars.setVarAddr(
                CGF, OrigVD,
                Address(llvm::UndefValue::get(CGF.ConvertTypeForMem(
                            CGF.getContext().getPointerType(OrigVDTy))),
                        CGF.ConvertTypeForMem(OrigVDTy),
                        CGF.getContext().getDeclAlign(OrigVD)));
          }
        }
      }
      (void)PreCondVars.apply(CGF);
      // Emit init, __range and __end variables for C++ range loops.
      (void)OMPLoopBasedDirective::doForAllLoops(
          LD->getInnermostCapturedStmt()->getCapturedStmt(),
          /*TryImperfectlyNestedLoops=*/true, LD->getLoopsNumber(),
          [&CGF](unsigned Cnt, const Stmt *CurStmt) {
            if (const auto *CXXFor = dyn_cast<CXXForRangeStmt>(CurStmt)) {
              if (const Stmt *Init = CXXFor->getInit())
                CGF.EmitStmt(Init);
              CGF.EmitStmt(CXXFor->getRangeStmt());
              CGF.EmitStmt(CXXFor->getEndStmt());
            }
            return false;
          });
      PreInits = cast_or_null<DeclStmt>(LD->getPreInits());
    } else if (const auto *Tile = dyn_cast<OMPTileDirective>(&S)) {
      PreInits = cast_or_null<DeclStmt>(Tile->getPreInits());
    } else if (const auto *Unroll = dyn_cast<OMPUnrollDirective>(&S)) {
      PreInits = cast_or_null<DeclStmt>(Unroll->getPreInits());
    } else {
      llvm_unreachable("Unknown loop-based directive kind.");
    }
    if (PreInits) {
      for (const auto *I : PreInits->decls())
        CGF.EmitVarDecl(cast<VarDecl>(*I));
    }
    PreCondVars.restore(CGF);
  }

public:
  OMPLoopScope(CodeGenFunction &CGF, const OMPLoopBasedDirective &S)
      : CodeGenFunction::RunCleanupsScope(CGF) {
    emitPreInitStmt(CGF, S);
  }
};
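
// Example (hypothetical, for illustration): given
//   #pragma omp simd collapse(2)
//   for (int i = 0; i < n; ++i)
//     for (auto x : range) ...
// OMPLoopScope materializes temporaries for the loop counters, emits the
// __range and __end variables of any C++ range-for, and emits the
// directive's pre-init declarations before the loop nest itself is lowered.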

class OMPSimdLexicalScope : public CodeGenFunction::LexicalScope {
  CodeGenFunction::OMPPrivateScope InlinedShareds;

  static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
    return CGF.LambdaCaptureFields.lookup(VD) ||
           (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
           (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl) &&
            cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD));
  }

public:
  OMPSimdLexicalScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
        InlinedShareds(CGF) {
    for (const auto *C : S.clauses()) {
      if (const auto *CPI = OMPClauseWithPreInit::get(C)) {
        if (const auto *PreInit =
                cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
          for (const auto *I : PreInit->decls()) {
            if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
              CGF.EmitVarDecl(cast<VarDecl>(*I));
            } else {
              CodeGenFunction::AutoVarEmission Emission =
                  CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
              CGF.EmitAutoVarCleanups(Emission);
            }
          }
        }
      } else if (const auto *UDP = dyn_cast<OMPUseDevicePtrClause>(C)) {
        for (const Expr *E : UDP->varlists()) {
          const Decl *D = cast<DeclRefExpr>(E)->getDecl();
          if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
            CGF.EmitVarDecl(*OED);
        }
      } else if (const auto *UDP = dyn_cast<OMPUseDeviceAddrClause>(C)) {
        for (const Expr *E : UDP->varlists()) {
          const Decl *D = getBaseDecl(E);
          if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
            CGF.EmitVarDecl(*OED);
        }
      }
    }
    if (!isOpenMPSimdDirective(S.getDirectiveKind()))
      CGF.EmitOMPPrivateClause(S, InlinedShareds);
    if (const auto *TG = dyn_cast<OMPTaskgroupDirective>(&S)) {
      if (const Expr *E = TG->getReductionRef())
        CGF.EmitVarDecl(*cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()));
    }
    // Temp copy arrays for inscan reductions should not be emitted as they are
    // not used in simd only mode.
    llvm::DenseSet<CanonicalDeclPtr<const Decl>> CopyArrayTemps;
    for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
      if (C->getModifier() != OMPC_REDUCTION_inscan)
        continue;
      for (const Expr *E : C->copy_array_temps())
        CopyArrayTemps.insert(cast<DeclRefExpr>(E)->getDecl());
    }
    const auto *CS = cast_or_null<CapturedStmt>(S.getAssociatedStmt());
    while (CS) {
      for (auto &C : CS->captures()) {
        if (C.capturesVariable() || C.capturesVariableByCopy()) {
          auto *VD = C.getCapturedVar();
          if (CopyArrayTemps.contains(VD))
            continue;
          assert(VD == VD->getCanonicalDecl() &&
                 "Canonical decl must be captured.");
          DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(VD),
                          isCapturedVar(CGF, VD) ||
                              (CGF.CapturedStmtInfo &&
                               InlinedShareds.isGlobalVarCaptured(VD)),
                          VD->getType().getNonReferenceType(), VK_LValue,
                          C.getLocation());
          InlinedShareds.addPrivate(VD, CGF.EmitLValue(&DRE).getAddress(CGF));
        }
      }
      CS = dyn_cast<CapturedStmt>(CS->getCapturedStmt());
    }
    (void)InlinedShareds.Privatize();
  }
};

} // namespace

static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
                                         const OMPExecutableDirective &S,
                                         const RegionCodeGenTy &CodeGen);

LValue CodeGenFunction::EmitOMPSharedLValue(const Expr *E) {
  if (const auto *OrigDRE = dyn_cast<DeclRefExpr>(E)) {
    if (const auto *OrigVD = dyn_cast<VarDecl>(OrigDRE->getDecl())) {
      OrigVD = OrigVD->getCanonicalDecl();
      bool IsCaptured =
          LambdaCaptureFields.lookup(OrigVD) ||
          (CapturedStmtInfo && CapturedStmtInfo->lookup(OrigVD)) ||
          (CurCodeDecl && isa<BlockDecl>(CurCodeDecl));
      DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), IsCaptured,
                      OrigDRE->getType(), VK_LValue, OrigDRE->getExprLoc());
      return EmitLValue(&DRE);
    }
  }
  return EmitLValue(E);
}

llvm::Value *CodeGenFunction::getTypeSize(QualType Ty) {
  ASTContext &C = getContext();
  llvm::Value *Size = nullptr;
  auto SizeInChars = C.getTypeSizeInChars(Ty);
  if (SizeInChars.isZero()) {
    // getTypeSizeInChars() returns 0 for a VLA.
    while (const VariableArrayType *VAT = C.getAsVariableArrayType(Ty)) {
      VlaSizePair VlaSize = getVLASize(VAT);
      Ty = VlaSize.Type;
      Size =
          Size ? Builder.CreateNUWMul(Size, VlaSize.NumElts) : VlaSize.NumElts;
    }
    SizeInChars = C.getTypeSizeInChars(Ty);
    if (SizeInChars.isZero())
      return llvm::ConstantInt::get(SizeTy, /*V=*/0);
    return Builder.CreateNUWMul(Size, CGM.getSize(SizeInChars));
  }
  return CGM.getSize(SizeInChars);
}
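
// Sketch of the VLA path above (illustrative only): for
//   void foo(int n) { double a[n][n]; /* ... */ }
// getTypeSize(typeof(a)) cannot use the constant size (it is zero for a
// VLA), so it emits `n * n * sizeof(double)` as a chain of NUW multiplies
// over the recorded VLA extents.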

void CodeGenFunction::GenerateOpenMPCapturedVars(
    const CapturedStmt &S, SmallVectorImpl<llvm::Value *> &CapturedVars) {
  const RecordDecl *RD = S.getCapturedRecordDecl();
  auto CurField = RD->field_begin();
  auto CurCap = S.captures().begin();
  for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
                                                 E = S.capture_init_end();
       I != E; ++I, ++CurField, ++CurCap) {
    if (CurField->hasCapturedVLAType()) {
      const VariableArrayType *VAT = CurField->getCapturedVLAType();
      llvm::Value *Val = VLASizeMap[VAT->getSizeExpr()];
      CapturedVars.push_back(Val);
    } else if (CurCap->capturesThis()) {
      CapturedVars.push_back(CXXThisValue);
    } else if (CurCap->capturesVariableByCopy()) {
      llvm::Value *CV = EmitLoadOfScalar(EmitLValue(*I), CurCap->getLocation());

      // If the field is not a pointer, we need to save the actual value
      // and load it as a void pointer.
      if (!CurField->getType()->isAnyPointerType()) {
        ASTContext &Ctx = getContext();
        Address DstAddr = CreateMemTemp(
            Ctx.getUIntPtrType(),
            Twine(CurCap->getCapturedVar()->getName(), ".casted"));
        LValue DstLV = MakeAddrLValue(DstAddr, Ctx.getUIntPtrType());

        llvm::Value *SrcAddrVal = EmitScalarConversion(
            DstAddr.getPointer(), Ctx.getPointerType(Ctx.getUIntPtrType()),
            Ctx.getPointerType(CurField->getType()), CurCap->getLocation());
        LValue SrcLV =
            MakeNaturalAlignAddrLValue(SrcAddrVal, CurField->getType());

        // Store the value using the source type pointer.
        EmitStoreThroughLValue(RValue::get(CV), SrcLV);

        // Load the value using the destination type pointer.
        CV = EmitLoadOfScalar(DstLV, CurCap->getLocation());
      }
      CapturedVars.push_back(CV);
    } else {
      assert(CurCap->capturesVariable() && "Expected capture by reference.");
      CapturedVars.push_back(EmitLValue(*I).getAddress(*this).getPointer());
    }
  }
}
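
// Example (illustrative): for
//   int x; ... #pragma omp parallel firstprivate(x)
// the by-copy capture of 'x' is not pointer-typed, so the value is stored
// into a fresh "x.casted" uintptr temporary and reloaded through it; the
// runtime only traffics in pointer-sized arguments, so scalars travel as
// uintptr payloads.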

static Address castValueFromUintptr(CodeGenFunction &CGF, SourceLocation Loc,
                                    QualType DstType, StringRef Name,
                                    LValue AddrLV) {
  ASTContext &Ctx = CGF.getContext();

  llvm::Value *CastedPtr = CGF.EmitScalarConversion(
      AddrLV.getAddress(CGF).getPointer(), Ctx.getUIntPtrType(),
      Ctx.getPointerType(DstType), Loc);
  Address TmpAddr =
      CGF.MakeNaturalAlignAddrLValue(CastedPtr, DstType).getAddress(CGF);
  return TmpAddr;
}

static QualType getCanonicalParamType(ASTContext &C, QualType T) {
  if (T->isLValueReferenceType())
    return C.getLValueReferenceType(
        getCanonicalParamType(C, T.getNonReferenceType()),
        /*SpelledAsLValue=*/false);
  if (T->isPointerType())
    return C.getPointerType(getCanonicalParamType(C, T->getPointeeType()));
  if (const ArrayType *A = T->getAsArrayTypeUnsafe()) {
    if (const auto *VLA = dyn_cast<VariableArrayType>(A))
      return getCanonicalParamType(C, VLA->getElementType());
    if (!A->isVariablyModifiedType())
      return C.getCanonicalType(T);
  }
  return C.getCanonicalParamType(T);
}

namespace {
/// Contains required data for proper outlined function codegen.
struct FunctionOptions {
  /// Captured statement for which the function is generated.
  const CapturedStmt *S = nullptr;
  /// true if cast to/from UIntPtr is required for variables captured by
  /// value.
  const bool UIntPtrCastRequired = true;
  /// true if only casted arguments must be registered as local args or VLA
  /// sizes.
  const bool RegisterCastedArgsOnly = false;
  /// Name of the generated function.
  const StringRef FunctionName;
  /// Location of the non-debug version of the outlined function.
  SourceLocation Loc;
  explicit FunctionOptions(const CapturedStmt *S, bool UIntPtrCastRequired,
                           bool RegisterCastedArgsOnly, StringRef FunctionName,
                           SourceLocation Loc)
      : S(S), UIntPtrCastRequired(UIntPtrCastRequired),
        RegisterCastedArgsOnly(UIntPtrCastRequired && RegisterCastedArgsOnly),
        FunctionName(FunctionName), Loc(Loc) {}
};
} // namespace

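// Note on the two flavors of outlined function built below: when full debug
// info is requested, GenerateOpenMPCapturedStmtFunction() (further down)
// first emits a "<helper>_debug__" function with the original parameter
// types, then a thin wrapper with the uintptr-cast parameter types the
// runtime expects that simply forwards to it.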
static llvm::Function *emitOutlinedFunctionPrologue(
    CodeGenFunction &CGF, FunctionArgList &Args,
    llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>>
        &LocalAddrs,
    llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>>
        &VLASizes,
    llvm::Value *&CXXThisValue, const FunctionOptions &FO) {
  const CapturedDecl *CD = FO.S->getCapturedDecl();
  const RecordDecl *RD = FO.S->getCapturedRecordDecl();
  assert(CD->hasBody() && "missing CapturedDecl body");

  CXXThisValue = nullptr;
  // Build the argument list.
  CodeGenModule &CGM = CGF.CGM;
  ASTContext &Ctx = CGM.getContext();
  FunctionArgList TargetArgs;
  Args.append(CD->param_begin(),
              std::next(CD->param_begin(), CD->getContextParamPosition()));
  TargetArgs.append(
      CD->param_begin(),
      std::next(CD->param_begin(), CD->getContextParamPosition()));
  auto I = FO.S->captures().begin();
  FunctionDecl *DebugFunctionDecl = nullptr;
  if (!FO.UIntPtrCastRequired) {
    FunctionProtoType::ExtProtoInfo EPI;
    QualType FunctionTy = Ctx.getFunctionType(Ctx.VoidTy, llvm::None, EPI);
    DebugFunctionDecl = FunctionDecl::Create(
        Ctx, Ctx.getTranslationUnitDecl(), FO.S->getBeginLoc(),
        SourceLocation(), DeclarationName(), FunctionTy,
        Ctx.getTrivialTypeSourceInfo(FunctionTy), SC_Static,
        /*UsesFPIntrin=*/false, /*isInlineSpecified=*/false,
        /*hasWrittenPrototype=*/false);
  }
  for (const FieldDecl *FD : RD->fields()) {
    QualType ArgType = FD->getType();
    IdentifierInfo *II = nullptr;
    VarDecl *CapVar = nullptr;

    // If this is a capture by copy and the type is not a pointer, the outlined
    // function argument type should be uintptr and the value properly casted to
    // uintptr. This is necessary given that the runtime library is only able to
    // deal with pointers. We can pass in the same way the VLA type sizes to the
    // outlined function.
    if (FO.UIntPtrCastRequired &&
        ((I->capturesVariableByCopy() && !ArgType->isAnyPointerType()) ||
         I->capturesVariableArrayType()))
      ArgType = Ctx.getUIntPtrType();

    if (I->capturesVariable() || I->capturesVariableByCopy()) {
      CapVar = I->getCapturedVar();
      II = CapVar->getIdentifier();
    } else if (I->capturesThis()) {
      II = &Ctx.Idents.get("this");
    } else {
      assert(I->capturesVariableArrayType());
      II = &Ctx.Idents.get("vla");
    }
    if (ArgType->isVariablyModifiedType())
      ArgType = getCanonicalParamType(Ctx, ArgType);
    VarDecl *Arg;
    if (CapVar && (CapVar->getTLSKind() != clang::VarDecl::TLS_None)) {
      Arg = ImplicitParamDecl::Create(Ctx, /*DC=*/nullptr, FD->getLocation(),
                                      II, ArgType,
                                      ImplicitParamDecl::ThreadPrivateVar);
    } else if (DebugFunctionDecl && (CapVar || I->capturesThis())) {
      Arg = ParmVarDecl::Create(
          Ctx, DebugFunctionDecl,
          CapVar ? CapVar->getBeginLoc() : FD->getBeginLoc(),
          CapVar ? CapVar->getLocation() : FD->getLocation(), II, ArgType,
          /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
    } else {
      Arg = ImplicitParamDecl::Create(Ctx, /*DC=*/nullptr, FD->getLocation(),
                                      II, ArgType, ImplicitParamDecl::Other);
    }
    Args.emplace_back(Arg);
    // Do not cast arguments if we emit function with non-original types.
    TargetArgs.emplace_back(
        FO.UIntPtrCastRequired
            ? Arg
            : CGM.getOpenMPRuntime().translateParameter(FD, Arg));
    ++I;
  }
  Args.append(std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
              CD->param_end());
  TargetArgs.append(
      std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
      CD->param_end());

  // Create the function declaration.
  const CGFunctionInfo &FuncInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, TargetArgs);
  llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);

  auto *F =
      llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
                             FO.FunctionName, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
  if (CD->isNothrow())
    F->setDoesNotThrow();
  F->setDoesNotRecurse();

  // Always inline the outlined function if optimizations are enabled.
  if (CGM.getCodeGenOpts().OptimizationLevel != 0) {
    F->removeFnAttr(llvm::Attribute::NoInline);
    F->addFnAttr(llvm::Attribute::AlwaysInline);
  }

  // Generate the function.
  CGF.StartFunction(CD, Ctx.VoidTy, F, FuncInfo, TargetArgs,
                    FO.UIntPtrCastRequired ? FO.Loc : FO.S->getBeginLoc(),
                    FO.UIntPtrCastRequired ? FO.Loc
                                           : CD->getBody()->getBeginLoc());
  unsigned Cnt = CD->getContextParamPosition();
  I = FO.S->captures().begin();
  for (const FieldDecl *FD : RD->fields()) {
    // Do not map arguments if we emit function with non-original types.
    Address LocalAddr(Address::invalid());
    if (!FO.UIntPtrCastRequired && Args[Cnt] != TargetArgs[Cnt]) {
      LocalAddr = CGM.getOpenMPRuntime().getParameterAddress(CGF, Args[Cnt],
                                                             TargetArgs[Cnt]);
    } else {
      LocalAddr = CGF.GetAddrOfLocalVar(Args[Cnt]);
    }
    // If we are capturing a pointer by copy we don't need to do anything, just
    // use the value that we get from the arguments.
    if (I->capturesVariableByCopy() && FD->getType()->isAnyPointerType()) {
      const VarDecl *CurVD = I->getCapturedVar();
      if (!FO.RegisterCastedArgsOnly)
        LocalAddrs.insert({Args[Cnt], {CurVD, LocalAddr}});
      ++Cnt;
      ++I;
      continue;
    }

    LValue ArgLVal = CGF.MakeAddrLValue(LocalAddr, Args[Cnt]->getType(),
                                        AlignmentSource::Decl);
    if (FD->hasCapturedVLAType()) {
      if (FO.UIntPtrCastRequired) {
        ArgLVal = CGF.MakeAddrLValue(
            castValueFromUintptr(CGF, I->getLocation(), FD->getType(),
                                 Args[Cnt]->getName(), ArgLVal),
            FD->getType(), AlignmentSource::Decl);
      }
      llvm::Value *ExprArg = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
      const VariableArrayType *VAT = FD->getCapturedVLAType();
      VLASizes.try_emplace(Args[Cnt], VAT->getSizeExpr(), ExprArg);
    } else if (I->capturesVariable()) {
      const VarDecl *Var = I->getCapturedVar();
      QualType VarTy = Var->getType();
      Address ArgAddr = ArgLVal.getAddress(CGF);
      if (ArgLVal.getType()->isLValueReferenceType()) {
        ArgAddr = CGF.EmitLoadOfReference(ArgLVal);
      } else if (!VarTy->isVariablyModifiedType() || !VarTy->isPointerType()) {
        assert(ArgLVal.getType()->isPointerType());
        ArgAddr = CGF.EmitLoadOfPointer(
            ArgAddr, ArgLVal.getType()->castAs<PointerType>());
      }
      if (!FO.RegisterCastedArgsOnly) {
        LocalAddrs.insert(
            {Args[Cnt], {Var, ArgAddr.withAlignment(Ctx.getDeclAlign(Var))}});
      }
    } else if (I->capturesVariableByCopy()) {
      assert(!FD->getType()->isAnyPointerType() &&
             "Not expecting a captured pointer.");
      const VarDecl *Var = I->getCapturedVar();
      LocalAddrs.insert({Args[Cnt],
                         {Var, FO.UIntPtrCastRequired
                                   ? castValueFromUintptr(
                                         CGF, I->getLocation(), FD->getType(),
                                         Args[Cnt]->getName(), ArgLVal)
                                   : ArgLVal.getAddress(CGF)}});
    } else {
      // If 'this' is captured, load it into CXXThisValue.
      assert(I->capturesThis());
      CXXThisValue = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
      LocalAddrs.insert({Args[Cnt], {nullptr, ArgLVal.getAddress(CGF)}});
    }
    ++Cnt;
    ++I;
  }

  return F;
}

llvm::Function *
CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S,
                                                    SourceLocation Loc) {
  assert(
      CapturedStmtInfo &&
      "CapturedStmtInfo should be set when generating the captured function");
  const CapturedDecl *CD = S.getCapturedDecl();
  // Build the argument list.
  bool NeedWrapperFunction =
      getDebugInfo() && CGM.getCodeGenOpts().hasReducedDebugInfo();
  FunctionArgList Args;
  llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>> LocalAddrs;
  llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>> VLASizes;
  SmallString<256> Buffer;
  llvm::raw_svector_ostream Out(Buffer);
  Out << CapturedStmtInfo->getHelperName();
  if (NeedWrapperFunction)
    Out << "_debug__";
  FunctionOptions FO(&S, !NeedWrapperFunction, /*RegisterCastedArgsOnly=*/false,
                     Out.str(), Loc);
  llvm::Function *F = emitOutlinedFunctionPrologue(*this, Args, LocalAddrs,
                                                   VLASizes, CXXThisValue, FO);
  CodeGenFunction::OMPPrivateScope LocalScope(*this);
  for (const auto &LocalAddrPair : LocalAddrs) {
    if (LocalAddrPair.second.first) {
      LocalScope.addPrivate(LocalAddrPair.second.first,
                            LocalAddrPair.second.second);
    }
  }
  (void)LocalScope.Privatize();
  for (const auto &VLASizePair : VLASizes)
    VLASizeMap[VLASizePair.second.first] = VLASizePair.second.second;
  PGO.assignRegionCounters(GlobalDecl(CD), F);
  CapturedStmtInfo->EmitBody(*this, CD->getBody());
  (void)LocalScope.ForceCleanup();
  FinishFunction(CD->getBodyRBrace());
  if (!NeedWrapperFunction)
    return F;

  FunctionOptions WrapperFO(&S, /*UIntPtrCastRequired=*/true,
                            /*RegisterCastedArgsOnly=*/true,
                            CapturedStmtInfo->getHelperName(), Loc);
  CodeGenFunction WrapperCGF(CGM, /*suppressNewContext=*/true);
  WrapperCGF.CapturedStmtInfo = CapturedStmtInfo;
  Args.clear();
  LocalAddrs.clear();
  VLASizes.clear();
  llvm::Function *WrapperF =
      emitOutlinedFunctionPrologue(WrapperCGF, Args, LocalAddrs, VLASizes,
                                   WrapperCGF.CXXThisValue, WrapperFO);
  llvm::SmallVector<llvm::Value *, 4> CallArgs;
  auto *PI = F->arg_begin();
  for (const auto *Arg : Args) {
    llvm::Value *CallArg;
    auto I = LocalAddrs.find(Arg);
    if (I != LocalAddrs.end()) {
      LValue LV = WrapperCGF.MakeAddrLValue(
          I->second.second,
          I->second.first ? I->second.first->getType() : Arg->getType(),
          AlignmentSource::Decl);
      if (LV.getType()->isAnyComplexType())
        LV.setAddress(WrapperCGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            LV.getAddress(WrapperCGF),
            PI->getType()->getPointerTo(
                LV.getAddress(WrapperCGF).getAddressSpace()),
            PI->getType()));
      CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
    } else {
      auto EI = VLASizes.find(Arg);
      if (EI != VLASizes.end()) {
        CallArg = EI->second.second;
      } else {
        LValue LV =
            WrapperCGF.MakeAddrLValue(WrapperCGF.GetAddrOfLocalVar(Arg),
                                      Arg->getType(), AlignmentSource::Decl);
        CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
      }
    }
    CallArgs.emplace_back(WrapperCGF.EmitFromMemory(CallArg, Arg->getType()));
    ++PI;
  }
  CGM.getOpenMPRuntime().emitOutlinedFunctionCall(WrapperCGF, Loc, F, CallArgs);
  WrapperCGF.FinishFunction();
  return WrapperF;
}

//===----------------------------------------------------------------------===//
// OpenMP Directive Emission
//===----------------------------------------------------------------------===//
void CodeGenFunction::EmitOMPAggregateAssign(
    Address DestAddr, Address SrcAddr, QualType OriginalType,
    const llvm::function_ref<void(Address, Address)> CopyGen) {
  // Perform element-by-element initialization.
  QualType ElementTy;

  // Drill down to the base element type on both arrays.
  const ArrayType *ArrayTy = OriginalType->getAsArrayTypeUnsafe();
  llvm::Value *NumElements = emitArrayLength(ArrayTy, ElementTy, DestAddr);
  SrcAddr = Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());

  llvm::Value *SrcBegin = SrcAddr.getPointer();
  llvm::Value *DestBegin = DestAddr.getPointer();
  // Cast from pointer to array type to pointer to single element.
  llvm::Value *DestEnd =
      Builder.CreateGEP(DestAddr.getElementType(), DestBegin, NumElements);
  // The basic structure here is a while-do loop.
  llvm::BasicBlock *BodyBB = createBasicBlock("omp.arraycpy.body");
  llvm::BasicBlock *DoneBB = createBasicBlock("omp.arraycpy.done");
  llvm::Value *IsEmpty =
      Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty");
  Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);

  // Enter the loop body, making that address the current address.
  llvm::BasicBlock *EntryBB = Builder.GetInsertBlock();
  EmitBlock(BodyBB);

  CharUnits ElementSize = getContext().getTypeSizeInChars(ElementTy);

  llvm::PHINode *SrcElementPHI =
      Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
  SrcElementPHI->addIncoming(SrcBegin, EntryBB);
  Address SrcElementCurrent =
      Address(SrcElementPHI, SrcAddr.getElementType(),
              SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  llvm::PHINode *DestElementPHI = Builder.CreatePHI(
      DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
  DestElementPHI->addIncoming(DestBegin, EntryBB);
  Address DestElementCurrent =
      Address(DestElementPHI, DestAddr.getElementType(),
              DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  // Emit copy.
  CopyGen(DestElementCurrent, SrcElementCurrent);

  // Shift the address forward by one element.
  llvm::Value *DestElementNext =
      Builder.CreateConstGEP1_32(DestAddr.getElementType(), DestElementPHI,
                                 /*Idx0=*/1, "omp.arraycpy.dest.element");
  llvm::Value *SrcElementNext =
      Builder.CreateConstGEP1_32(SrcAddr.getElementType(), SrcElementPHI,
                                 /*Idx0=*/1, "omp.arraycpy.src.element");
  // Check whether we've reached the end.
  llvm::Value *Done =
      Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
  Builder.CreateCondBr(Done, DoneBB, BodyBB);
  DestElementPHI->addIncoming(DestElementNext, Builder.GetInsertBlock());
  SrcElementPHI->addIncoming(SrcElementNext, Builder.GetInsertBlock());

  // Done.
  EmitBlock(DoneBB, /*IsFinished=*/true);
}
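
// The copy loop emitted above has this shape (schematic pseudo-IR, for
// orientation only):
//   omp.arraycpy.body:
//     %src = phi [ %src.begin, %entry ], [ %src.next, %omp.arraycpy.body ]
//     %dst = phi [ %dst.begin, %entry ], [ %dst.next, %omp.arraycpy.body ]
//     ; CopyGen(%dst, %src)
//     %dst.next = getelementptr %dst, 1
//     %src.next = getelementptr %src, 1
//     %done = icmp eq %dst.next, %dst.end
//     br %done, label %omp.arraycpy.done, label %omp.arraycpy.body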

void CodeGenFunction::EmitOMPCopy(QualType OriginalType, Address DestAddr,
                                  Address SrcAddr, const VarDecl *DestVD,
                                  const VarDecl *SrcVD, const Expr *Copy) {
  if (OriginalType->isArrayType()) {
    const auto *BO = dyn_cast<BinaryOperator>(Copy);
    if (BO && BO->getOpcode() == BO_Assign) {
      // Perform simple memcpy for simple copying.
      LValue Dest = MakeAddrLValue(DestAddr, OriginalType);
      LValue Src = MakeAddrLValue(SrcAddr, OriginalType);
      EmitAggregateAssign(Dest, Src, OriginalType);
    } else {
      // For arrays with complex element types perform element by element
      // copying.
      EmitOMPAggregateAssign(
          DestAddr, SrcAddr, OriginalType,
          [this, Copy, SrcVD, DestVD](Address DestElement, Address SrcElement) {
            // Working with the single array element, so have to remap
            // destination and source variables to corresponding array
            // elements.
            CodeGenFunction::OMPPrivateScope Remap(*this);
            Remap.addPrivate(DestVD, DestElement);
            Remap.addPrivate(SrcVD, SrcElement);
            (void)Remap.Privatize();
            EmitIgnoredExpr(Copy);
          });
    }
  } else {
    // Remap pseudo source variable to private copy.
    CodeGenFunction::OMPPrivateScope Remap(*this);
    Remap.addPrivate(SrcVD, SrcAddr);
    Remap.addPrivate(DestVD, DestAddr);
    (void)Remap.Privatize();
    // Emit copying of the whole variable.
    EmitIgnoredExpr(Copy);
  }
}
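
// Example (illustrative): for a type with a user-defined copy assignment,
//   struct T { T &operator=(const T &); };
//   T t; ... #pragma omp parallel copyin(t)
// the 'Copy' expression is the remapped assignment `dst = src;`, emitted
// once for scalars/records and per element (via EmitOMPAggregateAssign)
// for arrays of such types; plain array assignments become an aggregate
// copy.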

bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
                                                OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return false;
  bool DeviceConstTarget =
      getLangOpts().OpenMPIsDevice &&
      isOpenMPTargetExecutionDirective(D.getDirectiveKind());
  bool FirstprivateIsLastprivate = false;
  llvm::DenseMap<const VarDecl *, OpenMPLastprivateModifier> Lastprivates;
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    for (const auto *D : C->varlists())
      Lastprivates.try_emplace(
          cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl(),
          C->getKind());
  }
  llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate;
  llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
  getOpenMPCaptureRegions(CaptureRegions, D.getDirectiveKind());
  // Force emission of the firstprivate copy if the directive does not emit
  // outlined function, like omp for, omp simd, omp distribute etc.
  bool MustEmitFirstprivateCopy =
      CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown;
  for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
    const auto *IRef = C->varlist_begin();
    const auto *InitsRef = C->inits().begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      bool ThisFirstprivateIsLastprivate =
          Lastprivates.count(OrigVD->getCanonicalDecl()) > 0;
      const FieldDecl *FD = CapturedStmtInfo->lookup(OrigVD);
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
      if (!MustEmitFirstprivateCopy && !ThisFirstprivateIsLastprivate && FD &&
          !FD->getType()->isReferenceType() &&
          (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) {
        EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl());
        ++IRef;
        ++InitsRef;
        continue;
      }
      // Do not emit copy for firstprivate constant variables in target regions,
      // captured by reference.
      if (DeviceConstTarget && OrigVD->getType().isConstant(getContext()) &&
          FD && FD->getType()->isReferenceType() &&
          (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) {
        EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl());
        ++IRef;
        ++InitsRef;
        continue;
      }
      FirstprivateIsLastprivate =
          FirstprivateIsLastprivate || ThisFirstprivateIsLastprivate;
      if (EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *VDInit =
            cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl());
        bool IsRegistered;
        DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
                        /*RefersToEnclosingVariableOrCapture=*/FD != nullptr,
                        (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
        LValue OriginalLVal;
        if (!FD) {
          // Check if the firstprivate variable is just a constant value.
          ConstantEmission CE = tryEmitAsConstant(&DRE);
          if (CE && !CE.isReference()) {
            // Constant value, no need to create a copy.
            ++IRef;
            ++InitsRef;
            continue;
          }
          if (CE && CE.isReference()) {
            OriginalLVal = CE.getReferenceLValue(*this, &DRE);
          } else {
            assert(!CE && "Expected non-constant firstprivate.");
            OriginalLVal = EmitLValue(&DRE);
          }
        } else {
          OriginalLVal = EmitLValue(&DRE);
        }
        QualType Type = VD->getType();
        if (Type->isArrayType()) {
          // Emit VarDecl with copy init for arrays.
          // Get the address of the original variable captured in current
          // captured region.
          AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
          const Expr *Init = VD->getInit();
          if (!isa<CXXConstructExpr>(Init) || isTrivialInitializer(Init)) {
            // Perform simple memcpy.
            LValue Dest = MakeAddrLValue(Emission.getAllocatedAddress(), Type);
            EmitAggregateAssign(Dest, OriginalLVal, Type);
          } else {
            EmitOMPAggregateAssign(
                Emission.getAllocatedAddress(), OriginalLVal.getAddress(*this),
                Type,
                [this, VDInit, Init](Address DestElement, Address SrcElement) {
                  // Clean up any temporaries needed by the
                  // initialization.
                  RunCleanupsScope InitScope(*this);
                  // Emit initialization for single element.
                  setAddrOfLocalVar(VDInit, SrcElement);
                  EmitAnyExprToMem(Init, DestElement,
                                   Init->getType().getQualifiers(),
                                   /*IsInitializer*/ false);
                  LocalDeclMap.erase(VDInit);
                });
          }
          EmitAutoVarCleanups(Emission);
          IsRegistered =
              PrivateScope.addPrivate(OrigVD, Emission.getAllocatedAddress());
        } else {
          Address OriginalAddr = OriginalLVal.getAddress(*this);
          // Emit private VarDecl with copy init.
          // Remap temp VDInit variable to the address of the original
          // variable (for proper handling of captured global variables).
          setAddrOfLocalVar(VDInit, OriginalAddr);
          EmitDecl(*VD);
          LocalDeclMap.erase(VDInit);
          Address VDAddr = GetAddrOfLocalVar(VD);
          if (ThisFirstprivateIsLastprivate &&
              Lastprivates[OrigVD->getCanonicalDecl()] ==
                  OMPC_LASTPRIVATE_conditional) {
            // Create/init special variable for lastprivate conditionals.
            llvm::Value *V =
                EmitLoadOfScalar(MakeAddrLValue(VDAddr, (*IRef)->getType(),
                                                AlignmentSource::Decl),
                                 (*IRef)->getExprLoc());
            VDAddr = CGM.getOpenMPRuntime().emitLastprivateConditionalInit(
                *this, OrigVD);
            EmitStoreOfScalar(V, MakeAddrLValue(VDAddr, (*IRef)->getType(),
                                                AlignmentSource::Decl));
            LocalDeclMap.erase(VD);
            setAddrOfLocalVar(VD, VDAddr);
          }
          IsRegistered = PrivateScope.addPrivate(OrigVD, VDAddr);
        }
        assert(IsRegistered &&
               "firstprivate var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
      ++InitsRef;
    }
  }
  return FirstprivateIsLastprivate && !EmittedAsFirstprivate.empty();
}
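
// Usage sketch (assembled for illustration; it mirrors how the directive
// emitters later in this file drive the clause codegen):
//   OMPPrivateScope PrivateScope(CGF);
//   (void)CGF.EmitOMPFirstprivateClause(D, PrivateScope);
//   CGF.EmitOMPPrivateClause(D, PrivateScope);
//   (void)PrivateScope.Privatize();
//   // ... emit the region body with captured vars remapped ...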

void CodeGenFunction::EmitOMPPrivateClause(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return;
  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
  for (const auto *C : D.getClausesOfKind<OMPPrivateClause>()) {
    auto IRef = C->varlist_begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
        EmitDecl(*VD);
        // Emit private VarDecl with copy init.
        bool IsRegistered =
            PrivateScope.addPrivate(OrigVD, GetAddrOfLocalVar(VD));
        assert(IsRegistered && "private var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
    }
  }
}

bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
  if (!HaveInsertPoint())
    return false;
  // threadprivate_var1 = master_threadprivate_var1;
  // operator=(threadprivate_var2, master_threadprivate_var2);
  // ...
  // __kmpc_barrier(&loc, global_tid);
  llvm::DenseSet<const VarDecl *> CopiedVars;
  llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr;
  for (const auto *C : D.getClausesOfKind<OMPCopyinClause>()) {
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (const Expr *AssignOp : C->assignment_ops()) {
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = VD->getType();
      if (CopiedVars.insert(VD->getCanonicalDecl()).second) {
        // Get the address of the master variable. If we are emitting code with
        // TLS support, the address is passed from the master as field in the
        // captured declaration.
        Address MasterAddr = Address::invalid();
        if (getLangOpts().OpenMPUseTLS &&
            getContext().getTargetInfo().isTLSSupported()) {
          assert(CapturedStmtInfo->lookup(VD) &&
                 "Copyin threadprivates should have been captured!");
          DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD), true,
                          (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
          MasterAddr = EmitLValue(&DRE).getAddress(*this);
          LocalDeclMap.erase(VD);
        } else {
          MasterAddr =
              Address(VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD)
                                          : CGM.GetAddrOfGlobal(VD),
                      CGM.getTypes().ConvertTypeForMem(VD->getType()),
                      getContext().getDeclAlign(VD));
        }
        // Get the address of the threadprivate variable.
        Address PrivateAddr = EmitLValue(*IRef).getAddress(*this);
        if (CopiedVars.size() == 1) {
          // At first check if current thread is a master thread. If it is, no
          // need to copy data.
          CopyBegin = createBasicBlock("copyin.not.master");
          CopyEnd = createBasicBlock("copyin.not.master.end");
          // TODO: Avoid ptrtoint conversion.
          auto *MasterAddrInt =
              Builder.CreatePtrToInt(MasterAddr.getPointer(), CGM.IntPtrTy);
          auto *PrivateAddrInt =
              Builder.CreatePtrToInt(PrivateAddr.getPointer(), CGM.IntPtrTy);
          Builder.CreateCondBr(
              Builder.CreateICmpNE(MasterAddrInt, PrivateAddrInt), CopyBegin,
              CopyEnd);
          EmitBlock(CopyBegin);
        }
        const auto *SrcVD =
            cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        EmitOMPCopy(Type, PrivateAddr, MasterAddr, DestVD, SrcVD, AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
  }
  if (CopyEnd) {
    // Exit out of copying procedure for non-master thread.
    EmitBlock(CopyEnd, /*IsFinished=*/true);
    return true;
  }
  return false;
}
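
// Example (illustrative):
//   int tp;
//   #pragma omp threadprivate(tp)
//   #pragma omp parallel copyin(tp)
// The code above compares the master copy's address with the current
// thread's copy; only when they differ (i.e. on non-master threads) does
// control enter "copyin.not.master" and copy the master value over.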

bool CodeGenFunction::EmitOMPLastprivateClauseInit(
    const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return false;
  bool HasAtLeastOneLastprivate = false;
  llvm::DenseSet<const VarDecl *> SIMDLCVs;
  if (isOpenMPSimdDirective(D.getDirectiveKind())) {
    const auto *LoopDirective = cast<OMPLoopDirective>(&D);
    for (const Expr *C : LoopDirective->counters()) {
      SIMDLCVs.insert(
          cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
    }
  }
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    HasAtLeastOneLastprivate = true;
    if (isOpenMPTaskLoopDirective(D.getDirectiveKind()) &&
        !getLangOpts().OpenMPSimd)
      break;
    const auto *IRef = C->varlist_begin();
    const auto *IDestRef = C->destination_exprs().begin();
    for (const Expr *IInit : C->private_copies()) {
      // Keep the address of the original variable for future update at the end
      // of the loop.
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      // Taskloops do not require additional initialization, it is done in
      // runtime support library.
      if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
                        /*RefersToEnclosingVariableOrCapture=*/
                        CapturedStmtInfo->lookup(OrigVD) != nullptr,
                        (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
        PrivateScope.addPrivate(DestVD, EmitLValue(&DRE).getAddress(*this));
        // Check if the variable is also a firstprivate: in this case IInit is
        // not generated. Initialization of this variable will happen in codegen
        // for 'firstprivate' clause.
        if (IInit && !SIMDLCVs.count(OrigVD->getCanonicalDecl())) {
          const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
          Address VDAddr = Address::invalid();
          if (C->getKind() == OMPC_LASTPRIVATE_conditional) {
            VDAddr = CGM.getOpenMPRuntime().emitLastprivateConditionalInit(
                *this, OrigVD);
            setAddrOfLocalVar(VD, VDAddr);
          } else {
            // Emit private VarDecl with copy init.
            EmitDecl(*VD);
            VDAddr = GetAddrOfLocalVar(VD);
          }
          bool IsRegistered = PrivateScope.addPrivate(OrigVD, VDAddr);
          assert(IsRegistered &&
                 "lastprivate var already registered as private");
          (void)IsRegistered;
        }
      }
      ++IRef;
      ++IDestRef;
    }
  }
  return HasAtLeastOneLastprivate;
}
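
// Example (illustrative): for `#pragma omp for lastprivate(x)` this init
// step allocates the private 'x' and records the original variable's
// address; EmitOMPLastprivateClauseFinal() below then copies the private
// value back on the thread that executed the sequentially last iteration.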

void CodeGenFunction::EmitOMPLastprivateClauseFinal(
    const OMPExecutableDirective &D, bool NoFinals,
    llvm::Value *IsLastIterCond) {
  if (!HaveInsertPoint())
    return;
  // Emit following code:
  // if (<IsLastIterCond>) {
  //   orig_var1 = private_orig_var1;
  //   ...
  //   orig_varn = private_orig_varn;
  // }
  llvm::BasicBlock *ThenBB = nullptr;
  llvm::BasicBlock *DoneBB = nullptr;
  if (IsLastIterCond) {
    // Emit implicit barrier if at least one lastprivate conditional is found
    // and this is not a simd mode.
    if (!getLangOpts().OpenMPSimd &&
        llvm::any_of(D.getClausesOfKind<OMPLastprivateClause>(),
                     [](const OMPLastprivateClause *C) {
                       return C->getKind() == OMPC_LASTPRIVATE_conditional;
                     })) {
      CGM.getOpenMPRuntime().emitBarrierCall(*this, D.getBeginLoc(),
                                             OMPD_unknown,
                                             /*EmitChecks=*/false,
                                             /*ForceSimpleCall=*/true);
    }
    ThenBB = createBasicBlock(".omp.lastprivate.then");
    DoneBB = createBasicBlock(".omp.lastprivate.done");
    Builder.CreateCondBr(IsLastIterCond, ThenBB, DoneBB);
    EmitBlock(ThenBB);
  }
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  llvm::DenseMap<const VarDecl *, const Expr *> LoopCountersAndUpdates;
  if (const auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) {
    auto IC = LoopDirective->counters().begin();
    for (const Expr *F : LoopDirective->finals()) {
      const auto *D =
          cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl())->getCanonicalDecl();
      if (NoFinals)
        AlreadyEmittedVars.insert(D);
      else
        LoopCountersAndUpdates[D] = F;
      ++IC;
    }
  }
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (const Expr *AssignOp : C->assignment_ops()) {
      const auto *PrivateVD =
          cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = PrivateVD->getType();
      const auto *CanonicalVD = PrivateVD->getCanonicalDecl();
      if (AlreadyEmittedVars.insert(CanonicalVD).second) {
        // If lastprivate variable is a loop control variable for loop-based
        // directive, update its value before copyin back to original
        // variable.
        if (const Expr *FinalExpr = LoopCountersAndUpdates.lookup(CanonicalVD))
          EmitIgnoredExpr(FinalExpr);
        const auto *SrcVD =
            cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        // Get the address of the private variable.
        Address PrivateAddr = GetAddrOfLocalVar(PrivateVD);
        if (const auto *RefTy = PrivateVD->getType()->getAs<ReferenceType>())
          PrivateAddr = Address(
              Builder.CreateLoad(PrivateAddr),
              CGM.getTypes().ConvertTypeForMem(RefTy->getPointeeType()),
              CGM.getNaturalTypeAlignment(RefTy->getPointeeType()));
        // Store the last value to the private copy in the last iteration.
        if (C->getKind() == OMPC_LASTPRIVATE_conditional)
          CGM.getOpenMPRuntime().emitLastprivateConditionalFinalUpdate(
              *this, MakeAddrLValue(PrivateAddr, (*IRef)->getType()), PrivateVD,
              (*IRef)->getExprLoc());
        // Get the address of the original variable.
        Address OriginalAddr = GetAddrOfLocalVar(DestVD);
        EmitOMPCopy(Type, OriginalAddr, PrivateAddr, DestVD, SrcVD, AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
    if (const Expr *PostUpdate = C->getPostUpdateExpr())
      EmitIgnoredExpr(PostUpdate);
  }
  if (IsLastIterCond)
    EmitBlock(DoneBB, /*IsFinished=*/true);
}

void CodeGenFunction::EmitOMPReductionClauseInit(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope, bool ForInscan) {
  if (!HaveInsertPoint())
    return;
  SmallVector<const Expr *, 4> Shareds;
  SmallVector<const Expr *, 4> Privates;
  SmallVector<const Expr *, 4> ReductionOps;
  SmallVector<const Expr *, 4> LHSs;
  SmallVector<const Expr *, 4> RHSs;
  OMPTaskDataTy Data;
  SmallVector<const Expr *, 4> TaskLHSs;
  SmallVector<const Expr *, 4> TaskRHSs;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    if (ForInscan != (C->getModifier() == OMPC_REDUCTION_inscan))
      continue;
    Shareds.append(C->varlist_begin(), C->varlist_end());
    Privates.append(C->privates().begin(), C->privates().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
    LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    if (C->getModifier() == OMPC_REDUCTION_task) {
      Data.ReductionVars.append(C->privates().begin(), C->privates().end());
      Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end());
      Data.ReductionCopies.append(C->privates().begin(), C->privates().end());
      Data.ReductionOps.append(C->reduction_ops().begin(),
                               C->reduction_ops().end());
      TaskLHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
      TaskRHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    }
  }
  ReductionCodeGen RedCG(Shareds, Shareds, Privates, ReductionOps);
  unsigned Count = 0;
  auto *ILHS = LHSs.begin();
  auto *IRHS = RHSs.begin();
  auto *IPriv = Privates.begin();
  for (const Expr *IRef : Shareds) {
    const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IPriv)->getDecl());
    // Emit private VarDecl with reduction init.
    RedCG.emitSharedOrigLValue(*this, Count);
    RedCG.emitAggregateType(*this, Count);
    AutoVarEmission Emission = EmitAutoVarAlloca(*PrivateVD);
    RedCG.emitInitialization(*this, Count, Emission.getAllocatedAddress(),
                             RedCG.getSharedLValue(Count).getAddress(*this),
                             [&Emission](CodeGenFunction &CGF) {
                               CGF.EmitAutoVarInit(Emission);
                               return true;
                             });
    EmitAutoVarCleanups(Emission);
    Address BaseAddr = RedCG.adjustPrivateAddress(
        *this, Count, Emission.getAllocatedAddress());
    bool IsRegistered =
        PrivateScope.addPrivate(RedCG.getBaseDecl(Count), BaseAddr);
    assert(IsRegistered && "private var already registered as private");
    // Silence the warning about unused variable.
    (void)IsRegistered;

    const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
    const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
    QualType Type = PrivateVD->getType();
    bool isaOMPArraySectionExpr = isa<OMPArraySectionExpr>(IRef);
    if (isaOMPArraySectionExpr && Type->isVariablyModifiedType()) {
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      PrivateScope.addPrivate(LHSVD,
                              RedCG.getSharedLValue(Count).getAddress(*this));
      PrivateScope.addPrivate(RHSVD, GetAddrOfLocalVar(PrivateVD));
    } else if ((isaOMPArraySectionExpr && Type->isScalarType()) ||
               isa<ArraySubscriptExpr>(IRef)) {
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      PrivateScope.addPrivate(LHSVD,
                              RedCG.getSharedLValue(Count).getAddress(*this));
      PrivateScope.addPrivate(RHSVD, Builder.CreateElementBitCast(
                                         GetAddrOfLocalVar(PrivateVD),
                                         ConvertTypeForMem(RHSVD->getType()),
                                         "rhs.begin"));
    } else {
      QualType Type = PrivateVD->getType();
      bool IsArray = getContext().getAsArrayType(Type) != nullptr;
      Address OriginalAddr = RedCG.getSharedLValue(Count).getAddress(*this);
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      if (IsArray) {
        OriginalAddr = Builder.CreateElementBitCast(
            OriginalAddr, ConvertTypeForMem(LHSVD->getType()), "lhs.begin");
      }
      PrivateScope.addPrivate(LHSVD, OriginalAddr);
      PrivateScope.addPrivate(
          RHSVD, IsArray ? Builder.CreateElementBitCast(
                               GetAddrOfLocalVar(PrivateVD),
                               ConvertTypeForMem(RHSVD->getType()), "rhs.begin")
                         : GetAddrOfLocalVar(PrivateVD));
    }
    ++ILHS;
    ++IRHS;
    ++IPriv;
    ++Count;
  }
  if (!Data.ReductionVars.empty()) {
    Data.IsReductionWithTaskMod = true;
    Data.IsWorksharingReduction =
        isOpenMPWorksharingDirective(D.getDirectiveKind());
    llvm::Value *ReductionDesc = CGM.getOpenMPRuntime().emitTaskReductionInit(
        *this, D.getBeginLoc(), TaskLHSs, TaskRHSs, Data);
    const Expr *TaskRedRef = nullptr;
    switch (D.getDirectiveKind()) {
    case OMPD_parallel:
      TaskRedRef = cast<OMPParallelDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_for:
      TaskRedRef = cast<OMPForDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_sections:
      TaskRedRef = cast<OMPSectionsDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_parallel_for:
      TaskRedRef = cast<OMPParallelForDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_parallel_master:
      TaskRedRef =
          cast<OMPParallelMasterDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_parallel_sections:
      TaskRedRef =
          cast<OMPParallelSectionsDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_target_parallel:
      TaskRedRef =
          cast<OMPTargetParallelDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_target_parallel_for:
      TaskRedRef =
          cast<OMPTargetParallelForDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_distribute_parallel_for:
      TaskRedRef =
          cast<OMPDistributeParallelForDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_teams_distribute_parallel_for:
      TaskRedRef = cast<OMPTeamsDistributeParallelForDirective>(D)
                       .getTaskReductionRefExpr();
      break;
    case OMPD_target_teams_distribute_parallel_for:
      TaskRedRef = cast<OMPTargetTeamsDistributeParallelForDirective>(D)
                       .getTaskReductionRefExpr();
      break;
    case OMPD_simd:
    case OMPD_for_simd:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_parallel_for_simd:
    case OMPD_task:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_flush:
    case OMPD_depobj:
    case OMPD_scan:
    case OMPD_ordered:
    case OMPD_atomic:
    case OMPD_teams:
    case OMPD_target:
    case OMPD_cancellation_point:
    case OMPD_cancel:
    case OMPD_target_data:
    case OMPD_target_enter_data:
    case OMPD_target_exit_data:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_master_taskloop:
    case OMPD_master_taskloop_simd:
    case OMPD_parallel_master_taskloop:
    case OMPD_parallel_master_taskloop_simd:
    case OMPD_distribute:
    case OMPD_target_update:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_distribute_simd:
    case OMPD_target_parallel_for_simd:
    case OMPD_target_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_target_teams:
    case OMPD_target_teams_distribute:
    case OMPD_target_teams_distribute_parallel_for_simd:
    case OMPD_target_teams_distribute_simd:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_threadprivate:
    case OMPD_allocate:
    case OMPD_declare_reduction:
    case OMPD_declare_mapper:
    case OMPD_declare_simd:
    case OMPD_requires:
    case OMPD_declare_variant:
    case OMPD_begin_declare_variant:
    case OMPD_end_declare_variant:
    case OMPD_unknown:
    default:
      llvm_unreachable("Unexpected directive with task reductions.");
    }

    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(TaskRedRef)->getDecl());
    EmitVarDecl(*VD);
    EmitStoreOfScalar(ReductionDesc, GetAddrOfLocalVar(VD),
                      /*Volatile=*/false, TaskRedRef->getType());
  }
}
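
// Example (illustrative): for `#pragma omp parallel reduction(+:s)` the
// code above allocates a private 's' initialized with the '+' identity
// (zero) and registers LHS/RHS helper variables, so that
// EmitOMPReductionClauseFinal() can later hand the combiner
// `lhs = lhs + rhs;` to the runtime's reduction entry points.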

void CodeGenFunction::EmitOMPReductionClauseFinal(
    const OMPExecutableDirective &D, const OpenMPDirectiveKind ReductionKind) {
  if (!HaveInsertPoint())
    return;
  llvm::SmallVector<const Expr *, 8> Privates;
  llvm::SmallVector<const Expr *, 8> LHSExprs;
  llvm::SmallVector<const Expr *, 8> RHSExprs;
  llvm::SmallVector<const Expr *, 8> ReductionOps;
  bool HasAtLeastOneReduction = false;
  bool IsReductionWithTaskMod = false;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    // Do not emit for inscan reductions.
    if (C->getModifier() == OMPC_REDUCTION_inscan)
      continue;
    HasAtLeastOneReduction = true;
    Privates.append(C->privates().begin(), C->privates().end());
    LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
    IsReductionWithTaskMod =
        IsReductionWithTaskMod || C->getModifier() == OMPC_REDUCTION_task;
  }
  if (HasAtLeastOneReduction) {
    if (IsReductionWithTaskMod) {
      CGM.getOpenMPRuntime().emitTaskReductionFini(
          *this, D.getBeginLoc(),
          isOpenMPWorksharingDirective(D.getDirectiveKind()));
    }
    bool WithNowait = D.getSingleClause<OMPNowaitClause>() ||
                      isOpenMPParallelDirective(D.getDirectiveKind()) ||
                      ReductionKind == OMPD_simd;
    bool SimpleReduction = ReductionKind == OMPD_simd;
    // Emit nowait reduction if nowait clause is present or directive is a
    // parallel directive (it always has implicit barrier).
    CGM.getOpenMPRuntime().emitReduction(
        *this, D.getEndLoc(), Privates, LHSExprs, RHSExprs, ReductionOps,
        {WithNowait, SimpleReduction, ReductionKind});
  }
}

static void emitPostUpdateForReductionClause(
    CodeGenFunction &CGF, const OMPExecutableDirective &D,
    const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
  if (!CGF.HaveInsertPoint())
    return;
  llvm::BasicBlock *DoneBB = nullptr;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    if (const Expr *PostUpdate = C->getPostUpdateExpr()) {
      if (!DoneBB) {
        if (llvm::Value *Cond = CondGen(CGF)) {
          // If the first post-update expression is found, emit conditional
          // block if it was requested.
          llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.pu");
          DoneBB = CGF.createBasicBlock(".omp.reduction.pu.done");
          CGF.Builder.CreateCondBr(Cond, ThenBB, DoneBB);
          CGF.EmitBlock(ThenBB);
        }
      }
      CGF.EmitIgnoredExpr(PostUpdate);
    }
  }
  if (DoneBB)
    CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
}
1471 
1472 namespace {
1473 /// Codegen lambda for appending distribute lower and upper bounds to the
1474 /// outlined parallel function. This is necessary for combined constructs
1475 /// such as 'distribute parallel for'.
1476 typedef llvm::function_ref<void(CodeGenFunction &,
1477  const OMPExecutableDirective &,
1479  CodeGenBoundParametersTy;
1480 } // anonymous namespace
1481 
1482 static void
1484  const OMPExecutableDirective &S) {
1485  if (CGF.getLangOpts().OpenMP < 50)
1486  return;
1488  for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
1489  for (const Expr *Ref : C->varlists()) {
1490  if (!Ref->getType()->isScalarType())
1491  continue;
1492  const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
1493  if (!DRE)
1494  continue;
1495  PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
1497  }
1498  }
1499  for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
1500  for (const Expr *Ref : C->varlists()) {
1501  if (!Ref->getType()->isScalarType())
1502  continue;
1503  const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
1504  if (!DRE)
1505  continue;
1506  PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
1508  }
1509  }
1510  for (const auto *C : S.getClausesOfKind<OMPLinearClause>()) {
1511  for (const Expr *Ref : C->varlists()) {
1512  if (!Ref->getType()->isScalarType())
1513  continue;
1514  const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
1515  if (!DRE)
1516  continue;
1517  PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
1519  }
1520  }
1521  // Privates should not be analyzed since they are not captured at all.
1522  // Task reductions may be skipped - tasks are ignored.
1523  // Firstprivates do not return a value but may be passed by reference - no
1524  // need to check for an updated lastprivate conditional.
1525  for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
1526  for (const Expr *Ref : C->varlists()) {
1527  if (!Ref->getType()->isScalarType())
1528  continue;
1529  const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
1530  if (!DRE)
1531  continue;
1532  PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
1533  }
1534  }
1536  CGF, S, PrivateDecls);
1537 }
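// Illustrative example (added for exposition; not from the original source):
// the scan above registers scalar privatized variables so that assignments
// to them can still trigger updates of enclosing lastprivate conditional
// candidates, as in
//
//   int x = 0;
//   #pragma omp parallel for lastprivate(conditional : x)
//   for (int i = 0; i < n; ++i)
//     if (check(i))   // 'check' is a hypothetical predicate
//       x = i;        // conditional lastprivate update tracked here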
1538 
1540  CodeGenFunction &CGF, const OMPExecutableDirective &S,
1541  OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
1542  const CodeGenBoundParametersTy &CodeGenBoundParameters) {
1543  const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
1544  llvm::Value *NumThreads = nullptr;
1545  llvm::Function *OutlinedFn =
1547  S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
1548  if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>()) {
1549  CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
1550  NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
1551  /*IgnoreResultAssign=*/true);
1553  CGF, NumThreads, NumThreadsClause->getBeginLoc());
1554  }
1555  if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>()) {
1556  CodeGenFunction::RunCleanupsScope ProcBindScope(CGF);
1558  CGF, ProcBindClause->getProcBindKind(), ProcBindClause->getBeginLoc());
1559  }
1560  const Expr *IfCond = nullptr;
1561  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
1562  if (C->getNameModifier() == OMPD_unknown ||
1563  C->getNameModifier() == OMPD_parallel) {
1564  IfCond = C->getCondition();
1565  break;
1566  }
1567  }
1568 
1569  OMPParallelScope Scope(CGF, S);
1571  // Combining 'distribute' with 'for' requires sharing each 'distribute'
1572  // chunk's lower and upper bounds with the 'for' chunking mechanism.
1573  // The following lambda takes care of appending the lower and upper bound
1574  // parameters when necessary.
1575  CodeGenBoundParameters(CGF, S, CapturedVars);
1576  CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
1577  CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getBeginLoc(), OutlinedFn,
1578  CapturedVars, IfCond, NumThreads);
1579 }
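// Illustrative example (added for exposition; not from the original source):
// the clauses handled above appear in user code such as
//
//   #pragma omp parallel if(n > 1000) num_threads(8) proc_bind(close)
//   { work(); }   // 'work' is a hypothetical function
//
// 'if' decides between serial and parallel execution, 'num_threads' is
// evaluated and forwarded to the runtime call, and 'proc_bind' is emitted as
// a separate runtime hint before the parallel region is entered.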
1580 
1581 static bool isAllocatableDecl(const VarDecl *VD) {
1582  const VarDecl *CVD = VD->getCanonicalDecl();
1583  if (!CVD->hasAttr<OMPAllocateDeclAttr>())
1584  return false;
1585  const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
1586  // Use the default allocation.
1587  return !((AA->getAllocatorType() == OMPAllocateDeclAttr::OMPDefaultMemAlloc ||
1588  AA->getAllocatorType() == OMPAllocateDeclAttr::OMPNullMemAlloc) &&
1589  !AA->getAllocator());
1590 }
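// Illustrative example (added for exposition; not from the original source):
// a declaration is "allocatable" here when it carries a non-default OpenMP
// allocator, e.g.
//
//   int buf[1024];
//   #pragma omp allocate(buf) allocator(omp_high_bw_mem_alloc)
//
// whereas omp_default_mem_alloc (or an omitted allocator) keeps the ordinary
// stack allocation.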
1591 
1593  const OMPExecutableDirective &,
1595 
1597  CodeGenFunction &CGF, const VarDecl *VD) {
1598  CodeGenModule &CGM = CGF.CGM;
1599  auto &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
1600 
1601  if (!VD)
1602  return Address::invalid();
1603  const VarDecl *CVD = VD->getCanonicalDecl();
1604  if (!isAllocatableDecl(CVD))
1605  return Address::invalid();
1606  llvm::Value *Size;
1607  CharUnits Align = CGM.getContext().getDeclAlign(CVD);
1608  if (CVD->getType()->isVariablyModifiedType()) {
1609  Size = CGF.getTypeSize(CVD->getType());
1610  // Align the size: ((size + align - 1) / align) * align
1611  Size = CGF.Builder.CreateNUWAdd(
1612  Size, CGM.getSize(Align - CharUnits::fromQuantity(1)));
1613  Size = CGF.Builder.CreateUDiv(Size, CGM.getSize(Align));
1614  Size = CGF.Builder.CreateNUWMul(Size, CGM.getSize(Align));
1615  } else {
1616  CharUnits Sz = CGM.getContext().getTypeSizeInChars(CVD->getType());
1617  Size = CGM.getSize(Sz.alignTo(Align));
1618  }
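  // Worked example (added for exposition): for a variably-modified type with
  // a runtime size of 20 bytes and a declared alignment of 16, the sequence
  // above computes ((20 + 15) / 16) * 16 = 32, i.e. the size rounded up to
  // the next multiple of the alignment.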
1619 
1620  const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
1621  assert(AA->getAllocator() &&
1622  "Expected allocator expression for non-default allocator.");
1623  llvm::Value *Allocator = CGF.EmitScalarExpr(AA->getAllocator());
1624  // According to the standard, the original allocator type is an enum
1625  // (integer). Convert it to a pointer type, if required.
1626  if (Allocator->getType()->isIntegerTy())
1627  Allocator = CGF.Builder.CreateIntToPtr(Allocator, CGM.VoidPtrTy);
1628  else if (Allocator->getType()->isPointerTy())
1629  Allocator = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Allocator,
1630  CGM.VoidPtrTy);
1631 
1632  llvm::Value *Addr = OMPBuilder.createOMPAlloc(
1633  CGF.Builder, Size, Allocator,
1634  getNameWithSeparators({CVD->getName(), ".void.addr"}, ".", "."));
1635  llvm::CallInst *FreeCI =
1636  OMPBuilder.createOMPFree(CGF.Builder, Addr, Allocator);
1637 
1638  CGF.EHStack.pushCleanup<OMPAllocateCleanupTy>(NormalAndEHCleanup, FreeCI);
1640  Addr,
1642  getNameWithSeparators({CVD->getName(), ".addr"}, ".", "."));
1643  return Address(Addr, CGF.ConvertTypeForMem(CVD->getType()), Align);
1644 }
1645 
1647  CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr,
1648  SourceLocation Loc) {
1649  CodeGenModule &CGM = CGF.CGM;
1650  if (CGM.getLangOpts().OpenMPUseTLS &&
1652  return VDAddr;
1653 
1654  llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
1655 
1656  llvm::Type *VarTy = VDAddr.getElementType();
1657  llvm::Value *Data =
1658  CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.Int8PtrTy);
1659  llvm::ConstantInt *Size = CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy));
1660  std::string Suffix = getNameWithSeparators({"cache", ""});
1661  llvm::Twine CacheName = Twine(CGM.getMangledName(VD)).concat(Suffix);
1662 
1663  llvm::CallInst *ThreadPrivateCacheCall =
1664  OMPBuilder.createCachedThreadPrivate(CGF.Builder, Data, Size, CacheName);
1665 
1666  return Address(ThreadPrivateCacheCall, CGM.Int8Ty, VDAddr.getAlignment());
1667 }
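// Illustrative example (added for exposition; not from the original source):
// the cached lookup above serves declarations such as
//
//   static int counter;
//   #pragma omp threadprivate(counter)
//
// When TLS cannot be used, each access goes through the per-variable cache
// created by createCachedThreadPrivate, named "<mangled-name>.cache.".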
1668 
1670  ArrayRef<StringRef> Parts, StringRef FirstSeparator, StringRef Separator) {
1671  SmallString<128> Buffer;
1672  llvm::raw_svector_ostream OS(Buffer);
1673  StringRef Sep = FirstSeparator;
1674  for (StringRef Part : Parts) {
1675  OS << Sep << Part;
1676  Sep = Separator;
1677  }
1678  return OS.str().str();
1679 }
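// Usage sketch (added for exposition): getNameWithSeparators({"a", "b", "c"},
// "_", ".") yields "_a.b.c"; the first separator prefixes the first part and
// the regular separator joins the remaining ones.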
1680 
1682  CodeGenFunction &CGF, const Stmt *RegionBodyStmt, InsertPointTy AllocaIP,
1683  InsertPointTy CodeGenIP, Twine RegionName) {
1684  CGBuilderTy &Builder = CGF.Builder;
1685  Builder.restoreIP(CodeGenIP);
1686  llvm::BasicBlock *FiniBB = splitBBWithSuffix(Builder, /*CreateBranch=*/false,
1687  "." + RegionName + ".after");
1688 
1689  {
1690  OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(CGF, AllocaIP, *FiniBB);
1691  CGF.EmitStmt(RegionBodyStmt);
1692  }
1693 
1694  if (Builder.saveIP().isSet())
1695  Builder.CreateBr(FiniBB);
1696 }
1697 
1699  CodeGenFunction &CGF, const Stmt *RegionBodyStmt, InsertPointTy AllocaIP,
1700  InsertPointTy CodeGenIP, Twine RegionName) {
1701  CGBuilderTy &Builder = CGF.Builder;
1702  Builder.restoreIP(CodeGenIP);
1703  llvm::BasicBlock *FiniBB = splitBBWithSuffix(Builder, /*CreateBranch=*/false,
1704  "." + RegionName + ".after");
1705 
1706  {
1707  OMPBuilderCBHelpers::OutlinedRegionBodyRAII IRB(CGF, AllocaIP, *FiniBB);
1708  CGF.EmitStmt(RegionBodyStmt);
1709  }
1710 
1711  if (Builder.saveIP().isSet())
1712  Builder.CreateBr(FiniBB);
1713 }
1714 
1716  if (CGM.getLangOpts().OpenMPIRBuilder) {
1717  llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
1718  // Check if we have any if clause associated with the directive.
1719  llvm::Value *IfCond = nullptr;
1720  if (const auto *C = S.getSingleClause<OMPIfClause>())
1721  IfCond = EmitScalarExpr(C->getCondition(),
1722  /*IgnoreResultAssign=*/true);
1723 
1724  llvm::Value *NumThreads = nullptr;
1725  if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>())
1726  NumThreads = EmitScalarExpr(NumThreadsClause->getNumThreads(),
1727  /*IgnoreResultAssign=*/true);
1728 
1729  ProcBindKind ProcBind = OMP_PROC_BIND_default;
1730  if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>())
1731  ProcBind = ProcBindClause->getProcBindKind();
1732 
1733  using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
1734 
1735  // The cleanup callback that finalizes all variables at the given location,
1736  // and thus calls destructors etc.
1737  auto FiniCB = [this](InsertPointTy IP) {
1738  OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
1739  };
1740 
1741  // Privatization callback that performs appropriate action for
1742  // shared/private/firstprivate/lastprivate/copyin/... variables.
1743  //
1744  // TODO: This defaults to shared right now.
1745  auto PrivCB = [](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
1746  llvm::Value &, llvm::Value &Val, llvm::Value *&ReplVal) {
1747  // The next line is appropriate only for variables (Val) with the
1748  // data-sharing attribute "shared".
1749  ReplVal = &Val;
1750 
1751  return CodeGenIP;
1752  };
1753 
1754  const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
1755  const Stmt *ParallelRegionBodyStmt = CS->getCapturedStmt();
1756 
1757  auto BodyGenCB = [&, this](InsertPointTy AllocaIP,
1758  InsertPointTy CodeGenIP) {
1759  OMPBuilderCBHelpers::EmitOMPOutlinedRegionBody(
1760  *this, ParallelRegionBodyStmt, AllocaIP, CodeGenIP, "parallel");
1761  };
1762 
1763  CGCapturedStmtInfo CGSI(*CS, CR_OpenMP);
1764  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI);
1765  llvm::OpenMPIRBuilder::InsertPointTy AllocaIP(
1766  AllocaInsertPt->getParent(), AllocaInsertPt->getIterator());
1767  Builder.restoreIP(
1768  OMPBuilder.createParallel(Builder, AllocaIP, BodyGenCB, PrivCB, FiniCB,
1769  IfCond, NumThreads, ProcBind, S.hasCancel()));
1770  return;
1771  }
1772 
1773  // Emit parallel region as a standalone region.
1774  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
1775  Action.Enter(CGF);
1776  OMPPrivateScope PrivateScope(CGF);
1777  bool Copyins = CGF.EmitOMPCopyinClause(S);
1778  (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
1779  if (Copyins) {
1780  // Emit an implicit barrier to synchronize threads and avoid data races on
1781  // propagation of the master thread's values of threadprivate variables to
1782  // the local instances of those variables in all other implicit threads.
1784  CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
1785  /*ForceSimpleCall=*/true);
1786  }
1787  CGF.EmitOMPPrivateClause(S, PrivateScope);
1788  CGF.EmitOMPReductionClauseInit(S, PrivateScope);
1789  (void)PrivateScope.Privatize();
1790  CGF.EmitStmt(S.getCapturedStmt(OMPD_parallel)->getCapturedStmt());
1791  CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
1792  };
1793  {
1794  auto LPCRegion =
1796  emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen,
1799  [](CodeGenFunction &) { return nullptr; });
1800  }
1801  // Check for outer lastprivate conditional update.
1803 }
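// Illustrative example (added for exposition; not from the original source):
// a region exercising the fallback path above, e.g.
//
//   int t = 0, r = 0;
//   #pragma omp parallel firstprivate(t) reduction(+ : r)
//   { r += t + 1; }
//
// When the OpenMPIRBuilder is enabled, createParallel is used instead (where
// privatization currently defaults to shared, per the TODO above); otherwise
// the region is outlined and launched through emitParallelCall.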
1804 
1806  EmitStmt(S.getIfStmt());
1807 }
1808 
1809 namespace {
1810 /// RAII to handle scopes for loop transformation directives.
1811 class OMPTransformDirectiveScopeRAII {
1812  OMPLoopScope *Scope = nullptr;
1813  CodeGenFunction::CGCapturedStmtInfo *CGSI = nullptr;
1814  CodeGenFunction::CGCapturedStmtRAII *CapInfoRAII = nullptr;
1815 
1816 public:
1817  OMPTransformDirectiveScopeRAII(CodeGenFunction &CGF, const Stmt *S) {
1818  if (const auto *Dir = dyn_cast<OMPLoopBasedDirective>(S)) {
1819  Scope = new OMPLoopScope(CGF, *Dir);
1821  CapInfoRAII = new CodeGenFunction::CGCapturedStmtRAII(CGF, CGSI);
1822  }
1823  }
1824  ~OMPTransformDirectiveScopeRAII() {
1825  if (!Scope)
1826  return;
1827  delete CapInfoRAII;
1828  delete CGSI;
1829  delete Scope;
1830  }
1831 };
1832 } // namespace
1833 
1834 static void emitBody(CodeGenFunction &CGF, const Stmt *S, const Stmt *NextLoop,
1835  int MaxLevel, int Level = 0) {
1836  assert(Level < MaxLevel && "Too deep lookup during loop body codegen.");
1837  const Stmt *SimplifiedS = S->IgnoreContainers();
1838  if (const auto *CS = dyn_cast<CompoundStmt>(SimplifiedS)) {
1839  PrettyStackTraceLoc CrashInfo(
1840  CGF.getContext().getSourceManager(), CS->getLBracLoc(),
1841  "LLVM IR generation of compound statement ('{}')");
1842 
1843  // Keep track of the current cleanup stack depth, including debug scopes.
1844  CodeGenFunction::LexicalScope Scope(CGF, S->getSourceRange());
1845  for (const Stmt *CurStmt : CS->body())
1846  emitBody(CGF, CurStmt, NextLoop, MaxLevel, Level);
1847  return;
1848  }
1849  if (SimplifiedS == NextLoop) {
1850  if (auto *Dir = dyn_cast<OMPLoopTransformationDirective>(SimplifiedS))
1851  SimplifiedS = Dir->getTransformedStmt();
1852  if (const auto *CanonLoop = dyn_cast<OMPCanonicalLoop>(SimplifiedS))
1853  SimplifiedS = CanonLoop->getLoopStmt();
1854  if (const auto *For = dyn_cast<ForStmt>(SimplifiedS)) {
1855  S = For->getBody();
1856  } else {
1857  assert(isa<CXXForRangeStmt>(SimplifiedS) &&
1858  "Expected canonical for loop or range-based for loop.");
1859  const auto *CXXFor = cast<CXXForRangeStmt>(SimplifiedS);
1860  CGF.EmitStmt(CXXFor->getLoopVarStmt());
1861  S = CXXFor->getBody();
1862  }
1863  if (Level + 1 < MaxLevel) {
1865  S, /*TryImperfectlyNestedLoops=*/true);
1866  emitBody(CGF, S, NextLoop, MaxLevel, Level + 1);
1867  return;
1868  }
1869  }
1870  CGF.EmitStmt(S);
1871 }
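// Illustrative example (added for exposition; not from the original source):
// for a collapsed nest such as
//
//   #pragma omp for collapse(2)
//   for (int i = 0; i < n; ++i) {
//     int t = f(i);                 // imperfectly nested statement
//     for (int j = 0; j < m; ++j)
//       use(t, i, j);               // 'f' and 'use' are hypothetical
//   }
//
// emitBody descends MaxLevel = 2 loop levels, emitting the intervening
// statements along the way, and finally emits the innermost body.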
1872 
1874  JumpDest LoopExit) {
1875  RunCleanupsScope BodyScope(*this);
1876  // Update counters values on current iteration.
1877  for (const Expr *UE : D.updates())
1878  EmitIgnoredExpr(UE);
1879  // Update the linear variables.
1880  // In distribute directives only loop counters may be marked as linear; no
1881  // need to generate the code for them.
1883  for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
1884  for (const Expr *UE : C->updates())
1885  EmitIgnoredExpr(UE);
1886  }
1887  }
1888 
1889  // On a continue in the body, jump to the end.
1890  JumpDest Continue = getJumpDestInCurrentScope("omp.body.continue");
1891  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
1892  for (const Expr *E : D.finals_conditions()) {
1893  if (!E)
1894  continue;
1895  // Check that the loop counter in a non-rectangular nest fits into the
1896  // iteration space.
1897  llvm::BasicBlock *NextBB = createBasicBlock("omp.body.next");
1898  EmitBranchOnBoolExpr(E, NextBB, Continue.getBlock(),
1899  getProfileCount(D.getBody()));
1900  EmitBlock(NextBB);
1901  }
1902 
1903  OMPPrivateScope InscanScope(*this);
1904  EmitOMPReductionClauseInit(D, InscanScope, /*ForInscan=*/true);
1905  bool IsInscanRegion = InscanScope.Privatize();
1906  if (IsInscanRegion) {
1907  // Need to remember the blocks before and after the scan directive
1908  // to dispatch them correctly depending on the clause used in
1909  // this directive, inclusive or exclusive. For an inclusive scan the natural
1910  // order of the blocks is used; for the exclusive clause the blocks must be
1911  // executed in reverse order.
1912  OMPBeforeScanBlock = createBasicBlock("omp.before.scan.bb");
1913  OMPAfterScanBlock = createBasicBlock("omp.after.scan.bb");
1914  // No need to allocate inscan exit block, in simd mode it is selected in the
1915  // codegen for the scan directive.
1916  if (D.getDirectiveKind() != OMPD_simd && !getLangOpts().OpenMPSimd)
1917  OMPScanExitBlock = createBasicBlock("omp.exit.inscan.bb");
1918  OMPScanDispatch = createBasicBlock("omp.inscan.dispatch");
1919  EmitBranch(OMPScanDispatch);
1920  EmitBlock(OMPBeforeScanBlock);
1921  }
1922 
1923  // Emit loop variables for C++ range loops.
1924  const Stmt *Body =
1926  // Emit loop body.
1927  emitBody(*this, Body,
1929  Body, /*TryImperfectlyNestedLoops=*/true),
1930  D.getLoopsNumber());
1931 
1932  // Jump to the dispatcher at the end of the loop body.
1933  if (IsInscanRegion)
1934  EmitBranch(OMPScanExitBlock);
1935 
1936  // The end (updates/cleanups).
1937  EmitBlock(Continue.getBlock());
1938  BreakContinueStack.pop_back();
1939 }
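// Illustrative example (added for exposition; not from the original source):
// the inscan dispatch blocks created above support scan reductions such as
//
//   #pragma omp simd reduction(inscan, + : x)
//   for (int i = 0; i < n; ++i) {
//     x += a[i];
//     #pragma omp scan inclusive(x)
//     b[i] = x;
//   }
//
// For 'inclusive' the before/after-scan blocks run in their natural order;
// for 'exclusive' they are dispatched in reverse.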
1940 
1941 using EmittedClosureTy = std::pair<llvm::Function *, llvm::Value *>;
1942 
1943 /// Emit a captured statement and return the function as well as its captured
1944 /// closure context.
1946  const CapturedStmt *S) {
1947  LValue CapStruct = ParentCGF.InitCapturedStruct(*S);
1948  CodeGenFunction CGF(ParentCGF.CGM, /*suppressNewContext=*/true);
1949  std::unique_ptr<CodeGenFunction::CGCapturedStmtInfo> CSI =
1950  std::make_unique<CodeGenFunction::CGCapturedStmtInfo>(*S);
1951  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, CSI.get());
1952  llvm::Function *F = CGF.GenerateCapturedStmtFunction(*S);
1953 
1954  return {F, CapStruct.getPointer(ParentCGF)};
1955 }
1956 
1957 /// Emit a call to a previously captured closure.
1958 static llvm::CallInst *
1961  // Append the closure context to the argument.
1962  SmallVector<llvm::Value *> EffectiveArgs;
1963  EffectiveArgs.reserve(Args.size() + 1);
1964  llvm::append_range(EffectiveArgs, Args);
1965  EffectiveArgs.push_back(Cap.second);
1966 
1967  return ParentCGF.Builder.CreateCall(Cap.first, EffectiveArgs);
1968 }
1969 
1970 llvm::CanonicalLoopInfo *
1972  assert(Depth == 1 && "Nested loops with OpenMPIRBuilder not yet implemented");
1973 
1974  // The caller is processing the loop-associated directive containing the \p
1975  // Depth loops nested in \p S. Put the previous pending loop-associated
1976  // directive onto the stack. If the current loop-associated directive is a
1977  // loop transformation directive, it will push its generated loops onto the
1978  // stack such that, together with the loops left here, they form the combined
1979  // loop nest for the parent loop-associated directive.
1980  int ParentExpectedOMPLoopDepth = ExpectedOMPLoopDepth;
1981  ExpectedOMPLoopDepth = Depth;
1982 
1983  EmitStmt(S);
1984  assert(OMPLoopNestStack.size() >= (size_t)Depth && "Found too few loops");
1985 
1986  // The last added loop is the outermost one.
1987  llvm::CanonicalLoopInfo *Result = OMPLoopNestStack.back();
1988 
1989  // Pop the \p Depth loops requested by the call from that stack and restore
1990  // the previous context.
1991  OMPLoopNestStack.pop_back_n(Depth);
1992  ExpectedOMPLoopDepth = ParentExpectedOMPLoopDepth;
1993 
1994  return Result;
1995 }
1996 
1998  const Stmt *SyntacticalLoop = S->getLoopStmt();
1999  if (!getLangOpts().OpenMPIRBuilder) {
2000  // Ignore if OpenMPIRBuilder is not enabled.
2001  EmitStmt(SyntacticalLoop);
2002  return;
2003  }
2004 
2005  LexicalScope ForScope(*this, S->getSourceRange());
2006 
2007  // Emit init statements. The Distance/LoopVar funcs may reference variable
2008  // declarations they contain.
2009  const Stmt *BodyStmt;
2010  if (const auto *For = dyn_cast<ForStmt>(SyntacticalLoop)) {
2011  if (const Stmt *InitStmt = For->getInit())
2012  EmitStmt(InitStmt);
2013  BodyStmt = For->getBody();
2014  } else if (const auto *RangeFor =
2015  dyn_cast<CXXForRangeStmt>(SyntacticalLoop)) {
2016  if (const DeclStmt *RangeStmt = RangeFor->getRangeStmt())
2017  EmitStmt(RangeStmt);
2018  if (const DeclStmt *BeginStmt = RangeFor->getBeginStmt())
2019  EmitStmt(BeginStmt);
2020  if (const DeclStmt *EndStmt = RangeFor->getEndStmt())
2021  EmitStmt(EndStmt);
2022  if (const DeclStmt *LoopVarStmt = RangeFor->getLoopVarStmt())
2023  EmitStmt(LoopVarStmt);
2024  BodyStmt = RangeFor->getBody();
2025  } else
2026  llvm_unreachable("Expected for-stmt or range-based for-stmt");
2027 
2028  // Emit closure for later use. By-value captures will be captured here.
2029  const CapturedStmt *DistanceFunc = S->getDistanceFunc();
2030  EmittedClosureTy DistanceClosure = emitCapturedStmtFunc(*this, DistanceFunc);
2031  const CapturedStmt *LoopVarFunc = S->getLoopVarFunc();
2032  EmittedClosureTy LoopVarClosure = emitCapturedStmtFunc(*this, LoopVarFunc);
2033 
2034  // Call the distance function to get the number of iterations of the loop to
2035  // come.
2036  QualType LogicalTy = DistanceFunc->getCapturedDecl()
2037  ->getParam(0)
2038  ->getType()
2040  Address CountAddr = CreateMemTemp(LogicalTy, ".count.addr");
2041  emitCapturedStmtCall(*this, DistanceClosure, {CountAddr.getPointer()});
2042  llvm::Value *DistVal = Builder.CreateLoad(CountAddr, ".count");
2043 
2044  // Emit the loop structure.
2045  llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
2046  auto BodyGen = [&, this](llvm::OpenMPIRBuilder::InsertPointTy CodeGenIP,
2047  llvm::Value *IndVar) {
2048  Builder.restoreIP(CodeGenIP);
2049 
2050  // Emit the loop body: Convert the logical iteration number to the loop
2051  // variable and emit the body.
2052  const DeclRefExpr *LoopVarRef = S->getLoopVarRef();
2053  LValue LCVal = EmitLValue(LoopVarRef);
2054  Address LoopVarAddress = LCVal.getAddress(*this);
2055  emitCapturedStmtCall(*this, LoopVarClosure,
2056  {LoopVarAddress.getPointer(), IndVar});
2057 
2058  RunCleanupsScope BodyScope(*this);
2059  EmitStmt(BodyStmt);
2060  };
2061  llvm::CanonicalLoopInfo *CL =
2062  OMPBuilder.createCanonicalLoop(Builder, BodyGen, DistVal);
2063 
2064  // Finish up the loop.
2065  Builder.restoreIP(CL->getAfterIP());
2066  ForScope.ForceCleanup();
2067 
2068  // Remember the CanonicalLoopInfo for parent AST nodes consuming it.
2069  OMPLoopNestStack.push_back(CL);
2070 }
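// Illustrative sketch (added for exposition; not from the original source):
// when the OpenMPIRBuilder lowers a loop such as
//
//   for (int v : container)   // 'container' assumed in scope
//     use(v);
//
// the distance closure stores the trip count into CountAddr, and the
// loop-variable closure maps each logical induction value back onto 'v'
// before the body is emitted.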
2071 
2073  const OMPExecutableDirective &S, bool RequiresCleanup, const Expr *LoopCond,
2074  const Expr *IncExpr,
2075  const llvm::function_ref<void(CodeGenFunction &)> BodyGen,
2076  const llvm::function_ref<void(CodeGenFunction &)> PostIncGen) {
2077  auto LoopExit = getJumpDestInCurrentScope("omp.inner.for.end");
2078 
2079  // Start the loop with a block that tests the condition.
2080  auto CondBlock = createBasicBlock("omp.inner.for.cond");
2081  EmitBlock(CondBlock);
2082  const SourceRange R = S.getSourceRange();
2083 
2084  // If attributes are attached, push to the basic block with them.
2085  const auto &OMPED = cast<OMPExecutableDirective>(S);
2086  const CapturedStmt *ICS = OMPED.getInnermostCapturedStmt();
2087  const Stmt *SS = ICS->getCapturedStmt();
2088  const AttributedStmt *AS = dyn_cast_or_null<AttributedStmt>(SS);
2089  OMPLoopNestStack.clear();
2090  if (AS)
2091  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(),
2092  AS->getAttrs(), SourceLocToDebugLoc(R.getBegin()),
2093  SourceLocToDebugLoc(R.getEnd()));
2094  else
2095  LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()),
2096  SourceLocToDebugLoc(R.getEnd()));
2097 
2098  // If there are any cleanups between here and the loop-exit scope,
2099  // create a block to stage a loop exit along.
2100  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
2101  if (RequiresCleanup)
2102  ExitBlock = createBasicBlock("omp.inner.for.cond.cleanup");
2103 
2104  llvm::BasicBlock *LoopBody = createBasicBlock("omp.inner.for.body");
2105 
2106  // Emit condition.
2107  EmitBranchOnBoolExpr(LoopCond, LoopBody, ExitBlock, getProfileCount(&S));
2108  if (ExitBlock != LoopExit.getBlock()) {
2109  EmitBlock(ExitBlock);
2110  EmitBranchThroughCleanup(LoopExit);
2111  }
2112 
2113  EmitBlock(LoopBody);
2114  incrementProfileCounter(&S);
2115 
2116  // Create a block for the increment.
2117  JumpDest Continue = getJumpDestInCurrentScope("omp.inner.for.inc");
2118  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
2119 
2120  BodyGen(*this);
2121 
2122  // Emit "IV = IV + 1" and a back-edge to the condition block.
2123  EmitBlock(Continue.getBlock());
2124  EmitIgnoredExpr(IncExpr);
2125  PostIncGen(*this);
2126  BreakContinueStack.pop_back();
2127  EmitBranch(CondBlock);
2128  LoopStack.pop();
2129  // Emit the fall-through block.
2130  EmitBlock(LoopExit.getBlock());
2131 }
2132 
2134  if (!HaveInsertPoint())
2135  return false;
2136  // Emit inits for the linear variables.
2137  bool HasLinears = false;
2138  for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
2139  for (const Expr *Init : C->inits()) {
2140  HasLinears = true;
2141  const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl());
2142  if (const auto *Ref =
2143  dyn_cast<DeclRefExpr>(VD->getInit()->IgnoreImpCasts())) {
2144  AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
2145  const auto *OrigVD = cast<VarDecl>(Ref->getDecl());
2146  DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
2147  CapturedStmtInfo->lookup(OrigVD) != nullptr,
2148  VD->getInit()->getType(), VK_LValue,
2149  VD->getInit()->getExprLoc());
2150  EmitExprAsInit(
2151  &DRE, VD,
2152  MakeAddrLValue(Emission.getAllocatedAddress(), VD->getType()),
2153  /*capturedByInit=*/false);
2154  EmitAutoVarCleanups(Emission);
2155  } else {
2156  EmitVarDecl(*VD);
2157  }
2158  }
2159  // Emit the linear steps for the linear clauses.
2160  // If a step is not constant, it is pre-calculated before the loop.
2161  if (const auto *CS = cast_or_null<BinaryOperator>(C->getCalcStep()))
2162  if (const auto *SaveRef = cast<DeclRefExpr>(CS->getLHS())) {
2163  EmitVarDecl(*cast<VarDecl>(SaveRef->getDecl()));
2164  // Emit calculation of the linear step.
2165  EmitIgnoredExpr(CS);
2166  }
2167  }
2168  return HasLinears;
2169 }
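// Illustrative example (added for exposition; not from the original source):
// the inits and steps emitted above correspond to clauses such as
//
//   #pragma omp for linear(x : 2)
//   for (int i = 0; i < n; ++i)
//     use(x);   // 'x' advances by 2 per logical iteration
//
// A non-constant step is pre-calculated once before the loop through the
// calc-step expression.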
2170 
2172  const OMPLoopDirective &D,
2173  const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
2174  if (!HaveInsertPoint())
2175  return;
2176  llvm::BasicBlock *DoneBB = nullptr;
2177  // Emit the final values of the linear variables.
2178  for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
2179  auto IC = C->varlist_begin();
2180  for (const Expr *F : C->finals()) {
2181  if (!DoneBB) {
2182  if (llvm::Value *Cond = CondGen(*this)) {
2183  // If the first post-update expression is found, emit conditional
2184  // block if it was requested.
2185  llvm::BasicBlock *ThenBB = createBasicBlock(".omp.linear.pu");
2186  DoneBB = createBasicBlock(".omp.linear.pu.done");
2187  Builder.CreateCondBr(Cond, ThenBB, DoneBB);
2188  EmitBlock(ThenBB);
2189  }
2190  }
2191  const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl());
2192  DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
2193  CapturedStmtInfo->lookup(OrigVD) != nullptr,
2194  (*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
2195  Address OrigAddr = EmitLValue(&DRE).getAddress(*this);
2196  CodeGenFunction::OMPPrivateScope VarScope(*this);
2197  VarScope.addPrivate(OrigVD, OrigAddr);
2198  (void)VarScope.Privatize();
2199  EmitIgnoredExpr(F);
2200  ++IC;
2201  }
2202  if (const Expr *PostUpdate = C->getPostUpdateExpr())
2203  EmitIgnoredExpr(PostUpdate);
2204  }
2205  if (DoneBB)
2206  EmitBlock(DoneBB, /*IsFinished=*/true);
2207 }
2208 
2210  const OMPExecutableDirective &D) {
2211  if (!CGF.HaveInsertPoint())
2212  return;
2213  for (const auto *Clause : D.getClausesOfKind<OMPAlignedClause>()) {
2214  llvm::APInt ClauseAlignment(64, 0);
2215  if (const Expr *AlignmentExpr = Clause->getAlignment()) {
2216  auto *AlignmentCI =
2217  cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr));
2218  ClauseAlignment = AlignmentCI->getValue();
2219  }
2220  for (const Expr *E : Clause->varlists()) {
2221  llvm::APInt Alignment(ClauseAlignment);
2222  if (Alignment == 0) {
2223  // OpenMP [2.8.1, Description]
2224  // If no optional parameter is specified, implementation-defined default
2225  // alignments for SIMD instructions on the target platforms are assumed.
2226  Alignment =
2227  CGF.getContext()
2229  E->getType()->getPointeeType()))
2230  .getQuantity();
2231  }
2232  assert((Alignment == 0 || Alignment.isPowerOf2()) &&
2233  "alignment is not power of 2");
2234  if (Alignment != 0) {
2235  llvm::Value *PtrValue = CGF.EmitScalarExpr(E);
2237  PtrValue, E, /*No second loc needed*/ SourceLocation(),
2238  llvm::ConstantInt::get(CGF.getLLVMContext(), Alignment));
2239  }
2240  }
2241  }
2242 }
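// Illustrative example (added for exposition; not from the original source):
//
//   #pragma omp simd aligned(p : 64)
//   for (int i = 0; i < n; ++i)
//     p[i] *= 2.0;
//
// emits an assumption that 'p' is 64-byte aligned; if no alignment argument
// is given, the target's default SIMD alignment for the pointee type is
// assumed instead.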
2243 
2245  const OMPLoopDirective &S, CodeGenFunction::OMPPrivateScope &LoopScope) {
2246  if (!HaveInsertPoint())
2247  return;
2248  auto I = S.private_counters().begin();
2249  for (const Expr *E : S.counters()) {
2250  const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
2251  const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl());
2252  // Emit var without initialization.
2253  AutoVarEmission VarEmission = EmitAutoVarAlloca(*PrivateVD);
2254  EmitAutoVarCleanups(VarEmission);
2255  LocalDeclMap.erase(PrivateVD);
2256  (void)LoopScope.addPrivate(VD, VarEmission.getAllocatedAddress());
2257  if (LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD) ||
2258  VD->hasGlobalStorage()) {
2259  DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD),
2260  LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD),
2261  E->getType(), VK_LValue, E->getExprLoc());
2262  (void)LoopScope.addPrivate(PrivateVD, EmitLValue(&DRE).getAddress(*this));
2263  } else {
2264  (void)LoopScope.addPrivate(PrivateVD, VarEmission.getAllocatedAddress());
2265  }
2266  ++I;
2267  }
2268  // Privatize extra loop counters used in loops for ordered(n) clauses.
2269  for (const auto *C : S.getClausesOfKind<OMPOrderedClause>()) {
2270  if (!C->getNumForLoops())
2271  continue;
2272  for (unsigned I = S.getLoopsNumber(), E = C->getLoopNumIterations().size();
2273  I < E; ++I) {
2274  const auto *DRE = cast<DeclRefExpr>(C->getLoopCounter(I));
2275  const auto *VD = cast<VarDecl>(DRE->getDecl());
2276  // Override only those variables that can be captured to avoid re-emission
2277  // of the variables declared within the loops.
2278  if (DRE->refersToEnclosingVariableOrCapture()) {
2279  (void)LoopScope.addPrivate(
2280  VD, CreateMemTemp(DRE->getType(), VD->getName()));
2281  }
2282  }
2283  }
2284 }
2285 
2286 static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S,
2287  const Expr *Cond, llvm::BasicBlock *TrueBlock,
2288  llvm::BasicBlock *FalseBlock, uint64_t TrueCount) {
2289  if (!CGF.HaveInsertPoint())
2290  return;
2291  {
2292  CodeGenFunction::OMPPrivateScope PreCondScope(CGF);
2293  CGF.EmitOMPPrivateLoopCounters(S, PreCondScope);
2294  (void)PreCondScope.Privatize();
2295  // Get initial values of real counters.
2296  for (const Expr *I : S.inits()) {
2297  CGF.EmitIgnoredExpr(I);
2298  }
2299  }
2300  // Create temp loop control variables with their init values to support
2301  // non-rectangular loops.
2302  CodeGenFunction::OMPMapVars PreCondVars;
2303  for (const Expr *E : S.dependent_counters()) {
2304  if (!E)
2305  continue;
2306  assert(!E->getType().getNonReferenceType()->isRecordType() &&
2307  "dependent counter must not be an iterator.");
2308  const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
2309  Address CounterAddr =
2311  (void)PreCondVars.setVarAddr(CGF, VD, CounterAddr);
2312  }
2313  (void)PreCondVars.apply(CGF);
2314  for (const Expr *E : S.dependent_inits()) {
2315  if (!E)
2316  continue;
2317  CGF.EmitIgnoredExpr(E);
2318  }
2319  // Check that the loop is executed at least once.
2320  CGF.EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount);
2321  PreCondVars.restore(CGF);
2322 }
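// Illustrative example (added for exposition; not from the original source):
// the dependent-counter handling above supports non-rectangular nests such as
//
//   #pragma omp for collapse(2)
//   for (int i = 0; i < n; ++i)
//     for (int j = 0; j < i; ++j)   // bound of 'j' depends on 'i'
//       use(i, j);
//
// where temporary copies of the counters are required to evaluate the
// "executed at least once" precondition.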
2323 
2325  const OMPLoopDirective &D, CodeGenFunction::OMPPrivateScope &PrivateScope) {
2326  if (!HaveInsertPoint())
2327  return;
2330  const auto *LoopDirective = cast<OMPLoopDirective>(&D);
2331  for (const Expr *C : LoopDirective->counters()) {
2332  SIMDLCVs.insert(
2333  cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
2334  }
2335  }
2336  for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
2337  auto CurPrivate = C->privates().begin();
2338  for (const Expr *E : C->varlists()) {
2339  const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
2340  const auto *PrivateVD =
2341  cast<VarDecl>(cast<DeclRefExpr>(*CurPrivate)->getDecl());
2342  if (!SIMDLCVs.count(VD->getCanonicalDecl())) {
2343  // Emit private VarDecl with copy init.
2344  EmitVarDecl(*PrivateVD);
2345  bool IsRegistered =
2346  PrivateScope.addPrivate(VD, GetAddrOfLocalVar(PrivateVD));
2347  assert(IsRegistered && "linear var already registered as private");
2348  // Silence the warning about unused variable.
2349  (void)IsRegistered;
2350  } else {
2351  EmitVarDecl(*PrivateVD);
2352  }
2353  ++CurPrivate;
2354  }
2355  }
2356 }
2357 
2359  const OMPExecutableDirective &D) {
2360  if (!CGF.HaveInsertPoint())
2361  return;
2362  if (const auto *C = D.getSingleClause<OMPSimdlenClause>()) {
2363  RValue Len = CGF.EmitAnyExpr(C->getSimdlen(), AggValueSlot::ignored(),
2364  /*ignoreResult=*/true);
2365  auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
2366  CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
2367  // In presence of finite 'safelen', it may be unsafe to mark all
2368  // the memory instructions parallel, because loop-carried
2369  // dependences of 'safelen' iterations are possible.
2371  } else if (const auto *C = D.getSingleClause<OMPSafelenClause>()) {
2372  RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(),
2373  /*ignoreResult=*/true);
2374  auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
2375  CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
2376  // In presence of finite 'safelen', it may be unsafe to mark all
2377  // the memory instructions parallel, because loop-carried
2378  // dependences of 'safelen' iterations are possible.
2379  CGF.LoopStack.setParallel(/*Enable=*/false);
2380  }
2381 }
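// Illustrative example (added for exposition; not from the original source):
//
//   #pragma omp simd simdlen(8) safelen(16)
//   for (int i = 0; i < n; ++i)
//     a[i] = a[i] + a[i + 16];
//
// 'simdlen(8)' sets the preferred vectorization width, while a finite
// 'safelen(16)' bounds the legal width and prevents marking all memory
// instructions as parallel.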
2382 
2384  // Walk the clauses and process safelen/simdlen and order clauses.
2385  LoopStack.setParallel(/*Enable=*/true);
2386  LoopStack.setVectorizeEnable();
2387  emitSimdlenSafelenClause(*this, D);
2388  if (const auto *C = D.getSingleClause<OMPOrderClause>())
2389  if (C->getKind() == OMPC_ORDER_concurrent)
2390  LoopStack.setParallel(/*Enable=*/true);
2391  if ((D.getDirectiveKind() == OMPD_simd ||
2392  (getLangOpts().OpenMPSimd &&
2394  llvm::any_of(D.getClausesOfKind<OMPReductionClause>(),
2395  [](const OMPReductionClause *C) {
2396  return C->getModifier() == OMPC_REDUCTION_inscan;
2397  }))
2398  // Disable parallel access in case of prefix sum.
2399  LoopStack.setParallel(/*Enable=*/false);
2400 }
2401 
2403  const OMPLoopDirective &D,
2404  const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
2405  if (!HaveInsertPoint())
2406  return;
2407  llvm::BasicBlock *DoneBB = nullptr;
2408  auto IC = D.counters().begin();
2409  auto IPC = D.private_counters().begin();
2410  for (const Expr *F : D.finals()) {
2411  const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl());
2412  const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>((*IPC))->getDecl());
2413  const auto *CED = dyn_cast<OMPCapturedExprDecl>(OrigVD);
2414  if (LocalDeclMap.count(OrigVD) || CapturedStmtInfo->lookup(OrigVD) ||
2415  OrigVD->hasGlobalStorage() || CED) {
2416  if (!DoneBB) {
2417  if (llvm::Value *Cond = CondGen(*this)) {
2418  // If the first post-update expression is found, emit conditional
2419  // block if it was requested.
2420  llvm::BasicBlock *ThenBB = createBasicBlock(".omp.final.then");
2421  DoneBB = createBasicBlock(".omp.final.done");
2422  Builder.CreateCondBr(Cond, ThenBB, DoneBB);
2423  EmitBlock(ThenBB);
2424  }
2425  }
2426  Address OrigAddr = Address::invalid();
2427  if (CED) {
2428  OrigAddr =
2429  EmitLValue(CED->getInit()->IgnoreImpCasts()).getAddress(*this);
2430  } else {
2431  DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(PrivateVD),
2432  /*RefersToEnclosingVariableOrCapture=*/false,
2433  (*IPC)->getType(), VK_LValue, (*IPC)->getExprLoc());
2434  OrigAddr = EmitLValue(&DRE).getAddress(*this);
2435  }
2436  OMPPrivateScope VarScope(*this);
2437  VarScope.addPrivate(OrigVD, OrigAddr);
2438  (void)VarScope.Privatize();
2439  EmitIgnoredExpr(F);
2440  }
2441  ++IC;
2442  ++IPC;
2443  }
2444  if (DoneBB)
2445  EmitBlock(DoneBB, /*IsFinished=*/true);
2446 }
2447 
2449  const OMPLoopDirective &S,
2451  CGF.EmitOMPLoopBody(S, LoopExit);
2452  CGF.EmitStopPoint(&S);
2453 }
2454 
2455 /// Emit a helper variable and return corresponding lvalue.
2457  const DeclRefExpr *Helper) {
2458  auto VDecl = cast<VarDecl>(Helper->getDecl());
2459  CGF.EmitVarDecl(*VDecl);
2460  return CGF.EmitLValue(Helper);
2461 }
2462 
2464  const RegionCodeGenTy &SimdInitGen,
2465  const RegionCodeGenTy &BodyCodeGen) {
2466  auto &&ThenGen = [&S, &SimdInitGen, &BodyCodeGen](CodeGenFunction &CGF,
2467  PrePostActionTy &) {
2468  CGOpenMPRuntime::NontemporalDeclsRAII NontemporalsRegion(CGF.CGM, S);
2470  SimdInitGen(CGF);
2471 
2472  BodyCodeGen(CGF);
2473  };
2474  auto &&ElseGen = [&BodyCodeGen](CodeGenFunction &CGF, PrePostActionTy &) {
2476  CGF.LoopStack.setVectorizeEnable(/*Enable=*/false);
2477 
2478  BodyCodeGen(CGF);
2479  };
2480  const Expr *IfCond = nullptr;
2481  if (isOpenMPSimdDirective(S.getDirectiveKind())) {
2482  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
2483  if (CGF.getLangOpts().OpenMP >= 50 &&
2484  (C->getNameModifier() == OMPD_unknown ||
2485  C->getNameModifier() == OMPD_simd)) {
2486  IfCond = C->getCondition();
2487  break;
2488  }
2489  }
2490  }
2491  if (IfCond) {
2492  CGF.CGM.getOpenMPRuntime().emitIfClause(CGF, IfCond, ThenGen, ElseGen);
2493  } else {
2494  RegionCodeGenTy ThenRCG(ThenGen);
2495  ThenRCG(CGF);
2496  }
2497 }
2498 
2500  PrePostActionTy &Action) {
2501  Action.Enter(CGF);
2502  assert(isOpenMPSimdDirective(S.getDirectiveKind()) &&
2503  "Expected simd directive");
2504  OMPLoopScope PreInitScope(CGF, S);
2505  // if (PreCond) {
2506  // for (IV in 0..LastIteration) BODY;
2507  // <Final counter/linear vars updates>;
2508  // }
2509  //
2510  if (isOpenMPDistributeDirective(S.getDirectiveKind()) ||
2511  isOpenMPWorksharingDirective(S.getDirectiveKind()) ||
2512  isOpenMPTaskLoopDirective(S.getDirectiveKind())) {
2513  (void)EmitOMPHelperVar(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable()));
2514  (void)EmitOMPHelperVar(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable()));
2515  }
2516 
2517  // Emit: if (PreCond) - begin.
2518  // If the condition constant folds and can be elided, avoid emitting the
2519  // whole loop.
2520  bool CondConstant;
2521  llvm::BasicBlock *ContBlock = nullptr;
2522  if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
2523  if (!CondConstant)
2524  return;
2525  } else {
2526  llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("simd.if.then");
2527  ContBlock = CGF.createBasicBlock("simd.if.end");
2528  emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
2529  CGF.getProfileCount(&S));
2530  CGF.EmitBlock(ThenBlock);
2531  CGF.incrementProfileCounter(&S);
2532  }
2533 
2534  // Emit the loop iteration variable.
2535  const Expr *IVExpr = S.getIterationVariable();
2536  const auto *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
2537  CGF.EmitVarDecl(*IVDecl);
2538  CGF.EmitIgnoredExpr(S.getInit());
2539 
2540  // Emit the iterations count variable.
2541  // If it is not a variable, Sema decided to calculate the iteration count
2542  // on each iteration (e.g., because it is foldable into a constant).
2543  if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
2544  CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
2545  // Emit calculation of the iterations count.
2546  CGF.EmitIgnoredExpr(S.getCalcLastIteration());
2547  }
2548 
2549  emitAlignedClause(CGF, S);
2550  (void)CGF.EmitOMPLinearClauseInit(S);
2551  {
2552  CodeGenFunction::OMPPrivateScope LoopScope(CGF);
2553  CGF.EmitOMPPrivateLoopCounters(S, LoopScope);
2554  CGF.EmitOMPLinearClause(S, LoopScope);
2555  CGF.EmitOMPPrivateClause(S, LoopScope);
2556  CGF.EmitOMPReductionClauseInit(S, LoopScope);
2558  CGF, S, CGF.EmitLValue(S.getIterationVariable()));
2559  bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
2560  (void)LoopScope.Privatize();
2561  if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
2563 
2565  CGF, S,
2566  [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2567  CGF.EmitOMPSimdInit(S);
2568  },
2569  [&S, &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) {
2570  CGF.EmitOMPInnerLoop(
2571  S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(),
2572  [&S](CodeGenFunction &CGF) {
2573  emitOMPLoopBodyWithStopPoint(CGF, S,
2574  CodeGenFunction::JumpDest());
2575  },
2576  [](CodeGenFunction &) {});
2577  });
2578  CGF.EmitOMPSimdFinal(S, [](CodeGenFunction &) { return nullptr; });
2579  // Emit final copy of the lastprivate variables at the end of loops.
2580  if (HasLastprivateClause)
2581  CGF.EmitOMPLastprivateClauseFinal(S, /*NoFinals=*/true);
2582  CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_simd);
2584  [](CodeGenFunction &) { return nullptr; });
2585  }
2586  CGF.EmitOMPLinearClauseFinal(S, [](CodeGenFunction &) { return nullptr; });
2587  // Emit: if (PreCond) - end.
2588  if (ContBlock) {
2589  CGF.EmitBranch(ContBlock);
2590  CGF.EmitBlock(ContBlock, true);
2591  }
2592 }
2593 
2595  // Check for unsupported clauses.
2596  if (!S.clauses().empty()) {
2597  // Currently, no clauses are supported.
2598  return false;
2599  }
2600 
2601  // Check if we have a statement with the ordered directive.
2602  // Visit the statement hierarchy to find a compound statement
2603  // with an ordered directive in it.
2604  if (const auto *CanonLoop = dyn_cast<OMPCanonicalLoop>(S.getRawStmt())) {
2605  if (const Stmt *SyntacticalLoop = CanonLoop->getLoopStmt()) {
2606  for (const Stmt *SubStmt : SyntacticalLoop->children()) {
2607  if (!SubStmt)
2608  continue;
2609  if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(SubStmt)) {
2610  for (const Stmt *CSSubStmt : CS->children()) {
2611  if (!CSSubStmt)
2612  continue;
2613  if (isa<OMPOrderedDirective>(CSSubStmt)) {
2614  return false;
2615  }
2616  }
2617  }
2618  }
2619  }
2620  }
2621  return true;
2622 }
2623 
2625  bool UseOMPIRBuilder =
2626  CGM.getLangOpts().OpenMPIRBuilder && isSupportedByOpenMPIRBuilder(S);
2627  if (UseOMPIRBuilder) {
2628  auto &&CodeGenIRBuilder = [this, &S, UseOMPIRBuilder](CodeGenFunction &CGF,
2629  PrePostActionTy &) {
2630  // Use the OpenMPIRBuilder if enabled.
2631  if (UseOMPIRBuilder) {
2632  // Emit the associated statement and get its loop representation.
2633  llvm::DebugLoc DL = SourceLocToDebugLoc(S.getBeginLoc());
2634  const Stmt *Inner = S.getRawStmt();
2635  llvm::CanonicalLoopInfo *CLI =
2636  EmitOMPCollapsedCanonicalLoopNest(Inner, 1);
2637 
2638  llvm::OpenMPIRBuilder &OMPBuilder =
2639  CGM.getOpenMPRuntime().getOMPBuilder();
2640  // Add SIMD specific metadata
2641  OMPBuilder.applySimd(DL, CLI);
2642  return;
2643  }
2644  };
2645  {
2646  auto LPCRegion =
2648  OMPLexicalScope Scope(*this, S, OMPD_unknown);
2649  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd,
2650  CodeGenIRBuilder);
2651  }
2652  return;
2653  }
2654 
2655  ParentLoopDirectiveForScanRegion ScanRegion(*this, S);
2656  OMPFirstScanLoop = true;
2657  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
2658  emitOMPSimdRegion(CGF, S, Action);
2659  };
2660  {
2661  auto LPCRegion =
2663  OMPLexicalScope Scope(*this, S, OMPD_unknown);
2664  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
2665  }
2666  // Check for outer lastprivate conditional update.
2668 }
2669 
2671  // Emit the de-sugared statement.
2672  OMPTransformDirectiveScopeRAII TileScope(*this, &S);
2673  EmitStmt(S.getTransformedStmt());
2674 }
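// Illustrative example (added for exposition; not from the original source):
// the tile construct is de-sugared by Sema, so
//
//   #pragma omp tile sizes(32, 8)
//   for (int i = 0; i < n; ++i)
//     for (int j = 0; j < m; ++j)
//       use(i, j);
//
// is emitted here simply by generating code for the transformed (tiled)
// loop nest.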
2675 
2677  bool UseOMPIRBuilder = CGM.getLangOpts().OpenMPIRBuilder;
2678 
2679  if (UseOMPIRBuilder) {
2680  auto DL = SourceLocToDebugLoc(S.getBeginLoc());
2681  const Stmt *Inner = S.getRawStmt();
2682 
2683  // Consume the nested loop. Clear the entire remaining loop stack because a
2684  // fully unrolled loop is non-transformable. For partial unrolling the
2685  // generated outer loop is pushed back onto the stack.
2686  llvm::CanonicalLoopInfo *CLI = EmitOMPCollapsedCanonicalLoopNest(Inner, 1);
2687  OMPLoopNestStack.clear();
2688 
2689  llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
2690 
2691  bool NeedsUnrolledCLI = ExpectedOMPLoopDepth >= 1;
2692  llvm::CanonicalLoopInfo *UnrolledCLI = nullptr;
2693 
2694  if (S.hasClausesOfKind<OMPFullClause>()) {
2695  assert(ExpectedOMPLoopDepth == 0);
2696  OMPBuilder.unrollLoopFull(DL, CLI);
2697  } else if (auto *PartialClause = S.getSingleClause<OMPPartialClause>()) {
2698  uint64_t Factor = 0;
2699  if (Expr *FactorExpr = PartialClause->getFactor()) {
2700  Factor = FactorExpr->EvaluateKnownConstInt(getContext()).getZExtValue();
2701  assert(Factor >= 1 && "Only positive factors are valid");
2702  }
2703  OMPBuilder.unrollLoopPartial(DL, CLI, Factor,
2704  NeedsUnrolledCLI ? &UnrolledCLI : nullptr);
2705  } else {
2706  OMPBuilder.unrollLoopHeuristic(DL, CLI);
2707  }
2708 
2709  assert((!NeedsUnrolledCLI || UnrolledCLI) &&
2710  "NeedsUnrolledCLI implies UnrolledCLI to be set");
2711  if (UnrolledCLI)
2712  OMPLoopNestStack.push_back(UnrolledCLI);
2713 
2714  return;
2715  }
2716 
2717  // This function is only called if the unrolled loop is not consumed by any
2718  // other loop-associated construct. Such a loop-associated construct will have
2719  // used the transformed AST.
2720 
2721  // Set the unroll metadata for the next emitted loop.
2722  LoopStack.setUnrollState(LoopAttributes::Enable);
2723 
2724  if (S.hasClausesOfKind<OMPFullClause>()) {
2725  LoopStack.setUnrollState(LoopAttributes::Full);
2726  } else if (auto *PartialClause = S.getSingleClause<OMPPartialClause>()) {
2727  if (Expr *FactorExpr = PartialClause->getFactor()) {
2728  uint64_t Factor =
2729  FactorExpr->EvaluateKnownConstInt(getContext()).getZExtValue();
2730  assert(Factor >= 1 && "Only positive factors are valid");
2731  LoopStack.setUnrollCount(Factor);
2732  }
2733  }
2734 
2735  EmitStmt(S.getAssociatedStmt());
2736 }
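// Illustrative example (added for exposition; not from the original source):
//
//   #pragma omp unroll partial(4)
//   for (int i = 0; i < n; ++i)
//     use(i);
//
// With the OpenMPIRBuilder the unrolling is applied directly (full, partial
// by the given factor, or heuristic); otherwise the request is recorded as
// loop metadata on the next emitted loop.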
2737 
2738 void CodeGenFunction::EmitOMPOuterLoop(
2739  bool DynamicOrOrdered, bool IsMonotonic, const OMPLoopDirective &S,
2741  const CodeGenFunction::OMPLoopArguments &LoopArgs,
2742  const CodeGenFunction::CodeGenLoopTy &CodeGenLoop,
2743  const CodeGenFunction::CodeGenOrderedTy &CodeGenOrdered) {
2744  CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();
2745 
2746  const Expr *IVExpr = S.getIterationVariable();
2747  const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
2748  const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
2749 
2750  JumpDest LoopExit = getJumpDestInCurrentScope("omp.dispatch.end");
2751 
2752  // Start the loop with a block that tests the condition.
2753  llvm::BasicBlock *CondBlock = createBasicBlock("omp.dispatch.cond");
2754  EmitBlock(CondBlock);
2755  const SourceRange R = S.getSourceRange();
2756  OMPLoopNestStack.clear();
2757  LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()),
2758  SourceLocToDebugLoc(R.getEnd()));
2759 
2760  llvm::Value *BoolCondVal = nullptr;
2761  if (!DynamicOrOrdered) {
2762  // UB = min(UB, GlobalUB) or
2763  // UB = min(UB, PrevUB) for combined loop sharing constructs (e.g.
2764  // 'distribute parallel for')
2765  EmitIgnoredExpr(LoopArgs.EUB);
2766  // IV = LB
2767  EmitIgnoredExpr(LoopArgs.Init);
2768  // IV < UB
2769  BoolCondVal = EvaluateExprAsBool(LoopArgs.Cond);
2770  } else {
2771  BoolCondVal =
2772  RT.emitForNext(*this, S.getBeginLoc(), IVSize, IVSigned, LoopArgs.IL,
2773  LoopArgs.LB, LoopArgs.UB, LoopArgs.ST);
2774  }
2775 
2776  // If there are any cleanups between here and the loop-exit scope,
2777  // create a block to stage a loop exit along.
2778  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
2779  if (LoopScope.requiresCleanups())
2780  ExitBlock = createBasicBlock("omp.dispatch.cleanup");
2781 
2782  llvm::BasicBlock *LoopBody = createBasicBlock("omp.dispatch.body");
2783  Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);
2784  if (ExitBlock != LoopExit.getBlock()) {
2785  EmitBlock(ExitBlock);
2786  EmitBranchThroughCleanup(LoopExit);
2787  }
2788  EmitBlock(LoopBody);
2789 
2790  // Emit "IV = LB" (in case of static schedule, we have already calculated new
2791  // LB for loop condition and emitted it above).
2792  if (DynamicOrOrdered)
2793  EmitIgnoredExpr(LoopArgs.Init);
2794 
2795  // Create a block for the increment.
2796  JumpDest Continue = getJumpDestInCurrentScope("omp.dispatch.inc");
2797  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
2798 
2800  *this, S,
2801  [&S, IsMonotonic](CodeGenFunction &CGF, PrePostActionTy &) {
2802  // Generate !llvm.loop.parallel metadata for loads and stores for loops
2803  // with dynamic/guided scheduling and without ordered clause.
2804  if (!isOpenMPSimdDirective(S.getDirectiveKind())) {
2805  CGF.LoopStack.setParallel(!IsMonotonic);
2806  if (const auto *C = S.getSingleClause<OMPOrderClause>())
2807  if (C->getKind() == OMPC_ORDER_concurrent)
2808  CGF.LoopStack.setParallel(/*Enable=*/true);
2809  } else {
2810  CGF.EmitOMPSimdInit(S);
2811  }
2812  },
2813  [&S, &LoopArgs, LoopExit, &CodeGenLoop, IVSize, IVSigned, &CodeGenOrdered,
2814  &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) {
2815  SourceLocation Loc = S.getBeginLoc();
2816  // when 'distribute' is not combined with a 'for':
2817  // while (idx <= UB) { BODY; ++idx; }
2818  // when 'distribute' is combined with a 'for'
2819  // (e.g. 'distribute parallel for')
2820  // while (idx <= UB) { <CodeGen rest of pragma>; idx += ST; }
2821  CGF.EmitOMPInnerLoop(
2822  S, LoopScope.requiresCleanups(), LoopArgs.Cond, LoopArgs.IncExpr,
2823  [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) {
2824  CodeGenLoop(CGF, S, LoopExit);
2825  },
2826  [IVSize, IVSigned, Loc, &CodeGenOrdered](CodeGenFunction &CGF) {
2827  CodeGenOrdered(CGF, Loc, IVSize, IVSigned);
2828  });
2829  });
2830 
2831  EmitBlock(Continue.getBlock());
2832  BreakContinueStack.pop_back();
2833  if (!DynamicOrOrdered) {
2834  // Emit "LB = LB + Stride", "UB = UB + Stride".
2835  EmitIgnoredExpr(LoopArgs.NextLB);
2836  EmitIgnoredExpr(LoopArgs.NextUB);
2837  }
2838 
2839  EmitBranch(CondBlock);
2840  OMPLoopNestStack.clear();
2841  LoopStack.pop();
2842  // Emit the fall-through block.
2843  EmitBlock(LoopExit.getBlock());
2844 
2845  // Tell the runtime we are done.
2846  auto &&CodeGen = [DynamicOrOrdered, &S](CodeGenFunction &CGF) {
2847  if (!DynamicOrOrdered)
2848  CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(),
2849  S.getDirectiveKind());
2850  };
2851  OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen);
2852 }
2853 
2854 void CodeGenFunction::EmitOMPForOuterLoop(
2855  const OpenMPScheduleTy &ScheduleKind, bool IsMonotonic,
2856  const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered,
2857  const OMPLoopArguments &LoopArgs,
2858  const CodeGenDispatchBoundsTy &CGDispatchBounds) {
2859  CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();
2860 
2861  // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime).
2862  const bool DynamicOrOrdered = Ordered || RT.isDynamic(ScheduleKind.Schedule);
2863 
2864  assert((Ordered || !RT.isStaticNonchunked(ScheduleKind.Schedule,
2865  LoopArgs.Chunk != nullptr)) &&
2866  "static non-chunked schedule does not need outer loop");
2867 
2868  // Emit outer loop.
2869  //
2870  // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
2871  // When schedule(dynamic,chunk_size) is specified, the iterations are
2872  // distributed to threads in the team in chunks as the threads request them.
2873  // Each thread executes a chunk of iterations, then requests another chunk,
2874  // until no chunks remain to be distributed. Each chunk contains chunk_size
2875  // iterations, except for the last chunk to be distributed, which may have
2876  // fewer iterations. When no chunk_size is specified, it defaults to 1.
2877  //
2878  // When schedule(guided,chunk_size) is specified, the iterations are assigned
2879  // to threads in the team in chunks as the executing threads request them.
2880  // Each thread executes a chunk of iterations, then requests another chunk,
2881  // until no chunks remain to be assigned. For a chunk_size of 1, the size of
2882  // each chunk is proportional to the number of unassigned iterations divided
2883  // by the number of threads in the team, decreasing to 1. For a chunk_size
2884  // with value k (greater than 1), the size of each chunk is determined in the
2885  // same way, with the restriction that the chunks do not contain fewer than k
2886  // iterations (except for the last chunk to be assigned, which may have fewer
2887  // than k iterations).
2888  //
2889  // When schedule(auto) is specified, the decision regarding scheduling is
2890  // delegated to the compiler and/or runtime system. The programmer gives the
2891  // implementation the freedom to choose any possible mapping of iterations to
2892  // threads in the team.
2893  //
2894  // When schedule(runtime) is specified, the decision regarding scheduling is
2895  // deferred until run time, and the schedule and chunk size are taken from the
2896  // run-sched-var ICV. If the ICV is set to auto, the schedule is
2897  // implementation defined.
2898  //
2899  // while(__kmpc_dispatch_next(&LB, &UB)) {
2900  // idx = LB;
2901  // while (idx <= UB) { BODY; ++idx;
2902  // __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only.
2903  // } // inner loop
2904  // }
2905  //
2906  // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
2907  // When schedule(static, chunk_size) is specified, iterations are divided into
2908  // chunks of size chunk_size, and the chunks are assigned to the threads in
2909  // the team in a round-robin fashion in the order of the thread number.
2910  //
2911  // while(UB = min(UB, GlobalUB), idx = LB, idx < UB) {
2912  // while (idx <= UB) { BODY; ++idx; } // inner loop
2913  // LB = LB + ST;
2914  // UB = UB + ST;
2915  // }
2916  //
2917 
2918  const Expr *IVExpr = S.getIterationVariable();
2919  const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
2920  const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
2921 
2922  if (DynamicOrOrdered) {
2923  const std::pair<llvm::Value *, llvm::Value *> DispatchBounds =
2924  CGDispatchBounds(*this, S, LoopArgs.LB, LoopArgs.UB);
2925  llvm::Value *LBVal = DispatchBounds.first;
2926  llvm::Value *UBVal = DispatchBounds.second;
2927  CGOpenMPRuntime::DispatchRTInput DispatchRTInputValues = {LBVal, UBVal,
2928  LoopArgs.Chunk};
2929  RT.emitForDispatchInit(*this, S.getBeginLoc(), ScheduleKind, IVSize,
2930  IVSigned, Ordered, DispatchRTInputValues);
2931  } else {
2932  CGOpenMPRuntime::StaticRTInput StaticInit(
2933  IVSize, IVSigned, Ordered, LoopArgs.IL, LoopArgs.LB, LoopArgs.UB,
2934  LoopArgs.ST, LoopArgs.Chunk);
2935  RT.emitForStaticInit(*this, S.getBeginLoc(), S.getDirectiveKind(),
2936  ScheduleKind, StaticInit);
2937  }
2938 
2939  auto &&CodeGenOrdered = [Ordered](CodeGenFunction &CGF, SourceLocation Loc,
2940  const unsigned IVSize,
2941  const bool IVSigned) {
2942  if (Ordered) {
2943  CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd(CGF, Loc, IVSize,
2944  IVSigned);
2945  }
2946  };
2947 
2948  OMPLoopArguments OuterLoopArgs(LoopArgs.LB, LoopArgs.UB, LoopArgs.ST,
2949  LoopArgs.IL, LoopArgs.Chunk, LoopArgs.EUB);
2950  OuterLoopArgs.IncExpr = S.getInc();
2951  OuterLoopArgs.Init = S.getInit();
2952  OuterLoopArgs.Cond = S.getCond();
2953  OuterLoopArgs.NextLB = S.getNextLowerBound();
2954  OuterLoopArgs.NextUB = S.getNextUpperBound();
2955  EmitOMPOuterLoop(DynamicOrOrdered, IsMonotonic, S, LoopScope, OuterLoopArgs,
2956  emitOMPLoopBodyWithStopPoint, CodeGenOrdered);
2957 }
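// Editor's sketch of the dynamic/ordered lowering above, assuming the usual
// libomp entry points for a signed 32-bit IV (the exact names depend on
// IVSize and IVSigned):
//
//   __kmpc_dispatch_init_4(&loc, tid, sched, LB, UB, ST, Chunk);
//   while (__kmpc_dispatch_next_4(&loc, tid, &IL, &LB, &UB, &ST)) {
//     for (IV = LB; IV <= UB; ++IV) BODY;
//   }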
2958 
2959 static void emitEmptyOrdered(CodeGenFunction &, SourceLocation Loc,
2960  const unsigned IVSize, const bool IVSigned) {}
2961 
2962 void CodeGenFunction::EmitOMPDistributeOuterLoop(
2963  OpenMPDistScheduleClauseKind ScheduleKind, const OMPLoopDirective &S,
2964  OMPPrivateScope &LoopScope, const OMPLoopArguments &LoopArgs,
2965  const CodeGenLoopTy &CodeGenLoopContent) {
2966 
2967  CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();
2968 
2969  // Emit outer loop.
2970  // Same behavior as an OMPForOuterLoop, except that the schedule cannot be
2971  // dynamic.
2972  //
2973 
2974  const Expr *IVExpr = S.getIterationVariable();
2975  const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
2976  const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
2977 
2978  CGOpenMPRuntime::StaticRTInput StaticInit(
2979  IVSize, IVSigned, /* Ordered = */ false, LoopArgs.IL, LoopArgs.LB,
2980  LoopArgs.UB, LoopArgs.ST, LoopArgs.Chunk);
2981  RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind, StaticInit);
2982 
2983  // For combined 'distribute' and 'for', the increment expression of
2984  // 'distribute' is stored in DistInc. For 'distribute' alone, it is in Inc.
2985  Expr *IncExpr;
2986  if (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()))
2987  IncExpr = S.getDistInc();
2988  else
2989  IncExpr = S.getInc();
2990 
2991  // This routine is shared by 'omp distribute parallel for' and
2992  // 'omp distribute': select the right EUB expression depending on the
2993  // directive.
2994  OMPLoopArguments OuterLoopArgs;
2995  OuterLoopArgs.LB = LoopArgs.LB;
2996  OuterLoopArgs.UB = LoopArgs.UB;
2997  OuterLoopArgs.ST = LoopArgs.ST;
2998  OuterLoopArgs.IL = LoopArgs.IL;
2999  OuterLoopArgs.Chunk = LoopArgs.Chunk;
3000  OuterLoopArgs.EUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
3001  ? S.getCombinedEnsureUpperBound()
3002  : S.getEnsureUpperBound();
3003  OuterLoopArgs.IncExpr = IncExpr;
3004  OuterLoopArgs.Init = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
3005  ? S.getCombinedInit()
3006  : S.getInit();
3007  OuterLoopArgs.Cond = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
3008  ? S.getCombinedCond()
3009  : S.getCond();
3010  OuterLoopArgs.NextLB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
3011  ? S.getCombinedNextLowerBound()
3012  : S.getNextLowerBound();
3013  OuterLoopArgs.NextUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
3014  ? S.getCombinedNextUpperBound()
3015  : S.getNextUpperBound();
3016 
3017  EmitOMPOuterLoop(/* DynamicOrOrdered = */ false, /* IsMonotonic = */ false, S,
3018  LoopScope, OuterLoopArgs, CodeGenLoopContent,
3019  emitEmptyOrdered);
3020 }
3021 
3022 static std::pair<LValue, LValue>
3023 emitDistributeParallelForInnerBounds(CodeGenFunction &CGF,
3024  const OMPExecutableDirective &S) {
3025  const OMPLoopDirective &LS = cast<OMPLoopDirective>(S);
3026  LValue LB =
3027  EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable()));
3028  LValue UB =
3029  EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable()));
3030 
3031  // When composing 'distribute' with 'for' (e.g. as in 'distribute
3032  // parallel for') we need to use the 'distribute'
3033  // chunk lower and upper bounds rather than the whole loop iteration
3034  // space. These are parameters to the outlined function for 'parallel'
3035  // and we copy the bounds of the previous schedule into
3036  // the current ones.
3037  LValue PrevLB = CGF.EmitLValue(LS.getPrevLowerBoundVariable());
3038  LValue PrevUB = CGF.EmitLValue(LS.getPrevUpperBoundVariable());
3039  llvm::Value *PrevLBVal = CGF.EmitLoadOfScalar(
3040  PrevLB, LS.getPrevLowerBoundVariable()->getExprLoc());
3041  PrevLBVal = CGF.EmitScalarConversion(
3042  PrevLBVal, LS.getPrevLowerBoundVariable()->getType(),
3043  LS.getIterationVariable()->getType(),
3044  LS.getPrevLowerBoundVariable()->getExprLoc());
3045  llvm::Value *PrevUBVal = CGF.EmitLoadOfScalar(
3046  PrevUB, LS.getPrevUpperBoundVariable()->getExprLoc());
3047  PrevUBVal = CGF.EmitScalarConversion(
3048  PrevUBVal, LS.getPrevUpperBoundVariable()->getType(),
3049  LS.getIterationVariable()->getType(),
3050  LS.getPrevUpperBoundVariable()->getExprLoc());
3051 
3052  CGF.EmitStoreOfScalar(PrevLBVal, LB);
3053  CGF.EmitStoreOfScalar(PrevUBVal, UB);
3054 
3055  return {LB, UB};
3056 }
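// Editor's note: for a combined construct such as
//
//   #pragma omp distribute parallel for
//   for (int i = 0; i < N; ++i) ...
//
// each team receives a [PrevLB, PrevUB] chunk from the 'distribute' schedule,
// and the stores above seed the inner 'for' bounds from that chunk instead of
// from the full iteration space [0, N-1].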
3057 
3058 /// If the 'for' loop has a dispatch schedule (e.g. dynamic, guided), then
3059 /// we need to use the LB and UB expressions generated by the worksharing
3060 /// code generation support, whereas in non-combined situations we would
3061 /// just emit 0 and the LastIteration expression.
3062 /// This function is necessary due to the difference between the LB and UB
3063 /// types for the RT emission routines for 'for_static_init' and
3064 /// 'for_dispatch_init'.
3065 static std::pair<llvm::Value *, llvm::Value *>
3066 emitDistributeParallelForDispatchBounds(CodeGenFunction &CGF,
3067  const OMPExecutableDirective &S,
3068  Address LB, Address UB) {
3069  const OMPLoopDirective &LS = cast<OMPLoopDirective>(S);
3070  const Expr *IVExpr = LS.getIterationVariable();
3071  // When implementing a dynamic schedule for a 'for' combined with a
3072  // 'distribute' (e.g. 'distribute parallel for'), the 'for' loop
3073  // is not normalized as each team only executes its own assigned
3074  // distribute chunk.
3075  QualType IteratorTy = IVExpr->getType();
3076  llvm::Value *LBVal =
3077  CGF.EmitLoadOfScalar(LB, /*Volatile=*/false, IteratorTy, S.getBeginLoc());
3078  llvm::Value *UBVal =
3079  CGF.EmitLoadOfScalar(UB, /*Volatile=*/false, IteratorTy, S.getBeginLoc());
3080  return {LBVal, UBVal};
3081 }
3082 
3084  CodeGenFunction &CGF, const OMPExecutableDirective &S,
3085  llvm::SmallVectorImpl<llvm::Value *> &CapturedVars) {
3086  const auto &Dir = cast<OMPLoopDirective>(S);
3087  LValue LB =
3088  CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedLowerBoundVariable()));
3089  llvm::Value *LBCast =
3090  CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(LB.getAddress(CGF)),
3091  CGF.SizeTy, /*isSigned=*/false);
3092  CapturedVars.push_back(LBCast);
3093  LValue UB =
3094  CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedUpperBoundVariable()));
3095 
3096  llvm::Value *UBCast =
3097  CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(UB.getAddress(CGF)),
3098  CGF.SizeTy, /*isSigned=*/false);
3099  CapturedVars.push_back(UBCast);
3100 }
3101 
3102 static void
3103 emitInnerParallelForWhenCombined(CodeGenFunction &CGF,
3104  const OMPLoopDirective &S,
3105  CodeGenFunction::JumpDest LoopExit) {
3106  auto &&CGInlinedWorksharingLoop = [&S](CodeGenFunction &CGF,
3107  PrePostActionTy &Action) {
3108  Action.Enter(CGF);
3109  bool HasCancel = false;
3110  if (!isOpenMPSimdDirective(S.getDirectiveKind())) {
3111  if (const auto *D = dyn_cast<OMPTeamsDistributeParallelForDirective>(&S))
3112  HasCancel = D->hasCancel();
3113  else if (const auto *D = dyn_cast<OMPDistributeParallelForDirective>(&S))
3114  HasCancel = D->hasCancel();
3115  else if (const auto *D =
3116  dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(&S))
3117  HasCancel = D->hasCancel();
3118  }
3119  CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, S.getDirectiveKind(),
3120  HasCancel);
3121  CGF.EmitOMPWorksharingLoop(S, S.getPrevEnsureUpperBound(),
3122  emitDistributeParallelForInnerBounds,
3123  emitDistributeParallelForDispatchBounds);
3124  };
3125 
3126  emitCommonOMPParallelDirective(
3127  CGF, S,
3128  isOpenMPSimdDirective(S.getDirectiveKind()) ? OMPD_for_simd : OMPD_for,
3129  CGInlinedWorksharingLoop,
3130  emitDistributeParallelForDistributeInnerBoundParams);
3131 }
3132 
3133 void CodeGenFunction::EmitOMPDistributeParallelForDirective(
3134  const OMPDistributeParallelForDirective &S) {
3135  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
3136  CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
3137  S.getDistInc());
3138  };
3139  OMPLexicalScope Scope(*this, S, OMPD_parallel);
3140  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen);
3141 }
3142 
3143 void CodeGenFunction::EmitOMPDistributeParallelForSimdDirective(
3144  const OMPDistributeParallelForSimdDirective &S) {
3145  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
3146  CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
3147  S.getDistInc());
3148  };
3149  OMPLexicalScope Scope(*this, S, OMPD_parallel);
3150  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen);
3151 }
3152 
3153 void CodeGenFunction::EmitOMPDistributeSimdDirective(
3154  const OMPDistributeSimdDirective &S) {
3155  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
3156  CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
3157  };
3158  OMPLexicalScope Scope(*this, S, OMPD_unknown);
3159  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
3160 }
3161 
3162 void CodeGenFunction::EmitOMPTargetSimdDeviceFunction(
3163  CodeGenModule &CGM, StringRef ParentName, const OMPTargetSimdDirective &S) {
3164  // Emit SPMD target parallel for region as a standalone region.
3165  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3166  emitOMPSimdRegion(CGF, S, Action);
3167  };
3168  llvm::Function *Fn;
3169  llvm::Constant *Addr;
3170  // Emit target region as a standalone region.
3171  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
3172  S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
3173  assert(Fn && Addr && "Target device function emission failed.");
3174 }
3175 
3176 void CodeGenFunction::EmitOMPTargetSimdDirective(
3177  const OMPTargetSimdDirective &S) {
3178  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3179  emitOMPSimdRegion(CGF, S, Action);
3180  };
3181  emitCommonOMPTargetDirective(*this, S, CodeGen);
3182 }
3183 
3184 namespace {
3185 struct ScheduleKindModifiersTy {
3186  OpenMPScheduleClauseKind Kind;
3187  OpenMPScheduleClauseModifier M1;
3188  OpenMPScheduleClauseModifier M2;
3189  ScheduleKindModifiersTy(OpenMPScheduleClauseKind Kind,
3190  OpenMPScheduleClauseModifier M1,
3191  OpenMPScheduleClauseModifier M2)
3192  : Kind(Kind), M1(M1), M2(M2) {}
3193 };
3194 } // namespace
3195 
3196 bool CodeGenFunction::EmitOMPWorksharingLoop(
3197  const OMPLoopDirective &S, Expr *EUB,
3198  const CodeGenLoopBoundsTy &CodeGenLoopBounds,
3199  const CodeGenDispatchBoundsTy &CGDispatchBounds) {
3200  // Emit the loop iteration variable.
3201  const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
3202  const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl());
3203  EmitVarDecl(*IVDecl);
3204 
3205  // Emit the iterations count variable.
3206  // If it is not a variable, Sema decided to calculate the iteration count
3207  // on each use (e.g., it is foldable into a constant).
3208  if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
3209  EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
3210  // Emit calculation of the iterations count.
3211  EmitIgnoredExpr(S.getCalcLastIteration());
3212  }
3213 
3214  CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();
3215 
3216  bool HasLastprivateClause;
3217  // Check pre-condition.
3218  {
3219  OMPLoopScope PreInitScope(*this, S);
3220  // Skip the entire loop if we don't meet the precondition.
3221  // If the condition constant folds and can be elided, avoid emitting the
3222  // whole loop.
3223  bool CondConstant;
3224  llvm::BasicBlock *ContBlock = nullptr;
3225  if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
3226  if (!CondConstant)
3227  return false;
3228  } else {
3229  llvm::BasicBlock *ThenBlock = createBasicBlock("omp.precond.then");
3230  ContBlock = createBasicBlock("omp.precond.end");
3231  emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
3232  getProfileCount(&S));
3233  EmitBlock(ThenBlock);
3234  incrementProfileCounter(&S);
3235  }
3236 
3237  RunCleanupsScope DoacrossCleanupScope(*this);
3238  bool Ordered = false;
3239  if (const auto *OrderedClause = S.getSingleClause<OMPOrderedClause>()) {
3240  if (OrderedClause->getNumForLoops())
3241  RT.emitDoacrossInit(*this, S, OrderedClause->getLoopNumIterations());
3242  else
3243  Ordered = true;
3244  }
3245 
3246  llvm::DenseSet<const Expr *> EmittedFinals;
3247  emitAlignedClause(*this, S);
3248  bool HasLinears = EmitOMPLinearClauseInit(S);
3249  // Emit helper vars inits.
3250 
3251  std::pair<LValue, LValue> Bounds = CodeGenLoopBounds(*this, S);
3252  LValue LB = Bounds.first;
3253  LValue UB = Bounds.second;
3254  LValue ST =
3255  EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
3256  LValue IL =
3257  EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));
3258 
3259  // Emit 'then' code.
3260  {
3261  OMPPrivateScope LoopScope(*this);
3262  if (EmitOMPFirstprivateClause(S, LoopScope) || HasLinears) {
3263  // Emit implicit barrier to synchronize threads and avoid data races on
3264  // initialization of firstprivate variables and post-update of
3265  // lastprivate variables.
3266  CGM.getOpenMPRuntime().emitBarrierCall(
3267  *this, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
3268  /*ForceSimpleCall=*/true);
3269  }
3270  EmitOMPPrivateClause(S, LoopScope);
3271  CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion(
3272  *this, S, EmitLValue(S.getIterationVariable()));
3273  HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
3274  EmitOMPReductionClauseInit(S, LoopScope);
3275  EmitOMPPrivateLoopCounters(S, LoopScope);
3276  EmitOMPLinearClause(S, LoopScope);
3277  (void)LoopScope.Privatize();
3278  if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
3279  CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S);
3280 
3281  // Detect the loop schedule kind and chunk.
3282  const Expr *ChunkExpr = nullptr;
3283  OpenMPScheduleTy ScheduleKind;
3284  if (const auto *C = S.getSingleClause<OMPScheduleClause>()) {
3285  ScheduleKind.Schedule = C->getScheduleKind();
3286  ScheduleKind.M1 = C->getFirstScheduleModifier();
3287  ScheduleKind.M2 = C->getSecondScheduleModifier();
3288  ChunkExpr = C->getChunkSize();
3289  } else {
3290  // Default behaviour for schedule clause.
3291  CGM.getOpenMPRuntime().getDefaultScheduleAndChunk(
3292  *this, S, ScheduleKind.Schedule, ChunkExpr);
3293  }
3294  bool HasChunkSizeOne = false;
3295  llvm::Value *Chunk = nullptr;
3296  if (ChunkExpr) {
3297  Chunk = EmitScalarExpr(ChunkExpr);
3298  Chunk = EmitScalarConversion(Chunk, ChunkExpr->getType(),
3299  S.getIterationVariable()->getType(),
3300  S.getBeginLoc());
3301  Expr::EvalResult Result;
3302  if (ChunkExpr->EvaluateAsInt(Result, getContext())) {
3303  llvm::APSInt EvaluatedChunk = Result.Val.getInt();
3304  HasChunkSizeOne = (EvaluatedChunk.getLimitedValue() == 1);
3305  }
3306  }
3307  const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
3308  const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
3309  // OpenMP 4.5, 2.7.1 Loop Construct, Description.
3310  // If the static schedule kind is specified or if the ordered clause is
3311  // specified, and if no monotonic modifier is specified, the effect will
3312  // be as if the monotonic modifier was specified.
3313  bool StaticChunkedOne =
3314  RT.isStaticChunked(ScheduleKind.Schedule,
3315  /* Chunked */ Chunk != nullptr) &&
3316  HasChunkSizeOne &&
3317  isOpenMPLoopBoundSharingDirective(S.getDirectiveKind());
3318  bool IsMonotonic =
3319  Ordered ||
3320  (ScheduleKind.Schedule == OMPC_SCHEDULE_static &&
3321  !(ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_nonmonotonic ||
3322  ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_nonmonotonic)) ||
3323  ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_monotonic ||
3324  ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_monotonic;
3325  if ((RT.isStaticNonchunked(ScheduleKind.Schedule,
3326  /* Chunked */ Chunk != nullptr) ||
3327  StaticChunkedOne) &&
3328  !Ordered) {
3329  JumpDest LoopExit =
3330  getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
3331  emitCommonSimdLoop(
3332  *this, S,
3333  [&S](CodeGenFunction &CGF, PrePostActionTy &) {
3334  if (isOpenMPSimdDirective(S.getDirectiveKind())) {
3335  CGF.EmitOMPSimdInit(S);
3336  } else if (const auto *C = S.getSingleClause<OMPOrderClause>()) {
3337  if (C->getKind() == OMPC_ORDER_concurrent)
3338  CGF.LoopStack.setParallel(/*Enable=*/true);
3339  }
3340  },
3341  [IVSize, IVSigned, Ordered, IL, LB, UB, ST, StaticChunkedOne, Chunk,
3342  &S, ScheduleKind, LoopExit,
3343  &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) {
3344  // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
3345  // When no chunk_size is specified, the iteration space is divided
3346  // into chunks that are approximately equal in size, and at most
3347  // one chunk is distributed to each thread. Note that the size of
3348  // the chunks is unspecified in this case.
3349  CGOpenMPRuntime::StaticRTInput StaticInit(
3350  IVSize, IVSigned, Ordered, IL.getAddress(CGF),
3351  LB.getAddress(CGF), UB.getAddress(CGF), ST.getAddress(CGF),
3352  StaticChunkedOne ? Chunk : nullptr);
3353  CGF.CGM.getOpenMPRuntime().emitForStaticInit(
3354  CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind,
3355  StaticInit);
3356  // UB = min(UB, GlobalUB);
3357  if (!StaticChunkedOne)
3358  CGF.EmitIgnoredExpr(S.getEnsureUpperBound());
3359  // IV = LB;
3360  CGF.EmitIgnoredExpr(S.getInit());
3361  // For unchunked static schedule generate:
3362  //
3363  // while (idx <= UB) {
3364  // BODY;
3365  // ++idx;
3366  // }
3367  //
3368  // For static schedule with chunk one:
3369  //
3370  // while (IV <= PrevUB) {
3371  // BODY;
3372  // IV += ST;
3373  // }
3374  CGF.EmitOMPInnerLoop(
3375  S, LoopScope.requiresCleanups(),
3376  StaticChunkedOne ? S.getCombinedParForInDistCond()
3377  : S.getCond(),
3378  StaticChunkedOne ? S.getDistInc() : S.getInc(),
3379  [&S, LoopExit](CodeGenFunction &CGF) {
3380  emitOMPLoopBodyWithStopPoint(CGF, S, LoopExit);
3381  },
3382  [](CodeGenFunction &) {});
3383  });
3384  EmitBlock(LoopExit.getBlock());
3385  // Tell the runtime we are done.
3386  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
3387  CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(),
3388  S.getDirectiveKind());
3389  };
3390  OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen);
3391  } else {
3392  // Emit the outer loop, which requests its work chunk [LB..UB] from
3393  // runtime and runs the inner loop to process it.
3394  const OMPLoopArguments LoopArguments(
3395  LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this),
3396  IL.getAddress(*this), Chunk, EUB);
3397  EmitOMPForOuterLoop(ScheduleKind, IsMonotonic, S, LoopScope, Ordered,
3398  LoopArguments, CGDispatchBounds);
3399  }
3400  if (isOpenMPSimdDirective(S.getDirectiveKind())) {
3401  EmitOMPSimdFinal(S, [IL, &S](CodeGenFunction &CGF) {
3402  return CGF.Builder.CreateIsNotNull(
3403  CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
3404  });
3405  }
3406  EmitOMPReductionClauseFinal(
3407  S, /*ReductionKind=*/isOpenMPSimdDirective(S.getDirectiveKind())
3408  ? /*Parallel and Simd*/ OMPD_parallel_for_simd
3409  : /*Parallel only*/ OMPD_parallel);
3410  // Emit post-update of the reduction variables if IsLastIter != 0.
3411  emitPostUpdateForReductionClause(
3412  *this, S, [IL, &S](CodeGenFunction &CGF) {
3413  return CGF.Builder.CreateIsNotNull(
3414  CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
3415  });
3416  // Emit final copy of the lastprivate variables if IsLastIter != 0.
3417  if (HasLastprivateClause)
3418  EmitOMPLastprivateClauseFinal(
3419  S, isOpenMPSimdDirective(S.getDirectiveKind()),
3420  Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getBeginLoc())));
3421  }
3422  EmitOMPLinearClauseFinal(S, [IL, &S](CodeGenFunction &CGF) {
3423  return CGF.Builder.CreateIsNotNull(
3424  CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
3425  });
3426  DoacrossCleanupScope.ForceCleanup();
3427  // We're now done with the loop, so jump to the continuation block.
3428  if (ContBlock) {
3429  EmitBranch(ContBlock);
3430  EmitBlock(ContBlock, /*IsFinished=*/true);
3431  }
3432  }
3433  return HasLastprivateClause;
3434 }
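// Editor's summary (a sketch, assuming the conventional libomp entry points):
// the two branches above lower roughly to
//
//   static (unchunked or chunk==1):  __kmpc_for_static_init_4(...);
//                                    inner loop; __kmpc_for_static_fini(...);
//   dynamic/guided/ordered:          __kmpc_dispatch_init_4(...);
//                                    while (__kmpc_dispatch_next_4(...))
//                                      inner loop;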
3435 
3436 /// The following two functions generate expressions for the loop lower
3437 /// and upper bounds in case of static and dynamic (dispatch) schedule
3438 /// of the associated 'for' or 'distribute' loop.
3439 static std::pair<LValue, LValue>
3440 emitForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
3441  const auto &LS = cast<OMPLoopDirective>(S);
3442  LValue LB =
3443  EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable()));
3444  LValue UB =
3445  EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable()));
3446  return {LB, UB};
3447 }
3448 
3449 /// When dealing with dispatch schedules (e.g. dynamic, guided) we do not
3450 /// consider the lower and upper bound expressions generated by the
3451 /// worksharing loop support, but we use 0 and the iteration space size as
3452 /// constants.
3453 static std::pair<llvm::Value *, llvm::Value *>
3454 emitDispatchForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S,
3455  Address LB, Address UB) {
3456  const auto &LS = cast<OMPLoopDirective>(S);
3457  const Expr *IVExpr = LS.getIterationVariable();
3458  const unsigned IVSize = CGF.getContext().getTypeSize(IVExpr->getType());
3459  llvm::Value *LBVal = CGF.Builder.getIntN(IVSize, 0);
3460  llvm::Value *UBVal = CGF.EmitScalarExpr(LS.getLastIteration());
3461  return {LBVal, UBVal};
3462 }
3463 
3464 /// Emits internal temp array declarations for the directive with inscan
3465 /// reductions.
3466 /// The code is the following:
3467 /// \code
3468 /// size num_iters = <num_iters>;
3469 /// <type> buffer[num_iters];
3470 /// \endcode
3471 static void emitScanBasedDirectiveDecls(
3472  CodeGenFunction &CGF, const OMPLoopDirective &S,
3473  llvm::function_ref<llvm::Value *(CodeGenFunction &)> NumIteratorsGen) {
3474  llvm::Value *OMPScanNumIterations = CGF.Builder.CreateIntCast(
3475  NumIteratorsGen(CGF), CGF.SizeTy, /*isSigned=*/false);
3476  SmallVector<const Expr *, 4> Shareds;
3477  SmallVector<const Expr *, 4> Privates;
3478  SmallVector<const Expr *, 4> ReductionOps;
3479  SmallVector<const Expr *, 4> CopyArrayTemps;
3480  for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
3481  assert(C->getModifier() == OMPC_REDUCTION_inscan &&
3482  "Only inscan reductions are expected.");
3483  Shareds.append(C->varlist_begin(), C->varlist_end());
3484  Privates.append(C->privates().begin(), C->privates().end());
3485  ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
3486  CopyArrayTemps.append(C->copy_array_temps().begin(),
3487  C->copy_array_temps().end());
3488  }
3489  {
3490  // Emit buffers for each reduction variable.
3491  // ReductionCodeGen is required to correctly emit the code for array
3492  // reductions.
3493  ReductionCodeGen RedCG(Shareds, Shareds, Privates, ReductionOps);
3494  unsigned Count = 0;
3495  auto *ITA = CopyArrayTemps.begin();
3496  for (const Expr *IRef : Privates) {
3497  const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
3498  // Emit variably modified arrays, used for arrays/array sections
3499  // reductions.
3500  if (PrivateVD->getType()->isVariablyModifiedType()) {
3501  RedCG.emitSharedOrigLValue(CGF, Count);
3502  RedCG.emitAggregateType(CGF, Count);
3503  }
3504  CodeGenFunction::OpaqueValueMapping DimMapping(
3505  CGF,
3506  cast<OpaqueValueExpr>(
3507  cast<VariableArrayType>((*ITA)->getType()->getAsArrayTypeUnsafe())
3508  ->getSizeExpr()),
3509  RValue::get(OMPScanNumIterations));
3510  // Emit temp buffer.
3511  CGF.EmitVarDecl(*cast<VarDecl>(cast<DeclRefExpr>(*ITA)->getDecl()));
3512  ++ITA;
3513  ++Count;
3514  }
3515  }
3516 }
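// Editor's example of user code that reaches the inscan path (hypothetical
// names 'a', 'b', 'sum'):
//
//   int sum = 0;
//   #pragma omp parallel for reduction(inscan, +:sum)
//   for (int i = 0; i < n; ++i) {
//     sum += a[i];                  // input phase
//     #pragma omp scan inclusive(sum)
//     b[i] = sum;                   // scan phase
//   }
//
// The buffer emitted above holds one partial reduction value per iteration.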
3517 
3518 /// Copies the final inscan reduction values to the original variables.
3519 /// The code is the following:
3520 /// \code
3521 /// <orig_var> = buffer[num_iters-1];
3522 /// \endcode
3523 static void emitScanBasedDirectiveFinals(
3524  CodeGenFunction &CGF, const OMPLoopDirective &S,
3525  llvm::function_ref<llvm::Value *(CodeGenFunction &)> NumIteratorsGen) {
3526  llvm::Value *OMPScanNumIterations = CGF.Builder.CreateIntCast(
3527  NumIteratorsGen(CGF), CGF.SizeTy, /*isSigned=*/false);
3528  SmallVector<const Expr *, 4> Shareds;
3529  SmallVector<const Expr *, 4> LHSs;
3530  SmallVector<const Expr *, 4> RHSs;
3531  SmallVector<const Expr *, 4> Privates;
3532  SmallVector<const Expr *, 4> CopyOps;
3533  SmallVector<const Expr *, 4> CopyArrayElems;
3534  for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
3535  assert(C->getModifier() == OMPC_REDUCTION_inscan &&
3536  "Only inscan reductions are expected.");
3537  Shareds.append(C->varlist_begin(), C->varlist_end());
3538  LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
3539  RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
3540  Privates.append(C->privates().begin(), C->privates().end());
3541  CopyOps.append(C->copy_ops().begin(), C->copy_ops().end());
3542  CopyArrayElems.append(C->copy_array_elems().begin(),
3543  C->copy_array_elems().end());
3544  }
3545  // Copy the final value from the scan buffer back to the original variable:
3546  // <orig_var> = buffer[num_iters - 1];
3547  llvm::Value *OMPLast = CGF.Builder.CreateNSWSub(
3548  OMPScanNumIterations,
3549  llvm::ConstantInt::get(CGF.SizeTy, 1, /*isSigned=*/false));
3550  for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
3551  const Expr *PrivateExpr = Privates[I];
3552  const Expr *OrigExpr = Shareds[I];
3553  const Expr *CopyArrayElem = CopyArrayElems[I];
3554  CodeGenFunction::OpaqueValueMapping IdxMapping(
3555  CGF,
3556  cast<OpaqueValueExpr>(
3557  cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
3558  RValue::get(OMPLast));
3559  LValue DestLVal = CGF.EmitLValue(OrigExpr);
3560  LValue SrcLVal = CGF.EmitLValue(CopyArrayElem);
3561  CGF.EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(CGF),
3562  SrcLVal.getAddress(CGF),
3563  cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
3564  cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
3565  CopyOps[I]);
3566  }
3567 }
3568 
3569 /// Emits the code for the directive with inscan reductions.
3570 /// The code is the following:
3571 /// \code
3572 /// #pragma omp ...
3573 /// for (i: 0..<num_iters>) {
3574 /// <input phase>;
3575 /// buffer[i] = red;
3576 /// }
3577 /// #pragma omp master // in parallel region
3578 /// for (int k = 0; k != ceil(log2(num_iters)); ++k)
3579 /// for (size cnt = last_iter; cnt >= pow(2, k); --cnt)
3580 /// buffer[cnt] op= buffer[cnt-pow(2,k)];
3581 /// #pragma omp barrier // in parallel region
3582 /// #pragma omp ...
3583 /// for (0..<num_iters>) {
3584 /// red = InclusiveScan ? buffer[i] : buffer[i-1];
3585 /// <scan phase>;
3586 /// }
3587 /// \endcode
3588 static void emitScanBasedDirective(
3589  CodeGenFunction &CGF, const OMPLoopDirective &S,
3590  llvm::function_ref<llvm::Value *(CodeGenFunction &)> NumIteratorsGen,
3591  llvm::function_ref<void(CodeGenFunction &)> FirstGen,
3592  llvm::function_ref<void(CodeGenFunction &)> SecondGen) {
3593  llvm::Value *OMPScanNumIterations = CGF.Builder.CreateIntCast(
3594  NumIteratorsGen(CGF), CGF.SizeTy, /*isSigned=*/false);
3595  SmallVector<const Expr *, 4> Privates;
3596  SmallVector<const Expr *, 4> ReductionOps;
3597  SmallVector<const Expr *, 4> LHSs;
3598  SmallVector<const Expr *, 4> RHSs;
3599  SmallVector<const Expr *, 4> CopyArrayElems;
3600  for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
3601  assert(C->getModifier() == OMPC_REDUCTION_inscan &&
3602  "Only inscan reductions are expected.");
3603  Privates.append(C->privates().begin(), C->privates().end());
3604  ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
3605  LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
3606  RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
3607  CopyArrayElems.append(C->copy_array_elems().begin(),
3608  C->copy_array_elems().end());
3609  }
3610  CodeGenFunction::ParentLoopDirectiveForScanRegion ScanRegion(CGF, S);
3611  {
3612  // Emit loop with input phase:
3613  // #pragma omp ...
3614  // for (i: 0..<num_iters>) {
3615  // <input phase>;
3616  // buffer[i] = red;
3617  // }
3618  CGF.OMPFirstScanLoop = true;
3619  CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
3620  FirstGen(CGF);
3621  }
3622  // #pragma omp barrier // in parallel region
3623  auto &&CodeGen = [&S, OMPScanNumIterations, &LHSs, &RHSs, &CopyArrayElems,
3624  &ReductionOps,
3625  &Privates](CodeGenFunction &CGF, PrePostActionTy &Action) {
3626  Action.Enter(CGF);
3627  // Emit prefix reduction:
3628  // #pragma omp master // in parallel region
3629  // for (int k = 0; k <= ceil(log2(n)); ++k)
3630  llvm::BasicBlock *InputBB = CGF.Builder.GetInsertBlock();
3631  llvm::BasicBlock *LoopBB = CGF.createBasicBlock("omp.outer.log.scan.body");
3632  llvm::BasicBlock *ExitBB = CGF.createBasicBlock("omp.outer.log.scan.exit");
3633  llvm::Function *F =
3634  CGF.CGM.getIntrinsic(llvm::Intrinsic::log2, CGF.DoubleTy);
3635  llvm::Value *Arg =
3636  CGF.Builder.CreateUIToFP(OMPScanNumIterations, CGF.DoubleTy);
3637  llvm::Value *LogVal = CGF.EmitNounwindRuntimeCall(F, Arg);
3638  F = CGF.CGM.getIntrinsic(llvm::Intrinsic::ceil, CGF.DoubleTy);
3639  LogVal = CGF.EmitNounwindRuntimeCall(F, LogVal);
3640  LogVal = CGF.Builder.CreateFPToUI(LogVal, CGF.IntTy);
3641  llvm::Value *NMin1 = CGF.Builder.CreateNUWSub(
3642  OMPScanNumIterations, llvm::ConstantInt::get(CGF.SizeTy, 1));
3643  auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, S.getBeginLoc());
3644  CGF.EmitBlock(LoopBB);
3645  auto *Counter = CGF.Builder.CreatePHI(CGF.IntTy, 2);
3646  // size pow2k = 1;
3647  auto *Pow2K = CGF.Builder.CreatePHI(CGF.SizeTy, 2);
3648  Counter->addIncoming(llvm::ConstantInt::get(CGF.IntTy, 0), InputBB);
3649  Pow2K->addIncoming(llvm::ConstantInt::get(CGF.SizeTy, 1), InputBB);
3650  // for (size i = n - 1; i >= 2 ^ k; --i)
3651  // tmp[i] op= tmp[i-pow2k];
3652  llvm::BasicBlock *InnerLoopBB =
3653  CGF.createBasicBlock("omp.inner.log.scan.body");
3654  llvm::BasicBlock *InnerExitBB =
3655  CGF.createBasicBlock("omp.inner.log.scan.exit");
3656  llvm::Value *CmpI = CGF.Builder.CreateICmpUGE(NMin1, Pow2K);
3657  CGF.Builder.CreateCondBr(CmpI, InnerLoopBB, InnerExitBB);
3658  CGF.EmitBlock(InnerLoopBB);
3659  auto *IVal = CGF.Builder.CreatePHI(CGF.SizeTy, 2);
3660  IVal->addIncoming(NMin1, LoopBB);
3661  {
3662  CodeGenFunction::OMPPrivateScope PrivScope(CGF);
3663  auto *ILHS = LHSs.begin();
3664  auto *IRHS = RHSs.begin();
3665  for (const Expr *CopyArrayElem : CopyArrayElems) {
3666  const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
3667  const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
3668  Address LHSAddr = Address::invalid();
3669  {
3670  CodeGenFunction::OpaqueValueMapping IdxMapping(
3671  CGF,
3672  cast<OpaqueValueExpr>(
3673  cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
3674  RValue::get(IVal));
3675  LHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress(CGF);
3676  }
3677  PrivScope.addPrivate(LHSVD, LHSAddr);
3678  Address RHSAddr = Address::invalid();
3679  {
3680  llvm::Value *OffsetIVal = CGF.Builder.CreateNUWSub(IVal, Pow2K);
3681  CodeGenFunction::OpaqueValueMapping IdxMapping(
3682  CGF,
3683  cast<OpaqueValueExpr>(
3684  cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
3685  RValue::get(OffsetIVal));
3686  RHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress(CGF);
3687  }
3688  PrivScope.addPrivate(RHSVD, RHSAddr);
3689  ++ILHS;
3690  ++IRHS;
3691  }
3692  PrivScope.Privatize();
3693  CGF.CGM.getOpenMPRuntime().emitReduction(
3694  CGF, S.getEndLoc(), Privates, LHSs, RHSs, ReductionOps,
3695  {/*WithNowait=*/true, /*SimpleReduction=*/true, OMPD_unknown});
3696  }
3697  llvm::Value *NextIVal =
3698  CGF.Builder.CreateNUWSub(IVal, llvm::ConstantInt::get(CGF.SizeTy, 1));
3699  IVal->addIncoming(NextIVal, CGF.Builder.GetInsertBlock());
3700  CmpI = CGF.Builder.CreateICmpUGE(NextIVal, Pow2K);
3701  CGF.Builder.CreateCondBr(CmpI, InnerLoopBB, InnerExitBB);
3702  CGF.EmitBlock(InnerExitBB);
3703  llvm::Value *Next =
3704  CGF.Builder.CreateNUWAdd(Counter, llvm::ConstantInt::get(CGF.IntTy, 1));
3705  Counter->addIncoming(Next, CGF.Builder.GetInsertBlock());
3706  // pow2k <<= 1;
3707  llvm::Value *NextPow2K =
3708  CGF.Builder.CreateShl(Pow2K, 1, "", /*HasNUW=*/true);
3709  Pow2K->addIncoming(NextPow2K, CGF.Builder.GetInsertBlock());
3710  llvm::Value *Cmp = CGF.Builder.CreateICmpNE(Next, LogVal);
3711  CGF.Builder.CreateCondBr(Cmp, LoopBB, ExitBB);
3712  auto DL1 = ApplyDebugLocation::CreateDefaultArtificial(CGF, S.getEndLoc());
3713  CGF.EmitBlock(ExitBB);
3714  };
3715  if (isOpenMPParallelDirective(S.getDirectiveKind())) {
3716  CGF.CGM.getOpenMPRuntime().emitMasterRegion(CGF, CodeGen, S.getBeginLoc());
3717  CGF.CGM.getOpenMPRuntime().emitBarrierCall(
3718  CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
3719  /*ForceSimpleCall=*/true);
3720  } else {
3721  RegionCodeGenTy RCG(CodeGen);
3722  RCG(CGF);
3723  }
3724 
3725  CGF.OMPFirstScanLoop = false;
3726  SecondGen(CGF);
3727 }
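// Editor's worked example: for num_iters = 8 the outer loop above performs
// ceil(log2(8)) = 3 passes with pow2k = 1, 2, 4. In the first pass
// buffer[7] += buffer[6], ..., buffer[1] += buffer[0]; after the final pass
// buffer[i] holds the inclusive prefix reduction over elements 0..i.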
3728 
3729 static bool emitWorksharingDirective(CodeGenFunction &CGF,
3730  const OMPLoopDirective &S,
3731  bool HasCancel) {
3732  bool HasLastprivates;
3733  if (llvm::any_of(S.getClausesOfKind<OMPReductionClause>(),
3734  [](const OMPReductionClause *C) {
3735  return C->getModifier() == OMPC_REDUCTION_inscan;
3736  })) {
3737  const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) {
3738  CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
3739  OMPLoopScope LoopScope(CGF, S);
3740  return CGF.EmitScalarExpr(S.getNumIterations());
3741  };
3742  const auto &&FirstGen = [&S, HasCancel](CodeGenFunction &CGF) {
3743  CodeGenFunction::OMPCancelStackRAII CancelRegion(
3744  CGF, S.getDirectiveKind(), HasCancel);
3745  (void)CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
3746  emitForLoopBounds,
3747  emitDispatchForLoopBounds);
3748  // Emit an implicit barrier at the end.
3749  CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getBeginLoc(),
3750  OMPD_for);
3751  };
3752  const auto &&SecondGen = [&S, HasCancel,
3753  &HasLastprivates](CodeGenFunction &CGF) {
3754  CodeGenFunction::OMPCancelStackRAII CancelRegion(
3755  CGF, S.getDirectiveKind(), HasCancel);
3756  HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
3757  emitForLoopBounds,
3758  emitDispatchForLoopBounds);
3759  };
3760  if (!isOpenMPParallelDirective(S.getDirectiveKind()))
3761  emitScanBasedDirectiveDecls(CGF, S, NumIteratorsGen);
3762  emitScanBasedDirective(CGF, S, NumIteratorsGen, FirstGen, SecondGen);
3763  if (!isOpenMPParallelDirective(S.getDirectiveKind()))
3764  emitScanBasedDirectiveFinals(CGF, S, NumIteratorsGen);
3765  } else {
3766  CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, S.getDirectiveKind(),
3767  HasCancel);
3768  HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
3769  emitForLoopBounds,
3770  emitDispatchForLoopBounds);
3771  }
3772  return HasLastprivates;
3773 }
3774 
3775 static bool isSupportedByOpenMPIRBuilder(const OMPForDirective &S) {
3776  if (S.hasCancel())
3777  return false;
3778  for (OMPClause *C : S.clauses()) {
3779  if (isa<OMPNowaitClause>(C))
3780  continue;
3781 
3782  if (auto *SC = dyn_cast<OMPScheduleClause>(C)) {
3783  if (SC->getFirstScheduleModifier() != OMPC_SCHEDULE_MODIFIER_unknown)
3784  return false;
3785  if (SC->getSecondScheduleModifier() != OMPC_SCHEDULE_MODIFIER_unknown)
3786  return false;
3787  switch (SC->getScheduleKind()) {
3788  case OMPC_SCHEDULE_auto:
3789  case OMPC_SCHEDULE_dynamic:
3790  case OMPC_SCHEDULE_runtime:
3791  case OMPC_SCHEDULE_guided:
3792  case OMPC_SCHEDULE_static:
3793  continue;
3794  case OMPC_SCHEDULE_unknown:
3795  return false;
3796  }
3797  }
3798 
3799  return false;
3800  }
3801 
3802  return true;
3803 }
3804 
3805 static llvm::omp::ScheduleKind
3806 convertClauseKindToSchedKind(OpenMPScheduleClauseKind ScheduleClauseKind) {
3807  switch (ScheduleClauseKind) {
3808  case OMPC_SCHEDULE_unknown:
3809  return llvm::omp::OMP_SCHEDULE_Default;
3810  case OMPC_SCHEDULE_auto:
3811  return llvm::omp::OMP_SCHEDULE_Auto;
3812  case OMPC_SCHEDULE_dynamic:
3813  return llvm::omp::OMP_SCHEDULE_Dynamic;
3814  case OMPC_SCHEDULE_guided:
3815  return llvm::omp::OMP_SCHEDULE_Guided;
3816  case OMPC_SCHEDULE_runtime:
3817  return llvm::omp::OMP_SCHEDULE_Runtime;
3818  case OMPC_SCHEDULE_static:
3819  return llvm::omp::OMP_SCHEDULE_Static;
3820  }
3821  llvm_unreachable("Unhandled schedule kind");
3822 }
3823 
3824 void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
3825  bool HasLastprivates = false;
3826  bool UseOMPIRBuilder =
3827  CGM.getLangOpts().OpenMPIRBuilder && isSupportedByOpenMPIRBuilder(S);
3828  auto &&CodeGen = [this, &S, &HasLastprivates,
3829  UseOMPIRBuilder](CodeGenFunction &CGF, PrePostActionTy &) {
3830  // Use the OpenMPIRBuilder if enabled.
3831  if (UseOMPIRBuilder) {
3832  bool NeedsBarrier = !S.getSingleClause<OMPNowaitClause>();
3833 
3834  llvm::omp::ScheduleKind SchedKind = llvm::omp::OMP_SCHEDULE_Default;
3835  llvm::Value *ChunkSize = nullptr;
3836  if (auto *SchedClause = S.getSingleClause<OMPScheduleClause>()) {
3837  SchedKind =
3838  convertClauseKindToSchedKind(SchedClause->getScheduleKind());
3839  if (const Expr *ChunkSizeExpr = SchedClause->getChunkSize())
3840  ChunkSize = EmitScalarExpr(ChunkSizeExpr);
3841  }
3842 
3843  // Emit the associated statement and get its loop representation.
3844  const Stmt *Inner = S.getRawStmt();
3845  llvm::CanonicalLoopInfo *CLI =
3846  EmitOMPCollapsedCanonicalLoopNest(Inner, 1);
3847 
3848  llvm::OpenMPIRBuilder &OMPBuilder =
3849  CGM.getOpenMPRuntime().getOMPBuilder();
3850  llvm::OpenMPIRBuilder::InsertPointTy AllocaIP(
3851  AllocaInsertPt->getParent(), AllocaInsertPt->getIterator());
3852  OMPBuilder.applyWorkshareLoop(
3853  Builder.getCurrentDebugLocation(), CLI, AllocaIP, NeedsBarrier,
3854  SchedKind, ChunkSize, /*HasSimdModifier=*/false,
3855  /*HasMonotonicModifier=*/false, /*HasNonmonotonicModifier=*/false,
3856  /*HasOrderedClause=*/false);
3857  return;
3858  }
3859 
3860  HasLastprivates = emitWorksharingDirective(CGF, S, S.hasCancel());
3861  };
3862  {
3863  auto LPCRegion =
3864  CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
3865  OMPLexicalScope Scope(*this, S, OMPD_unknown);
3866  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_for, CodeGen,
3867  S.hasCancel());
3868  }
3869 
3870  if (!UseOMPIRBuilder) {
3871  // Emit an implicit barrier at the end.
3872  if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates)
3873  CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for);
3874  }
3875  // Check for outer lastprivate conditional update.
3876  checkForLastprivateConditionalUpdate(*this, S);
3877 }
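// Editor's note: LangOpts.OpenMPIRBuilder is off by default and is enabled by
// the experimental -fopenmp-enable-irbuilder flag; only then does the
// OMPBuilder.applyWorkshareLoop path above replace the CGOpenMPRuntime path.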
3878 
3879 void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) {
3880  bool HasLastprivates = false;
3881  auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF,
3882  PrePostActionTy &) {
3883  HasLastprivates = emitWorksharingDirective(CGF, S, /*HasCancel=*/false);
3884  };
3885  {
3886  auto LPCRegion =
3887  CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
3888  OMPLexicalScope Scope(*this, S, OMPD_unknown);
3889  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
3890  }
3891 
3892  // Emit an implicit barrier at the end.
3893  if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates)
3894  CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for);
3895  // Check for outer lastprivate conditional update.
3896  checkForLastprivateConditionalUpdate(*this, S);
3897 }
3898 
3899 static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty,
3900  const Twine &Name,
3901  llvm::Value *Init = nullptr) {
3902  LValue LVal = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty);
3903  if (Init)
3904  CGF.EmitStoreThroughLValue(RValue::get(Init), LVal, /*isInit*/ true);
3905  return LVal;
3906 }
3907 
3908 void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
3909  const Stmt *CapturedStmt = S.getInnermostCapturedStmt()->getCapturedStmt();
3910  const auto *CS = dyn_cast<CompoundStmt>(CapturedStmt);
3911  bool HasLastprivates = false;
3912  auto &&CodeGen = [&S, CapturedStmt, CS,
3913  &HasLastprivates](CodeGenFunction &CGF, PrePostActionTy &) {
3914  const ASTContext &C = CGF.getContext();
3915  QualType KmpInt32Ty =
3916  C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
3917  // Emit helper vars inits.
3918  LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.",
3919  CGF.Builder.getInt32(0));
3920  llvm::ConstantInt *GlobalUBVal = CS != nullptr
3921  ? CGF.Builder.getInt32(CS->size() - 1)
3922  : CGF.Builder.getInt32(0);
3923  LValue UB =
3924  createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.", GlobalUBVal);
3925  LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.",
3926  CGF.Builder.getInt32(1));
3927  LValue IL = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.il.",
3928  CGF.Builder.getInt32(0));
3929  // Loop counter.
3930  LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv.");
3931  OpaqueValueExpr IVRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue);
3932  CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV);
3933  OpaqueValueExpr UBRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue);
3934  CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB);
3935  // Generate condition for loop.
3936  BinaryOperator *Cond = BinaryOperator::Create(
3937  C, &IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_PRValue, OK_Ordinary,
3938  S.getBeginLoc(), FPOptionsOverride());
3939  // Increment for loop counter.
3940  UnaryOperator *Inc = UnaryOperator::Create(
3941  C, &IVRefExpr, UO_PreInc, KmpInt32Ty, VK_PRValue, OK_Ordinary,
3942  S.getBeginLoc(), true, FPOptionsOverride());
3943  auto &&BodyGen = [CapturedStmt, CS, &S, &IV](CodeGenFunction &CGF) {
3944  // Iterate through all sections and emit a switch construct:
3945  // switch (IV) {
3946  // case 0:
3947  // <SectionStmt[0]>;
3948  // break;
3949  // ...
3950  // case <NumSection> - 1:
3951  // <SectionStmt[<NumSection> - 1]>;
3952  // break;
3953  // }
3954  // .omp.sections.exit:
3955  llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.sections.exit");
3956  llvm::SwitchInst *SwitchStmt =
3957  CGF.Builder.CreateSwitch(CGF.EmitLoadOfScalar(IV, S.getBeginLoc()),
3958  ExitBB, CS == nullptr ? 1 : CS->size());
3959  if (CS) {
3960  unsigned CaseNumber = 0;
3961  for (const Stmt *SubStmt : CS->children()) {
3962  auto CaseBB = CGF.createBasicBlock(".omp.sections.case");
3963  CGF.EmitBlock(CaseBB);
3964  SwitchStmt->addCase(CGF.Builder.getInt32(CaseNumber), CaseBB);
3965  CGF.EmitStmt(SubStmt);
3966  CGF.EmitBranch(ExitBB);
3967  ++CaseNumber;
3968  }
3969  } else {
3970  llvm::BasicBlock *CaseBB = CGF.createBasicBlock(".omp.sections.case");
3971  CGF.EmitBlock(CaseBB);
3972  SwitchStmt->addCase(CGF.Builder.getInt32(0), CaseBB);
3973  CGF.EmitStmt(CapturedStmt);
3974  CGF.EmitBranch(ExitBB);
3975  }
3976  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
3977  };
3978 
3979  CodeGenFunction::OMPPrivateScope LoopScope(CGF);
3980  if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) {
3981  // Emit implicit barrier to synchronize threads and avoid data races on
3982  // initialization of firstprivate variables and post-update of lastprivate
3983  // variables.
3984  CGF.CGM.getOpenMPRuntime().emitBarrierCall(
3985  CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
3986  /*ForceSimpleCall=*/true);
3987  }
3988  CGF.EmitOMPPrivateClause(S, LoopScope);
3989  CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion(CGF, S, IV);
3990  HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
3991  CGF.EmitOMPReductionClauseInit(S, LoopScope);
3992  (void)LoopScope.Privatize();
3993  if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
3994  CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);
3995 
3996  // Emit static non-chunked loop.
3997  OpenMPScheduleTy ScheduleKind;
3998  ScheduleKind.Schedule = OMPC_SCHEDULE_static;
3999  CGOpenMPRuntime::StaticRTInput StaticInit(
4000  /*IVSize=*/32, /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(CGF),
4001  LB.getAddress(CGF), UB.getAddress(CGF), ST.getAddress(CGF));
4002  CGF.CGM.getOpenMPRuntime().emitForStaticInit(
4003  CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind, StaticInit);
4004  // UB = min(UB, GlobalUB);
4005  llvm::Value *UBVal = CGF.EmitLoadOfScalar(UB, S.getBeginLoc());
4006  llvm::Value *MinUBGlobalUB = CGF.Builder.CreateSelect(
4007  CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal);
4008  CGF.EmitStoreOfScalar(MinUBGlobalUB, UB);
4009  // IV = LB;
4010  CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getBeginLoc()), IV);
4011  // while (idx <= UB) { BODY; ++idx; }
4012  CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, Cond, Inc, BodyGen,
4013  [](CodeGenFunction &) {});
4014  // Tell the runtime we are done.
4015  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
4016  CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(),
4017  S.getDirectiveKind());
4018  };
4019  CGF.OMPCancelStack.emitExit(CGF, S.getDirectiveKind(), CodeGen);
4020  CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
4021  // Emit post-update of the reduction variables if IsLastIter != 0.
4022  emitPostUpdateForReductionClause(CGF, S, [IL, &S](CodeGenFunction &CGF) {
4023  return CGF.Builder.CreateIsNotNull(
4024  CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
4025  });
4026 
4027  // Emit final copy of the lastprivate variables if IsLastIter != 0.
4028  if (HasLastprivates)
4029  CGF.EmitOMPLastprivateClauseFinal(
4030  S, /*NoFinals=*/false,
4031  CGF.Builder.CreateIsNotNull(
4032  CGF.EmitLoadOfScalar(IL, S.getBeginLoc())));
4033  };
4034 
4035  bool HasCancel = false;
4036  if (auto *OSD = dyn_cast<OMPSectionsDirective>(&S))
4037  HasCancel = OSD->hasCancel();
4038  else if (auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&S))
4039  HasCancel = OPSD->hasCancel();
4040  OMPCancelStackRAII CancelRegion(*this, S.getDirectiveKind(), HasCancel);
4041  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_sections, CodeGen,
4042  HasCancel);
4043  // Emit barrier for lastprivates only if 'sections' directive has 'nowait'
4044  // clause. Otherwise the barrier will be generated by the codegen for the
4045  // directive.
4046  if (HasLastprivates && S.getSingleClause<OMPNowaitClause>()) {
4047  // Emit implicit barrier to synchronize threads and avoid data races on
4048  // initialization of firstprivate variables.
4049  CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(),
4050  OMPD_unknown);
4051  }
4052 }
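// Editor's example: user code such as
//
//   #pragma omp sections
//   {
//     #pragma omp section
//     foo();
//     #pragma omp section
//     bar();
//   }
//
// is lowered by EmitSections into a statically scheduled loop over IV = 0..1
// whose body is the switch shown above, one case per section.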
4053 
4054 void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) {
4055  if (CGM.getLangOpts().OpenMPIRBuilder) {
4056  llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
4057  using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
4058  using BodyGenCallbackTy = llvm::OpenMPIRBuilder::StorableBodyGenCallbackTy;
4059 
4060  auto FiniCB = [this](InsertPointTy IP) {
4061  OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
4062  };
4063 
4064  const CapturedStmt *ICS = S.getInnermostCapturedStmt();
4065  const Stmt *CapturedStmt = S.getInnermostCapturedStmt()->getCapturedStmt();
4066  const auto *CS = dyn_cast<CompoundStmt>(CapturedStmt);
4067  llvm::SmallVector<BodyGenCallbackTy, 4> SectionCBVector;
4068  if (CS) {
4069  for (const Stmt *SubStmt : CS->children()) {
4070  auto SectionCB = [this, SubStmt](InsertPointTy AllocaIP,
4071  InsertPointTy CodeGenIP) {
4072  OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
4073  *this, SubStmt, AllocaIP, CodeGenIP, "section");
4074  };
4075  SectionCBVector.push_back(SectionCB);
4076  }
4077  } else {
4078  auto SectionCB = [this, CapturedStmt](InsertPointTy AllocaIP,
4079  InsertPointTy CodeGenIP) {
4080  OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
4081  *this, CapturedStmt, AllocaIP, CodeGenIP, "section");
4082  };
4083  SectionCBVector.push_back(SectionCB);
4084  }
4085 
4086  // Privatization callback that performs appropriate action for
4087  // shared/private/firstprivate/lastprivate/copyin/... variables.
4088  //
4089  // TODO: This defaults to shared right now.
4090  auto PrivCB = [](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
4091  llvm::Value &, llvm::Value &Val, llvm::Value *&ReplVal) {
4092  // The next line is appropriate only for variables (Val) with the
4093  // data-sharing attribute "shared".
4094  ReplVal = &Val;
4095 
4096  return CodeGenIP;
4097  };
4098 
4099  CGCapturedStmtInfo CGSI(*ICS, CR_OpenMP);
4100  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI);
4101  llvm::OpenMPIRBuilder::InsertPointTy AllocaIP(
4102  AllocaInsertPt->getParent(), AllocaInsertPt->getIterator());
4103  Builder.restoreIP(OMPBuilder.createSections(
4104  Builder, AllocaIP, SectionCBVector, PrivCB, FiniCB, S.hasCancel(),
4105  S.getSingleClause<OMPNowaitClause>()));
4106  return;
4107  }
4108  {
4109  auto LPCRegion =
4110  CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
4111  OMPLexicalScope Scope(*this, S, OMPD_unknown);
4112  EmitSections(S);
4113  }
4114  // Emit an implicit barrier at the end.
4115  if (!S.getSingleClause<OMPNowaitClause>()) {
4116  CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(),
4117  OMPD_sections);
4118  }
4119  // Check for outer lastprivate conditional update.
4120  checkForLastprivateConditionalUpdate(*this, S);
4121 }
4122 
4123 void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) {
4124  if (CGM.getLangOpts().OpenMPIRBuilder) {
4125  llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
4126  using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
4127 
4128  const Stmt *SectionRegionBodyStmt = S.getAssociatedStmt();
4129  auto FiniCB = [this](InsertPointTy IP) {
4130  OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
4131  };
4132 
4133  auto BodyGenCB = [SectionRegionBodyStmt, this](InsertPointTy AllocaIP,
4134  InsertPointTy CodeGenIP) {
4135  OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
4136  *this, SectionRegionBodyStmt, AllocaIP, CodeGenIP, "section");
4137  };
4138 
4139  LexicalScope Scope(*this, S.getSourceRange());
4140  EmitStopPoint(&S);
4141  Builder.restoreIP(OMPBuilder.createSection(Builder, BodyGenCB, FiniCB));
4142 
4143  return;
4144  }
4145  LexicalScope Scope(*this, S.getSourceRange());
4146  EmitStopPoint(&S);
4147  EmitStmt(S.getAssociatedStmt());
4148 }
4149 
4150 void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) {
4151  llvm::SmallVector<const Expr *, 8> CopyprivateVars;
4152  llvm::SmallVector<const Expr *, 8> DestExprs;
4153  llvm::SmallVector<const Expr *, 8> SrcExprs;
4154  llvm::SmallVector<const Expr *, 8> AssignmentOps;
4155  // Check if there are any 'copyprivate' clauses associated with this
4156  // 'single' construct.
4157  // Build a list of copyprivate variables along with helper expressions
4158  // (<source>, <destination>, <destination>=<source> expressions)
4159  for (const auto *C : S.getClausesOfKind<OMPCopyprivateClause>()) {
4160  CopyprivateVars.append(C->varlists().begin(), C->varlists().end());
4161  DestExprs.append(C->destination_exprs().begin(),
4162  C->destination_exprs().end());
4163  SrcExprs.append(C->source_exprs().begin(), C->source_exprs().end());
4164  AssignmentOps.append(C->assignment_ops().begin(),
4165  C->assignment_ops().end());
4166  }
4167  // Emit code for 'single' region along with 'copyprivate' clauses
4168  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4169  Action.Enter(CGF);
4170  OMPPrivateScope SingleScope(CGF);
4171  (void)CGF.EmitOMPFirstprivateClause(S, SingleScope);
4172  CGF.EmitOMPPrivateClause(S, SingleScope);
4173  (void)SingleScope.Privatize();
4174  CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
4175  };
4176  {
4177  auto LPCRegion =
4178  CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
4179  OMPLexicalScope Scope(*this, S, OMPD_unknown);
4180  CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getBeginLoc(),
4181  CopyprivateVars, DestExprs,
4182  SrcExprs, AssignmentOps);
4183  }
4184  // Emit an implicit barrier at the end (to avoid data race on firstprivate
4185  // init or if no 'nowait' clause was specified and no 'copyprivate' clause).
4186  if (!S.getSingleClause<OMPNowaitClause>() && CopyprivateVars.empty()) {
4187  CGM.getOpenMPRuntime().emitBarrierCall(
4188  *this, S.getBeginLoc(),
4189  S.getSingleClause<OMPNowaitClause>() ? OMPD_unknown : OMPD_single);
4190  }
4191  // Check for outer lastprivate conditional update.
4192  checkForLastprivateConditionalUpdate(*this, S);
4193 }
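// Editor's example of the 'copyprivate' broadcast (hypothetical 'token'):
//
//   int token;
//   #pragma omp single copyprivate(token)
//   token = next_token();   // hypothetical helper; runs on one thread
//
// The runtime copies 'token' to the other threads, which is why no extra
// barrier is emitted above when CopyprivateVars is non-empty.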
4194 
4195 static void emitMaster(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
4196  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4197  Action.Enter(CGF);
4198  CGF.EmitStmt(S.getRawStmt());
4199  };
4200  CGF.CGM.getOpenMPRuntime().emitMasterRegion(CGF, CodeGen, S.getBeginLoc());
4201 }
4202 
4203 void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) {
4204  if (CGM.getLangOpts().OpenMPIRBuilder) {
4205  llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
4206  using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
4207 
4208  const Stmt *MasterRegionBodyStmt = S.getAssociatedStmt();
4209 
4210  auto FiniCB = [this](InsertPointTy IP) {
4211  OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
4212  };
4213 
4214  auto BodyGenCB = [MasterRegionBodyStmt, this](InsertPointTy AllocaIP,
4215  InsertPointTy CodeGenIP) {
4216  OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
4217  *this, MasterRegionBodyStmt, AllocaIP, CodeGenIP, "master");
4218  };
4219 
4220  LexicalScope Scope(*this, S.getSourceRange());
4221  EmitStopPoint(&S);
4222  Builder.restoreIP(OMPBuilder.createMaster(Builder, BodyGenCB, FiniCB));
4223 
4224  return;
4225  }
4226  LexicalScope Scope(*this, S.getSourceRange());
4227  EmitStopPoint(&S);
4228  emitMaster(*this, S);
4229 }
4230 
4231 static void emitMasked(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
4232  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4233  Action.Enter(CGF);
4234  CGF.EmitStmt(S.getRawStmt());
4235  };
4236  Expr *Filter = nullptr;
4237  if (const auto *FilterClause = S.getSingleClause<OMPFilterClause>())
4238  Filter = FilterClause->getThreadID();
4239  CGF.CGM.getOpenMPRuntime().emitMaskedRegion(CGF, CodeGen, S.getBeginLoc(),
4240  Filter);
4241 }
4242 
4243 void CodeGenFunction::EmitOMPMaskedDirective(const OMPMaskedDirective &S) {
4244  if (CGM.getLangOpts().OpenMPIRBuilder) {
4245  llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
4246  using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
4247 
4248  const Stmt *MaskedRegionBodyStmt = S.getAssociatedStmt();
4249  const Expr *Filter = nullptr;
4250  if (const auto *FilterClause = S.getSingleClause<OMPFilterClause>())
4251  Filter = FilterClause->getThreadID();
4252  llvm::Value *FilterVal = Filter
4253  ? EmitScalarExpr(Filter, CGM.Int32Ty)
4254  : llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/0);
4255 
4256  auto FiniCB = [this](InsertPointTy IP) {
4257  OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
4258  };
4259 
4260  auto BodyGenCB = [MaskedRegionBodyStmt, this](InsertPointTy AllocaIP,
4261  InsertPointTy CodeGenIP) {
4262  OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
4263  *this, MaskedRegionBodyStmt, AllocaIP, CodeGenIP, "masked");
4264  };
4265 
4266  LexicalScope Scope(*this, S.getSourceRange());
4267  EmitStopPoint(&S);
4268  Builder.restoreIP(
4269  OMPBuilder.createMasked(Builder, BodyGenCB, FiniCB, FilterVal));
4270 
4271  return;
4272  }
4273  LexicalScope Scope(*this, S.getSourceRange());
4274  EmitStopPoint(&S);
4275  emitMasked(*this, S);
4276 }
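// Editor's example: the 'filter' clause selects the executing thread (thread
// 0 when absent):
//
//   #pragma omp masked filter(2)
//   update_log();           // hypothetical helper; runs on thread 2 only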
4277 
4278 void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) {
4279  if (CGM.getLangOpts().OpenMPIRBuilder) {
4280  llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
4281  using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
4282 
4283  const Stmt *CriticalRegionBodyStmt = S.getAssociatedStmt();
4284  const Expr *Hint = nullptr;
4285  if (const auto *HintClause = S.getSingleClause<OMPHintClause>())
4286  Hint = HintClause->getHint();
4287 
4288  // TODO: This is slightly different from what's currently being done in
4289  // clang. Fix the Int32Ty to IntPtrTy (pointer width size) when everything
4290  // about typing is final.
4291  llvm::Value *HintInst = nullptr;
4292  if (Hint)
4293  HintInst =
4294  Builder.CreateIntCast(EmitScalarExpr(Hint), CGM.Int32Ty, false);
4295 
4296  auto FiniCB = [this](InsertPointTy IP) {
4297  OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
4298  };
4299 
4300  auto BodyGenCB = [CriticalRegionBodyStmt, this](InsertPointTy AllocaIP,
4301  InsertPointTy CodeGenIP) {
4302  OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
4303  *this, CriticalRegionBodyStmt, AllocaIP, CodeGenIP, "critical");
4304  };
4305 
4306  LexicalScope Scope(*this, S.getSourceRange());
4307  EmitStopPoint(&S);
4308  Builder.restoreIP(OMPBuilder.createCritical(
4309  Builder, BodyGenCB, FiniCB, S.getDirectiveName().getAsString(),
4310  HintInst));
4311 
4312  return;
4313  }
4314 
4315  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4316  Action.Enter(CGF);
4317  CGF.EmitStmt(S.getAssociatedStmt());
4318  };
4319  const Expr *Hint = nullptr;
4320  if (const auto *HintClause = S.getSingleClause<OMPHintClause>())
4321  Hint = HintClause->getHint();
4322  LexicalScope Scope(*this, S.getSourceRange());
4323  EmitStopPoint(&S);
4324  CGM.getOpenMPRuntime().emitCriticalRegion(*this,
4325  S.getDirectiveName().getAsString(),
4326  CodeGen, S.getBeginLoc(), Hint);
4327 }
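// Editor's example: a named 'critical' with a hint,
//
//   #pragma omp critical(histo) hint(omp_sync_hint_speculative)
//   ++hist[bin];            // hypothetical shared update
//
// reaches this function with Hint set; the directive name keys the lock.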
4328 
4329 void CodeGenFunction::EmitOMPParallelForDirective(
4330  const OMPParallelForDirective &S) {
4331  // Emit directive as a combined directive that consists of two implicit
4332  // directives: 'parallel' with 'for' directive.
4333  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4334  Action.Enter(CGF);
4335  (void)emitWorksharingDirective(CGF, S, S.hasCancel());
4336  };
4337  {
4338  const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) {
4339  CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
4340  CGCapturedStmtInfo CGSI(CR_OpenMP);
4341  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGSI);
4342  OMPLoopScope LoopScope(CGF, S);
4343  return CGF.EmitScalarExpr(S.getNumIterations());
4344  };
4345  bool IsInscan = llvm::any_of(S.getClausesOfKind<OMPReductionClause>(),
4346  [](const OMPReductionClause *C) {
4347  return C->getModifier() == OMPC_REDUCTION_inscan;
4348  });
4349  if (IsInscan)
4350  emitScanBasedDirectiveDecls(*this, S, NumIteratorsGen);
4351  auto LPCRegion =
4352  CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
4353  emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen,
4354  emitEmptyBoundParameters);
4355  if (IsInscan)
4356  emitScanBasedDirectiveFinals(*this, S, NumIteratorsGen);
4357  }
4358  // Check for outer lastprivate conditional update.
4359  checkForLastprivateConditionalUpdate(*this, S);
4360 }
4361 
4362 void CodeGenFunction::EmitOMPParallelForSimdDirective(
4363  const OMPParallelForSimdDirective &S) {
4364  // Emit directive as a combined directive that consists of two implicit
4365  // directives: 'parallel' with 'for' directive.
4366  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4367  Action.Enter(CGF);
4368  (void)emitWorksharingDirective(CGF, S, /*HasCancel=*/false);
4369  };
4370  {
4371  const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) {
4372  CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
4373  CGCapturedStmtInfo CGSI(CR_OpenMP);
4374  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGSI);
4375  OMPLoopScope LoopScope(CGF, S);
4376  return CGF.EmitScalarExpr(S.getNumIterations());
4377  };
4378  bool IsInscan = llvm::any_of(S.getClausesOfKind<OMPReductionClause>(),
4379  [](const OMPReductionClause *C) {
4380  return C->getModifier() == OMPC_REDUCTION_inscan;
4381  });
4382  if (IsInscan)
4383  emitScanBasedDirectiveDecls(*this, S, NumIteratorsGen);
4384  auto LPCRegion =
4385  CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
4386  emitCommonOMPParallelDirective(*this, S, OMPD_for_simd, CodeGen,
4387  emitEmptyBoundParameters);
4388  if (IsInscan)
4389  emitScanBasedDirectiveFinals(*this, S, NumIteratorsGen);
4390  }
4391  // Check for outer lastprivate conditional update.
4392  checkForLastprivateConditionalUpdate(*this, S);
4393 }
4394 
4395 void CodeGenFunction::EmitOMPParallelMasterDirective(
4396  const OMPParallelMasterDirective &S) {
4397  // Emit directive as a combined directive that consists of two implicit
4398  // directives: 'parallel' with 'master' directive.
4399  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4400  Action.Enter(CGF);
4401  OMPPrivateScope PrivateScope(CGF);
4402  bool Copyins = CGF.EmitOMPCopyinClause(S);
4403  (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
4404  if (Copyins) {
4405  // Emit implicit barrier to synchronize threads and avoid data races on
4406  // propagation of the master thread's values of threadprivate variables
4407  // to the local instances of those variables in all other implicit threads.
4408  CGF.CGM.getOpenMPRuntime().emitBarrierCall(
4409  CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
4410  /*ForceSimpleCall=*/true);
4411  }
4412  CGF.EmitOMPPrivateClause(S, PrivateScope);
4413  CGF.EmitOMPReductionClauseInit(S, PrivateScope);
4414  (void)PrivateScope.Privatize();
4415  emitMaster(CGF, S);
4416  CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
4417  };
4418  {
4419  auto LPCRegion =
4420  CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
4421  emitCommonOMPParallelDirective(*this, S, OMPD_master, CodeGen,
4422  emitEmptyBoundParameters);
4423  emitPostUpdateForReductionClause(*this, S,
4424  [](CodeGenFunction &) { return nullptr; });
4425  }
4426  // Check for outer lastprivate conditional update.
4427  checkForLastprivateConditionalUpdate(*this, S);
4428 }
4429 
4430 void CodeGenFunction::EmitOMPParallelSectionsDirective(
4431  const OMPParallelSectionsDirective &S) {
4432  // Emit directive as a combined directive that consists of two implicit
4433  // directives: 'parallel' with 'sections' directive.
4434  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4435  Action.Enter(CGF);
4436  CGF.EmitSections(S);
4437  };
4438  {
4439  auto LPCRegion =
4440  CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
4441  emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen,
4442  emitEmptyBoundParameters);
4443  }
4444  // Check for outer lastprivate conditional update.
4445  checkForLastprivateConditionalUpdate(*this, S);
4446 }
4447 
4448 namespace {
4449 /// Get the list of variables declared in the context of the untied tasks.
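/// Ex.: for an untied task whose body declares 'int a; static int b;', only
/// 'a' is collected here; 'b' has static storage and is used as is.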
4450 class CheckVarsEscapingUntiedTaskDeclContext final
4451  : public ConstStmtVisitor<CheckVarsEscapingUntiedTaskDeclContext> {
4452  llvm::SmallVector<const VarDecl *, 4> PrivateDecls;
4453 
4454 public:
4455  explicit CheckVarsEscapingUntiedTaskDeclContext() = default;
4456  virtual ~CheckVarsEscapingUntiedTaskDeclContext() = default;
4457  void VisitDeclStmt(const DeclStmt *S) {
4458  if (!S)
4459  return;
4460  // Only local vars need privatizing; static locals can be used as is.
4461  for (const Decl *D : S->decls()) {
4462  if (const auto *VD = dyn_cast_or_null<VarDecl>(D))
4463  if (VD->hasLocalStorage())
4464  PrivateDecls.push_back(VD);
4465  }
4466  }
4467  void VisitOMPExecutableDirective(const OMPExecutableDirective *) {}
4468  void VisitCapturedStmt(const CapturedStmt *) {}
4469  void VisitLambdaExpr(const LambdaExpr *) {}
4470  void VisitBlockExpr(const BlockExpr *) {}
4471  void VisitStmt(const Stmt *S) {
4472  if (!S)
4473  return;
4474  for (const Stmt *Child : S->children())
4475  if (Child)
4476  Visit(Child);
4477  }
4478 
4479  /// Returns the list of local variables to be privatized.
4480  ArrayRef<const VarDecl *> getPrivateDecls() const { return PrivateDecls; }
4481 };
4482 } // anonymous namespace
4483 
4484 void CodeGenFunction::EmitOMPTaskBasedDirective(
4485  const OMPExecutableDirective &S, const OpenMPDirectiveKind CapturedRegion,
4486  const RegionCodeGenTy &BodyGen, const TaskGenTy &TaskGen,
4487  OMPTaskDataTy &Data) {
4488  // Emit outlined function for task construct.
4489  const CapturedStmt *CS = S.getCapturedStmt(CapturedRegion);
4490  auto I = CS->getCapturedDecl()->param_begin();
4491  auto PartId = std::next(I);
4492  auto TaskT = std::next(I, 4);
4493  // Check if the task is final
4494  if (const auto *Clause = S.getSingleClause<OMPFinalClause>()) {
4495  // If the condition constant folds and can be elided, try to avoid emitting
4496  // the condition and the dead arm of the if/else.
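  // Ex.: 'final(1)' folds to a constant, so only the integer flag is set
  // below; 'final(n > 10)' is instead evaluated to a boolean value stored in
  // the pointer part of Data.Final.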
4497  const Expr *Cond = Clause->getCondition();
4498  bool CondConstant;
4499  if (ConstantFoldsToSimpleInteger(Cond, CondConstant))
4500  Data.Final.setInt(CondConstant);
4501  else
4502  Data.Final.setPointer(EvaluateExprAsBool(Cond));
4503  } else {
4504  // By default the task is not final.
4505  Data.Final.setInt(/*IntVal=*/false);
4506  }
4507  // Check if the task has 'priority' clause.
4508  if (const auto *Clause = S.getSingleClause<OMPPriorityClause>()) {
4509  const Expr *Prio = Clause->getPriority();
4510  Data.Priority.setInt(/*IntVal=*/true);
4511  Data.Priority.setPointer(EmitScalarConversion(
4512  EmitScalarExpr(Prio), Prio->getType(),
4513  getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1),
4514  Prio->getExprLoc()));
4515  }
4516  // The first function argument for tasks is a thread id, the second one is a
4517  // part id (0 for tied tasks, >=0 for untied task).
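  // Ex. (sketch; parameter names are illustrative, not the emitted ones):
  //   void .omp_task_entry.(int tid, int part_id, void *privates,
  //                         void (*copy_fn)(...), void *task_t, ...);
  // PartId and TaskT above are taken from these consecutive parameters.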
4518  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
4519  // Get list of private variables.
4520  for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) {
4521  auto IRef = C->varlist_begin();
4522  for (const Expr *IInit : C->private_copies()) {
4523  const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
4524  if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
4525  Data.PrivateVars.push_back(*IRef);
4526  Data.PrivateCopies.push_back(IInit);
4527  }
4528  ++IRef;
4529  }
4530  }
4531  EmittedAsPrivate.clear();
4532  // Get list of firstprivate variables.
4533  for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
4534  auto IRef = C->varlist_begin();
4535  auto IElemInitRef = C->inits().begin();
4536  for (const Expr *IInit : C->private_copies()) {
4537  const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
4538  if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
4539  Data.FirstprivateVars.push_back(*IRef);
4540  Data.FirstprivateCopies.push_back(IInit);
4541  Data.FirstprivateInits.push_back(*IElemInitRef);
4542  }
4543  ++IRef;
4544  ++IElemInitRef;
4545  }
4546  }
4547  // Get list of lastprivate variables (for taskloops).
4548  llvm::MapVector<const VarDecl *, const DeclRefExpr *> LastprivateDstsOrigs;
4549  for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
4550  auto IRef = C->varlist_begin();
4551  auto ID = C->destination_exprs().begin();
4552  for (const Expr *IInit : C->private_copies()) {
4553  const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
4554  if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
4555  Data.LastprivateVars.push_back(*IRef);
4556  Data.LastprivateCopies.push_back(IInit);
4557  }
4558  LastprivateDstsOrigs.insert(
4559  std::make_pair(cast<VarDecl>(cast<DeclRefExpr>(*ID)->getDecl()),
4560  cast<DeclRefExpr>(*IRef)));
4561  ++IRef;
4562  ++ID;
4563  }
4564  }
4565  SmallVector<const Expr *, 4> LHSs;
4566  SmallVector<const Expr *, 4> RHSs;
4567  for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
4568  Data.ReductionVars.append(C->varlist_begin(), C->varlist_end());
4569  Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end());
4570  Data.ReductionCopies.append(C->privates().begin(), C->privates().end());
4571  Data.ReductionOps.append(C->reduction_ops().begin(),
4572  C->reduction_ops().end());
4573  LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
4574  RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
4575  }
4576  Data.Reductions = CGM.getOpenMPRuntime().emitTaskReductionInit(
4577  *this, S.getBeginLoc(), LHSs, RHSs, Data);
4578  // Build list of dependences.
4579  for (const auto *C : S.getClausesOfKind<OMPDependClause>()) {
4580  OMPTaskDataTy::DependData &DD =
4581  Data.Dependences.emplace_back(C->getDependencyKind(), C->getModifier());
4582  DD.DepExprs.append(C->varlist_begin(), C->varlist_end());
4583  }
4584  // Get list of local vars for untied tasks.
4585  if (!Data.Tied) {
4586  CheckVarsEscapingUntiedTaskDeclContext Checker;
4587  Checker.Visit(S.getInnermostCapturedStmt()->getCapturedStmt());
4588  Data.PrivateLocals.append(Checker.getPrivateDecls().begin(),
4589  Checker.getPrivateDecls().end());
4590  }
4591  auto &&CodeGen = [&Data, &S, CS, &BodyGen, &LastprivateDstsOrigs,
4592  CapturedRegion](CodeGenFunction &CGF,
4593  PrePostActionTy &Action) {
4594  llvm::MapVector<CanonicalDeclPtr<const VarDecl>,
4595  std::pair<Address, Address>>
4596  UntiedLocalVars;
4597  // Set proper addresses for generated private copies.
4598  OMPPrivateScope Scope(CGF);
4599  // Generate debug info for variables present in shared clause.
4600  if (auto *DI = CGF.getDebugInfo()) {
4601  llvm::SmallDenseMap<const VarDecl *, FieldDecl *> CaptureFields =
4602  CGF.CapturedStmtInfo->getCaptureFields();
4603  llvm::Value *ContextValue = CGF.CapturedStmtInfo->getContextValue();
4604  if (CaptureFields.size() && ContextValue) {
4605  unsigned CharWidth = CGF.getContext().getCharWidth();
4606  // The shared variables are packed together as members of a structure,
4607  // so the address of each shared variable can be computed by adding its
4608  // offset within the record to the base address of the record. For each
4609  // shared variable, the debug intrinsic llvm.dbg.declare is generated
4610  // with an appropriate expression (DIExpression).
4611  // Ex:
4612  // %12 = load %struct.anon*, %struct.anon** %__context.addr.i
4613  // call void @llvm.dbg.declare(metadata %struct.anon* %12,
4614  // metadata !svar1,
4615  // metadata !DIExpression(DW_OP_deref))
4616  // call void @llvm.dbg.declare(metadata %struct.anon* %12,
4617  // metadata !svar2,
4618  // metadata !DIExpression(DW_OP_plus_uconst, 8, DW_OP_deref))
4619  for (auto It = CaptureFields.begin(); It != CaptureFields.end(); ++It) {
4620  const VarDecl *SharedVar = It->first;
4621  RecordDecl *CaptureRecord = It->second->getParent();
4622  const ASTRecordLayout &Layout =
4623  CGF.getContext().getASTRecordLayout(CaptureRecord);
4624  unsigned Offset =
4625  Layout.getFieldOffset(It->second->getFieldIndex()) / CharWidth;
4626  if (CGF.CGM.getCodeGenOpts().hasReducedDebugInfo())
4627  (void)DI->EmitDeclareOfAutoVariable(SharedVar, ContextValue,
4628  CGF.Builder, false);
4629  llvm::Instruction &Last = CGF.Builder.GetInsertBlock()->back();
4630  // Get the dbg.declare call we just created and update its DIExpression
4631  // to add the offset to the base address.
4632  if (auto DDI = dyn_cast<llvm::DbgVariableIntrinsic>(&Last)) {
4633  SmallVector<uint64_t, 8> Ops;
4634  // Add the offset to the base address if it is non-zero.
4635  if (Offset) {
4636  Ops.push_back(llvm::dwarf::DW_OP_plus_uconst);
4637  Ops.push_back(Offset);
4638  }
4639  Ops.push_back(llvm::dwarf::DW_OP_deref);
4640  auto &Ctx = DDI->getContext();
4641  llvm::DIExpression *DIExpr = llvm::DIExpression::get(Ctx, Ops);
4642  Last.setOperand(2, llvm::MetadataAsValue::get(Ctx, DIExpr));
4643  }
4644  }
4645  }
4646  }
4647  llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> FirstprivatePtrs;
4648  if (!Data.PrivateVars.empty() || !Data.FirstprivateVars.empty() ||
4649  !Data.LastprivateVars.empty() || !Data.PrivateLocals.empty()) {
4650  enum { PrivatesParam = 2, CopyFnParam = 3 };
4651  llvm::Value *CopyFn = CGF.Builder.CreateLoad(
4652  CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam)));
4653  llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(
4654  CS->getCapturedDecl()->getParam(PrivatesParam)));
4655  // Map privates.
4656  llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs;
4657  llvm::SmallVector<llvm::Value *, 16> CallArgs;
4658  llvm::SmallVector<llvm::Type *, 4> ParamTypes;
4659  CallArgs.push_back(PrivatesPtr);
4660  ParamTypes.push_back(PrivatesPtr->getType());
4661  for (const Expr *E : Data.PrivateVars) {
4662  const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4663  Address PrivatePtr = CGF.CreateMemTemp(
4664  CGF.getContext().getPointerType(E->getType()), ".priv.ptr.addr");
4665  PrivatePtrs.emplace_back(VD, PrivatePtr);
4666  CallArgs.push_back(PrivatePtr.getPointer());
4667  ParamTypes.push_back(PrivatePtr.getType());
4668  }
4669  for (const Expr *E : Data.FirstprivateVars) {
4670  const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4671  Address PrivatePtr =
4672  CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
4673  ".firstpriv.ptr.addr");
4674  PrivatePtrs.emplace_back(VD, PrivatePtr);
4675  FirstprivatePtrs.emplace_back(VD, PrivatePtr);
4676  CallArgs.push_back(PrivatePtr.getPointer());
4677  ParamTypes.push_back(PrivatePtr.getType());
4678  }
4679  for (const Expr *E : Data.LastprivateVars) {
4680  const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4681  Address PrivatePtr =
4682  CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
4683  ".lastpriv.ptr.addr");
4684  PrivatePtrs.emplace_back(VD, PrivatePtr);
4685  CallArgs.push_back(PrivatePtr.getPointer());
4686  ParamTypes.push_back(PrivatePtr.getType());
4687  }
4688  for (const VarDecl *VD : Data.PrivateLocals) {
4689  QualType Ty = VD->getType().getNonReferenceType();
4690  if (VD->getType()->isLValueReferenceType())
4691  Ty = CGF.getContext().getPointerType(Ty);
4692  if (isAllocatableDecl(VD))
4693  Ty = CGF.getContext().getPointerType(Ty);
4694  Address PrivatePtr = CGF.CreateMemTemp(
4695  CGF.getContext().getPointerType(Ty), ".local.ptr.addr");
4696  auto Result = UntiedLocalVars.insert(
4697  std::make_pair(VD, std::make_pair(PrivatePtr, Address::invalid())));
4698  // If the key already exists, update the entry in place.
4699  if (!Result.second)
4700  *Result.first = std::make_pair(
4701  VD, std::make_pair(PrivatePtr, Address::invalid()));
4702  CallArgs.push_back(PrivatePtr.getPointer());
4703  ParamTypes.push_back(PrivatePtr.getType());
4704  }
4705  auto *CopyFnTy = llvm::FunctionType::get(CGF.Builder.getVoidTy(),
4706  ParamTypes, /*isVarArg=*/false);
4707  CopyFn = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4708  CopyFn, CopyFnTy->getPointerTo());
4709  CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
4710  CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs);
4711  for (const auto &Pair : LastprivateDstsOrigs) {
4712  const auto *OrigVD = cast<VarDecl>(Pair.second->getDecl());
4713  DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(OrigVD),
4714  /*RefersToEnclosingVariableOrCapture=*/
4715  CGF.CapturedStmtInfo->lookup(OrigVD) != nullptr,
4716  Pair.second->getType(), VK_LValue,
4717  Pair.second->getExprLoc());
4718  Scope.addPrivate(Pair.first, CGF.EmitLValue(&DRE).getAddress(CGF));
4719  }
4720  for (const auto &Pair : PrivatePtrs) {
4721  Address Replacement = Address(
4722  CGF.Builder.CreateLoad(Pair.second),
4723  CGF.ConvertTypeForMem(Pair.first->getType().getNonReferenceType()),
4724  CGF.getContext().getDeclAlign(Pair.first));
4725  Scope.addPrivate(Pair.first, Replacement);
4726  if (auto *DI = CGF.getDebugInfo())
4727  if (CGF.CGM.getCodeGenOpts().hasReducedDebugInfo())
4728  (void)DI->EmitDeclareOfAutoVariable(
4729  Pair.first, Pair.second.getPointer(), CGF.Builder,
4730  /*UsePointerValue*/ true);
4731  }
4732  // Adjust mapping for internal locals by mapping actual memory instead of
4733  // a pointer to this memory.
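    // Ex.: for an untied-task local 'x', '.local.ptr.addr' holds a pointer to
    // the runtime-managed storage of 'x'; the loads below remap 'x' to that
    // storage itself rather than to the pointer temporary.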
4734  for (auto &Pair : UntiedLocalVars) {
4735  QualType VDType = Pair.first->getType().getNonReferenceType();
4736  if (isAllocatableDecl(Pair.first)) {
4737  llvm::Value *Ptr = CGF.Builder.CreateLoad(Pair.second.first);
4738  Address Replacement(
4739  Ptr,
4740  CGF.ConvertTypeForMem(CGF.getContext().getPointerType(VDType)),
4741  CGF.getPointerAlign());
4742  Pair.second.first = Replacement;
4743  Ptr = CGF.Builder.CreateLoad(Replacement);
4744  Replacement = Address(Ptr, CGF.ConvertTypeForMem(VDType),
4745  CGF.getContext().getDeclAlign(Pair.first));
4746  Pair.second.second = Replacement;
4747  } else {
4748  llvm::Value *Ptr = CGF.Builder.CreateLoad(Pair.second.first);
4749  Address Replacement(Ptr, CGF.ConvertTypeForMem(VDType),
4750  CGF.getContext().getDeclAlign(Pair.first));
4751  Pair.second.first = Replacement;
4752  }
4753  }
4754  }
4755  if (Data.Reductions) {
4756  OMPPrivateScope FirstprivateScope(CGF);
4757  for (const auto &Pair : FirstprivatePtrs) {
4758  Address Replacement(
4759  CGF.Builder.CreateLoad(Pair.second),
4760  CGF.ConvertTypeForMem(Pair.first->getType().getNonReferenceType()),
4761  CGF.getContext().getDeclAlign(Pair.first));
4762  FirstprivateScope.addPrivate(Pair.first, Replacement);
4763  }
4764  (void)FirstprivateScope.Privatize();
4765  OMPLexicalScope LexScope(CGF, S, CapturedRegion);
4766  ReductionCodeGen RedCG(Data.ReductionVars, Data.ReductionVars,
4767  Data.ReductionCopies, Data.ReductionOps);
4768  llvm::Value *ReductionsPtr = CGF.Builder.CreateLoad(
4769  CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(9)));
4770  for (unsigned Cnt = 0, E = Data.ReductionVars.size(); Cnt < E; ++Cnt) {
4771  RedCG.emitSharedOrigLValue(CGF, Cnt);
4772  RedCG.emitAggregateType(CGF, Cnt);
4773  // FIXME: This must be removed once the runtime library is fixed.
4774  // Emit required threadprivate variables for
4775  // initializer/combiner/finalizer.
4776  CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
4777  RedCG, Cnt);
4778  Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
4779  CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
4780  Replacement =
4781  Address(CGF.EmitScalarConversion(
4782  Replacement.getPointer(), CGF.getContext().VoidPtrTy,
4783  CGF.getContext().getPointerType(
4784  Data.ReductionCopies[Cnt]->getType()),
4785  Data.ReductionCopies[Cnt]->getExprLoc()),
4786  CGF.ConvertTypeForMem(Data.ReductionCopies[Cnt]->getType()),
4787  Replacement.getAlignment());
4788  Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
4789  Scope.addPrivate(RedCG.getBaseDecl(Cnt), Replacement);
4790  }
4791  }
4792  // Privatize all private variables except for in_reduction items.
4793  (void)Scope.Privatize();
4794  SmallVector<const Expr *, 4> InRedVars;
4795  SmallVector<const Expr *, 4> InRedPrivs;
4796  SmallVector<const Expr *, 4> InRedOps;
4797  SmallVector<const Expr *, 4> TaskgroupDescriptors;
4798  for (const auto *C : S.getClausesOfKind<OMPInReductionClause>()) {
4799  auto IPriv = C->privates().begin();
4800  auto IRed = C->reduction_ops().begin();
4801  auto ITD = C->taskgroup_descriptors().begin();
4802  for (const Expr *Ref : C->varlists()) {
4803  InRedVars.emplace_back(Ref);
4804  InRedPrivs.emplace_back(*IPriv);
4805  InRedOps.emplace_back(*IRed);
4806  TaskgroupDescriptors.emplace_back(*ITD);
4807  std::advance(IPriv, 1);
4808  std::advance(IRed, 1);
4809  std::advance(ITD, 1);
4810  }
4811  }
4812  // Privatize in_reduction items here, because taskgroup descriptors must be
4813  // privatized earlier.
4814  OMPPrivateScope InRedScope(CGF);
4815  if (!InRedVars.empty()) {
4816  ReductionCodeGen RedCG(InRedVars, InRedVars, InRedPrivs, InRedOps);
4817  for (unsigned Cnt = 0, E = InRedVars.size(); Cnt < E; ++Cnt) {
4818  RedCG.emitSharedOrigLValue(CGF, Cnt);
4819  RedCG.emitAggregateType(CGF, Cnt);
4820  // The taskgroup descriptor variable is always implicitly firstprivate
4821  // and has already been privatized during firstprivate processing.
4822  // FIXME: This must be removed once the runtime library is fixed.
4823  // Emit required threadprivate variables for
4824  // initializer/combiner/finalizer.
4825  CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
4826  RedCG, Cnt);
4827  llvm::Value *ReductionsPtr;
4828  if (const Expr *TRExpr = TaskgroupDescriptors[Cnt]) {
4829  ReductionsPtr = CGF.EmitLoadOfScalar(CGF.EmitLValue(TRExpr),
4830  TRExpr->getExprLoc());
4831  } else {
4832  ReductionsPtr = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
4833  }
4834  Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
4835  CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
4836  Replacement = Address(
4837  CGF.EmitScalarConversion(
4838  Replacement.getPointer(), CGF.getContext().VoidPtrTy,
4839  CGF.getContext().getPointerType(InRedPrivs[Cnt]->getType()),
4840  InRedPrivs[Cnt]->getExprLoc()),
4841  CGF.ConvertTypeForMem(InRedPrivs[Cnt]->getType()),
4842  Replacement.getAlignment());
4843  Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
4844  InRedScope.addPrivate(RedCG.getBaseDecl(Cnt), Replacement);
4845  }
4846  }
4847  (void)InRedScope.Privatize();
4848 
4849  CGOpenMPRuntime::UntiedTaskLocalDeclsRAII LocalVarsScope(CGF,
4850  UntiedLocalVars);
4851  Action.Enter(CGF);
4852  BodyGen(CGF);
4853  };
4854  llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
4855  S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, Data.Tied,
4856  Data.NumberOfParts);
4857  OMPLexicalScope Scope(*this, S, llvm::None,
4858  !isOpenMPParallelDirective(S.getDirectiveKind()) &&
4859  !isOpenMPSimdDirective(S.getDirectiveKind()));
4860  TaskGen(*this, OutlinedFn, Data);
4861 }
4862 
4863 static ImplicitParamDecl *
4864 createImplicitFirstprivateForType(ASTContext &C, OMPTaskDataTy &Data,
4865  QualType Ty, CapturedDecl *CD,
4866  SourceLocation Loc) {
4867  auto *OrigVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, Ty,
4868  ImplicitParamDecl::Other);
4869  auto *OrigRef = DeclRefExpr::Create(
4870  C, NestedNameSpecifierLoc(), SourceLocation(), OrigVD,
4871  /*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue);
4872  auto *PrivateVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, Ty,
4873  ImplicitParamDecl::Other);
4874  auto *PrivateRef = DeclRefExpr::Create(
4875  C, NestedNameSpecifierLoc(), SourceLocation(), PrivateVD,
4876  /*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue);
4877  QualType ElemType = C.getBaseElementType(Ty);
4878  auto *InitVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, ElemType,
4879  ImplicitParamDecl::Other);
4880  auto *InitRef = DeclRefExpr::Create(
4881  C, NestedNameSpecifierLoc(), SourceLocation(), InitVD,
4882  /*RefersToEnclosingVariableOrCapture=*/false, Loc, ElemType, VK_LValue);
4883  PrivateVD->setInitStyle(VarDecl::CInit);
4884  PrivateVD->setInit(ImplicitCastExpr::Create(C, ElemType, CK_LValueToRValue,
4885  InitRef, /*BasePath=*/nullptr,
4886  VK_PRValue, FPOptionsOverride()));
4887  Data.FirstprivateVars.emplace_back(OrigRef);
4888  Data.FirstprivateCopies.emplace_back(PrivateRef);
4889  Data.FirstprivateInits.emplace_back(InitRef);
4890  return OrigVD;
4891 }
4892 
4893 void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
4894  const OMPExecutableDirective &S, const RegionCodeGenTy &BodyGen,
4895  OMPTargetDataInfo &InputInfo) {
4896  // Emit outlined function for task construct.
4897  const CapturedStmt *CS = S.getCapturedStmt(OMPD_task);
4898  Address CapturedStruct = GenerateCapturedStmtArgument(*CS);
4899  QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
4900  auto I = CS->getCapturedDecl()->param_begin();
4901  auto PartId = std::next(I);
4902  auto TaskT = std::next(I, 4);
4903  OMPTaskDataTy Data;
4904  // The task is not final.
4905  Data.Final.setInt(/*IntVal=*/false);
4906  // Get list of firstprivate variables.
4907  for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
4908  auto IRef = C->varlist_begin();
4909  auto IElemInitRef = C->inits().begin();
4910  for (auto *IInit : C->private_copies()) {
4911  Data.FirstprivateVars.push_back(*IRef);
4912  Data.FirstprivateCopies.push_back(IInit);
4913  Data.FirstprivateInits.push_back(*IElemInitRef);
4914  ++IRef;
4915  ++IElemInitRef;
4916  }
4917  }
4918  OMPPrivateScope TargetScope(*this);
4919  VarDecl *BPVD = nullptr;
4920  VarDecl *PVD = nullptr;
4921  VarDecl *SVD = nullptr;
4922  VarDecl *MVD = nullptr;
4923  if (InputInfo.NumberOfTargetItems > 0) {
4924  auto *CD = CapturedDecl::Create(
4925  getContext(), getContext().getTranslationUnitDecl(), /*NumParams=*/0);
4926  llvm::APInt ArrSize(/*numBits=*/32, InputInfo.NumberOfTargetItems);
4927  QualType BaseAndPointerAndMapperType = getContext().getConstantArrayType(
4928  getContext().VoidPtrTy, ArrSize, nullptr, ArrayType::Normal,
4929  /*IndexTypeQuals=*/0);
4930  BPVD = createImplicitFirstprivateForType(
4931  getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc());
4932  PVD = createImplicitFirstprivateForType(
4933  getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc());
4934  QualType SizesType = getContext().getConstantArrayType(
4935  getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1),
4936  ArrSize, nullptr, ArrayType::Normal,
4937  /*IndexTypeQuals=*/0);
4938  SVD = createImplicitFirstprivateForType(getContext(), Data, SizesType, CD,
4939  S.getBeginLoc());
4940  TargetScope.addPrivate(BPVD, InputInfo.BasePointersArray);
4941  TargetScope.addPrivate(PVD, InputInfo.PointersArray);
4942  TargetScope.addPrivate(SVD, InputInfo.SizesArray);
4943  // If there is no user-defined mapper, the mapper array will be nullptr. In
4944  // this case, we don't need to privatize it.
4945  if (!isa_and_nonnull<llvm::ConstantPointerNull>(
4946  InputInfo.MappersArray.getPointer())) {
4947  MVD = createImplicitFirstprivateForType(
4948  getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc());
4949  TargetScope.addPrivate(MVD, InputInfo.MappersArray);
4950  }
4951  }
4952  (void)TargetScope.Privatize();
4953  // Build list of dependences.
4954  for (const auto *C : S.getClausesOfKind<OMPDependClause>()) {
4955  OMPTaskDataTy::DependData &DD =
4956  Data.Dependences.emplace_back(C->getDependencyKind(), C->getModifier());
4957  DD.DepExprs.append(C->varlist_begin(), C->varlist_end());
4958  }
4959  auto &&CodeGen = [&Data, &S, CS, &BodyGen, BPVD, PVD, SVD, MVD,
4960  &InputInfo](CodeGenFunction &CGF, PrePostActionTy &Action) {
4961  // Set proper addresses for generated private copies.
4962  OMPPrivateScope Scope(CGF);
4963  if (!Data.FirstprivateVars.empty()) {
4964  enum { PrivatesParam = 2, CopyFnParam = 3 };
4965  llvm::Value *CopyFn = CGF.Builder.CreateLoad(
4966  CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam)));
4967  llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(
4968  CS->getCapturedDecl()->getParam(PrivatesParam)));
4969  // Map privates.
4970  llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs;
4971  llvm::SmallVector<llvm::Value *, 16> CallArgs;
4972  llvm::SmallVector<llvm::Type *, 4> ParamTypes;
4973  CallArgs.push_back(PrivatesPtr);
4974  ParamTypes.push_back(PrivatesPtr->getType());
4975  for (const Expr *E : Data.FirstprivateVars) {
4976  const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4977  Address PrivatePtr =
4978  CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
4979  ".firstpriv.ptr.addr");
4980  PrivatePtrs.emplace_back(VD, PrivatePtr);
4981  CallArgs.push_back(PrivatePtr.getPointer());
4982  ParamTypes.push_back(PrivatePtr.getType());
4983  }
4984  auto *CopyFnTy = llvm::FunctionType::get(CGF.Builder.getVoidTy(),
4985  ParamTypes, /*isVarArg=*/false);
4986  CopyFn = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4987  CopyFn, CopyFnTy->getPointerTo());
4988  CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
4989  CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs);
4990  for (const auto &Pair : PrivatePtrs) {
4991  Address Replacement(
4992  CGF.Builder.CreateLoad(Pair.second),
4993  CGF.ConvertTypeForMem(Pair.first->getType().getNonReferenceType()),
4994  CGF.getContext().getDeclAlign(Pair.first));
4995  Scope.addPrivate(Pair.first, Replacement);
4996  }
4997  }
4998  // Privatize all private variables except for in_reduction items.
4999  (void)Scope.Privatize();
5000  if (InputInfo.NumberOfTargetItems > 0) {
5001  InputInfo.BasePointersArray = CGF.Builder.CreateConstArrayGEP(
5002  CGF.GetAddrOfLocalVar(BPVD), /*Index=*/0);
5003  InputInfo.PointersArray = CGF.Builder.CreateConstArrayGEP(
5004  CGF.GetAddrOfLocalVar(PVD), /*Index=*/0);
5005  InputInfo.SizesArray = CGF.Builder.CreateConstArrayGEP(
5006  CGF.GetAddrOfLocalVar(SVD), /*Index=*/0);
5007  // If MVD is nullptr, the mapper array was not privatized above.
5008  if (MVD)
5009  InputInfo.MappersArray = CGF.Builder.CreateConstArrayGEP(
5010  CGF.GetAddrOfLocalVar(MVD), /*Index=*/0);
5011  }
5012 
5013  Action.Enter(CGF);
5014  OMPLexicalScope LexScope(CGF, S, OMPD_task, /*EmitPreInitStmt=*/false);
5015  BodyGen(CGF);
5016  };
5017  llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
5018  S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, /*Tied=*/true,
5019  Data.NumberOfParts);
5020  llvm::APInt TrueOrFalse(32, S.hasClausesOfKind<OMPNowaitClause>() ? 1 : 0);
5021  IntegerLiteral IfCond(getContext(), TrueOrFalse,
5022  getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
5023  SourceLocation());
5024 
5025  CGM.getOpenMPRuntime().emitTaskCall(*this, S.getBeginLoc(), S, OutlinedFn,
5026  SharedsTy, CapturedStruct, &IfCond, Data);
5027 }
5028 
5029 void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
5030  // Emit outlined function for task construct.
5031  const CapturedStmt *CS = S.getCapturedStmt(OMPD_task);
5032  Address CapturedStruct = GenerateCapturedStmtArgument(*CS);
5033  QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
5034  const Expr *IfCond = nullptr;
5035  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
5036  if (C->getNameModifier() == OMPD_unknown ||
5037  C->getNameModifier() == OMPD_task) {
5038  IfCond = C->getCondition();
5039  break;
5040  }
5041  }
5042 
5043  OMPTaskDataTy Data;
5044  // Check if we should emit tied or untied task.
5045  Data.Tied = !S.getSingleClause<OMPUntiedClause>();
5046  auto &&BodyGen = [CS](CodeGenFunction &CGF, PrePostActionTy &) {
5047  CGF.EmitStmt(CS->getCapturedStmt());
5048  };
5049  auto &&TaskGen = [&S, SharedsTy, CapturedStruct,
5050  IfCond](CodeGenFunction &CGF, llvm::Function *OutlinedFn,
5051  const OMPTaskDataTy &Data) {
5052  CGF.CGM.getOpenMPRuntime().emitTaskCall(CGF, S.getBeginLoc(), S, OutlinedFn,
5053  SharedsTy, CapturedStruct, IfCond,
5054  Data);
5055  };
5056  auto LPCRegion =
5057  CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
5058  EmitOMPTaskBasedDirective(S, OMPD_task, BodyGen, TaskGen, Data);
5059 }
5060 
5061 void CodeGenFunction::EmitOMPTaskyieldDirective(
5062  const OMPTaskyieldDirective &S) {
5063  CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getBeginLoc());
5064 }
5065 
5066 void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) {
5067  CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_barrier);
5068 }
5069 
5070 void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) {
5071  OMPTaskDataTy Data;
5072  // Build list of dependences.
5073  for (const auto *C : S.getClausesOfKind<OMPDependClause>()) {
5074  OMPTaskDataTy::DependData &DD =
5075  Data.Dependences.emplace_back(C->getDependencyKind(), C->getModifier());
5076  DD.DepExprs.append(C->varlist_begin(), C->varlist_end());
5077  }
5078  CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getBeginLoc(), Data);
5079 }
5080 
5081 void CodeGenFunction::EmitOMPTaskgroupDirective(
5082  const OMPTaskgroupDirective &S) {
5083  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
5084  Action.Enter(CGF);
5085  if (const Expr *E = S.getReductionRef()) {
5086  SmallVector<const Expr *, 4> LHSs;
5087  SmallVector<const Expr *, 4> RHSs;
5088  OMPTaskDataTy Data;
5089  for (const auto *C : S.getClausesOfKind<OMPTaskReductionClause>()) {
5090  Data.ReductionVars.append(C->varlist_begin(), C->varlist_end());
5091  Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end());
5092  Data.ReductionCopies.append(C->privates().begin(), C->privates().end());
5093  Data.ReductionOps.append(C->reduction_ops().begin(),
5094  C->reduction_ops().end());
5095  LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
5096  RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
5097  }
5098  llvm::Value *ReductionDesc =
5099  CGF.CGM.getOpenMPRuntime().emitTaskReductionInit(CGF, S.getBeginLoc(),
5100  LHSs, RHSs, Data);
5101  const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
5102  CGF.EmitVarDecl(*VD);
5103  CGF.EmitStoreOfScalar(ReductionDesc, CGF.GetAddrOfLocalVar(VD),
5104  /*Volatile=*/false, E->getType());
5105  }
5106  CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
5107  };
5108  OMPLexicalScope Scope(*this, S, OMPD_unknown);
5109  CGM.getOpenMPRuntime().emitTaskgroupRegion(*this, CodeGen, S.getBeginLoc());
5110 }
5111 
5112 void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) {
5113  llvm::AtomicOrdering AO = S.getSingleClause<OMPFlushClause>()
5114  ? llvm::AtomicOrdering::NotAtomic
5115  : llvm::AtomicOrdering::AcquireRelease;
5116  CGM.getOpenMPRuntime().emitFlush(
5117  *this,
5118  [&S]() -> ArrayRef<const Expr *> {
5119  if (const auto *FlushClause = S.getSingleClause<OMPFlushClause>())
5120  return llvm::makeArrayRef(FlushClause->varlist_begin(),
5121  FlushClause->varlist_end());
5122  return llvm::None;
5123  }(),
5124  S.getBeginLoc(), AO);
5125 }
5126 
5127 void CodeGenFunction::EmitOMPDepobjDirective(const OMPDepobjDirective &S) {
5128  const auto *DO = S.getSingleClause<OMPDepobjClause>();
5129  LValue DOLVal = EmitLValue(DO->getDepobj());
5130  if (const auto *DC = S.getSingleClause<OMPDependClause>()) {
5131  OMPTaskDataTy::DependData Dependencies(DC->getDependencyKind(),
5132  DC->getModifier());
5133  Dependencies.DepExprs.append(DC->varlist_begin(), DC->varlist_end());
5134  Address DepAddr = CGM.getOpenMPRuntime().emitDepobjDependClause(
5135  *this, Dependencies, DC->getBeginLoc());
5136  EmitStoreOfScalar(DepAddr.getPointer(), DOLVal);
5137  return;
5138  }
5139  if (const auto *DC = S.getSingleClause<OMPDestroyClause>()) {
5140  CGM.getOpenMPRuntime().emitDestroyClause(*this, DOLVal, DC->getBeginLoc());
5141  return;
5142  }
5143  if (const auto *UC = S.getSingleClause<OMPUpdateClause>()) {
5144  CGM.getOpenMPRuntime().emitUpdateClause(
5145  *this, DOLVal, UC->getDependencyKind(), UC->getBeginLoc());
5146  return;
5147  }
5148 }
5149 
5150 void CodeGenFunction::EmitOMPScanDirective(const OMPScanDirective &S) {
5151  if (!OMPParentLoopDirectiveForScan)
5152  return;
5153  const OMPExecutableDirective &ParentDir = *OMPParentLoopDirectiveForScan;
5154  bool IsInclusive = S.hasClausesOfKind<OMPInclusiveClause>();
5155  SmallVector<const Expr *, 4> Shareds;
5156  SmallVector<const Expr *, 4> Privates;
5157  SmallVector<const Expr *, 4> LHSs;
5158  SmallVector<const Expr *, 4> RHSs;
5159  SmallVector<const Expr *, 4> ReductionOps;
5160  SmallVector<const Expr *, 4> CopyOps;
5161  SmallVector<const Expr *, 4> CopyArrayTemps;
5162  SmallVector<const Expr *, 4> CopyArrayElems;
5163  for (const auto *C : ParentDir.getClausesOfKind<OMPReductionClause>()) {
5164  if (C->getModifier() != OMPC_REDUCTION_inscan)
5165  continue;
5166  Shareds.append(C->varlist_begin(), C->varlist_end());
5167  Privates.append(C->privates().begin(), C->privates().end());
5168  LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
5169  RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
5170  ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
5171  CopyOps.append(C->copy_ops().begin(), C->copy_ops().end());
5172  CopyArrayTemps.append(C->copy_array_temps().begin(),
5173  C->copy_array_temps().end());
5174  CopyArrayElems.append(C->copy_array_elems().begin(),
5175  C->copy_array_elems().end());
5176  }
5177  if (ParentDir.getDirectiveKind() == OMPD_simd ||
5178  (getLangOpts().OpenMPSimd &&
5179  isOpenMPSimdDirective(ParentDir.getDirectiveKind()))) {
5180  // For simd directive and simd-based directives in simd only mode, use the
5181  // following codegen:
5182  // int x = 0;
5183  // #pragma omp simd reduction(inscan, +: x)
5184  // for (..) {
5185  // <first part>
5186  // #pragma omp scan inclusive(x)
5187  // <second part>
5188  // }
5189  // is transformed to:
5190  // int x = 0;
5191  // for (..) {
5192  // int x_priv = 0;
5193  // <first part>
5194  // x = x_priv + x;
5195  // x_priv = x;
5196  // <second part>
5197  // }
5198  // and
5199  // int x = 0;
5200  // #pragma omp simd reduction(inscan, +: x)
5201  // for (..) {
5202  // <first part>
5203  // #pragma omp scan exclusive(x)
5204  // <second part>
5205  // }
5206  // to
5207  // int x = 0;
5208  // for (..) {
5209  // int x_priv = 0;
5210  // <second part>
5211  // int temp = x;
5212  // x = x_priv + x;
5213  // x_priv = temp;
5214  // <first part>
5215  // }
5216  llvm::BasicBlock *OMPScanReduce = createBasicBlock("omp.inscan.reduce");
5217  EmitBranch(IsInclusive
5218  ? OMPScanReduce
5219  : BreakContinueStack.back().ContinueBlock.getBlock());
5220  EmitBlock(OMPScanDispatch);
5221  {
5222  // New scope for correct construction/destruction of temp variables for
5223  // exclusive scan.
5224  LexicalScope Scope(*this, S.getSourceRange());
5225  EmitBranch(IsInclusive ? OMPBeforeScanBlock : OMPAfterScanBlock);
5226  EmitBlock(OMPScanReduce);
5227  if (!IsInclusive) {
5228  // Create temp var and copy LHS value to this temp value.
5229  // TMP = LHS;
5230  for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
5231  const Expr *PrivateExpr = Privates[I];
5232  const Expr *TempExpr = CopyArrayTemps[I];
5233  EmitAutoVarDecl(
5234  *cast<VarDecl>(cast<DeclRefExpr>(TempExpr)->getDecl()));
5235  LValue DestLVal = EmitLValue(TempExpr);
5236  LValue SrcLVal = EmitLValue(LHSs[I]);
5237  EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
5238  SrcLVal.getAddress(*this),
5239  cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
5240  cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
5241  CopyOps[I]);
5242  }
5243  }
5244  CGM.getOpenMPRuntime().emitReduction(
5245  *this, ParentDir.getEndLoc(), Privates, LHSs, RHSs, ReductionOps,
5246  {/*WithNowait=*/true, /*SimpleReduction=*/true, OMPD_simd});
5247  for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
5248  const Expr *PrivateExpr = Privates[I];
5249  LValue DestLVal;
5250  LValue SrcLVal;
5251  if (IsInclusive) {
5252  DestLVal = EmitLValue(RHSs[I]);
5253  SrcLVal = EmitLValue(LHSs[I]);
5254  } else {
5255  const Expr *TempExpr = CopyArrayTemps[I];
5256  DestLVal = EmitLValue(RHSs[I]);
5257  SrcLVal = EmitLValue(TempExpr);
5258  }
5259  EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
5260  SrcLVal.getAddress(*this),
5261  cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
5262  cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
5263  CopyOps[I]);
5264  }
5265  }
5266  EmitBranch(IsInclusive ? OMPAfterScanBlock : OMPBeforeScanBlock);
5267  OMPScanExitBlock = IsInclusive
5268  ? BreakContinueStack.back().ContinueBlock.getBlock()
5269  : OMPScanReduce;
5270  EmitBlock(OMPAfterScanBlock);
5271  return;
5272  }
5273  if (!IsInclusive) {
5274  EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock());
5275  EmitBlock(OMPScanExitBlock);
5276  }
5277  if (OMPFirstScanLoop) {
5278  // Emit buffer[i] = red; at the end of the input phase.
5279  const auto *IVExpr = cast<OMPLoopDirective>(ParentDir)
5280  .getIterationVariable()
5281  ->IgnoreParenImpCasts();
5282  LValue IdxLVal = EmitLValue(IVExpr);
5283  llvm::Value *IdxVal = EmitLoadOfScalar(IdxLVal, IVExpr->getExprLoc());
5284  IdxVal = Builder.CreateIntCast(IdxVal, SizeTy, /*isSigned=*/false);
5285  for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
5286  const Expr *PrivateExpr = Privates[I];
5287  const Expr *OrigExpr = Shareds[I];
5288  const Expr *CopyArrayElem = CopyArrayElems[I];
5289  OpaqueValueMapping IdxMapping(
5290  *this,
5291  cast<OpaqueValueExpr>(
5292  cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
5293  RValue::get(IdxVal));
5294  LValue DestLVal = EmitLValue(CopyArrayElem);
5295  LValue SrcLVal = EmitLValue(OrigExpr);
5296  EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
5297  SrcLVal.getAddress(*this),
5298  cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
5299  cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
5300  CopyOps[I]);
5301  }
5302  }
5303  EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock());
5304  if (IsInclusive) {
5305  EmitBlock(OMPScanExitBlock);
5306  EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock());
5307  }
5308  EmitBlock(OMPScanDispatch);
5309  if (!OMPFirstScanLoop) {
5310  // Emit red = buffer[i]; at the entrance to the scan phase.
5311  const auto *IVExpr = cast<OMPLoopDirective>(ParentDir)
5312  .getIterationVariable()
5313  ->IgnoreParenImpCasts();
5314  LValue IdxLVal = EmitLValue(IVExpr);
5315  llvm::Value *IdxVal = EmitLoadOfScalar(IdxLVal, IVExpr->getExprLoc());
5316  IdxVal = Builder.CreateIntCast(IdxVal, SizeTy, /*isSigned=*/false);
5317  llvm::BasicBlock *ExclusiveExitBB = nullptr;
5318  if (!IsInclusive) {
5319  llvm::BasicBlock *ContBB = createBasicBlock("omp.exclusive.dec");
5320  ExclusiveExitBB = createBasicBlock("omp.exclusive.copy.exit");
5321  llvm::Value *Cmp = Builder.CreateIsNull(IdxVal);
5322  Builder.CreateCondBr(Cmp, ExclusiveExitBB, ContBB);
5323  EmitBlock(ContBB);
5324  // Use idx - 1 iteration for exclusive scan.
5325  IdxVal = Builder.CreateNUWSub(IdxVal, llvm::ConstantInt::get(SizeTy, 1));
5326  }
5327  for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
5328  const Expr *PrivateExpr = Privates[I];
5329  const Expr *OrigExpr = Shareds[I];
5330  const Expr *CopyArrayElem = CopyArrayElems[I];
5331  OpaqueValueMapping IdxMapping(
5332  *this,
5333  cast<OpaqueValueExpr>(
5334  cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
5335  RValue::get(IdxVal));
5336  LValue SrcLVal = EmitLValue(CopyArrayElem);
5337  LValue DestLVal = EmitLValue(OrigExpr);
5338  EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
5339  SrcLVal.getAddress(*this),
5340  cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
5341  cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
5342  CopyOps[I]);
5343  }
5344  if (!IsInclusive) {
5345  EmitBlock(ExclusiveExitBB);
5346  }
5347  }
5348  EmitBranch((OMPFirstScanLoop == IsInclusive) ? OMPBeforeScanBlock
5349  : OMPAfterScanBlock);
5350  EmitBlock(OMPAfterScanBlock);
5351 }
5352 
5353 void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
5354  const CodeGenLoopTy &CodeGenLoop,
5355  Expr *IncExpr) {
5356  // Emit the loop iteration variable.
5357  const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
5358  const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl());
5359  EmitVarDecl(*IVDecl);
5360 
5361  // Emit the iterations count variable.
5362  // If it is not a variable, Sema decided to calculate the iteration count
5363  // on each iteration (e.g., it is foldable into a constant).
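  // Ex.: for 'for (int i = 0; i < n; ++i)' the trip count is 'n'; Sema
  // materializes it into a helper variable whose initializer is emitted just
  // below via getCalcLastIteration().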
5364  if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
5365  EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
5366  // Emit calculation of the iterations count.
5367  EmitIgnoredExpr(S.getCalcLastIteration());
5368  }
5369 
5370  CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();
5371 
5372  bool HasLastprivateClause = false;
5373  // Check pre-condition.
5374  {
5375  OMPLoopScope PreInitScope(*this, S);
5376  // Skip the entire loop if we don't meet the precondition.
5377  // If the condition constant folds and can be elided, avoid emitting the
5378  // whole loop.
5379  bool CondConstant;
5380  llvm::BasicBlock *ContBlock = nullptr;
5381  if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
5382  if (!CondConstant)
5383  return;
5384  } else {
5385  llvm::BasicBlock *ThenBlock = createBasicBlock("omp.precond.then");
5386  ContBlock = createBasicBlock("omp.precond.end");
5387  emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
5388  getProfileCount(&S));
5389  EmitBlock(ThenBlock);
5390  incrementProfileCounter(&S);
5391  }
5392 
5393  emitAlignedClause(*this, S);
5394  // Emit 'then' code.
5395  {
5396  // Emit helper vars inits.
5397 
5398  LValue LB = EmitOMPHelperVar(
5399  *this, cast<DeclRefExpr>(
5400  (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
5401  ? S.getCombinedLowerBoundVariable()
5402  : S.getLowerBoundVariable())));
5403  LValue UB = EmitOMPHelperVar(
5404  *this, cast<DeclRefExpr>(
5405  (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
5406  ? S.getCombinedUpperBoundVariable()
5407  : S.getUpperBoundVariable())));
5408  LValue ST =
5409  EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
5410  LValue IL =
5411  EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));
5412 
5413  OMPPrivateScope LoopScope(*this);
5414  if (EmitOMPFirstprivateClause(S, LoopScope)) {
5415  // Emit implicit barrier to synchronize threads and avoid data races
5416  // on initialization of firstprivate variables and post-update of
5417  // lastprivate variables.
5418  CGM.getOpenMPRuntime().emitBarrierCall(
5419  *this, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
5420  /*ForceSimpleCall=*/true);
5421  }
5422  EmitOMPPrivateClause(S, LoopScope);
5423  if (isOpenMPSimdDirective(S.getDirectiveKind()) &&
5424  !isOpenMPParallelDirective(S.getDirectiveKind()) &&
5425  !isOpenMPTeamsDirective(S.getDirectiveKind()))
5426  EmitOMPReductionClauseInit(S, LoopScope);
5427  HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
5428  EmitOMPPrivateLoopCounters(S, LoopScope);
5429  (void)LoopScope.Privatize();
5430  if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
5431  CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S);
5432 
5433  // Detect the distribute schedule kind and chunk.
5434  llvm::Value *Chunk = nullptr;
5435  OpenMPDistScheduleClauseKind ScheduleKind = OMPC_DIST_SCHEDULE_unknown;
5436  if (const auto *C = S.getSingleClause<OMPDistScheduleClause>()) {
5437  ScheduleKind = C->getDistScheduleKind();
5438  if (const Expr *Ch = C->getChunkSize()) {
5439  Chunk = EmitScalarExpr(Ch);
5440  Chunk = EmitScalarConversion(Chunk, Ch->getType(),
5441  S.getIterationVariable()->getType(),
5442  S.getBeginLoc());
5443  }
5444  } else {
5445  // Default behaviour for dist_schedule clause.
5446  CGM.getOpenMPRuntime().getDefaultDistScheduleAndChunk(
5447  *this, S, ScheduleKind, Chunk);
5448  }
5449  const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
5450  const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
5451 
5452  // OpenMP [2.10.8, distribute Construct, Description]
5453  // If dist_schedule is specified, kind must be static. If specified,
5454  // iterations are divided into chunks of size chunk_size, chunks are
5455  // assigned to the teams of the league in a round-robin fashion in the
5456  // order of the team number. When no chunk_size is specified, the
5457  // iteration space is divided into chunks that are approximately equal
5458  // in size, and at most one chunk is distributed to each team of the
5459  // league. The size of the chunks is unspecified in this case.
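  // Ex.: 'dist_schedule(static, 4)' hands chunks of four iterations to the
  // teams in round-robin order, while plain 'dist_schedule(static)' gives
  // each team at most one approximately equal-sized chunk.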
5460  bool StaticChunked =
5461  RT.isStaticChunked(ScheduleKind, /* Chunked */ Chunk != nullptr) &&
5462  isOpenMPLoopBoundSharingDirective(S.getDirectiveKind());
5463  if (RT.isStaticNonchunked(ScheduleKind,
5464  /* Chunked */ Chunk != nullptr) ||
5465  StaticChunked) {
5466  CGOpenMPRuntime::StaticRTInput StaticInit(
5467  IVSize, IVSigned, /* Ordered = */ false, IL.getAddress(*this),
5468  LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this),
5469  StaticChunked ? Chunk : nullptr);
5470  RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind,
5471  StaticInit);
5472  JumpDest LoopExit =
5473  getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
5474  // UB = min(UB, GlobalUB);
5475  EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
5476  ? S.getCombinedEnsureUpperBound()
5477  : S.getEnsureUpperBound());
5478  // IV = LB;
5479  EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
5480  ? S.getCombinedInit()
5481  : S.getInit());
5482 
5483  const Expr *Cond =
5484  isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
5485  ? S.getCombinedCond()
5486  : S.getCond();
5487 
5488  if (StaticChunked)
5489  Cond = S.getCombinedDistCond();
5490 
5491  // For static unchunked schedules generate:
5492  //
5493  // 1. For distribute alone, codegen
5494  // while (idx <= UB) {
5495  // BODY;
5496  // ++idx;
5497  // }
5498  //
5499  // 2. When combined with 'for' (e.g. as in 'distribute parallel for')
5500  // while (idx <= UB) {
5501  // <CodeGen rest of pragma>(LB, UB);
5502  // idx += ST;
5503  // }
5504  //
5505  // For static chunked schedules generate:
5506  //
5507  // while (IV <= GlobalUB) {
5508  // <CodeGen rest of pragma>(LB, UB);
5509  // LB += ST;
5510  // UB += ST;
5511  // UB = min(UB, GlobalUB);
5512  // IV = LB;
5513  // }
5514  //
5515  emitCommonSimdLoop(
5516  *this, S,
5517  [&S](CodeGenFunction &CGF, PrePostActionTy &) {
5518  if (isOpenMPSimdDirective(S.getDirectiveKind()))
5519  CGF.EmitOMPSimdInit(S);
5520  },
5521  [&S, &LoopScope, Cond, IncExpr, LoopExit, &CodeGenLoop,
5522  StaticChunked](CodeGenFunction &CGF, PrePostActionTy &) {
5523  CGF.EmitOMPInnerLoop(
5524  S, LoopScope.requiresCleanups(), Cond, IncExpr,
5525  [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) {
5526  CodeGenLoop(CGF, S, LoopExit);
5527  },
5528  [&S, StaticChunked](CodeGenFunction &CGF) {
5529  if (StaticChunked) {
5530  CGF.EmitIgnoredExpr(S.getCombinedNextLowerBound());
5531  CGF.EmitIgnoredExpr(S.getCombinedNextUpperBound());
5532  CGF.EmitIgnoredExpr(S.getCombinedEnsureUpperBound());
5533  CGF.EmitIgnoredExpr(S.getCombinedInit());
5534  }
5535  });
5536  });
5537  EmitBlock(LoopExit.getBlock());
5538  // Tell the runtime we are done.
5539  RT.emitForStaticFinish(*this, S.getEndLoc(), S.getDirectiveKind());
5540  } else {
5541  // Emit the outer loop, which requests its work chunk [LB..UB] from
5542  // runtime and runs the inner loop to process it.
5543  const OMPLoopArguments LoopArguments = {
5544  LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this),
5545  IL.getAddress(*this), Chunk};
5546  EmitOMPDistributeOuterLoop(ScheduleKind, S, LoopScope, LoopArguments,
5547  CodeGenLoop);
5548  }
5549  if (isOpenMPSimdDirective(S.getDirectiveKind())) {
5550  EmitOMPSimdFinal(S, [IL, &S](CodeGenFunction &CGF) {
5551  return CGF.Builder.CreateIsNotNull(
5552  CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
5553  });
5554  }
5555  if (isOpenMPSimdDirective(S.getDirectiveKind()) &&
5556  !isOpenMPParallelDirective(S.getDirectiveKind()) &&
5557  !isOpenMPTeamsDirective(S.getDirectiveKind())) {
5558  EmitOMPReductionClauseFinal(S, OMPD_simd);
5559  // Emit post-update of the reduction variables if IsLastIter != 0.
5560  emitPostUpdateForReductionClause(
5561  *this, S, [IL, &S](CodeGenFunction &CGF) {
5562  return CGF.Builder.CreateIsNotNull(
5563  CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
5564  });
5565  }