//===--- CGStmtOpenMP.cpp - Emit LLVM Code from Statements ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit OpenMP nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCleanup.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Metadata.h"
#include "llvm/Support/AtomicOrdering.h"
using namespace clang;
using namespace CodeGen;
using namespace llvm::omp;
static const VarDecl *getBaseDecl(const Expr *Ref);

namespace {
/// Lexical scope for OpenMP executable constructs, that handles correct codegen
/// for captured expressions.
class OMPLexicalScope : public CodeGenFunction::LexicalScope {
  void emitPreInitStmt(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
    for (const auto *C : S.clauses()) {
      if (const auto *CPI = OMPClauseWithPreInit::get(C)) {
        if (const auto *PreInit =
                cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
          for (const auto *I : PreInit->decls()) {
            if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
              CGF.EmitVarDecl(cast<VarDecl>(*I));
            } else {
              CodeGenFunction::AutoVarEmission Emission =
                  CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
              CGF.EmitAutoVarCleanups(Emission);
            }
          }
        }
      }
    }
  }
  CodeGenFunction::OMPPrivateScope InlinedShareds;

  static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
    return CGF.LambdaCaptureFields.lookup(VD) ||
           (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
           (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl) &&
            cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD));
  }

public:
  OMPLexicalScope(
      CodeGenFunction &CGF, const OMPExecutableDirective &S,
      const llvm::Optional<OpenMPDirectiveKind> CapturedRegion = llvm::None,
      const bool EmitPreInitStmt = true)
      : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
        InlinedShareds(CGF) {
    if (EmitPreInitStmt)
      emitPreInitStmt(CGF, S);
    if (!CapturedRegion)
      return;
    assert(S.hasAssociatedStmt() &&
           "Expected associated statement for inlined directive.");
    const CapturedStmt *CS = S.getCapturedStmt(*CapturedRegion);
    for (const auto &C : CS->captures()) {
      if (C.capturesVariable() || C.capturesVariableByCopy()) {
        auto *VD = C.getCapturedVar();
        assert(VD == VD->getCanonicalDecl() &&
               "Canonical decl must be captured.");
        DeclRefExpr DRE(
            CGF.getContext(), const_cast<VarDecl *>(VD),
            isCapturedVar(CGF, VD) || (CGF.CapturedStmtInfo &&
                                       InlinedShareds.isGlobalVarCaptured(VD)),
            VD->getType().getNonReferenceType(), VK_LValue, C.getLocation());
        InlinedShareds.addPrivate(VD, CGF.EmitLValue(&DRE).getAddress(CGF));
      }
    }
    (void)InlinedShareds.Privatize();
  }
};

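// Illustrative note (the directive below is an example, not from this file):
// for a construct such as
//
//   #pragma omp parallel if(x + y > 0) num_threads(n)
//
// Sema captures the clause expressions 'x + y > 0' and 'n' into
// OMPCapturedExprDecls attached to the clauses as pre-init statements;
// emitPreInitStmt above materializes those temporaries before the region so
// that later clause codegen can simply load them.
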
/// Lexical scope for OpenMP parallel construct, that handles correct codegen
/// for captured expressions.
class OMPParallelScope final : public OMPLexicalScope {
  bool EmitPreInitStmt(const OMPExecutableDirective &S) {
    OpenMPDirectiveKind Kind = S.getDirectiveKind();
    return !(isOpenMPTargetExecutionDirective(Kind) ||
             isOpenMPLoopBoundSharingDirective(Kind)) &&
           isOpenMPParallelDirective(Kind);
  }

public:
  OMPParallelScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : OMPLexicalScope(CGF, S, /*CapturedRegion=*/llvm::None,
                        EmitPreInitStmt(S)) {}
};

/// Lexical scope for OpenMP teams construct, that handles correct codegen
/// for captured expressions.
class OMPTeamsScope final : public OMPLexicalScope {
  bool EmitPreInitStmt(const OMPExecutableDirective &S) {
    OpenMPDirectiveKind Kind = S.getDirectiveKind();
    return !isOpenMPTargetExecutionDirective(Kind) &&
           isOpenMPTeamsDirective(Kind);
  }

public:
  OMPTeamsScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : OMPLexicalScope(CGF, S, /*CapturedRegion=*/llvm::None,
                        EmitPreInitStmt(S)) {}
};

/// Private scope for OpenMP loop-based directives, that supports capturing
/// of used expression from loop statement.
class OMPLoopScope : public CodeGenFunction::RunCleanupsScope {
  void emitPreInitStmt(CodeGenFunction &CGF, const OMPLoopBasedDirective &S) {
    const DeclStmt *PreInits;
    CodeGenFunction::OMPMapVars PreCondVars;
    if (auto *LD = dyn_cast<OMPLoopDirective>(&S)) {
      llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
      for (const auto *E : LD->counters()) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
        EmittedAsPrivate.insert(VD->getCanonicalDecl());
        (void)PreCondVars.setVarAddr(
            CGF, VD, CGF.CreateMemTemp(VD->getType().getNonReferenceType()));
      }
      // Mark private vars as undefs.
      for (const auto *C : LD->getClausesOfKind<OMPPrivateClause>()) {
        for (const Expr *IRef : C->varlists()) {
          const auto *OrigVD =
              cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
          if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
            QualType OrigVDTy = OrigVD->getType().getNonReferenceType();
            (void)PreCondVars.setVarAddr(
                CGF, OrigVD,
                Address(llvm::UndefValue::get(CGF.ConvertTypeForMem(
                            CGF.getContext().getPointerType(OrigVDTy))),
                        CGF.ConvertTypeForMem(OrigVDTy),
                        CGF.getContext().getDeclAlign(OrigVD)));
          }
        }
      }
      (void)PreCondVars.apply(CGF);
      // Emit init, __range and __end variables for C++ range loops.
      (void)OMPLoopBasedDirective::doForAllLoops(
          LD->getInnermostCapturedStmt()->getCapturedStmt(),
          /*TryImperfectlyNestedLoops=*/true, LD->getLoopsNumber(),
          [&CGF](unsigned Cnt, const Stmt *CurStmt) {
            if (const auto *CXXFor = dyn_cast<CXXForRangeStmt>(CurStmt)) {
              if (const Stmt *Init = CXXFor->getInit())
                CGF.EmitStmt(Init);
              CGF.EmitStmt(CXXFor->getRangeStmt());
              CGF.EmitStmt(CXXFor->getEndStmt());
            }
            return false;
          });
      PreInits = cast_or_null<DeclStmt>(LD->getPreInits());
    } else if (const auto *Tile = dyn_cast<OMPTileDirective>(&S)) {
      PreInits = cast_or_null<DeclStmt>(Tile->getPreInits());
    } else if (const auto *Unroll = dyn_cast<OMPUnrollDirective>(&S)) {
      PreInits = cast_or_null<DeclStmt>(Unroll->getPreInits());
    } else {
      llvm_unreachable("Unknown loop-based directive kind.");
    }
    if (PreInits) {
      for (const auto *I : PreInits->decls())
        CGF.EmitVarDecl(cast<VarDecl>(*I));
    }
    PreCondVars.restore(CGF);
  }

public:
  OMPLoopScope(CodeGenFunction &CGF, const OMPLoopBasedDirective &S)
      : CodeGenFunction::RunCleanupsScope(CGF) {
    emitPreInitStmt(CGF, S);
  }
};

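// Illustrative note: for a loop-transformation directive over a range-based
// for, e.g.
//
//   #pragma omp tile sizes(4)
//   for (int &v : vec) { ... }
//
// OMPLoopScope first gives each loop counter a private temporary, then emits
// the hidden init/__range/__end variables of the range-for, and finally any
// pre-init declarations computed by Sema (trip counts, etc.) before the
// transformed loop itself is generated.
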
class OMPSimdLexicalScope : public CodeGenFunction::LexicalScope {
  CodeGenFunction::OMPPrivateScope InlinedShareds;

  static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
    return CGF.LambdaCaptureFields.lookup(VD) ||
           (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
           (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl) &&
            cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD));
  }

public:
  OMPSimdLexicalScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
        InlinedShareds(CGF) {
    for (const auto *C : S.clauses()) {
      if (const auto *CPI = OMPClauseWithPreInit::get(C)) {
        if (const auto *PreInit =
                cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
          for (const auto *I : PreInit->decls()) {
            if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
              CGF.EmitVarDecl(cast<VarDecl>(*I));
            } else {
              CodeGenFunction::AutoVarEmission Emission =
                  CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
              CGF.EmitAutoVarCleanups(Emission);
            }
          }
        }
      } else if (const auto *UDP = dyn_cast<OMPUseDevicePtrClause>(C)) {
        for (const Expr *E : UDP->varlists()) {
          const Decl *D = cast<DeclRefExpr>(E)->getDecl();
          if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
            CGF.EmitVarDecl(*OED);
        }
      } else if (const auto *UDP = dyn_cast<OMPUseDeviceAddrClause>(C)) {
        for (const Expr *E : UDP->varlists()) {
          const Decl *D = getBaseDecl(E);
          if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
            CGF.EmitVarDecl(*OED);
        }
      }
    }
    if (!isOpenMPSimdDirective(S.getDirectiveKind()))
      CGF.EmitOMPPrivateClause(S, InlinedShareds);
    if (const auto *TG = dyn_cast<OMPTaskgroupDirective>(&S)) {
      if (const Expr *E = TG->getReductionRef())
        CGF.EmitVarDecl(*cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()));
    }
    // Temp copy arrays for inscan reductions should not be emitted as they are
    // not used in simd only mode.
    llvm::DenseSet<CanonicalDeclPtr<const Decl>> CopyArrayTemps;
    for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
      if (C->getModifier() != OMPC_REDUCTION_inscan)
        continue;
      for (const Expr *E : C->copy_array_temps())
        CopyArrayTemps.insert(cast<DeclRefExpr>(E)->getDecl());
    }
    const auto *CS = cast_or_null<CapturedStmt>(S.getAssociatedStmt());
    while (CS) {
      for (auto &C : CS->captures()) {
        if (C.capturesVariable() || C.capturesVariableByCopy()) {
          auto *VD = C.getCapturedVar();
          if (CopyArrayTemps.contains(VD))
            continue;
          assert(VD == VD->getCanonicalDecl() &&
                 "Canonical decl must be captured.");
          DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(VD),
                          isCapturedVar(CGF, VD) ||
                              (CGF.CapturedStmtInfo &&
                               InlinedShareds.isGlobalVarCaptured(VD)),
                          VD->getType().getNonReferenceType(), VK_LValue,
                          C.getLocation());
          InlinedShareds.addPrivate(VD, CGF.EmitLValue(&DRE).getAddress(CGF));
        }
      }
      CS = dyn_cast<CapturedStmt>(CS->getCapturedStmt());
    }
    (void)InlinedShareds.Privatize();
  }
};

} // namespace

static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
                                         const OMPExecutableDirective &S,
                                         const RegionCodeGenTy &CodeGen);

LValue CodeGenFunction::EmitOMPSharedLValue(const Expr *E) {
  if (const auto *OrigDRE = dyn_cast<DeclRefExpr>(E)) {
    if (const auto *OrigVD = dyn_cast<VarDecl>(OrigDRE->getDecl())) {
      OrigVD = OrigVD->getCanonicalDecl();
      bool IsCaptured =
          LambdaCaptureFields.lookup(OrigVD) ||
          (CapturedStmtInfo && CapturedStmtInfo->lookup(OrigVD)) ||
          (CurCodeDecl && isa<BlockDecl>(CurCodeDecl));
      DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), IsCaptured,
                      OrigDRE->getType(), VK_LValue, OrigDRE->getExprLoc());
      return EmitLValue(&DRE);
    }
  }
  return EmitLValue(E);
}

llvm::Value *CodeGenFunction::getTypeSize(QualType Ty) {
  ASTContext &C = getContext();
  llvm::Value *Size = nullptr;
  auto SizeInChars = C.getTypeSizeInChars(Ty);
  if (SizeInChars.isZero()) {
    // getTypeSizeInChars() returns 0 for a VLA.
    while (const VariableArrayType *VAT = C.getAsVariableArrayType(Ty)) {
      VlaSizePair VlaSize = getVLASize(VAT);
      Ty = VlaSize.Type;
      Size =
          Size ? Builder.CreateNUWMul(Size, VlaSize.NumElts) : VlaSize.NumElts;
    }
    SizeInChars = C.getTypeSizeInChars(Ty);
    if (SizeInChars.isZero())
      return llvm::ConstantInt::get(SizeTy, /*V=*/0);
    return Builder.CreateNUWMul(Size, CGM.getSize(SizeInChars));
  }
  return CGM.getSize(SizeInChars);
}

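// Example (illustrative): for a VLA 'double a[n][m]', the loop above folds
// the variable extents into Size = n * m (NUW multiplies) and the result is
// Size * sizeof(double); for a fixed-size type the constant
// CGM.getSize(SizeInChars) is returned directly.
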
void CodeGenFunction::GenerateOpenMPCapturedVars(
    const CapturedStmt &S, SmallVectorImpl<llvm::Value *> &CapturedVars) {
  const RecordDecl *RD = S.getCapturedRecordDecl();
  auto CurField = RD->field_begin();
  auto CurCap = S.captures().begin();
  for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
                                                 E = S.capture_init_end();
       I != E; ++I, ++CurField, ++CurCap) {
    if (CurField->hasCapturedVLAType()) {
      const VariableArrayType *VAT = CurField->getCapturedVLAType();
      llvm::Value *Val = VLASizeMap[VAT->getSizeExpr()];
      CapturedVars.push_back(Val);
    } else if (CurCap->capturesThis()) {
      CapturedVars.push_back(CXXThisValue);
    } else if (CurCap->capturesVariableByCopy()) {
      llvm::Value *CV = EmitLoadOfScalar(EmitLValue(*I), CurCap->getLocation());

      // If the field is not a pointer, we need to save the actual value
      // and load it as a void pointer.
      if (!CurField->getType()->isAnyPointerType()) {
        ASTContext &Ctx = getContext();
        Address DstAddr = CreateMemTemp(
            Ctx.getUIntPtrType(),
            Twine(CurCap->getCapturedVar()->getName(), ".casted"));
        LValue DstLV = MakeAddrLValue(DstAddr, Ctx.getUIntPtrType());

        llvm::Value *SrcAddrVal = EmitScalarConversion(
            DstAddr.getPointer(), Ctx.getPointerType(Ctx.getUIntPtrType()),
            Ctx.getPointerType(CurField->getType()), CurCap->getLocation());
        LValue SrcLV =
            MakeNaturalAlignAddrLValue(SrcAddrVal, CurField->getType());

        // Store the value using the source type pointer.
        EmitStoreThroughLValue(RValue::get(CV), SrcLV);

        // Load the value using the destination type pointer.
        CV = EmitLoadOfScalar(DstLV, CurCap->getLocation());
      }
      CapturedVars.push_back(CV);
    } else {
      assert(CurCap->capturesVariable() && "Expected capture by reference.");
      CapturedVars.push_back(EmitLValue(*I).getAddress(*this).getPointer());
    }
  }
}

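// Note on the uintptr dance above (illustrative): a scalar captured by copy,
// say 'float f', is stored into a temporary and reloaded as a uintptr-sized
// value before being handed to the runtime, since the runtime only traffics
// in pointer-sized data. castValueFromUintptr below performs the inverse
// reinterpretation inside the outlined function.
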
static Address castValueFromUintptr(CodeGenFunction &CGF, SourceLocation Loc,
                                    QualType DstType, StringRef Name,
                                    LValue AddrLV) {
  ASTContext &Ctx = CGF.getContext();

  llvm::Value *CastedPtr = CGF.EmitScalarConversion(
      AddrLV.getAddress(CGF).getPointer(), Ctx.getUIntPtrType(),
      Ctx.getPointerType(DstType), Loc);
  Address TmpAddr =
      CGF.MakeNaturalAlignAddrLValue(CastedPtr, DstType).getAddress(CGF);
  return TmpAddr;
}

static QualType getCanonicalParamType(ASTContext &C, QualType T) {
  if (T->isLValueReferenceType())
    return C.getLValueReferenceType(
        getCanonicalParamType(C, T.getNonReferenceType()),
        /*SpelledAsLValue=*/false);
  if (T->isPointerType())
    return C.getPointerType(getCanonicalParamType(C, T->getPointeeType()));
  if (const ArrayType *A = T->getAsArrayTypeUnsafe()) {
    if (const auto *VLA = dyn_cast<VariableArrayType>(A))
      return getCanonicalParamType(C, VLA->getElementType());
    if (!A->isVariablyModifiedType())
      return C.getCanonicalType(T);
  }
  return C.getCanonicalParamType(T);
}

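// Example (illustrative): a variably modified type such as 'int (*)[n]'
// becomes 'int *' here, and 'int (&)[n]' becomes 'int &', because VLA types
// cannot appear in the outlined function's prototype; the dynamic extent 'n'
// travels separately as a "vla" size argument.
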
namespace {
/// Contains required data for proper outlined function codegen.
struct FunctionOptions {
  /// Captured statement for which the function is generated.
  const CapturedStmt *S = nullptr;
  /// true if cast to/from UIntPtr is required for variables captured by
  /// value.
  const bool UIntPtrCastRequired = true;
  /// true if only casted arguments must be registered as local args or VLA
  /// sizes.
  const bool RegisterCastedArgsOnly = false;
  /// Name of the generated function.
  const StringRef FunctionName;
  /// Location of the non-debug version of the outlined function.
  SourceLocation Loc;
  explicit FunctionOptions(const CapturedStmt *S, bool UIntPtrCastRequired,
                           bool RegisterCastedArgsOnly, StringRef FunctionName,
                           SourceLocation Loc)
      : S(S), UIntPtrCastRequired(UIntPtrCastRequired),
        RegisterCastedArgsOnly(UIntPtrCastRequired && RegisterCastedArgsOnly),
        FunctionName(FunctionName), Loc(Loc) {}
};
} // namespace

static llvm::Function *emitOutlinedFunctionPrologue(
    CodeGenFunction &CGF, FunctionArgList &Args,
    llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>>
        &LocalAddrs,
    llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>>
        &VLASizes,
    llvm::Value *&CXXThisValue, const FunctionOptions &FO) {
  const CapturedDecl *CD = FO.S->getCapturedDecl();
  const RecordDecl *RD = FO.S->getCapturedRecordDecl();
  assert(CD->hasBody() && "missing CapturedDecl body");

  CXXThisValue = nullptr;
  // Build the argument list.
  CodeGenModule &CGM = CGF.CGM;
  ASTContext &Ctx = CGM.getContext();
  FunctionArgList TargetArgs;
  Args.append(CD->param_begin(),
              std::next(CD->param_begin(), CD->getContextParamPosition()));
  TargetArgs.append(
      CD->param_begin(),
      std::next(CD->param_begin(), CD->getContextParamPosition()));
  auto I = FO.S->captures().begin();
  FunctionDecl *DebugFunctionDecl = nullptr;
  if (!FO.UIntPtrCastRequired) {
    FunctionProtoType::ExtProtoInfo EPI;
    QualType FunctionTy = Ctx.getFunctionType(Ctx.VoidTy, llvm::None, EPI);
    DebugFunctionDecl = FunctionDecl::Create(
        Ctx, Ctx.getTranslationUnitDecl(), FO.S->getBeginLoc(),
        SourceLocation(), DeclarationName(), FunctionTy,
        Ctx.getTrivialTypeSourceInfo(FunctionTy), SC_Static,
        /*UsesFPIntrin=*/false, /*isInlineSpecified=*/false,
        /*hasWrittenPrototype=*/false);
  }
  for (const FieldDecl *FD : RD->fields()) {
    QualType ArgType = FD->getType();
    IdentifierInfo *II = nullptr;
    VarDecl *CapVar = nullptr;

    // If this is a capture by copy and the type is not a pointer, the outlined
    // function argument type should be uintptr and the value properly casted to
    // uintptr. This is necessary given that the runtime library is only able to
    // deal with pointers. We can pass in the same way the VLA type sizes to the
    // outlined function.
    if (FO.UIntPtrCastRequired &&
        ((I->capturesVariableByCopy() && !ArgType->isAnyPointerType()) ||
         I->capturesVariableArrayType()))
      ArgType = Ctx.getUIntPtrType();

    if (I->capturesVariable() || I->capturesVariableByCopy()) {
      CapVar = I->getCapturedVar();
      II = CapVar->getIdentifier();
    } else if (I->capturesThis()) {
      II = &Ctx.Idents.get("this");
    } else {
      assert(I->capturesVariableArrayType());
      II = &Ctx.Idents.get("vla");
    }
    if (ArgType->isVariablyModifiedType())
      ArgType = getCanonicalParamType(Ctx, ArgType);
    VarDecl *Arg;
    if (CapVar && (CapVar->getTLSKind() != clang::VarDecl::TLS_None)) {
      Arg = ImplicitParamDecl::Create(Ctx, /*DC=*/nullptr, FD->getLocation(),
                                      II, ArgType,
                                      ImplicitParamDecl::ThreadPrivateVar);
    } else if (DebugFunctionDecl && (CapVar || I->capturesThis())) {
      Arg = ParmVarDecl::Create(
          Ctx, DebugFunctionDecl,
          CapVar ? CapVar->getBeginLoc() : FD->getBeginLoc(),
          CapVar ? CapVar->getLocation() : FD->getLocation(), II, ArgType,
          /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
    } else {
      Arg = ImplicitParamDecl::Create(Ctx, /*DC=*/nullptr, FD->getLocation(),
                                      II, ArgType, ImplicitParamDecl::Other);
    }
    Args.emplace_back(Arg);
    // Do not cast arguments if we emit function with non-original types.
    TargetArgs.emplace_back(
        FO.UIntPtrCastRequired
            ? Arg
            : CGM.getOpenMPRuntime().translateParameter(FD, Arg));
    ++I;
  }
  Args.append(std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
              CD->param_end());
  TargetArgs.append(
      std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
      CD->param_end());

  // Create the function declaration.
  const CGFunctionInfo &FuncInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, TargetArgs);
  llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);

  auto *F =
      llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
                             FO.FunctionName, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
  if (CD->isNothrow())
    F->setDoesNotThrow();
  F->setDoesNotRecurse();

  // Always inline the outlined function if optimizations are enabled.
  if (CGM.getCodeGenOpts().OptimizationLevel != 0) {
    F->removeFnAttr(llvm::Attribute::NoInline);
    F->addFnAttr(llvm::Attribute::AlwaysInline);
  }

  // Generate the function.
  CGF.StartFunction(CD, Ctx.VoidTy, F, FuncInfo, TargetArgs,
                    FO.UIntPtrCastRequired ? FO.Loc : FO.S->getBeginLoc(),
                    FO.UIntPtrCastRequired ? FO.Loc
                                           : CD->getBody()->getBeginLoc());
  unsigned Cnt = CD->getContextParamPosition();
  I = FO.S->captures().begin();
  for (const FieldDecl *FD : RD->fields()) {
    // Do not map arguments if we emit function with non-original types.
    Address LocalAddr(Address::invalid());
    if (!FO.UIntPtrCastRequired && Args[Cnt] != TargetArgs[Cnt]) {
      LocalAddr = CGM.getOpenMPRuntime().getParameterAddress(CGF, Args[Cnt],
                                                             TargetArgs[Cnt]);
    } else {
      LocalAddr = CGF.GetAddrOfLocalVar(Args[Cnt]);
    }
    // If we are capturing a pointer by copy we don't need to do anything, just
    // use the value that we get from the arguments.
    if (I->capturesVariableByCopy() && FD->getType()->isAnyPointerType()) {
      const VarDecl *CurVD = I->getCapturedVar();
      if (!FO.RegisterCastedArgsOnly)
        LocalAddrs.insert({Args[Cnt], {CurVD, LocalAddr}});
      ++Cnt;
      ++I;
      continue;
    }

    LValue ArgLVal = CGF.MakeAddrLValue(LocalAddr, Args[Cnt]->getType(),
                                        AlignmentSource::Decl);
    if (FD->hasCapturedVLAType()) {
      if (FO.UIntPtrCastRequired) {
        ArgLVal = CGF.MakeAddrLValue(
            castValueFromUintptr(CGF, I->getLocation(), FD->getType(),
                                 Args[Cnt]->getName(), ArgLVal),
            FD->getType(), AlignmentSource::Decl);
      }
      llvm::Value *ExprArg = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
      const VariableArrayType *VAT = FD->getCapturedVLAType();
      VLASizes.try_emplace(Args[Cnt], VAT->getSizeExpr(), ExprArg);
    } else if (I->capturesVariable()) {
      const VarDecl *Var = I->getCapturedVar();
      QualType VarTy = Var->getType();
      Address ArgAddr = ArgLVal.getAddress(CGF);
      if (ArgLVal.getType()->isLValueReferenceType()) {
        ArgAddr = CGF.EmitLoadOfReference(ArgLVal);
      } else if (!VarTy->isVariablyModifiedType() || !VarTy->isPointerType()) {
        assert(ArgLVal.getType()->isPointerType());
        ArgAddr = CGF.EmitLoadOfPointer(
            ArgAddr, ArgLVal.getType()->castAs<PointerType>());
      }
      if (!FO.RegisterCastedArgsOnly) {
        LocalAddrs.insert(
            {Args[Cnt], {Var, ArgAddr.withAlignment(Ctx.getDeclAlign(Var))}});
      }
    } else if (I->capturesVariableByCopy()) {
      assert(!FD->getType()->isAnyPointerType() &&
             "Not expecting a captured pointer.");
      const VarDecl *Var = I->getCapturedVar();
      LocalAddrs.insert({Args[Cnt],
                         {Var, FO.UIntPtrCastRequired
                                   ? castValueFromUintptr(
                                         CGF, I->getLocation(), FD->getType(),
                                         Args[Cnt]->getName(), ArgLVal)
                                   : ArgLVal.getAddress(CGF)}});
    } else {
      // If 'this' is captured, load it into CXXThisValue.
      assert(I->capturesThis());
      CXXThisValue = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
      LocalAddrs.insert({Args[Cnt], {nullptr, ArgLVal.getAddress(CGF)}});
    }
    ++Cnt;
    ++I;
  }

  return F;
}

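// When full debug info is requested, this prologue is run twice (see
// GenerateOpenMPCapturedStmtFunction below): once for a "_debug__" variant
// that keeps the original parameter types so the debugger sees natural
// variables, and once for the actual outlined function whose by-value
// captures travel as uintptr values.
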
llvm::Function *
CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S,
                                                    SourceLocation Loc) {
  assert(
      CapturedStmtInfo &&
      "CapturedStmtInfo should be set when generating the captured function");
  const CapturedDecl *CD = S.getCapturedDecl();
  // Build the argument list.
  bool NeedWrapperFunction =
      getDebugInfo() && CGM.getCodeGenOpts().hasReducedDebugInfo();
  FunctionArgList Args;
  llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>> LocalAddrs;
  llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>> VLASizes;
  SmallString<256> Buffer;
  llvm::raw_svector_ostream Out(Buffer);
  Out << CapturedStmtInfo->getHelperName();
  if (NeedWrapperFunction)
    Out << "_debug__";
  FunctionOptions FO(&S, !NeedWrapperFunction, /*RegisterCastedArgsOnly=*/false,
                     Out.str(), Loc);
  llvm::Function *F = emitOutlinedFunctionPrologue(*this, Args, LocalAddrs,
                                                   VLASizes, CXXThisValue, FO);
  CodeGenFunction::OMPPrivateScope LocalScope(*this);
  for (const auto &LocalAddrPair : LocalAddrs) {
    if (LocalAddrPair.second.first) {
      LocalScope.addPrivate(LocalAddrPair.second.first,
                            LocalAddrPair.second.second);
    }
  }
  (void)LocalScope.Privatize();
  for (const auto &VLASizePair : VLASizes)
    VLASizeMap[VLASizePair.second.first] = VLASizePair.second.second;
  PGO.assignRegionCounters(GlobalDecl(CD), F);
  CapturedStmtInfo->EmitBody(*this, CD->getBody());
  (void)LocalScope.ForceCleanup();
  FinishFunction(CD->getBodyRBrace());
  if (!NeedWrapperFunction)
    return F;

  FunctionOptions WrapperFO(&S, /*UIntPtrCastRequired=*/true,
                            /*RegisterCastedArgsOnly=*/true,
                            CapturedStmtInfo->getHelperName(), Loc);
  CodeGenFunction WrapperCGF(CGM, /*suppressNewContext=*/true);
  WrapperCGF.CapturedStmtInfo = CapturedStmtInfo;
  Args.clear();
  LocalAddrs.clear();
  VLASizes.clear();
  llvm::Function *WrapperF =
      emitOutlinedFunctionPrologue(WrapperCGF, Args, LocalAddrs, VLASizes,
                                   WrapperCGF.CXXThisValue, WrapperFO);
  llvm::SmallVector<llvm::Value *, 4> CallArgs;
  auto *PI = F->arg_begin();
  for (const auto *Arg : Args) {
    llvm::Value *CallArg;
    auto I = LocalAddrs.find(Arg);
    if (I != LocalAddrs.end()) {
      LValue LV = WrapperCGF.MakeAddrLValue(
          I->second.second,
          I->second.first ? I->second.first->getType() : Arg->getType(),
          AlignmentSource::Decl);
      if (LV.getType()->isAnyComplexType())
        LV.setAddress(WrapperCGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            LV.getAddress(WrapperCGF),
            PI->getType()->getPointerTo(
                LV.getAddress(WrapperCGF).getAddressSpace()),
            PI->getType()));
      CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
    } else {
      auto EI = VLASizes.find(Arg);
      if (EI != VLASizes.end()) {
        CallArg = EI->second.second;
      } else {
        LValue LV =
            WrapperCGF.MakeAddrLValue(WrapperCGF.GetAddrOfLocalVar(Arg),
                                      Arg->getType(), AlignmentSource::Decl);
        CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
      }
    }
    CallArgs.emplace_back(WrapperCGF.EmitFromMemory(CallArg, Arg->getType()));
    ++PI;
  }
  CGM.getOpenMPRuntime().emitOutlinedFunctionCall(WrapperCGF, Loc, F, CallArgs);
  WrapperCGF.FinishFunction();
  return WrapperF;
}

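// Conceptually (simplified, illustrative IR) the wrapper scheme for a region
// capturing 'int x' by value is:
//
//   define internal void @helper_debug__(..., i32 %x) { <user body> }
//   define internal void @helper(..., i64 %x) {
//     ; reinterpret the uintptr-encoded capture and forward it
//     call void @helper_debug__(..., i32 <x recovered from %x>)
//   }
//
// so the runtime always sees pointer-sized arguments while debug info refers
// to the naturally typed variant.
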
//===----------------------------------------------------------------------===//
//                              OpenMP Directive Emission
//===----------------------------------------------------------------------===//
void CodeGenFunction::EmitOMPAggregateAssign(
    Address DestAddr, Address SrcAddr, QualType OriginalType,
    const llvm::function_ref<void(Address, Address)> CopyGen) {
  // Perform element-by-element initialization.
  QualType ElementTy;

  // Drill down to the base element type on both arrays.
  const ArrayType *ArrayTy = OriginalType->getAsArrayTypeUnsafe();
  llvm::Value *NumElements = emitArrayLength(ArrayTy, ElementTy, DestAddr);
  SrcAddr = Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());

  llvm::Value *SrcBegin = SrcAddr.getPointer();
  llvm::Value *DestBegin = DestAddr.getPointer();
  // Cast from pointer to array type to pointer to single element.
  llvm::Value *DestEnd =
      Builder.CreateGEP(DestAddr.getElementType(), DestBegin, NumElements);
  // The basic structure here is a while-do loop.
  llvm::BasicBlock *BodyBB = createBasicBlock("omp.arraycpy.body");
  llvm::BasicBlock *DoneBB = createBasicBlock("omp.arraycpy.done");
  llvm::Value *IsEmpty =
      Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty");
  Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);

  // Enter the loop body, making that address the current address.
  llvm::BasicBlock *EntryBB = Builder.GetInsertBlock();
  EmitBlock(BodyBB);

  CharUnits ElementSize = getContext().getTypeSizeInChars(ElementTy);

  llvm::PHINode *SrcElementPHI =
      Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
  SrcElementPHI->addIncoming(SrcBegin, EntryBB);
  Address SrcElementCurrent =
      Address(SrcElementPHI, SrcAddr.getElementType(),
              SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  llvm::PHINode *DestElementPHI = Builder.CreatePHI(
      DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
  DestElementPHI->addIncoming(DestBegin, EntryBB);
  Address DestElementCurrent =
      Address(DestElementPHI, DestAddr.getElementType(),
              DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  // Emit copy.
  CopyGen(DestElementCurrent, SrcElementCurrent);

  // Shift the address forward by one element.
  llvm::Value *DestElementNext =
      Builder.CreateConstGEP1_32(DestAddr.getElementType(), DestElementPHI,
                                 /*Idx0=*/1, "omp.arraycpy.dest.element");
  llvm::Value *SrcElementNext =
      Builder.CreateConstGEP1_32(SrcAddr.getElementType(), SrcElementPHI,
                                 /*Idx0=*/1, "omp.arraycpy.src.element");
  // Check whether we've reached the end.
  llvm::Value *Done =
      Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
  Builder.CreateCondBr(Done, DoneBB, BodyBB);
  DestElementPHI->addIncoming(DestElementNext, Builder.GetInsertBlock());
  SrcElementPHI->addIncoming(SrcElementNext, Builder.GetInsertBlock());

  // Done.
  EmitBlock(DoneBB, /*IsFinished=*/true);
}

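// The control flow emitted above is, schematically:
//
//   entry:               br (dest.begin == dest.end), done, body
//   omp.arraycpy.body:   phi cur.src/cur.dest; CopyGen(cur.dest, cur.src);
//                        step both pointers by one element;
//                        br (next.dest == dest.end), done, body
//   omp.arraycpy.done:
//
// i.e. a guarded do-while over the flattened element range.
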
void CodeGenFunction::EmitOMPCopy(QualType OriginalType, Address DestAddr,
                                  Address SrcAddr, const VarDecl *DestVD,
                                  const VarDecl *SrcVD, const Expr *Copy) {
  if (OriginalType->isArrayType()) {
    const auto *BO = dyn_cast<BinaryOperator>(Copy);
    if (BO && BO->getOpcode() == BO_Assign) {
      // Perform simple memcpy for simple copying.
      LValue Dest = MakeAddrLValue(DestAddr, OriginalType);
      LValue Src = MakeAddrLValue(SrcAddr, OriginalType);
      EmitAggregateAssign(Dest, Src, OriginalType);
    } else {
      // For arrays with complex element types perform element by element
      // copying.
      EmitOMPAggregateAssign(
          DestAddr, SrcAddr, OriginalType,
          [this, Copy, SrcVD, DestVD](Address DestElement, Address SrcElement) {
            // Working with the single array element, so have to remap
            // destination and source variables to corresponding array
            // elements.
            CodeGenFunction::OMPPrivateScope Remap(*this);
            Remap.addPrivate(DestVD, DestElement);
            Remap.addPrivate(SrcVD, SrcElement);
            (void)Remap.Privatize();
            EmitIgnoredExpr(Copy);
          });
    }
  } else {
    // Remap pseudo source variable to private copy.
    CodeGenFunction::OMPPrivateScope Remap(*this);
    Remap.addPrivate(SrcVD, SrcAddr);
    Remap.addPrivate(DestVD, DestAddr);
    (void)Remap.Privatize();
    // Emit copying of the whole variable.
    EmitIgnoredExpr(Copy);
  }
}

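// Example (illustrative): for a 'Copy' assignment expression over
// 'std::string s[4]', the element-by-element path above remaps DestVD/SrcVD
// to each element pair and re-emits the copy assignment per element, whereas
// a trivially assignable array takes the single EmitAggregateAssign path.
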
bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
                                                OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return false;
  bool DeviceConstTarget =
      getLangOpts().OpenMPIsDevice &&
      isOpenMPTargetExecutionDirective(D.getDirectiveKind());
  bool FirstprivateIsLastprivate = false;
  llvm::DenseMap<const VarDecl *, OpenMPLastprivateModifier> Lastprivates;
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    for (const auto *D : C->varlists())
      Lastprivates.try_emplace(
          cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl(),
          C->getKind());
  }
  llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate;
  llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
  getOpenMPCaptureRegions(CaptureRegions, D.getDirectiveKind());
  // Force emission of the firstprivate copy if the directive does not emit
  // outlined function, like omp for, omp simd, omp distribute etc.
  bool MustEmitFirstprivateCopy =
      CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown;
  for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
    const auto *IRef = C->varlist_begin();
    const auto *InitsRef = C->inits().begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      bool ThisFirstprivateIsLastprivate =
          Lastprivates.count(OrigVD->getCanonicalDecl()) > 0;
      const FieldDecl *FD = CapturedStmtInfo->lookup(OrigVD);
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
      if (!MustEmitFirstprivateCopy && !ThisFirstprivateIsLastprivate && FD &&
          !FD->getType()->isReferenceType() &&
          (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) {
        EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl());
        ++IRef;
        ++InitsRef;
        continue;
      }
      // Do not emit copy for firstprivate constant variables in target regions,
      // captured by reference.
      if (DeviceConstTarget && OrigVD->getType().isConstant(getContext()) &&
          FD && FD->getType()->isReferenceType() &&
          (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) {
        EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl());
        ++IRef;
        ++InitsRef;
        continue;
      }
      FirstprivateIsLastprivate =
          FirstprivateIsLastprivate || ThisFirstprivateIsLastprivate;
      if (EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *VDInit =
            cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl());
        bool IsRegistered;
        DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
                        /*RefersToEnclosingVariableOrCapture=*/FD != nullptr,
                        (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
        LValue OriginalLVal;
        if (!FD) {
          // Check if the firstprivate variable is just a constant value.
          ConstantEmission CE = tryEmitAsConstant(&DRE);
          if (CE && !CE.isReference()) {
            // Constant value, no need to create a copy.
            ++IRef;
            ++InitsRef;
            continue;
          }
          if (CE && CE.isReference()) {
            OriginalLVal = CE.getReferenceLValue(*this, &DRE);
          } else {
            assert(!CE && "Expected non-constant firstprivate.");
            OriginalLVal = EmitLValue(&DRE);
          }
        } else {
          OriginalLVal = EmitLValue(&DRE);
        }
        QualType Type = VD->getType();
        if (Type->isArrayType()) {
          // Emit VarDecl with copy init for arrays.
          // Get the address of the original variable captured in current
          // captured region.
          AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
          const Expr *Init = VD->getInit();
          if (!isa<CXXConstructExpr>(Init) || isTrivialInitializer(Init)) {
            // Perform simple memcpy.
            LValue Dest = MakeAddrLValue(Emission.getAllocatedAddress(), Type);
            EmitAggregateAssign(Dest, OriginalLVal, Type);
          } else {
            EmitOMPAggregateAssign(
                Emission.getAllocatedAddress(), OriginalLVal.getAddress(*this),
                Type,
                [this, VDInit, Init](Address DestElement, Address SrcElement) {
                  // Clean up any temporaries needed by the
                  // initialization.
                  RunCleanupsScope InitScope(*this);
                  // Emit initialization for single element.
                  setAddrOfLocalVar(VDInit, SrcElement);
                  EmitAnyExprToMem(Init, DestElement,
                                   Init->getType().getQualifiers(),
                                   /*IsInitializer*/ false);
                  LocalDeclMap.erase(VDInit);
                });
          }
          EmitAutoVarCleanups(Emission);
          IsRegistered =
              PrivateScope.addPrivate(OrigVD, Emission.getAllocatedAddress());
        } else {
          Address OriginalAddr = OriginalLVal.getAddress(*this);
          // Emit private VarDecl with copy init.
          // Remap temp VDInit variable to the address of the original
          // variable (for proper handling of captured global variables).
          setAddrOfLocalVar(VDInit, OriginalAddr);
          EmitDecl(*VD);
          LocalDeclMap.erase(VDInit);
          Address VDAddr = GetAddrOfLocalVar(VD);
          if (ThisFirstprivateIsLastprivate &&
              Lastprivates[OrigVD->getCanonicalDecl()] ==
                  OMPC_LASTPRIVATE_conditional) {
            // Create/init special variable for lastprivate conditionals.
            llvm::Value *V =
                EmitLoadOfScalar(MakeAddrLValue(VDAddr, (*IRef)->getType(),
                                                AlignmentSource::Decl),
                                 (*IRef)->getExprLoc());
            VDAddr = CGM.getOpenMPRuntime().emitLastprivateConditionalInit(
                *this, OrigVD);
            EmitStoreOfScalar(V, MakeAddrLValue(VDAddr, (*IRef)->getType(),
                                                AlignmentSource::Decl));
            LocalDeclMap.erase(VD);
            setAddrOfLocalVar(VD, VDAddr);
          }
          IsRegistered = PrivateScope.addPrivate(OrigVD, VDAddr);
        }
        assert(IsRegistered &&
               "firstprivate var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
      ++InitsRef;
    }
  }
  return FirstprivateIsLastprivate && !EmittedAsFirstprivate.empty();
}

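// Example (illustrative):
//
//   int a = 1;
//   #pragma omp parallel firstprivate(a)
//   { ... }
//
// allocates a region-local copy of 'a' initialized from the captured
// original; aggregate types use either a plain memcpy (trivial initializer)
// or the element-wise EmitOMPAggregateAssign path with the initializer
// remapped to each element.
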
void CodeGenFunction::EmitOMPPrivateClause(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return;
  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
  for (const auto *C : D.getClausesOfKind<OMPPrivateClause>()) {
    auto IRef = C->varlist_begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
        EmitDecl(*VD);
        // Emit private VarDecl with copy init.
        bool IsRegistered =
            PrivateScope.addPrivate(OrigVD, GetAddrOfLocalVar(VD));
        assert(IsRegistered && "private var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
    }
  }
}

bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
  if (!HaveInsertPoint())
    return false;
  // threadprivate_var1 = master_threadprivate_var1;
  // operator=(threadprivate_var2, master_threadprivate_var2);
  // ...
  // __kmpc_barrier(&loc, global_tid);
  llvm::DenseSet<const VarDecl *> CopiedVars;
  llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr;
  for (const auto *C : D.getClausesOfKind<OMPCopyinClause>()) {
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (const Expr *AssignOp : C->assignment_ops()) {
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = VD->getType();
      if (CopiedVars.insert(VD->getCanonicalDecl()).second) {
        // Get the address of the master variable. If we are emitting code with
        // TLS support, the address is passed from the master as field in the
        // captured declaration.
        Address MasterAddr = Address::invalid();
        if (getLangOpts().OpenMPUseTLS &&
            getContext().getTargetInfo().isTLSSupported()) {
          assert(CapturedStmtInfo->lookup(VD) &&
                 "Copyin threadprivates should have been captured!");
          DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD), true,
                          (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
          MasterAddr = EmitLValue(&DRE).getAddress(*this);
          LocalDeclMap.erase(VD);
        } else {
          MasterAddr =
              Address(VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD)
                                          : CGM.GetAddrOfGlobal(VD),
                      CGM.getTypes().ConvertTypeForMem(VD->getType()),
                      getContext().getDeclAlign(VD));
        }
        // Get the address of the threadprivate variable.
        Address PrivateAddr = EmitLValue(*IRef).getAddress(*this);
        if (CopiedVars.size() == 1) {
          // At first check if current thread is a master thread. If it is, no
          // need to copy data.
          CopyBegin = createBasicBlock("copyin.not.master");
          CopyEnd = createBasicBlock("copyin.not.master.end");
          // TODO: Avoid ptrtoint conversion.
          auto *MasterAddrInt =
              Builder.CreatePtrToInt(MasterAddr.getPointer(), CGM.IntPtrTy);
          auto *PrivateAddrInt =
              Builder.CreatePtrToInt(PrivateAddr.getPointer(), CGM.IntPtrTy);
          Builder.CreateCondBr(
              Builder.CreateICmpNE(MasterAddrInt, PrivateAddrInt), CopyBegin,
              CopyEnd);
          EmitBlock(CopyBegin);
        }
        const auto *SrcVD =
            cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        EmitOMPCopy(Type, PrivateAddr, MasterAddr, DestVD, SrcVD, AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
  }
  if (CopyEnd) {
    // Exit out of copying procedure for non-master thread.
    EmitBlock(CopyEnd, /*IsFinished=*/true);
    return true;
  }
  return false;
}

bool CodeGenFunction::EmitOMPLastprivateClauseInit(
    const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return false;
  bool HasAtLeastOneLastprivate = false;
  llvm::DenseSet<const VarDecl *> SIMDLCVs;
  if (isOpenMPSimdDirective(D.getDirectiveKind())) {
    const auto *LoopDirective = cast<OMPLoopDirective>(&D);
    for (const Expr *C : LoopDirective->counters()) {
      SIMDLCVs.insert(
          cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
    }
  }
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    HasAtLeastOneLastprivate = true;
    if (isOpenMPTaskLoopDirective(D.getDirectiveKind()) &&
        !getLangOpts().OpenMPSimd)
      break;
    const auto *IRef = C->varlist_begin();
    const auto *IDestRef = C->destination_exprs().begin();
    for (const Expr *IInit : C->private_copies()) {
      // Keep the address of the original variable for future update at the end
      // of the loop.
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      // Taskloops do not require additional initialization; it is done in the
      // runtime support library.
      if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
                        /*RefersToEnclosingVariableOrCapture=*/
                        CapturedStmtInfo->lookup(OrigVD) != nullptr,
                        (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
        PrivateScope.addPrivate(DestVD, EmitLValue(&DRE).getAddress(*this));
        // Check if the variable is also a firstprivate: in this case IInit is
        // not generated. Initialization of this variable will happen in codegen
        // for 'firstprivate' clause.
        if (IInit && !SIMDLCVs.count(OrigVD->getCanonicalDecl())) {
          const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
          Address VDAddr = Address::invalid();
          if (C->getKind() == OMPC_LASTPRIVATE_conditional) {
            VDAddr = CGM.getOpenMPRuntime().emitLastprivateConditionalInit(
                *this, OrigVD);
            setAddrOfLocalVar(VD, VDAddr);
          } else {
            // Emit private VarDecl with copy init.
            EmitDecl(*VD);
            VDAddr = GetAddrOfLocalVar(VD);
          }
          bool IsRegistered = PrivateScope.addPrivate(OrigVD, VDAddr);
          assert(IsRegistered &&
                 "lastprivate var already registered as private");
          (void)IsRegistered;
        }
      }
      ++IRef;
      ++IDestRef;
    }
  }
  return HasAtLeastOneLastprivate;
}

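// Example (illustrative):
//
//   #pragma omp for lastprivate(x)
//
// creates the private 'x' here and records the original's address;
// EmitOMPLastprivateClauseFinal below copies the private value back to the
// original only when IsLastIterCond identifies the sequentially last
// iteration.
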
void CodeGenFunction::EmitOMPLastprivateClauseFinal(
    const OMPExecutableDirective &D, bool NoFinals,
    llvm::Value *IsLastIterCond) {
  if (!HaveInsertPoint())
    return;
  // Emit following code:
  // if (<IsLastIterCond>) {
  //   orig_var1 = private_orig_var1;
  //   ...
  //   orig_varn = private_orig_varn;
  // }
  llvm::BasicBlock *ThenBB = nullptr;
  llvm::BasicBlock *DoneBB = nullptr;
  if (IsLastIterCond) {
    // Emit implicit barrier if at least one lastprivate conditional is found
    // and this is not a simd mode.
    if (!getLangOpts().OpenMPSimd &&
        llvm::any_of(D.getClausesOfKind<OMPLastprivateClause>(),
                     [](const OMPLastprivateClause *C) {
                       return C->getKind() == OMPC_LASTPRIVATE_conditional;
                     })) {
      CGM.getOpenMPRuntime().emitBarrierCall(*this, D.getBeginLoc(),
                                             OMPD_unknown,
                                             /*EmitChecks=*/false,
                                             /*ForceSimpleCall=*/true);
    }
    ThenBB = createBasicBlock(".omp.lastprivate.then");
    DoneBB = createBasicBlock(".omp.lastprivate.done");
    Builder.CreateCondBr(IsLastIterCond, ThenBB, DoneBB);
    EmitBlock(ThenBB);
  }
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  llvm::DenseMap<const VarDecl *, const Expr *> LoopCountersAndUpdates;
  if (const auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) {
    auto IC = LoopDirective->counters().begin();
    for (const Expr *F : LoopDirective->finals()) {
      const auto *D =
          cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl())->getCanonicalDecl();
      if (NoFinals)
        AlreadyEmittedVars.insert(D);
      else
        LoopCountersAndUpdates[D] = F;
      ++IC;
    }
  }
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (const Expr *AssignOp : C->assignment_ops()) {
      const auto *PrivateVD =
          cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = PrivateVD->getType();
      const auto *CanonicalVD = PrivateVD->getCanonicalDecl();
      if (AlreadyEmittedVars.insert(CanonicalVD).second) {
        // If lastprivate variable is a loop control variable for loop-based
        // directive, update its value before copying it back to the original
        // variable.
        if (const Expr *FinalExpr = LoopCountersAndUpdates.lookup(CanonicalVD))
          EmitIgnoredExpr(FinalExpr);
        const auto *SrcVD =
            cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        // Get the address of the private variable.
        Address PrivateAddr = GetAddrOfLocalVar(PrivateVD);
        if (const auto *RefTy = PrivateVD->getType()->getAs<ReferenceType>())
          PrivateAddr = Address(
              Builder.CreateLoad(PrivateAddr),
              CGM.getTypes().ConvertTypeForMem(RefTy->getPointeeType()),
              CGM.getNaturalTypeAlignment(RefTy->getPointeeType()));
        // Store the last value to the private copy in the last iteration.
        if (C->getKind() == OMPC_LASTPRIVATE_conditional)
          CGM.getOpenMPRuntime().emitLastprivateConditionalFinalUpdate(
              *this, MakeAddrLValue(PrivateAddr, (*IRef)->getType()), PrivateVD,
              (*IRef)->getExprLoc());
        // Get the address of the original variable.
        Address OriginalAddr = GetAddrOfLocalVar(DestVD);
        EmitOMPCopy(Type, OriginalAddr, PrivateAddr, DestVD, SrcVD, AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
    if (const Expr *PostUpdate = C->getPostUpdateExpr())
      EmitIgnoredExpr(PostUpdate);
  }
  if (IsLastIterCond)
    EmitBlock(DoneBB, /*IsFinished=*/true);
}

void CodeGenFunction::EmitOMPReductionClauseInit(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope, bool ForInscan) {
  if (!HaveInsertPoint())
    return;
  SmallVector<const Expr *, 4> Shareds;
  SmallVector<const Expr *, 4> Privates;
  SmallVector<const Expr *, 4> ReductionOps;
  SmallVector<const Expr *, 4> LHSs;
  SmallVector<const Expr *, 4> RHSs;
  OMPTaskDataTy Data;
  SmallVector<const Expr *, 4> TaskLHSs;
  SmallVector<const Expr *, 4> TaskRHSs;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    if (ForInscan != (C->getModifier() == OMPC_REDUCTION_inscan))
      continue;
    Shareds.append(C->varlist_begin(), C->varlist_end());
    Privates.append(C->privates().begin(), C->privates().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
    LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    if (C->getModifier() == OMPC_REDUCTION_task) {
      Data.ReductionVars.append(C->privates().begin(), C->privates().end());
      Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end());
      Data.ReductionCopies.append(C->privates().begin(), C->privates().end());
      Data.ReductionOps.append(C->reduction_ops().begin(),
                               C->reduction_ops().end());
      TaskLHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
      TaskRHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    }
  }
  ReductionCodeGen RedCG(Shareds, Shareds, Privates, ReductionOps);
  unsigned Count = 0;
  auto *ILHS = LHSs.begin();
  auto *IRHS = RHSs.begin();
  auto *IPriv = Privates.begin();
  for (const Expr *IRef : Shareds) {
    const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IPriv)->getDecl());
    // Emit private VarDecl with reduction init.
    RedCG.emitSharedOrigLValue(*this, Count);
    RedCG.emitAggregateType(*this, Count);
    AutoVarEmission Emission = EmitAutoVarAlloca(*PrivateVD);
    RedCG.emitInitialization(*this, Count, Emission.getAllocatedAddress(),
                             RedCG.getSharedLValue(Count).getAddress(*this),
                             [&Emission](CodeGenFunction &CGF) {
                               CGF.EmitAutoVarInit(Emission);
                               return true;
                             });
    EmitAutoVarCleanups(Emission);
    Address BaseAddr = RedCG.adjustPrivateAddress(
        *this, Count, Emission.getAllocatedAddress());
    bool IsRegistered =
        PrivateScope.addPrivate(RedCG.getBaseDecl(Count), BaseAddr);
    assert(IsRegistered && "private var already registered as private");
    // Silence the warning about unused variable.
    (void)IsRegistered;

    const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
    const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
    QualType Type = PrivateVD->getType();
    bool isaOMPArraySectionExpr = isa<OMPArraySectionExpr>(IRef);
    if (isaOMPArraySectionExpr && Type->isVariablyModifiedType()) {
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      PrivateScope.addPrivate(LHSVD,
                              RedCG.getSharedLValue(Count).getAddress(*this));
      PrivateScope.addPrivate(RHSVD, GetAddrOfLocalVar(PrivateVD));
    } else if ((isaOMPArraySectionExpr && Type->isScalarType()) ||
               isa<ArraySubscriptExpr>(IRef)) {
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      PrivateScope.addPrivate(LHSVD,
                              RedCG.getSharedLValue(Count).getAddress(*this));
      PrivateScope.addPrivate(RHSVD, Builder.CreateElementBitCast(
                                         GetAddrOfLocalVar(PrivateVD),
                                         ConvertTypeForMem(RHSVD->getType()),
                                         "rhs.begin"));
    } else {
      QualType Type = PrivateVD->getType();
      bool IsArray = getContext().getAsArrayType(Type) != nullptr;
      Address OriginalAddr = RedCG.getSharedLValue(Count).getAddress(*this);
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      if (IsArray) {
        OriginalAddr = Builder.CreateElementBitCast(
            OriginalAddr, ConvertTypeForMem(LHSVD->getType()), "lhs.begin");
      }
      PrivateScope.addPrivate(LHSVD, OriginalAddr);
      PrivateScope.addPrivate(
          RHSVD, IsArray ? Builder.CreateElementBitCast(
                               GetAddrOfLocalVar(PrivateVD),
                               ConvertTypeForMem(RHSVD->getType()), "rhs.begin")
                         : GetAddrOfLocalVar(PrivateVD));
    }
    ++ILHS;
    ++IRHS;
    ++IPriv;
    ++Count;
  }
  if (!Data.ReductionVars.empty()) {
    Data.IsReductionWithTaskMod = true;
    Data.IsWorksharingReduction =
        isOpenMPWorksharingDirective(D.getDirectiveKind());
    llvm::Value *ReductionDesc = CGM.getOpenMPRuntime().emitTaskReductionInit(
        *this, D.getBeginLoc(), TaskLHSs, TaskRHSs, Data);
    const Expr *TaskRedRef = nullptr;
    switch (D.getDirectiveKind()) {
    case OMPD_parallel:
      TaskRedRef = cast<OMPParallelDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_for:
      TaskRedRef = cast<OMPForDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_sections:
      TaskRedRef = cast<OMPSectionsDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_parallel_for:
      TaskRedRef = cast<OMPParallelForDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_parallel_master:
      TaskRedRef =
          cast<OMPParallelMasterDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_parallel_sections:
      TaskRedRef =
          cast<OMPParallelSectionsDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_target_parallel:
      TaskRedRef =
          cast<OMPTargetParallelDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_target_parallel_for:
      TaskRedRef =
          cast<OMPTargetParallelForDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_distribute_parallel_for:
      TaskRedRef =
          cast<OMPDistributeParallelForDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_teams_distribute_parallel_for:
      TaskRedRef = cast<OMPTeamsDistributeParallelForDirective>(D)
                       .getTaskReductionRefExpr();
      break;
    case OMPD_target_teams_distribute_parallel_for:
      TaskRedRef = cast<OMPTargetTeamsDistributeParallelForDirective>(D)
                       .getTaskReductionRefExpr();
      break;
    case OMPD_simd:
    case OMPD_for_simd:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_parallel_for_simd:
    case OMPD_task:
    case OMPD_taskyield:
    case OMPD_error:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_flush:
    case OMPD_depobj:
    case OMPD_scan:
    case OMPD_ordered:
    case OMPD_atomic:
    case OMPD_teams:
    case OMPD_target:
    case OMPD_cancellation_point:
    case OMPD_cancel:
    case OMPD_target_data:
    case OMPD_target_enter_data:
    case OMPD_target_exit_data:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_master_taskloop:
    case OMPD_master_taskloop_simd:
    case OMPD_parallel_master_taskloop:
    case OMPD_parallel_master_taskloop_simd:
    case OMPD_distribute:
    case OMPD_target_update:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_distribute_simd:
    case OMPD_target_parallel_for_simd:
    case OMPD_target_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_target_teams:
    case OMPD_target_teams_distribute:
    case OMPD_target_teams_distribute_parallel_for_simd:
    case OMPD_target_teams_distribute_simd:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_threadprivate:
    case OMPD_allocate:
    case OMPD_declare_reduction:
    case OMPD_declare_mapper:
    case OMPD_declare_simd:
    case OMPD_requires:
    case OMPD_declare_variant:
    case OMPD_begin_declare_variant:
    case OMPD_end_declare_variant:
    case OMPD_unknown:
    default:
      llvm_unreachable("Unexpected directive with task reductions.");
    }

    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(TaskRedRef)->getDecl());
    EmitVarDecl(*VD);
    EmitStoreOfScalar(ReductionDesc, GetAddrOfLocalVar(VD),
                      /*Volatile=*/false, TaskRedRef->getType());
  }
}

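// Example (illustrative):
//
//   #pragma omp parallel for reduction(+:sum)
//
// gives every thread a private 'sum' initialized above with the '+' identity
// (zero); the LHS/RHS helper variables bound here are the operands that the
// combiner expression uses when EmitOMPReductionClauseFinal merges the
// per-thread copies.
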
void CodeGenFunction::EmitOMPReductionClauseFinal(
    const OMPExecutableDirective &D, const OpenMPDirectiveKind ReductionKind) {
  if (!HaveInsertPoint())
    return;
  llvm::SmallVector<const Expr *, 8> Privates;
  llvm::SmallVector<const Expr *, 8> LHSExprs;
  llvm::SmallVector<const Expr *, 8> RHSExprs;
  llvm::SmallVector<const Expr *, 8> ReductionOps;
  bool HasAtLeastOneReduction = false;
  bool IsReductionWithTaskMod = false;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    // Do not emit for inscan reductions.
    if (C->getModifier() == OMPC_REDUCTION_inscan)
      continue;
    HasAtLeastOneReduction = true;
    Privates.append(C->privates().begin(), C->privates().end());
    LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
    IsReductionWithTaskMod =
        IsReductionWithTaskMod || C->getModifier() == OMPC_REDUCTION_task;
  }
  if (HasAtLeastOneReduction) {
    if (IsReductionWithTaskMod) {
      CGM.getOpenMPRuntime().emitTaskReductionFini(
          *this, D.getBeginLoc(),
          isOpenMPWorksharingDirective(D.getDirectiveKind()));
    }
    bool WithNowait = D.getSingleClause<OMPNowaitClause>() ||
                      isOpenMPParallelDirective(D.getDirectiveKind()) ||
                      ReductionKind == OMPD_simd;
    bool SimpleReduction = ReductionKind == OMPD_simd;
    // Emit nowait reduction if nowait clause is present or directive is a
    // parallel directive (it always has implicit barrier).
    CGM.getOpenMPRuntime().emitReduction(
        *this, D.getEndLoc(), Privates, LHSExprs, RHSExprs, ReductionOps,
        {WithNowait, SimpleReduction, ReductionKind});
  }
}

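// For the reduction example above, the final phase roughly lowers to a
// __kmpc_reduce (or __kmpc_reduce_nowait) call whose critical and atomic
// paths both apply 'sum = sum + sum.private'; OMPD_simd reductions are
// "simple" and combine inline without any runtime call.
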
static void emitPostUpdateForReductionClause(
    CodeGenFunction &CGF, const OMPExecutableDirective &D,
    const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
  if (!CGF.HaveInsertPoint())
    return;
  llvm::BasicBlock *DoneBB = nullptr;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    if (const Expr *PostUpdate = C->getPostUpdateExpr()) {
      if (!DoneBB) {
        if (llvm::Value *Cond = CondGen(CGF)) {
          // If the first post-update expression is found, emit conditional
          // block if it was requested.
          llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.pu");
          DoneBB = CGF.createBasicBlock(".omp.reduction.pu.done");
          CGF.Builder.CreateCondBr(Cond, ThenBB, DoneBB);
          CGF.EmitBlock(ThenBB);
        }
      }
      CGF.EmitIgnoredExpr(PostUpdate);
    }
  }
  if (DoneBB)
    CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
}

1473 namespace {
1474 /// Codegen lambda for appending distribute lower and upper bounds to the
1475 /// outlined parallel function. This is necessary for combined constructs
1476 /// such as 'distribute parallel for'.
1477 typedef llvm::function_ref<void(CodeGenFunction &,
1478  const OMPExecutableDirective &,
1479  llvm::SmallVectorImpl<llvm::Value *> &)>
1480  CodeGenBoundParametersTy;
1481 } // anonymous namespace
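// Illustrative only: for a combined construct such as
//
//   #pragma omp distribute parallel for
//
// the chunk bounds produced by the 'distribute' schedule are appended as two
// extra arguments to the outlined 'parallel' function, so that the inner
// 'for' schedule partitions [DistLB, DistUB] (names illustrative) instead of
// the full iteration space.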
1482 
1483 static void
1484 checkForLastprivateConditionalUpdate(CodeGenFunction &CGF,
1485  const OMPExecutableDirective &S) {
1486  if (CGF.getLangOpts().OpenMP < 50)
1487  return;
1488  llvm::DenseSet<CanonicalDeclPtr<const Decl>> PrivateDecls;
1489  for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
1490  for (const Expr *Ref : C->varlists()) {
1491  if (!Ref->getType()->isScalarType())
1492  continue;
1493  const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
1494  if (!DRE)
1495  continue;
1496  PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
1497  CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref);
1498  }
1499  }
1500  for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
1501  for (const Expr *Ref : C->varlists()) {
1502  if (!Ref->getType()->isScalarType())
1503  continue;
1504  const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
1505  if (!DRE)
1506  continue;
1507  PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
1508  CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref);
1509  }
1510  }
1511  for (const auto *C : S.getClausesOfKind<OMPLinearClause>()) {
1512  for (const Expr *Ref : C->varlists()) {
1513  if (!Ref->getType()->isScalarType())
1514  continue;
1515  const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
1516  if (!DRE)
1517  continue;
1518  PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
1519  CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref);
1520  }
1521  }
1522  // Privates need not be analyzed since they are not captured at all.
1523  // Task reductions may be skipped - tasks are ignored.
1524  // Firstprivates do not return a value but may be passed by reference - no
1525  // need to check for updated lastprivate conditional.
1526  for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
1527  for (const Expr *Ref : C->varlists()) {
1528  if (!Ref->getType()->isScalarType())
1529  continue;
1530  const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
1531  if (!DRE)
1532  continue;
1533  PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
1534  }
1535  }
1536  CGF.CGM.getOpenMPRuntime().checkAndEmitSharedLastprivateConditional(
1537  CGF, S, PrivateDecls);
1538 }
1539 
1540 static void emitCommonOMPParallelDirective(
1541  CodeGenFunction &CGF, const OMPExecutableDirective &S,
1542  OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
1543  const CodeGenBoundParametersTy &CodeGenBoundParameters) {
1544  const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
1545  llvm::Value *NumThreads = nullptr;
1546  llvm::Function *OutlinedFn =
1547  CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction(
1548  S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
1549  if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>()) {
1550  CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
1551  NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
1552  /*IgnoreResultAssign=*/true);
1553  CGF.CGM.getOpenMPRuntime().emitNumThreadsClause(
1554  CGF, NumThreads, NumThreadsClause->getBeginLoc());
1555  }
1556  if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>()) {
1557  CodeGenFunction::RunCleanupsScope ProcBindScope(CGF);
1558  CGF.CGM.getOpenMPRuntime().emitProcBindClause(
1559  CGF, ProcBindClause->getProcBindKind(), ProcBindClause->getBeginLoc());
1560  }
1561  const Expr *IfCond = nullptr;
1562  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
1563  if (C->getNameModifier() == OMPD_unknown ||
1564  C->getNameModifier() == OMPD_parallel) {
1565  IfCond = C->getCondition();
1566  break;
1567  }
1568  }
1569 
1570  OMPParallelScope Scope(CGF, S);
1571  llvm::SmallVector<llvm::Value *, 16> CapturedVars;
1572  // Combining 'distribute' with 'for' requires sharing each 'distribute'
1573  // chunk's lower and upper bounds with the pragma 'for' chunking mechanism.
1574  // The following lambda takes care of appending the lower and upper bound
1575  // parameters when necessary.
1576  CodeGenBoundParameters(CGF, S, CapturedVars);
1577  CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
1578  CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getBeginLoc(), OutlinedFn,
1579  CapturedVars, IfCond, NumThreads);
1580 }
1581 
1582 static bool isAllocatableDecl(const VarDecl *VD) {
1583  const VarDecl *CVD = VD->getCanonicalDecl();
1584  if (!CVD->hasAttr<OMPAllocateDeclAttr>())
1585  return false;
1586  const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
1587  // Use the default allocation.
1588  return !((AA->getAllocatorType() == OMPAllocateDeclAttr::OMPDefaultMemAlloc ||
1589  AA->getAllocatorType() == OMPAllocateDeclAttr::OMPNullMemAlloc) &&
1590  !AA->getAllocator());
1591 }
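// A declaration this predicate accepts looks like (illustrative):
//
//   omp_allocator_handle_t MyAlloc = omp_low_lat_mem_alloc;
//   int Buf[64];
//   #pragma omp allocate(Buf) allocator(MyAlloc)
//
// The default and null allocators without an allocator expression fall back
// to the ordinary alloca-based emission.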
1592 
1593 static void emitEmptyBoundParameters(CodeGenFunction &,
1594  const OMPExecutableDirective &,
1595  llvm::SmallVectorImpl<llvm::Value *> &) {}
1596 
1597 static void emitOMPCopyinClause(CodeGenFunction &CGF,
1598  const OMPExecutableDirective &S) {
1599  bool Copyins = CGF.EmitOMPCopyinClause(S);
1600  if (Copyins) {
1601  // Emit an implicit barrier to synchronize threads and avoid data races on
1602  // propagation of the master thread's values of threadprivate variables to
1603  // the local instances of those variables in all other implicit threads.
1604  CGF.CGM.getOpenMPRuntime().emitBarrierCall(
1605  CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
1606  /*ForceSimpleCall=*/true);
1607  }
1608 }
1609 
1610 Address CodeGenFunction::OMPBuilderCBHelpers::getAddressOfLocalVariable(
1611  CodeGenFunction &CGF, const VarDecl *VD) {
1612  CodeGenModule &CGM = CGF.CGM;
1613  auto &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
1614 
1615  if (!VD)
1616  return Address::invalid();
1617  const VarDecl *CVD = VD->getCanonicalDecl();
1618  if (!isAllocatableDecl(CVD))
1619  return Address::invalid();
1620  llvm::Value *Size;
1621  CharUnits Align = CGM.getContext().getDeclAlign(CVD);
1622  if (CVD->getType()->isVariablyModifiedType()) {
1623  Size = CGF.getTypeSize(CVD->getType());
1624  // Align the size: ((size + align - 1) / align) * align
1625  Size = CGF.Builder.CreateNUWAdd(
1626  Size, CGM.getSize(Align - CharUnits::fromQuantity(1)));
1627  Size = CGF.Builder.CreateUDiv(Size, CGM.getSize(Align));
1628  Size = CGF.Builder.CreateNUWMul(Size, CGM.getSize(Align));
1629  } else {
1630  CharUnits Sz = CGM.getContext().getTypeSizeInChars(CVD->getType());
1631  Size = CGM.getSize(Sz.alignTo(Align));
1632  }
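  // Worked example (illustrative): both branches round the size up to a
  // multiple of the alignment; for size = 10 and align = 8 this yields
  // ((10 + 8 - 1) / 8) * 8 = 16.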
1633 
1634  const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
1635  assert(AA->getAllocator() &&
1636  "Expected allocator expression for non-default allocator.");
1637  llvm::Value *Allocator = CGF.EmitScalarExpr(AA->getAllocator());
1638  // According to the standard, the original allocator type is an enum (integer).
1639  // Convert to pointer type, if required.
1640  if (Allocator->getType()->isIntegerTy())
1641  Allocator = CGF.Builder.CreateIntToPtr(Allocator, CGM.VoidPtrTy);
1642  else if (Allocator->getType()->isPointerTy())
1643  Allocator = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Allocator,
1644  CGM.VoidPtrTy);
1645 
1646  llvm::Value *Addr = OMPBuilder.createOMPAlloc(
1647  CGF.Builder, Size, Allocator,
1648  getNameWithSeparators({CVD->getName(), ".void.addr"}, ".", "."));
1649  llvm::CallInst *FreeCI =
1650  OMPBuilder.createOMPFree(CGF.Builder, Addr, Allocator);
1651 
1652  CGF.EHStack.pushCleanup<OMPAllocateCleanupTy>(NormalAndEHCleanup, FreeCI);
1653  Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
1654  Addr,
1655  CGF.ConvertTypeForMem(CGM.getContext().getPointerType(CVD->getType())),
1656  getNameWithSeparators({CVD->getName(), ".addr"}, ".", "."));
1657  return Address(Addr, CGF.ConvertTypeForMem(CVD->getType()), Align);
1658 }
1659 
1660 Address CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate(
1661  CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr,
1662  SourceLocation Loc) {
1663  CodeGenModule &CGM = CGF.CGM;
1664  if (CGM.getLangOpts().OpenMPUseTLS &&
1665  CGM.getContext().getTargetInfo().isTLSSupported())
1666  return VDAddr;
1667 
1668  llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
1669 
1670  llvm::Type *VarTy = VDAddr.getElementType();
1671  llvm::Value *Data =
1672  CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.Int8PtrTy);
1673  llvm::ConstantInt *Size = CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy));
1674  std::string Suffix = getNameWithSeparators({"cache", ""});
1675  llvm::Twine CacheName = Twine(CGM.getMangledName(VD)).concat(Suffix);
1676 
1677  llvm::CallInst *ThreadPrivateCacheCall =
1678  OMPBuilder.createCachedThreadPrivate(CGF.Builder, Data, Size, CacheName);
1679 
1680  return Address(ThreadPrivateCacheCall, CGM.Int8Ty, VDAddr.getAlignment());
1681 }
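// Illustrative: for 'static int X; #pragma omp threadprivate(X)' built
// without TLS support, the call created above returns the calling thread's
// copy roughly as
//
//   __kmpc_threadprivate_cached(<loc>, <tid>, &X, sizeof(X), &<X.cache.>);
//
// where the <...> operands are placeholders.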
1682 
1683 std::string CodeGenFunction::OMPBuilderCBHelpers::getNameWithSeparators(
1684  ArrayRef<StringRef> Parts, StringRef FirstSeparator, StringRef Separator) {
1685  SmallString<128> Buffer;
1686  llvm::raw_svector_ostream OS(Buffer);
1687  StringRef Sep = FirstSeparator;
1688  for (StringRef Part : Parts) {
1689  OS << Sep << Part;
1690  Sep = Separator;
1691  }
1692  return OS.str().str();
1693 }
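// For example (assuming the "." default separators used by the callers in
// this file): getNameWithSeparators({"cache", ""}) yields ".cache.", which
// is then concatenated with the variable's mangled name to form
// "<var>.cache.".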
1694 
1695 void CodeGenFunction::OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
1696  CodeGenFunction &CGF, const Stmt *RegionBodyStmt, InsertPointTy AllocaIP,
1697  InsertPointTy CodeGenIP, Twine RegionName) {
1698  CGBuilderTy &Builder = CGF.Builder;
1699  Builder.restoreIP(CodeGenIP);
1700  llvm::BasicBlock *FiniBB = splitBBWithSuffix(Builder, /*CreateBranch=*/false,
1701  "." + RegionName + ".after");
1702 
1703  {
1704  OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(CGF, AllocaIP, *FiniBB);
1705  CGF.EmitStmt(RegionBodyStmt);
1706  }
1707 
1708  if (Builder.saveIP().isSet())
1709  Builder.CreateBr(FiniBB);
1710 }
1711 
1712 void CodeGenFunction::OMPBuilderCBHelpers::EmitOMPOutlinedRegionBody(
1713  CodeGenFunction &CGF, const Stmt *RegionBodyStmt, InsertPointTy AllocaIP,
1714  InsertPointTy CodeGenIP, Twine RegionName) {
1715  CGBuilderTy &Builder = CGF.Builder;
1716  Builder.restoreIP(CodeGenIP);
1717  llvm::BasicBlock *FiniBB = splitBBWithSuffix(Builder, /*CreateBranch=*/false,
1718  "." + RegionName + ".after");
1719 
1720  {
1721  OMPBuilderCBHelpers::OutlinedRegionBodyRAII IRB(CGF, AllocaIP, *FiniBB);
1722  CGF.EmitStmt(RegionBodyStmt);
1723  }
1724 
1725  if (Builder.saveIP().isSet())
1726  Builder.CreateBr(FiniBB);
1727 }
1728 
1729 void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
1730  if (CGM.getLangOpts().OpenMPIRBuilder) {
1731  llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
1732  // Check if we have any if clause associated with the directive.
1733  llvm::Value *IfCond = nullptr;
1734  if (const auto *C = S.getSingleClause<OMPIfClause>())
1735  IfCond = EmitScalarExpr(C->getCondition(),
1736  /*IgnoreResultAssign=*/true);
1737 
1738  llvm::Value *NumThreads = nullptr;
1739  if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>())
1740  NumThreads = EmitScalarExpr(NumThreadsClause->getNumThreads(),
1741  /*IgnoreResultAssign=*/true);
1742 
1743  ProcBindKind ProcBind = OMP_PROC_BIND_default;
1744  if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>())
1745  ProcBind = ProcBindClause->getProcBindKind();
1746 
1747  using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
1748 
1749  // The cleanup callback that finalizes all variables at the given location,
1750  // i.e., calls their destructors etc.
1751  auto FiniCB = [this](InsertPointTy IP) {
1752  OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
1753  };
1754 
1755  // Privatization callback that performs appropriate action for
1756  // shared/private/firstprivate/lastprivate/copyin/... variables.
1757  //
1758  // TODO: This defaults to shared right now.
1759  auto PrivCB = [](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
1760  llvm::Value &, llvm::Value &Val, llvm::Value *&ReplVal) {
1761  // The next line is appropriate only for variables (Val) with the
1762  // data-sharing attribute "shared".
1763  ReplVal = &Val;
1764 
1765  return CodeGenIP;
1766  };
1767 
1768  const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
1769  const Stmt *ParallelRegionBodyStmt = CS->getCapturedStmt();
1770 
1771  auto BodyGenCB = [&, this](InsertPointTy AllocaIP,
1772  InsertPointTy CodeGenIP) {
1773  OMPBuilderCBHelpers::EmitOMPOutlinedRegionBody(
1774  *this, ParallelRegionBodyStmt, AllocaIP, CodeGenIP, "parallel");
1775  };
1776 
1777  CGCapturedStmtInfo CGSI(*CS, CR_OpenMP);
1778  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI);
1779  llvm::OpenMPIRBuilder::InsertPointTy AllocaIP(
1780  AllocaInsertPt->getParent(), AllocaInsertPt->getIterator());
1781  Builder.restoreIP(
1782  OMPBuilder.createParallel(Builder, AllocaIP, BodyGenCB, PrivCB, FiniCB,
1783  IfCond, NumThreads, ProcBind, S.hasCancel()));
1784  return;
1785  }
1786 
1787  // Emit parallel region as a standalone region.
1788  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
1789  Action.Enter(CGF);
1790  OMPPrivateScope PrivateScope(CGF);
1791  emitOMPCopyinClause(CGF, S);
1792  (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
1793  CGF.EmitOMPPrivateClause(S, PrivateScope);
1794  CGF.EmitOMPReductionClauseInit(S, PrivateScope);
1795  (void)PrivateScope.Privatize();
1796  CGF.EmitStmt(S.getCapturedStmt(OMPD_parallel)->getCapturedStmt());
1797  CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
1798  };
1799  {
1800  auto LPCRegion =
1801  CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
1802  emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen,
1803  emitEmptyBoundParameters);
1804  emitPostUpdateForReductionClause(*this, S,
1805  [](CodeGenFunction &) { return nullptr; });
1806  }
1807  // Check for outer lastprivate conditional update.
1808  checkForLastprivateConditionalUpdate(*this, S);
1809 }
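// Illustrative shape of the classic (non-OpenMPIRBuilder) lowering above,
// with <...> as placeholders:
//
//   #pragma omp parallel if(Cond) num_threads(N)
//   { <body> }
//
//   if (N) __kmpc_push_num_threads(<loc>, <tid>, N);
//   if (Cond)
//     __kmpc_fork_call(<loc>, <nargs>, <outlined>, <captured>...);
//   else
//     // serialized: __kmpc_serialized_parallel + direct call of <outlined>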
1810 
1811 void CodeGenFunction::EmitOMPMetaDirective(const OMPMetaDirective &S) {
1812  EmitStmt(S.getIfStmt());
1813 }
1814 
1815 namespace {
1816 /// RAII to handle scopes for loop transformation directives.
1817 class OMPTransformDirectiveScopeRAII {
1818  OMPLoopScope *Scope = nullptr;
1819  CodeGenFunction::CGCapturedStmtInfo *CGSI = nullptr;
1820  CodeGenFunction::CGCapturedStmtRAII *CapInfoRAII = nullptr;
1821 
1822 public:
1823  OMPTransformDirectiveScopeRAII(CodeGenFunction &CGF, const Stmt *S) {
1824  if (const auto *Dir = dyn_cast<OMPLoopBasedDirective>(S)) {
1825  Scope = new OMPLoopScope(CGF, *Dir);
1826  CGSI = new CodeGenFunction::CGCapturedStmtInfo(CR_OpenMP);
1827  CapInfoRAII = new CodeGenFunction::CGCapturedStmtRAII(CGF, CGSI);
1828  }
1829  }
1830  ~OMPTransformDirectiveScopeRAII() {
1831  if (!Scope)
1832  return;
1833  delete CapInfoRAII;
1834  delete CGSI;
1835  delete Scope;
1836  }
1837 };
1838 } // namespace
1839 
1840 static void emitBody(CodeGenFunction &CGF, const Stmt *S, const Stmt *NextLoop,
1841  int MaxLevel, int Level = 0) {
1842  assert(Level < MaxLevel && "Too deep lookup during loop body codegen.");
1843  const Stmt *SimplifiedS = S->IgnoreContainers();
1844  if (const auto *CS = dyn_cast<CompoundStmt>(SimplifiedS)) {
1845  PrettyStackTraceLoc CrashInfo(
1846  CGF.getContext().getSourceManager(), CS->getLBracLoc(),
1847  "LLVM IR generation of compound statement ('{}')");
1848 
1849  // Keep track of the current cleanup stack depth, including debug scopes.
1850  CodeGenFunction::LexicalScope Scope(CGF, S->getSourceRange());
1851  for (const Stmt *CurStmt : CS->body())
1852  emitBody(CGF, CurStmt, NextLoop, MaxLevel, Level);
1853  return;
1854  }
1855  if (SimplifiedS == NextLoop) {
1856  if (auto *Dir = dyn_cast<OMPLoopTransformationDirective>(SimplifiedS))
1857  SimplifiedS = Dir->getTransformedStmt();
1858  if (const auto *CanonLoop = dyn_cast<OMPCanonicalLoop>(SimplifiedS))
1859  SimplifiedS = CanonLoop->getLoopStmt();
1860  if (const auto *For = dyn_cast<ForStmt>(SimplifiedS)) {
1861  S = For->getBody();
1862  } else {
1863  assert(isa<CXXForRangeStmt>(SimplifiedS) &&
1864  "Expected canonical for loop or range-based for loop.");
1865  const auto *CXXFor = cast<CXXForRangeStmt>(SimplifiedS);
1866  CGF.EmitStmt(CXXFor->getLoopVarStmt());
1867  S = CXXFor->getBody();
1868  }
1869  if (Level + 1 < MaxLevel) {
1870  NextLoop = OMPLoopBasedDirective::tryToFindNextInnerLoop(
1871  S, /*TryImperfectlyNestedLoops=*/true);
1872  emitBody(CGF, S, NextLoop, MaxLevel, Level + 1);
1873  return;
1874  }
1875  }
1876  CGF.EmitStmt(S);
1877 }
1878 
1879 void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D,
1880  JumpDest LoopExit) {
1881  RunCleanupsScope BodyScope(*this);
1882  // Update counters values on current iteration.
1883  for (const Expr *UE : D.updates())
1884  EmitIgnoredExpr(UE);
1885  // Update the linear variables.
1886  // In distribute directives only loop counters may be marked as linear;
1887  // there is no need to generate code for them.
1888  if (!isOpenMPDistributeDirective(D.getDirectiveKind())) {
1889  for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
1890  for (const Expr *UE : C->updates())
1891  EmitIgnoredExpr(UE);
1892  }
1893  }
1894 
1895  // On a continue in the body, jump to the end.
1896  JumpDest Continue = getJumpDestInCurrentScope("omp.body.continue");
1897  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
1898  for (const Expr *E : D.finals_conditions()) {
1899  if (!E)
1900  continue;
1901  // Check that the loop counter in a non-rectangular nest fits into the
1902  // iteration space.
1903  llvm::BasicBlock *NextBB = createBasicBlock("omp.body.next");
1904  EmitBranchOnBoolExpr(E, NextBB, Continue.getBlock(),
1905  getProfileCount(D.getBody()));
1906  EmitBlock(NextBB);
1907  }
1908 
1909  OMPPrivateScope InscanScope(*this);
1910  EmitOMPReductionClauseInit(D, InscanScope, /*ForInscan=*/true);
1911  bool IsInscanRegion = InscanScope.Privatize();
1912  if (IsInscanRegion) {
1913  // Need to remember the blocks before and after the scan directive
1914  // to dispatch them correctly depending on the clause used in
1915  // this directive, inclusive or exclusive. For an inclusive scan the
1916  // natural order of the blocks is used; for an exclusive clause the
1917  // blocks must be executed in reverse order.
1918  OMPBeforeScanBlock = createBasicBlock("omp.before.scan.bb");
1919  OMPAfterScanBlock = createBasicBlock("omp.after.scan.bb");
1920  // No need to allocate inscan exit block, in simd mode it is selected in the
1921  // codegen for the scan directive.
1922  if (D.getDirectiveKind() != OMPD_simd && !getLangOpts().OpenMPSimd)
1923  OMPScanExitBlock = createBasicBlock("omp.exit.inscan.bb");
1924  OMPScanDispatch = createBasicBlock("omp.inscan.dispatch");
1925  EmitBranch(OMPScanDispatch);
1926  EmitBlock(OMPBeforeScanBlock);
1927  }
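  // Illustrative dispatch order for the blocks created above:
  //   scan(inclusive): omp.inscan.dispatch -> omp.before.scan.bb -> <scan>
  //                    -> omp.after.scan.bb
  //   scan(exclusive): omp.inscan.dispatch -> omp.after.scan.bb -> <scan>
  //                    -> omp.before.scan.bb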
1928 
1929  // Emit loop variables for C++ range loops.
1930  const Stmt *Body =
1931  D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers();
1932  // Emit loop body.
1933  emitBody(*this, Body,
1934  OMPLoopBasedDirective::tryToFindNextInnerLoop(
1935  Body, /*TryImperfectlyNestedLoops=*/true),
1936  D.getLoopsNumber());
1937 
1938  // Jump to the dispatcher at the end of the loop body.
1939  if (IsInscanRegion)
1940  EmitBranch(OMPScanExitBlock);
1941 
1942  // The end (updates/cleanups).
1943  EmitBlock(Continue.getBlock());
1944  BreakContinueStack.pop_back();
1945 }
1946 
1947 using EmittedClosureTy = std::pair<llvm::Function *, llvm::Value *>;
1948 
1949 /// Emit a captured statement and return the function as well as its captured
1950 /// closure context.
1951 static EmittedClosureTy emitCapturedStmtFunc(CodeGenFunction &ParentCGF,
1952  const CapturedStmt *S) {
1953  LValue CapStruct = ParentCGF.InitCapturedStruct(*S);
1954  CodeGenFunction CGF(ParentCGF.CGM, /*suppressNewContext=*/true);
1955  std::unique_ptr<CodeGenFunction::CGCapturedStmtInfo> CSI =
1956  std::make_unique<CodeGenFunction::CGCapturedStmtInfo>(*S);
1957  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, CSI.get());
1958  llvm::Function *F = CGF.GenerateCapturedStmtFunction(*S);
1959 
1960  return {F, CapStruct.getPointer(ParentCGF)};
1961 }
1962 
1963 /// Emit a call to a previously captured closure.
1964 static llvm::CallInst *
1965 emitCapturedStmtCall(CodeGenFunction &ParentCGF, EmittedClosureTy Cap,
1966  llvm::ArrayRef<llvm::Value *> Args) {
1967  // Append the closure context to the argument.
1968  SmallVector<llvm::Value *> EffectiveArgs;
1969  EffectiveArgs.reserve(Args.size() + 1);
1970  llvm::append_range(EffectiveArgs, Args);
1971  EffectiveArgs.push_back(Cap.second);
1972 
1973  return ParentCGF.Builder.CreateCall(Cap.first, EffectiveArgs);
1974 }
1975 
1976 llvm::CanonicalLoopInfo *
1977 CodeGenFunction::EmitOMPCollapsedCanonicalLoopNest(const Stmt *S, int Depth) {
1978  assert(Depth == 1 && "Nested loops with OpenMPIRBuilder not yet implemented");
1979 
1980  // The caller is processing the loop-associated directive containing the \p
1981  // Depth loops nested in \p S. Put the previous pending loop-associated
1982  // directive on the stack. If the current loop-associated directive is a loop
1983  // transformation directive, it will push its generated loops onto the stack
1984  // such that together with the loops left here they form the combined loop
1985  // nest for the parent loop-associated directive.
1986  int ParentExpectedOMPLoopDepth = ExpectedOMPLoopDepth;
1987  ExpectedOMPLoopDepth = Depth;
1988 
1989  EmitStmt(S);
1990  assert(OMPLoopNestStack.size() >= (size_t)Depth && "Found too few loops");
1991 
1992  // The last added loop is the outermost one.
1993  llvm::CanonicalLoopInfo *Result = OMPLoopNestStack.back();
1994 
1995  // Pop the \p Depth loops requested by the call from that stack and restore
1996  // the previous context.
1997  OMPLoopNestStack.pop_back_n(Depth);
1998  ExpectedOMPLoopDepth = ParentExpectedOMPLoopDepth;
1999 
2000  return Result;
2001 }
2002 
2003 void CodeGenFunction::EmitOMPCanonicalLoop(const OMPCanonicalLoop *S) {
2004  const Stmt *SyntacticalLoop = S->getLoopStmt();
2005  if (!getLangOpts().OpenMPIRBuilder) {
2006  // Ignore if OpenMPIRBuilder is not enabled.
2007  EmitStmt(SyntacticalLoop);
2008  return;
2009  }
2010 
2011  LexicalScope ForScope(*this, S->getSourceRange());
2012 
2013  // Emit init statements. The Distance/LoopVar funcs may reference variable
2014  // declarations they contain.
2015  const Stmt *BodyStmt;
2016  if (const auto *For = dyn_cast<ForStmt>(SyntacticalLoop)) {
2017  if (const Stmt *InitStmt = For->getInit())
2018  EmitStmt(InitStmt);
2019  BodyStmt = For->getBody();
2020  } else if (const auto *RangeFor =
2021  dyn_cast<CXXForRangeStmt>(SyntacticalLoop)) {
2022  if (const DeclStmt *RangeStmt = RangeFor->getRangeStmt())
2023  EmitStmt(RangeStmt);
2024  if (const DeclStmt *BeginStmt = RangeFor->getBeginStmt())
2025  EmitStmt(BeginStmt);
2026  if (const DeclStmt *EndStmt = RangeFor->getEndStmt())
2027  EmitStmt(EndStmt);
2028  if (const DeclStmt *LoopVarStmt = RangeFor->getLoopVarStmt())
2029  EmitStmt(LoopVarStmt);
2030  BodyStmt = RangeFor->getBody();
2031  } else
2032  llvm_unreachable("Expected for-stmt or range-based for-stmt");
2033 
2034  // Emit closure for later use. By-value captures will be captured here.
2035  const CapturedStmt *DistanceFunc = S->getDistanceFunc();
2036  EmittedClosureTy DistanceClosure = emitCapturedStmtFunc(*this, DistanceFunc);
2037  const CapturedStmt *LoopVarFunc = S->getLoopVarFunc();
2038  EmittedClosureTy LoopVarClosure = emitCapturedStmtFunc(*this, LoopVarFunc);
2039 
2040  // Call the distance function to get the number of iterations of the loop to
2041  // come.
2042  QualType LogicalTy = DistanceFunc->getCapturedDecl()
2043  ->getParam(0)
2044  ->getType()
2045  .getNonReferenceType();
2046  Address CountAddr = CreateMemTemp(LogicalTy, ".count.addr");
2047  emitCapturedStmtCall(*this, DistanceClosure, {CountAddr.getPointer()});
2048  llvm::Value *DistVal = Builder.CreateLoad(CountAddr, ".count");
2049 
2050  // Emit the loop structure.
2051  llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
2052  auto BodyGen = [&, this](llvm::OpenMPIRBuilder::InsertPointTy CodeGenIP,
2053  llvm::Value *IndVar) {
2054  Builder.restoreIP(CodeGenIP);
2055 
2056  // Emit the loop body: Convert the logical iteration number to the loop
2057  // variable and emit the body.
2058  const DeclRefExpr *LoopVarRef = S->getLoopVarRef();
2059  LValue LCVal = EmitLValue(LoopVarRef);
2060  Address LoopVarAddress = LCVal.getAddress(*this);
2061  emitCapturedStmtCall(*this, LoopVarClosure,
2062  {LoopVarAddress.getPointer(), IndVar});
2063 
2064  RunCleanupsScope BodyScope(*this);
2065  EmitStmt(BodyStmt);
2066  };
2067  llvm::CanonicalLoopInfo *CL =
2068  OMPBuilder.createCanonicalLoop(Builder, BodyGen, DistVal);
2069 
2070  // Finish up the loop.
2071  Builder.restoreIP(CL->getAfterIP());
2072  ForScope.ForceCleanup();
2073 
2074  // Remember the CanonicalLoopInfo for parent AST nodes consuming it.
2075  OMPLoopNestStack.push_back(CL);
2076 }
2077 
2078 void CodeGenFunction::EmitOMPInnerLoop(
2079  const OMPExecutableDirective &S, bool RequiresCleanup, const Expr *LoopCond,
2080  const Expr *IncExpr,
2081  const llvm::function_ref<void(CodeGenFunction &)> BodyGen,
2082  const llvm::function_ref<void(CodeGenFunction &)> PostIncGen) {
2083  auto LoopExit = getJumpDestInCurrentScope("omp.inner.for.end");
2084 
2085  // Start the loop with a block that tests the condition.
2086  auto CondBlock = createBasicBlock("omp.inner.for.cond");
2087  EmitBlock(CondBlock);
2088  const SourceRange R = S.getSourceRange();
2089 
2090  // If attributes are attached, push to the basic block with them.
2091  const auto &OMPED = cast<OMPExecutableDirective>(S);
2092  const CapturedStmt *ICS = OMPED.getInnermostCapturedStmt();
2093  const Stmt *SS = ICS->getCapturedStmt();
2094  const AttributedStmt *AS = dyn_cast_or_null<AttributedStmt>(SS);
2095  OMPLoopNestStack.clear();
2096  if (AS)
2097  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(),
2098  AS->getAttrs(), SourceLocToDebugLoc(R.getBegin()),
2099  SourceLocToDebugLoc(R.getEnd()));
2100  else
2101  LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()),
2102  SourceLocToDebugLoc(R.getEnd()));
2103 
2104  // If there are any cleanups between here and the loop-exit scope,
2105  // create a block to stage a loop exit along.
2106  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
2107  if (RequiresCleanup)
2108  ExitBlock = createBasicBlock("omp.inner.for.cond.cleanup");
2109 
2110  llvm::BasicBlock *LoopBody = createBasicBlock("omp.inner.for.body");
2111 
2112  // Emit condition.
2113  EmitBranchOnBoolExpr(LoopCond, LoopBody, ExitBlock, getProfileCount(&S));
2114  if (ExitBlock != LoopExit.getBlock()) {
2115  EmitBlock(ExitBlock);
2116  EmitBranchThroughCleanup(LoopExit);
2117  }
2118 
2119  EmitBlock(LoopBody);
2120  incrementProfileCounter(&S);
2121 
2122  // Create a block for the increment.
2123  JumpDest Continue = getJumpDestInCurrentScope("omp.inner.for.inc");
2124  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
2125 
2126  BodyGen(*this);
2127 
2128  // Emit "IV = IV + 1" and a back-edge to the condition block.
2129  EmitBlock(Continue.getBlock());
2130  EmitIgnoredExpr(IncExpr);
2131  PostIncGen(*this);
2132  BreakContinueStack.pop_back();
2133  EmitBranch(CondBlock);
2134  LoopStack.pop();
2135  // Emit the fall-through block.
2136  EmitBlock(LoopExit.getBlock());
2137 }
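// The control flow produced above is, roughly:
//
//   omp.inner.for.cond: br (LoopCond) omp.inner.for.body, omp.inner.for.end
//   omp.inner.for.body: <BodyGen>; br omp.inner.for.inc
//   omp.inner.for.inc:  IV = IV + 1; <PostIncGen>; br omp.inner.for.cond
//   omp.inner.for.end: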
2138 
2139 bool CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) {
2140  if (!HaveInsertPoint())
2141  return false;
2142  // Emit inits for the linear variables.
2143  bool HasLinears = false;
2144  for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
2145  for (const Expr *Init : C->inits()) {
2146  HasLinears = true;
2147  const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl());
2148  if (const auto *Ref =
2149  dyn_cast<DeclRefExpr>(VD->getInit()->IgnoreImpCasts())) {
2150  AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
2151  const auto *OrigVD = cast<VarDecl>(Ref->getDecl());
2152  DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
2153  CapturedStmtInfo->lookup(OrigVD) != nullptr,
2154  VD->getInit()->getType(), VK_LValue,
2155  VD->getInit()->getExprLoc());
2156  EmitExprAsInit(
2157  &DRE, VD,
2158  MakeAddrLValue(Emission.getAllocatedAddress(), VD->getType()),
2159  /*capturedByInit=*/false);
2160  EmitAutoVarCleanups(Emission);
2161  } else {
2162  EmitVarDecl(*VD);
2163  }
2164  }
2165  // Emit the linear steps for the linear clauses.
2166  // If a step is not constant, it is pre-calculated before the loop.
2167  if (const auto *CS = cast_or_null<BinaryOperator>(C->getCalcStep()))
2168  if (const auto *SaveRef = cast<DeclRefExpr>(CS->getLHS())) {
2169  EmitVarDecl(*cast<VarDecl>(SaveRef->getDecl()));
2170  // Emit calculation of the linear step.
2171  EmitIgnoredExpr(CS);
2172  }
2173  }
2174  return HasLinears;
2175 }
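// For example (illustrative), given
//
//   #pragma omp simd linear(P : Step)
//
// this emits the initializer of the private copy of 'P' and, when 'Step' is
// not a compile-time constant, a helper variable holding the step value
// pre-computed before the loop.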
2176 
2177 void CodeGenFunction::EmitOMPLinearClauseFinal(
2178  const OMPLoopDirective &D,
2179  const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
2180  if (!HaveInsertPoint())
2181  return;
2182  llvm::BasicBlock *DoneBB = nullptr;
2183  // Emit the final values of the linear variables.
2184  for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
2185  auto IC = C->varlist_begin();
2186  for (const Expr *F : C->finals()) {
2187  if (!DoneBB) {
2188  if (llvm::Value *Cond = CondGen(*this)) {
2189  // If the first post-update expression is found, emit the conditional
2190  // block if one was requested.
2191  llvm::BasicBlock *ThenBB = createBasicBlock(".omp.linear.pu");
2192  DoneBB = createBasicBlock(".omp.linear.pu.done");
2193  Builder.CreateCondBr(Cond, ThenBB, DoneBB);
2194  EmitBlock(ThenBB);
2195  }
2196  }
2197  const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl());
2198  DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
2199  CapturedStmtInfo->lookup(OrigVD) != nullptr,
2200  (*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
2201  Address OrigAddr = EmitLValue(&DRE).getAddress(*this);
2202  CodeGenFunction::OMPPrivateScope VarScope(*this);
2203  VarScope.addPrivate(OrigVD, OrigAddr);
2204  (void)VarScope.Privatize();
2205  EmitIgnoredExpr(F);
2206  ++IC;
2207  }
2208  if (const Expr *PostUpdate = C->getPostUpdateExpr())
2209  EmitIgnoredExpr(PostUpdate);
2210  }
2211  if (DoneBB)
2212  EmitBlock(DoneBB, /*IsFinished=*/true);
2213 }
2214 
2215 static void emitAlignedClause(CodeGenFunction &CGF,
2216  const OMPExecutableDirective &D) {
2217  if (!CGF.HaveInsertPoint())
2218  return;
2219  for (const auto *Clause : D.getClausesOfKind<OMPAlignedClause>()) {
2220  llvm::APInt ClauseAlignment(64, 0);
2221  if (const Expr *AlignmentExpr = Clause->getAlignment()) {
2222  auto *AlignmentCI =
2223  cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr));
2224  ClauseAlignment = AlignmentCI->getValue();
2225  }
2226  for (const Expr *E : Clause->varlists()) {
2227  llvm::APInt Alignment(ClauseAlignment);
2228  if (Alignment == 0) {
2229  // OpenMP [2.8.1, Description]
2230  // If no optional parameter is specified, implementation-defined default
2231  // alignments for SIMD instructions on the target platforms are assumed.
2232  Alignment =
2233  CGF.getContext()
2234  .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
2235  E->getType()->getPointeeType()))
2236  .getQuantity();
2237  }
2238  assert((Alignment == 0 || Alignment.isPowerOf2()) &&
2239  "alignment is not power of 2");
2240  if (Alignment != 0) {
2241  llvm::Value *PtrValue = CGF.EmitScalarExpr(E);
2242  CGF.emitAlignmentAssumption(
2243  PtrValue, E, /*No second loc needed*/ SourceLocation(),
2244  llvm::ConstantInt::get(CGF.getLLVMContext(), Alignment));
2245  }
2246  }
2247  }
2248 }
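// For example (illustrative):
//
//   #pragma omp simd aligned(Ptr : 32)
//
// emits an alignment assumption on the value of 'Ptr' (lowered via
// llvm.assume), allowing the vectorizer to use aligned loads and stores.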
2249 
2250 void CodeGenFunction::EmitOMPPrivateLoopCounters(
2251  const OMPLoopDirective &S, CodeGenFunction::OMPPrivateScope &LoopScope) {
2252  if (!HaveInsertPoint())
2253  return;
2254  auto I = S.private_counters().begin();
2255  for (const Expr *E : S.counters()) {
2256  const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
2257  const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl());
2258  // Emit var without initialization.
2259  AutoVarEmission VarEmission = EmitAutoVarAlloca(*PrivateVD);
2260  EmitAutoVarCleanups(VarEmission);
2261  LocalDeclMap.erase(PrivateVD);
2262  (void)LoopScope.addPrivate(VD, VarEmission.getAllocatedAddress());
2263  if (LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD) ||
2264  VD->hasGlobalStorage()) {
2265  DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD),
2266  LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD),
2267  E->getType(), VK_LValue, E->getExprLoc());
2268  (void)LoopScope.addPrivate(PrivateVD, EmitLValue(&DRE).getAddress(*this));
2269  } else {
2270  (void)LoopScope.addPrivate(PrivateVD, VarEmission.getAllocatedAddress());
2271  }
2272  ++I;
2273  }
2274  // Privatize extra loop counters used in loops for ordered(n) clauses.
2275  for (const auto *C : S.getClausesOfKind<OMPOrderedClause>()) {
2276  if (!C->getNumForLoops())
2277  continue;
2278  for (unsigned I = S.getLoopsNumber(), E = C->getLoopNumIterations().size();
2279  I < E; ++I) {
2280  const auto *DRE = cast<DeclRefExpr>(C->getLoopCounter(I));
2281  const auto *VD = cast<VarDecl>(DRE->getDecl());
2282  // Override only those variables that can be captured to avoid re-emission
2283  // of the variables declared within the loops.
2284  if (DRE->refersToEnclosingVariableOrCapture()) {
2285  (void)LoopScope.addPrivate(
2286  VD, CreateMemTemp(DRE->getType(), VD->getName()));
2287  }
2288  }
2289  }
2290 }
2291 
2292 static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S,
2293  const Expr *Cond, llvm::BasicBlock *TrueBlock,
2294  llvm::BasicBlock *FalseBlock, uint64_t TrueCount) {
2295  if (!CGF.HaveInsertPoint())
2296  return;
2297  {
2298  CodeGenFunction::OMPPrivateScope PreCondScope(CGF);
2299  CGF.EmitOMPPrivateLoopCounters(S, PreCondScope);
2300  (void)PreCondScope.Privatize();
2301  // Get initial values of real counters.
2302  for (const Expr *I : S.inits()) {
2303  CGF.EmitIgnoredExpr(I);
2304  }
2305  }
2306  // Create temp loop control variables with their init values to support
2307  // non-rectangular loops.
2308  CodeGenFunction::OMPMapVars PreCondVars;
2309  for (const Expr *E : S.dependent_counters()) {
2310  if (!E)
2311  continue;
2312  assert(!E->getType().getNonReferenceType()->isRecordType() &&
2313  "dependent counter must not be an iterator.");
2314  const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
2315  Address CounterAddr =
2316  CGF.CreateMemTemp(VD->getType().getNonReferenceType());
2317  (void)PreCondVars.setVarAddr(CGF, VD, CounterAddr);
2318  }
2319  (void)PreCondVars.apply(CGF);
2320  for (const Expr *E : S.dependent_inits()) {
2321  if (!E)
2322  continue;
2323  CGF.EmitIgnoredExpr(E);
2324  }
2325  // Check that the loop is executed at least once.
2326  CGF.EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount);
2327  PreCondVars.restore(CGF);
2328 }
2329 
2330 void CodeGenFunction::EmitOMPLinearClause(
2331  const OMPLoopDirective &D, CodeGenFunction::OMPPrivateScope &PrivateScope) {
2332  if (!HaveInsertPoint())
2333  return;
2334  llvm::DenseSet<const VarDecl *> SIMDLCVs;
2335  if (isOpenMPSimdDirective(D.getDirectiveKind())) {
2336  const auto *LoopDirective = cast<OMPLoopDirective>(&D);
2337  for (const Expr *C : LoopDirective->counters()) {
2338  SIMDLCVs.insert(
2339  cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
2340  }
2341  }
2342  for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
2343  auto CurPrivate = C->privates().begin();
2344  for (const Expr *E : C->varlists()) {
2345  const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
2346  const auto *PrivateVD =
2347  cast<VarDecl>(cast<DeclRefExpr>(*CurPrivate)->getDecl());
2348  if (!SIMDLCVs.count(VD->getCanonicalDecl())) {
2349  // Emit private VarDecl with copy init.
2350  EmitVarDecl(*PrivateVD);
2351  bool IsRegistered =
2352  PrivateScope.addPrivate(VD, GetAddrOfLocalVar(PrivateVD));
2353  assert(IsRegistered && "linear var already registered as private");
2354  // Silence the warning about unused variable.
2355  (void)IsRegistered;
2356  } else {
2357  EmitVarDecl(*PrivateVD);
2358  }
2359  ++CurPrivate;
2360  }
2361  }
2362 }
2363 
2364 static void emitSimdlenSafelenClause(CodeGenFunction &CGF,
2365  const OMPExecutableDirective &D) {
2366  if (!CGF.HaveInsertPoint())
2367  return;
2368  if (const auto *C = D.getSingleClause<OMPSimdlenClause>()) {
2369  RValue Len = CGF.EmitAnyExpr(C->getSimdlen(), AggValueSlot::ignored(),
2370  /*ignoreResult=*/true);
2371  auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
2372  CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
2373  // In presence of finite 'safelen', it may be unsafe to mark all
2374  // the memory instructions parallel, because loop-carried
2375  // dependences of 'safelen' iterations are possible.
2376  CGF.LoopStack.setParallel(!D.getSingleClause<OMPSafelenClause>());
2377  } else if (const auto *C = D.getSingleClause<OMPSafelenClause>()) {
2378  RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(),
2379  /*ignoreResult=*/true);
2380  auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
2381  CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
2382  // In presence of finite 'safelen', it may be unsafe to mark all
2383  // the memory instructions parallel, because loop-carried
2384  // dependences of 'safelen' iterations are possible.
2385  CGF.LoopStack.setParallel(/*Enable=*/false);
2386  }
2387 }
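// For example (illustrative): with '#pragma omp simd safelen(8)' the
// vectorization width hint is set to 8, but the loop is not marked parallel,
// since iterations that are 8 or more apart may still carry dependences.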
2388 
2389 void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D) {
2390  // Walk clauses and process safelen/lastprivate.
2391  LoopStack.setParallel(/*Enable=*/true);
2392  LoopStack.setVectorizeEnable();
2393  emitSimdlenSafelenClause(*this, D);
2394  if (const auto *C = D.getSingleClause<OMPOrderClause>())
2395  if (C->getKind() == OMPC_ORDER_concurrent)
2396  LoopStack.setParallel(/*Enable=*/true);
2397  if ((D.getDirectiveKind() == OMPD_simd ||
2398  (getLangOpts().OpenMPSimd &&
2399  isOpenMPSimdDirective(D.getDirectiveKind()))) &&
2400  llvm::any_of(D.getClausesOfKind<OMPReductionClause>(),
2401  [](const OMPReductionClause *C) {
2402  return C->getModifier() == OMPC_REDUCTION_inscan;
2403  }))
2404  // Disable parallel access in case of prefix sum.
2405  LoopStack.setParallel(/*Enable=*/false);
2406 }
2407 
2408 void CodeGenFunction::EmitOMPSimdFinal(
2409  const OMPLoopDirective &D,
2410  const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
2411  if (!HaveInsertPoint())
2412  return;
2413  llvm::BasicBlock *DoneBB = nullptr;
2414  auto IC = D.counters().begin();
2415  auto IPC = D.private_counters().begin();
2416  for (const Expr *F : D.finals()) {
2417  const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl());
2418  const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>((*IPC))->getDecl());
2419  const auto *CED = dyn_cast<OMPCapturedExprDecl>(OrigVD);
2420  if (LocalDeclMap.count(OrigVD) || CapturedStmtInfo->lookup(OrigVD) ||
2421  OrigVD->hasGlobalStorage() || CED) {
2422  if (!DoneBB) {
2423  if (llvm::Value *Cond = CondGen(*this)) {
2424  // If the first post-update expression is found, emit the conditional
2425  // block if one was requested.
2426  llvm::BasicBlock *ThenBB = createBasicBlock(".omp.final.then");
2427  DoneBB = createBasicBlock(".omp.final.done");
2428  Builder.CreateCondBr(Cond, ThenBB, DoneBB);
2429  EmitBlock(ThenBB);
2430  }
2431  }
2432  Address OrigAddr = Address::invalid();
2433  if (CED) {
2434  OrigAddr =
2435  EmitLValue(CED->getInit()->IgnoreImpCasts()).getAddress(*this);
2436  } else {
2437  DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(PrivateVD),
2438  /*RefersToEnclosingVariableOrCapture=*/false,
2439  (*IPC)->getType(), VK_LValue, (*IPC)->getExprLoc());
2440  OrigAddr = EmitLValue(&DRE).getAddress(*this);
2441  }
2442  OMPPrivateScope VarScope(*this);
2443  VarScope.addPrivate(OrigVD, OrigAddr);
2444  (void)VarScope.Privatize();
2445  EmitIgnoredExpr(F);
2446  }
2447  ++IC;
2448  ++IPC;
2449  }
2450  if (DoneBB)
2451  EmitBlock(DoneBB, /*IsFinished=*/true);
2452 }
2453 
2454 static void emitOMPLoopBodyWithStopPoint(CodeGenFunction &CGF,
2455  const OMPLoopDirective &S,
2456  CodeGenFunction::JumpDest LoopExit) {
2457  CGF.EmitOMPLoopBody(S, LoopExit);
2458  CGF.EmitStopPoint(&S);
2459 }
2460 
2461 /// Emit a helper variable and return corresponding lvalue.
2462 static LValue EmitOMPHelperVar(CodeGenFunction &CGF,
2463  const DeclRefExpr *Helper) {
2464  auto VDecl = cast<VarDecl>(Helper->getDecl());
2465  CGF.EmitVarDecl(*VDecl);
2466  return CGF.EmitLValue(Helper);
2467 }
2468 
2469 static void emitCommonSimdLoop(CodeGenFunction &CGF, const OMPLoopDirective &S,
2470  const RegionCodeGenTy &SimdInitGen,
2471  const RegionCodeGenTy &BodyCodeGen) {
2472  auto &&ThenGen = [&S, &SimdInitGen, &BodyCodeGen](CodeGenFunction &CGF,
2473  PrePostActionTy &) {
2474  CGOpenMPRuntime::NontemporalDeclsRAII NontemporalsRegion(CGF.CGM, S);
2475  CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
2476  SimdInitGen(CGF);
2477 
2478  BodyCodeGen(CGF);
2479  };
2480  auto &&ElseGen = [&BodyCodeGen](CodeGenFunction &CGF, PrePostActionTy &) {
2481  CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
2482  CGF.LoopStack.setVectorizeEnable(/*Enable=*/false);
2483 
2484  BodyCodeGen(CGF);
2485  };
2486  const Expr *IfCond = nullptr;
2487  if (isOpenMPSimdDirective(S.getDirectiveKind())) {
2488  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
2489  if (CGF.getLangOpts().OpenMP >= 50 &&
2490  (C->getNameModifier() == OMPD_unknown ||
2491  C->getNameModifier() == OMPD_simd)) {
2492  IfCond = C->getCondition();
2493  break;
2494  }
2495  }
2496  }
2497  if (IfCond) {
2498  CGF.CGM.getOpenMPRuntime().emitIfClause(CGF, IfCond, ThenGen, ElseGen);
2499  } else {
2500  RegionCodeGenTy ThenRCG(ThenGen);
2501  ThenRCG(CGF);
2502  }
2503 }
2504 
2505 static void emitOMPSimdRegion(CodeGenFunction &CGF, const OMPSimdDirective &S,
2506  PrePostActionTy &Action) {
2507  Action.Enter(CGF);
2508  assert(isOpenMPSimdDirective(S.getDirectiveKind()) &&
2509  "Expected simd directive");
2510  OMPLoopScope PreInitScope(CGF, S);
2511  // if (PreCond) {
2512  // for (IV in 0..LastIteration) BODY;
2513  // <Final counter/linear vars updates>;
2514  // }
2515  //
2516  if (isOpenMPDistributeDirective(S.getDirectiveKind()) ||
2517  isOpenMPWorksharingDirective(S.getDirectiveKind()) ||
2518  isOpenMPTaskLoopDirective(S.getDirectiveKind())) {
2519  (void)EmitOMPHelperVar(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable()));
2520  (void)EmitOMPHelperVar(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable()));
2521  }
2522 
2523  // Emit: if (PreCond) - begin.
2524  // If the condition constant folds and can be elided, avoid emitting the
2525  // whole loop.
2526  bool CondConstant;
2527  llvm::BasicBlock *ContBlock = nullptr;
2528  if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
2529  if (!CondConstant)
2530  return;
2531  } else {
2532  llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("simd.if.then");
2533  ContBlock = CGF.createBasicBlock("simd.if.end");
2534  emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
2535  CGF.getProfileCount(&S));
2536  CGF.EmitBlock(ThenBlock);
2537  CGF.incrementProfileCounter(&S);
2538  }
2539 
2540  // Emit the loop iteration variable.
2541  const Expr *IVExpr = S.getIterationVariable();
2542  const auto *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
2543  CGF.EmitVarDecl(*IVDecl);
2544  CGF.EmitIgnoredExpr(S.getInit());
2545 
2546  // Emit the iterations count variable.
2547  // If it is not a variable, Sema decided to calculate the iteration count
2548  // on each iteration (e.g., it is foldable into a constant).
2549  if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
2550  CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
2551  // Emit calculation of the iterations count.
2552  CGF.EmitIgnoredExpr(S.getCalcLastIteration());
2553  }
2554 
2555  emitAlignedClause(CGF, S);
2556  (void)CGF.EmitOMPLinearClauseInit(S);
2557  {
2558  CodeGenFunction::OMPPrivateScope LoopScope(CGF);
2559  CGF.EmitOMPPrivateLoopCounters(S, LoopScope);
2560  CGF.EmitOMPLinearClause(S, LoopScope);
2561  CGF.EmitOMPPrivateClause(S, LoopScope);
2562  CGF.EmitOMPReductionClauseInit(S, LoopScope);
2563  CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion(
2564  CGF, S, CGF.EmitLValue(S.getIterationVariable()));
2565  bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
2566  (void)LoopScope.Privatize();
2567  if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
2568  CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);
2569 
2570  emitCommonSimdLoop(
2571  CGF, S,
2572  [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2573  CGF.EmitOMPSimdInit(S);
2574  },
2575  [&S, &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) {
2576  CGF.EmitOMPInnerLoop(
2577  S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(),
2578  [&S](CodeGenFunction &CGF) {
2579  emitOMPLoopBodyWithStopPoint(CGF, S,
2580  CodeGenFunction::JumpDest());
2581  },
2582  [](CodeGenFunction &) {});
2583  });
2584  CGF.EmitOMPSimdFinal(S, [](CodeGenFunction &) { return nullptr; });
2585  // Emit final copy of the lastprivate variables at the end of loops.
2586  if (HasLastprivateClause)
2587  CGF.EmitOMPLastprivateClauseFinal(S, /*NoFinals=*/true);
2588  CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_simd);
2589  emitPostUpdateForReductionClause(CGF, S,
2590  [](CodeGenFunction &) { return nullptr; });
2591  LoopScope.restoreMap();
2592  CGF.EmitOMPLinearClauseFinal(S, [](CodeGenFunction &) { return nullptr; });
2593  }
2594  // Emit: if (PreCond) - end.
2595  if (ContBlock) {
2596  CGF.EmitBranch(ContBlock);
2597  CGF.EmitBlock(ContBlock, true);
2598  }
2599 }
2600 
2601 static bool isSupportedByOpenMPIRBuilder(const OMPSimdDirective &S) {
2602  // Check for unsupported clauses.
2603  for (OMPClause *C : S.clauses()) {
2604  // Currently only order, simdlen, safelen and aligned clauses are supported.
2605  if (!(isa<OMPSimdlenClause>(C) || isa<OMPSafelenClause>(C) ||
2606  isa<OMPOrderClause>(C) || isa<OMPAlignedClause>(C)))
2607  return false;
2608  }
2609 
2610  // Check if we have a statement with the ordered directive.
2611  // Visit the statement hierarchy to find a compound statement
2612  // with an ordered directive in it.
2613  if (const auto *CanonLoop = dyn_cast<OMPCanonicalLoop>(S.getRawStmt())) {
2614  if (const Stmt *SyntacticalLoop = CanonLoop->getLoopStmt()) {
2615  for (const Stmt *SubStmt : SyntacticalLoop->children()) {
2616  if (!SubStmt)
2617  continue;
2618  if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(SubStmt)) {
2619  for (const Stmt *CSSubStmt : CS->children()) {
2620  if (!CSSubStmt)
2621  continue;
2622  if (isa<OMPOrderedDirective>(CSSubStmt)) {
2623  return false;
2624  }
2625  }
2626  }
2627  }
2628  }
2629  }
2630  return true;
2631 }
2632 static llvm::MapVector<llvm::Value *, llvm::Value *>
2633 GetAlignedMapping(const OMPSimdDirective &S, CodeGenFunction &CGF) {
2634  llvm::MapVector<llvm::Value *, llvm::Value *> AlignedVars;
2635  for (const auto *Clause : S.getClausesOfKind<OMPAlignedClause>()) {
2636  llvm::APInt ClauseAlignment(64, 0);
2637  if (const Expr *AlignmentExpr = Clause->getAlignment()) {
2638  auto *AlignmentCI =
2639  cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr));
2640  ClauseAlignment = AlignmentCI->getValue();
2641  }
2642  for (const Expr *E : Clause->varlists()) {
2643  llvm::APInt Alignment(ClauseAlignment);
2644  if (Alignment == 0) {
2645  // OpenMP [2.8.1, Description]
2646  // If no optional parameter is specified, implementation-defined default
2647  // alignments for SIMD instructions on the target platforms are assumed.
2648  Alignment =
2649  CGF.getContext()
2650  .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
2651  E->getType()->getPointeeType()))
2652  .getQuantity();
2653  }
2654  assert((Alignment == 0 || Alignment.isPowerOf2()) &&
2655  "alignment is not power of 2");
2656  llvm::Value *PtrValue = CGF.EmitScalarExpr(E);
2657  AlignedVars[PtrValue] = CGF.Builder.getInt64(Alignment.getSExtValue());
2658  }
2659  }
2660  return AlignedVars;
2661 }
2662 
2663 void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
2664  bool UseOMPIRBuilder =
2665  CGM.getLangOpts().OpenMPIRBuilder && isSupportedByOpenMPIRBuilder(S);
2666  if (UseOMPIRBuilder) {
2667  auto &&CodeGenIRBuilder = [this, &S, UseOMPIRBuilder](CodeGenFunction &CGF,
2668  PrePostActionTy &) {
2669  // Use the OpenMPIRBuilder if enabled.
2670  if (UseOMPIRBuilder) {
2671  llvm::MapVector<llvm::Value *, llvm::Value *> AlignedVars =
2672  GetAlignedMapping(S, CGF);
2673  // Emit the associated statement and get its loop representation.
2674  const Stmt *Inner = S.getRawStmt();
2675  llvm::CanonicalLoopInfo *CLI =
2676  EmitOMPCollapsedCanonicalLoopNest(Inner, 1);
2677 
2678  llvm::OpenMPIRBuilder &OMPBuilder =
2679  CGM.getOpenMPRuntime().getOMPBuilder();
2680  // Add SIMD specific metadata
2681  llvm::ConstantInt *Simdlen = nullptr;
2682  if (const auto *C = S.getSingleClause<OMPSimdlenClause>()) {
2683  RValue Len =
2684  this->EmitAnyExpr(C->getSimdlen(), AggValueSlot::ignored(),
2685  /*ignoreResult=*/true);
2686  auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
2687  Simdlen = Val;
2688  }
2689  llvm::ConstantInt *Safelen = nullptr;
2690  if (const auto *C = S.getSingleClause<OMPSafelenClause>()) {
2691  RValue Len =
2692  this->EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(),
2693  /*ignoreResult=*/true);
2694  auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
2695  Safelen = Val;
2696  }
2697  llvm::omp::OrderKind Order = llvm::omp::OrderKind::OMP_ORDER_unknown;
2698  if (const auto *C = S.getSingleClause<OMPOrderClause>()) {
2699  if (C->getKind() == OpenMPOrderClauseKind::OMPC_ORDER_concurrent) {
2700  Order = llvm::omp::OrderKind::OMP_ORDER_concurrent;
2701  }
2702  }
2703  // Add simd metadata to the collapsed loop. Do not generate
2704  // another loop for the if clause; its support is handled earlier.
2705  OMPBuilder.applySimd(CLI, AlignedVars,
2706  /*IfCond*/ nullptr, Order, Simdlen, Safelen);
2707  return;
2708  }
2709  };
2710  {
2711  auto LPCRegion =
2712  CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
2713  OMPLexicalScope Scope(*this, S, OMPD_unknown);
2714  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd,
2715  CodeGenIRBuilder);
2716  }
2717  return;
2718  }
2719 
2720  ParentLoopDirectiveForScanRegion ScanRegion(*this, S);
2721  OMPFirstScanLoop = true;
2722  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
2723  emitOMPSimdRegion(CGF, S, Action);
2724  };
2725  {
2726  auto LPCRegion =
2727  CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
2728  OMPLexicalScope Scope(*this, S, OMPD_unknown);
2729  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
2730  }
2731  // Check for outer lastprivate conditional update.
2732  checkForLastprivateConditionalUpdate(*this, S);
2733 }
2734 
2735 void CodeGenFunction::EmitOMPTileDirective(const OMPTileDirective &S) {
2736  // Emit the de-sugared statement.
2737  OMPTransformDirectiveScopeRAII TileScope(*this, &S);
2738  EmitStmt(S.getTransformedStmt());
2739 }
2740 
2741 void CodeGenFunction::EmitOMPUnrollDirective(const OMPUnrollDirective &S) {
2742  bool UseOMPIRBuilder = CGM.getLangOpts().OpenMPIRBuilder;
2743 
2744  if (UseOMPIRBuilder) {
2745  auto DL = SourceLocToDebugLoc(S.getBeginLoc());
2746  const Stmt *Inner = S.getRawStmt();
2747 
2748  // Consume nested loop. Clear the entire remaining loop stack because a
2749  // fully unrolled loop is non-transformable. For partial unrolling the
2750  // generated outer loop is pushed back to the stack.
2751  llvm::CanonicalLoopInfo *CLI = EmitOMPCollapsedCanonicalLoopNest(Inner, 1);
2752  OMPLoopNestStack.clear();
2753 
2754  llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
2755 
2756  bool NeedsUnrolledCLI = ExpectedOMPLoopDepth >= 1;
2757  llvm::CanonicalLoopInfo *UnrolledCLI = nullptr;
2758 
2759  if (S.hasClausesOfKind<OMPFullClause>()) {
2760  assert(ExpectedOMPLoopDepth == 0);
2761  OMPBuilder.unrollLoopFull(DL, CLI);
2762  } else if (auto *PartialClause = S.getSingleClause<OMPPartialClause>()) {
2763  uint64_t Factor = 0;
2764  if (Expr *FactorExpr = PartialClause->getFactor()) {
2765  Factor = FactorExpr->EvaluateKnownConstInt(getContext()).getZExtValue();
2766  assert(Factor >= 1 && "Only positive factors are valid");
2767  }
2768  OMPBuilder.unrollLoopPartial(DL, CLI, Factor,
2769  NeedsUnrolledCLI ? &UnrolledCLI : nullptr);
2770  } else {
2771  OMPBuilder.unrollLoopHeuristic(DL, CLI);
2772  }
2773 
2774  assert((!NeedsUnrolledCLI || UnrolledCLI) &&
2775  "NeedsUnrolledCLI implies UnrolledCLI to be set");
2776  if (UnrolledCLI)
2777  OMPLoopNestStack.push_back(UnrolledCLI);
2778 
2779  return;
2780  }
2781 
2782  // This function is only called if the unrolled loop is not consumed by any
2783  // other loop-associated construct. Such a loop-associated construct will have
2784  // used the transformed AST.
2785 
2786  // Set the unroll metadata for the next emitted loop.
2787  LoopStack.setUnrollState(LoopAttributes::Enable);
2788 
2789  if (S.hasClausesOfKind<OMPFullClause>()) {
2790  LoopStack.setUnrollState(LoopAttributes::Full);
2791  } else if (auto *PartialClause = S.getSingleClause<OMPPartialClause>()) {
2792  if (Expr *FactorExpr = PartialClause->getFactor()) {
2793  uint64_t Factor =
2794  FactorExpr->EvaluateKnownConstInt(getContext()).getZExtValue();
2795  assert(Factor >= 1 && "Only positive factors are valid");
2796  LoopStack.setUnrollCount(Factor);
2797  }
2798  }
2799 
2800  EmitStmt(S.getAssociatedStmt());
2801 }
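// For example (illustrative), in this non-IRBuilder path
//
//   #pragma omp unroll partial(4)
//   for (int I = 0; I < N; ++I) ...
//
// attaches unroll-enable and an unroll count of 4 to the loop emitted next,
// leaving the actual unrolling to LLVM's loop passes.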
2802 
2803 void CodeGenFunction::EmitOMPOuterLoop(
2804  bool DynamicOrOrdered, bool IsMonotonic, const OMPLoopDirective &S,
2805  CodeGenFunction::OMPPrivateScope &LoopScope,
2806  const CodeGenFunction::OMPLoopArguments &LoopArgs,
2807  const CodeGenFunction::CodeGenLoopTy &CodeGenLoop,
2808  const CodeGenFunction::CodeGenOrderedTy &CodeGenOrdered) {
2809  CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();
2810 
2811  const Expr *IVExpr = S.getIterationVariable();
2812  const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
2813  const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
2814 
2815  JumpDest LoopExit = getJumpDestInCurrentScope("omp.dispatch.end");
2816 
2817  // Start the loop with a block that tests the condition.
2818  llvm::BasicBlock *CondBlock = createBasicBlock("omp.dispatch.cond");
2819  EmitBlock(CondBlock);
2820  const SourceRange R = S.getSourceRange();
2821  OMPLoopNestStack.clear();
2822  LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()),
2823  SourceLocToDebugLoc(R.getEnd()));
2824 
2825  llvm::Value *BoolCondVal = nullptr;
2826  if (!DynamicOrOrdered) {
2827  // UB = min(UB, GlobalUB) or
2828  // UB = min(UB, PrevUB) for combined loop sharing constructs (e.g.
2829  // 'distribute parallel for')
2830  EmitIgnoredExpr(LoopArgs.EUB);
2831  // IV = LB
2832  EmitIgnoredExpr(LoopArgs.Init);
2833  // IV < UB
2834  BoolCondVal = EvaluateExprAsBool(LoopArgs.Cond);
2835  } else {
2836  BoolCondVal =
2837  RT.emitForNext(*this, S.getBeginLoc(), IVSize, IVSigned, LoopArgs.IL,
2838  LoopArgs.LB, LoopArgs.UB, LoopArgs.ST);
2839  }
2840 
2841  // If there are any cleanups between here and the loop-exit scope,
2842  // create a block to stage a loop exit along.
2843  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
2844  if (LoopScope.requiresCleanups())
2845  ExitBlock = createBasicBlock("omp.dispatch.cleanup");
2846 
2847  llvm::BasicBlock *LoopBody = createBasicBlock("omp.dispatch.body");
2848  Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);
2849  if (ExitBlock != LoopExit.getBlock()) {
2850  EmitBlock(ExitBlock);
2851  EmitBranchThroughCleanup(LoopExit);
2852  }
2853  EmitBlock(LoopBody);
2854 
2855  // Emit "IV = LB" (in case of static schedule, we have already calculated new
2856  // LB for loop condition and emitted it above).
2857  if (DynamicOrOrdered)
2858  EmitIgnoredExpr(LoopArgs.Init);
2859 
2860  // Create a block for the increment.
2861  JumpDest Continue = getJumpDestInCurrentScope("omp.dispatch.inc");
2862  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
2863 
2865  *this, S,
2866  [&S, IsMonotonic](CodeGenFunction &CGF, PrePostActionTy &) {
2867  // Generate !llvm.loop.parallel metadata for loads and stores for loops
2868  // with dynamic/guided scheduling and without ordered clause.
2869  if (!isOpenMPSimdDirective(S.getDirectiveKind())) {
2870  CGF.LoopStack.setParallel(!IsMonotonic);
2871  if (const auto *C = S.getSingleClause<OMPOrderClause>())
2872  if (C->getKind() == OMPC_ORDER_concurrent)
2873  CGF.LoopStack.setParallel(/*Enable=*/true);
2874  } else {
2875  CGF.EmitOMPSimdInit(S);
2876  }
2877  },
2878  [&S, &LoopArgs, LoopExit, &CodeGenLoop, IVSize, IVSigned, &CodeGenOrdered,
2879  &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) {
2880  SourceLocation Loc = S.getBeginLoc();
2881  // when 'distribute' is not combined with a 'for':
2882  // while (idx <= UB) { BODY; ++idx; }
2883  // when 'distribute' is combined with a 'for'
2884  // (e.g. 'distribute parallel for')
2885  // while (idx <= UB) { <CodeGen rest of pragma>; idx += ST; }
2886  CGF.EmitOMPInnerLoop(
2887  S, LoopScope.requiresCleanups(), LoopArgs.Cond, LoopArgs.IncExpr,
2888  [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) {
2889  CodeGenLoop(CGF, S, LoopExit);
2890  },
2891  [IVSize, IVSigned, Loc, &CodeGenOrdered](CodeGenFunction &CGF) {
2892  CodeGenOrdered(CGF, Loc, IVSize, IVSigned);
2893  });
2894  });
2895 
2896  EmitBlock(Continue.getBlock());
2897  BreakContinueStack.pop_back();
2898  if (!DynamicOrOrdered) {
2899  // Emit "LB = LB + Stride", "UB = UB + Stride".
2900  EmitIgnoredExpr(LoopArgs.NextLB);
2901  EmitIgnoredExpr(LoopArgs.NextUB);
2902  }
2903 
2904  EmitBranch(CondBlock);
2905  OMPLoopNestStack.clear();
2906  LoopStack.pop();
2907  // Emit the fall-through block.
2908  EmitBlock(LoopExit.getBlock());
2909 
2910  // Tell the runtime we are done.
2911  auto &&CodeGen = [DynamicOrOrdered, &S](CodeGenFunction &CGF) {
2912  if (!DynamicOrOrdered)
2913  CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(),
2914  S.getDirectiveKind());
2915  };
2916  OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen);
2917 }
2918 
2919 void CodeGenFunction::EmitOMPForOuterLoop(
2920  const OpenMPScheduleTy &ScheduleKind, bool IsMonotonic,
2921  const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered,
2922  const OMPLoopArguments &LoopArgs,
2923  const CodeGenDispatchBoundsTy &CGDispatchBounds) {
2924  CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();
2925 
2926  // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime).
2927  const bool DynamicOrOrdered = Ordered || RT.isDynamic(ScheduleKind.Schedule);
2928 
2929  assert((Ordered || !RT.isStaticNonchunked(ScheduleKind.Schedule,
2930  LoopArgs.Chunk != nullptr)) &&
2931  "static non-chunked schedule does not need outer loop");
2932 
2933  // Emit outer loop.
2934  //
2935  // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
2936  // When schedule(dynamic,chunk_size) is specified, the iterations are
2937  // distributed to threads in the team in chunks as the threads request them.
2938  // Each thread executes a chunk of iterations, then requests another chunk,
2939  // until no chunks remain to be distributed. Each chunk contains chunk_size
2940  // iterations, except for the last chunk to be distributed, which may have
2941  // fewer iterations. When no chunk_size is specified, it defaults to 1.
2942  //
2943  // When schedule(guided,chunk_size) is specified, the iterations are assigned
2944  // to threads in the team in chunks as the executing threads request them.
2945  // Each thread executes a chunk of iterations, then requests another chunk,
2946  // until no chunks remain to be assigned. For a chunk_size of 1, the size of
2947  // each chunk is proportional to the number of unassigned iterations divided
2948  // by the number of threads in the team, decreasing to 1. For a chunk_size
2949  // with value k (greater than 1), the size of each chunk is determined in the
2950  // same way, with the restriction that the chunks do not contain fewer than k
2951  // iterations (except for the last chunk to be assigned, which may have fewer
2952  // than k iterations).
2953  //
2954  // When schedule(auto) is specified, the decision regarding scheduling is
2955  // delegated to the compiler and/or runtime system. The programmer gives the
2956  // implementation the freedom to choose any possible mapping of iterations to
2957  // threads in the team.
2958  //
2959  // When schedule(runtime) is specified, the decision regarding scheduling is
2960  // deferred until run time, and the schedule and chunk size are taken from the
2961  // run-sched-var ICV. If the ICV is set to auto, the schedule is
2962  // implementation defined.
2963  //
2964  // while(__kmpc_dispatch_next(&LB, &UB)) {
2965  // idx = LB;
2966  // while (idx <= UB) { BODY; ++idx;
2967  // __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only.
2968  // } // inner loop
2969  // }
2970  //
2971  // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
2972  // When schedule(static, chunk_size) is specified, iterations are divided into
2973  // chunks of size chunk_size, and the chunks are assigned to the threads in
2974  // the team in a round-robin fashion in the order of the thread number.
2975  //
2976  // while(UB = min(UB, GlobalUB), idx = LB, idx < UB) {
2977  // while (idx <= UB) { BODY; ++idx; } // inner loop
2978  // LB = LB + ST;
2979  // UB = UB + ST;
2980  // }
2981  //
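 // Illustrative example (not part of the original source): with
 // schedule(static, 2), two threads, and eight iterations, thread 0
 // executes chunks {0,1} and {4,5} while thread 1 executes {2,3} and
 // {6,7}; each pass of the outer loop above advances LB and UB by ST,
 // the stride that covers one full round-robin cycle of chunks.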
2982 
2983  const Expr *IVExpr = S.getIterationVariable();
2984  const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
2985  const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
2986 
2987  if (DynamicOrOrdered) {
2988  const std::pair<llvm::Value *, llvm::Value *> DispatchBounds =
2989  CGDispatchBounds(*this, S, LoopArgs.LB, LoopArgs.UB);
2990  llvm::Value *LBVal = DispatchBounds.first;
2991  llvm::Value *UBVal = DispatchBounds.second;
2992  CGOpenMPRuntime::DispatchRTInput DispatchRTInputValues = {LBVal, UBVal,
2993  LoopArgs.Chunk};
2994  RT.emitForDispatchInit(*this, S.getBeginLoc(), ScheduleKind, IVSize,
2995  IVSigned, Ordered, DispatchRTInputValues);
2996  } else {
2997  CGOpenMPRuntime::StaticRTInput StaticInit(
2998  IVSize, IVSigned, Ordered, LoopArgs.IL, LoopArgs.LB, LoopArgs.UB,
2999  LoopArgs.ST, LoopArgs.Chunk);
3000  RT.emitForStaticInit(*this, S.getBeginLoc(), S.getDirectiveKind(),
3001  ScheduleKind, StaticInit);
3002  }
3003 
3004  auto &&CodeGenOrdered = [Ordered](CodeGenFunction &CGF, SourceLocation Loc,
3005  const unsigned IVSize,
3006  const bool IVSigned) {
3007  if (Ordered) {
3008  CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd(CGF, Loc, IVSize,
3009  IVSigned);
3010  }
3011  };
3012 
3013  OMPLoopArguments OuterLoopArgs(LoopArgs.LB, LoopArgs.UB, LoopArgs.ST,
3014  LoopArgs.IL, LoopArgs.Chunk, LoopArgs.EUB);
3015  OuterLoopArgs.IncExpr = S.getInc();
3016  OuterLoopArgs.Init = S.getInit();
3017  OuterLoopArgs.Cond = S.getCond();
3018  OuterLoopArgs.NextLB = S.getNextLowerBound();
3019  OuterLoopArgs.NextUB = S.getNextUpperBound();
3020  EmitOMPOuterLoop(DynamicOrOrdered, IsMonotonic, S, LoopScope, OuterLoopArgs,
3021  emitOMPLoopBodyWithStopPoint, CodeGenOrdered);
3022 }
3023 
3024 static void emitEmptyOrdered(CodeGenFunction &, SourceLocation Loc,
3025  const unsigned IVSize, const bool IVSigned) {}
3026 
3027 void CodeGenFunction::EmitOMPDistributeOuterLoop(
3028  OpenMPDistScheduleClauseKind ScheduleKind, const OMPLoopDirective &S,
3029  OMPPrivateScope &LoopScope, const OMPLoopArguments &LoopArgs,
3030  const CodeGenLoopTy &CodeGenLoopContent) {
3031 
3032  CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();
3033 
3034  // Emit outer loop.
3035  // Same behavior as an OMPForOuterLoop, except that the schedule cannot
3036  // be dynamic.
3037  //
3038 
3039  const Expr *IVExpr = S.getIterationVariable();
3040  const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
3041  const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
3042 
3043  CGOpenMPRuntime::StaticRTInput StaticInit(
3044  IVSize, IVSigned, /* Ordered = */ false, LoopArgs.IL, LoopArgs.LB,
3045  LoopArgs.UB, LoopArgs.ST, LoopArgs.Chunk);
3046  RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind, StaticInit);
3047 
3048  // For combined 'distribute' and 'for', the increment expression of
3049  // 'distribute' is stored in DistInc. For 'distribute' alone, it is in Inc.
3050  Expr *IncExpr;
3051  if (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()))
3052  IncExpr = S.getDistInc();
3053  else
3054  IncExpr = S.getInc();
3055 
3056  // This routine is shared by 'omp distribute parallel for' and
3057  // 'omp distribute': select the right EUB expression depending on the
3058  // directive.
3059  OMPLoopArguments OuterLoopArgs;
3060  OuterLoopArgs.LB = LoopArgs.LB;
3061  OuterLoopArgs.UB = LoopArgs.UB;
3062  OuterLoopArgs.ST = LoopArgs.ST;
3063  OuterLoopArgs.IL = LoopArgs.IL;
3064  OuterLoopArgs.Chunk = LoopArgs.Chunk;
3065  OuterLoopArgs.EUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
3066  ? S.getCombinedEnsureUpperBound()
3067  : S.getEnsureUpperBound();
3068  OuterLoopArgs.IncExpr = IncExpr;
3069  OuterLoopArgs.Init = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
3070  ? S.getCombinedInit()
3071  : S.getInit();
3072  OuterLoopArgs.Cond = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
3073  ? S.getCombinedCond()
3074  : S.getCond();
3075  OuterLoopArgs.NextLB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
3076  ? S.getCombinedNextLowerBound()
3077  : S.getNextLowerBound();
3078  OuterLoopArgs.NextUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
3079  ? S.getCombinedNextUpperBound()
3080  : S.getNextUpperBound();
3081 
3082  EmitOMPOuterLoop(/* DynamicOrOrdered = */ false, /* IsMonotonic = */ false, S,
3083  LoopScope, OuterLoopArgs, CodeGenLoopContent,
3084  emitEmptyOrdered);
3085 }
3086 
3087 static std::pair<LValue, LValue>
3088 emitDistributeParallelForInnerBounds(CodeGenFunction &CGF,
3089  const OMPExecutableDirective &S) {
3090  const OMPLoopDirective &LS = cast<OMPLoopDirective>(S);
3091  LValue LB =
3092  EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable()));
3093  LValue UB =
3094  EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable()));
3095 
3096  // When composing 'distribute' with 'for' (e.g. as in 'distribute
3097  // parallel for') we need to use the 'distribute'
3098  // chunk lower and upper bounds rather than the whole loop iteration
3099  // space. These are parameters to the outlined function for 'parallel'
3100  // and we copy the bounds of the previous schedule into the
3101  // current ones.
3102  LValue PrevLB = CGF.EmitLValue(LS.getPrevLowerBoundVariable());
3103  LValue PrevUB = CGF.EmitLValue(LS.getPrevUpperBoundVariable());
3104  llvm::Value *PrevLBVal = CGF.EmitLoadOfScalar(
3105  PrevLB, LS.getPrevLowerBoundVariable()->getExprLoc());
3106  PrevLBVal = CGF.EmitScalarConversion(
3107  PrevLBVal, LS.getPrevLowerBoundVariable()->getType(),
3108  LS.getIterationVariable()->getType(),
3109  LS.getPrevLowerBoundVariable()->getExprLoc());
3110  llvm::Value *PrevUBVal = CGF.EmitLoadOfScalar(
3111  PrevUB, LS.getPrevUpperBoundVariable()->getExprLoc());
3112  PrevUBVal = CGF.EmitScalarConversion(
3113  PrevUBVal, LS.getPrevUpperBoundVariable()->getType(),
3114  LS.getIterationVariable()->getType(),
3115  LS.getPrevUpperBoundVariable()->getExprLoc());
3116 
3117  CGF.EmitStoreOfScalar(PrevLBVal, LB);
3118  CGF.EmitStoreOfScalar(PrevUBVal, UB);
3119 
3120  return {LB, UB};
3121 }
3122 
3123 /// If the 'for' loop has a dispatch schedule (e.g. dynamic, guided), we
3124 /// need to use the LB and UB expressions generated by the worksharing
3125 /// code generation support, whereas in non-combined situations we would
3126 /// just emit 0 and the LastIteration expression.
3127 /// This function is necessary due to the difference of the LB and UB
3128 /// types for the RT emission routines 'for_static_init' and
3129 /// 'for_dispatch_init'.
3130 static std::pair<llvm::Value *, llvm::Value *>
3131 emitDistributeParallelForDispatchBounds(CodeGenFunction &CGF,
3132  const OMPExecutableDirective &S,
3133  Address LB, Address UB) {
3134  const OMPLoopDirective &LS = cast<OMPLoopDirective>(S);
3135  const Expr *IVExpr = LS.getIterationVariable();
3136  // When implementing a dynamic schedule for a 'for' combined with a
3137  // 'distribute' (e.g. 'distribute parallel for'), the 'for' loop
3138  // is not normalized, as each team only executes its own assigned
3139  // distribute chunk.
3140  QualType IteratorTy = IVExpr->getType();
3141  llvm::Value *LBVal =
3142  CGF.EmitLoadOfScalar(LB, /*Volatile=*/false, IteratorTy, S.getBeginLoc());
3143  llvm::Value *UBVal =
3144  CGF.EmitLoadOfScalar(UB, /*Volatile=*/false, IteratorTy, S.getBeginLoc());
3145  return {LBVal, UBVal};
3146 }
3147 
3148 static void emitDistributeParallelForDistributeInnerBoundParams(
3149  CodeGenFunction &CGF, const OMPExecutableDirective &S,
3150  llvm::SmallVectorImpl<llvm::Value *> &CapturedVars) {
3151  const auto &Dir = cast<OMPLoopDirective>(S);
3152  LValue LB =
3153  CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedLowerBoundVariable()));
3154  llvm::Value *LBCast =
3155  CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(LB.getAddress(CGF)),
3156  CGF.SizeTy, /*isSigned=*/false);
3157  CapturedVars.push_back(LBCast);
3158  LValue UB =
3159  CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedUpperBoundVariable()));
3160 
3161  llvm::Value *UBCast =
3162  CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(UB.getAddress(CGF)),
3163  CGF.SizeTy, /*isSigned=*/false);
3164  CapturedVars.push_back(UBCast);
3165 }
3166 
3167 static void
3168 emitInnerParallelForWhenCombined(CodeGenFunction &CGF,
3169  const OMPLoopDirective &S,
3170  CodeGenFunction::JumpDest LoopExit) {
3171  auto &&CGInlinedWorksharingLoop = [&S](CodeGenFunction &CGF,
3172  PrePostActionTy &Action) {
3173  Action.Enter(CGF);
3174  bool HasCancel = false;
3175  if (!isOpenMPSimdDirective(S.getDirectiveKind())) {
3176  if (const auto *D = dyn_cast<OMPTeamsDistributeParallelForDirective>(&S))
3177  HasCancel = D->hasCancel();
3178  else if (const auto *D = dyn_cast<OMPDistributeParallelForDirective>(&S))
3179  HasCancel = D->hasCancel();
3180  else if (const auto *D =
3181  dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(&S))
3182  HasCancel = D->hasCancel();
3183  }
3184  CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, S.getDirectiveKind(),
3185  HasCancel);
3186  CGF.EmitOMPWorksharingLoop(S, S.getPrevEnsureUpperBound(),
3187  emitDistributeParallelForInnerBounds,
3188  emitDistributeParallelForDispatchBounds);
3189  };
3190 
3191  emitCommonOMPParallelDirective(
3192  CGF, S,
3193  isOpenMPSimdDirective(S.getDirectiveKind()) ? OMPD_for_simd : OMPD_for,
3194  CGInlinedWorksharingLoop,
3195  emitDistributeParallelForDistributeInnerBoundParams);
3196 }
3197 
3198 void CodeGenFunction::EmitOMPDistributeParallelForDirective(
3199  const OMPDistributeParallelForDirective &S) {
3200  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
3201  CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
3202  S.getDistInc());
3203  };
3204  OMPLexicalScope Scope(*this, S, OMPD_parallel);
3205  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen);
3206 }
3207 
3208 void CodeGenFunction::EmitOMPDistributeParallelForSimdDirective(
3209  const OMPDistributeParallelForSimdDirective &S) {
3210  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
3211  CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
3212  S.getDistInc());
3213  };
3214  OMPLexicalScope Scope(*this, S, OMPD_parallel);
3215  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen);
3216 }
3217 
3218 void CodeGenFunction::EmitOMPDistributeSimdDirective(
3219  const OMPDistributeSimdDirective &S) {
3220  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
3221  CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
3222  };
3223  OMPLexicalScope Scope(*this, S, OMPD_unknown);
3224  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
3225 }
3226 
3227 void CodeGenFunction::EmitOMPTargetSimdDeviceFunction(
3228  CodeGenModule &CGM, StringRef ParentName, const OMPTargetSimdDirective &S) {
3229  // Emit SPMD target simd region as a standalone region.
3230  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3231  emitOMPSimdRegion(CGF, S, Action);
3232  };
3233  llvm::Function *Fn;
3234  llvm::Constant *Addr;
3235  // Emit target region as a standalone region.
3236  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
3237  S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
3238  assert(Fn && Addr && "Target device function emission failed.");
3239 }
3240 
3241 void CodeGenFunction::EmitOMPTargetSimdDirective(
3242  const OMPTargetSimdDirective &S) {
3243  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3244  emitOMPSimdRegion(CGF, S, Action);
3245  };
3246  emitCommonOMPTargetDirective(*this, S, CodeGen);
3247 }
3248 
3249 namespace {
3250 struct ScheduleKindModifiersTy {
3251  OpenMPScheduleClauseKind Kind;
3252  OpenMPScheduleClauseModifier M1;
3253  OpenMPScheduleClauseModifier M2;
3254  ScheduleKindModifiersTy(OpenMPScheduleClauseKind Kind,
3255  OpenMPScheduleClauseModifier M1,
3256  OpenMPScheduleClauseModifier M2)
3257  : Kind(Kind), M1(M1), M2(M2) {}
3258 };
3259 } // namespace
3260 
3261 bool CodeGenFunction::EmitOMPWorksharingLoop(
3262  const OMPLoopDirective &S, Expr *EUB,
3263  const CodeGenLoopBoundsTy &CodeGenLoopBounds,
3264  const CodeGenDispatchBoundsTy &CGDispatchBounds) {
3265  // Emit the loop iteration variable.
3266  const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
3267  const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl());
3268  EmitVarDecl(*IVDecl);
3269 
3270  // Emit the iterations count variable.
3271  // If it is not a variable, Sema decided to calculate iterations count on each
3272  // iteration (e.g., it is foldable into a constant).
3273  if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
3274  EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
3275  // Emit calculation of the iterations count.
3276  EmitIgnoredExpr(S.getCalcLastIteration());
3277  }
3278 
3279  CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();
3280 
3281  bool HasLastprivateClause;
3282  // Check pre-condition.
3283  {
3284  OMPLoopScope PreInitScope(*this, S);
3285  // Skip the entire loop if we don't meet the precondition.
3286  // If the condition constant folds and can be elided, avoid emitting the
3287  // whole loop.
3288  bool CondConstant;
3289  llvm::BasicBlock *ContBlock = nullptr;
3290  if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
3291  if (!CondConstant)
3292  return false;
3293  } else {
3294  llvm::BasicBlock *ThenBlock = createBasicBlock("omp.precond.then");
3295  ContBlock = createBasicBlock("omp.precond.end");
3296  emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
3297  getProfileCount(&S));
3298  EmitBlock(ThenBlock);
3299  incrementProfileCounter(&S);
3300  }
3301 
3302  RunCleanupsScope DoacrossCleanupScope(*this);
3303  bool Ordered = false;
3304  if (const auto *OrderedClause = S.getSingleClause<OMPOrderedClause>()) {
3305  if (OrderedClause->getNumForLoops())
3306  RT.emitDoacrossInit(*this, S, OrderedClause->getLoopNumIterations());
3307  else
3308  Ordered = true;
3309  }
3310 
3311  llvm::DenseSet<const Expr *> EmittedFinals;
3312  emitAlignedClause(*this, S);
3313  bool HasLinears = EmitOMPLinearClauseInit(S);
3314  // Emit helper vars inits.
3315 
3316  std::pair<LValue, LValue> Bounds = CodeGenLoopBounds(*this, S);
3317  LValue LB = Bounds.first;
3318  LValue UB = Bounds.second;
3319  LValue ST =
3320  EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
3321  LValue IL =
3322  EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));
3323 
3324  // Emit 'then' code.
3325  {
3326  OMPPrivateScope LoopScope(*this);
3327  if (EmitOMPFirstprivateClause(S, LoopScope) || HasLinears) {
3328  // Emit implicit barrier to synchronize threads and avoid data races on
3329  // initialization of firstprivate variables and post-update of
3330  // lastprivate variables.
3331  CGM.getOpenMPRuntime().emitBarrierCall(
3332  *this, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
3333  /*ForceSimpleCall=*/true);
3334  }
3335  EmitOMPPrivateClause(S, LoopScope);
3336  CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion(
3337  *this, S, EmitLValue(S.getIterationVariable()));
3338  HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
3339  EmitOMPReductionClauseInit(S, LoopScope);
3340  EmitOMPPrivateLoopCounters(S, LoopScope);
3341  EmitOMPLinearClause(S, LoopScope);
3342  (void)LoopScope.Privatize();
3343  if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
3344  CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S);
3345 
3346  // Detect the loop schedule kind and chunk.
3347  const Expr *ChunkExpr = nullptr;
3348  OpenMPScheduleTy ScheduleKind;
3349  if (const auto *C = S.getSingleClause<OMPScheduleClause>()) {
3350  ScheduleKind.Schedule = C->getScheduleKind();
3351  ScheduleKind.M1 = C->getFirstScheduleModifier();
3352  ScheduleKind.M2 = C->getSecondScheduleModifier();
3353  ChunkExpr = C->getChunkSize();
3354  } else {
3355  // Default behaviour for schedule clause.
3356  CGM.getOpenMPRuntime().getDefaultScheduleAndChunk(
3357  *this, S, ScheduleKind.Schedule, ChunkExpr);
3358  }
3359  bool HasChunkSizeOne = false;
3360  llvm::Value *Chunk = nullptr;
3361  if (ChunkExpr) {
3362  Chunk = EmitScalarExpr(ChunkExpr);
3363  Chunk = EmitScalarConversion(Chunk, ChunkExpr->getType(),
3364  S.getIterationVariable()->getType(),
3365  S.getBeginLoc());
3366  Expr::EvalResult Result;
3367  if (ChunkExpr->EvaluateAsInt(Result, getContext())) {
3368  llvm::APSInt EvaluatedChunk = Result.Val.getInt();
3369  HasChunkSizeOne = (EvaluatedChunk.getLimitedValue() == 1);
3370  }
3371  }
3372  const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
3373  const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
3374  // OpenMP 4.5, 2.7.1 Loop Construct, Description.
3375  // If the static schedule kind is specified or if the ordered clause is
3376  // specified, and if no monotonic modifier is specified, the effect will
3377  // be as if the monotonic modifier was specified.
3378  bool StaticChunkedOne =
3379  RT.isStaticChunked(ScheduleKind.Schedule,
3380  /* Chunked */ Chunk != nullptr) &&
3381  HasChunkSizeOne &&
3382  isOpenMPLoopBoundSharingDirective(S.getDirectiveKind());
3383  bool IsMonotonic =
3384  Ordered ||
3385  (ScheduleKind.Schedule == OMPC_SCHEDULE_static &&
3386  !(ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_nonmonotonic ||
3387  ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_nonmonotonic)) ||
3388  ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_monotonic ||
3389  ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_monotonic;
3390  if ((RT.isStaticNonchunked(ScheduleKind.Schedule,
3391  /* Chunked */ Chunk != nullptr) ||
3392  StaticChunkedOne) &&
3393  !Ordered) {
3394  JumpDest LoopExit =
3395  getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
3396  emitCommonSimdLoop(
3397  *this, S,
3398  [&S](CodeGenFunction &CGF, PrePostActionTy &) {
3399  if (isOpenMPSimdDirective(S.getDirectiveKind())) {
3400  CGF.EmitOMPSimdInit(S);
3401  } else if (const auto *C = S.getSingleClause<OMPOrderClause>()) {
3402  if (C->getKind() == OMPC_ORDER_concurrent)
3403  CGF.LoopStack.setParallel(/*Enable=*/true);
3404  }
3405  },
3406  [IVSize, IVSigned, Ordered, IL, LB, UB, ST, StaticChunkedOne, Chunk,
3407  &S, ScheduleKind, LoopExit,
3408  &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) {
3409  // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
3410  // When no chunk_size is specified, the iteration space is divided
3411  // into chunks that are approximately equal in size, and at most
3412  // one chunk is distributed to each thread. Note that the size of
3413  // the chunks is unspecified in this case.
3414  CGOpenMPRuntime::StaticRTInput StaticInit(
3415  IVSize, IVSigned, Ordered, IL.getAddress(CGF),
3416  LB.getAddress(CGF), UB.getAddress(CGF), ST.getAddress(CGF),
3417  StaticChunkedOne ? Chunk : nullptr);
3418  CGF.CGM.getOpenMPRuntime().emitForStaticInit(
3419  CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind,
3420  StaticInit);
3421  // UB = min(UB, GlobalUB);
3422  if (!StaticChunkedOne)
3423  CGF.EmitIgnoredExpr(S.getEnsureUpperBound());
3424  // IV = LB;
3425  CGF.EmitIgnoredExpr(S.getInit());
3426  // For unchunked static schedule generate:
3427  //
3428  // while (idx <= UB) {
3429  // BODY;
3430  // ++idx;
3431  // }
3432  //
3433  // For static schedule with chunk one:
3434  //
3435  // while (IV <= PrevUB) {
3436  // BODY;
3437  // IV += ST;
3438  // }
3439  CGF.EmitOMPInnerLoop(
3440  S, LoopScope.requiresCleanups(),
3441  StaticChunkedOne ? S.getCombinedParForInDistCond()
3442  : S.getCond(),
3443  StaticChunkedOne ? S.getDistInc() : S.getInc(),
3444  [&S, LoopExit](CodeGenFunction &CGF) {
3445  emitOMPLoopBodyWithStopPoint(CGF, S, LoopExit);
3446  },
3447  [](CodeGenFunction &) {});
3448  });
3449  EmitBlock(LoopExit.getBlock());
3450  // Tell the runtime we are done.
3451  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
3452  CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(),
3453  S.getDirectiveKind());
3454  };
3455  OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen);
3456  } else {
3457  // Emit the outer loop, which requests its work chunk [LB..UB] from
3458  // runtime and runs the inner loop to process it.
3459  const OMPLoopArguments LoopArguments(
3460  LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this),
3461  IL.getAddress(*this), Chunk, EUB);
3462  EmitOMPForOuterLoop(ScheduleKind, IsMonotonic, S, LoopScope, Ordered,
3463  LoopArguments, CGDispatchBounds);
3464  }
3465  if (isOpenMPSimdDirective(S.getDirectiveKind())) {
3466  EmitOMPSimdFinal(S, [IL, &S](CodeGenFunction &CGF) {
3467  return CGF.Builder.CreateIsNotNull(
3468  CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
3469  });
3470  }
3471  EmitOMPReductionClauseFinal(
3472  S, /*ReductionKind=*/isOpenMPSimdDirective(S.getDirectiveKind())
3473  ? /*Parallel and Simd*/ OMPD_parallel_for_simd
3474  : /*Parallel only*/ OMPD_parallel);
3475  // Emit post-update of the reduction variables if IsLastIter != 0.
3476  emitPostUpdateForReductionClause(
3477  *this, S, [IL, &S](CodeGenFunction &CGF) {
3478  return CGF.Builder.CreateIsNotNull(
3479  CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
3480  });
3481  // Emit final copy of the lastprivate variables if IsLastIter != 0.
3482  if (HasLastprivateClause)
3483  EmitOMPLastprivateClauseFinal(
3484  S, isOpenMPSimdDirective(S.getDirectiveKind()),
3485  Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getBeginLoc())));
3486  LoopScope.restoreMap();
3487  EmitOMPLinearClauseFinal(S, [IL, &S](CodeGenFunction &CGF) {
3488  return CGF.Builder.CreateIsNotNull(
3489  CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
3490  });
3491  }
3492  DoacrossCleanupScope.ForceCleanup();
3493  // We're now done with the loop, so jump to the continuation block.
3494  if (ContBlock) {
3495  EmitBranch(ContBlock);
3496  EmitBlock(ContBlock, /*IsFinished=*/true);
3497  }
3498  }
3499  return HasLastprivateClause;
3500 }
3501 
3502 /// The following two functions generate expressions for the loop lower
3503 /// and upper bounds in case of static and dynamic (dispatch) schedule
3504 /// of the associated 'for' or 'distribute' loop.
3505 static std::pair<LValue, LValue>
3506 emitForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
3507  const auto &LS = cast<OMPLoopDirective>(S);
3508  LValue LB =
3509  EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable()));
3510  LValue UB =
3511  EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable()));
3512  return {LB, UB};
3513 }
3514 
3515 /// When dealing with dispatch schedules (e.g. dynamic, guided) we do not
3516 /// use the lower and upper bound expressions generated by the
3517 /// worksharing loop support; instead we use 0 and the iteration space
3518 /// size as constants.
3519 static std::pair<llvm::Value *, llvm::Value *>
3520 emitDispatchForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S,
3521  Address LB, Address UB) {
3522  const auto &LS = cast<OMPLoopDirective>(S);
3523  const Expr *IVExpr = LS.getIterationVariable();
3524  const unsigned IVSize = CGF.getContext().getTypeSize(IVExpr->getType());
3525  llvm::Value *LBVal = CGF.Builder.getIntN(IVSize, 0);
3526  llvm::Value *UBVal = CGF.EmitScalarExpr(LS.getLastIteration());
3527  return {LBVal, UBVal};
3528 }
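 // For illustration (assumed loop, not from this file): for a canonical
 // loop 'for (i = 0; i < N; ++i)', the dispatch bounds are the constants
 // 0 and the last iteration number (N - 1), i.e. the normalized
 // iteration space, whereas emitForLoopBounds above reuses the LB/UB
 // helper variables that feed the static runtime entry points.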
3529 
3530 /// Emits internal temp array declarations for the directive with inscan
3531 /// reductions.
3532 /// The code is the following:
3533 /// \code
3534 /// size num_iters = <num_iters>;
3535 /// <type> buffer[num_iters];
3536 /// \endcode
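 /// For illustration (hypothetical directive, not from this file), for
 /// \code
 /// #pragma omp for reduction(inscan, +: red)
 /// \endcode
 /// over 1024 iterations this emits a temporary '<type> buffer[1024]',
 /// one slot per iteration, for the scan phases generated below.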
3537 static void emitScanBasedDirectiveDecls(
3538  CodeGenFunction &CGF, const OMPLoopDirective &S,
3539  llvm::function_ref<llvm::Value *(CodeGenFunction &)> NumIteratorsGen) {
3540  llvm::Value *OMPScanNumIterations = CGF.Builder.CreateIntCast(
3541  NumIteratorsGen(CGF), CGF.SizeTy, /*isSigned=*/false);
3542  SmallVector<const Expr *, 4> Shareds;
3543  SmallVector<const Expr *, 4> Privates;
3544  SmallVector<const Expr *, 4> ReductionOps;
3545  SmallVector<const Expr *, 4> CopyArrayTemps;
3546  for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
3547  assert(C->getModifier() == OMPC_REDUCTION_inscan &&
3548  "Only inscan reductions are expected.");
3549  Shareds.append(C->varlist_begin(), C->varlist_end());
3550  Privates.append(C->privates().begin(), C->privates().end());
3551  ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
3552  CopyArrayTemps.append(C->copy_array_temps().begin(),
3553  C->copy_array_temps().end());
3554  }
3555  {
3556  // Emit buffers for each reduction variable.
3557  // ReductionCodeGen is required to correctly emit the code for array
3558  // reductions.
3559  ReductionCodeGen RedCG(Shareds, Shareds, Privates, ReductionOps);
3560  unsigned Count = 0;
3561  auto *ITA = CopyArrayTemps.begin();
3562  for (const Expr *IRef : Privates) {
3563  const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
3564  // Emit variably modified arrays, used for arrays/array sections
3565  // reductions.
3566  if (PrivateVD->getType()->isVariablyModifiedType()) {
3567  RedCG.emitSharedOrigLValue(CGF, Count);
3568  RedCG.emitAggregateType(CGF, Count);
3569  }
3570  CodeGenFunction::OpaqueValueMapping DimMapping(
3571  CGF,
3572  cast<OpaqueValueExpr>(
3573  cast<VariableArrayType>((*ITA)->getType()->getAsArrayTypeUnsafe())
3574  ->getSizeExpr()),
3575  RValue::get(OMPScanNumIterations));
3576  // Emit temp buffer.
3577  CGF.EmitVarDecl(*cast<VarDecl>(cast<DeclRefExpr>(*ITA)->getDecl()));
3578  ++ITA;
3579  ++Count;
3580  }
3581  }
3582 }
3583 
3584 /// Copies final inscan reduction values to the original variables.
3585 /// The code is the following:
3586 /// \code
3587 /// <orig_var> = buffer[num_iters-1];
3588 /// \endcode
3589 static void emitScanBasedDirectiveFinals(
3590  CodeGenFunction &CGF, const OMPLoopDirective &S,
3591  llvm::function_ref<llvm::Value *(CodeGenFunction &)> NumIteratorsGen) {
3592  llvm::Value *OMPScanNumIterations = CGF.Builder.CreateIntCast(
3593  NumIteratorsGen(CGF), CGF.SizeTy, /*isSigned=*/false);
3594  SmallVector<const Expr *, 4> Shareds;
3595  SmallVector<const Expr *, 4> LHSs;
3596  SmallVector<const Expr *, 4> RHSs;
3597  SmallVector<const Expr *, 4> Privates;
3598  SmallVector<const Expr *, 4> CopyOps;
3599  SmallVector<const Expr *, 4> CopyArrayElems;
3600  for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
3601  assert(C->getModifier() == OMPC_REDUCTION_inscan &&
3602  "Only inscan reductions are expected.");
3603  Shareds.append(C->varlist_begin(), C->varlist_end());
3604  LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
3605  RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
3606  Privates.append(C->privates().begin(), C->privates().end());
3607  CopyOps.append(C->copy_ops().begin(), C->copy_ops().end());
3608  CopyArrayElems.append(C->copy_array_elems().begin(),
3609  C->copy_array_elems().end());
3610  }
3611  // Copy the final value from the temp buffer back to the original variable:
3612  // <orig_var> = buffer[num_iters - 1];
3613  llvm::Value *OMPLast = CGF.Builder.CreateNSWSub(
3614  OMPScanNumIterations,
3615  llvm::ConstantInt::get(CGF.SizeTy, 1, /*isSigned=*/false));
3616  for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
3617  const Expr *PrivateExpr = Privates[I];
3618  const Expr *OrigExpr = Shareds[I];
3619  const Expr *CopyArrayElem = CopyArrayElems[I];
3620  CodeGenFunction::OpaqueValueMapping IdxMapping(
3621  CGF,
3622  cast<OpaqueValueExpr>(
3623  cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
3624  RValue::get(OMPLast));
3625  LValue DestLVal = CGF.EmitLValue(OrigExpr);
3626  LValue SrcLVal = CGF.EmitLValue(CopyArrayElem);
3627  CGF.EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(CGF),
3628  SrcLVal.getAddress(CGF),
3629  cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
3630  cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
3631  CopyOps[I]);
3632  }
3633 }
3634 
3635 /// Emits the code for the directive with inscan reductions.
3636 /// The code is the following:
3637 /// \code
3638 /// #pragma omp ...
3639 /// for (i: 0..<num_iters>) {
3640 /// <input phase>;
3641 /// buffer[i] = red;
3642 /// }
3643 /// #pragma omp master // in parallel region
3644 /// for (int k = 0; k != ceil(log2(num_iters)); ++k)
3645 /// for (size cnt = last_iter; cnt >= pow(2, k); --cnt)
3646 /// buffer[cnt] op= buffer[cnt-pow(2,k)];
3647 /// #pragma omp barrier // in parallel region
3648 /// #pragma omp ...
3649 /// for (0..<num_iters>) {
3650 /// red = InclusiveScan ? buffer[i] : buffer[i-1];
3651 /// <scan phase>;
3652 /// }
3653 /// \endcode
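 /// Worked example (illustrative only): for num_iters = 8 the outer loop
 /// runs k = 0, 1, 2 since ceil(log2(8)) == 3. The k = 0 pass combines
 /// buffer[cnt] op= buffer[cnt-1] for cnt = 7..1, the k = 1 pass combines
 /// elements at distance 2, and the k = 2 pass at distance 4; afterwards
 /// each buffer[i] holds the inclusive prefix reduction of iterations 0..i.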
3654 static void emitScanBasedDirective(
3655  CodeGenFunction &CGF, const OMPLoopDirective &S,
3656  llvm::function_ref<llvm::Value *(CodeGenFunction &)> NumIteratorsGen,
3657  llvm::function_ref<void(CodeGenFunction &)> FirstGen,
3658  llvm::function_ref<void(CodeGenFunction &)> SecondGen) {
3659  llvm::Value *OMPScanNumIterations = CGF.Builder.CreateIntCast(
3660  NumIteratorsGen(CGF), CGF.SizeTy, /*isSigned=*/false);
3661  SmallVector<const Expr *, 4> Privates;
3662  SmallVector<const Expr *, 4> ReductionOps;
3663  SmallVector<const Expr *, 4> LHSs;
3664  SmallVector<const Expr *, 4> RHSs;
3665  SmallVector<const Expr *, 4> CopyArrayElems;
3666  for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
3667  assert(C->getModifier() == OMPC_REDUCTION_inscan &&
3668  "Only inscan reductions are expected.");
3669  Privates.append(C->privates().begin(), C->privates().end());
3670  ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
3671  LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
3672  RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
3673  CopyArrayElems.append(C->copy_array_elems().begin(),
3674  C->copy_array_elems().end());
3675  }
3676  CodeGenFunction::ParentLoopDirectiveForScanRegion ScanRegion(CGF, S);
3677  {
3678  // Emit loop with input phase:
3679  // #pragma omp ...
3680  // for (i: 0..<num_iters>) {
3681  // <input phase>;
3682  // buffer[i] = red;
3683  // }
3684  CGF.OMPFirstScanLoop = true;
3685  CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
3686  FirstGen(CGF);
3687  }
3688  // #pragma omp barrier // in parallel region
3689  auto &&CodeGen = [&S, OMPScanNumIterations, &LHSs, &RHSs, &CopyArrayElems,
3690  &ReductionOps,
3691  &Privates](CodeGenFunction &CGF, PrePostActionTy &Action) {
3692  Action.Enter(CGF);
3693  // Emit prefix reduction:
3694  // #pragma omp master // in parallel region
3695  // for (int k = 0; k <= ceil(log2(n)); ++k)
3696  llvm::BasicBlock *InputBB = CGF.Builder.GetInsertBlock();
3697  llvm::BasicBlock *LoopBB = CGF.createBasicBlock("omp.outer.log.scan.body");
3698  llvm::BasicBlock *ExitBB = CGF.createBasicBlock("omp.outer.log.scan.exit");
3699  llvm::Function *F =
3700  CGF.CGM.getIntrinsic(llvm::Intrinsic::log2, CGF.DoubleTy);
3701  llvm::Value *Arg =
3702  CGF.Builder.CreateUIToFP(OMPScanNumIterations, CGF.DoubleTy);
3703  llvm::Value *LogVal = CGF.EmitNounwindRuntimeCall(F, Arg);
3704  F = CGF.CGM.getIntrinsic(llvm::Intrinsic::ceil, CGF.DoubleTy);
3705  LogVal = CGF.EmitNounwindRuntimeCall(F, LogVal);
3706  LogVal = CGF.Builder.CreateFPToUI(LogVal, CGF.IntTy);
3707  llvm::Value *NMin1 = CGF.Builder.CreateNUWSub(
3708  OMPScanNumIterations, llvm::ConstantInt::get(CGF.SizeTy, 1));
3709  auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, S.getBeginLoc());
3710  CGF.EmitBlock(LoopBB);
3711  auto *Counter = CGF.Builder.CreatePHI(CGF.IntTy, 2);
3712  // size pow2k = 1;
3713  auto *Pow2K = CGF.Builder.CreatePHI(CGF.SizeTy, 2);
3714  Counter->addIncoming(llvm::ConstantInt::get(CGF.IntTy, 0), InputBB);
3715  Pow2K->addIncoming(llvm::ConstantInt::get(CGF.SizeTy, 1), InputBB);
3716  // for (size i = n - 1; i >= 2 ^ k; --i)
3717  // tmp[i] op= tmp[i-pow2k];
3718  llvm::BasicBlock *InnerLoopBB =
3719  CGF.createBasicBlock("omp.inner.log.scan.body");
3720  llvm::BasicBlock *InnerExitBB =
3721  CGF.createBasicBlock("omp.inner.log.scan.exit");
3722  llvm::Value *CmpI = CGF.Builder.CreateICmpUGE(NMin1, Pow2K);
3723  CGF.Builder.CreateCondBr(CmpI, InnerLoopBB, InnerExitBB);
3724  CGF.EmitBlock(InnerLoopBB);
3725  auto *IVal = CGF.Builder.CreatePHI(CGF.SizeTy, 2);
3726  IVal->addIncoming(NMin1, LoopBB);
3727  {
3728  CodeGenFunction::OMPPrivateScope PrivScope(CGF);
3729  auto *ILHS = LHSs.begin();
3730  auto *IRHS = RHSs.begin();
3731  for (const Expr *CopyArrayElem : CopyArrayElems) {
3732  const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
3733  const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
3734  Address LHSAddr = Address::invalid();
3735  {
3736  CodeGenFunction::OpaqueValueMapping IdxMapping(
3737  CGF,
3738  cast<OpaqueValueExpr>(
3739  cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
3740  RValue::get(IVal));
3741  LHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress(CGF);
3742  }
3743  PrivScope.addPrivate(LHSVD, LHSAddr);
3744  Address RHSAddr = Address::invalid();
3745  {
3746  llvm::Value *OffsetIVal = CGF.Builder.CreateNUWSub(IVal, Pow2K);
3747  CodeGenFunction::OpaqueValueMapping IdxMapping(
3748  CGF,
3749  cast<OpaqueValueExpr>(
3750  cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
3751  RValue::get(OffsetIVal));
3752  RHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress(CGF);
3753  }
3754  PrivScope.addPrivate(RHSVD, RHSAddr);
3755  ++ILHS;
3756  ++IRHS;
3757  }
3758  PrivScope.Privatize();
3759  CGF.CGM.getOpenMPRuntime().emitReduction(
3760  CGF, S.getEndLoc(), Privates, LHSs, RHSs, ReductionOps,
3761  {/*WithNowait=*/true, /*SimpleReduction=*/true, OMPD_unknown});
3762  }
3763  llvm::Value *NextIVal =
3764  CGF.Builder.CreateNUWSub(IVal, llvm::ConstantInt::get(CGF.SizeTy, 1));
3765  IVal->addIncoming(NextIVal, CGF.Builder.GetInsertBlock());
3766  CmpI = CGF.Builder.CreateICmpUGE(NextIVal, Pow2K);
3767  CGF.Builder.CreateCondBr(CmpI, InnerLoopBB, InnerExitBB);
3768  CGF.EmitBlock(InnerExitBB);
3769  llvm::Value *Next =
3770  CGF.Builder.CreateNUWAdd(Counter, llvm::ConstantInt::get(CGF.IntTy, 1));
3771  Counter->addIncoming(Next, CGF.Builder.GetInsertBlock());
3772  // pow2k <<= 1;
3773  llvm::Value *NextPow2K =
3774  CGF.Builder.CreateShl(Pow2K, 1, "", /*HasNUW=*/true);
3775  Pow2K->addIncoming(NextPow2K, CGF.Builder.GetInsertBlock());
3776  llvm::Value *Cmp = CGF.Builder.CreateICmpNE(Next, LogVal);
3777  CGF.Builder.CreateCondBr(Cmp, LoopBB, ExitBB);
3778  auto DL1 = ApplyDebugLocation::CreateDefaultArtificial(CGF, S.getEndLoc());
3779  CGF.EmitBlock(ExitBB);
3780  };
3781  if (isOpenMPParallelDirective(S.getDirectiveKind())) {
3782  CGF.CGM.getOpenMPRuntime().emitMasterRegion(CGF, CodeGen, S.getBeginLoc());
3783  CGF.CGM.getOpenMPRuntime().emitBarrierCall(
3784  CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
3785  /*ForceSimpleCall=*/true);
3786  } else {
3787  RegionCodeGenTy RCG(CodeGen);
3788  RCG(CGF);
3789  }
3790 
3791  CGF.OMPFirstScanLoop = false;
3792  SecondGen(CGF);
3793 }
3794 
3795 static bool emitWorksharingDirective(CodeGenFunction &CGF,
3796  const OMPLoopDirective &S,
3797  bool HasCancel) {
3798  bool HasLastprivates;
3799  if (llvm::any_of(S.getClausesOfKind<OMPReductionClause>(),
3800  [](const OMPReductionClause *C) {
3801  return C->getModifier() == OMPC_REDUCTION_inscan;
3802  })) {
3803  const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) {
3804  CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
3805  OMPLoopScope LoopScope(CGF, S);
3806  return CGF.EmitScalarExpr(S.getNumIterations());
3807  };
3808  const auto &&FirstGen = [&S, HasCancel](CodeGenFunction &CGF) {
3809  CodeGenFunction::OMPCancelStackRAII CancelRegion(
3810  CGF, S.getDirectiveKind(), HasCancel);
3811  (void)CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
3812  emitForLoopBounds,
3813  emitDispatchForLoopBounds);
3814  // Emit an implicit barrier at the end.
3815  CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getBeginLoc(),
3816  OMPD_for);
3817  };
3818  const auto &&SecondGen = [&S, HasCancel,
3819  &HasLastprivates](CodeGenFunction &CGF) {
3820  CodeGenFunction::OMPCancelStackRAII CancelRegion(
3821  CGF, S.getDirectiveKind(), HasCancel);
3822  HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
3823  emitForLoopBounds,
3824  emitDispatchForLoopBounds);
3825  };
3826  if (!isOpenMPParallelDirective(S.getDirectiveKind()))
3827  emitScanBasedDirectiveDecls(CGF, S, NumIteratorsGen);
3828  emitScanBasedDirective(CGF, S, NumIteratorsGen, FirstGen, SecondGen);
3829  if (!isOpenMPParallelDirective(S.getDirectiveKind()))
3830  emitScanBasedDirectiveFinals(CGF, S, NumIteratorsGen);
3831  } else {
3832  CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, S.getDirectiveKind(),
3833  HasCancel);
3834  HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
3835  emitForLoopBounds,
3836  emitDispatchForLoopBounds);
3837  }
3838  return HasLastprivates;
3839 }
3840 
3841 static bool isSupportedByOpenMPIRBuilder(const OMPForDirective &S) {
3842  if (S.hasCancel())
3843  return false;
3844  for (OMPClause *C : S.clauses()) {
3845  if (isa<OMPNowaitClause>(C))
3846  continue;
3847 
3848  if (auto *SC = dyn_cast<OMPScheduleClause>(C)) {
3849  if (SC->getFirstScheduleModifier() != OMPC_SCHEDULE_MODIFIER_unknown)
3850  return false;
3851  if (SC->getSecondScheduleModifier() != OMPC_SCHEDULE_MODIFIER_unknown)
3852  return false;
3853  switch (SC->getScheduleKind()) {
3854  case OMPC_SCHEDULE_auto:
3855  case OMPC_SCHEDULE_dynamic:
3856  case OMPC_SCHEDULE_runtime:
3857  case OMPC_SCHEDULE_guided:
3858  case OMPC_SCHEDULE_static:
3859  continue;
3860  case OMPC_SCHEDULE_unknown:
3861  return false;
3862  }
3863  }
3864 
3865  return false;
3866  }
3867 
3868  return true;
3869 }
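 // For illustration (assumed directives, not from this file): a plain
 // '#pragma omp for nowait schedule(static)' satisfies the checks above
 // and can be lowered through the OpenMPIRBuilder, while a loop with a
 // schedule modifier such as 'schedule(monotonic: dynamic)' or with an
 // inner cancellation point falls back to the classic codegen path.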
3870 
3871 static llvm::omp::ScheduleKind
3872 convertClauseKindToSchedKind(OpenMPScheduleClauseKind ScheduleClauseKind) {
3873  switch (ScheduleClauseKind) {
3874  case OMPC_SCHEDULE_unknown:
3875  return llvm::omp::OMP_SCHEDULE_Default;
3876  case OMPC_SCHEDULE_auto:
3877  return llvm::omp::OMP_SCHEDULE_Auto;
3878  case OMPC_SCHEDULE_dynamic:
3879  return llvm::omp::OMP_SCHEDULE_Dynamic;
3880  case OMPC_SCHEDULE_guided:
3881  return llvm::omp::OMP_SCHEDULE_Guided;
3882  case OMPC_SCHEDULE_runtime:
3883  return llvm::omp::OMP_SCHEDULE_Runtime;
3884  case OMPC_SCHEDULE_static:
3885  return llvm::omp::OMP_SCHEDULE_Static;
3886  }
3887  llvm_unreachable("Unhandled schedule kind");
3888 }
3889 
3890 void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
3891  bool HasLastprivates = false;
3892  bool UseOMPIRBuilder =
3893  CGM.getLangOpts().OpenMPIRBuilder && isSupportedByOpenMPIRBuilder(S);
3894  auto &&CodeGen = [this, &S, &HasLastprivates,
3895  UseOMPIRBuilder](CodeGenFunction &CGF, PrePostActionTy &) {
3896  // Use the OpenMPIRBuilder if enabled.
3897  if (UseOMPIRBuilder) {
3898  bool NeedsBarrier = !S.getSingleClause<OMPNowaitClause>();
3899 
3900  llvm::omp::ScheduleKind SchedKind = llvm::omp::OMP_SCHEDULE_Default;
3901  llvm::Value *ChunkSize = nullptr;
3902  if (auto *SchedClause = S.getSingleClause<OMPScheduleClause>()) {
3903  SchedKind =
3904  convertClauseKindToSchedKind(SchedClause->getScheduleKind());
3905  if (const Expr *ChunkSizeExpr = SchedClause->getChunkSize())
3906  ChunkSize = EmitScalarExpr(ChunkSizeExpr);
3907  }
3908 
3909  // Emit the associated statement and get its loop representation.
3910  const Stmt *Inner = S.getRawStmt();
3911  llvm::CanonicalLoopInfo *CLI =
3912  EmitOMPCollapsedCanonicalLoopNest(Inner, 1);
3913 
3914  llvm::OpenMPIRBuilder &OMPBuilder =
3915  CGM.getOpenMPRuntime().getOMPBuilder();
3916  llvm::OpenMPIRBuilder::InsertPointTy AllocaIP(
3917  AllocaInsertPt->getParent(), AllocaInsertPt->getIterator());
3918  OMPBuilder.applyWorkshareLoop(
3919  Builder.getCurrentDebugLocation(), CLI, AllocaIP, NeedsBarrier,
3920  SchedKind, ChunkSize, /*HasSimdModifier=*/false,
3921  /*HasMonotonicModifier=*/false, /*HasNonmonotonicModifier=*/false,
3922  /*HasOrderedClause=*/false);
3923  return;
3924  }
3925 
3926  HasLastprivates = emitWorksharingDirective(CGF, S, S.hasCancel());
3927  };
3928  {
3929  auto LPCRegion =
3930  CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
3931  OMPLexicalScope Scope(*this, S, OMPD_unknown);
3932  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_for, CodeGen,
3933  S.hasCancel());
3934  }
3935 
3936  if (!UseOMPIRBuilder) {
3937  // Emit an implicit barrier at the end.
3938  if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates)
3939  CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for);
3940  }
3941  // Check for outer lastprivate conditional update.
3942  checkForLastprivateConditionalUpdate(*this, S);
3943 }
3944 
3945 void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) {
3946  bool HasLastprivates = false;
3947  auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF,
3948  PrePostActionTy &) {
3949  HasLastprivates = emitWorksharingDirective(CGF, S, /*HasCancel=*/false);
3950  };
3951  {
3952  auto LPCRegion =
3953  CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
3954  OMPLexicalScope Scope(*this, S, OMPD_unknown);
3955  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
3956  }
3957 
3958  // Emit an implicit barrier at the end.
3959  if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates)
3960  CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for);
3961  // Check for outer lastprivate conditional update.
3962  checkForLastprivateConditionalUpdate(*this, S);
3963 }
3964 
3965 static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty,
3966  const Twine &Name,
3967  llvm::Value *Init = nullptr) {
3968  LValue LVal = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty);
3969  if (Init)
3970  CGF.EmitStoreThroughLValue(RValue::get(Init), LVal, /*isInit*/ true);
3971  return LVal;
3972 }
3973 
3974 void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
3975  const Stmt *CapturedStmt = S.getInnermostCapturedStmt()->getCapturedStmt();
3976  const auto *CS = dyn_cast<CompoundStmt>(CapturedStmt);
3977  bool HasLastprivates = false;
3978  auto &&CodeGen = [&S, CapturedStmt, CS,
3979  &HasLastprivates](CodeGenFunction &CGF, PrePostActionTy &) {
3980  const ASTContext &C = CGF.getContext();
3981  QualType KmpInt32Ty =
3982  C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
3983  // Emit helper vars inits.
3984  LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.",
3985  CGF.Builder.getInt32(0));
3986  llvm::ConstantInt *GlobalUBVal = CS != nullptr
3987  ? CGF.Builder.getInt32(CS->size() - 1)
3988  : CGF.Builder.getInt32(0);
3989  LValue UB =
3990  createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.", GlobalUBVal);
3991  LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.",
3992  CGF.Builder.getInt32(1));
3993  LValue IL = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.il.",
3994  CGF.Builder.getInt32(0));
3995  // Loop counter.
3996  LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv.");
3997  OpaqueValueExpr IVRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue);
3998  CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV);
3999  OpaqueValueExpr UBRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue);
4000  CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB);
4001  // Generate condition for loop.
4002  BinaryOperator *Cond = BinaryOperator::Create(
4003  C, &IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_PRValue, OK_Ordinary,
4004  S.getBeginLoc(), FPOptionsOverride());
4005  // Increment for loop counter.
4006  UnaryOperator *Inc = UnaryOperator::Create(
4007  C, &IVRefExpr, UO_PreInc, KmpInt32Ty, VK_PRValue, OK_Ordinary,
4008  S.getBeginLoc(), true, FPOptionsOverride());
4009  auto &&BodyGen = [CapturedStmt, CS, &S, &IV](CodeGenFunction &CGF) {
4010  // Iterate through all sections and emit a switch construct:
4011  // switch (IV) {
4012  // case 0:
4013  // <SectionStmt[0]>;
4014  // break;
4015  // ...
4016  // case <NumSection> - 1:
4017  // <SectionStmt[<NumSection> - 1]>;
4018  // break;
4019  // }
4020  // .omp.sections.exit:
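 // For example (hypothetical input, not from this file), the construct
 //   #pragma omp sections
 //   { #pragma omp section
 //     foo();
 //     #pragma omp section
 //     bar(); }
 // lowers to 'switch (IV) { case 0: foo(); break; case 1: bar(); break; }',
 // with the static runtime schedule below assigning each thread a
 // subrange of IV values.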
4021  llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.sections.exit");
4022  llvm::SwitchInst *SwitchStmt =
4023  CGF.Builder.CreateSwitch(CGF.EmitLoadOfScalar(IV, S.getBeginLoc()),
4024  ExitBB, CS == nullptr ? 1 : CS->size());
4025  if (CS) {
4026  unsigned CaseNumber = 0;
4027  for (const Stmt *SubStmt : CS->children()) {
4028  auto CaseBB = CGF.createBasicBlock(".omp.sections.case");
4029  CGF.EmitBlock(CaseBB);
4030  SwitchStmt->addCase(CGF.Builder.getInt32(CaseNumber), CaseBB);
4031  CGF.EmitStmt(SubStmt);
4032  CGF.EmitBranch(ExitBB);
4033  ++CaseNumber;
4034  }
4035  } else {
4036  llvm::BasicBlock *CaseBB = CGF.createBasicBlock(".omp.sections.case");
4037  CGF.EmitBlock(CaseBB);
4038  SwitchStmt->addCase(CGF.Builder.getInt32(0), CaseBB);
4039  CGF.EmitStmt(CapturedStmt);
4040  CGF.EmitBranch(ExitBB);
4041  }
4042  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
4043  };
4044 
4045  CodeGenFunction::OMPPrivateScope LoopScope(CGF);
4046  if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) {
4047  // Emit implicit barrier to synchronize threads and avoid data races on
4048  // initialization of firstprivate variables and post-update of lastprivate
4049  // variables.
4050  CGF.CGM.getOpenMPRuntime().emitBarrierCall(
4051  CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
4052  /*ForceSimpleCall=*/true);
4053  }
4054  CGF.EmitOMPPrivateClause(S, LoopScope);
4055  CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion(CGF, S, IV);
4056  HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
4057  CGF.EmitOMPReductionClauseInit(S, LoopScope);
4058  (void)LoopScope.Privatize();
4059  if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
4060  CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);
4061 
4062  // Emit static non-chunked loop.
4063  OpenMPScheduleTy ScheduleKind;
4064  ScheduleKind.Schedule = OMPC_SCHEDULE_static;
4065  CGOpenMPRuntime::StaticRTInput StaticInit(
4066  /*IVSize=*/32, /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(CGF),
4067  LB.getAddress(CGF), UB.getAddress(CGF), ST.getAddress(CGF));
4068  CGF.CGM.getOpenMPRuntime().emitForStaticInit(
4069  CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind, StaticInit);
4070  // UB = min(UB, GlobalUB);
4071  llvm::Value *UBVal = CGF.EmitLoadOfScalar(UB, S.getBeginLoc());
4072  llvm::Value *MinUBGlobalUB = CGF.Builder.CreateSelect(
4073  CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal);
4074  CGF.EmitStoreOfScalar(MinUBGlobalUB, UB);
4075  // IV = LB;
4076  CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getBeginLoc()), IV);
4077  // while (idx <= UB) { BODY; ++idx; }
4078  CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, Cond, Inc, BodyGen,
4079  [](CodeGenFunction &) {});
4080  // Tell the runtime we are done.
4081  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
4082  CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(),
4083  S.getDirectiveKind());
4084  };
4085  CGF.OMPCancelStack.emitExit(CGF, S.getDirectiveKind(), CodeGen);
4086  CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
4087  // Emit post-update of the reduction variables if IsLastIter != 0.
4088  emitPostUpdateForReductionClause(CGF, S, [IL, &S](CodeGenFunction &CGF) {
4089  return CGF.Builder.CreateIsNotNull(
4090  CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
4091  });
4092 
4093  // Emit final copy of the lastprivate variables if IsLastIter != 0.
4094  if (HasLastprivates)
4095  CGF.EmitOMPLastprivateClauseFinal(
4096  S, /*NoFinals=*/false,
4097  CGF.Builder.CreateIsNotNull(
4098  CGF.EmitLoadOfScalar(IL, S.getBeginLoc())));
4099  };
4100 
4101  bool HasCancel = false;
4102  if (auto *OSD = dyn_cast<OMPSectionsDirective>(&S))
4103  HasCancel = OSD->hasCancel();
4104  else if (auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&S))
4105  HasCancel = OPSD->hasCancel();
4106  OMPCancelStackRAII CancelRegion(*this, S.getDirectiveKind(), HasCancel);
4107  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_sections, CodeGen,
4108  HasCancel);
4109  // Emit barrier for lastprivates only if 'sections' directive has 'nowait'
4110  // clause. Otherwise the barrier will be generated by the codegen for the
4111  // directive.
4112  if (HasLastprivates && S.getSingleClause<OMPNowaitClause>()) {
4113  // Emit implicit barrier to synchronize threads and avoid data races on
4114  // initialization of firstprivate variables.
4115  CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(),
4116  OMPD_unknown);
4117  }
4118 }
4119 
4120 void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) {
4121  if (CGM.getLangOpts().OpenMPIRBuilder) {
4122  llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
4123  using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
4124  using BodyGenCallbackTy = llvm::OpenMPIRBuilder::StorableBodyGenCallbackTy;
4125 
4126  auto FiniCB = [this](InsertPointTy IP) {
4127  OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
4128  };
4129 
4130  const CapturedStmt *ICS = S.getInnermostCapturedStmt();
4131  const Stmt *CapturedStmt = S.getInnermostCapturedStmt()->getCapturedStmt();
4132  const auto *CS = dyn_cast<CompoundStmt>(CapturedStmt);
4133  llvm::SmallVector<BodyGenCallbackTy, 4> SectionCBVector;
4134  if (CS) {
4135  for (const Stmt *SubStmt : CS->children()) {
4136  auto SectionCB = [this, SubStmt](InsertPointTy AllocaIP,
4137  InsertPointTy CodeGenIP) {
4138  OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
4139  *this, SubStmt, AllocaIP, CodeGenIP, "section");
4140  };
4141  SectionCBVector.push_back(SectionCB);
4142  }
4143  } else {
4144  auto SectionCB = [this, CapturedStmt](InsertPointTy AllocaIP,
4145  InsertPointTy CodeGenIP) {
4146  OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
4147  *this, CapturedStmt, AllocaIP, CodeGenIP, "section");
4148  };
4149  SectionCBVector.push_back(SectionCB);
4150  }
4151 
4152  // Privatization callback that performs appropriate action for
4153  // shared/private/firstprivate/lastprivate/copyin/... variables.
4154  //
4155  // TODO: This defaults to shared right now.
4156  auto PrivCB = [](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
4157  llvm::Value &, llvm::Value &Val, llvm::Value *&ReplVal) {
4158  // The next line is appropriate only for variables (Val) with the
4159  // data-sharing attribute "shared".
4160  ReplVal = &Val;
4161 
4162  return CodeGenIP;
4163  };
4164 
4165  CGCapturedStmtInfo CGSI(*ICS, CR_OpenMP);
4166  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI);
4167  llvm::OpenMPIRBuilder::InsertPointTy AllocaIP(
4168  AllocaInsertPt->getParent(), AllocaInsertPt->getIterator());
4169  Builder.restoreIP(OMPBuilder.createSections(
4170  Builder, AllocaIP, SectionCBVector, PrivCB, FiniCB, S.hasCancel(),
4171  S.getSingleClause<OMPNowaitClause>()));
4172  return;
4173  }
4174  {
4175  auto LPCRegion =
4176  CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
4177  OMPLexicalScope Scope(*this, S, OMPD_unknown);
4178  EmitSections(S);
4179  }
4180  // Emit an implicit barrier at the end.
4181  if (!S.getSingleClause<OMPNowaitClause>()) {
4182  CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(),
4183  OMPD_sections);
4184  }
4185  // Check for outer lastprivate conditional update.
4186  checkForLastprivateConditionalUpdate(*this, S);
4187 }
4188 
4189 void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) {
4190  if (CGM.getLangOpts().OpenMPIRBuilder) {
4191  llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
4192  using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
4193 
4194  const Stmt *SectionRegionBodyStmt = S.getAssociatedStmt();
4195  auto FiniCB = [this](InsertPointTy IP) {
4196  OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
4197  };
4198 
4199  auto BodyGenCB = [SectionRegionBodyStmt, this](InsertPointTy AllocaIP,
4200  InsertPointTy CodeGenIP) {
4201  OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
4202  *this, SectionRegionBodyStmt, AllocaIP, CodeGenIP, "section");
4203  };
4204 
4205  LexicalScope Scope(*this, S.getSourceRange());
4206  EmitStopPoint(&S);
4207  Builder.restoreIP(OMPBuilder.createSection(Builder, BodyGenCB, FiniCB));
4208 
4209  return;
4210  }
4211  LexicalScope Scope(*this, S.getSourceRange());
4212  EmitStopPoint(&S);
4213  EmitStmt(S.getAssociatedStmt());
4214 }
4215 
4216 void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) {
4217  llvm::SmallVector<const Expr *, 8> CopyprivateVars;
4218  llvm::SmallVector<const Expr *, 8> DestExprs;
4219  llvm::SmallVector<const Expr *, 8> SrcExprs;
4220  llvm::SmallVector<const Expr *, 8> AssignmentOps;
4221  // Check if there are any 'copyprivate' clauses associated with this
4222  // 'single' construct.
4223  // Build a list of copyprivate variables along with helper expressions
4224  // (<source>, <destination>, <destination>=<source> expressions)
4225  for (const auto *C : S.getClausesOfKind<OMPCopyprivateClause>()) {
4226  CopyprivateVars.append(C->varlists().begin(), C->varlists().end());
4227  DestExprs.append(C->destination_exprs().begin(),
4228  C->destination_exprs().end());
4229  SrcExprs.append(C->source_exprs().begin(), C->source_exprs().end());
4230  AssignmentOps.append(C->assignment_ops().begin(),
4231  C->assignment_ops().end());
4232  }
4233  // Emit code for 'single' region along with 'copyprivate' clauses
4234  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4235  Action.Enter(CGF);
4236  OMPPrivateScope SingleScope(CGF);
4237  (void)CGF.EmitOMPFirstprivateClause(S, SingleScope);
4238  CGF.EmitOMPPrivateClause(S, SingleScope);
4239  (void)SingleScope.Privatize();
4240  CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
4241  };
4242  {
4243  auto LPCRegion =
4244  CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
4245  OMPLexicalScope Scope(*this, S, OMPD_unknown);
4246  CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getBeginLoc(),
4247  CopyprivateVars, DestExprs,
4248  SrcExprs, AssignmentOps);
4249  }
4250  // Emit an implicit barrier at the end (to avoid a data race on firstprivate
4251  // init, or if no 'nowait' clause was specified and no 'copyprivate' clause).
4252  if (!S.getSingleClause<OMPNowaitClause>() && CopyprivateVars.empty()) {
4253  CGM.getOpenMPRuntime().emitBarrierCall(
4254  *this, S.getBeginLoc(),
4255  S.getSingleClause<OMPNowaitClause>() ? OMPD_unknown : OMPD_single);
4256  }
4257  // Check for outer lastprivate conditional update.
4258  checkForLastprivateConditionalUpdate(*this, S);
4259 }
4260 
4261 static void emitMaster(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
4262  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4263  Action.Enter(CGF);
4264  CGF.EmitStmt(S.getRawStmt());
4265  };
4266  CGF.CGM.getOpenMPRuntime().emitMasterRegion(CGF, CodeGen, S.getBeginLoc());
4267 }
4268 
4269 void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) {
4270  if (CGM.getLangOpts().OpenMPIRBuilder) {
4271  llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
4272  using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
4273 
4274  const Stmt *MasterRegionBodyStmt = S.getAssociatedStmt();
4275 
4276  auto FiniCB = [this](InsertPointTy IP) {
4277  OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
4278  };
4279 
4280  auto BodyGenCB = [MasterRegionBodyStmt, this](InsertPointTy AllocaIP,
4281  InsertPointTy CodeGenIP) {
4282  OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
4283  *this, MasterRegionBodyStmt, AllocaIP, CodeGenIP, "master");
4284  };
4285 
4286  LexicalScope Scope(*this, S.getSourceRange());
4287  EmitStopPoint(&S);
4288  Builder.restoreIP(OMPBuilder.createMaster(Builder, BodyGenCB, FiniCB));
4289 
4290  return;
4291  }
4292  LexicalScope Scope(*this, S.getSourceRange());
4293  EmitStopPoint(&S);
4294  emitMaster(*this, S);
4295 }
4296 
4297 static void emitMasked(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
4298  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4299  Action.Enter(CGF);
4300  CGF.EmitStmt(S.getRawStmt());
4301  };
4302  Expr *Filter = nullptr;
4303  if (const auto *FilterClause = S.getSingleClause<OMPFilterClause>())
4304  Filter = FilterClause->getThreadID();
4305  CGF.CGM.getOpenMPRuntime().emitMaskedRegion(CGF, CodeGen, S.getBeginLoc(),
4306  Filter);
4307 }
4308 
4309 void CodeGenFunction::EmitOMPMaskedDirective(const OMPMaskedDirective &S) {
4310  if (CGM.getLangOpts().OpenMPIRBuilder) {
4311  llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
4312  using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
4313 
4314  const Stmt *MaskedRegionBodyStmt = S.getAssociatedStmt();
4315  const Expr *Filter = nullptr;
4316  if (const auto *FilterClause = S.getSingleClause<OMPFilterClause>())
4317  Filter = FilterClause->getThreadID();
4318  llvm::Value *FilterVal = Filter
4319  ? EmitScalarExpr(Filter, CGM.Int32Ty)
4320  : llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/0);
4321 
4322  auto FiniCB = [this](InsertPointTy IP) {
4323  OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
4324  };
4325 
4326  auto BodyGenCB = [MaskedRegionBodyStmt, this](InsertPointTy AllocaIP,
4327  InsertPointTy CodeGenIP) {
4328  OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
4329  *this, MaskedRegionBodyStmt, AllocaIP, CodeGenIP, "masked");
4330  };
4331 
4332  LexicalScope Scope(*this, S.getSourceRange());
4333  EmitStopPoint(&S);
4334  Builder.restoreIP(
4335  OMPBuilder.createMasked(Builder, BodyGenCB, FiniCB, FilterVal));
4336 
4337  return;
4338  }
4339  LexicalScope Scope(*this, S.getSourceRange());
4340  EmitStopPoint(&S);
4341  emitMasked(*this, S);
4342 }
4343 
4344 void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) {
4345  if (CGM.getLangOpts().OpenMPIRBuilder) {
4346  llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
4347  using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
4348 
4349  const Stmt *CriticalRegionBodyStmt = S.getAssociatedStmt();
4350  const Expr *Hint = nullptr;
4351  if (const auto *HintClause = S.getSingleClause<OMPHintClause>())
4352  Hint = HintClause->getHint();
4353 
4354  // TODO: This is slightly different from what's currently being done in
4355  // clang. Fix the Int32Ty to IntPtrTy (pointer width size) when everything
4356  // about typing is final.
4357  llvm::Value *HintInst = nullptr;
4358  if (Hint)
4359  HintInst =
4360  Builder.CreateIntCast(EmitScalarExpr(Hint), CGM.Int32Ty, false);
4361 
4362  auto FiniCB = [this](InsertPointTy IP) {
4363  OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
4364  };
4365 
4366  auto BodyGenCB = [CriticalRegionBodyStmt, this](InsertPointTy AllocaIP,
4367  InsertPointTy CodeGenIP) {
4368  OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
4369  *this, CriticalRegionBodyStmt, AllocaIP, CodeGenIP, "critical");
4370  };
4371 
4372  LexicalScope Scope(*this, S.getSourceRange());
4373  EmitStopPoint(&S);
4374  Builder.restoreIP(OMPBuilder.createCritical(
4375  Builder, BodyGenCB, FiniCB, S.getDirectiveName().getAsString(),
4376  HintInst));
4377 
4378  return;
4379  }
4380 
4381  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4382  Action.Enter(CGF);
4383  CGF.EmitStmt(S.getAssociatedStmt());
4384  };
4385  const Expr *Hint = nullptr;
4386  if (const auto *HintClause = S.getSingleClause<OMPHintClause>())
4387  Hint = HintClause->getHint();
4388  LexicalScope Scope(*this, S.getSourceRange());
4389  EmitStopPoint(&S);
4390  CGM.getOpenMPRuntime().emitCriticalRegion(*this,
4391  S.getDirectiveName().getAsString(),
4392  CodeGen, S.getBeginLoc(), Hint);
4393 }
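 // For illustration (assumed source, not from this file): a construct
 // '#pragma omp critical (lck) hint(omp_sync_hint_contended)' reaches the
 // fallback path above with S.getDirectiveName() == "lck" and Hint set to
 // the hint expression, both forwarded to the runtime's critical-region entry.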
4394 
4395 void CodeGenFunction::EmitOMPParallelForDirective(
4396  const OMPParallelForDirective &S) {
4397  // Emit directive as a combined directive that consists of two implicit
4398  // directives: 'parallel' with 'for' directive.
4399  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4400  Action.Enter(CGF);
4401  emitOMPCopyinClause(CGF, S);
4402  (void)emitWorksharingDirective(CGF, S, S.hasCancel());
4403  };
4404  {
4405  const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) {
4406  CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
4407  CGCapturedStmtInfo CGSI(CR_OpenMP);
4408  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGSI);
4409  OMPLoopScope LoopScope(CGF, S);
4410  return CGF.EmitScalarExpr(S.getNumIterations());
4411  };
4412  bool IsInscan = llvm::any_of(S.getClausesOfKind<OMPReductionClause>(),
4413  [](const OMPReductionClause *C) {
4414  return C->getModifier() == OMPC_REDUCTION_inscan;
4415  });
4416  if (IsInscan)
4417  emitScanBasedDirectiveDecls(*this, S, NumIteratorsGen);
4418  auto LPCRegion =
4419  CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
4420  emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen,
4421  emitEmptyBoundParameters);
4422  if (IsInscan)
4423  emitScanBasedDirectiveFinals(*this, S, NumIteratorsGen);
4424  }
4425  // Check for outer lastprivate conditional update.
4426  checkForLastprivateConditionalUpdate(*this, S);
4427 }
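// Illustrative sketch (assumed user code, not part of this file): a combined
// 'parallel for', here with an inscan reduction that exercises the
// emitScanBasedDirectiveDecls/Finals calls above, e.g.
//
//   void prefix_sum(int *a, int *b, int n) {
//     int x = 0;
//   #pragma omp parallel for reduction(inscan, +: x)
//     for (int i = 0; i < n; ++i) {
//       x += a[i];
//   #pragma omp scan inclusive(x)
//       b[i] = x;
//     }
//   }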
4428 
4429 void CodeGenFunction::EmitOMPParallelForSimdDirective(
4430  const OMPParallelForSimdDirective &S) {
4431  // Emit directive as a combined directive that consists of two implicit
4432  // directives: 'parallel' with 'for' directive.
4433  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4434  Action.Enter(CGF);
4435  emitOMPCopyinClause(CGF, S);
4436  (void)emitWorksharingDirective(CGF, S, /*HasCancel=*/false);
4437  };
4438  {
4439  const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) {
4440  CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
4441  CGCapturedStmtInfo CGSI(CR_OpenMP);
4442  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGSI);
4443  OMPLoopScope LoopScope(CGF, S);
4444  return CGF.EmitScalarExpr(S.getNumIterations());
4445  };
4446  bool IsInscan = llvm::any_of(S.getClausesOfKind<OMPReductionClause>(),
4447  [](const OMPReductionClause *C) {
4448  return C->getModifier() == OMPC_REDUCTION_inscan;
4449  });
4450  if (IsInscan)
4451  emitScanBasedDirectiveDecls(*this, S, NumIteratorsGen);
4452  auto LPCRegion =
4453  CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
4454  emitCommonOMPParallelDirective(*this, S, OMPD_for_simd, CodeGen,
4455  emitEmptyBoundParameters);
4456  if (IsInscan)
4457  emitScanBasedDirectiveFinals(*this, S, NumIteratorsGen);
4458  }
4459  // Check for outer lastprivate conditional update.
4460  checkForLastprivateConditionalUpdate(*this, S);
4461 }
4462 
4463 void CodeGenFunction::EmitOMPParallelMasterDirective(
4464  const OMPParallelMasterDirective &S) {
4465  // Emit directive as a combined directive that consists of two implicit
4466  // directives: 'parallel' with 'master' directive.
4467  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4468  Action.Enter(CGF);
4469  OMPPrivateScope PrivateScope(CGF);
4470  emitOMPCopyinClause(CGF, S);
4471  (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
4472  CGF.EmitOMPPrivateClause(S, PrivateScope);
4473  CGF.EmitOMPReductionClauseInit(S, PrivateScope);
4474  (void)PrivateScope.Privatize();
4475  emitMaster(CGF, S);
4476  CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
4477  };
4478  {
4479  auto LPCRegion =
4480  CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
4481  emitCommonOMPParallelDirective(*this, S, OMPD_master, CodeGen,
4482  emitEmptyBoundParameters);
4483  emitPostUpdateForReductionClause(*this, S,
4484  [](CodeGenFunction &) { return nullptr; });
4485  }
4486  // Check for outer lastprivate conditional update.
4487  checkForLastprivateConditionalUpdate(*this, S);
4488 }
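// Illustrative sketch (assumed user code, not part of this file): the combined
// 'parallel master' construct handled above, e.g.
//
//   #pragma omp parallel master firstprivate(x) reduction(+: r)
//   r += x;
//
// The parallel region is emitted first; emitMaster then filters execution to
// the master thread after the private/firstprivate/reduction setup.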
4489 
4490 void CodeGenFunction::EmitOMPParallelSectionsDirective(
4491  const OMPParallelSectionsDirective &S) {
4492  // Emit directive as a combined directive that consists of two implicit
4493  // directives: 'parallel' with 'sections' directive.
4494  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4495  Action.Enter(CGF);
4496  emitOMPCopyinClause(CGF, S);
4497  CGF.EmitSections(S);
4498  };
4499  {
4500  auto LPCRegion =
4501  CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
4502  emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen,
4503  emitEmptyBoundParameters);
4504  }
4505  // Check for outer lastprivate conditional update.
4506  checkForLastprivateConditionalUpdate(*this, S);
4507 }
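// Illustrative sketch (assumed user code, not part of this file): 'parallel
// sections' is a parallel region whose body is the sections codegen
// (EmitSections), e.g.
//
//   #pragma omp parallel sections
//   {
//   #pragma omp section
//     produce();   // 'produce'/'consume' are hypothetical helpers
//   #pragma omp section
//     consume();
//   }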
4508 
4509 namespace {
4510 /// Get the list of variables declared in the context of the untied tasks.
4511 class CheckVarsEscapingUntiedTaskDeclContext final
4512  : public ConstStmtVisitor<CheckVarsEscapingUntiedTaskDeclContext> {
4513  llvm::SmallVector<const VarDecl *, 4> PrivateDecls;
4514 
4515 public:
4516  explicit CheckVarsEscapingUntiedTaskDeclContext() = default;
4517  virtual ~CheckVarsEscapingUntiedTaskDeclContext() = default;
4518  void VisitDeclStmt(const DeclStmt *S) {
4519  if (!S)
4520  return;
4521  // Only local vars need privatizing; static locals can be processed as is.
4522  for (const Decl *D : S->decls()) {
4523  if (const auto *VD = dyn_cast_or_null<VarDecl>(D))
4524  if (VD->hasLocalStorage())
4525  PrivateDecls.push_back(VD);
4526  }
4527  }
4528  void VisitOMPExecutableDirective(const OMPExecutableDirective *) {}
4529  void VisitCapturedStmt(const CapturedStmt *) {}
4530  void VisitLambdaExpr(const LambdaExpr *) {}
4531  void VisitBlockExpr(const BlockExpr *) {}
4532  void VisitStmt(const Stmt *S) {
4533  if (!S)
4534  return;
4535  for (const Stmt *Child : S->children())
4536  if (Child)
4537  Visit(Child);
4538  }
4539 
4540  /// Returns the collected list of variables to privatize.
4541  ArrayRef<const VarDecl *> getPrivateDecls() const { return PrivateDecls; }
4542 };
4543 } // anonymous namespace
4544 
4545 static void buildDependences(const OMPExecutableDirective &S,
4546  OMPTaskDataTy &Data) {
4547 
4548  // First look for 'omp_all_memory' and add this first.
4549  bool OmpAllMemory = false;
4550  if (llvm::any_of(
4551  S.getClausesOfKind<OMPDependClause>(), [](const OMPDependClause *C) {
4552  return C->getDependencyKind() == OMPC_DEPEND_outallmemory ||
4553  C->getDependencyKind() == OMPC_DEPEND_inoutallmemory;
4554  })) {
4555  OmpAllMemory = true;
4556  // Since OMPC_DEPEND_outallmemory and OMPC_DEPEND_inoutallmemory are
4557  // equivalent as far as the runtime is concerned, always use
4558  // OMPC_DEPEND_outallmemory to simplify.
4559  OMPTaskDataTy::DependData &DD =
4560  Data.Dependences.emplace_back(OMPC_DEPEND_outallmemory,
4561  /*IteratorExpr=*/nullptr);
4562  // Add a nullptr Expr to simplify the codegen in emitDependData.
4563  DD.DepExprs.push_back(nullptr);
4564  }
4565  // Add the remaining dependences, skipping any 'out' or 'inout' entries
4566  // that are overridden by 'omp_all_memory'.
4567  for (const auto *C : S.getClausesOfKind<OMPDependClause>()) {
4568  OpenMPDependClauseKind Kind = C->getDependencyKind();
4569  if (Kind == OMPC_DEPEND_outallmemory || Kind == OMPC_DEPEND_inoutallmemory)
4570  continue;
4571  if (OmpAllMemory && (Kind == OMPC_DEPEND_out || Kind == OMPC_DEPEND_inout))
4572  continue;
4573  OMPTaskDataTy::DependData &DD =
4574  Data.Dependences.emplace_back(C->getDependencyKind(), C->getModifier());
4575  DD.DepExprs.append(C->varlist_begin(), C->varlist_end());
4576  }
4577 }
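// Illustrative sketch (assumed user code, not part of this file): depend
// clauses as collected by buildDependences, including the OpenMP 5.1
// 'omp_all_memory' reserved locator, e.g.
//
//   #pragma omp task depend(out: x) depend(inout: omp_all_memory)
//     x = compute();   // 'compute' is hypothetical
//
// The 'out: x' entry is skipped by the loop above because the
// 'omp_all_memory' dependence already subsumes it.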
4578 
4579 void CodeGenFunction::EmitOMPTaskBasedDirective(
4580  const OMPExecutableDirective &S, const OpenMPDirectiveKind CapturedRegion,
4581  const RegionCodeGenTy &BodyGen, const TaskGenTy &TaskGen,
4582  OMPTaskDataTy &Data) {
4583  // Emit outlined function for task construct.
4584  const CapturedStmt *CS = S.getCapturedStmt(CapturedRegion);
4585  auto I = CS->getCapturedDecl()->param_begin();
4586  auto PartId = std::next(I);
4587  auto TaskT = std::next(I, 4);
4588  // Check if the task is final
4589  if (const auto *Clause = S.getSingleClause<OMPFinalClause>()) {
4590  // If the condition constant folds and can be elided, try to avoid emitting
4591  // the condition and the dead arm of the if/else.
4592  const Expr *Cond = Clause->getCondition();
4593  bool CondConstant;
4594  if (ConstantFoldsToSimpleInteger(Cond, CondConstant))
4595  Data.Final.setInt(CondConstant);
4596  else
4597  Data.Final.setPointer(EvaluateExprAsBool(Cond));
4598  } else {
4599  // By default the task is not final.
4600  Data.Final.setInt(/*IntVal=*/false);
4601  }
4602  // Check if the task has a 'priority' clause.
4603  if (const auto *Clause = S.getSingleClause<OMPPriorityClause>()) {
4604  const Expr *Prio = Clause->getPriority();
4605  Data.Priority.setInt(/*IntVal=*/true);
4606  Data.Priority.setPointer(EmitScalarConversion(
4607  EmitScalarExpr(Prio), Prio->getType(),
4608  getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1),
4609  Prio->getExprLoc()));
4610  }
4611  // The first function argument for tasks is a thread id, the second one is
4612  // a part id (0 for tied tasks, >=0 for untied tasks).
4613  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
4614  // Get list of private variables.
4615  for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) {
4616  auto IRef = C->varlist_begin();
4617  for (const Expr *IInit : C->private_copies()) {
4618  const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
4619  if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
4620  Data.PrivateVars.push_back(*IRef);
4621  Data.PrivateCopies.push_back(IInit);
4622  }
4623  ++IRef;
4624  }
4625  }
4626  EmittedAsPrivate.clear();
4627  // Get list of firstprivate variables.
4628  for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
4629  auto IRef = C->varlist_begin();
4630  auto IElemInitRef = C->inits().begin();
4631  for (const Expr *IInit : C->private_copies()) {
4632  const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
4633  if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
4634  Data.FirstprivateVars.push_back(*IRef);
4635  Data.FirstprivateCopies.push_back(IInit);
4636  Data.FirstprivateInits.push_back(*IElemInitRef);
4637  }
4638  ++IRef;
4639  ++IElemInitRef;
4640  }
4641  }
4642  // Get list of lastprivate variables (for taskloops).
4643  llvm::MapVector<const VarDecl *, const DeclRefExpr *> LastprivateDstsOrigs;
4644  for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
4645  auto IRef = C->varlist_begin();
4646  auto ID = C->destination_exprs().begin();
4647  for (const Expr *IInit : C->private_copies()) {
4648  const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
4649  if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
4650  Data.LastprivateVars.push_back(*IRef);
4651  Data.LastprivateCopies.push_back(IInit);
4652  }
4653  LastprivateDstsOrigs.insert(
4654  std::make_pair(cast<VarDecl>(cast<DeclRefExpr>(*ID)->getDecl()),
4655  cast<DeclRefExpr>(*IRef)));
4656  ++IRef;
4657  ++ID;
4658  }
4659  }
4660  SmallVector<const Expr *, 4> LHSs;
4661  SmallVector<const Expr *, 4> RHSs;
4662  for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
4663  Data.ReductionVars.append(C->varlist_begin(), C->varlist_end());
4664  Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end());
4665  Data.ReductionCopies.append(C->privates().begin(), C->privates().end());
4666  Data.ReductionOps.append(C->reduction_ops().begin(),
4667  C->reduction_ops().end());
4668  LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
4669  RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
4670  }
4671  Data.Reductions = CGM.getOpenMPRuntime().emitTaskReductionInit(
4672  *this, S.getBeginLoc(), LHSs, RHSs, Data);
4673  // Build list of dependences.
4674  buildDependences(S, Data);
4675  // Get list of local vars for untied tasks.
4676  if (!Data.Tied) {
4677  CheckVarsEscapingUntiedTaskDeclContext Checker;
4678  Checker.Visit(S.getInnermostCapturedStmt()->getCapturedStmt());
4679  Data.PrivateLocals.append(Checker.getPrivateDecls().begin(),
4680  Checker.getPrivateDecls().end());
4681  }
4682  auto &&CodeGen = [&Data, &S, CS, &BodyGen, &LastprivateDstsOrigs,
4683  CapturedRegion](CodeGenFunction &CGF,
4684  PrePostActionTy &Action) {
4685  llvm::MapVector<CanonicalDeclPtr<const VarDecl>,
4686  std::pair<Address, Address>>
4687  UntiedLocalVars;
4688  // Set proper addresses for generated private copies.
4689  OMPPrivateScope Scope(CGF);
4690  // Generate debug info for variables present in shared clause.
4691  if (auto *DI = CGF.getDebugInfo()) {
4692  llvm::SmallDenseMap<const VarDecl *, FieldDecl *> CaptureFields =
4693  CGF.CapturedStmtInfo->getCaptureFields();
4694  llvm::Value *ContextValue = CGF.CapturedStmtInfo->getContextValue();
4695  if (CaptureFields.size() && ContextValue) {
4696  unsigned CharWidth = CGF.getContext().getCharWidth();
4697  // The shared variables are packed together as members of a structure.
4698  // So the address of each shared variable can be computed by adding
4699  // offset of it (within record) to the base address of record. For each
4700  // shared variable, debug intrinsic llvm.dbg.declare is generated with
4701  // appropriate expressions (DIExpression).
4702  // Ex:
4703  // %12 = load %struct.anon*, %struct.anon** %__context.addr.i
4704  // call void @llvm.dbg.declare(metadata %struct.anon* %12,
4705  // metadata !svar1,
4706  // metadata !DIExpression(DW_OP_deref))
4707  // call void @llvm.dbg.declare(metadata %struct.anon* %12,
4708  // metadata !svar2,
4709  // metadata !DIExpression(DW_OP_plus_uconst, 8, DW_OP_deref))
4710  for (auto It = CaptureFields.begin(); It != CaptureFields.end(); ++It) {
4711  const VarDecl *SharedVar = It->first;
4712  RecordDecl *CaptureRecord = It->second->getParent();
4713  const ASTRecordLayout &Layout =
4714  CGF.getContext().getASTRecordLayout(CaptureRecord);
4715  unsigned Offset =
4716  Layout.getFieldOffset(It->second->getFieldIndex()) / CharWidth;
4717  if (CGF.CGM.getCodeGenOpts().hasReducedDebugInfo())
4718  (void)DI->EmitDeclareOfAutoVariable(SharedVar, ContextValue,
4719  CGF.Builder, false);
4720  llvm::Instruction &Last = CGF.Builder.GetInsertBlock()->back();
4721  // Get the call dbg.declare instruction we just created and update
4722  // its DIExpression to add offset to base address.
4723  if (auto DDI = dyn_cast<llvm::DbgVariableIntrinsic>(&Last)) {
4724  SmallVector<uint64_t, 8> Ops;
4725  // Add the offset to the base address if non-zero.
4726  if (Offset) {
4727  Ops.push_back(llvm::dwarf::DW_OP_plus_uconst);
4728  Ops.push_back(Offset);
4729  }
4730  Ops.push_back(llvm::dwarf::DW_OP_deref);
4731  auto &Ctx = DDI->getContext();
4732  llvm::DIExpression *DIExpr = llvm::DIExpression::get(Ctx, Ops);
4733  Last.setOperand(2, llvm::MetadataAsValue::get(Ctx, DIExpr));
4734  }
4735  }
4736  }
4737  }
4738  llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> FirstprivatePtrs;
4739  if (!Data.PrivateVars.empty() || !Data.FirstprivateVars.empty() ||
4740  !Data.LastprivateVars.empty() || !Data.PrivateLocals.empty()) {
4741  enum { PrivatesParam = 2, CopyFnParam = 3 };
4742  llvm::Value *CopyFn = CGF.Builder.CreateLoad(
4743  CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam)));
4744  llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(
4745  CS->getCapturedDecl()->getParam(PrivatesParam)));
4746  // Map privates.
4747  llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs;
4748  llvm::SmallVector<llvm::Value *, 16> CallArgs;
4749  llvm::SmallVector<llvm::Type *, 4> ParamTypes;
4750  CallArgs.push_back(PrivatesPtr);
4751  ParamTypes.push_back(PrivatesPtr->getType());
4752  for (const Expr *E : Data.PrivateVars) {
4753  const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4754  Address PrivatePtr = CGF.CreateMemTemp(
4755  CGF.getContext().getPointerType(E->getType()), ".priv.ptr.addr");
4756  PrivatePtrs.emplace_back(VD, PrivatePtr);
4757  CallArgs.push_back(PrivatePtr.getPointer());
4758  ParamTypes.push_back(PrivatePtr.getType());
4759  }
4760  for (const Expr *E : Data.FirstprivateVars) {
4761  const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4762  Address PrivatePtr =
4763  CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
4764  ".firstpriv.ptr.addr");
4765  PrivatePtrs.emplace_back(VD, PrivatePtr);
4766  FirstprivatePtrs.emplace_back(VD, PrivatePtr);
4767  CallArgs.push_back(PrivatePtr.getPointer());
4768  ParamTypes.push_back(PrivatePtr.getType());
4769  }
4770  for (const Expr *E : Data.LastprivateVars) {
4771  const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4772  Address PrivatePtr =
4773  CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
4774  ".lastpriv.ptr.addr");
4775  PrivatePtrs.emplace_back(VD, PrivatePtr);
4776  CallArgs.push_back(PrivatePtr.getPointer());
4777  ParamTypes.push_back(PrivatePtr.getType());
4778  }
4779  for (const VarDecl *VD : Data.PrivateLocals) {
4780  QualType Ty = VD->getType().getNonReferenceType();
4781  if (VD->getType()->isLValueReferenceType())
4782  Ty = CGF.getContext().getPointerType(Ty);
4783  if (isAllocatableDecl(VD))
4784  Ty = CGF.getContext().getPointerType(Ty);
4785  Address PrivatePtr = CGF.CreateMemTemp(
4786  CGF.getContext().getPointerType(Ty), ".local.ptr.addr");
4787  auto Result = UntiedLocalVars.insert(
4788  std::make_pair(VD, std::make_pair(PrivatePtr, Address::invalid())));
4789  // If the key already exists, update it in place.
4790  if (Result.second == false)
4791  *Result.first = std::make_pair(
4792  VD, std::make_pair(PrivatePtr, Address::invalid()));
4793  CallArgs.push_back(PrivatePtr.getPointer());
4794  ParamTypes.push_back(PrivatePtr.getType());
4795  }
4796  auto *CopyFnTy = llvm::FunctionType::get(CGF.Builder.getVoidTy(),
4797  ParamTypes, /*isVarArg=*/false);
4798  CopyFn = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4799  CopyFn, CopyFnTy->getPointerTo());
4800  CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
4801  CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs);
4802  for (const auto &Pair : LastprivateDstsOrigs) {
4803  const auto *OrigVD = cast<VarDecl>(Pair.second->getDecl());
4804  DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(OrigVD),
4805  /*RefersToEnclosingVariableOrCapture=*/
4806  CGF.CapturedStmtInfo->lookup(OrigVD) != nullptr,
4807  Pair.second->getType(), VK_LValue,
4808  Pair.second->getExprLoc());
4809  Scope.addPrivate(Pair.first, CGF.EmitLValue(&DRE).getAddress(CGF));
4810  }
4811  for (const auto &Pair : PrivatePtrs) {
4812  Address Replacement = Address(
4813  CGF.Builder.CreateLoad(Pair.second),
4814  CGF.ConvertTypeForMem(Pair.first->getType().getNonReferenceType()),
4815  CGF.getContext().getDeclAlign(Pair.first));
4816  Scope.addPrivate(Pair.first, Replacement);
4817  if (auto *DI = CGF.getDebugInfo())
4818  if (CGF.CGM.getCodeGenOpts().hasReducedDebugInfo())
4819  (void)DI->EmitDeclareOfAutoVariable(
4820  Pair.first, Pair.second.getPointer(), CGF.Builder,
4821  /*UsePointerValue*/ true);
4822  }
4823  // Adjust mapping for internal locals by mapping actual memory instead of
4824  // a pointer to this memory.
4825  for (auto &Pair : UntiedLocalVars) {
4826  QualType VDType = Pair.first->getType().getNonReferenceType();
4827  if (isAllocatableDecl(Pair.first)) {
4828  llvm::Value *Ptr = CGF.Builder.CreateLoad(Pair.second.first);
4829  Address Replacement(
4830  Ptr,
4831  CGF.ConvertTypeForMem(CGF.getContext().getPointerType(VDType)),
4832  CGF.getPointerAlign());
4833  Pair.second.first = Replacement;
4834  Ptr = CGF.Builder.CreateLoad(Replacement);
4835  Replacement = Address(Ptr, CGF.ConvertTypeForMem(VDType),
4836  CGF.getContext().getDeclAlign(Pair.first));
4837  Pair.second.second = Replacement;
4838  } else {
4839  llvm::Value *Ptr = CGF.Builder.CreateLoad(Pair.second.first);
4840  Address Replacement(Ptr, CGF.ConvertTypeForMem(VDType),
4841  CGF.getContext().getDeclAlign(Pair.first));
4842  Pair.second.first = Replacement;
4843  }
4844  }
4845  }
4846  if (Data.Reductions) {
4847  OMPPrivateScope FirstprivateScope(CGF);
4848  for (const auto &Pair : FirstprivatePtrs) {
4849  Address Replacement(
4850  CGF.Builder.CreateLoad(Pair.second),
4851  CGF.ConvertTypeForMem(Pair.first->getType().getNonReferenceType()),
4852  CGF.getContext().getDeclAlign(Pair.first));
4853  FirstprivateScope.addPrivate(Pair.first, Replacement);
4854  }
4855  (void)FirstprivateScope.Privatize();
4856  OMPLexicalScope LexScope(CGF, S, CapturedRegion);
4857  ReductionCodeGen RedCG(Data.ReductionVars, Data.ReductionVars,
4858  Data.ReductionCopies, Data.ReductionOps);
4859  llvm::Value *ReductionsPtr = CGF.Builder.CreateLoad(
4860  CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(9)));
4861  for (unsigned Cnt = 0, E = Data.ReductionVars.size(); Cnt < E; ++Cnt) {
4862  RedCG.emitSharedOrigLValue(CGF, Cnt);
4863  RedCG.emitAggregateType(CGF, Cnt);
4864  // FIXME: This must be removed once the runtime library is fixed.
4865  // Emit required threadprivate variables for
4866  // initializer/combiner/finalizer.
4867  CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
4868  RedCG, Cnt);
4869  Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
4870  CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
4871  Replacement =
4872  Address(CGF.EmitScalarConversion(
4873  Replacement.getPointer(), CGF.getContext().VoidPtrTy,
4874  CGF.getContext().getPointerType(
4875  Data.ReductionCopies[Cnt]->getType()),
4876  Data.ReductionCopies[Cnt]->getExprLoc()),
4877  CGF.ConvertTypeForMem(Data.ReductionCopies[Cnt]->getType()),
4878  Replacement.getAlignment());
4879  Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
4880  Scope.addPrivate(RedCG.getBaseDecl(Cnt), Replacement);
4881  }
4882  }
4883  // Privatize all private variables except for in_reduction items.
4884  (void)Scope.Privatize();
4885  SmallVector<const Expr *, 4> InRedVars;
4886  SmallVector<const Expr *, 4> InRedPrivs;
4887  SmallVector<const Expr *, 4> InRedOps;
4888  SmallVector<const Expr *, 4> TaskgroupDescriptors;
4889  for (const auto *C : S.getClausesOfKind<OMPInReductionClause>()) {
4890  auto IPriv = C->privates().begin();
4891  auto IRed = C->reduction_ops().begin();
4892  auto ITD = C->taskgroup_descriptors().begin();
4893  for (const Expr *Ref : C->varlists()) {
4894  InRedVars.emplace_back(Ref);
4895  InRedPrivs.emplace_back(*IPriv);
4896  InRedOps.emplace_back(*IRed);
4897  TaskgroupDescriptors.emplace_back(*ITD);
4898  std::advance(IPriv, 1);
4899  std::advance(IRed, 1);
4900  std::advance(ITD, 1);
4901  }
4902  }
4903  // Privatize in_reduction items here, because taskgroup descriptors must be
4904  // privatized earlier.
4905  OMPPrivateScope InRedScope(CGF);
4906  if (!InRedVars.empty()) {
4907  ReductionCodeGen RedCG(InRedVars, InRedVars, InRedPrivs, InRedOps);
4908  for (unsigned Cnt = 0, E = InRedVars.size(); Cnt < E; ++Cnt) {
4909  RedCG.emitSharedOrigLValue(CGF, Cnt);
4910  RedCG.emitAggregateType(CGF, Cnt);
4911  // The taskgroup descriptor variable is always implicitly firstprivate and
4912  // is privatized already during processing of the firstprivates.
4913  // FIXME: This must be removed once the runtime library is fixed.
4914  // Emit required threadprivate variables for
4915  // initializer/combiner/finalizer.
4916  CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
4917  RedCG, Cnt);
4918  llvm::Value *ReductionsPtr;
4919  if (const Expr *TRExpr = TaskgroupDescriptors[Cnt]) {
4920  ReductionsPtr = CGF.EmitLoadOfScalar(CGF.EmitLValue(TRExpr),
4921  TRExpr->getExprLoc());
4922  } else {
4923  ReductionsPtr = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
4924  }
4925  Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
4926  CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
4927  Replacement = Address(
4928  CGF.EmitScalarConversion(
4929  Replacement.getPointer(), CGF.getContext().VoidPtrTy,
4930  CGF.getContext().getPointerType(InRedPrivs[Cnt]->getType()),
4931  InRedPrivs[Cnt]->getExprLoc()),
4932  CGF.ConvertTypeForMem(InRedPrivs[Cnt]->getType()),
4933  Replacement.getAlignment());
4934  Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
4935  InRedScope.addPrivate(RedCG.getBaseDecl(Cnt), Replacement);
4936  }
4937  }
4938  (void)InRedScope.Privatize();
4939 
4940  CGOpenMPRuntime::UntiedTaskLocalDeclsRAII LocalVarsScope(CGF,
4941  UntiedLocalVars);
4942  Action.Enter(CGF);
4943  BodyGen(CGF);
4944  };
4945  llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
4946  S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, Data.Tied,
4947  Data.NumberOfParts);
4948  OMPLexicalScope Scope(*this, S, llvm::None,
4949  !isOpenMPParallelDirective(S.getDirectiveKind()) &&
4950  !isOpenMPSimdDirective(S.getDirectiveKind()));
4951  TaskGen(*this, OutlinedFn, Data);
4952 }
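// Illustrative sketch (assumed user code, not part of this file): a task whose
// clauses are funneled through EmitOMPTaskBasedDirective, e.g.
//
//   void spawn(int a, int &r) {
//   #pragma omp task untied firstprivate(a) in_reduction(+: r)
//     r += a;
//   }
//
// The firstprivate copy of 'a' is wired up through the generated copy
// function, and the in_reduction item for 'r' is resolved via
// getTaskReductionItem, as in the CodeGen lambda above.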
4953 
4954 static ImplicitParamDecl *
4955 createImplicitFirstprivateForType(ASTContext &C, OMPTaskDataTy &Data,
4956  QualType Ty, CapturedDecl *CD,
4957  SourceLocation Loc) {
4958  auto *OrigVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, Ty,
4959  ImplicitParamDecl::Other);
4960  auto *OrigRef = DeclRefExpr::Create(
4961  C, NestedNameSpecifierLoc(), SourceLocation(), OrigVD,
4962  /*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue);
4963  auto *PrivateVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, Ty,
4964  ImplicitParamDecl::Other);
4965  auto *PrivateRef = DeclRefExpr::Create(
4966  C, NestedNameSpecifierLoc(), SourceLocation(), PrivateVD,
4967  /*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue);
4968  QualType ElemType = C.getBaseElementType(Ty);
4969  auto *InitVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, ElemType,
4970  ImplicitParamDecl::Other);
4971  auto *InitRef = DeclRefExpr::Create(
4972  C, NestedNameSpecifierLoc(), SourceLocation(), InitVD,
4973  /*RefersToEnclosingVariableOrCapture=*/false, Loc, ElemType, VK_LValue);
4974  PrivateVD->setInitStyle(VarDecl::CInit);
4975  PrivateVD->setInit(ImplicitCastExpr::Create(C, ElemType, CK_LValueToRValue,
4976  InitRef, /*BasePath=*/nullptr,
4977  VK_PRValue, FPOptionsOverride()));
4978  Data.FirstprivateVars.emplace_back(OrigRef);
4979  Data.FirstprivateCopies.emplace_back(PrivateRef);
4980  Data.FirstprivateInits.emplace_back(InitRef);
4981  return OrigVD;
4982 }
4983 
4984 void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
4985  const OMPExecutableDirective &S, const RegionCodeGenTy &BodyGen,
4986  OMPTargetDataInfo &InputInfo) {
4987  // Emit outlined function for task construct.
4988  const CapturedStmt *CS = S.getCapturedStmt(OMPD_task);
4989  Address CapturedStruct = GenerateCapturedStmtArgument(*CS);
4990  QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
4991  auto I = CS->getCapturedDecl()->param_begin();
4992  auto PartId = std::next(I);
4993  auto TaskT = std::next(I, 4);
4994  OMPTaskDataTy Data;
4995  // The task is not final.
4996  Data.Final.setInt(/*IntVal=*/false);
4997  // Get list of firstprivate variables.
4998  for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
4999  auto IRef = C->varlist_begin();
5000  auto IElemInitRef = C->inits().begin();
5001  for (auto *IInit : C->private_copies()) {
5002  Data.FirstprivateVars.push_back(*IRef);
5003  Data.FirstprivateCopies.push_back(IInit);
5004  Data.FirstprivateInits.push_back(*IElemInitRef);
5005  ++IRef;
5006  ++IElemInitRef;
5007  }
5008  }
5009  SmallVector<const Expr *, 4> LHSs;
5010  SmallVector<const Expr *, 4> RHSs;
5011  for (const auto *C : S.getClausesOfKind<OMPInReductionClause>()) {
5012  Data.ReductionVars.append(C->varlist_begin(), C->varlist_end());
5013  Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end());
5014  Data.ReductionCopies.append(C->privates().begin(), C->privates().end());
5015  Data.ReductionOps.append(C->reduction_ops().begin(),
5016  C->reduction_ops().end());
5017  LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
5018  RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
5019  }
5020  OMPPrivateScope TargetScope(*this);
5021  VarDecl *BPVD = nullptr;
5022  VarDecl *PVD = nullptr;
5023  VarDecl *SVD = nullptr;
5024  VarDecl *MVD = nullptr;
5025  if (InputInfo.NumberOfTargetItems > 0) {
5026  auto *CD = CapturedDecl::Create(
5027  getContext(), getContext().getTranslationUnitDecl(), /*NumParams=*/0);
5028  llvm::APInt ArrSize(/*numBits=*/32, InputInfo.NumberOfTargetItems);
5029  QualType BaseAndPointerAndMapperType = getContext().getConstantArrayType(
5030  getContext().VoidPtrTy, ArrSize, nullptr, ArrayType::Normal,
5031  /*IndexTypeQuals=*/0);
5032  BPVD = createImplicitFirstprivateForType(
5033  getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc());
5034  PVD = createImplicitFirstprivateForType(
5035  getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc());
5036  QualType SizesType = getContext().getConstantArrayType(
5037  getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1),
5038  ArrSize, nullptr, ArrayType::Normal,
5039  /*IndexTypeQuals=*/0);
5040  SVD = createImplicitFirstprivateForType(getContext(), Data, SizesType, CD,
5041  S.getBeginLoc());
5042  TargetScope.addPrivate(BPVD, InputInfo.BasePointersArray);
5043  TargetScope.addPrivate(PVD, InputInfo.PointersArray);
5044  TargetScope.addPrivate(SVD, InputInfo.SizesArray);
5045  // If there is no user-defined mapper, the mapper array will be nullptr. In
5046  // this case, we don't need to privatize it.
5047  if (!isa_and_nonnull<llvm::ConstantPointerNull>(
5048  InputInfo.MappersArray.getPointer())) {
5049  MVD = createImplicitFirstprivateForType(
5050  getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc());
5051  TargetScope.addPrivate(MVD, InputInfo.MappersArray);
5052  }
5053  }
5054  (void)TargetScope.Privatize();
5055  buildDependences(S, Data);
5056  auto &&CodeGen = [&Data, &S, CS, &BodyGen, BPVD, PVD, SVD, MVD,
5057  &InputInfo](CodeGenFunction &CGF, PrePostActionTy &Action) {
5058  // Set proper addresses for generated private copies.
5059  OMPPrivateScope Scope(CGF);
5060  if (!Data.FirstprivateVars.empty()) {
5061  enum { PrivatesParam = 2, CopyFnParam = 3 };
5062  llvm::Value *CopyFn = CGF.Builder.CreateLoad(
5063  CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam)));
5064  llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(
5065  CS->getCapturedDecl()->getParam(PrivatesParam)));
5066  // Map privates.
5067  llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs;
5068  llvm::SmallVector<llvm::Value *, 16> CallArgs;
5069  llvm::SmallVector<llvm::Type *, 4> ParamTypes;
5070  CallArgs.push_back(PrivatesPtr);
5071  ParamTypes.push_back(PrivatesPtr->getType());
5072  for (const Expr *E : Data.FirstprivateVars) {
5073  const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
5074  Address PrivatePtr =
5075  CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
5076  ".firstpriv.ptr.addr");
5077  PrivatePtrs.emplace_back(VD, PrivatePtr);
5078  CallArgs.push_back(PrivatePtr.getPointer());
5079  ParamTypes.push_back(PrivatePtr.getType());
5080  }
5081  auto *CopyFnTy = llvm::FunctionType::get(CGF.Builder.getVoidTy(),
5082  ParamTypes, /*isVarArg=*/false);
5083  CopyFn = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
5084  CopyFn, CopyFnTy->getPointerTo());
5085  CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
5086  CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs);
5087  for (const auto &Pair : PrivatePtrs) {
5088  Address Replacement(
5089  CGF.Builder.CreateLoad(Pair.second),
5090  CGF.ConvertTypeForMem(Pair.first->getType().getNonReferenceType()),
5091  CGF.getContext().getDeclAlign(Pair.first));
5092  Scope.addPrivate(Pair.first, Replacement);
5093  }
5094  }
5095  CGF.processInReduction(S, Data, CGF, CS, Scope);
5096  if (InputInfo.NumberOfTargetItems > 0) {
5097  InputInfo.BasePointersArray = CGF.Builder.CreateConstArrayGEP(
5098  CGF.GetAddrOfLocalVar(BPVD), /*Index=*/0);
5099  InputInfo.PointersArray = CGF.Builder.CreateConstArrayGEP(
5100  CGF.GetAddrOfLocalVar(PVD), /*Index=*/0);
5101  InputInfo.SizesArray = CGF.Builder.CreateConstArrayGEP(
5102  CGF.GetAddrOfLocalVar(SVD), /*Index=*/0);
5103  // If MVD is nullptr, the mapper array is not privatized.
5104  if (MVD)
5105  InputInfo.MappersArray = CGF.Builder.CreateConstArrayGEP(
5106  CGF.GetAddrOfLocalVar(MVD), /*Index=*/0);
5107  }
5108 
5109  Action.Enter(CGF);
5110  OMPLexicalScope LexScope(CGF, S, OMPD_task, /*EmitPreInitStmt=*/false);
5111  BodyGen(CGF);
5112  };
5113  llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
5114  S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, /*Tied=*/true,
5115  Data.NumberOfParts);
5116  llvm::APInt TrueOrFalse(32, S.hasClausesOfKind<OMPNowaitClause>() ? 1 : 0);
5117  IntegerLiteral IfCond(getContext(), TrueOrFalse,
5118  getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
5119  SourceLocation());
5120  CGM.getOpenMPRuntime().emitTaskCall(*this, S.getBeginLoc(), S, OutlinedFn,
5121  SharedsTy, CapturedStruct, &IfCond, Data);
5122 }
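// Illustrative sketch (assumed user code, not part of this file): a device
// data construct with 'nowait' that is wrapped in an implicit task carrying
// the offload argument arrays, e.g.
//
//   #pragma omp target update to(a[:n]) nowait depend(in: a)
//
// The base-pointer/pointer/size (and optional mapper) arrays become implicit
// firstprivate arrays of the generated task; 'nowait' selects the IfCond
// integer literal passed to emitTaskCall above.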
5123 
5124 void CodeGenFunction::processInReduction(const OMPExecutableDirective &S,
5125  OMPTaskDataTy &Data,
5126  CodeGenFunction &CGF,
5127  const CapturedStmt *CS,
5128  OMPPrivateScope &Scope) {
5129  if (Data.Reductions) {
5130  OpenMPDirectiveKind CapturedRegion = S.getDirectiveKind();
5131  OMPLexicalScope LexScope(CGF, S, CapturedRegion);
5132  ReductionCodeGen RedCG(Data.ReductionVars, Data.ReductionVars,
5133  Data.ReductionCopies, Data.ReductionOps);
5134  llvm::Value *ReductionsPtr = CGF.Builder.CreateLoad(
5135  CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(4)));
5136  for (unsigned Cnt = 0, E = Data.ReductionVars.size(); Cnt < E; ++Cnt) {
5137  RedCG.emitSharedOrigLValue(CGF, Cnt);
5138  RedCG.emitAggregateType(CGF, Cnt);
5139  // FIXME: This must be removed once the runtime library is fixed.
5140  // Emit required threadprivate variables for
5141  // initializer/combiner/finalizer.
5142  CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
5143  RedCG, Cnt);
5144  Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
5145  CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
5146  Replacement =
5147  Address(CGF.EmitScalarConversion(
5148  Replacement.getPointer(), CGF.getContext().VoidPtrTy,
5149  CGF.getContext().getPointerType(
5150  Data.ReductionCopies[Cnt]->getType()),
5151  Data.ReductionCopies[Cnt]->getExprLoc()),
5152  CGF.ConvertTypeForMem(Data.ReductionCopies[Cnt]->getType()),
5153  Replacement.getAlignment());
5154  Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
5155  Scope.addPrivate(RedCG.getBaseDecl(Cnt), Replacement);
5156  }
5157  }
5158  (void)Scope.Privatize();
5159  SmallVector<const Expr *, 4> InRedVars;
5160  SmallVector<const Expr *, 4> InRedPrivs;
5161  SmallVector<const Expr *, 4> InRedOps;
5162  SmallVector<const Expr *, 4> TaskgroupDescriptors;
5163  for (const auto *C : S.getClausesOfKind<OMPInReductionClause>()) {
5164  auto IPriv = C->privates().begin();
5165  auto IRed = C->reduction_ops().begin();
5166  auto ITD = C->taskgroup_descriptors().begin();
5167  for (const Expr *Ref : C->varlists()) {
5168  InRedVars.emplace_back(Ref);
5169  InRedPrivs.emplace_back(*IPriv);
5170  InRedOps.emplace_back(*IRed);
5171  TaskgroupDescriptors.emplace_back(*ITD);
5172  std::advance(IPriv, 1);
5173  std::advance(IRed, 1);
5174  std::advance(ITD, 1);
5175  }
5176  }
5177  OMPPrivateScope InRedScope(CGF);
5178  if (!InRedVars.empty()) {
5179  ReductionCodeGen RedCG(InRedVars, InRedVars, InRedPrivs, InRedOps);
5180  for (unsigned Cnt = 0, E = InRedVars.size(); Cnt < E; ++Cnt) {
5181  RedCG.emitSharedOrigLValue(CGF, Cnt);
5182  RedCG.emitAggregateType(CGF, Cnt);
5183  // FIXME: This must be removed once the runtime library is fixed.
5184  // Emit required threadprivate variables for
5185  // initializer/combiner/finalizer.
5186  CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
5187  RedCG, Cnt);
5188  llvm::Value *ReductionsPtr;
5189  if (const Expr *TRExpr = TaskgroupDescriptors[Cnt]) {
5190  ReductionsPtr =
5191  CGF.EmitLoadOfScalar(CGF.EmitLValue(TRExpr), TRExpr->getExprLoc());
5192  } else {
5193  ReductionsPtr = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
5194  }
5195  Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
5196  CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
5197  Replacement = Address(
5198  CGF.EmitScalarConversion(
5199  Replacement.getPointer(), CGF.getContext().VoidPtrTy,
5200  CGF.getContext().getPointerType(InRedPrivs[Cnt]->getType()),
5201  InRedPrivs[Cnt]->getExprLoc()),
5202  CGF.ConvertTypeForMem(InRedPrivs[Cnt]->getType()),
5203  Replacement.getAlignment());
5204  Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
5205  InRedScope.addPrivate(RedCG.getBaseDecl(Cnt), Replacement);
5206  }
5207  }
5208  (void)InRedScope.Privatize();
5209 }
5210 
5211 void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
5212  // Emit outlined function for task construct.
5213  const CapturedStmt *CS = S.getCapturedStmt(OMPD_task);
5214  Address CapturedStruct = GenerateCapturedStmtArgument(*CS);
5215  QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
5216  const Expr *IfCond = nullptr;
5217  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
5218  if (C->getNameModifier() == OMPD_unknown ||
5219  C->getNameModifier() == OMPD_task) {
5220  IfCond = C->getCondition();
5221  break;
5222  }
5223  }
5224 
5225  OMPTaskDataTy Data;
5226  // Check if we should emit a tied or untied task.
5227  Data.Tied = !S.getSingleClause<OMPUntiedClause>();
5228  auto &&BodyGen = [CS](CodeGenFunction &CGF, PrePostActionTy &) {
5229  CGF.EmitStmt(CS->getCapturedStmt());
5230  };
5231  auto &&TaskGen = [&S, SharedsTy, CapturedStruct,
5232  IfCond](CodeGenFunction &CGF, llvm::Function *OutlinedFn,
5233  const OMPTaskDataTy &Data) {
5234  CGF.CGM.getOpenMPRuntime().emitTaskCall(CGF, S.getBeginLoc(), S, OutlinedFn,
5235  SharedsTy, CapturedStruct, IfCond,
5236  Data);
5237  };
5238  auto LPCRegion =
5239  CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
5240  EmitOMPTaskBasedDirective(S, OMPD_task, BodyGen, TaskGen, Data);
5241 }
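// Illustrative sketch (assumed user code, not part of this file): task clauses
// and the OMPTaskDataTy fields they populate, e.g.
//
//   #pragma omp task if(n > 16) final(depth > 8) priority(2) untied
//     solve(n, depth);   // 'solve' is hypothetical
//
// 'untied' clears Data.Tied here; 'final' and 'priority' are processed in
// EmitOMPTaskBasedDirective; the if-clause condition is forwarded to
// emitTaskCall, which emits the deferred/undeferred dispatch.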
5242 
5243 void CodeGenFunction::EmitOMPTaskyieldDirective(
5244  const OMPTaskyieldDirective &S) {
5245  CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getBeginLoc());
5246 }
5247 
5248 void CodeGenFunction::EmitOMPErrorDirective(const OMPErrorDirective &S) {
5249  llvm_unreachable("CodeGen for 'omp error' is not supported yet.");
5250 }
5251 
5252 void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) {
5253  CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_barrier);
5254 }
5255 
5256 void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) {
5257  OMPTaskDataTy Data;
5258  // Build list of dependences.
5259  buildDependences(S, Data);
5260  CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getBeginLoc(), Data);
5261 }
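// Illustrative sketch (assumed user code, not part of this file): since
// OpenMP 5.0, 'taskwait' may carry depend clauses, which buildDependences
// collects before the runtime call, e.g.
//
//   #pragma omp taskwait depend(in: x)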
5262 
5263 static bool isSupportedByOpenMPIRBuilder(const OMPTaskgroupDirective &T) {
5264  return T.clauses().empty();
5265 }
5266 
5267 void CodeGenFunction::EmitOMPTaskgroupDirective(
5268  const OMPTaskgroupDirective &S) {
5269  OMPLexicalScope Scope(*this, S, OMPD_unknown);
5270  if (CGM.getLangOpts().OpenMPIRBuilder && isSupportedByOpenMPIRBuilder(S)) {
5271  llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
5272  using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
5273  InsertPointTy AllocaIP(AllocaInsertPt->getParent(),
5274  AllocaInsertPt->getIterator());
5275 
5276  auto BodyGenCB = [&, this](InsertPointTy AllocaIP,
5277  InsertPointTy CodeGenIP) {
5278  Builder.restoreIP(CodeGenIP);
5279  EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
5280  };
5281  CGCapturedStmtInfo CapStmtInfo;
5282  if (!CapturedStmtInfo)
5283  CapturedStmtInfo = &CapStmtInfo;
5284  Builder.restoreIP(OMPBuilder.createTaskgroup(Builder, AllocaIP, BodyGenCB));
5285  return;
5286  }
5287  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
5288  Action.Enter(CGF);
5289  if (const Expr *E = S.getReductionRef()) {
5290  SmallVector<const Expr *, 4> LHSs;
5291  SmallVector<const Expr *, 4> RHSs;
5292  OMPTaskDataTy Data;
5293  for (const auto *C : S.getClausesOfKind<OMPTaskReductionClause>()) {
5294  Data.ReductionVars.append(C->varlist_begin(), C->varlist_end());
5295  Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end());
5296  Data.ReductionCopies.append(C->privates().begin(), C->privates().end());
5297  Data.ReductionOps.append(C->reduction_ops().begin(),
5298  C->reduction_ops().end());
5299  LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
5300  RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
5301  }
5302  llvm::Value *ReductionDesc =
5303  CGF.CGM.getOpenMPRuntime().emitTaskReductionInit(CGF, S.getBeginLoc(),
5304  LHSs, RHSs, Data);
5305  const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
5306  CGF.EmitVarDecl(*VD);
5307  CGF.EmitStoreOfScalar(ReductionDesc, CGF.GetAddrOfLocalVar(VD),
5308  /*Volatile=*/false, E->getType());
5309  }
5310  CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
5311  };
5312  CGM.getOpenMPRuntime().emitTaskgroupRegion(*this, CodeGen, S.getBeginLoc());
5313 }
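// Illustrative sketch (assumed user code, not part of this file): a taskgroup
// with a task reduction; the descriptor produced by emitTaskReductionInit is
// stored in the implicit reduction-ref variable that participating tasks
// read, e.g.
//
//   int r = 0;
//   #pragma omp taskgroup task_reduction(+: r)
//   {
//   #pragma omp task in_reduction(+: r)
//     r += partial();   // 'partial' is hypothetical
//   }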
5314 
5315 void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) {
5316  llvm::AtomicOrdering AO = S.getSingleClause<OMPFlushClause>()
5317  ? llvm::AtomicOrdering::NotAtomic
5318  : llvm::AtomicOrdering::AcquireRelease;
5319  CGM.getOpenMPRuntime().emitFlush(
5320  *this,
5321  [&S]() -> ArrayRef<const Expr *> {
5322  if (const auto *FlushClause = S.getSingleClause<OMPFlushClause>())
5323  return llvm::makeArrayRef(FlushClause->varlist_begin(),
5324  FlushClause->varlist_end());
5325  return llvm::None;
5326  }(),
5327  S.getBeginLoc(), AO);
5328 }
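// Illustrative sketch (assumed user code, not part of this file): a bare
// 'flush' is emitted with acquire-release ordering, while the list form (an
// OMPFlushClause) takes the NotAtomic path above, e.g.
//
//   #pragma omp flush         // acquire-release memory fence
//   #pragma omp flush(x, y)   // list form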
5329 
5330 void CodeGenFunction::EmitOMPDepobjDirective(const OMPDepobjDirective &S) {
5331  const auto *DO = S.getSingleClause<OMPDepobjClause>();
5332  LValue DOLVal = EmitLValue(DO->getDepobj());
5333  if (const auto *DC = S.getSingleClause<OMPDependClause>()) {
5334  OMPTaskDataTy::DependData Dependencies(DC->getDependencyKind(),
5335  DC->getModifier());
5336  Dependencies.DepExprs.append(DC->varlist_begin(), DC->varlist_end());
5337  Address DepAddr = CGM.getOpenMPRuntime().emitDepobjDependClause(
5338  *this, Dependencies, DC->getBeginLoc());
5339  EmitStoreOfScalar(DepAddr.getPointer(), DOLVal);
5340  return;
5341  }
5342  if (const auto *DC = S.getSingleClause<OMPDestroyClause>()) {
5343  CGM.getOpenMPRuntime().emitDestroyClause(*this, DOLVal, DC->getBeginLoc());
5344  return;
5345  }
5346  if (const auto *UC = S.getSingleClause<OMPUpdateClause>()) {
5347  CGM.getOpenMPRuntime().emitUpdateClause(
5348  *this, DOLVal, UC->getDependencyKind(), UC->getBeginLoc());
5349  return;
5350  }
5351 }
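// Illustrative sketch (assumed user code, not part of this file): each
// 'depobj' form maps onto one of the three branches above, e.g.
//
//   omp_depend_t d;
//   #pragma omp depobj(d) depend(inout: x)   // emitDepobjDependClause
//   #pragma omp depobj(d) update(in)         // emitUpdateClause
//   #pragma omp depobj(d) destroy            // emitDestroyClause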
5352 
5353 void CodeGenFunction::EmitOMPScanDirective(const OMPScanDirective &S) {
5354  if (!OMPParentLoopDirectiveForScan)
5355  return;
5356  const OMPExecutableDirective &ParentDir = *OMPParentLoopDirectiveForScan;
5357  bool IsInclusive = S.hasClausesOfKind<OMPInclusiveClause>();
5358  SmallVector<const Expr *, 4> Shareds;
5359  SmallVector<const Expr *, 4> Privates;
5360  SmallVector<const Expr *, 4> LHSs;
5361  SmallVector<const Expr *, 4> RHSs;
5362  SmallVector<const Expr *, 4> ReductionOps;
5363  SmallVector<const Expr *, 4> CopyOps;
5364  SmallVector<const Expr *, 4> CopyArrayTemps;
5365  SmallVector<const Expr *, 4> CopyArrayElems;
5366  for (const auto *C : ParentDir.getClausesOfKind<OMPReductionClause>()) {
5367  if (C->getModifier() != OMPC_REDUCTION_inscan)
5368  continue;
5369  Shareds.append(C->varlist_begin(), C->varlist_end());
5370  Privates.append(C->privates().begin(), C->privates().end());
5371  LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
5372  RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
5373  ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
5374  CopyOps.append(C->copy_ops().begin(), C->copy_ops().end());
5375  CopyArrayTemps.append(C->copy_array_temps().begin(),
5376  C->copy_array_temps().end());
5377  CopyArrayElems.append(C->copy_array_elems().begin(),
5378  C->copy_array_elems().end());
5379  }
5380  if (ParentDir.getDirectiveKind() == OMPD_simd ||
5381  (getLangOpts().OpenMPSimd &&
5382  isOpenMPSimdDirective(ParentDir.getDirectiveKind()))) {
5383  // For simd directive and simd-based directives in simd only mode, use the
5384  // following codegen:
5385  // int x = 0;
5386  // #pragma omp simd reduction(inscan, +: x)
5387  // for (..) {
5388  // <first part>
5389  // #pragma omp scan inclusive(x)
5390  // <second part>
5391  // }
5392  // is transformed to:
5393  // int x = 0;
5394  // for (..) {
5395  // int x_priv = 0;
5396  // <first part>
5397  // x = x_priv + x;
5398  // x_priv = x;
5399  // <second part>
5400  // }
5401  // and
5402  // int x = 0;
5403  // #pragma omp simd reduction(inscan, +: x)
5404  // for (..) {
5405  // <first part>
5406  // #pragma omp scan exclusive(x)
5407  // <second part>
5408  // }
5409  // to
5410  // int x = 0;
5411  // for (..) {
5412  // int x_priv = 0;
5413  // <second part>
5414  // int temp = x;
5415  // x = x_priv + x;
5416  // x_priv = temp;
5417  // <first part>
5418  // }
5419  llvm::BasicBlock *OMPScanReduce = createBasicBlock("omp.inscan.reduce");
5420  EmitBranch(IsInclusive
5421  ? OMPScanReduce
5422  : BreakContinueStack.back().ContinueBlock.getBlock());
5423  EmitBlock(OMPScanDispatch);
5424  {
5425  // New scope for correct construction/destruction of temp variables for
5426  // exclusive scan.
5427  LexicalScope Scope(*this, S.getSourceRange());
5428  EmitBranch(IsInclusive ? OMPBeforeScanBlock : OMPAfterScanBlock);
5429  EmitBlock(OMPScanReduce);
5430  if (!IsInclusive) {
5431  // Create temp var and copy LHS value to this temp value.
5432  // TMP = LHS;
5433  for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
5434  const Expr *PrivateExpr = Privates[I];
5435  const Expr *TempExpr = CopyArrayTemps[I];
5436  EmitAutoVarDecl(
5437  *cast<VarDecl>(cast<DeclRefExpr>(TempExpr)->getDecl()));
5438  LValue DestLVal = EmitLValue(TempExpr);
5439  LValue SrcLVal = EmitLValue(LHSs[I]);
5440  EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
5441  SrcLVal.getAddress(*this),
5442  cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
5443  cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
5444  CopyOps[I]);
5445  }
5446  }
5447  CGM.getOpenMPRuntime().emitReduction(
5448  *this, ParentDir.getEndLoc(), Privates, LHSs, RHSs, ReductionOps,
5449  {/*WithNowait=*/true, /*SimpleReduction=*/true, OMPD_simd});
5450  for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
5451  const Expr *PrivateExpr = Privates[I];
5452  LValue DestLVal;
5453  LValue SrcLVal;
5454  if (IsInclusive) {
5455  DestLVal = EmitLValue(RHSs[I]);
5456  SrcLVal = EmitLValue(LHSs[I]);
5457  } else {
5458  const Expr *TempExpr = CopyArrayTemps[I];
5459  DestLVal = EmitLValue(RHSs[I]);
5460  SrcLVal = EmitLValue(TempExpr);
5461  }
5462  EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
5463  SrcLVal.getAddress(*this),
5464  cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
5465  cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
5466  CopyOps[I]);
5467  }
5468  }
5469  EmitBranch(IsInclusive ? OMPAfterScanBlock : OMPBeforeScanBlock);
5470  OMPScanExitBlock = IsInclusive
5471  ? BreakContinueStack.back().ContinueBlock.getBlock()
5472  : OMPScanReduce;
5473  EmitBlock(OMPAfterScanBlock);
5474  return;
5475  }
5476  if (!IsInclusive) {
5477  EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock());
5478  EmitBlock(OMPScanExitBlock);
5479  }
5480  if (OMPFirstScanLoop) {
5481  // Emit buffer[i] = red; at the end of the input phase.
5482  const auto *IVExpr = cast<OMPLoopDirective>(ParentDir)
5483  .getIterationVariable()
5484  ->IgnoreParenImpCasts();
5485  LValue IdxLVal = EmitLValue(IVExpr);
5486  llvm::Value *IdxVal = EmitLoadOfScalar(IdxLVal, IVExpr->getExprLoc());
5487  IdxVal = Builder.CreateIntCast(IdxVal, SizeTy, /*isSigned=*/false);
5488  for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
5489  const Expr *PrivateExpr = Privates[I];
5490  const Expr *OrigExpr = Shareds[I];
5491  const Expr *CopyArrayElem = CopyArrayElems[I];
5492  OpaqueValueMapping IdxMapping(
5493  *this,
5494  cast<OpaqueValueExpr>(
5495  cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
5496  RValue::get(IdxVal));
5497  LValue DestLVal = EmitLValue(CopyArrayElem);
5498  LValue SrcLVal = EmitLValue(OrigExpr);
5499  EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
5500  SrcLVal.getAddress(*this),
5501  cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
5502  cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
5503  CopyOps[I]);
5504  }
5505  }
5506  EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock());
5507  if (IsInclusive) {
5508  EmitBlock(OMPScanExitBlock);
5509  EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock());
5510  }
5511  EmitBlock(OMPScanDispatch);
5512  if (!OMPFirstScanLoop) {
5513  // Emit red = buffer[i]; at the entrance to the scan phase.
5514  const auto *IVExpr = cast<OMPLoopDirective>(ParentDir)
5515  .getIterationVariable()
5516  ->IgnoreParenImpCasts();
5517  LValue IdxLVal = EmitLValue(IVExpr);
5518  llvm::Value *IdxVal = EmitLoadOfScalar(IdxLVal, IVExpr->getExprLoc());
5519  IdxVal = Builder.CreateIntCast(IdxVal, SizeTy, /*isSigned=*/false);
5520  llvm::BasicBlock *ExclusiveExitBB = nullptr;
5521  if (!IsInclusive) {
5522  llvm::BasicBlock *ContBB = createBasicBlock("omp.exclusive.dec");
5523  ExclusiveExitBB = createBasicBlock("omp.exclusive.copy.exit");
5524  llvm::Value *Cmp = Builder.CreateIsNull(IdxVal);
5525  Builder.CreateC