//===---- CGOpenMPRuntimeGPU.cpp - Interface to OpenMP GPU Runtimes ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This provides a generalized class for OpenMP runtime code generation
// specialized by GPU targets NVPTX and AMDGCN.
//
//===----------------------------------------------------------------------===//

#include "CGOpenMPRuntimeGPU.h"
#include "CodeGenFunction.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Cuda.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Frontend/OpenMP/OMPGridValues.h"
#include "llvm/Support/MathExtras.h"

using namespace clang;
using namespace CodeGen;
using namespace llvm::omp;
namespace {
/// Pre(post)-action for different OpenMP constructs specialized for NVPTX.
class NVPTXActionTy final : public PrePostActionTy {
  llvm::FunctionCallee EnterCallee = nullptr;
  ArrayRef<llvm::Value *> EnterArgs;
  llvm::FunctionCallee ExitCallee = nullptr;
  ArrayRef<llvm::Value *> ExitArgs;
  bool Conditional = false;
  llvm::BasicBlock *ContBlock = nullptr;

public:
  NVPTXActionTy(llvm::FunctionCallee EnterCallee,
                ArrayRef<llvm::Value *> EnterArgs,
                llvm::FunctionCallee ExitCallee,
                ArrayRef<llvm::Value *> ExitArgs, bool Conditional = false)
      : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
        ExitArgs(ExitArgs), Conditional(Conditional) {}
  void Enter(CodeGenFunction &CGF) override {
    llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs);
    if (Conditional) {
      llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes);
      auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
      ContBlock = CGF.createBasicBlock("omp_if.end");
      // Generate the branch (If-stmt)
      CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
      CGF.EmitBlock(ThenBlock);
    }
  }
  void Done(CodeGenFunction &CGF) {
    // Emit the rest of blocks/branches
    CGF.EmitBranch(ContBlock);
    CGF.EmitBlock(ContBlock, true);
  }
  void Exit(CodeGenFunction &CGF) override {
    CGF.EmitRuntimeCall(ExitCallee, ExitArgs);
  }
};

/// A class to track the execution mode when codegening directives within
/// a target region. The appropriate mode (SPMD|NON-SPMD) is set on entry
/// to the target region and used by containing directives such as 'parallel'
/// to emit optimized code.
class ExecutionRuntimeModesRAII {
private:
  CGOpenMPRuntimeGPU::ExecutionMode SavedExecMode =
      CGOpenMPRuntimeGPU::EM_Unknown;
  CGOpenMPRuntimeGPU::ExecutionMode &ExecMode;
  bool SavedRuntimeMode = false;
  bool *RuntimeMode = nullptr;

public:
  /// Constructor for Non-SPMD mode.
  ExecutionRuntimeModesRAII(CGOpenMPRuntimeGPU::ExecutionMode &ExecMode)
      : ExecMode(ExecMode) {
    SavedExecMode = ExecMode;
    ExecMode = CGOpenMPRuntimeGPU::EM_NonSPMD;
  }
  /// Constructor for SPMD mode.
  ExecutionRuntimeModesRAII(CGOpenMPRuntimeGPU::ExecutionMode &ExecMode,
                            bool &RuntimeMode, bool FullRuntimeMode)
      : ExecMode(ExecMode), RuntimeMode(&RuntimeMode) {
    SavedExecMode = ExecMode;
    SavedRuntimeMode = RuntimeMode;
    ExecMode = CGOpenMPRuntimeGPU::EM_SPMD;
    RuntimeMode = FullRuntimeMode;
  }
  ~ExecutionRuntimeModesRAII() {
    ExecMode = SavedExecMode;
    if (RuntimeMode)
      *RuntimeMode = SavedRuntimeMode;
  }
};
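
// Usage note: emitNonSPMDKernel below constructs
// `ExecutionRuntimeModesRAII ModeRAII(CurrentExecutionMode);` and
// emitSPMDKernel uses the SPMD overload, so CurrentExecutionMode (and the
// full-runtime flag) revert automatically when kernel emission finishes,
// even on early exits.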

/// GPU Configuration: This information can be derived from CUDA registers;
/// however, providing compile-time constants helps generate more efficient
/// code. For all practical purposes this is fine because the configuration
/// is the same for all known NVPTX architectures.
enum MachineConfiguration : unsigned {
  /// See "llvm/Frontend/OpenMP/OMPGridValues.h" for various related target
  /// specific Grid Values like GV_Warp_Size, GV_Slot_Size

  /// Global memory alignment for performance.
  GlobalMemoryAlignment = 128,

  /// Maximal size of the shared memory buffer.
  SharedMemorySize = 128,
};

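/// Look through array subscripts, array sections, and implicit casts to find
/// the underlying variable or member referenced by a data-sharing clause item;
/// e.g. for `a[i][j]` this returns the canonical declaration of `a`.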
static const ValueDecl *getPrivateItem(const Expr *RefExpr) {
  RefExpr = RefExpr->IgnoreParens();
  if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(RefExpr)) {
    const Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
    while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
      Base = TempASE->getBase()->IgnoreParenImpCasts();
    RefExpr = Base;
  } else if (auto *OASE = dyn_cast<OMPArraySectionExpr>(RefExpr)) {
    const Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
    while (const auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
      Base = TempOASE->getBase()->IgnoreParenImpCasts();
    while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
      Base = TempASE->getBase()->IgnoreParenImpCasts();
    RefExpr = Base;
  }
  RefExpr = RefExpr->IgnoreParenImpCasts();
  if (const auto *DE = dyn_cast<DeclRefExpr>(RefExpr))
    return cast<ValueDecl>(DE->getDecl()->getCanonicalDecl());
  const auto *ME = cast<MemberExpr>(RefExpr);
  return cast<ValueDecl>(ME->getMemberDecl()->getCanonicalDecl());
}

static RecordDecl *buildRecordForGlobalizedVars(
    ASTContext &C, ArrayRef<const ValueDecl *> EscapedDecls,
    ArrayRef<const ValueDecl *> EscapedDeclsForTeams,
    llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
        &MappedDeclsFields, int BufSize) {
  using VarsDataTy = std::pair<CharUnits /*Align*/, const ValueDecl *>;
  if (EscapedDecls.empty() && EscapedDeclsForTeams.empty())
    return nullptr;
  SmallVector<VarsDataTy, 4> GlobalizedVars;
  for (const ValueDecl *D : EscapedDecls)
    GlobalizedVars.emplace_back(
        CharUnits::fromQuantity(std::max(
            C.getDeclAlign(D).getQuantity(),
            static_cast<CharUnits::QuantityType>(GlobalMemoryAlignment))),
        D);
  for (const ValueDecl *D : EscapedDeclsForTeams)
    GlobalizedVars.emplace_back(C.getDeclAlign(D), D);
  llvm::stable_sort(GlobalizedVars, [](VarsDataTy L, VarsDataTy R) {
    return L.first > R.first;
  });

  // Build struct _globalized_locals_ty {
  //         /* globalized vars */[WarpSize] align (max(decl_align,
  //         GlobalMemoryAlignment))
  //         /* globalized vars */ for EscapedDeclsForTeams
  //       };
  RecordDecl *GlobalizedRD = C.buildImplicitRecord("_globalized_locals_ty");
  GlobalizedRD->startDefinition();
  llvm::SmallPtrSet<const ValueDecl *, 16> SingleEscaped(
      EscapedDeclsForTeams.begin(), EscapedDeclsForTeams.end());
  for (const auto &Pair : GlobalizedVars) {
    const ValueDecl *VD = Pair.second;
    QualType Type = VD->getType();
    if (VD->getType()->isLValueReferenceType())
      Type = C.getPointerType(Type.getNonReferenceType());
    else
      Type = Type.getNonReferenceType();
    SourceLocation Loc = VD->getLocation();
    FieldDecl *Field;
    if (SingleEscaped.count(VD)) {
      Field = FieldDecl::Create(
          C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
          C.getTrivialTypeSourceInfo(Type, SourceLocation()),
          /*BW=*/nullptr, /*Mutable=*/false,
          /*InitStyle=*/ICIS_NoInit);
      Field->setAccess(AS_public);
      if (VD->hasAttrs()) {
        for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
             E(VD->getAttrs().end());
             I != E; ++I)
          Field->addAttr(*I);
      }
    } else {
      llvm::APInt ArraySize(32, BufSize);
      Type = C.getConstantArrayType(Type, ArraySize, nullptr, ArrayType::Normal,
                                    0);
      Field = FieldDecl::Create(
          C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
          C.getTrivialTypeSourceInfo(Type, SourceLocation()),
          /*BW=*/nullptr, /*Mutable=*/false,
          /*InitStyle=*/ICIS_NoInit);
      Field->setAccess(AS_public);
      llvm::APInt Align(32, std::max(C.getDeclAlign(VD).getQuantity(),
                                     static_cast<CharUnits::QuantityType>(
                                         GlobalMemoryAlignment)));
      Field->addAttr(AlignedAttr::CreateImplicit(
          C, /*IsAlignmentExpr=*/true,
          IntegerLiteral::Create(C, Align,
                                 C.getIntTypeForBitwidth(32, /*Signed=*/0),
                                 SourceLocation()),
          {}, AttributeCommonInfo::AS_GNU, AlignedAttr::GNU_aligned));
    }
    GlobalizedRD->addDecl(Field);
    MappedDeclsFields.try_emplace(VD, Field);
  }
  GlobalizedRD->completeDefinition();
  return GlobalizedRD;
}
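
// Illustrative layout (a sketch, not verbatim output): for one escaped
// `int x` from a parallel region with BufSize == WarpSize == 32, the implicit
// record is roughly
//   struct _globalized_locals_ty {
//     int x[32] __attribute__((aligned(128))); // one slot per lane
//   };
// while declarations in EscapedDeclsForTeams become single, naturally aligned
// fields.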

/// Get the list of variables that can escape their declaration context.
class CheckVarsEscapingDeclContext final
    : public ConstStmtVisitor<CheckVarsEscapingDeclContext> {
  CodeGenFunction &CGF;
  llvm::SetVector<const ValueDecl *> EscapedDecls;
  llvm::SetVector<const ValueDecl *> EscapedVariableLengthDecls;
  llvm::SmallPtrSet<const Decl *, 4> EscapedParameters;
  RecordDecl *GlobalizedRD = nullptr;
  llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
  bool AllEscaped = false;
  bool IsForCombinedParallelRegion = false;

  void markAsEscaped(const ValueDecl *VD) {
    // Do not globalize declare target variables.
    if (!isa<VarDecl>(VD) ||
        OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
      return;
    VD = cast<ValueDecl>(VD->getCanonicalDecl());
    // Use user-specified allocation.
    if (VD->hasAttrs() && VD->hasAttr<OMPAllocateDeclAttr>())
      return;
    // Variables captured by value must be globalized.
    if (auto *CSI = CGF.CapturedStmtInfo) {
      if (const FieldDecl *FD = CSI->lookup(cast<VarDecl>(VD))) {
        // Check if we need to capture the variable that was already captured
        // by value in the outer region.
        if (!IsForCombinedParallelRegion) {
          if (!FD->hasAttrs())
            return;
          const auto *Attr = FD->getAttr<OMPCaptureKindAttr>();
          if (!Attr)
            return;
          if (((Attr->getCaptureKind() != OMPC_map) &&
               !isOpenMPPrivate(Attr->getCaptureKind())) ||
              ((Attr->getCaptureKind() == OMPC_map) &&
               !FD->getType()->isAnyPointerType()))
            return;
        }
        if (!FD->getType()->isReferenceType()) {
          assert(!VD->getType()->isVariablyModifiedType() &&
                 "Parameter captured by value with variably modified type");
          EscapedParameters.insert(VD);
        } else if (!IsForCombinedParallelRegion) {
          return;
        }
      }
    }
    if ((!CGF.CapturedStmtInfo ||
         (IsForCombinedParallelRegion && CGF.CapturedStmtInfo)) &&
        VD->getType()->isReferenceType())
      // Do not globalize variables with reference type.
      return;
    if (VD->getType()->isVariablyModifiedType())
      EscapedVariableLengthDecls.insert(VD);
    else
      EscapedDecls.insert(VD);
  }

  void VisitValueDecl(const ValueDecl *VD) {
    if (VD->getType()->isLValueReferenceType())
      markAsEscaped(VD);
    if (const auto *VarD = dyn_cast<VarDecl>(VD)) {
      if (!isa<ParmVarDecl>(VarD) && VarD->hasInit()) {
        const bool SavedAllEscaped = AllEscaped;
        AllEscaped = VD->getType()->isLValueReferenceType();
        Visit(VarD->getInit());
        AllEscaped = SavedAllEscaped;
      }
    }
  }
  void VisitOpenMPCapturedStmt(const CapturedStmt *S,
                               ArrayRef<OMPClause *> Clauses,
                               bool IsCombinedParallelRegion) {
    if (!S)
      return;
    for (const CapturedStmt::Capture &C : S->captures()) {
      if (C.capturesVariable() && !C.capturesVariableByCopy()) {
        const ValueDecl *VD = C.getCapturedVar();
        bool SavedIsForCombinedParallelRegion = IsForCombinedParallelRegion;
        if (IsCombinedParallelRegion) {
          // Check if the variable is privatized in the combined construct and
          // those private copies must be shared in the inner parallel
          // directive.
          IsForCombinedParallelRegion = false;
          for (const OMPClause *C : Clauses) {
            if (!isOpenMPPrivate(C->getClauseKind()) ||
                C->getClauseKind() == OMPC_reduction ||
                C->getClauseKind() == OMPC_linear ||
                C->getClauseKind() == OMPC_private)
              continue;
            ArrayRef<const Expr *> Vars;
            if (const auto *PC = dyn_cast<OMPFirstprivateClause>(C))
              Vars = PC->getVarRefs();
            else if (const auto *PC = dyn_cast<OMPLastprivateClause>(C))
              Vars = PC->getVarRefs();
            else
              llvm_unreachable("Unexpected clause.");
            for (const auto *E : Vars) {
              const Decl *D =
                  cast<DeclRefExpr>(E)->getDecl()->getCanonicalDecl();
              if (D == VD->getCanonicalDecl()) {
                IsForCombinedParallelRegion = true;
                break;
              }
            }
            if (IsForCombinedParallelRegion)
              break;
          }
        }
        markAsEscaped(VD);
        if (isa<OMPCapturedExprDecl>(VD))
          VisitValueDecl(VD);
        IsForCombinedParallelRegion = SavedIsForCombinedParallelRegion;
      }
    }
  }

  void buildRecordForGlobalizedVars(bool IsInTTDRegion) {
    assert(!GlobalizedRD &&
           "Record for globalized variables is built already.");
    ArrayRef<const ValueDecl *> EscapedDeclsForParallel, EscapedDeclsForTeams;
    unsigned WarpSize = CGF.getTarget().getGridValue().GV_Warp_Size;
    if (IsInTTDRegion)
      EscapedDeclsForTeams = EscapedDecls.getArrayRef();
    else
      EscapedDeclsForParallel = EscapedDecls.getArrayRef();
    GlobalizedRD = ::buildRecordForGlobalizedVars(
        CGF.getContext(), EscapedDeclsForParallel, EscapedDeclsForTeams,
        MappedDeclsFields, WarpSize);
  }

public:
  CheckVarsEscapingDeclContext(CodeGenFunction &CGF,
                               ArrayRef<const ValueDecl *> TeamsReductions)
      : CGF(CGF), EscapedDecls(TeamsReductions.begin(), TeamsReductions.end()) {
  }
  virtual ~CheckVarsEscapingDeclContext() = default;
  void VisitDeclStmt(const DeclStmt *S) {
    if (!S)
      return;
    for (const Decl *D : S->decls())
      if (const auto *VD = dyn_cast_or_null<ValueDecl>(D))
        VisitValueDecl(VD);
  }
  void VisitOMPExecutableDirective(const OMPExecutableDirective *D) {
    if (!D)
      return;
    if (!D->hasAssociatedStmt())
      return;
    if (const auto *S =
            dyn_cast_or_null<CapturedStmt>(D->getAssociatedStmt())) {
      // Do not analyze directives that do not actually require capturing,
      // like `omp for` or `omp simd` directives.
      llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
      getOpenMPCaptureRegions(CaptureRegions, D->getDirectiveKind());
      if (CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown) {
        VisitStmt(S->getCapturedStmt());
        return;
      }
      VisitOpenMPCapturedStmt(
          S, D->clauses(),
          CaptureRegions.back() == OMPD_parallel &&
              isOpenMPDistributeDirective(D->getDirectiveKind()));
    }
  }
  void VisitCapturedStmt(const CapturedStmt *S) {
    if (!S)
      return;
    for (const CapturedStmt::Capture &C : S->captures()) {
      if (C.capturesVariable() && !C.capturesVariableByCopy()) {
        const ValueDecl *VD = C.getCapturedVar();
        markAsEscaped(VD);
        if (isa<OMPCapturedExprDecl>(VD))
          VisitValueDecl(VD);
      }
    }
  }
  void VisitLambdaExpr(const LambdaExpr *E) {
    if (!E)
      return;
    for (const LambdaCapture &C : E->captures()) {
      if (C.capturesVariable()) {
        if (C.getCaptureKind() == LCK_ByRef) {
          const ValueDecl *VD = C.getCapturedVar();
          markAsEscaped(VD);
          if (E->isInitCapture(&C) || isa<OMPCapturedExprDecl>(VD))
            VisitValueDecl(VD);
        }
      }
    }
  }
  void VisitBlockExpr(const BlockExpr *E) {
    if (!E)
      return;
    for (const BlockDecl::Capture &C : E->getBlockDecl()->captures()) {
      if (C.isByRef()) {
        const VarDecl *VD = C.getVariable();
        markAsEscaped(VD);
        if (isa<OMPCapturedExprDecl>(VD) || VD->isInitCapture())
          VisitValueDecl(VD);
      }
    }
  }
  void VisitCallExpr(const CallExpr *E) {
    if (!E)
      return;
    for (const Expr *Arg : E->arguments()) {
      if (!Arg)
        continue;
      if (Arg->isLValue()) {
        const bool SavedAllEscaped = AllEscaped;
        AllEscaped = true;
        Visit(Arg);
        AllEscaped = SavedAllEscaped;
      } else {
        Visit(Arg);
      }
    }
    Visit(E->getCallee());
  }
  void VisitDeclRefExpr(const DeclRefExpr *E) {
    if (!E)
      return;
    const ValueDecl *VD = E->getDecl();
    if (AllEscaped)
      markAsEscaped(VD);
    if (isa<OMPCapturedExprDecl>(VD))
      VisitValueDecl(VD);
    else if (const auto *VarD = dyn_cast<VarDecl>(VD))
      if (VarD->isInitCapture())
        VisitValueDecl(VD);
  }
  void VisitUnaryOperator(const UnaryOperator *E) {
    if (!E)
      return;
    if (E->getOpcode() == UO_AddrOf) {
      const bool SavedAllEscaped = AllEscaped;
      AllEscaped = true;
      Visit(E->getSubExpr());
      AllEscaped = SavedAllEscaped;
    } else {
      Visit(E->getSubExpr());
    }
  }
  void VisitImplicitCastExpr(const ImplicitCastExpr *E) {
    if (!E)
      return;
    if (E->getCastKind() == CK_ArrayToPointerDecay) {
      const bool SavedAllEscaped = AllEscaped;
      AllEscaped = true;
      Visit(E->getSubExpr());
      AllEscaped = SavedAllEscaped;
    } else {
      Visit(E->getSubExpr());
    }
  }
  void VisitExpr(const Expr *E) {
    if (!E)
      return;
    bool SavedAllEscaped = AllEscaped;
    if (!E->isLValue())
      AllEscaped = false;
    for (const Stmt *Child : E->children())
      if (Child)
        Visit(Child);
    AllEscaped = SavedAllEscaped;
  }
  void VisitStmt(const Stmt *S) {
    if (!S)
      return;
    for (const Stmt *Child : S->children())
      if (Child)
        Visit(Child);
  }

  /// Returns the record that handles all the escaped local variables and is
  /// used instead of their original storage.
  const RecordDecl *getGlobalizedRecord(bool IsInTTDRegion) {
    if (!GlobalizedRD)
      buildRecordForGlobalizedVars(IsInTTDRegion);
    return GlobalizedRD;
  }

  /// Returns the field in the globalized record for the escaped variable.
  const FieldDecl *getFieldForGlobalizedVar(const ValueDecl *VD) const {
    assert(GlobalizedRD &&
           "Record for globalized variables must be generated already.");
    auto I = MappedDeclsFields.find(VD);
    if (I == MappedDeclsFields.end())
      return nullptr;
    return I->getSecond();
  }

  /// Returns the list of the escaped local variables/parameters.
  ArrayRef<const ValueDecl *> getEscapedDecls() const {
    return EscapedDecls.getArrayRef();
  }

  /// Checks if the escaped local variable is actually a parameter passed by
  /// value.
  const llvm::SmallPtrSetImpl<const Decl *> &getEscapedParameters() const {
    return EscapedParameters;
  }

  /// Returns the list of the escaped variables with the variably modified
  /// types.
  ArrayRef<const ValueDecl *> getEscapedVariableLengthDecls() const {
    return EscapedVariableLengthDecls.getArrayRef();
  }
};
} // anonymous namespace

/// Get the id of the warp in the block.
/// We assume that the warp size is 32, which is always the case
/// on the NVPTX device, to generate more efficient code.
static llvm::Value *getNVPTXWarpID(CodeGenFunction &CGF) {
  CGBuilderTy &Bld = CGF.Builder;
  unsigned LaneIDBits =
      llvm::Log2_32(CGF.getTarget().getGridValue().GV_Warp_Size);
  auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
  return Bld.CreateAShr(RT.getGPUThreadID(CGF), LaneIDBits, "nvptx_warp_id");
}

/// Get the id of the current lane in the Warp.
/// We assume that the warp size is 32, which is always the case
/// on the NVPTX device, to generate more efficient code.
static llvm::Value *getNVPTXLaneID(CodeGenFunction &CGF) {
  CGBuilderTy &Bld = CGF.Builder;
  unsigned LaneIDBits =
      llvm::Log2_32(CGF.getTarget().getGridValue().GV_Warp_Size);
  unsigned LaneIDMask = ~0u >> (32u - LaneIDBits);
  auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
  return Bld.CreateAnd(RT.getGPUThreadID(CGF), Bld.getInt32(LaneIDMask),
                       "nvptx_lane_id");
}
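
// Worked example for the two helpers above, assuming GV_Warp_Size == 32
// (LaneIDBits == 5, LaneIDMask == 31): thread id 37 has warp id
// 37 >> 5 == 1 and lane id 37 & 31 == 5.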

CGOpenMPRuntimeGPU::ExecutionMode
CGOpenMPRuntimeGPU::getExecutionMode() const {
  return CurrentExecutionMode;
}

static CGOpenMPRuntimeGPU::DataSharingMode
getDataSharingMode(CodeGenModule &CGM) {
  return CGM.getLangOpts().OpenMPCUDAMode ? CGOpenMPRuntimeGPU::CUDA
                                          : CGOpenMPRuntimeGPU::Generic;
}

/// Check for inner (nested) SPMD construct, if any.
static bool hasNestedSPMDDirective(ASTContext &Ctx,
                                   const OMPExecutableDirective &D) {
  const auto *CS = D.getInnermostCapturedStmt();
  const auto *Body =
      CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
  const Stmt *ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);

  if (const auto *NestedDir =
          dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
    OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
    switch (D.getDirectiveKind()) {
    case OMPD_target:
      if (isOpenMPParallelDirective(DKind))
        return true;
      if (DKind == OMPD_teams) {
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
        if (const auto *NND =
                dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPParallelDirective(DKind))
            return true;
        }
      }
      return false;
    case OMPD_target_teams:
      return isOpenMPParallelDirective(DKind);
    case OMPD_target_simd:
    case OMPD_target_parallel:
    case OMPD_target_parallel_for:
    case OMPD_target_parallel_for_simd:
    case OMPD_target_teams_distribute:
    case OMPD_target_teams_distribute_simd:
    case OMPD_target_teams_distribute_parallel_for:
    case OMPD_target_teams_distribute_parallel_for_simd:
    case OMPD_parallel:
    case OMPD_for:
    case OMPD_parallel_for:
    case OMPD_parallel_master:
    case OMPD_parallel_sections:
    case OMPD_for_simd:
    case OMPD_parallel_for_simd:
    case OMPD_cancel:
    case OMPD_cancellation_point:
    case OMPD_ordered:
    case OMPD_threadprivate:
    case OMPD_allocate:
    case OMPD_task:
    case OMPD_simd:
    case OMPD_sections:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_atomic:
    case OMPD_flush:
    case OMPD_depobj:
    case OMPD_scan:
    case OMPD_teams:
    case OMPD_target_data:
    case OMPD_target_exit_data:
    case OMPD_target_enter_data:
    case OMPD_distribute:
    case OMPD_distribute_simd:
    case OMPD_distribute_parallel_for:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_target_update:
    case OMPD_declare_simd:
    case OMPD_declare_variant:
    case OMPD_begin_declare_variant:
    case OMPD_end_declare_variant:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_declare_reduction:
    case OMPD_declare_mapper:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_master_taskloop:
    case OMPD_master_taskloop_simd:
    case OMPD_parallel_master_taskloop:
    case OMPD_parallel_master_taskloop_simd:
    case OMPD_requires:
    case OMPD_unknown:
    default:
      llvm_unreachable("Unexpected directive.");
    }
  }

  return false;
}

static bool supportsSPMDExecutionMode(ASTContext &Ctx,
                                      const OMPExecutableDirective &D) {
  OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
  switch (DirectiveKind) {
  case OMPD_target:
  case OMPD_target_teams:
    return hasNestedSPMDDirective(Ctx, D);
  case OMPD_target_parallel:
  case OMPD_target_parallel_for:
  case OMPD_target_parallel_for_simd:
  case OMPD_target_teams_distribute_parallel_for:
  case OMPD_target_teams_distribute_parallel_for_simd:
  case OMPD_target_simd:
  case OMPD_target_teams_distribute_simd:
    return true;
  case OMPD_target_teams_distribute:
    return false;
  case OMPD_parallel:
  case OMPD_for:
  case OMPD_parallel_for:
  case OMPD_parallel_master:
  case OMPD_parallel_sections:
  case OMPD_for_simd:
  case OMPD_parallel_for_simd:
  case OMPD_cancel:
  case OMPD_cancellation_point:
  case OMPD_ordered:
  case OMPD_threadprivate:
  case OMPD_allocate:
  case OMPD_task:
  case OMPD_simd:
  case OMPD_sections:
  case OMPD_section:
  case OMPD_single:
  case OMPD_master:
  case OMPD_critical:
  case OMPD_taskyield:
  case OMPD_barrier:
  case OMPD_taskwait:
  case OMPD_taskgroup:
  case OMPD_atomic:
  case OMPD_flush:
  case OMPD_depobj:
  case OMPD_scan:
  case OMPD_teams:
  case OMPD_target_data:
  case OMPD_target_exit_data:
  case OMPD_target_enter_data:
  case OMPD_distribute:
  case OMPD_distribute_simd:
  case OMPD_distribute_parallel_for:
  case OMPD_distribute_parallel_for_simd:
  case OMPD_teams_distribute:
  case OMPD_teams_distribute_simd:
  case OMPD_teams_distribute_parallel_for:
  case OMPD_teams_distribute_parallel_for_simd:
  case OMPD_target_update:
  case OMPD_declare_simd:
  case OMPD_declare_variant:
  case OMPD_begin_declare_variant:
  case OMPD_end_declare_variant:
  case OMPD_declare_target:
  case OMPD_end_declare_target:
  case OMPD_declare_reduction:
  case OMPD_declare_mapper:
  case OMPD_taskloop:
  case OMPD_taskloop_simd:
  case OMPD_master_taskloop:
  case OMPD_master_taskloop_simd:
  case OMPD_parallel_master_taskloop:
  case OMPD_parallel_master_taskloop_simd:
  case OMPD_requires:
  case OMPD_unknown:
  default:
    break;
  }
  llvm_unreachable(
      "Unknown programming model for OpenMP directive on NVPTX target.");
}

/// Check if the loop-based directive either has no schedule clause at all or
/// has static scheduling.
static bool hasStaticScheduling(const OMPExecutableDirective &D) {
  assert(isOpenMPWorksharingDirective(D.getDirectiveKind()) &&
         isOpenMPLoopDirective(D.getDirectiveKind()) &&
         "Expected loop-based directive.");
  return !D.hasClausesOfKind<OMPOrderedClause>() &&
         (!D.hasClausesOfKind<OMPScheduleClause>() ||
          llvm::any_of(D.getClausesOfKind<OMPScheduleClause>(),
                       [](const OMPScheduleClause *C) {
                         return C->getScheduleKind() == OMPC_SCHEDULE_static;
                       }));
}
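
// For example, `#pragma omp target parallel for schedule(static)` (or the
// same directive with no schedule clause at all) passes this check, while a
// `schedule(dynamic)` or an `ordered` clause makes it fail.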

/// Check for inner (nested) lightweight runtime construct, if any.
static bool hasNestedLightweightDirective(ASTContext &Ctx,
                                          const OMPExecutableDirective &D) {
  assert(supportsSPMDExecutionMode(Ctx, D) && "Expected SPMD mode directive.");
  const auto *CS = D.getInnermostCapturedStmt();
  const auto *Body =
      CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
  const Stmt *ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);

  if (const auto *NestedDir =
          dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
    OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
    switch (D.getDirectiveKind()) {
    case OMPD_target:
      if (isOpenMPParallelDirective(DKind) &&
          isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) &&
          hasStaticScheduling(*NestedDir))
        return true;
      if (DKind == OMPD_teams_distribute_simd || DKind == OMPD_simd)
        return true;
      if (DKind == OMPD_parallel) {
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
        if (const auto *NND =
                dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPWorksharingDirective(DKind) &&
              isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
            return true;
        }
      } else if (DKind == OMPD_teams) {
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
        if (const auto *NND =
                dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPParallelDirective(DKind) &&
              isOpenMPWorksharingDirective(DKind) &&
              isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
            return true;
          if (DKind == OMPD_parallel) {
            Body = NND->getInnermostCapturedStmt()->IgnoreContainers(
                /*IgnoreCaptured=*/true);
            if (!Body)
              return false;
            ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
            if (const auto *NND =
                    dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
              DKind = NND->getDirectiveKind();
              if (isOpenMPWorksharingDirective(DKind) &&
                  isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
                return true;
            }
          }
        }
      }
      return false;
    case OMPD_target_teams:
      if (isOpenMPParallelDirective(DKind) &&
          isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) &&
          hasStaticScheduling(*NestedDir))
        return true;
      if (DKind == OMPD_distribute_simd || DKind == OMPD_simd)
        return true;
      if (DKind == OMPD_parallel) {
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
        if (const auto *NND =
                dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPWorksharingDirective(DKind) &&
              isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
            return true;
        }
      }
      return false;
    case OMPD_target_parallel:
      if (DKind == OMPD_simd)
        return true;
      return isOpenMPWorksharingDirective(DKind) &&
             isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NestedDir);
    case OMPD_target_teams_distribute:
    case OMPD_target_simd:
    case OMPD_target_parallel_for:
    case OMPD_target_parallel_for_simd:
    case OMPD_target_teams_distribute_simd:
    case OMPD_target_teams_distribute_parallel_for:
    case OMPD_target_teams_distribute_parallel_for_simd:
    case OMPD_parallel:
    case OMPD_for:
    case OMPD_parallel_for:
    case OMPD_parallel_master:
    case OMPD_parallel_sections:
    case OMPD_for_simd:
    case OMPD_parallel_for_simd:
    case OMPD_cancel:
    case OMPD_cancellation_point:
    case OMPD_ordered:
    case OMPD_threadprivate:
    case OMPD_allocate:
    case OMPD_task:
    case OMPD_simd:
    case OMPD_sections:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_atomic:
    case OMPD_flush:
    case OMPD_depobj:
    case OMPD_scan:
    case OMPD_teams:
    case OMPD_target_data:
    case OMPD_target_exit_data:
    case OMPD_target_enter_data:
    case OMPD_distribute:
    case OMPD_distribute_simd:
    case OMPD_distribute_parallel_for:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_target_update:
    case OMPD_declare_simd:
    case OMPD_declare_variant:
    case OMPD_begin_declare_variant:
    case OMPD_end_declare_variant:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_declare_reduction:
    case OMPD_declare_mapper:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_master_taskloop:
    case OMPD_master_taskloop_simd:
    case OMPD_parallel_master_taskloop:
    case OMPD_parallel_master_taskloop_simd:
    case OMPD_requires:
    case OMPD_unknown:
    default:
      llvm_unreachable("Unexpected directive.");
    }
  }

  return false;
}

/// Checks if the construct supports lightweight runtime. It must be SPMD
/// construct + inner loop-based construct with static scheduling.
static bool supportsLightweightRuntime(ASTContext &Ctx,
                                       const OMPExecutableDirective &D) {
  if (!supportsSPMDExecutionMode(Ctx, D))
    return false;
  OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
  switch (DirectiveKind) {
  case OMPD_target:
  case OMPD_target_teams:
  case OMPD_target_parallel:
    return hasNestedLightweightDirective(Ctx, D);
  case OMPD_target_parallel_for:
  case OMPD_target_parallel_for_simd:
  case OMPD_target_teams_distribute_parallel_for:
  case OMPD_target_teams_distribute_parallel_for_simd:
    // (Last|First)-privates must be shared in parallel region.
    return hasStaticScheduling(D);
  case OMPD_target_simd:
  case OMPD_target_teams_distribute_simd:
    return true;
  case OMPD_target_teams_distribute:
    return false;
  case OMPD_parallel:
  case OMPD_for:
  case OMPD_parallel_for:
  case OMPD_parallel_master:
  case OMPD_parallel_sections:
  case OMPD_for_simd:
  case OMPD_parallel_for_simd:
  case OMPD_cancel:
  case OMPD_cancellation_point:
  case OMPD_ordered:
  case OMPD_threadprivate:
  case OMPD_allocate:
  case OMPD_task:
  case OMPD_simd:
  case OMPD_sections:
  case OMPD_section:
  case OMPD_single:
  case OMPD_master:
  case OMPD_critical:
  case OMPD_taskyield:
  case OMPD_barrier:
  case OMPD_taskwait:
  case OMPD_taskgroup:
  case OMPD_atomic:
  case OMPD_flush:
  case OMPD_depobj:
  case OMPD_scan:
  case OMPD_teams:
  case OMPD_target_data:
  case OMPD_target_exit_data:
  case OMPD_target_enter_data:
  case OMPD_distribute:
  case OMPD_distribute_simd:
  case OMPD_distribute_parallel_for:
  case OMPD_distribute_parallel_for_simd:
  case OMPD_teams_distribute:
  case OMPD_teams_distribute_simd:
  case OMPD_teams_distribute_parallel_for:
  case OMPD_teams_distribute_parallel_for_simd:
  case OMPD_target_update:
  case OMPD_declare_simd:
  case OMPD_declare_variant:
  case OMPD_begin_declare_variant:
  case OMPD_end_declare_variant:
  case OMPD_declare_target:
  case OMPD_end_declare_target:
  case OMPD_declare_reduction:
  case OMPD_declare_mapper:
  case OMPD_taskloop:
  case OMPD_taskloop_simd:
  case OMPD_master_taskloop:
  case OMPD_master_taskloop_simd:
  case OMPD_parallel_master_taskloop:
  case OMPD_parallel_master_taskloop_simd:
  case OMPD_requires:
  case OMPD_unknown:
  default:
    break;
  }
  llvm_unreachable(
      "Unknown programming model for OpenMP directive on NVPTX target.");
}

void CGOpenMPRuntimeGPU::emitNonSPMDKernel(const OMPExecutableDirective &D,
                                           StringRef ParentName,
                                           llvm::Function *&OutlinedFn,
                                           llvm::Constant *&OutlinedFnID,
                                           bool IsOffloadEntry,
                                           const RegionCodeGenTy &CodeGen) {
  ExecutionRuntimeModesRAII ModeRAII(CurrentExecutionMode);
  EntryFunctionState EST;
  WrapperFunctionsMap.clear();

  // Emit target region as a standalone region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    CGOpenMPRuntimeGPU::EntryFunctionState &EST;

  public:
    NVPTXPrePostActionTy(CGOpenMPRuntimeGPU::EntryFunctionState &EST)
        : EST(EST) {}
    void Enter(CodeGenFunction &CGF) override {
      auto &RT =
          static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
      RT.emitKernelInit(CGF, EST, /* IsSPMD */ false);
      // Skip target region initialization.
      RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
    }
    void Exit(CodeGenFunction &CGF) override {
      auto &RT =
          static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
      RT.clearLocThreadIdInsertPt(CGF);
      RT.emitKernelDeinit(CGF, EST, /* IsSPMD */ false);
    }
  } Action(EST);
  CodeGen.setAction(Action);
  IsInTTDRegion = true;
  emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
                                   IsOffloadEntry, CodeGen);
  IsInTTDRegion = false;
}

void CGOpenMPRuntimeGPU::emitKernelInit(CodeGenFunction &CGF,
                                        EntryFunctionState &EST, bool IsSPMD) {
  CGBuilderTy &Bld = CGF.Builder;
  Bld.restoreIP(
      OMPBuilder.createTargetInit(Bld, IsSPMD, requiresFullRuntime()));
  IsInTargetMasterThreadRegion = IsSPMD;
  if (!IsSPMD)
    emitGenericVarsProlog(CGF, EST.Loc);
}
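
// Note: createTargetInit emits the __kmpc_target_init call that opens every
// kernel; in generic (non-SPMD) mode the device runtime also parks the worker
// threads in its state machine there until the main thread publishes parallel
// work, which is why no explicit worker loop is emitted here.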

void CGOpenMPRuntimeGPU::emitKernelDeinit(CodeGenFunction &CGF,
                                          EntryFunctionState &EST,
                                          bool IsSPMD) {
  if (!IsSPMD)
    emitGenericVarsEpilog(CGF);

  CGBuilderTy &Bld = CGF.Builder;
  OMPBuilder.createTargetDeinit(Bld, IsSPMD, requiresFullRuntime());
}

void CGOpenMPRuntimeGPU::emitSPMDKernel(const OMPExecutableDirective &D,
                                        StringRef ParentName,
                                        llvm::Function *&OutlinedFn,
                                        llvm::Constant *&OutlinedFnID,
                                        bool IsOffloadEntry,
                                        const RegionCodeGenTy &CodeGen) {
  ExecutionRuntimeModesRAII ModeRAII(
      CurrentExecutionMode, RequiresFullRuntime,
      CGM.getLangOpts().OpenMPCUDAForceFullRuntime ||
          !supportsLightweightRuntime(CGM.getContext(), D));
  EntryFunctionState EST;

  // Emit target region as a standalone region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    CGOpenMPRuntimeGPU &RT;
    CGOpenMPRuntimeGPU::EntryFunctionState &EST;

  public:
    NVPTXPrePostActionTy(CGOpenMPRuntimeGPU &RT,
                         CGOpenMPRuntimeGPU::EntryFunctionState &EST)
        : RT(RT), EST(EST) {}
    void Enter(CodeGenFunction &CGF) override {
      RT.emitKernelInit(CGF, EST, /* IsSPMD */ true);
      // Skip target region initialization.
      RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
    }
    void Exit(CodeGenFunction &CGF) override {
      RT.clearLocThreadIdInsertPt(CGF);
      RT.emitKernelDeinit(CGF, EST, /* IsSPMD */ true);
    }
  } Action(*this, EST);
  CodeGen.setAction(Action);
  IsInTTDRegion = true;
  emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
                                   IsOffloadEntry, CodeGen);
  IsInTTDRegion = false;
}

// Create a unique global variable to indicate the execution mode of this
// target region. The execution mode is either 'generic', or 'spmd' depending
// on the target directive. This variable is picked up by the offload library
// to setup the device appropriately before kernel launch. If the execution
// mode is 'generic', the runtime reserves one warp for the master, otherwise,
// all warps participate in parallel work.
static void setPropertyExecutionMode(CodeGenModule &CGM, StringRef Name,
                                     bool Mode) {
  auto *GVMode = new llvm::GlobalVariable(
      CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
      llvm::GlobalValue::WeakAnyLinkage,
      llvm::ConstantInt::get(CGM.Int8Ty, Mode ? OMP_TGT_EXEC_MODE_SPMD
                                              : OMP_TGT_EXEC_MODE_GENERIC),
      Twine(Name, "_exec_mode"));
  CGM.addCompilerUsedGlobal(GVMode);
}
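
// For a kernel `foo` this emits, roughly (mode values taken from
// OMPConstants.h; shown for illustration only):
//   @foo_exec_mode = weak constant i8 2  ; OMP_TGT_EXEC_MODE_SPMD
//   @foo_exec_mode = weak constant i8 1  ; OMP_TGT_EXEC_MODE_GENERIC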

void CGOpenMPRuntimeGPU::createOffloadEntry(llvm::Constant *ID,
                                            llvm::Constant *Addr,
                                            uint64_t Size, int32_t,
                                            llvm::GlobalValue::LinkageTypes) {
  // TODO: Add support for global variables on the device after declare target
  // support.
  llvm::Function *Fn = dyn_cast<llvm::Function>(Addr);
  if (!Fn)
    return;

  llvm::Module &M = CGM.getModule();
  llvm::LLVMContext &Ctx = CGM.getLLVMContext();

  // Get "nvvm.annotations" metadata node.
  llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("nvvm.annotations");

  llvm::Metadata *MDVals[] = {
      llvm::ConstantAsMetadata::get(Fn), llvm::MDString::get(Ctx, "kernel"),
      llvm::ConstantAsMetadata::get(
          llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), 1))};
  // Append metadata to nvvm.annotations.
  MD->addOperand(llvm::MDNode::get(Ctx, MDVals));

  // Add a function attribute for the kernel.
  Fn->addFnAttr(llvm::Attribute::get(Ctx, "kernel"));
}
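
// The appended operand tuple is (function, "kernel", 1), i.e. roughly:
//   !nvvm.annotations = !{!0}
//   !0 = !{ptr @target_fn, !"kernel", i32 1}
// which is how the NVPTX backend recognizes kernel entry points.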

void CGOpenMPRuntimeGPU::emitTargetOutlinedFunction(
    const OMPExecutableDirective &D, StringRef ParentName,
    llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
    bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
  if (!IsOffloadEntry) // Nothing to do.
    return;

  assert(!ParentName.empty() && "Invalid target region parent name!");

  bool Mode = supportsSPMDExecutionMode(CGM.getContext(), D);
  if (Mode)
    emitSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
                   CodeGen);
  else
    emitNonSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
                      CodeGen);

  setPropertyExecutionMode(CGM, OutlinedFn->getName(), Mode);
}

namespace {
LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
/// Enum for accessing the reserved_2 field of the ident_t struct.
enum ModeFlagsTy : unsigned {
  /// Bit set to 1 when in SPMD mode.
  KMP_IDENT_SPMD_MODE = 0x01,
  /// Bit set to 1 when a simplified runtime is used.
  KMP_IDENT_SIMPLE_RT_MODE = 0x02,
  LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/KMP_IDENT_SIMPLE_RT_MODE)
};

/// Special mode Undefined. It is the combination of Non-SPMD mode and simple
/// runtime.
static const ModeFlagsTy UndefinedMode =
    (~KMP_IDENT_SPMD_MODE) & KMP_IDENT_SIMPLE_RT_MODE;
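// Working out the bit math: (~0x01) & 0x02 == 0x02, i.e. UndefinedMode has
// the SPMD bit clear and the simple-runtime bit set.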
} // anonymous namespace

unsigned CGOpenMPRuntimeGPU::getDefaultLocationReserved2Flags() const {
  switch (getExecutionMode()) {
  case EM_SPMD:
    if (requiresFullRuntime())
      return KMP_IDENT_SPMD_MODE & (~KMP_IDENT_SIMPLE_RT_MODE);
    return KMP_IDENT_SPMD_MODE | KMP_IDENT_SIMPLE_RT_MODE;
  case EM_NonSPMD:
    assert(requiresFullRuntime() && "Expected full runtime.");
    return (~KMP_IDENT_SPMD_MODE) & (~KMP_IDENT_SIMPLE_RT_MODE);
  case EM_Unknown:
    return UndefinedMode;
  }
  llvm_unreachable("Unknown flags are requested.");
}

CGOpenMPRuntimeGPU::CGOpenMPRuntimeGPU(CodeGenModule &CGM)
    : CGOpenMPRuntime(CGM, "_", "$") {
  if (!CGM.getLangOpts().OpenMPIsDevice)
    llvm_unreachable("OpenMP can only handle device code.");

  llvm::OpenMPIRBuilder &OMPBuilder = getOMPBuilder();
  if (CGM.getLangOpts().NoGPULib || CGM.getLangOpts().OMPHostIRFile.empty())
    return;

  OMPBuilder.createGlobalFlag(CGM.getLangOpts().OpenMPTargetDebug,
                              "__omp_rtl_debug_kind");
  OMPBuilder.createGlobalFlag(CGM.getLangOpts().OpenMPTeamSubscription,
                              "__omp_rtl_assume_teams_oversubscription");
  OMPBuilder.createGlobalFlag(CGM.getLangOpts().OpenMPThreadSubscription,
                              "__omp_rtl_assume_threads_oversubscription");
  OMPBuilder.createGlobalFlag(CGM.getLangOpts().OpenMPNoThreadState,
                              "__omp_rtl_assume_no_thread_state");
}

void CGOpenMPRuntimeGPU::emitProcBindClause(CodeGenFunction &CGF,
                                            ProcBindKind ProcBind,
                                            SourceLocation Loc) {
  // Do nothing in case of SPMD mode and L0 parallel.
  if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
    return;

  CGOpenMPRuntime::emitProcBindClause(CGF, ProcBind, Loc);
}

void CGOpenMPRuntimeGPU::emitNumThreadsClause(CodeGenFunction &CGF,
                                              llvm::Value *NumThreads,
                                              SourceLocation Loc) {
  // Nothing to do.
}

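// No-op on the device: the number of teams is fixed at kernel launch time,
// so the clause produces no code here.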
void CGOpenMPRuntimeGPU::emitNumTeamsClause(CodeGenFunction &CGF,
                                            const Expr *NumTeams,
                                            const Expr *ThreadLimit,
                                            SourceLocation Loc) {}

llvm::Function *CGOpenMPRuntimeGPU::emitParallelOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
  // Emit target region as a standalone region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    bool &IsInParallelRegion;
    bool PrevIsInParallelRegion;

  public:
    NVPTXPrePostActionTy(bool &IsInParallelRegion)
        : IsInParallelRegion(IsInParallelRegion) {}
    void Enter(CodeGenFunction &CGF) override {
      PrevIsInParallelRegion = IsInParallelRegion;
      IsInParallelRegion = true;
    }
    void Exit(CodeGenFunction &CGF) override {
      IsInParallelRegion = PrevIsInParallelRegion;
    }
  } Action(IsInParallelRegion);
  CodeGen.setAction(Action);
  bool PrevIsInTTDRegion = IsInTTDRegion;
  IsInTTDRegion = false;
  bool PrevIsInTargetMasterThreadRegion = IsInTargetMasterThreadRegion;
  IsInTargetMasterThreadRegion = false;
  auto *OutlinedFun =
      cast<llvm::Function>(CGOpenMPRuntime::emitParallelOutlinedFunction(
          D, ThreadIDVar, InnermostKind, CodeGen));
  IsInTargetMasterThreadRegion = PrevIsInTargetMasterThreadRegion;
  IsInTTDRegion = PrevIsInTTDRegion;
  if (getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD &&
      !IsInParallelRegion) {
    llvm::Function *WrapperFun =
        createParallelDataSharingWrapper(OutlinedFun, D);
    WrapperFunctionsMap[OutlinedFun] = WrapperFun;
  }

  return OutlinedFun;
}

/// Get list of lastprivate variables from the teams distribute ... or
/// teams {distribute ...} directives.
static void
getDistributeLastprivateVars(ASTContext &Ctx, const OMPExecutableDirective &D,
                             llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
  assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
         "expected teams directive.");
  const OMPExecutableDirective *Dir = &D;
  if (!isOpenMPDistributeDirective(D.getDirectiveKind())) {
    if (const Stmt *S = CGOpenMPRuntime::getSingleCompoundChild(
            Ctx,
            D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers(
                /*IgnoreCaptured=*/true))) {
      Dir = dyn_cast_or_null<OMPExecutableDirective>(S);
      if (Dir && !isOpenMPDistributeDirective(Dir->getDirectiveKind()))
        Dir = nullptr;
    }
  }
  if (!Dir)
    return;
  for (const auto *C : Dir->getClausesOfKind<OMPLastprivateClause>()) {
    for (const Expr *E : C->getVarRefs())
      Vars.push_back(getPrivateItem(E));
  }
}

/// Get list of reduction variables from the teams ... directives.
static void
getTeamsReductionVars(ASTContext &Ctx, const OMPExecutableDirective &D,
                      llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
  assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
         "expected teams directive.");
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    for (const Expr *E : C->privates())
      Vars.push_back(getPrivateItem(E));
  }
}

llvm::Function *CGOpenMPRuntimeGPU::emitTeamsOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
  SourceLocation Loc = D.getBeginLoc();

  const RecordDecl *GlobalizedRD = nullptr;
  llvm::SmallVector<const ValueDecl *, 4> LastPrivatesReductions;
  llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
  unsigned WarpSize = CGM.getTarget().getGridValue().GV_Warp_Size;
  // Globalize team reductions variable unconditionally in all modes.
  if (getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
    getTeamsReductionVars(CGM.getContext(), D, LastPrivatesReductions);
  if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD) {
    getDistributeLastprivateVars(CGM.getContext(), D, LastPrivatesReductions);
    if (!LastPrivatesReductions.empty()) {
      GlobalizedRD = ::buildRecordForGlobalizedVars(
          CGM.getContext(), llvm::None, LastPrivatesReductions,
          MappedDeclsFields, WarpSize);
    }
  } else if (!LastPrivatesReductions.empty()) {
    assert(!TeamAndReductions.first &&
           "Previous team declaration is not expected.");
    TeamAndReductions.first = D.getCapturedStmt(OMPD_teams)->getCapturedDecl();
    std::swap(TeamAndReductions.second, LastPrivatesReductions);
  }

  // Emit target region as a standalone region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    SourceLocation &Loc;
    const RecordDecl *GlobalizedRD;
    llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
        &MappedDeclsFields;

  public:
    NVPTXPrePostActionTy(
        SourceLocation &Loc, const RecordDecl *GlobalizedRD,
        llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
            &MappedDeclsFields)
        : Loc(Loc), GlobalizedRD(GlobalizedRD),
          MappedDeclsFields(MappedDeclsFields) {}
    void Enter(CodeGenFunction &CGF) override {
      auto &Rt =
          static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
      if (GlobalizedRD) {
        auto I = Rt.FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
        I->getSecond().MappedParams =
            std::make_unique<CodeGenFunction::OMPMapVars>();
        DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
        for (const auto &Pair : MappedDeclsFields) {
          assert(Pair.getFirst()->isCanonicalDecl() &&
                 "Expected canonical declaration");
          Data.insert(std::make_pair(Pair.getFirst(), MappedVarData()));
        }
      }
      Rt.emitGenericVarsProlog(CGF, Loc);
    }
    void Exit(CodeGenFunction &CGF) override {
      static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime())
          .emitGenericVarsEpilog(CGF);
    }
  } Action(Loc, GlobalizedRD, MappedDeclsFields);
  CodeGen.setAction(Action);
  llvm::Function *OutlinedFun = CGOpenMPRuntime::emitTeamsOutlinedFunction(
      D, ThreadIDVar, InnermostKind, CodeGen);

  return OutlinedFun;
}

void CGOpenMPRuntimeGPU::emitGenericVarsProlog(CodeGenFunction &CGF,
                                               SourceLocation Loc,
                                               bool WithSPMDCheck) {
  if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic &&
      getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
    return;

  CGBuilderTy &Bld = CGF.Builder;

  const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
  if (I == FunctionGlobalizedDecls.end())
    return;

  for (auto &Rec : I->getSecond().LocalVarData) {
    const auto *VD = cast<VarDecl>(Rec.first);
    bool EscapedParam = I->getSecond().EscapedParameters.count(Rec.first);
    QualType VarTy = VD->getType();

    // Get the local allocation of a firstprivate variable before sharing.
    llvm::Value *ParValue;
    if (EscapedParam) {
      LValue ParLVal =
          CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
      ParValue = CGF.EmitLoadOfScalar(ParLVal, Loc);
    }

    // Allocate space for the variable to be globalized.
    llvm::Value *AllocArgs[] = {CGF.getTypeSize(VD->getType())};
    llvm::CallBase *VoidPtr =
        CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                                CGM.getModule(), OMPRTL___kmpc_alloc_shared),
                            AllocArgs, VD->getName());
    // FIXME: We should use the variable's actual alignment as an argument.
    VoidPtr->addRetAttr(llvm::Attribute::get(
        CGM.getLLVMContext(), llvm::Attribute::Alignment,
        CGM.getContext().getTargetInfo().getNewAlign() / 8));

    // Cast the void pointer and get the address of the globalized variable.
    llvm::PointerType *VarPtrTy = CGF.ConvertTypeForMem(VarTy)->getPointerTo();
    llvm::Value *CastedVoidPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
        VoidPtr, VarPtrTy, VD->getName() + "_on_stack");
    LValue VarAddr = CGF.MakeNaturalAlignAddrLValue(CastedVoidPtr, VarTy);
    Rec.second.PrivateAddr = VarAddr.getAddress(CGF);
    Rec.second.GlobalizedVal = VoidPtr;

    // Assign the local allocation to the newly globalized location.
    if (EscapedParam) {
      CGF.EmitStoreOfScalar(ParValue, VarAddr);
      I->getSecond().MappedParams->setVarAddr(CGF, VD, VarAddr.getAddress(CGF));
    }
    if (auto *DI = CGF.getDebugInfo())
      VoidPtr->setDebugLoc(DI->SourceLocToDebugLoc(VD->getLocation()));
  }
  for (const auto *VD : I->getSecond().EscapedVariableLengthDecls) {
    // Use the actual memory size of the VLA object including the padding
    // for alignment purposes.
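    // (For example, a 10-byte VLA with 8-byte alignment is rounded up to 16
    // bytes by the add/udiv/mul sequence below.)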
    llvm::Value *Size = CGF.getTypeSize(VD->getType());
    CharUnits Align = CGM.getContext().getDeclAlign(VD);
    Size = Bld.CreateNUWAdd(
        Size, llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity() - 1));
    llvm::Value *AlignVal =
        llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity());

    Size = Bld.CreateUDiv(Size, AlignVal);
    Size = Bld.CreateNUWMul(Size, AlignVal);

    // Allocate space for this VLA object to be globalized.
    llvm::Value *AllocArgs[] = {CGF.getTypeSize(VD->getType())};
    llvm::CallBase *VoidPtr =
        CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                                CGM.getModule(), OMPRTL___kmpc_alloc_shared),
                            AllocArgs, VD->getName());
    VoidPtr->addRetAttr(
        llvm::Attribute::get(CGM.getLLVMContext(), llvm::Attribute::Alignment,
                             Align.getQuantity()));

    I->getSecond().EscapedVariableLengthDeclsAddrs.emplace_back(
        std::pair<llvm::Value *, llvm::Value *>(
            {VoidPtr, CGF.getTypeSize(VD->getType())}));
    LValue Base = CGF.MakeAddrLValue(VoidPtr, VD->getType(),
                                     CGM.getContext().getDeclAlign(VD),
                                     AlignmentSource::Decl);
    I->getSecond().MappedParams->setVarAddr(CGF, cast<VarDecl>(VD),
                                            Base.getAddress(CGF));
  }
  I->getSecond().MappedParams->apply(CGF);
}

void CGOpenMPRuntimeGPU::emitGenericVarsEpilog(CodeGenFunction &CGF,
                                               bool WithSPMDCheck) {
  if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic &&
      getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
    return;

  const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
  if (I != FunctionGlobalizedDecls.end()) {
    // Deallocate the memory for each globalized VLA object.
    for (auto AddrSizePair :
         llvm::reverse(I->getSecond().EscapedVariableLengthDeclsAddrs)) {
      CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                              CGM.getModule(), OMPRTL___kmpc_free_shared),
                          {AddrSizePair.first, AddrSizePair.second});
    }
    // Deallocate the memory for each globalized value.
    for (auto &Rec : llvm::reverse(I->getSecond().LocalVarData)) {
      const auto *VD = cast<VarDecl>(Rec.first);
      I->getSecond().MappedParams->restore(CGF);

      llvm::Value *FreeArgs[] = {Rec.second.GlobalizedVal,
                                 CGF.getTypeSize(VD->getType())};
      CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                              CGM.getModule(), OMPRTL___kmpc_free_shared),
                          FreeArgs);
    }
  }
}

void CGOpenMPRuntimeGPU::emitTeamsCall(CodeGenFunction &CGF,
                                       const OMPExecutableDirective &D,
                                       SourceLocation Loc,
                                       llvm::Function *OutlinedFn,
                                       ArrayRef<llvm::Value *> CapturedVars) {
  if (!CGF.HaveInsertPoint())
    return;

  Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
                                                      /*Name=*/".zero.addr");
  CGF.Builder.CreateStore(CGF.Builder.getInt32(/*C*/ 0), ZeroAddr);
  llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
  OutlinedFnArgs.push_back(emitThreadIDAddress(CGF, Loc).getPointer());
  OutlinedFnArgs.push_back(ZeroAddr.getPointer());
  OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
  emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
}

void CGOpenMPRuntimeGPU::emitParallelCall(CodeGenFunction &CGF,
                                          SourceLocation Loc,
                                          llvm::Function *OutlinedFn,
                                          ArrayRef<llvm::Value *> CapturedVars,
                                          const Expr *IfCond,
                                          llvm::Value *NumThreads) {
  if (!CGF.HaveInsertPoint())
    return;

  auto &&ParallelGen = [this, Loc, OutlinedFn, CapturedVars, IfCond,
                        NumThreads](CodeGenFunction &CGF,
                                    PrePostActionTy &Action) {
    CGBuilderTy &Bld = CGF.Builder;
    llvm::Value *NumThreadsVal = NumThreads;
    llvm::Function *WFn = WrapperFunctionsMap[OutlinedFn];
    llvm::Value *ID = llvm::ConstantPointerNull::get(CGM.Int8PtrTy);
    if (WFn)
      ID = Bld.CreateBitOrPointerCast(WFn, CGM.Int8PtrTy);
    llvm::Value *FnPtr = Bld.CreateBitOrPointerCast(OutlinedFn, CGM.Int8PtrTy);

    // Create a private scope that will globalize the arguments
    // passed from the outside of the target region.
    // TODO: Is that needed?
    CodeGenFunction::OMPPrivateScope PrivateArgScope(CGF);

    Address CapturedVarsAddrs = CGF.CreateDefaultAlignTempAlloca(
        llvm::ArrayType::get(CGM.VoidPtrTy, CapturedVars.size()),
        "captured_vars_addrs");
    // There's something to share.
    if (!CapturedVars.empty()) {
      // Prepare for parallel region. Indicate the outlined function.
      ASTContext &Ctx = CGF.getContext();
      unsigned Idx = 0;
      for (llvm::Value *V : CapturedVars) {
        Address Dst = Bld.CreateConstArrayGEP(CapturedVarsAddrs, Idx);
        llvm::Value *PtrV;
        if (V->getType()->isIntegerTy())
          PtrV = Bld.CreateIntToPtr(V, CGF.VoidPtrTy);
        else
          PtrV = Bld.CreatePointerBitCastOrAddrSpaceCast(V, CGF.VoidPtrTy);
        CGF.EmitStoreOfScalar(PtrV, Dst, /*Volatile=*/false,
                              Ctx.getPointerType(Ctx.VoidPtrTy));
        ++Idx;
      }
    }

    llvm::Value *IfCondVal = nullptr;
    if (IfCond)
      IfCondVal = Bld.CreateIntCast(CGF.EvaluateExprAsBool(IfCond), CGF.Int32Ty,
                                    /* isSigned */ false);
    else
      IfCondVal = llvm::ConstantInt::get(CGF.Int32Ty, 1);

    if (!NumThreadsVal)
      NumThreadsVal = llvm::ConstantInt::get(CGF.Int32Ty, -1);
    else
      NumThreadsVal = Bld.CreateZExtOrTrunc(NumThreadsVal, CGF.Int32Ty);

    assert(IfCondVal && "Expected a value");
    llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
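    // Argument layout of the device runtime entry point, for reference
    // (signature as declared in the OpenMP device RTL):
    //   __kmpc_parallel_51(ident_t *loc, i32 gtid, i32 if_expr,
    //                      i32 num_threads, i32 proc_bind, void *fn,
    //                      void *wrapper_fn, void **args, size_t nargs)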
    llvm::Value *Args[] = {
        RTLoc,
        getThreadID(CGF, Loc),
        IfCondVal,
        NumThreadsVal,
        llvm::ConstantInt::get(CGF.Int32Ty, -1),
        FnPtr,
        ID,
        Bld.CreateBitOrPointerCast(CapturedVarsAddrs.getPointer(),
                                   CGF.VoidPtrPtrTy),
        llvm::ConstantInt::get(CGM.SizeTy, CapturedVars.size())};
    CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                            CGM.getModule(), OMPRTL___kmpc_parallel_51),
                        Args);
  };

  RegionCodeGenTy RCG(ParallelGen);
  RCG(CGF);
}

void CGOpenMPRuntimeGPU::syncCTAThreads(CodeGenFunction &CGF) {
  // Always emit simple barriers!
  if (!CGF.HaveInsertPoint())
    return;
  // Build call __kmpc_barrier_simple_spmd(nullptr, 0);
  // This function does not use parameters, so we can emit just default values.
  llvm::Value *Args[] = {
      llvm::ConstantPointerNull::get(
          cast<llvm::PointerType>(getIdentTyPointerTy())),
      llvm::ConstantInt::get(CGF.Int32Ty, /*V=*/0, /*isSigned=*/true)};
  CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                          CGM.getModule(), OMPRTL___kmpc_barrier_simple_spmd),
                      Args);
}
1614 
1615 void CGOpenMPRuntimeGPU::emitBarrierCall(CodeGenFunction &CGF,
1616  SourceLocation Loc,
1617  OpenMPDirectiveKind Kind, bool,
1618  bool) {
1619  // Always emit simple barriers!
1620  if (!CGF.HaveInsertPoint())
1621  return;
1622  // Build call __kmpc_barrier(loc, thread_id);
1623  unsigned Flags = getDefaultFlagsForBarriers(Kind);
1624  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
1625  getThreadID(CGF, Loc)};
1626 
1627  CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1628  CGM.getModule(), OMPRTL___kmpc_barrier),
1629  Args);
1630 }
1631 
1632 void CGOpenMPRuntimeGPU::emitCriticalRegion(
1633  CodeGenFunction &CGF, StringRef CriticalName,
1634  const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc,
1635  const Expr *Hint) {
1636  llvm::BasicBlock *LoopBB = CGF.createBasicBlock("omp.critical.loop");
1637  llvm::BasicBlock *TestBB = CGF.createBasicBlock("omp.critical.test");
1638  llvm::BasicBlock *SyncBB = CGF.createBasicBlock("omp.critical.sync");
1639  llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.critical.body");
1640  llvm::BasicBlock *ExitBB = CGF.createBasicBlock("omp.critical.exit");
1641 
1642  auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
1643 
1644  // Get the mask of active threads in the warp.
1645  llvm::Value *Mask = CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1646  CGM.getModule(), OMPRTL___kmpc_warp_active_thread_mask));
1647  // Fetch team-local id of the thread.
1648  llvm::Value *ThreadID = RT.getGPUThreadID(CGF);
1649 
1650  // Get the width of the team.
1651  llvm::Value *TeamWidth = RT.getGPUNumThreads(CGF);
1652 
1653  // Initialize the counter variable for the loop.
1654  QualType Int32Ty =
1655  CGF.getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/0);
1656  Address Counter = CGF.CreateMemTemp(Int32Ty, "critical_counter");
1657  LValue CounterLVal = CGF.MakeAddrLValue(Counter, Int32Ty);
1658  CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.Int32Ty), CounterLVal,
1659  /*isInit=*/true);
1660 
1661  // Block checks if loop counter exceeds upper bound.
1662  CGF.EmitBlock(LoopBB);
1663  llvm::Value *CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc);
1664  llvm::Value *CmpLoopBound = CGF.Builder.CreateICmpSLT(CounterVal, TeamWidth);
1665  CGF.Builder.CreateCondBr(CmpLoopBound, TestBB, ExitBB);
1666 
1667  // Block tests which single thread should execute the region, and which
1668  // threads should go straight to the synchronisation point.
1669  CGF.EmitBlock(TestBB);
1670  CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc);
1671  llvm::Value *CmpThreadToCounter =
1672  CGF.Builder.CreateICmpEQ(ThreadID, CounterVal);
1673  CGF.Builder.CreateCondBr(CmpThreadToCounter, BodyBB, SyncBB);
1674 
1675  // Block emits the body of the critical region.
1676  CGF.EmitBlock(BodyBB);
1677 
1678  // Output the critical statement.
1679  CGOpenMPRuntime::emitCriticalRegion(CGF, CriticalName, CriticalOpGen, Loc,
1680  Hint);
1681 
1682  // After the body surrounded by the critical region, the single executing
1683  // thread will jump to the synchronisation point.
1684  // Block waits for all threads in current team to finish then increments the
1685  // counter variable and returns to the loop.
1686  CGF.EmitBlock(SyncBB);
1687  // Reconverge active threads in the warp.
1688  (void)CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1689  CGM.getModule(), OMPRTL___kmpc_syncwarp),
1690  Mask);
1691 
1692  llvm::Value *IncCounterVal =
1693  CGF.Builder.CreateNSWAdd(CounterVal, CGF.Builder.getInt32(1));
1694  CGF.EmitStoreOfScalar(IncCounterVal, CounterLVal);
1695  CGF.EmitBranch(LoopBB);
1696 
1697  // Block that is reached when all threads in the team complete the region.
1698  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1699 }
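// Taken together, the blocks above serialize the critical region across the
// team. As an illustrative sketch (not literal IR) of what gets emitted:
//
//   for (counter = 0; counter < team_width; ++counter) { // omp.critical.loop
//     if (thread_id == counter)                          // omp.critical.test
//       <critical region body>;                          // omp.critical.body
//     __kmpc_syncwarp(mask);                             // omp.critical.sync
//   }                                                    // omp.critical.exit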
1700 
1701 /// Cast value to the specified type.
1702 static llvm::Value *castValueToType(CodeGenFunction &CGF, llvm::Value *Val,
1703  QualType ValTy, QualType CastTy,
1704  SourceLocation Loc) {
1705  assert(!CGF.getContext().getTypeSizeInChars(CastTy).isZero() &&
1706  "Cast type must be sized.");
1707  assert(!CGF.getContext().getTypeSizeInChars(ValTy).isZero() &&
1708  "Val type must be sized.");
1709  llvm::Type *LLVMCastTy = CGF.ConvertTypeForMem(CastTy);
1710  if (ValTy == CastTy)
1711  return Val;
1712  if (CGF.getContext().getTypeSizeInChars(ValTy) ==
1713  CGF.getContext().getTypeSizeInChars(CastTy))
1714  return CGF.Builder.CreateBitCast(Val, LLVMCastTy);
1715  if (CastTy->isIntegerType() && ValTy->isIntegerType())
1716  return CGF.Builder.CreateIntCast(Val, LLVMCastTy,
1717  CastTy->hasSignedIntegerRepresentation());
1718  Address CastItem = CGF.CreateMemTemp(CastTy);
1719  Address ValCastItem = CGF.CreatePointerBitCastOrAddrSpaceCast(
1720  CastItem, Val->getType()->getPointerTo(CastItem.getAddressSpace()),
1721  Val->getType());
1722  CGF.EmitStoreOfScalar(Val, ValCastItem, /*Volatile=*/false, ValTy,
1723  LValueBaseInfo(AlignmentSource::Type),
1724  TBAAAccessInfo());
1725  return CGF.EmitLoadOfScalar(CastItem, /*Volatile=*/false, CastTy, Loc,
1726  LValueBaseInfo(AlignmentSource::Type),
1727  TBAAAccessInfo());
1728 }
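// For example (illustrative): shuffling a 'float' takes the bitcast path
// above, since it is the same size as the 32-bit integer cast type, while a
// 3-byte aggregate takes the memory round-trip: store it through a pointer
// of its own type into the cast-type temporary, then reload as an integer.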
1729 
1730 /// This function creates calls to one of two shuffle functions to copy
1731 /// variables between lanes in a warp.
1732 static llvm::Value *createRuntimeShuffleFunction(CodeGenFunction &CGF,
1733  llvm::Value *Elem,
1734  QualType ElemType,
1735  llvm::Value *Offset,
1736  SourceLocation Loc) {
1737  CodeGenModule &CGM = CGF.CGM;
1738  CGBuilderTy &Bld = CGF.Builder;
1739  CGOpenMPRuntimeGPU &RT =
1740  *(static_cast<CGOpenMPRuntimeGPU *>(&CGM.getOpenMPRuntime()));
1741  llvm::OpenMPIRBuilder &OMPBuilder = RT.getOMPBuilder();
1742 
1743  CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
1744  assert(Size.getQuantity() <= 8 &&
1745  "Unsupported bitwidth in shuffle instruction.");
1746 
1747  RuntimeFunction ShuffleFn = Size.getQuantity() <= 4
1748  ? OMPRTL___kmpc_shuffle_int32
1749  : OMPRTL___kmpc_shuffle_int64;
1750 
1751  // Cast all types to 32- or 64-bit values before calling shuffle routines.
1752  QualType CastTy = CGF.getContext().getIntTypeForBitwidth(
1753  Size.getQuantity() <= 4 ? 32 : 64, /*Signed=*/1);
1754  llvm::Value *ElemCast = castValueToType(CGF, Elem, ElemType, CastTy, Loc);
1755  llvm::Value *WarpSize =
1756  Bld.CreateIntCast(RT.getGPUWarpSize(CGF), CGM.Int16Ty, /*isSigned=*/true);
1757 
1758  llvm::Value *ShuffledVal = CGF.EmitRuntimeCall(
1759  OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(), ShuffleFn),
1760  {ElemCast, Offset, WarpSize});
1761 
1762  return castValueToType(CGF, ShuffledVal, CastTy, ElemType, Loc);
1763 }
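// The shuffle semantics assumed by the call above (a sketch; the device
// runtime holds the authoritative definition):
//
//   int32_t __kmpc_shuffle_int32(int32_t val, int16_t delta, int16_t size);
//     // Returns 'val' as held by the lane 'delta' positions above the
//     // caller, within a logical warp of 'size' lanes (a down-shuffle).
//
// __kmpc_shuffle_int64 is the same but for 64-bit payloads.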
1764 
1765 static void shuffleAndStore(CodeGenFunction &CGF, Address SrcAddr,
1766  Address DestAddr, QualType ElemType,
1767  llvm::Value *Offset, SourceLocation Loc) {
1768  CGBuilderTy &Bld = CGF.Builder;
1769 
1770  CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
1771  // Create the loop over the big sized data.
1772  // ptr = (void*)Elem;
1773  // ptrEnd = (void*) Elem + 1;
1774  // Step = 8;
1775  // while (ptr + Step < ptrEnd)
1776  // shuffle((int64_t)*ptr);
1777  // Step = 4;
1778  // while (ptr + Step < ptrEnd)
1779  // shuffle((int32_t)*ptr);
1780  // ...
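  // For instance (illustrative): a 7-byte element is moved as one 4-byte,
  // one 2-byte, and one 1-byte shuffle, while a 16-byte element takes the
  // pre_cond loop below and issues two 8-byte shuffles.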
1781  Address ElemPtr = DestAddr;
1782  Address Ptr = SrcAddr;
1783  Address PtrEnd = Bld.CreatePointerBitCastOrAddrSpaceCast(
1784  Bld.CreateConstGEP(SrcAddr, 1), CGF.VoidPtrTy, CGF.Int8Ty);
1785  for (int IntSize = 8; IntSize >= 1; IntSize /= 2) {
1786  if (Size < CharUnits::fromQuantity(IntSize))
1787  continue;
1788  QualType IntType = CGF.getContext().getIntTypeForBitwidth(
1789  CGF.getContext().toBits(CharUnits::fromQuantity(IntSize)),
1790  /*Signed=*/1);
1791  llvm::Type *IntTy = CGF.ConvertTypeForMem(IntType);
1792  Ptr = Bld.CreatePointerBitCastOrAddrSpaceCast(Ptr, IntTy->getPointerTo(),
1793  IntTy);
1794  ElemPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
1795  ElemPtr, IntTy->getPointerTo(), IntTy);
1796  if (Size.getQuantity() / IntSize > 1) {
1797  llvm::BasicBlock *PreCondBB = CGF.createBasicBlock(".shuffle.pre_cond");
1798  llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".shuffle.then");
1799  llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".shuffle.exit");
1800  llvm::BasicBlock *CurrentBB = Bld.GetInsertBlock();
1801  CGF.EmitBlock(PreCondBB);
1802  llvm::PHINode *PhiSrc =
1803  Bld.CreatePHI(Ptr.getType(), /*NumReservedValues=*/2);
1804  PhiSrc->addIncoming(Ptr.getPointer(), CurrentBB);
1805  llvm::PHINode *PhiDest =
1806  Bld.CreatePHI(ElemPtr.getType(), /*NumReservedValues=*/2);
1807  PhiDest->addIncoming(ElemPtr.getPointer(), CurrentBB);
1808  Ptr = Address(PhiSrc, Ptr.getElementType(), Ptr.getAlignment());
1809  ElemPtr =
1810  Address(PhiDest, ElemPtr.getElementType(), ElemPtr.getAlignment());
1811  llvm::Value *PtrDiff = Bld.CreatePtrDiff(
1812  CGF.Int8Ty, PtrEnd.getPointer(),
1813  Bld.CreatePointerBitCastOrAddrSpaceCast(Ptr.getPointer(),
1814  CGF.VoidPtrTy));
1815  Bld.CreateCondBr(Bld.CreateICmpSGT(PtrDiff, Bld.getInt64(IntSize - 1)),
1816  ThenBB, ExitBB);
1817  CGF.EmitBlock(ThenBB);
1818  llvm::Value *Res = createRuntimeShuffleFunction(
1819  CGF,
1820  CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc,
1821  LValueBaseInfo(AlignmentSource::Type),
1822  TBAAAccessInfo()),
1823  IntType, Offset, Loc);
1824  CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType,
1825  LValueBaseInfo(AlignmentSource::Type),
1826  TBAAAccessInfo());
1827  Address LocalPtr = Bld.CreateConstGEP(Ptr, 1);
1828  Address LocalElemPtr = Bld.CreateConstGEP(ElemPtr, 1);
1829  PhiSrc->addIncoming(LocalPtr.getPointer(), ThenBB);
1830  PhiDest->addIncoming(LocalElemPtr.getPointer(), ThenBB);
1831  CGF.EmitBranch(PreCondBB);
1832  CGF.EmitBlock(ExitBB);
1833  } else {
1834  llvm::Value *Res = createRuntimeShuffleFunction(
1835  CGF,
1836  CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc,
1837  LValueBaseInfo(AlignmentSource::Type),
1838  TBAAAccessInfo()),
1839  IntType, Offset, Loc);
1840  CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType,
1841  LValueBaseInfo(AlignmentSource::Type),
1842  TBAAAccessInfo());
1843  Ptr = Bld.CreateConstGEP(Ptr, 1);
1844  ElemPtr = Bld.CreateConstGEP(ElemPtr, 1);
1845  }
1846  Size = Size % IntSize;
1847  }
1848 }
1849 
1850 namespace {
1851 enum CopyAction : unsigned {
1852  // RemoteLaneToThread: Copy over a Reduce list from a remote lane in
1853  // the warp using shuffle instructions.
1854  RemoteLaneToThread,
1855  // ThreadCopy: Make a copy of a Reduce list on the thread's stack.
1856  ThreadCopy,
1857  // ThreadToScratchpad: Copy a team-reduced array to the scratchpad.
1858  ThreadToScratchpad,
1859  // ScratchpadToThread: Copy from a scratchpad array in global memory
1860  // containing team-reduced data to a thread's stack.
1861  ScratchpadToThread,
1862 };
1863 } // namespace
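// Of these actions, only RemoteLaneToThread and ThreadCopy are exercised by
// the shuffle-and-reduce path in this file; the scratchpad variants appear
// to remain from the older scratchpad-based team reduction scheme.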
1864 
1865 struct CopyOptionsTy {
1866  llvm::Value *RemoteLaneOffset;
1867  llvm::Value *ScratchpadIndex;
1868  llvm::Value *ScratchpadWidth;
1869 };
1870 
1871 /// Emit instructions to copy a Reduce list, which contains partially
1872 /// aggregated values, in the specified direction.
1873 static void emitReductionListCopy(
1874  CopyAction Action, CodeGenFunction &CGF, QualType ReductionArrayTy,
1875  ArrayRef<const Expr *> Privates, Address SrcBase, Address DestBase,
1876  CopyOptionsTy CopyOptions = {nullptr, nullptr, nullptr}) {
1877 
1878  CodeGenModule &CGM = CGF.CGM;
1879  ASTContext &C = CGM.getContext();
1880  CGBuilderTy &Bld = CGF.Builder;
1881 
1882  llvm::Value *RemoteLaneOffset = CopyOptions.RemoteLaneOffset;
1883  llvm::Value *ScratchpadIndex = CopyOptions.ScratchpadIndex;
1884  llvm::Value *ScratchpadWidth = CopyOptions.ScratchpadWidth;
1885 
1886  // Iterate, element by element, through the source Reduce list and
1887  // make a copy.
1888  unsigned Idx = 0;
1889  unsigned Size = Privates.size();
1890  for (const Expr *Private : Privates) {
1891  Address SrcElementAddr = Address::invalid();
1892  Address DestElementAddr = Address::invalid();
1893  Address DestElementPtrAddr = Address::invalid();
1894  // Should we shuffle in an element from a remote lane?
1895  bool ShuffleInElement = false;
1896  // Set to true to update the pointer in the dest Reduce list to a
1897  // newly created element.
1898  bool UpdateDestListPtr = false;
1899  // Increment the src or dest pointer to the scratchpad, for each
1900  // new element.
1901  bool IncrScratchpadSrc = false;
1902  bool IncrScratchpadDest = false;
1903  QualType PrivatePtrType = C.getPointerType(Private->getType());
1904  llvm::Type *PrivateLlvmPtrType = CGF.ConvertType(PrivatePtrType);
1905 
1906  switch (Action) {
1907  case RemoteLaneToThread: {
1908  // Step 1.1: Get the address for the src element in the Reduce list.
1909  Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
1910  SrcElementAddr =
1911  CGF.EmitLoadOfPointer(CGF.Builder.CreateElementBitCast(
1912  SrcElementPtrAddr, PrivateLlvmPtrType),
1913  PrivatePtrType->castAs<PointerType>());
1914 
1915  // Step 1.2: Create a temporary to store the element in the destination
1916  // Reduce list.
1917  DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
1918  DestElementAddr =
1919  CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
1920  ShuffleInElement = true;
1921  UpdateDestListPtr = true;
1922  break;
1923  }
1924  case ThreadCopy: {
1925  // Step 1.1: Get the address for the src element in the Reduce list.
1926  Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
1927  SrcElementAddr =
1928  CGF.EmitLoadOfPointer(CGF.Builder.CreateElementBitCast(
1929  SrcElementPtrAddr, PrivateLlvmPtrType),
1930  PrivatePtrType->castAs<PointerType>());
1931 
1932  // Step 1.2: Get the address for dest element. The destination
1933  // element has already been created on the thread's stack.
1934  DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
1935  DestElementAddr =
1936  CGF.EmitLoadOfPointer(CGF.Builder.CreateElementBitCast(
1937  DestElementPtrAddr, PrivateLlvmPtrType),
1938  PrivatePtrType->castAs<PointerType>());
1939  break;
1940  }
1941  case ThreadToScratchpad: {
1942  // Step 1.1: Get the address for the src element in the Reduce list.
1943  Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
1944  SrcElementAddr =
1945  CGF.EmitLoadOfPointer(CGF.Builder.CreateElementBitCast(
1946  SrcElementPtrAddr, PrivateLlvmPtrType),
1947  PrivatePtrType->castAs<PointerType>());
1948 
1949  // Step 1.2: Get the address for dest element:
1950  // address = base + index * ElementSizeInChars.
1951  llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
1952  llvm::Value *CurrentOffset =
1953  Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex);
1954  llvm::Value *ScratchPadElemAbsolutePtrVal =
1955  Bld.CreateNUWAdd(DestBase.getPointer(), CurrentOffset);
1956  ScratchPadElemAbsolutePtrVal =
1957  Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
1958  DestElementAddr = Address(ScratchPadElemAbsolutePtrVal, CGF.Int8Ty,
1959  C.getTypeAlignInChars(Private->getType()));
1960  IncrScratchpadDest = true;
1961  break;
1962  }
1963  case ScratchpadToThread: {
1964  // Step 1.1: Get the address for the src element in the scratchpad.
1965  // address = base + index * ElementSizeInChars.
1966  llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
1967  llvm::Value *CurrentOffset =
1968  Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex);
1969  llvm::Value *ScratchPadElemAbsolutePtrVal =
1970  Bld.CreateNUWAdd(SrcBase.getPointer(), CurrentOffset);
1971  ScratchPadElemAbsolutePtrVal =
1972  Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
1973  SrcElementAddr = Address(ScratchPadElemAbsolutePtrVal, CGF.Int8Ty,
1974  C.getTypeAlignInChars(Private->getType()));
1975  IncrScratchpadSrc = true;
1976 
1977  // Step 1.2: Create a temporary to store the element in the destination
1978  // Reduce list.
1979  DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
1980  DestElementAddr =
1981  CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
1982  UpdateDestListPtr = true;
1983  break;
1984  }
1985  }
1986 
1987  // Regardless of src and dest of copy, we emit the load of src
1988  // element as this is required in all directions
1989  SrcElementAddr = Bld.CreateElementBitCast(
1990  SrcElementAddr, CGF.ConvertTypeForMem(Private->getType()));
1991  DestElementAddr = Bld.CreateElementBitCast(DestElementAddr,
1992  SrcElementAddr.getElementType());
1993 
1994  // Now that all active lanes have read the element in the
1995  // Reduce list, shuffle over the value from the remote lane.
1996  if (ShuffleInElement) {
1997  shuffleAndStore(CGF, SrcElementAddr, DestElementAddr, Private->getType(),
1998  RemoteLaneOffset, Private->getExprLoc());
1999  } else {
2000  switch (CGF.getEvaluationKind(Private->getType())) {
2001  case TEK_Scalar: {
2002  llvm::Value *Elem = CGF.EmitLoadOfScalar(
2003  SrcElementAddr, /*Volatile=*/false, Private->getType(),
2004  Private->getExprLoc(), LValueBaseInfo(AlignmentSource::Type),
2005  TBAAAccessInfo());
2006  // Store the source element value to the dest element address.
2007  CGF.EmitStoreOfScalar(
2008  Elem, DestElementAddr, /*Volatile=*/false, Private->getType(),
2009  LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo());
2010  break;
2011  }
2012  case TEK_Complex: {
2013  CodeGenFunction::ComplexPairTy Elem = CGF.EmitLoadOfComplex(
2014  CGF.MakeAddrLValue(SrcElementAddr, Private->getType()),
2015  Private->getExprLoc());
2016  CGF.EmitStoreOfComplex(
2017  Elem, CGF.MakeAddrLValue(DestElementAddr, Private->getType()),
2018  /*isInit=*/false);
2019  break;
2020  }
2021  case TEK_Aggregate:
2022  CGF.EmitAggregateCopy(
2023  CGF.MakeAddrLValue(DestElementAddr, Private->getType()),
2024  CGF.MakeAddrLValue(SrcElementAddr, Private->getType()),
2025  Private->getType(), AggValueSlot::DoesNotOverlap);
2026  break;
2027  }
2028  }
2029 
2030  // Step 3.1: Modify reference in dest Reduce list as needed.
2031  // Modifying the reference in Reduce list to point to the newly
2032  // created element. The element is live in the current function
2033  // scope and that of functions it invokes (i.e., reduce_function).
2034  // RemoteReduceData[i] = (void*)&RemoteElem
2035  if (UpdateDestListPtr) {
2036  CGF.EmitStoreOfScalar(Bld.CreatePointerBitCastOrAddrSpaceCast(
2037  DestElementAddr.getPointer(), CGF.VoidPtrTy),
2038  DestElementPtrAddr, /*Volatile=*/false,
2039  C.VoidPtrTy);
2040  }
2041 
2042  // Step 4.1: Increment SrcBase/DestBase so that it points to the starting
2043  // address of the next element in scratchpad memory, unless we're currently
2044  // processing the last one. Memory alignment is also taken care of here.
2045  if ((IncrScratchpadDest || IncrScratchpadSrc) && (Idx + 1 < Size)) {
2046  // FIXME: This code doesn't make any sense, it's trying to perform
2047  // integer arithmetic on pointers.
2048  llvm::Value *ScratchpadBasePtr =
2049  IncrScratchpadDest ? DestBase.getPointer() : SrcBase.getPointer();
2050  llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
2051  ScratchpadBasePtr = Bld.CreateNUWAdd(
2052  ScratchpadBasePtr,
2053  Bld.CreateNUWMul(ScratchpadWidth, ElementSizeInChars));
2054 
2055  // Take care of global memory alignment for performance
2056  ScratchpadBasePtr = Bld.CreateNUWSub(
2057  ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1));
2058  ScratchpadBasePtr = Bld.CreateUDiv(
2059  ScratchpadBasePtr,
2060  llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));
2061  ScratchpadBasePtr = Bld.CreateNUWAdd(
2062  ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1));
2063  ScratchpadBasePtr = Bld.CreateNUWMul(
2064  ScratchpadBasePtr,
2065  llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));
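  // The sub/udiv/add/mul sequence above is the usual round-up idiom
  // ((x - 1) / A + 1) * A with A = GlobalMemoryAlignment; e.g. for x = 300
  // and A = 128 (illustrative values only), the base advances to 384.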
2066 
2067  if (IncrScratchpadDest)
2068  DestBase =
2069  Address(ScratchpadBasePtr, CGF.VoidPtrTy, CGF.getPointerAlign());
2070  else /* IncrScratchpadSrc = true */
2071  SrcBase =
2072  Address(ScratchpadBasePtr, CGF.VoidPtrTy, CGF.getPointerAlign());
2073  }
2074 
2075  ++Idx;
2076  }
2077 }
2078 
2079 /// This function emits a helper that gathers Reduce lists from the first
2080 /// lane of every active warp to lanes in the first warp.
2081 ///
2082 /// void inter_warp_copy_func(void* reduce_data, num_warps)
2083 /// shared smem[warp_size];
2084 /// For all data entries D in reduce_data:
2085 /// sync
2086 /// If (I am the first lane in each warp)
2087 /// Copy my local D to smem[warp_id]
2088 /// sync
2089 /// if (I am the first warp)
2090 /// Copy smem[thread_id] to my local D
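///
/// For example (illustrative), with a team of 128 threads (4 warps), lane 0
/// of each warp writes its partial value to smem[0..3]; after the barrier,
/// threads 0..3 of the first warp read those slots back, so warp 0 holds all
/// four partial results for the final intra-warp reduction.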
2091 static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
2092  ArrayRef<const Expr *> Privates,
2093  QualType ReductionArrayTy,
2094  SourceLocation Loc) {
2095  ASTContext &C = CGM.getContext();
2096  llvm::Module &M = CGM.getModule();
2097 
2098  // ReduceList: thread local Reduce list.
2099  // At the stage of the computation when this function is called, partially
2100  // aggregated values reside in the first lane of every active warp.
2101  ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2102  C.VoidPtrTy, ImplicitParamDecl::Other);
2103  // NumWarps: number of warps active in the parallel region. This could
2104  // be smaller than 32 (max warps in a CTA) for partial block reduction.
2105  ImplicitParamDecl NumWarpsArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2106  C.getIntTypeForBitwidth(32, /* Signed */ true),
2107  ImplicitParamDecl::Other);
2108  FunctionArgList Args;
2109  Args.push_back(&ReduceListArg);
2110  Args.push_back(&NumWarpsArg);
2111 
2112  const CGFunctionInfo &CGFI =
2113  CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2114  auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
2115  llvm::GlobalValue::InternalLinkage,
2116  "_omp_reduction_inter_warp_copy_func", &M);
2117  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2118  Fn->setDoesNotRecurse();
2119  CodeGenFunction CGF(CGM);
2120  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2121 
2122  CGBuilderTy &Bld = CGF.Builder;
2123 
2124  // This array is used as a medium to transfer, one reduce element at a time,
2125  // the data from the first lane of every warp to lanes in the first warp
2126  // in order to perform the final step of a reduction in a parallel region
2127  // (reduction across warps). The array is placed in NVPTX __shared__ memory
2128  // for reduced latency, as well as to have a distinct copy for concurrently
2129  // executing target regions. The array is declared with weak linkage so
2130  // as to be shared across compilation units.
2131  StringRef TransferMediumName =
2132  "__openmp_nvptx_data_transfer_temporary_storage";
2133  llvm::GlobalVariable *TransferMedium =
2134  M.getGlobalVariable(TransferMediumName);
2135  unsigned WarpSize = CGF.getTarget().getGridValue().GV_Warp_Size;
2136  if (!TransferMedium) {
2137  auto *Ty = llvm::ArrayType::get(CGM.Int32Ty, WarpSize);
2138  unsigned SharedAddressSpace = C.getTargetAddressSpace(LangAS::cuda_shared);
2139  TransferMedium = new llvm::GlobalVariable(
2140  M, Ty, /*isConstant=*/false, llvm::GlobalVariable::WeakAnyLinkage,
2141  llvm::UndefValue::get(Ty), TransferMediumName,
2142  /*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal,
2143  SharedAddressSpace);
2144  CGM.addCompilerUsedGlobal(TransferMedium);
2145  }
2146 
2147  auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
2148  // Get the CUDA thread id of the current OpenMP thread on the GPU.
2149  llvm::Value *ThreadID = RT.getGPUThreadID(CGF);
2150  // nvptx_lane_id = nvptx_id % warpsize
2151  llvm::Value *LaneID = getNVPTXLaneID(CGF);
2152  // nvptx_warp_id = nvptx_id / warpsize
2153  llvm::Value *WarpID = getNVPTXWarpID(CGF);
2154 
2155  Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2156  llvm::Type *ElemTy = CGF.ConvertTypeForMem(ReductionArrayTy);
2157  Address LocalReduceList(
2158  Bld.CreatePointerBitCastOrAddrSpaceCast(
2159  CGF.EmitLoadOfScalar(
2160  AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc,
2161  LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo()),
2162  ElemTy->getPointerTo()),
2163  ElemTy, CGF.getPointerAlign());
2164 
2165  unsigned Idx = 0;
2166  for (const Expr *Private : Privates) {
2167  //
2168  // Warp master copies reduce element to transfer medium in __shared__
2169  // memory.
2170  //
2171  unsigned RealTySize =
2172  C.getTypeSizeInChars(Private->getType())
2173  .alignTo(C.getTypeAlignInChars(Private->getType()))
2174  .getQuantity();
2175  for (unsigned TySize = 4; TySize > 0 && RealTySize > 0; TySize /= 2) {
2176  unsigned NumIters = RealTySize / TySize;
2177  if (NumIters == 0)
2178  continue;
2179  QualType CType = C.getIntTypeForBitwidth(
2180  C.toBits(CharUnits::fromQuantity(TySize)), /*Signed=*/1);
2181  llvm::Type *CopyType = CGF.ConvertTypeForMem(CType);
2182  CharUnits Align = CharUnits::fromQuantity(TySize);
2183  llvm::Value *Cnt = nullptr;
2184  Address CntAddr = Address::invalid();
2185  llvm::BasicBlock *PrecondBB = nullptr;
2186  llvm::BasicBlock *ExitBB = nullptr;
2187  if (NumIters > 1) {
2188  CntAddr = CGF.CreateMemTemp(C.IntTy, ".cnt.addr");
2189  CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.IntTy), CntAddr,
2190  /*Volatile=*/false, C.IntTy);
2191  PrecondBB = CGF.createBasicBlock("precond");
2192  ExitBB = CGF.createBasicBlock("exit");
2193  llvm::BasicBlock *BodyBB = CGF.createBasicBlock("body");
2194  // There is no need to emit line number for unconditional branch.
2195  (void)ApplyDebugLocation::CreateEmpty(CGF);
2196  CGF.EmitBlock(PrecondBB);
2197  Cnt = CGF.EmitLoadOfScalar(CntAddr, /*Volatile=*/false, C.IntTy, Loc);
2198  llvm::Value *Cmp =
2199  Bld.CreateICmpULT(Cnt, llvm::ConstantInt::get(CGM.IntTy, NumIters));
2200  Bld.CreateCondBr(Cmp, BodyBB, ExitBB);
2201  CGF.EmitBlock(BodyBB);
2202  }
2203  // kmpc_barrier.
2204  CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown,
2205  /*EmitChecks=*/false,
2206  /*ForceSimpleCall=*/true);
2207  llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
2208  llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
2209  llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
2210 
2211  // if (lane_id == 0)
2212  llvm::Value *IsWarpMaster = Bld.CreateIsNull(LaneID, "warp_master");
2213  Bld.CreateCondBr(IsWarpMaster, ThenBB, ElseBB);
2214  CGF.EmitBlock(ThenBB);
2215 
2216  // Reduce element = LocalReduceList[i]
2217  Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
2218  llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
2219  ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
2220  // elemptr = ((CopyType*)(elemptrptr)) + I
2221  Address ElemPtr(ElemPtrPtr, CGF.Int8Ty, Align);
2222  ElemPtr = Bld.CreateElementBitCast(ElemPtr, CopyType);
2223  if (NumIters > 1)
2224  ElemPtr = Bld.CreateGEP(ElemPtr, Cnt);
2225 
2226  // Get pointer to location in transfer medium.
2227  // MediumPtr = &medium[warp_id]
2228  llvm::Value *MediumPtrVal = Bld.CreateInBoundsGEP(
2229  TransferMedium->getValueType(), TransferMedium,
2230  {llvm::Constant::getNullValue(CGM.Int64Ty), WarpID});
2231  // Casting to actual data type.
2232  // MediumPtr = (CopyType*)MediumPtrAddr;
2233  Address MediumPtr(
2234  Bld.CreateBitCast(
2235  MediumPtrVal,
2236  CopyType->getPointerTo(
2237  MediumPtrVal->getType()->getPointerAddressSpace())),
2238  CopyType, Align);
2239 
2240  // elem = *elemptr
2241  //*MediumPtr = elem
2242  llvm::Value *Elem = CGF.EmitLoadOfScalar(
2243  ElemPtr, /*Volatile=*/false, CType, Loc,
2244  LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo());
2245  // Store the source element value to the dest element address.
2246  CGF.EmitStoreOfScalar(Elem, MediumPtr, /*Volatile=*/true, CType,
2247  LValueBaseInfo(AlignmentSource::Type),
2248  TBAAAccessInfo());
2249 
2250  Bld.CreateBr(MergeBB);
2251 
2252  CGF.EmitBlock(ElseBB);
2253  Bld.CreateBr(MergeBB);
2254 
2255  CGF.EmitBlock(MergeBB);
2256 
2257  // kmpc_barrier.
2258  CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown,
2259  /*EmitChecks=*/false,
2260  /*ForceSimpleCall=*/true);
2261 
2262  //
2263  // Warp 0 copies reduce element from transfer medium.
2264  //
2265  llvm::BasicBlock *W0ThenBB = CGF.createBasicBlock("then");
2266  llvm::BasicBlock *W0ElseBB = CGF.createBasicBlock("else");
2267  llvm::BasicBlock *W0MergeBB = CGF.createBasicBlock("ifcont");
2268 
2269  Address AddrNumWarpsArg = CGF.GetAddrOfLocalVar(&NumWarpsArg);
2270  llvm::Value *NumWarpsVal = CGF.EmitLoadOfScalar(
2271  AddrNumWarpsArg, /*Volatile=*/false, C.IntTy, Loc);
2272 
2273  // Up to 32 threads in warp 0 are active.
2274  llvm::Value *IsActiveThread =
2275  Bld.CreateICmpULT(ThreadID, NumWarpsVal, "is_active_thread");
2276  Bld.CreateCondBr(IsActiveThread, W0ThenBB, W0ElseBB);
2277 
2278  CGF.EmitBlock(W0ThenBB);
2279 
2280  // SrcMediumPtr = &medium[tid]
2281  llvm::Value *SrcMediumPtrVal = Bld.CreateInBoundsGEP(
2282  TransferMedium->getValueType(), TransferMedium,
2283  {llvm::Constant::getNullValue(CGM.Int64Ty), ThreadID});
2284  // SrcMediumVal = *SrcMediumPtr;
2285  Address SrcMediumPtr(
2286  Bld.CreateBitCast(
2287  SrcMediumPtrVal,
2288  CopyType->getPointerTo(
2289  SrcMediumPtrVal->getType()->getPointerAddressSpace())),
2290  CopyType, Align);
2291 
2292  // TargetElemPtr = (CopyType*)(SrcDataAddr[i]) + I
2293  Address TargetElemPtrPtr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
2294  llvm::Value *TargetElemPtrVal = CGF.EmitLoadOfScalar(
2295  TargetElemPtrPtr, /*Volatile=*/false, C.VoidPtrTy, Loc);
2296  Address TargetElemPtr(TargetElemPtrVal, CGF.Int8Ty, Align);
2297  TargetElemPtr = Bld.CreateElementBitCast(TargetElemPtr, CopyType);
2298  if (NumIters > 1)
2299  TargetElemPtr = Bld.CreateGEP(TargetElemPtr, Cnt);
2300 
2301  // *TargetElemPtr = SrcMediumVal;
2302  llvm::Value *SrcMediumValue =
2303  CGF.EmitLoadOfScalar(SrcMediumPtr, /*Volatile=*/true, CType, Loc);
2304  CGF.EmitStoreOfScalar(SrcMediumValue, TargetElemPtr, /*Volatile=*/false,
2305  CType);
2306  Bld.CreateBr(W0MergeBB);
2307 
2308  CGF.EmitBlock(W0ElseBB);
2309  Bld.CreateBr(W0MergeBB);
2310 
2311  CGF.EmitBlock(W0MergeBB);
2312 
2313  if (NumIters > 1) {
2314  Cnt = Bld.CreateNSWAdd(Cnt, llvm::ConstantInt::get(CGM.IntTy, /*V=*/1));
2315  CGF.EmitStoreOfScalar(Cnt, CntAddr, /*Volatile=*/false, C.IntTy);
2316  CGF.EmitBranch(PrecondBB);
2317  (void)ApplyDebugLocation::CreateEmpty(CGF);
2318  CGF.EmitBlock(ExitBB);
2319  }
2320  RealTySize %= TySize;
2321  }
2322  ++Idx;
2323  }
2324 
2325  CGF.FinishFunction();
2326  return Fn;
2327 }
2328 
2329 /// Emit a helper that reduces data across two OpenMP threads (lanes)
2330 /// in the same warp. It uses shuffle instructions to copy over data from
2331 /// a remote lane's stack. The reduction algorithm performed is specified
2332 /// by the fourth parameter.
2333 ///
2334 /// Algorithm Versions.
2335 /// Full Warp Reduce (argument value 0):
2336 /// This algorithm assumes that all 32 lanes are active and gathers
2337 /// data from these 32 lanes, producing a single resultant value.
2338 /// Contiguous Partial Warp Reduce (argument value 1):
2339 /// This algorithm assumes that only a *contiguous* subset of lanes
2340 /// are active. This happens for the last warp in a parallel region
2341 /// when the user specified num_threads is not an integer multiple of
2342 /// 32. This contiguous subset always starts with the zeroth lane.
2343 /// Partial Warp Reduce (argument value 2):
2344 /// This algorithm gathers data from any number of lanes at any position.
2345 /// All reduced values are stored in the lowest possible lane. The set
2346 /// of problems every algorithm addresses is a superset of those
2347 /// addressable by algorithms with a lower version number. Overhead
2348 /// increases as algorithm version increases.
2349 ///
2350 /// Terminology
2351 /// Reduce element:
2352 /// Reduce element refers to the individual data field with primitive
2353 /// data types to be combined and reduced across threads.
2354 /// Reduce list:
2355 /// Reduce list refers to a collection of local, thread-private
2356 /// reduce elements.
2357 /// Remote Reduce list:
2358 /// Remote Reduce list refers to a collection of remote (relative to
2359 /// the current thread) reduce elements.
2360 ///
2361 /// We distinguish between three states of threads that are important to
2362 /// the implementation of this function.
2363 /// Alive threads:
2364 /// Threads in a warp executing the SIMT instruction, as distinguished from
2365 /// threads that are inactive due to divergent control flow.
2366 /// Active threads:
2367 /// The minimal set of threads that has to be alive upon entry to this
2368 /// function. The computation is correct iff active threads are alive.
2369 /// Some threads are alive but they are not active because they do not
2370 /// contribute to the computation in any useful manner. Turning them off
2371 /// may introduce control flow overheads without any tangible benefits.
2372 /// Effective threads:
2373 /// In order to comply with the argument requirements of the shuffle
2374 /// function, we must keep all lanes holding data alive. But at most
2375 /// half of them perform value aggregation; we refer to this half of
2376 /// threads as effective. The other half is simply handing off their
2377 /// data.
2378 ///
2379 /// Procedure
2380 /// Value shuffle:
2381 /// In this step active threads transfer data from higher lane positions
2382 /// in the warp to lower lane positions, creating Remote Reduce list.
2383 /// Value aggregation:
2384 /// In this step, effective threads combine their thread local Reduce list
2385 /// with Remote Reduce list and store the result in the thread local
2386 /// Reduce list.
2387 /// Value copy:
2388 /// In this step, we deal with the contiguity assumption made by algorithm
2389 /// version 1 (Contiguous Partial Warp Reduce). When we have an odd number of lanes
2390 /// active, say 2k+1, only k threads will be effective and therefore k
2391 /// new values will be produced. However, the Reduce list owned by the
2392 /// (2k+1)th thread is ignored in the value aggregation. Therefore
2393 /// we copy the Reduce list from the (2k+1)th lane to (k+1)th lane so
2394 /// that the contiguity assumption still holds.
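///
/// As a concrete (illustrative) trace of Contiguous Partial Warp Reduce with
/// 5 active lanes and Offset = 2: lanes 0 and 1 aggregate the lists shuffled
/// in from lanes 2 and 3, and the value-copy step moves lane 4's list down
/// to lane 2, leaving three contiguous partial results in lanes 0..2 for the
/// next round.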
2395 static llvm::Function *emitShuffleAndReduceFunction(
2396  CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
2397  QualType ReductionArrayTy, llvm::Function *ReduceFn, SourceLocation Loc) {
2398  ASTContext &C = CGM.getContext();
2399 
2400  // Thread local Reduce list used to host the values of data to be reduced.
2401  ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2402  C.VoidPtrTy, ImplicitParamDecl::Other);
2403  // Current lane id; could be logical.
2404  ImplicitParamDecl LaneIDArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.ShortTy,
2405  ImplicitParamDecl::Other);
2406  // Offset of the remote source lane relative to the current lane.
2407  ImplicitParamDecl RemoteLaneOffsetArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2408  C.ShortTy, ImplicitParamDecl::Other);
2409  // Algorithm version. This is expected to be known at compile time.
2410  ImplicitParamDecl AlgoVerArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2411  C.ShortTy, ImplicitParamDecl::Other);
2412  FunctionArgList Args;
2413  Args.push_back(&ReduceListArg);
2414  Args.push_back(&LaneIDArg);
2415  Args.push_back(&RemoteLaneOffsetArg);
2416  Args.push_back(&AlgoVerArg);
2417 
2418  const CGFunctionInfo &CGFI =
2419  CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2420  auto *Fn = llvm::Function::Create(
2421  CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
2422  "_omp_reduction_shuffle_and_reduce_func", &CGM.getModule());
2423  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2424  Fn->setDoesNotRecurse();
2425 
2426  CodeGenFunction CGF(CGM);
2427  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2428 
2429  CGBuilderTy &Bld = CGF.Builder;
2430 
2431  Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2432  llvm::Type *ElemTy = CGF.ConvertTypeForMem(ReductionArrayTy);
2433  Address LocalReduceList(
2434  Bld.CreatePointerBitCastOrAddrSpaceCast(
2435  CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
2436  C.VoidPtrTy, SourceLocation()),
2437  ElemTy->getPointerTo()),
2438  ElemTy, CGF.getPointerAlign());
2439 
2440  Address AddrLaneIDArg = CGF.GetAddrOfLocalVar(&LaneIDArg);
2441  llvm::Value *LaneIDArgVal = CGF.EmitLoadOfScalar(
2442  AddrLaneIDArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
2443 
2444  Address AddrRemoteLaneOffsetArg = CGF.GetAddrOfLocalVar(&RemoteLaneOffsetArg);
2445  llvm::Value *RemoteLaneOffsetArgVal = CGF.EmitLoadOfScalar(
2446  AddrRemoteLaneOffsetArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
2447 
2448  Address AddrAlgoVerArg = CGF.GetAddrOfLocalVar(&AlgoVerArg);
2449  llvm::Value *AlgoVerArgVal = CGF.EmitLoadOfScalar(
2450  AddrAlgoVerArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
2451 
2452  // Create a local thread-private variable to host the Reduce list
2453  // from a remote lane.
2454  Address RemoteReduceList =
2455  CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.remote_reduce_list");
2456 
2457  // This loop iterates through the list of reduce elements and copies,
2458  // element by element, from a remote lane in the warp to RemoteReduceList,
2459  // hosted on the thread's stack.
2460  emitReductionListCopy(RemoteLaneToThread, CGF, ReductionArrayTy, Privates,
2461  LocalReduceList, RemoteReduceList,
2462  {/*RemoteLaneOffset=*/RemoteLaneOffsetArgVal,
2463  /*ScratchpadIndex=*/nullptr,
2464  /*ScratchpadWidth=*/nullptr});
2465 
2466  // The action to be performed on the Remote Reduce list depends
2467  // on the algorithm version.
2468  //
2469  // if (AlgoVer==0) || (AlgoVer==1 && (LaneId < Offset)) || (AlgoVer==2 &&
2470  // LaneId % 2 == 0 && Offset > 0):
2471  // do the reduction value aggregation
2472  //
2473  // The thread local variable Reduce list is mutated in place to host the
2474  // reduced data, which is the aggregated value produced from local and
2475  // remote lanes.
2476  //
2477  // Note that AlgoVer is expected to be a constant integer known at compile
2478  // time.
2479 /// When AlgoVer==0, the first conjunction evaluates to true, making
2480 /// the entire predicate true at compile time.
2481 /// When AlgoVer==1, only the second part of the second conjunction needs
2482 /// runtime evaluation; the other conjunctions evaluate to false
2483 /// at compile time.
2484 /// When AlgoVer==2, only the second part of the third conjunction needs
2485 /// runtime evaluation; the other conjunctions evaluate to false
2486 /// at compile time.
2487  llvm::Value *CondAlgo0 = Bld.CreateIsNull(AlgoVerArgVal);
2488 
2489  llvm::Value *Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
2490  llvm::Value *CondAlgo1 = Bld.CreateAnd(
2491  Algo1, Bld.CreateICmpULT(LaneIDArgVal, RemoteLaneOffsetArgVal));
2492 
2493  llvm::Value *Algo2 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(2));
2494  llvm::Value *CondAlgo2 = Bld.CreateAnd(
2495  Algo2, Bld.CreateIsNull(Bld.CreateAnd(LaneIDArgVal, Bld.getInt16(1))));
2496  CondAlgo2 = Bld.CreateAnd(
2497  CondAlgo2, Bld.CreateICmpSGT(RemoteLaneOffsetArgVal, Bld.getInt16(0)));
2498 
2499  llvm::Value *CondReduce = Bld.CreateOr(CondAlgo0, CondAlgo1);
2500  CondReduce = Bld.CreateOr(CondReduce, CondAlgo2);
2501 
2502  llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
2503  llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
2504  llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
2505  Bld.CreateCondBr(CondReduce, ThenBB, ElseBB);
2506 
2507  CGF.EmitBlock(ThenBB);
2508  // reduce_function(LocalReduceList, RemoteReduceList)
2509  llvm::Value *LocalReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2510  LocalReduceList.getPointer(), CGF.VoidPtrTy);
2511  llvm::Value *RemoteReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2512  RemoteReduceList.getPointer(), CGF.VoidPtrTy);
2513  CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
2514  CGF, Loc, ReduceFn, {LocalReduceListPtr, RemoteReduceListPtr});
2515  Bld.CreateBr(MergeBB);
2516 
2517  CGF.EmitBlock(ElseBB);
2518  Bld.CreateBr(MergeBB);
2519 
2520  CGF.EmitBlock(MergeBB);
2521 
2522  // if (AlgoVer==1 && (LaneId >= Offset)) copy Remote Reduce list to local
2523  // Reduce list.
2524  Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
2525  llvm::Value *CondCopy = Bld.CreateAnd(
2526  Algo1, Bld.CreateICmpUGE(LaneIDArgVal, RemoteLaneOffsetArgVal));
2527 
2528  llvm::BasicBlock *CpyThenBB = CGF.createBasicBlock("then");
2529  llvm::BasicBlock *CpyElseBB = CGF.createBasicBlock("else");
2530  llvm::BasicBlock *CpyMergeBB = CGF.createBasicBlock("ifcont");
2531  Bld.CreateCondBr(CondCopy, CpyThenBB, CpyElseBB);
2532 
2533  CGF.EmitBlock(CpyThenBB);
2534  emitReductionListCopy(ThreadCopy, CGF, ReductionArrayTy, Privates,
2535  RemoteReduceList, LocalReduceList);
2536  Bld.CreateBr(CpyMergeBB);
2537 
2538  CGF.EmitBlock(CpyElseBB);
2539  Bld.CreateBr(CpyMergeBB);
2540 
2541  CGF.EmitBlock(CpyMergeBB);
2542 
2543  CGF.FinishFunction();
2544  return Fn;
2545 }
2546 
2547 /// This function emits a helper that copies all the reduction variables from
2548 /// the team into the provided global buffer for the reduction variables.
2549 ///
2550 /// void list_to_global_copy_func(void *buffer, int Idx, void *reduce_data)
2551 /// For all data entries D in reduce_data:
2552 /// Copy local D to buffer.D[Idx]
2553 static llvm::Value *emitListToGlobalCopyFunction(
2554  CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
2555  QualType ReductionArrayTy, SourceLocation Loc,
2556  const RecordDecl *TeamReductionRec,
2557  const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
2558  &VarFieldMap) {
2559  ASTContext &C = CGM.getContext();
2560 
2561  // Buffer: global reduction buffer.
2562  ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2563  C.VoidPtrTy, ImplicitParamDecl::Other);
2564  // Idx: index of the buffer.
2565  ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
2566  ImplicitParamDecl::Other);
2567  // ReduceList: thread local Reduce list.
2568  ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2569  C.VoidPtrTy, ImplicitParamDecl::Other);
2570  FunctionArgList Args;
2571  Args.push_back(&BufferArg);
2572  Args.push_back(&IdxArg);
2573  Args.push_back(&ReduceListArg);
2574 
2575  const CGFunctionInfo &CGFI =
2576  CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2577  auto *Fn = llvm::Function::Create(
2578  CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
2579  "_omp_reduction_list_to_global_copy_func", &CGM.getModule());
2580  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2581  Fn->setDoesNotRecurse();
2582  CodeGenFunction CGF(CGM);
2583  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2584 
2585  CGBuilderTy &Bld = CGF.Builder;
2586 
2587  Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2588  Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
2589  llvm::Type *ElemTy = CGF.ConvertTypeForMem(ReductionArrayTy);
2590  Address LocalReduceList(
2591  Bld.CreatePointerBitCastOrAddrSpaceCast(
2592  CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
2593  C.VoidPtrTy, Loc),
2594  ElemTy->getPointerTo()),
2595  ElemTy, CGF.getPointerAlign());
2596  QualType StaticTy = C.getRecordType(TeamReductionRec);
2597  llvm::Type *LLVMReductionsBufferTy =
2598  CGM.getTypes().ConvertTypeForMem(StaticTy);
2599  llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2600  CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
2601  LLVMReductionsBufferTy->getPointerTo());
2602  llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
2603  CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
2604  /*Volatile=*/false, C.IntTy,
2605  Loc)};
2606  unsigned Idx = 0;
2607  for (const Expr *Private : Privates) {
2608  // Reduce element = LocalReduceList[i]
2609  Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
2610  llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
2611  ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
2612  // elemptr = ((CopyType*)(elemptrptr)) + I
2613  ElemTy = CGF.ConvertTypeForMem(Private->getType());
2614  ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2615  ElemPtrPtr, ElemTy->getPointerTo());
2616  Address ElemPtr =
2617  Address(ElemPtrPtr, ElemTy, C.getTypeAlignInChars(Private->getType()));
2618  const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl();
2619  // Global = Buffer.VD[Idx];
2620  const FieldDecl *FD = VarFieldMap.lookup(VD);
2621  LValue GlobLVal = CGF.EmitLValueForField(
2622  CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
2623  Address GlobAddr = GlobLVal.getAddress(CGF);
2624  llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(GlobAddr.getElementType(),
2625  GlobAddr.getPointer(), Idxs);
2626  GlobLVal.setAddress(Address(BufferPtr,
2627  CGF.ConvertTypeForMem(Private->getType()),
2628  GlobAddr.getAlignment()));
2629  switch (CGF.getEvaluationKind(Private->getType())) {
2630  case TEK_Scalar: {
2631  llvm::Value *V = CGF.EmitLoadOfScalar(
2632  ElemPtr, /*Volatile=*/false, Private->getType(), Loc,
2633  LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo());
2634  CGF.EmitStoreOfScalar(V, GlobLVal);
2635  break;
2636  }
2637  case TEK_Complex: {
2638  CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex(
2639  CGF.MakeAddrLValue(ElemPtr, Private->getType()), Loc);
2640  CGF.EmitStoreOfComplex(V, GlobLVal, /*isInit=*/false);
2641  break;
2642  }
2643  case TEK_Aggregate:
2644  CGF.EmitAggregateCopy(GlobLVal,
2645  CGF.MakeAddrLValue(ElemPtr, Private->getType()),
2646  Private->getType(), AggValueSlot::DoesNotOverlap);
2647  break;
2648  }
2649  ++Idx;
2650  }
2651 
2652  CGF.FinishFunction();
2653  return Fn;
2654 }
2655 
2656 /// This function emits a helper that reduces all the reduction variables from
2657 /// the team into the provided global buffer for the reduction variables.
2658 ///
2659 /// void list_to_global_reduce_func(void *buffer, int Idx, void *reduce_data)
2660 /// void *GlobPtrs[];
2661 /// GlobPtrs[0] = (void*)&buffer.D0[Idx];
2662 /// ...
2663 /// GlobPtrs[N] = (void*)&buffer.DN[Idx];
2664 /// reduce_function(GlobPtrs, reduce_data);
2665 static llvm::Value *emitListToGlobalReduceFunction(
2666  CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
2667  QualType ReductionArrayTy, SourceLocation Loc,
2668  const RecordDecl *TeamReductionRec,
2669  const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
2670  &VarFieldMap,
2671  llvm::Function *ReduceFn) {
2672  ASTContext &C = CGM.getContext();
2673 
2674  // Buffer: global reduction buffer.
2675  ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2676  C.VoidPtrTy, ImplicitParamDecl::Other);
2677  // Idx: index of the buffer.
2678  ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
2679  ImplicitParamDecl::Other);
2680  // ReduceList: thread local Reduce list.
2681  ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2682  C.VoidPtrTy, ImplicitParamDecl::Other);
2683  FunctionArgList Args;
2684  Args.push_back(&BufferArg);
2685  Args.push_back(&IdxArg);
2686  Args.push_back(&ReduceListArg);
2687 
2688  const CGFunctionInfo &CGFI =
2689  CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2690  auto *Fn = llvm::Function::Create(
2691  CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
2692  "_omp_reduction_list_to_global_reduce_func", &CGM.getModule());
2693  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2694  Fn->setDoesNotRecurse();
2695  CodeGenFunction CGF(CGM);
2696  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2697 
2698  CGBuilderTy &Bld = CGF.Builder;
2699 
2700  Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
2701  QualType StaticTy = C.getRecordType(TeamReductionRec);
2702  llvm::Type *LLVMReductionsBufferTy =
2703  CGM.getTypes().ConvertTypeForMem(StaticTy);
2704  llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2705  CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
2706  LLVMReductionsBufferTy->getPointerTo());
2707 
2708  // 1. Build a list of reduction variables.
2709  // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
2710  Address ReductionList =
2711  CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
2712  auto IPriv = Privates.begin();
2713  llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
2714  CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
2715  /*Volatile=*/false, C.IntTy,
2716  Loc)};
2717  unsigned Idx = 0;
2718  for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) {
2719  Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
2720  // Global = Buffer.VD[Idx];
2721  const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl();
2722  const FieldDecl *FD = VarFieldMap.lookup(VD);
2723  LValue GlobLVal = CGF.EmitLValueForField(
2724  CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
2725  Address GlobAddr = GlobLVal.getAddress(CGF);
2726  llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(
2727  GlobAddr.getElementType(), GlobAddr.getPointer(), Idxs);
2728  llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
2729  CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
2730  if ((*IPriv)->getType()->isVariablyModifiedType()) {
2731  // Store array size.
2732  ++Idx;
2733  Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
2734  llvm::Value *Size = CGF.Builder.CreateIntCast(
2735  CGF.getVLASize(
2736  CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
2737  .NumElts,
2738  CGF.SizeTy, /*isSigned=*/false);
2739  CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
2740  Elem);
2741  }
2742  }
2743 
2744  // Call reduce_function(GlobalReduceList, ReduceList)
2745  llvm::Value *GlobalReduceList =
2746  CGF.EmitCastToVoidPtr(ReductionList.getPointer());
2747  Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2748  llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
2749  AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
2750  CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
2751  CGF, Loc, ReduceFn, {GlobalReduceList, ReducedPtr});
2752  CGF.FinishFunction();
2753  return Fn;
2754 }
2755 
2756 /// This function emits a helper that copies all the reduction variables from
2757 /// the provided global buffer back into the team's thread-local reduce list.
2758 ///
2759 /// void global_to_list_copy_func(void *buffer, int Idx, void *reduce_data)
2760 /// For all data entries D in reduce_data:
2761 /// Copy buffer.D[Idx] to local D;
2762 static llvm::Value *emitGlobalToListCopyFunction(
2763  CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
2764  QualType ReductionArrayTy, SourceLocation Loc,
2765  const RecordDecl *TeamReductionRec,
2766  const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
2767  &VarFieldMap) {
2768  ASTContext &C = CGM.getContext();
2769 
2770  // Buffer: global reduction buffer.
2771  ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2772  C.VoidPtrTy, ImplicitParamDecl::Other);
2773  // Idx: index of the buffer.
2774  ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
2775  ImplicitParamDecl::Other);
2776  // ReduceList: thread local Reduce list.
2777  ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2778  C.VoidPtrTy, ImplicitParamDecl::Other);
2779  FunctionArgList Args;
2780  Args.push_back(&BufferArg);
2781  Args.push_back(&IdxArg);
2782  Args.push_back(&ReduceListArg);
2783 
2784  const CGFunctionInfo &CGFI =
2785  CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2786  auto *Fn = llvm::Function::Create(
2787  CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
2788  "_omp_reduction_global_to_list_copy_func", &CGM.getModule());
2789  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2790  Fn->setDoesNotRecurse();
2791  CodeGenFunction CGF(CGM);
2792  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2793 
2794  CGBuilderTy &Bld = CGF.Builder;
2795 
2796  Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2797  Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
2798  llvm::Type *ElemTy = CGF.ConvertTypeForMem(ReductionArrayTy);
2799  Address LocalReduceList(
2800  Bld.CreatePointerBitCastOrAddrSpaceCast(
2801  CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
2802  C.VoidPtrTy, Loc),
2803  ElemTy->getPointerTo()),
2804  ElemTy, CGF.getPointerAlign());
2805  QualType StaticTy = C.getRecordType(TeamReductionRec);
2806  llvm::Type *LLVMReductionsBufferTy =
2807  CGM.getTypes().ConvertTypeForMem(StaticTy);
2808  llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2809  CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
2810  LLVMReductionsBufferTy->getPointerTo());
2811 
2812  llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
2813  CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
2814  /*Volatile=*/false, C.IntTy,
2815  Loc)};
2816  unsigned Idx = 0;
2817  for (const Expr *Private : Privates) {
2818  // Reduce element = LocalReduceList[i]
2819  Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
2820  llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
2821  ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
2822  // elemptr = ((CopyType*)(elemptrptr)) + I
2823  ElemTy = CGF.ConvertTypeForMem(Private->getType());
2824  ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2825  ElemPtrPtr, ElemTy->getPointerTo());
2826  Address ElemPtr =
2827  Address(ElemPtrPtr, ElemTy, C.getTypeAlignInChars(Private->getType()));
2828  const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl();
2829  // Global = Buffer.VD[Idx];
2830  const FieldDecl *FD = VarFieldMap.lookup(VD);
2831  LValue GlobLVal = CGF.EmitLValueForField(
2832  CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
2833  Address GlobAddr = GlobLVal.getAddress(CGF);
2834  llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(GlobAddr.getElementType(),
2835  GlobAddr.getPointer(), Idxs);
2836  GlobLVal.setAddress(Address(BufferPtr,
2837  CGF.ConvertTypeForMem(Private->getType()),
2838  GlobAddr.getAlignment()));
2839  switch (CGF.getEvaluationKind(Private->getType())) {
2840  case TEK_Scalar: {
2841  llvm::Value *V = CGF.EmitLoadOfScalar(GlobLVal, Loc);
2842  CGF.EmitStoreOfScalar(V, ElemPtr, /*Volatile=*/false, Private->getType(),
2843  LValueBaseInfo(AlignmentSource::Type),
2844  TBAAAccessInfo());
2845  break;
2846  }
2847  case TEK_Complex: {
2848  CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex(GlobLVal, Loc);
2849  CGF.EmitStoreOfComplex(V, CGF.MakeAddrLValue(ElemPtr, Private->getType()),
2850  /*isInit=*/false);
2851  break;
2852  }
2853  case TEK_Aggregate:
2854  CGF.EmitAggregateCopy(CGF.MakeAddrLValue(ElemPtr, Private->getType()),
2855  GlobLVal, Private->getType(),
2856  AggValueSlot::DoesNotOverlap);
2857  break;
2858  }
2859  ++Idx;
2860  }
2861 
2862  CGF.FinishFunction();
2863  return Fn;
2864 }
2865 
2866 /// This function emits a helper that reduces all the reduction variables in
2867 /// the provided global buffer into the team's thread-local reduce list.
2868 ///
2869 /// void global_to_list_reduce_func(void *buffer, int Idx, void *reduce_data)
2870 /// void *GlobPtrs[];
2871 /// GlobPtrs[0] = (void*)&buffer.D0[Idx];
2872 /// ...
2873 /// GlobPtrs[N] = (void*)&buffer.DN[Idx];
2874 /// reduce_function(reduce_data, GlobPtrs);
2875 static llvm::Value *emitGlobalToListReduceFunction(
2876  CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
2877  QualType ReductionArrayTy, SourceLocation Loc,
2878  const RecordDecl *TeamReductionRec,
2879  const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
2880  &VarFieldMap,
2881  llvm::Function *ReduceFn) {
2882  ASTContext &C = CGM.getContext();
2883 
2884  // Buffer: global reduction buffer.
2885  ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2886  C.VoidPtrTy, ImplicitParamDecl::Other);
2887  // Idx: index of the buffer.
2888  ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
2889  ImplicitParamDecl::Other);
2890  // ReduceList: thread local Reduce list.
2891  ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2892  C.VoidPtrTy, ImplicitParamDecl::Other);
2893  FunctionArgList Args;
2894  Args.push_back(&BufferArg);
2895  Args.push_back(&IdxArg);
2896  Args.push_back(&ReduceListArg);
2897 
2898  const CGFunctionInfo &CGFI =
2899  CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2900  auto *Fn = llvm::Function::Create(
2901  CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
2902  "_omp_reduction_global_to_list_reduce_func", &CGM.getModule());
2903  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2904  Fn->setDoesNotRecurse();
2905  CodeGenFunction CGF(CGM);
2906  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2907 
2908  CGBuilderTy &Bld = CGF.Builder;
2909 
2910  Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
2911  QualType StaticTy = C.getRecordType(TeamReductionRec);
2912  llvm::Type *LLVMReductionsBufferTy =
2913  CGM.getTypes().ConvertTypeForMem(StaticTy);
2914  llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2915  CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
2916  LLVMReductionsBufferTy->getPointerTo());
2917 
2918  // 1. Build a list of reduction variables.
2919  // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
2920  Address ReductionList =
2921  CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
2922  auto IPriv = Privates.begin();
2923  llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
2924  CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
2925  /*Volatile=*/false, C.IntTy,
2926  Loc)};
2927  unsigned Idx = 0;
2928  for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) {
2929  Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
2930  // Global = Buffer.VD[Idx];
2931  const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl();
2932  const FieldDecl *FD = VarFieldMap.lookup(VD);
2933  LValue GlobLVal = CGF.EmitLValueForField(
2934  CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
2935  Address GlobAddr = GlobLVal.getAddress(CGF);
2936  llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(
2937  GlobAddr.getElementType(), GlobAddr.getPointer(), Idxs);
2938  llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
2939  CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
2940  if ((*IPriv)->getType()->isVariablyModifiedType()) {
2941  // Store array size.
2942  ++Idx;
2943  Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
2944  llvm::Value *Size = CGF.Builder.CreateIntCast(
2945  CGF.getVLASize(
2946  CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
2947  .NumElts,
2948  CGF.SizeTy, /*isSigned=*/false);
2949  CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
2950  Elem);
2951  }
2952  }
2953 
2954  // Call reduce_function(ReduceList, GlobalReduceList)
2955  llvm::Value *GlobalReduceList =
2956  CGF.EmitCastToVoidPtr(ReductionList.getPointer());
2957  Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2958  llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
2959  AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
2960  CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
2961  CGF, Loc, ReduceFn, {ReducedPtr, GlobalReduceList});
2962  CGF.FinishFunction();
2963  return Fn;
2964 }
2965 
2966 ///
2967 /// Design of OpenMP reductions on the GPU
2968 ///
2969 /// Consider a typical OpenMP program with one or more reduction
2970 /// clauses:
2971 ///
2972 /// float foo;
2973 /// double bar;
2974 /// #pragma omp target teams distribute parallel for \
2975 /// reduction(+:foo) reduction(*:bar)
2976 /// for (int i = 0; i < N; i++) {
2977 /// foo += A[i]; bar *= B[i];
2978 /// }
2979 ///
2980 /// where 'foo' and 'bar' are reduced across all OpenMP threads in
2981 /// all teams. In our OpenMP implementation on the NVPTX device an
2982 /// OpenMP team is mapped to a CUDA threadblock and OpenMP threads
2983 /// within a team are mapped to CUDA threads within a threadblock.
2984 /// Our goal is to efficiently aggregate values across all OpenMP
2985 /// threads such that:
2986 ///
2987 /// - the compiler and runtime are logically concise, and
2988 /// - the reduction is performed efficiently in a hierarchical
2989 /// manner as follows: within OpenMP threads in the same warp,
2990 /// across warps in a threadblock, and finally across teams on
2991 /// the NVPTX device.
2992 ///
2993 /// Introduction to Decoupling
2994 ///
2995 /// We would like to decouple the compiler and the runtime so that the
2996 /// latter is ignorant of the reduction variables (number, data types)
2997 /// and the reduction operators. This allows a simpler interface
2998 /// and implementation while still attaining good performance.
2999 ///
3000 /// Pseudocode for the aforementioned OpenMP program generated by the
3001 /// compiler is as follows:
3002 ///
3003 /// 1. Create private copies of reduction variables on each OpenMP
3004 /// thread: 'foo_private', 'bar_private'
3005 /// 2. Each OpenMP thread reduces the chunk of 'A' and 'B' assigned
3006 /// to it and writes the result in 'foo_private' and 'bar_private'
3007 /// respectively.
3008 /// 3. Call the OpenMP runtime on the GPU to reduce within a team
3009 /// and store the result on the team master:
3010 ///
3011 /// __kmpc_nvptx_parallel_reduce_nowait_v2(...,
3012 /// reduceData, shuffleReduceFn, interWarpCpyFn)
3013 ///
3014 /// where:
3015 /// struct ReduceData {
3016 /// double *foo;
3017 /// double *bar;
3018 /// } reduceData
3019 /// reduceData.foo = &foo_private
3020 /// reduceData.bar = &bar_private
3021 ///
3022 /// 'shuffleReduceFn' and 'interWarpCpyFn' are pointers to two
3023 /// auxiliary functions generated by the compiler that operate on
3024 /// variables of type 'ReduceData'. They help the runtime perform the
3025 /// algorithmic steps in a data-agnostic manner (rough sketch below).
3026 ///
3027 /// 'shuffleReduceFn' is a pointer to a function that reduces data
3028 /// of type 'ReduceData' across two OpenMP threads (lanes) in the
3029 /// same warp. It takes the following arguments as input:
3030 ///
3031 /// a. variable of type 'ReduceData' on the calling lane,
3032 /// b. its lane_id,
3033 /// c. an offset relative to the current lane_id to generate a
3034 /// remote_lane_id. The remote lane contains the second
3035 /// variable of type 'ReduceData' that is to be reduced.
3036 /// d. an algorithm version parameter determining which reduction
3037 /// algorithm to use.
3038 ///
3039 /// 'shuffleReduceFn' retrieves data from the remote lane using
3040 /// efficient GPU shuffle intrinsics and reduces, using the
3041 /// algorithm specified by the 4th parameter, the two operands
3042 /// element-wise. The result is written to the first operand.
3043 ///
3044 /// Different reduction algorithms are implemented in different
3045 /// runtime functions, all calling 'shuffleReduceFn' to perform
3046 /// the essential reduction step. Therefore, based on the 4th
3047 /// parameter, this function behaves slightly differently to
3048 /// cooperate with the runtime to ensure correctness under
3049 /// different circumstances.
3050 ///
3051 /// 'InterWarpCpyFn' is a pointer to a function that transfers
3052 /// reduced variables across warps. It tunnels, through CUDA
3053 /// shared memory, the thread-private data of type 'ReduceData'
3054 /// from lane 0 of each warp to a lane in the first warp.
3055 /// 4. Call the OpenMP runtime on the GPU to reduce across teams.
3056 /// The last team writes the global reduced value to memory.
3057 ///
3058 /// ret = __kmpc_nvptx_teams_reduce_nowait(...,
3059 /// reduceData, shuffleReduceFn, interWarpCpyFn,
3060 /// scratchpadCopyFn, loadAndReduceFn)
3061 ///
3062 /// 'scratchpadCopyFn' is a helper that stores reduced
3063 /// data from the team master to a scratchpad array in
3064 /// global memory.
3065 ///
3066 /// 'loadAndReduceFn' is a helper that loads data from
3067 /// the scratchpad array and reduces it with the input
3068 /// operand.
3069 ///
3070 /// These compiler generated functions hide address
3071 /// calculation and alignment information from the runtime.
3072 /// 5. if ret == 1:
3073 /// The team master of the last team stores the reduced
3074 /// result to the globals in memory.
3075 /// foo += reduceData.foo; bar *= reduceData.bar
3076 ///
3077 ///
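/// As a rough sketch only (the exact device-runtime prototype is the
/// 'kmp_ShuffleReductFctPtr' type used below; per-element combining is
/// elided), a compiler-generated 'shuffleReduceFn' has this shape:
///
///   void shuffleReduceFn(void *reduce_data, int16_t lane_id,
///                        int16_t lane_offset, int16_t alg_version) {
///     // for each element of the ReduceData behind 'reduce_data':
///     //   remote_elem = shuffle_down(reduce_elem, lane_offset, WARPSIZE);
///     //   combine remote_elem into reduce_elem as dictated by alg_version
///     //   and lane_id (see the per-algorithm rules below)
///   }
///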
3078 /// Warp Reduction Algorithms
3079 ///
3080 /// On the warp level, we have three algorithms implemented in the
3081 /// OpenMP runtime depending on the number of active lanes:
3082 ///
3083 /// Full Warp Reduction
3084 ///
3085 /// The reduce algorithm within a warp where all lanes are active
3086 /// is implemented in the runtime as follows:
3087 ///
3088 /// full_warp_reduce(void *reduce_data,
3089 /// kmp_ShuffleReductFctPtr ShuffleReduceFn) {
3090 /// for (int offset = WARPSIZE/2; offset > 0; offset /= 2)
3091 /// ShuffleReduceFn(reduce_data, 0, offset, 0);
3092 /// }
3093 ///
3094 /// The algorithm completes in log(2, WARPSIZE) steps.
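/// For WARPSIZE = 32, for example, the loop issues shuffle offsets
/// 16, 8, 4, 2, 1, i.e. five shuffle-reduce steps.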
3095 ///
3096 /// 'ShuffleReduceFn' is invoked here with lane_id set to 0 because the
3097 /// lane_id is not used by this algorithm, so we save instructions by not
3098 /// retrieving it from the corresponding special registers. The 4th
3099 /// parameter, which represents the version of the algorithm being used, is
3100 /// set to 0 to signify full warp reduction.
3101 ///
3102 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
3103 ///
3104 /// #reduce_elem refers to an element in the local lane's data structure
3105 /// #remote_elem is retrieved from a remote lane
3106 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
3107 /// reduce_elem = reduce_elem REDUCE_OP remote_elem;
3108 ///
3109 /// Contiguous Partial Warp Reduction
3110 ///
3111 /// This reduce algorithm is used within a warp where only the first
3112 /// 'n' (n <= WARPSIZE) lanes are active. It is typically used when the
3113 /// number of OpenMP threads in a parallel region is not a multiple of
3114 /// WARPSIZE. The algorithm is implemented in the runtime as follows:
3115 ///
3116 /// void
3117 /// contiguous_partial_reduce(void *reduce_data,
3118 /// kmp_ShuffleReductFctPtr ShuffleReduceFn,
3119 /// int size, int lane_id) {
3120 /// int curr_size;
3121 /// int offset;
3122 /// curr_size = size;
3123 /// offset = curr_size/2;
3124 /// while (offset>0) {
3125 /// ShuffleReduceFn(reduce_data, lane_id, offset, 1);
3126 /// curr_size = (curr_size+1)/2;
3127 /// offset = curr_size/2;
3128 /// }
3129 /// }
3130 ///
3131 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
3132 ///
3133 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
3134 /// if (lane_id < offset)
3135 /// reduce_elem = reduce_elem REDUCE_OP remote_elem
3136 /// else
3137 /// reduce_elem = remote_elem
3138 ///
3139 /// This algorithm assumes that the data to be reduced are located in a
3140 /// contiguous subset of lanes starting from the first. When there is
3141 /// an odd number of active lanes, the data in the last lane is not
3142 /// aggregated with any other lane's data but is instead copied over.
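/// As a small worked example, size = 6 produces ShuffleReduceFn calls with
/// offsets 3, 1, 1; after the final call lane 0 holds the fully reduced
/// value.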
3143 ///
3144 /// Dispersed Partial Warp Reduction
3145 ///
3146 /// This algorithm is used within a warp when any discontiguous subset of
3147 /// lanes are active. It is used to implement the reduction operation
3148 /// across lanes in an OpenMP simd region or in a nested parallel region.
3149 ///
3150 /// void
3151 /// dispersed_partial_reduce(void *reduce_data,
3152 /// kmp_ShuffleReductFctPtr ShuffleReduceFn) {
3153 /// int size, remote_id;
3154 /// int logical_lane_id = number_of_active_lanes_before_me() * 2;
3155 /// do {
3156 /// remote_id = next_active_lane_id_right_after_me();
3157 /// # the above function returns 0 if no active lane
3158 /// # is present right after the current lane.
3159 /// size = number_of_active_lanes_in_this_warp();
3160 /// logical_lane_id /= 2;
3161 /// ShuffleReduceFn(reduce_data, logical_lane_id,
3162 /// remote_id-1-threadIdx.x, 2);
3163 /// } while (logical_lane_id % 2 == 0 && size > 1);
3164 /// }
3165 ///
3166 /// There is no assumption made about the initial state of the reduction.
3167 /// Any number of lanes (>=1) could be active at any position. The reduction
3168 /// result is returned in the first active lane.
3169 ///
3170 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
3171 ///
3172 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
3173 /// if (lane_id % 2 == 0 && offset > 0)
3174 /// reduce_elem = reduce_elem REDUCE_OP remote_elem
3175 /// else
3176 /// reduce_elem = remote_elem
3177 ///
3178 ///
3179 /// Intra-Team Reduction
3180 ///
3181 /// This function, as implemented in the runtime call
3182 /// '__kmpc_nvptx_parallel_reduce_nowait_v2', aggregates data across OpenMP
3183 /// threads in a team. It first reduces within a warp using the
3184 /// aforementioned algorithms and then proceeds to gather all such
3185 /// reduced values at the first warp.
3186 ///
3187 /// The runtime makes use of the function 'InterWarpCpyFn', which copies
3188 /// data from each of the "warp masters" (zeroth lane of each warp, where
3189 /// warp-reduced data is held) to the zeroth warp. This step reduces (in
3190 /// a mathematical sense) the problem of reduction across warp masters in
3191 /// a block to the problem of warp reduction.
3192 ///
3193 ///
3194 /// Inter-Team Reduction
3195 ///
3196 /// Once a team has reduced its data to a single value, it is stored in
3197 /// a global scratchpad array. Since each team has a distinct slot, this
3198 /// can be done without locking.
3199 ///
3200 /// The last team to write to the scratchpad array proceeds to reduce the
3201 /// scratchpad array. One or more workers in the last team use the helper
3202 /// 'loadAndReduceFn' to load and reduce values from the array, i.e.,
3203 /// the k'th worker reduces every k'th element.
3204 ///
3205 /// Finally, a call is made to '__kmpc_nvptx_parallel_reduce_nowait_v2' to
3206 /// reduce across workers and compute a globally reduced value.
3207 ///
3208 void CGOpenMPRuntimeGPU::emitReduction(
3209  CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates,
3210  ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
3211  ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) {
3212  if (!CGF.HaveInsertPoint())
3213  return;
3214 
3215  bool ParallelReduction = isOpenMPParallelDirective(Options.ReductionKind);
3216 #ifndef NDEBUG
3217  bool TeamsReduction = isOpenMPTeamsDirective(Options.ReductionKind);
3218 #endif
3219 
3220  if (Options.SimpleReduction) {
3221  assert(!TeamsReduction && !ParallelReduction &&
3222  "Invalid reduction selection in emitReduction.");
3223  CGOpenMPRuntime::emitReduction(CGF, Loc, Privates, LHSExprs, RHSExprs,
3224  ReductionOps, Options);
3225  return;
3226  }
3227 
3228  assert((TeamsReduction || ParallelReduction) &&
3229  "Invalid reduction selection in emitReduction.");
3230 
3231  // Build res = __kmpc_reduce{_nowait}(<gtid>, <n>, sizeof(RedList),
3232  // RedList, shuffle_reduce_func, interwarp_copy_func);
3233  // or
3234  // Build res = __kmpc_reduce_teams_nowait_simple(<loc>, <gtid>, <lck>);
3235  llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
3236  llvm::Value *ThreadId = getThreadID(CGF, Loc);
3237 
3238  llvm::Value *Res;
3239  ASTContext &C = CGM.getContext();
3240  // 1. Build a list of reduction variables.
3241  // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
3242  auto Size = RHSExprs.size();
3243  for (const Expr *E : Privates) {
3244  if (E->getType()->isVariablyModifiedType())
3245  // Reserve place for array size.
3246  ++Size;
3247  }
3248  llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
3249  QualType ReductionArrayTy =
3250  C.getConstantArrayType(C.VoidPtrTy, ArraySize, nullptr, ArrayType::Normal,
3251  /*IndexTypeQuals=*/0);
3252  Address ReductionList =
3253  CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
3254  auto IPriv = Privates.begin();
3255  unsigned Idx = 0;
3256  for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
3257  Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3258  CGF.Builder.CreateStore(
3259  CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3260  CGF.EmitLValue(RHSExprs[I]).getPointer(CGF), CGF.VoidPtrTy),
3261  Elem);
3262  if ((*IPriv)->getType()->isVariablyModifiedType()) {
3263  // Store array size.
3264  ++Idx;
3265  Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3266  llvm::Value *Size = CGF.Builder.CreateIntCast(
3267  CGF.getVLASize(
3268  CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
3269  .NumElts,
3270  CGF.SizeTy, /*isSigned=*/false);
3271  CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
3272  Elem);
3273  }
3274  }
3275 
3276  llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3277  ReductionList.getPointer(), CGF.VoidPtrTy);
3278  llvm::Function *ReductionFn =
3279  emitReductionFunction(Loc, CGF.ConvertTypeForMem(ReductionArrayTy),
3280  Privates, LHSExprs, RHSExprs, ReductionOps);
3281  llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
3282  llvm::Function *ShuffleAndReduceFn = emitShuffleAndReduceFunction(
3283  CGM, Privates, ReductionArrayTy, ReductionFn, Loc);
3284  llvm::Value *InterWarpCopyFn =
3285  emitInterWarpCopyFunction(CGM, Privates, ReductionArrayTy, Loc);
3286 
3287  if (ParallelReduction) {
3288  llvm::Value *Args[] = {RTLoc,
3289  ThreadId,
3290  CGF.Builder.getInt32(RHSExprs.size()),
3291  ReductionArrayTySize,
3292  RL,
3293  ShuffleAndReduceFn,
3294  InterWarpCopyFn};
3295 
3296  Res = CGF.EmitRuntimeCall(
3297  OMPBuilder.getOrCreateRuntimeFunction(
3298  CGM.getModule(), OMPRTL___kmpc_nvptx_parallel_reduce_nowait_v2),
3299  Args);
3300  } else {
3301  assert(TeamsReduction && "expected teams reduction.");
3302  llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> VarFieldMap;
3303  llvm::SmallVector<const ValueDecl *, 4> PrivatesReductions(Privates.size());
3304  int Cnt = 0;
3305  for (const Expr *DRE : Privates) {
3306  PrivatesReductions[Cnt] = cast<DeclRefExpr>(DRE)->getDecl();
3307  ++Cnt;
3308  }
3309  const RecordDecl *TeamReductionRec = ::buildRecordForGlobalizedVars(
3310  CGM.getContext(), PrivatesReductions, llvm::None, VarFieldMap,
3311  C.getLangOpts().OpenMPCUDAReductionBufNum);
3312  TeamsReductions.push_back(TeamReductionRec);
3313  if (!KernelTeamsReductionPtr) {
3314  KernelTeamsReductionPtr = new llvm::GlobalVariable(
3315  CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/true,
3316  llvm::GlobalValue::InternalLinkage, nullptr,
3317  "_openmp_teams_reductions_buffer_$_$ptr");
3318  }
3319  llvm::Value *GlobalBufferPtr = CGF.EmitLoadOfScalar(
3320  Address(KernelTeamsReductionPtr, CGF.VoidPtrTy, CGM.getPointerAlign()),
3321  /*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
3322  llvm::Value *GlobalToBufferCpyFn = ::emitListToGlobalCopyFunction(
3323  CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap);
3324  llvm::Value *GlobalToBufferRedFn = ::emitListToGlobalReduceFunction(
3325  CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap,
3326  ReductionFn);
3327  llvm::Value *BufferToGlobalCpyFn = ::emitGlobalToListCopyFunction(
3328  CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap);
3329  llvm::Value *BufferToGlobalRedFn = ::emitGlobalToListReduceFunction(
3330  CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap,
3331  ReductionFn);
3332 
3333  llvm::Value *Args[] = {
3334  RTLoc,
3335  ThreadId,
3336  GlobalBufferPtr,
3337  CGF.Builder.getInt32(C.getLangOpts().OpenMPCUDAReductionBufNum),
3338  RL,
3339  ShuffleAndReduceFn,
3340  InterWarpCopyFn,
3341  GlobalToBufferCpyFn,
3342  GlobalToBufferRedFn,
3343  BufferToGlobalCpyFn,
3344  BufferToGlobalRedFn};
3345 
3346  Res = CGF.EmitRuntimeCall(
3347  OMPBuilder.getOrCreateRuntimeFunction(
3348  CGM.getModule(), OMPRTL___kmpc_nvptx_teams_reduce_nowait_v2),
3349  Args);
3350  }
3351 
3352  // 5. Build if (res == 1)
3353  llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.reduction.done");
3354  llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.then");
3355  llvm::Value *Cond = CGF.Builder.CreateICmpEQ(
3356  Res, llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1));
3357  CGF.Builder.CreateCondBr(Cond, ThenBB, ExitBB);
3358 
3359  // 6. Build then branch: where we have reduced values in the master
3360  // thread in each team.
3361  // __kmpc_end_reduce{_nowait}(<gtid>);
3362  // break;
3363  CGF.EmitBlock(ThenBB);
3364 
3365  // Add emission of __kmpc_end_reduce{_nowait}(<gtid>);
3366  auto &&CodeGen = [Privates, LHSExprs, RHSExprs, ReductionOps,
3367  this](CodeGenFunction &CGF, PrePostActionTy &Action) {
3368  auto IPriv = Privates.begin();
3369  auto ILHS = LHSExprs.begin();
3370  auto IRHS = RHSExprs.begin();
3371  for (const Expr *E : ReductionOps) {
3372  emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
3373  cast<DeclRefExpr>(*IRHS));
3374  ++IPriv;
3375  ++ILHS;
3376  ++IRHS;
3377  }
3378  };
3379  llvm::Value *EndArgs[] = {ThreadId};
3380  RegionCodeGenTy RCG(CodeGen);
3381  NVPTXActionTy Action(
3382  nullptr, llvm::None,
3383  OMPBuilder.getOrCreateRuntimeFunction(
3384  CGM.getModule(), OMPRTL___kmpc_nvptx_end_reduce_nowait),
3385  EndArgs);
3386  RCG.setAction(Action);
3387  RCG(CGF);
3388  // There is no need to emit line number for unconditional branch.
3389  (void)ApplyDebugLocation::CreateEmpty(CGF);
3390  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
3391 }
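// For a simple 'reduction(+:foo)' parallel reduction, the code emitted
// above behaves roughly like the following C-style sketch (illustrative
// names only, not the actual generated symbols):
//
//   void *red_list[1] = {&foo_private};
//   int res = __kmpc_nvptx_parallel_reduce_nowait_v2(
//       loc, tid, /*num_vars=*/1, sizeof(red_list), red_list,
//       shuffle_and_reduce_fn, inter_warp_copy_fn);
//   if (res == 1) {
//     foo += foo_private;               // combiner from ReductionOps
//     __kmpc_nvptx_end_reduce_nowait(tid);
//   }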
3392 
3393 const VarDecl *
3394 CGOpenMPRuntimeGPU::translateParameter(const FieldDecl *FD,
3395  const VarDecl *NativeParam) const {
3396  if (!NativeParam->getType()->isReferenceType())
3397  return NativeParam;
3398  QualType ArgType = NativeParam->getType();
3399  QualifierCollector QC;
3400  const Type *NonQualTy = QC.strip(ArgType);
3401  QualType PointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType();
3402  if (const auto *Attr = FD->getAttr<OMPCaptureKindAttr>()) {
3403  if (Attr->getCaptureKind() == OMPC_map) {
3404  PointeeTy = CGM.getContext().getAddrSpaceQualType(PointeeTy,
3405  LangAS::opencl_global);
3406  }
3407  }
3408  ArgType = CGM.getContext().getPointerType(PointeeTy);
3409  QC.addRestrict();
3410  enum { NVPTX_local_addr = 5 };
3411  QC.addAddressSpace(getLangASFromTargetAS(NVPTX_local_addr));
3412  ArgType = QC.apply(CGM.getContext(), ArgType);
3413  if (isa<ImplicitParamDecl>(NativeParam))
3414  return ImplicitParamDecl::Create(
3415  CGM.getContext(), /*DC=*/nullptr, NativeParam->getLocation(),
3416  NativeParam->getIdentifier(), ArgType, ImplicitParamDecl::Other);
3417  return ParmVarDecl::Create(
3418  CGM.getContext(),
3419  const_cast<DeclContext *>(NativeParam->getDeclContext()),
3420  NativeParam->getBeginLoc(), NativeParam->getLocation(),
3421  NativeParam->getIdentifier(), ArgType,
3422  /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
3423 }
3424 
3425 Address
3426 CGOpenMPRuntimeGPU::getParameterAddress(CodeGenFunction &CGF,
3427  const VarDecl *NativeParam,
3428  const VarDecl *TargetParam) const {
3429  assert(NativeParam != TargetParam &&
3430  NativeParam->getType()->isReferenceType() &&
3431  "Native arg must not be the same as target arg.");
3432  Address LocalAddr = CGF.GetAddrOfLocalVar(TargetParam);
3433  QualType NativeParamType = NativeParam->getType();
3434  QualifierCollector QC;
3435  const Type *NonQualTy = QC.strip(NativeParamType);
3436  QualType NativePointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType();
3437  unsigned NativePointeeAddrSpace =
3438  CGF.getContext().getTargetAddressSpace(NativePointeeTy);
3439  QualType TargetTy = TargetParam->getType();
3440  llvm::Value *TargetAddr = CGF.EmitLoadOfScalar(
3441  LocalAddr, /*Volatile=*/false, TargetTy, SourceLocation());
3442  // First cast to generic.
3443  TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3444  TargetAddr, llvm::PointerType::getWithSamePointeeType(
3445  cast<llvm::PointerType>(TargetAddr->getType()), /*AddrSpace=*/0));
3446  // Cast from generic to native address space.
3447  TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3448  TargetAddr, llvm::PointerType::getWithSamePointeeType(
3449  cast<llvm::PointerType>(TargetAddr->getType()),
3450  NativePointeeAddrSpace));
3451  Address NativeParamAddr = CGF.CreateMemTemp(NativeParamType);
3452  CGF.EmitStoreOfScalar(TargetAddr, NativeParamAddr, /*Volatile=*/false,
3453  NativeParamType);
3454  return NativeParamAddr;
3455 }
3456 
3457 void CGOpenMPRuntimeGPU::emitOutlinedFunctionCall(
3458  CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn,
3459  ArrayRef<llvm::Value *> Args) const {
3460  SmallVector<llvm::Value *, 4> TargetArgs;
3461  TargetArgs.reserve(Args.size());
3462  auto *FnType = OutlinedFn.getFunctionType();
3463  for (unsigned I = 0, E = Args.size(); I < E; ++I) {
3464  if (FnType->isVarArg() && FnType->getNumParams() <= I) {
3465  TargetArgs.append(std::next(Args.begin(), I), Args.end());
3466  break;
3467  }
3468  llvm::Type *TargetType = FnType->getParamType(I);
3469  llvm::Value *NativeArg = Args[I];
3470  if (!TargetType->isPointerTy()) {
3471  TargetArgs.emplace_back(NativeArg);
3472  continue;
3473  }
3474  llvm::Value *TargetArg = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3475  NativeArg, llvm::PointerType::getWithSamePointeeType(
3476  cast<llvm::PointerType>(NativeArg->getType()), /*AddrSpace*/ 0));
3477  TargetArgs.emplace_back(
3478  CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TargetArg, TargetType));
3479  }
3480  CGOpenMPRuntime::emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, TargetArgs);
3481 }
3482 
3483 /// Emit a function that wraps the outlined parallel region
3484 /// and controls the arguments which are passed to this function.
3485 /// The wrapper ensures that the outlined function is called
3486 /// with the correct arguments when data is shared.
3487 llvm::Function *CGOpenMPRuntimeGPU::createParallelDataSharingWrapper(
3488  llvm::Function *OutlinedParallelFn, const OMPExecutableDirective &D) {
3489  ASTContext &Ctx = CGM.getContext();
3490  const auto &CS = *D.getCapturedStmt(OMPD_parallel);
3491 
3492  // Create a function that takes as argument the source thread.
3493  FunctionArgList WrapperArgs;
3494  QualType Int16QTy =
3495  Ctx.getIntTypeForBitwidth(/*DestWidth=*/16, /*Signed=*/false);
3496  QualType Int32QTy =
3497  Ctx.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false);
3498  ImplicitParamDecl ParallelLevelArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(),
3499  /*Id=*/nullptr, Int16QTy,
3500  ImplicitParamDecl::Other);
3501  ImplicitParamDecl WrapperArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(),
3502  /*Id=*/nullptr, Int32QTy,
3503  ImplicitParamDecl::Other);
3504  WrapperArgs.emplace_back(&ParallelLevelArg);
3505  WrapperArgs.emplace_back(&WrapperArg);
3506 
3507  const CGFunctionInfo &CGFI =
3508  CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, WrapperArgs);
3509 
3510  auto *Fn = llvm::Function::Create(
3511  CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3512  Twine(OutlinedParallelFn->getName(), "_wrapper"), &CGM.getModule());
3513 
3514  // Ensure we do not inline the function. This is trivially true for the ones
3515 // passed to __kmpc_fork_call but the ones called in serialized regions
3516 // could be inlined. This is not perfect but it is closer to the invariant
3517  // we want, namely, every data environment starts with a new function.
3518  // TODO: We should pass the if condition to the runtime function and do the
3519  // handling there. Much cleaner code.
3520  Fn->addFnAttr(llvm::Attribute::NoInline);
3521 
3522  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3523  Fn->setLinkage(llvm::GlobalValue::InternalLinkage);
3524  Fn->setDoesNotRecurse();
3525 
3526  CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
3527  CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, Fn, CGFI, WrapperArgs,
3528  D.getBeginLoc(), D.getBeginLoc());
3529 
3530  const auto *RD = CS.getCapturedRecordDecl();
3531  auto CurField = RD->field_begin();
3532 
3533  Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
3534  /*Name=*/".zero.addr");
3535  CGF.Builder.CreateStore(CGF.Builder.getInt32(/*C*/ 0), ZeroAddr);
3536  // Get the array of arguments.
3537  SmallVector<llvm::Value *, 8> Args;
3538 
3539  Args.emplace_back(CGF.GetAddrOfLocalVar(&WrapperArg).getPointer());
3540  Args.emplace_back(ZeroAddr.getPointer());
3541 
3542  CGBuilderTy &Bld = CGF.Builder;
3543  auto CI = CS.capture_begin();
3544 
3545  // Use global memory for data sharing.
3546  // Handle passing of global args to workers.
3547  Address GlobalArgs =
3548  CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy, "global_args");
3549  llvm::Value *GlobalArgsPtr = GlobalArgs.getPointer();
3550  llvm::Value *DataSharingArgs[] = {GlobalArgsPtr};
3551  CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
3552  CGM.getModule(), OMPRTL___kmpc_get_shared_variables),
3553  DataSharingArgs);
3554 
3555  // Retrieve the shared variables from the list of references returned
3556  // by the runtime. Pass the variables to the outlined function.
3557  Address SharedArgListAddress = Address::invalid();
3558  if (CS.capture_size() > 0 ||
3559  isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
3560  SharedArgListAddress = CGF.EmitLoadOfPointer(
3561  GlobalArgs, CGF.getContext()
3562  .getPointerType(CGF.getContext().VoidPtrTy)
3563  .castAs<PointerType>());
3564  }
3565  unsigned Idx = 0;
3566  if (isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
3567  Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
3568  Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
3569  Src, CGF.SizeTy->getPointerTo(), CGF.SizeTy);
3570  llvm::Value *LB = CGF.EmitLoadOfScalar(
3571  TypedAddress,
3572  /*Volatile=*/false,
3573  CGF.getContext().getPointerType(CGF.getContext().getSizeType()),
3574  cast<OMPLoopDirective>(D).getLowerBoundVariable()->getExprLoc());
3575  Args.emplace_back(LB);
3576  ++Idx;
3577  Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
3578  TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
3579  Src, CGF.SizeTy->getPointerTo(), CGF.SizeTy);
3580  llvm::Value *UB = CGF.EmitLoadOfScalar(
3581  TypedAddress,
3582  /*Volatile=*/false,
3583  CGF.getContext().getPointerType(CGF.getContext().getSizeType()),
3584  cast<OMPLoopDirective>(D).getUpperBoundVariable()->getExprLoc());
3585  Args.emplace_back(UB);
3586  ++Idx;
3587  }
3588  if (CS.capture_size() > 0) {
3589  ASTContext &CGFContext = CGF.getContext();
3590  for (unsigned I = 0, E = CS.capture_size(); I < E; ++I, ++CI, ++CurField) {
3591  QualType ElemTy = CurField->getType();
3592  Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, I + Idx);
3593  Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
3594  Src, CGF.ConvertTypeForMem(CGFContext.getPointerType(ElemTy)),
3595  CGF.ConvertTypeForMem(ElemTy));
3596  llvm::Value *Arg = CGF.EmitLoadOfScalar(TypedAddress,
3597  /*Volatile=*/false,
3598  CGFContext.getPointerType(ElemTy),
3599  CI->getLocation());
3600  if (CI->capturesVariableByCopy() &&
3601  !CI->getCapturedVar()->getType()->isAnyPointerType()) {
3602  Arg = castValueToType(CGF, Arg, ElemTy, CGFContext.getUIntPtrType(),
3603  CI->getLocation());
3604  }
3605  Args.emplace_back(Arg);
3606  }
3607  }
3608 
3609  emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedParallelFn, Args);
3610  CGF.FinishFunction();
3611  return Fn;
3612 }
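// Roughly, the wrapper built above has this shape (illustrative names; the
// real symbol is the outlined function's name suffixed with "_wrapper"):
//
//   void outlined_wrapper(uint16_t parallel_level, uint32_t thread_id) {
//     int32_t zero = 0;
//     void **shared_args;
//     __kmpc_get_shared_variables(&shared_args);
//     // Loop bounds (for loop-bound-sharing directives) and the captured
//     // variables are unpacked from shared_args in order, then:
//     outlined(&thread_id, &zero, arg0, ..., argN);
//   }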
3613 
3614 void CGOpenMPRuntimeGPU::emitFunctionProlog(CodeGenFunction &CGF,
3615  const Decl *D) {
3616  if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic)
3617  return;
3618 
3619  assert(D && "Expected function or captured|block decl.");
3620  assert(FunctionGlobalizedDecls.count(CGF.CurFn) == 0 &&
3621  "Function is registered already.");
3622  assert((!TeamAndReductions.first || TeamAndReductions.first == D) &&
3623  "Team is set but not processed.");
3624  const Stmt *Body = nullptr;
3625  bool NeedToDelayGlobalization = false;
3626  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
3627  Body = FD->getBody();
3628  } else if (const auto *BD = dyn_cast<BlockDecl>(D)) {
3629  Body = BD->getBody();
3630  } else if (const auto *CD = dyn_cast<CapturedDecl>(D)) {
3631  Body = CD->getBody();
3632  NeedToDelayGlobalization = CGF.CapturedStmtInfo->getKind() == CR_OpenMP;
3633  if (NeedToDelayGlobalization &&
3634  getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
3635  return;
3636  }
3637  if (!Body)
3638  return;
3639  CheckVarsEscapingDeclContext VarChecker(CGF, TeamAndReductions.second);
3640  VarChecker.Visit(Body);
3641  const RecordDecl *GlobalizedVarsRecord =
3642  VarChecker.getGlobalizedRecord(IsInTTDRegion);
3643  TeamAndReductions.first = nullptr;
3644  TeamAndReductions.second.clear();
3645  ArrayRef<const ValueDecl *> EscapedVariableLengthDecls =
3646  VarChecker.getEscapedVariableLengthDecls();
3647  if (!GlobalizedVarsRecord && EscapedVariableLengthDecls.empty())
3648  return;
3649  auto I = FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
3650  I->getSecond().MappedParams =
3651  std::make_unique<CodeGenFunction::OMPMapVars>();
3652  I->getSecond().EscapedParameters.insert(
3653  VarChecker.getEscapedParameters().begin(),
3654  VarChecker.getEscapedParameters().end());
3655  I->getSecond().EscapedVariableLengthDecls.append(
3656  EscapedVariableLengthDecls.begin(), EscapedVariableLengthDecls.end());
3657  DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
3658  for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
3659  assert(VD->isCanonicalDecl() && "Expected canonical declaration");
3660  Data.insert(std::make_pair(VD, MappedVarData()));
3661  }
3662  if (!IsInTTDRegion && !NeedToDelayGlobalization && !IsInParallelRegion) {
3663  CheckVarsEscapingDeclContext VarChecker(CGF, llvm::None);
3664  VarChecker.Visit(Body);
3665  I->getSecond().SecondaryLocalVarData.emplace();
3666  DeclToAddrMapTy &Data = I->getSecond().SecondaryLocalVarData.getValue();
3667  for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
3668  assert(VD->isCanonicalDecl() && "Expected canonical declaration");
3669  Data.insert(std::make_pair(VD, MappedVarData()));
3670  }
3671  }
3672  if (!NeedToDelayGlobalization) {
3673  emitGenericVarsProlog(CGF, D->getBeginLoc(), /*WithSPMDCheck=*/true);
3674  struct GlobalizationScope final : EHScopeStack::Cleanup {
3675  GlobalizationScope() = default;
3676 
3677  void Emit(CodeGenFunction &CGF, Flags flags) override {
3678  static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime())
3679  .emitGenericVarsEpilog(CGF, /*WithSPMDCheck=*/true);
3680  }
3681  };
3682  CGF.EHStack.pushCleanup<GlobalizationScope>(NormalAndEHCleanup);
3683  }
3684 }
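// As an example of what the prolog handles, in a generic-mode region like
//
//   #pragma omp target teams
//   {
//     int x = 0;
//     #pragma omp parallel
//     x += 1;
//   }
//
// 'x' escapes into the parallel region, so it is moved from the thread
// stack into globalized storage that the worker threads can reach.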
3685 
3686 Address CGOpenMPRuntimeGPU::getAddressOfLocalVariable(CodeGenFunction &CGF,
3687  const VarDecl *VD) {
3688  if (VD && VD->hasAttr<OMPAllocateDeclAttr>()) {
3689  const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
3690  auto AS = LangAS::Default;
3691  switch (A->getAllocatorType()) {
3692  // Use the default allocator here as by default local vars are
3693  // threadlocal.
3694  case OMPAllocateDeclAttr::OMPNullMemAlloc:
3695  case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
3696  case OMPAllocateDeclAttr::OMPThreadMemAlloc:
3697  case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
3698  case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
3699  // Follow the user decision - use default allocation.
3700  return Address::invalid();
3701  case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
3702  // TODO: implement support for user-defined allocators.
3703  return Address::invalid();
3704  case OMPAllocateDeclAttr::OMPConstMemAlloc:
3705  AS = LangAS::cuda_constant;
3706  break;
3707  case OMPAllocateDeclAttr::OMPPTeamMemAlloc:
3708  AS = LangAS::cuda_shared;
3709  break;
3710  case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
3711  case OMPAllocateDeclAttr::OMPCGroupMemAlloc:
3712  break;
3713  }
3714  llvm::Type *VarTy = CGF.ConvertTypeForMem(VD->getType());
3715  auto *GV = new llvm::GlobalVariable(
3716  CGM.getModule(), VarTy, /*isConstant=*/false,
3717  llvm::GlobalValue::InternalLinkage, llvm::Constant::getNullValue(VarTy),
3718  VD->getName(),
3719  /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal,
3720  CGM.getContext().getTargetAddressSpace(AS));
3721  CharUnits Align = CGM.getContext().getDeclAlign(VD);
3722  GV->setAlignment(Align.getAsAlign());
3723  return Address(
3724  CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3725  GV, VarTy->getPointerTo(CGM.getContext().getTargetAddressSpace(
3726  VD->getType().getAddressSpace()))),
3727  VarTy, Align);
3728  }
3729 
3730  if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic)
3731  return Address::invalid();
3732 
3733  VD = VD->getCanonicalDecl();
3734  auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
3735  if (I == FunctionGlobalizedDecls.end())
3736  return Address::invalid();
3737  auto VDI = I->getSecond().LocalVarData.find(VD);
3738  if (VDI != I->getSecond().LocalVarData.end())
3739  return VDI->second.PrivateAddr;
3740  if (VD->hasAttrs()) {
3741  for (specific_attr_iterator<OMPReferencedVarAttr> IT(VD->attr_begin()),
3742  E(VD->attr_end());
3743  IT != E; ++IT) {
3744  auto VDI = I->getSecond().LocalVarData.find(
3745  cast<VarDecl>(cast<DeclRefExpr>(IT->getRef())->getDecl())
3746  ->getCanonicalDecl());
3747  if (VDI != I->getSecond().LocalVarData.end())
3748  return VDI->second.PrivateAddr;
3749  }
3750  }
3751 
3752  return Address::invalid();
3753 }
3754 
3755 void CGOpenMPRuntimeGPU::functionFinished(CodeGenFunction &CGF) {
3756  FunctionGlobalizedDecls.erase(CGF.CurFn);
3757  CGOpenMPRuntime::functionFinished(CGF);
3758 }
3759 
3760 void CGOpenMPRuntimeGPU::getDefaultDistScheduleAndChunk(
3761  CodeGenFunction &CGF, const OMPLoopDirective &S,
3762  OpenMPDistScheduleClauseKind &ScheduleKind,
3763  llvm::Value *&Chunk) const {
3764  auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
3765  if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD) {
3766  ScheduleKind = OMPC_DIST_SCHEDULE_static;
3767  Chunk = CGF.EmitScalarConversion(
3768  RT.getGPUNumThreads(CGF),
3769  CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
3770  S.getIterationVariable()->getType(), S.getBeginLoc());
3771  return;
3772  }
3773  CGOpenMPRuntime::getDefaultDistScheduleAndChunk(
3774  CGF, S, ScheduleKind, Chunk);
3775 }
3776 
3777 void CGOpenMPRuntimeGPU::getDefaultScheduleAndChunk(
3778  CodeGenFunction &CGF, const OMPLoopDirective &S,
3779  OpenMPScheduleClauseKind &ScheduleKind,
3780  const Expr *&ChunkExpr) const {
3781  ScheduleKind = OMPC_SCHEDULE_static;
3782  // Chunk size is 1 in this case.
3783  llvm::APInt ChunkSize(32, 1);
3784  ChunkExpr = IntegerLiteral::Create(CGF.getContext(), ChunkSize,
3785  CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
3786  SourceLocation());
3787 }
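// In other words, the loop is compiled as if the user had written an
// explicit 'schedule(static, 1)' clause on the directive.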
3788 
3789 void CGOpenMPRuntimeGPU::adjustTargetSpecificDataForLambdas(
3790  CodeGenFunction &CGF, const OMPExecutableDirective &D) const {
3791  assert(isOpenMPTargetExecutionDirective(D.getDirectiveKind()) &&
3792  " Expected target-based directive.");
3793  const CapturedStmt *CS = D.getCapturedStmt(OMPD_target);
3794  for (const CapturedStmt::Capture &C : CS->captures()) {
3795  // Capture variables captured by reference in lambdas for target-based
3796  // directives.
3797  if (!C.capturesVariable())
3798  continue;
3799  const VarDecl *VD = C.getCapturedVar();
3800  const auto *RD = VD->getType()
3801  .getCanonicalType()
3802  .getNonReferenceType()
3803  ->getAsCXXRecordDecl();
3804  if (!RD || !RD->isLambda())
3805  continue;
3806  Address VDAddr = CGF.GetAddrOfLocalVar(VD);
3807  LValue VDLVal;
3808  if (VD->getType().getCanonicalType()->isReferenceType())
3809  VDLVal = CGF.EmitLoadOfReferenceLValue(VDAddr, VD->getType());
3810  else
3811  VDLVal = CGF.MakeAddrLValue(
3812  VDAddr, VD->getType().getCanonicalType().getNonReferenceType());
3813  llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
3814  FieldDecl *ThisCapture = nullptr;
3815  RD->getCaptureFields(Captures, ThisCapture);
3816  if (ThisCapture && CGF.CapturedStmtInfo->isCXXThisExprCaptured()) {
3817  LValue ThisLVal =
3818  CGF.EmitLValueForFieldInitialization(VDLVal, ThisCapture);
3819  llvm::Value *CXXThis = CGF.LoadCXXThis();
3820  CGF.EmitStoreOfScalar(CXXThis, ThisLVal);
3821  }
3822  for (const LambdaCapture &LC : RD->captures()) {
3823  if (LC.getCaptureKind() != LCK_ByRef)
3824  continue;
3825  const VarDecl *VD = LC.getCapturedVar();
3826  if (!CS->capturesVariable(VD))
3827  continue;
3828  auto It = Captures.find(VD);
3829  assert(It != Captures.end() && "Found lambda capture without field.");
3830  LValue VarLVal = CGF.EmitLValueForFieldInitialization(VDLVal, It->second);
3831  Address VDAddr = CGF.GetAddrOfLocalVar(VD);
3832  if (VD->getType().getCanonicalType()->isReferenceType())
3833  VDAddr = CGF.EmitLoadOfReferenceLValue(VDAddr,
3834  VD->getType().getCanonicalType())
3835  .getAddress(CGF);
3836  CGF.EmitStoreOfScalar(VDAddr.getPointer(), VarLVal);
3837  }
3838  }
3839 }
3840 
3841 bool CGOpenMPRuntimeGPU::hasAllocateAttributeForGlobalVar(const VarDecl *VD,
3842  LangAS &AS) {
3843  if (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())
3844  return false;
3845  const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
3846  switch(A->getAllocatorType()) {
3847  case OMPAllocateDeclAttr::OMPNullMemAlloc:
3848  case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
3849  // Not supported, fallback to the default mem space.
3850  case OMPAllocateDeclAttr::OMPThreadMemAlloc:
3851  case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
3852  case OMPAllocateDeclAttr::OMPCGroupMemAlloc:
3853  case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
3854  case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
3855  AS = LangAS::Default;
3856  return true;
3857  case OMPAllocateDeclAttr::OMPConstMemAlloc:
3858  AS = LangAS::cuda_constant;
3859  return true;
3860  case OMPAllocateDeclAttr::OMPPTeamMemAlloc:
3861  AS = LangAS::cuda_shared;
3862  return true;
3863  case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
3864  llvm_unreachable("Expected predefined allocator for the variables with the "
3865  "static storage.");
3866  }
3867  return false;
3868 }
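// For example (using the predefined allocator names from <omp.h>), a
// declaration such as
//
//   static int scratch;
//   #pragma omp allocate(scratch) allocator(omp_pteam_mem_alloc)
//
// corresponds to OMPPTeamMemAlloc above and is therefore placed in CUDA
// shared memory (LangAS::cuda_shared).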
3869 
3870 // Get current CudaArch and ignore any unknown values
3871 static CudaArch getCudaArch(CodeGenModule &CGM) {
3872  if (!CGM.getTarget().hasFeature("ptx"))
3873  return CudaArch::UNKNOWN;
3874  for (const auto &Feature : CGM.getTarget().getTargetOpts().FeatureMap) {
3875  if (Feature.getValue()) {
3876  CudaArch Arch = StringToCudaArch(Feature.getKey());
3877  if (Arch != CudaArch::UNKNOWN)
3878  return Arch;
3879  }
3880  }
3881  return CudaArch::UNKNOWN;
3882 }
3883 
3884 /// Check to see if target architecture supports unified addressing which is
3885 /// a restriction for OpenMP requires clause "unified_shared_memory".
3886 void CGOpenMPRuntimeGPU::processRequiresDirective(
3887  const OMPRequiresDecl *D) {
3888  for (const OMPClause *Clause : D->clauselists()) {
3889  if (Clause->getClauseKind() == OMPC_unified_shared_memory) {
3890  CudaArch Arch = getCudaArch(CGM);
3891  switch (Arch) {
3892  case CudaArch::SM_20:
3893  case CudaArch::SM_21:
3894  case CudaArch::SM_30:
3895  case CudaArch::SM_32:
3896  case CudaArch::SM_35:
3897  case CudaArch::SM_37:
3898  case CudaArch::SM_50:
3899  case CudaArch::SM_52:
3900  case CudaArch::SM_53: {
3901  SmallString<256> Buffer;
3902  llvm::raw_svector_ostream Out(Buffer);
3903  Out << "Target architecture " << CudaArchToString(Arch)
3904  << " does not support unified addressing";
3905  CGM.Error(Clause->getBeginLoc(), Out.str());
3906  return;
3907  }
3908  case CudaArch::SM_60:
3909  case CudaArch::SM_61:
3910  case CudaArch::SM_62:
3911  case CudaArch::SM_70:
3912  case CudaArch::SM_72:
3913  case CudaArch::SM_75:
3914  case CudaArch::SM_80:
3915  case CudaArch::SM_86:
3916  case CudaArch::GFX600:
3917  case CudaArch::GFX601:
3918  case CudaArch::GFX602:
3919  case CudaArch::GFX700:
3920  case CudaArch::GFX701:
3921  case CudaArch::GFX702:
3922  case CudaArch::GFX703:
3923  case CudaArch::GFX704:
3924  case CudaArch::GFX705:
3925  case CudaArch::GFX801:
3926  case CudaArch::GFX802:
3927  case CudaArch::GFX803:
3928  case CudaArch::GFX805:
3929  case CudaArch::GFX810:
3930  case CudaArch::GFX900:
3931  case CudaArch::GFX902:
3932  case CudaArch::GFX904:
3933  case CudaArch::GFX906:
3934  case CudaArch::GFX908:
3935  case CudaArch::GFX909:
3936  case CudaArch::GFX90a:
3937  case CudaArch::GFX90c:
3938  case CudaArch::GFX940:
3939  case CudaArch::GFX1010:
3940  case CudaArch::GFX1011:
3941  case CudaArch::GFX1012:
3942  case CudaArch::GFX1013:
3943  case CudaArch::GFX1030:
3944  case CudaArch::GFX1031:
3945  case CudaArch::GFX1032:
3946  case CudaArch::GFX1033:
3947  case CudaArch::GFX1034:
3948  case CudaArch::GFX1035:
3949  case CudaArch::GFX1036:
3950  case CudaArch::GFX1100:
3951  case CudaArch::GFX1101:
3952  case CudaArch::GFX1102:
3953  case CudaArch::GFX1103:
3954  case CudaArch::Generic:
3955  case CudaArch::UNUSED:
3956  case CudaArch::UNKNOWN:
3957  break;
3958  case CudaArch::LAST:
3959  llvm_unreachable("Unexpected Cuda arch.");
3960  }
3961  }
3962  }
3963  CGOpenMPRuntime::processRequiresDirective(D);
3964 }
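// For example, a translation unit containing
//   #pragma omp requires unified_shared_memory
// is rejected with the diagnostic above when compiled for sm_35, while
// sm_60 and newer (and the listed AMD GPUs) are accepted.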
3965 
3966 void CGOpenMPRuntimeGPU::clear() {
3967 
3968  if (!TeamsReductions.empty()) {
3969  ASTContext &C = CGM.getContext();
3970  RecordDecl *StaticRD = C.buildImplicitRecord(
3971  "_openmp_teams_reduction_type_$_", RecordDecl::TagKind::TTK_Union);
3972  StaticRD->startDefinition();
3973  for (const RecordDecl *TeamReductionRec : TeamsReductions) {
3974  QualType RecTy = C.getRecordType(TeamReductionRec);
3975  auto *Field = FieldDecl::Create(
3976  C, StaticRD, SourceLocation(), SourceLocation(), nullptr, RecTy,
3977  C.getTrivialTypeSourceInfo(RecTy, SourceLocation()),
3978  /*BW=*/nullptr, /*Mutable=*/false,
3979  /*InitStyle=*/ICIS_NoInit);
3980  Field->setAccess(AS_public);
3981  StaticRD->addDecl(Field);
3982  }
3983  StaticRD->completeDefinition();
3984  QualType StaticTy = C.getRecordType(StaticRD);
3985  llvm::Type *LLVMReductionsBufferTy =
3986  CGM.getTypes().ConvertTypeForMem(StaticTy);
3987  // FIXME: nvlink does not handle weak linkage correctly (object with the
3988  // different size are reported as erroneous).
3989  // Restore CommonLinkage as soon as nvlink is fixed.
3990  auto *GV = new llvm::GlobalVariable(
3991  CGM.getModule(), LLVMReductionsBufferTy,
3992  /*isConstant=*/false, llvm::GlobalValue::InternalLinkage,
3993  llvm::Constant::getNullValue(LLVMReductionsBufferTy),
3994  "_openmp_teams_reductions_buffer_$_");
3995  KernelTeamsReductionPtr->setInitializer(
3996  llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV,
3997  CGM.VoidPtrTy));
3998  }
3999  CGOpenMPRuntime::clear();
4000 }
4001 
4002 llvm::Value *CGOpenMPRuntimeGPU::getGPUNumThreads(CodeGenFunction &CGF) {
4003  CGBuilderTy &Bld = CGF.Builder;
4004  llvm::Module *M = &CGF.CGM.getModule();
4005  const char *LocSize = "__kmpc_get_hardware_num_threads_in_block";
4006  llvm::Function *F = M->getFunction(LocSize);
4007  if (!F) {
4008  F = llvm::Function::Create(
4009  llvm::FunctionType::get(CGF.Int32Ty, llvm::None, false),
4010  llvm::GlobalVariable::ExternalLinkage, LocSize, &CGF.CGM.getModule());
4011  }
4012  return Bld.CreateCall(F, llvm::None, "nvptx_num_threads");
4013 }
4014 
4015 llvm::Value *CGOpenMPRuntimeGPU::getGPUThreadID(CodeGenFunction &CGF) {
4016  ArrayRef<llvm::Value *> Args{};
4017  return CGF.EmitRuntimeCall(
4018  OMPBuilder.getOrCreateRuntimeFunction(
4019  CGM.getModule(), OMPRTL___kmpc_get_hardware_thread_id_in_block),
4020  Args);
4021 }
4022 
4023 llvm::Value *CGOpenMPRuntimeGPU::getGPUWarpSize(CodeGenFunction &CGF) {
4024  ArrayRef<llvm::Value *> Args{};
4025  return CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
4026  CGM.getModule(), OMPRTL___kmpc_get_warp_size),
4027  Args);
4028 }
clang::QualifierCollector::strip
const Type * strip(QualType type)
Collect any qualifiers on the given type and return an unqualified type.
Definition: Type.h:6447
clang::CodeGen::CGOpenMPRuntimeGPU::CGOpenMPRuntimeGPU
CGOpenMPRuntimeGPU(CodeGenModule &CGM)
Definition: CGOpenMPRuntimeGPU.cpp:1200
clang::CudaArch::SM_35
@ SM_35
clang::InternalLinkage
@ InternalLinkage
Internal linkage, which indicates that the entity can be referred to from within the translation unit...
Definition: Linkage.h:31
clang::CudaArch::GFX805
@ GFX805
clang::LangAS::cuda_shared
@ cuda_shared
clang::CodeGen::CGOpenMPRuntimeGPU::emitOutlinedFunctionCall
void emitOutlinedFunctionCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn, ArrayRef< llvm::Value * > Args=llvm::None) const override
Emits call of the outlined function with the provided arguments, translating these arguments to corre...
Definition: CGOpenMPRuntimeGPU.cpp:3457
clang::CodeGen::CodeGenFunction::ConvertTypeForMem
llvm::Type * ConvertTypeForMem(QualType T)
Definition: CodeGenFunction.cpp:208
clang::ASTContext::getTypeSizeInChars
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
Definition: ASTContext.cpp:2462
supportsSPMDExecutionMode
static bool supportsSPMDExecutionMode(ASTContext &Ctx, const OMPExecutableDirective &D)
Definition: CGOpenMPRuntimeGPU.cpp:669
clang::CudaArch::SM_70
@ SM_70
clang::CudaArch::GFX704
@ GFX704
clang::prec::Conditional
@ Conditional
Definition: OperatorPrecedence.h:30
clang::CodeGen::CodeGenTypeCache::SizeTy
llvm::IntegerType * SizeTy
Definition: CodeGenTypeCache.h:50
max
__DEVICE__ int max(int __a, int __b)
Definition: __clang_cuda_math.h:196
clang::CodeGen::CodeGenFunction::getTypeSize
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
Definition: CGStmtOpenMP.cpp:305
clang::LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE
LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE()
clang::CodeGen::LValueBaseInfo
Definition: CGValue.h:154
clang::OMPLastprivateClause
This represents clause 'lastprivate' in the '#pragma omp ...' directives.
Definition: OpenMPClause.h:2728
clang::CodeGen::CodeGenFunction::EmitScalarConversion
llvm::Value * EmitScalarConversion(llvm::Value *Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified type to the specified destination type, both of which are LLVM s...
Definition: CGExprScalar.cpp:4879
clang::CodeGen::CodeGenTypeCache::Int8PtrTy
llvm::PointerType * Int8PtrTy
Definition: CodeGenTypeCache.h:57
clang::Decl::getBeginLoc
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: DeclBase.h:417
clang::CodeGen::CodeGenFunction::LoadCXXThis
llvm::Value * LoadCXXThis()
LoadCXXThis - Load the value of 'this'.
Definition: CodeGenFunction.h:2804
clang::CharUnits::getAsAlign
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition: CharUnits.h:183
clang::CudaArch::GFX1035
@ GFX1035
clang::CodeGen::CGOpenMPRuntime::getThreadID
llvm::Value * getThreadID(CodeGenFunction &CGF, SourceLocation Loc)
Gets thread id value for the current thread.
Definition: CGOpenMPRuntime.cpp:1394
clang::interp::APInt
llvm::APInt APInt
Definition: Integral.h:27
setPropertyExecutionMode
static void setPropertyExecutionMode(CodeGenModule &CGM, StringRef Name, bool Mode)
Definition: CGOpenMPRuntimeGPU.cpp:1111
clang::CodeGen::CodeGenTypeCache::IntTy
llvm::IntegerType * IntTy
int
Definition: CodeGenTypeCache.h:42
clang::CodeGen::TEK_Aggregate
@ TEK_Aggregate
Definition: CodeGenFunction.h:109
clang::CodeGen::CGOpenMPRuntime
Definition: CGOpenMPRuntime.h:228
clang::CudaArch::GFX601
@ GFX601
clang::DeclaratorDecl::getBeginLoc
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Decl.h:778
clang::Expr::isLValue
bool isLValue() const
isLValue - True if this expression is an "l-value" according to the rules of the current language.
Definition: Expr.h:270
clang::CodeGen::CGOpenMPRuntime::emitSingleReductionCombiner
void emitSingleReductionCombiner(CodeGenFunction &CGF, const Expr *ReductionOp, const Expr *PrivateRef, const DeclRefExpr *LHS, const DeclRefExpr *RHS)
Emits single reduction combiner.
Definition: CGOpenMPRuntime.cpp:5397
clang::Qualifiers::addRestrict
void addRestrict()
Definition: Type.h:277
CodeGenFunction.h
clang::DeclContext
DeclContext - This is used only as base class of specific decl types that can act as declaration cont...
Definition: DeclBase.h:1356
clang::CapturedStmt::captures
capture_range captures()
Definition: Stmt.h:3639
clang::CodeGen::CGOpenMPRuntimeGPU::EM_SPMD
@ EM_SPMD
SPMD execution mode (all threads are worker threads).
Definition: CGOpenMPRuntimeGPU.h:29
clang::LambdaCapture
Describes the capture of a variable or of this, or of a C++1y init-capture.
Definition: LambdaCapture.h:25
clang::OMPExecutableDirective::getClausesOfKind
static llvm::iterator_range< specific_clause_iterator< SpecificClause > > getClausesOfKind(ArrayRef< OMPClause * > Clauses)
Definition: StmtOpenMP.h:445
clang::Decl::hasAttr
bool hasAttr() const
Definition: DeclBase.h:542
emitShuffleAndReduceFunction
static llvm::Function * emitShuffleAndReduceFunction(CodeGenModule &CGM, ArrayRef< const Expr * > Privates, QualType ReductionArrayTy, llvm::Function *ReduceFn, SourceLocation Loc)
Emit a helper that reduces data across two OpenMP threads (lanes) in the same warp.
Definition: CGOpenMPRuntimeGPU.cpp:2395
clang::CudaArch::GFX908
@ GFX908
clang::CodeGen::CGOpenMPRuntimeGPU::emitBarrierCall
void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind Kind, bool EmitChecks=true, bool ForceSimpleCall=false) override
Emit an implicit/explicit barrier for OpenMP threads.
Definition: CGOpenMPRuntimeGPU.cpp:1615
emitListToGlobalCopyFunction
static llvm::Value * emitListToGlobalCopyFunction(CodeGenModule &CGM, ArrayRef< const Expr * > Privates, QualType ReductionArrayTy, SourceLocation Loc, const RecordDecl *TeamReductionRec, const llvm::SmallDenseMap< const ValueDecl *, const FieldDecl * > &VarFieldMap)
This function emits a helper that copies all the reduction variables from the team into the provided ...
Definition: CGOpenMPRuntimeGPU.cpp:2553
clang::VarDecl::getCanonicalDecl
VarDecl * getCanonicalDecl() override
Retrieves the "canonical" declaration of the given declaration.
Definition: Decl.cpp:2162
clang::QualifierCollector::apply
QualType apply(const ASTContext &Context, QualType QT) const
Apply the collected qualifiers to the given type.
Definition: Type.cpp:3752
clang::CodeGen::CGOpenMPRuntime::emitParallelOutlinedFunction
virtual llvm::Function * emitParallelOutlinedFunction(const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen)
Emits outlined function for the specified OpenMP parallel directive D.
Definition: CGOpenMPRuntime.cpp:1269
clang::CodeGen::Address::getAlignment
CharUnits getAlignment() const
Return the alignment of this pointer.
Definition: Address.h:120
clang::CodeGen::CGOpenMPRuntime::emitProcBindClause
virtual void emitProcBindClause(CodeGenFunction &CGF, llvm::omp::ProcBindKind ProcBind, SourceLocation Loc)
Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid, int proc_bind) to generat...
Definition: CGOpenMPRuntime.cpp:2899
clang::isOpenMPParallelDirective
bool isOpenMPParallelDirective(OpenMPDirectiveKind DKind)
Checks if the specified directive is a parallel-kind directive.
Definition: OpenMPKinds.cpp:527
clang::CodeGen::CGOpenMPRuntime::clearLocThreadIdInsertPt
void clearLocThreadIdInsertPt(CodeGenFunction &CGF)
Definition: CGOpenMPRuntime.cpp:1348
clang::ASTContext::VoidTy
CanQualType VoidTy
Definition: ASTContext.h:1096
clang::TargetInfo::getNewAlign
unsigned getNewAlign() const
Return the largest alignment for which a suitably-sized allocation with '::operator new(size_t)' is g...
Definition: TargetInfo.h:651
llvm::SmallVector
Definition: LLVM.h:38
CopyOptionsTy
Definition: CGOpenMPRuntimeGPU.cpp:1865
clang::SourceLocation
Encodes a location in the source.
Definition: SourceLocation.h:86
CopyOptionsTy::ScratchpadWidth
llvm::Value * ScratchpadWidth
Definition: CGOpenMPRuntimeGPU.cpp:1868
clang::CodeGen::CodeGenModule::SetInternalFunctionAttributes
void SetInternalFunctionAttributes(GlobalDecl GD, llvm::Function *F, const CGFunctionInfo &FI)
Set the attributes on the LLVM function for the given decl and function info.
Definition: CodeGenModule.cpp:2193
clang::VarDecl::isInitCapture
bool isInitCapture() const
Whether this variable is the implicit variable for a lambda init-capture.
Definition: Decl.h:1486
clang::CodeGen::CGOpenMPRuntimeGPU::getGPUWarpSize
llvm::Value * getGPUWarpSize(CodeGenFunction &CGF)
Declare generalized virtual functions which need to be defined by all specializations of OpenMPGPURun...
Definition: CGOpenMPRuntimeGPU.cpp:4023
clang::CodeGen::LValue::getAddress
Address getAddress(CodeGenFunction &CGF) const
Definition: CGValue.h:341
emitReductionListCopy
static void emitReductionListCopy(CopyAction Action, CodeGenFunction &CGF, QualType ReductionArrayTy, ArrayRef< const Expr * > Privates, Address SrcBase, Address DestBase, CopyOptionsTy CopyOptions={nullptr, nullptr, nullptr})
Emit instructions to copy a Reduce list, which contains partially aggregated values,...
Definition: CGOpenMPRuntimeGPU.cpp:1873
clang::CodeGen::CodeGenFunction::EmitLoadOfComplex
ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc)
EmitLoadOfComplex - Load a complex number from the specified l-value.
Definition: CGExprComplex.cpp:1145
clang::CudaArch::GFX906
@ GFX906
clang::QualType::getNonReferenceType
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const...
Definition: Type.h:6696
clang::CastExpr::getSubExpr
Expr * getSubExpr()
Definition: Expr.h:3525
clang::CodeGen::CGOpenMPRuntime::setLocThreadIdInsertPt
void setLocThreadIdInsertPt(CodeGenFunction &CGF, bool AtCurrentPoint=false)
Definition: CGOpenMPRuntime.cpp:1332
clang::CharUnits::QuantityType
int64_t QuantityType
Definition: CharUnits.h:40
clang::CodeGen::CodeGenTypeCache::getPointerAlign
CharUnits getPointerAlign() const
Definition: CodeGenTypeCache.h:117
clang::QualType
A (possibly-)qualified type.
Definition: Type.h:675
clang::CudaArch::GFX802
@ GFX802
clang::CudaArch::GFX940
@ GFX940
clang::ASTContext::getTargetAddressSpace
unsigned getTargetAddressSpace(QualType T) const
Definition: ASTContext.cpp:11998
Attr.h
clang::CodeGen::CGOpenMPRuntimeGPU::emitParallelCall
void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef< llvm::Value * > CapturedVars, const Expr *IfCond, llvm::Value *NumThreads) override
Emits code for parallel or serial call of the OutlinedFn with variables captured in a record which ad...
Definition: CGOpenMPRuntimeGPU.cpp:1520
AttributeLangSupport::C
@ C
Definition: SemaDeclAttr.cpp:55
clang::CudaArch::SM_53
@ SM_53
clang::CodeGen::CodeGenModule::getContext
ASTContext & getContext() const
Definition: CodeGenModule.h:697
clang::QualType::getCanonicalType
QualType getCanonicalType() const
Definition: Type.h:6539
clang::FieldDecl
Represents a member of a struct/union/class.
Definition: Decl.h:2855
clang::CodeGen::CGOpenMPRuntime::OMPBuilder
llvm::OpenMPIRBuilder OMPBuilder
An OpenMP-IR-Builder instance.
Definition: CGOpenMPRuntime.h:312
clang::CodeGen::CGOpenMPRuntime::emitCriticalRegion
virtual void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName, const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc, const Expr *Hint=nullptr)
Emits a critical region.
Definition: CGOpenMPRuntime.cpp:2199
clang::CodeGen::CGOpenMPRuntimeGPU::adjustTargetSpecificDataForLambdas
void adjustTargetSpecificDataForLambdas(CodeGenFunction &CGF, const OMPExecutableDirective &D) const override
Adjust some parameters for the target-based directives, like addresses of the variables captured by r...
Definition: CGOpenMPRuntimeGPU.cpp:3789
clang::isOpenMPLoopDirective
bool isOpenMPLoopDirective(OpenMPDirectiveKind DKind)
Checks if the specified directive is a directive with an associated loop construct.
Definition: OpenMPKinds.cpp:480
clang::CudaArch::GFX1034
@ GFX1034
clang::CodeGen::CodeGenFunction::createBasicBlock
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
Definition: CodeGenFunction.h:2430
clang::CudaArch::GFX1102
@ GFX1102
clang::CodeGen::CGOpenMPRuntimeGPU::Generic
@ Generic
Generic data-sharing mode.
Definition: CGOpenMPRuntimeGPU.h:352
clang::CodeGen::CGOpenMPRuntimeGPU::functionFinished
void functionFinished(CodeGenFunction &CGF) override
Cleans up references to the objects in finished function.
Definition: CGOpenMPRuntimeGPU.cpp:3755
DeclOpenMP.h
clang::CodeGen::CGBuilderTy::CreateStore
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:97
clang::CodeGen::LValue::setAddress
void setAddress(Address address)
Definition: CGValue.h:344
clang::StringToCudaArch
CudaArch StringToCudaArch(llvm::StringRef S)
Definition: Cuda.cpp:157
createRuntimeShuffleFunction
static llvm::Value * createRuntimeShuffleFunction(CodeGenFunction &CGF, llvm::Value *Elem, QualType ElemType, llvm::Value *Offset, SourceLocation Loc)
This function creates calls to one of two shuffle functions to copy variables between lanes in a warp...
Definition: CGOpenMPRuntimeGPU.cpp:1732
clang::CodeGen::CodeGenTypes::arrangeBuiltinFunctionDeclaration
const CGFunctionInfo & arrangeBuiltinFunctionDeclaration(QualType resultType, const FunctionArgList &args)
A builtin function is a freestanding function using the default C conventions.
Definition: CGCall.cpp:666
clang::TargetInfo::hasFeature
virtual bool hasFeature(StringRef Feature) const
Determine whether the given target has the given feature.
Definition: TargetInfo.h:1341
llvm::SmallPtrSet
Definition: ASTContext.h:82
clang::UnaryOperator
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition: Expr.h:2163
clang::CodeGen::CodeGenModule::getLangOpts
const LangOptions & getLangOpts() const
Definition: CodeGenModule.h:698
clang::OMPReductionClause
This represents clause 'reduction' in the '#pragma omp ...' directives.
Definition: OpenMPClause.h:3039
clang::CodeGen::CodeGenFunction::EmitLoadOfPointer
Address EmitLoadOfPointer(Address Ptr, const PointerType *PtrTy, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Load a pointer with type PtrTy stored at address Ptr.
Definition: CGExpr.cpp:2571
clang::CodeGen::CGOpenMPRuntime::emitThreadIDAddress
virtual Address emitThreadIDAddress(CodeGenFunction &CGF, SourceLocation Loc)
Emits the address of the word in memory where the current thread id is stored.
Definition: CGOpenMPRuntime.cpp:2117
clang::CudaArch::GFX1010
@ GFX1010
clang::CodeGen::CodeGenFunction::HaveInsertPoint
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
Definition: CodeGenFunction.h:2471
clang::CodeGen::CodeGenFunction::EmitLValueForField
LValue EmitLValueForField(LValue Base, const FieldDecl *Field)
Definition: CGExpr.cpp:4357
clang::Decl::getAttr
T * getAttr() const
Definition: DeclBase.h:538
clang::CodeGen::CodeGenFunction::EmitStoreOfScalar
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
Definition: CodeGenFunction.h:3848
clang::CudaArch::SM_60
@ SM_60
clang::CodeGen::CGOpenMPRuntimeGPU::hasAllocateAttributeForGlobalVar
bool hasAllocateAttributeForGlobalVar(const VarDecl *VD, LangAS &AS) override
Checks if the variable has associated OMPAllocateDeclAttr attribute with the predefined allocator and...
Definition: CGOpenMPRuntimeGPU.cpp:3841
clang::CodeGen::CGOpenMPRuntimeGPU::emitParallelOutlinedFunction
llvm::Function * emitParallelOutlinedFunction(const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) override
Emits inlined function for the specified OpenMP parallel.
Definition: CGOpenMPRuntimeGPU.cpp:1240
clang::CudaArch::GFX1100
@ GFX1100
getTeamsReductionVars
static void getTeamsReductionVars(ASTContext &Ctx, const OMPExecutableDirective &D, llvm::SmallVectorImpl< const ValueDecl * > &Vars)
Get list of reduction variables from the teams ... directives.
Definition: CGOpenMPRuntimeGPU.cpp:1307
clang::CodeGen::CGOpenMPRuntime::emitBarrierCall
virtual void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind Kind, bool EmitChecks=true, bool ForceSimpleCall=false)
Emit an implicit/explicit barrier for OpenMP threads.
Definition: CGOpenMPRuntime.cpp:2536
clang::CodeGen::CGBuilderTy
Definition: CGBuilder.h:44
clang::ASTContext::toBits
int64_t toBits(CharUnits CharSize) const
Convert a size in characters to a size in bits.
Definition: ASTContext.cpp:2456
clang::AttributeCommonInfo::AS_GNU
@ AS_GNU
__attribute__((...))
Definition: AttributeCommonInfo.h:27
clang::CodeGen::CGOpenMPRuntimeGPU::CUDA
@ CUDA
CUDA data sharing mode.
Definition: CGOpenMPRuntimeGPU.h:350
clang::ArrayType::Normal
@ Normal
Definition: Type.h:2903
clang::CodeGen::CGBuilderTy::CreateConstGEP
Address CreateConstGEP(Address Addr, uint64_t Index, const llvm::Twine &Name="")
Given addr = T* ...
Definition: CGBuilder.h:246
emitGlobalToListReduceFunction
static llvm::Value * emitGlobalToListReduceFunction(CodeGenModule &CGM, ArrayRef< const Expr * > Privates, QualType ReductionArrayTy, SourceLocation Loc, const RecordDecl *TeamReductionRec, const llvm::SmallDenseMap< const ValueDecl *, const FieldDecl * > &VarFieldMap, llvm::Function *ReduceFn)
This function emits a helper that reduces all the reduction variables from the team into the provided...
Definition: CGOpenMPRuntimeGPU.cpp:2875
clang::CodeGen::CodeGenFunction::Builder
CGBuilderTy Builder
Definition: CodeGenFunction.h:269
clang::OMPScheduleClause
This represents 'schedule' clause in the '#pragma omp ...' directive.
Definition: OpenMPClause.h:1573
clang::Type
The base class of the type hierarchy.
Definition: Type.h:1500
clang::FieldDecl::Create
static FieldDecl * Create(const ASTContext &C, DeclContext *DC, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo, Expr *BW, bool Mutable, InClassInitStyle InitStyle)
Definition: Decl.cpp:4233
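A compact sketch of the record-building sequence this helper participates in (startDefinition, addDecl, and completeDefinition are indexed elsewhere on this page; the record name is made up for illustration):
  ASTContext &C = CGM.getContext();
  RecordDecl *RD = C.buildImplicitRecord("_reduction_rec");
  RD->startDefinition();
  FieldDecl *FD = FieldDecl::Create(
      C, RD, SourceLocation(), SourceLocation(), /*Id=*/nullptr, C.IntTy,
      C.getTrivialTypeSourceInfo(C.IntTy, SourceLocation()),
      /*BW=*/nullptr, /*Mutable=*/false, ICIS_NoInit);
  FD->setAccess(AS_public);
  RD->addDecl(FD);
  RD->completeDefinition();
  QualType RecTy = C.getRecordType(RD);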
clang::CodeGen::CodeGenTypeCache::VoidPtrTy
llvm::PointerType * VoidPtrTy
Definition: CodeGenTypeCache.h:56
clang::CodeGen::CGOpenMPRuntimeGPU
Definition: CGOpenMPRuntimeGPU.h:24
clang::Decl::attr_end
attr_iterator attr_end() const
Definition: DeclBase.h:508
clang::Type::hasSignedIntegerRepresentation
bool hasSignedIntegerRepresentation() const
Determine whether this type has a signed integer representation of some sort, e.g....
Definition: Type.cpp:2058
clang::CodeGen::CodeGenFunction::GetAddrOfLocalVar
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
Definition: CodeGenFunction.h:2720
clang::CodeGen::CGOpenMPRuntimeGPU::emitNumThreadsClause
virtual void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc) override
Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_threads)...
Definition: CGOpenMPRuntimeGPU.cpp:1229
Offset
unsigned Offset
Definition: Format.cpp:2553
clang::CodeGen::CodeGenModule::getOpenMPRuntime
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
Definition: CodeGenModule.h:605
clang::CudaArch::GFX1011
@ GFX1011
clang::GlobalDecl
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:56
clang::CodeGen::AlignmentSource::Decl
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
clang::BlockDecl::captures
ArrayRef< Capture > captures() const
Definition: Decl.h:4328
clang::CodeGen::CodeGenFunction::CGCapturedStmtInfo::isCXXThisExprCaptured
bool isCXXThisExprCaptured() const
Definition: CodeGenFunction.h:441
clang::OMPOrderedClause
This represents 'ordered' clause in the '#pragma omp ...' directive.
Definition: OpenMPClause.h:1766
clang::CodeGen::CodeGenFunction::EmitLoadOfScalar
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
Definition: CodeGenFunction.h:3826
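The scalar load/store helpers are usually driven through an LValue so that alignment, TBAA, and memory-form conversions are handled uniformly; a small sketch (Addr, Ty, DestLV, and Loc stand in for values from the surrounding codegen):
  LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty);
  // Load, converting from the memory representation to the value form.
  llvm::Value *V = CGF.EmitLoadOfScalar(SrcLV, Loc);
  // Store, converting back to the memory representation.
  CGF.EmitStoreOfScalar(V, DestLV, /*isInit=*/false);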
clang::CodeGen::CodeGenFunction::ComplexPairTy
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Definition: CodeGenFunction.h:267
clang::CodeGen::CodeGenTypes::ConvertTypeForMem
llvm::Type * ConvertTypeForMem(QualType T, bool ForBitField=false)
ConvertTypeForMem - Convert type T into a llvm::Type.
Definition: CodeGenTypes.cpp:91
clang::CudaArch::SM_20
@ SM_20
clang::isOpenMPDistributeDirective
bool isOpenMPDistributeDirective(OpenMPDirectiveKind DKind)
Checks if the specified directive is a distribute directive.
Definition: OpenMPKinds.cpp:598
clang::CodeGen::CGOpenMPRuntimeGPU::getGPUNumThreads
llvm::Value * getGPUNumThreads(CodeGenFunction &CGF)
Get the maximum number of threads in a block of the GPU.
Definition: CGOpenMPRuntimeGPU.cpp:4002
clang::CodeGen::CodeGenFunction::CreateDefaultAlignTempAlloca
Address CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateDefaultAlignTempAlloca - This creates an alloca with the default ABI alignment of the given L...
Definition: CGExpr.cpp:123
clang::CapturedStmt::capturesVariable
bool capturesVariable(const VarDecl *Var) const
True if this variable has been captured.
Definition: Stmt.cpp:1415
clang::ASTContext::getUIntPtrType
QualType getUIntPtrType() const
Return a type compatible with "uintptr_t" (C99 7.18.1.4), as defined by the target.
Definition: ASTContext.cpp:5861
clang::Type::isReferenceType
bool isReferenceType() const
Definition: Type.h:6760
clang::CapturedStmt::getCapturedStmt
Stmt * getCapturedStmt()
Retrieve the statement being captured.
Definition: Stmt.h:3605
clang::CallExpr::getCallee
Expr * getCallee()
Definition: Expr.h:2951
V
#define V(N, I)
Definition: ASTContext.h:3176
clang::CodeGen::CGOpenMPRuntime::getOMPBuilder
llvm::OpenMPIRBuilder & getOMPBuilder()
Definition: CGOpenMPRuntime.h:305
clang::CodeGen::CGBuilderTy::CreateConstInBoundsGEP
Address CreateConstInBoundsGEP(Address Addr, uint64_t Index, const llvm::Twine &Name="")
Given addr = T* ...
Definition: CGBuilder.h:229
clang::CodeGen::CGOpenMPRuntimeGPU::getDefaultScheduleAndChunk
void getDefaultScheduleAndChunk(CodeGenFunction &CGF, const OMPLoopDirective &S, OpenMPScheduleClauseKind &ScheduleKind, const Expr *&ChunkExpr) const override
Choose a default value for the schedule clause.
Definition: CGOpenMPRuntimeGPU.cpp:3777
clang::CodeGen::TBAAAccessInfo
Definition: CodeGenTBAA.h:41
clang::CodeGen::CGBuilderTy::CreatePointerBitCastOrAddrSpaceCast
Address CreatePointerBitCastOrAddrSpaceCast(Address Addr, llvm::Type *Ty, llvm::Type *ElementTy, const llvm::Twine &Name="")
Definition: CGBuilder.h:173
getDistributeLastprivateVars
static void getDistributeLastprivateVars(ASTContext &Ctx, const OMPExecutableDirective &D, llvm::SmallVectorImpl< const ValueDecl * > &Vars)
Get list of lastprivate variables from the teams distribute ...
Definition: CGOpenMPRuntimeGPU.cpp:1282
clang::CodeGen::CGOpenMPRuntimeGPU::DataSharingMode
DataSharingMode
Target codegen is specialized based on two data-sharing modes: CUDA, in which the local variables are...
Definition: CGOpenMPRuntimeGPU.h:348
clang::CudaArch::SM_86
@ SM_86
clang::CudaArch::GFX702
@ GFX702
clang::CodeGen::EHScopeStack::Cleanup
Information for lazily generating a cleanup.
Definition: EHScopeStack.h:141
StmtOpenMP.h
getDataSharingMode
static CGOpenMPRuntimeGPU::DataSharingMode getDataSharingMode(CodeGenModule &CGM)
Definition: CGOpenMPRuntimeGPU.cpp:561
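The mode is derived from a language option; the body is essentially a one-line query (a sketch matching the -fopenmp-cuda-mode flag):
  static CGOpenMPRuntimeGPU::DataSharingMode
  getDataSharingMode(CodeGenModule &CGM) {
    return CGM.getLangOpts().OpenMPCUDAMode ? CGOpenMPRuntimeGPU::CUDA
                                            : CGOpenMPRuntimeGPU::Generic;
  }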
clang::QualType::getAddressSpace
LangAS getAddressSpace() const
Return the address space of this type.
Definition: Type.h:6621
clang::OMPClause
This is a basic class for representing single OpenMP clause.
Definition: OpenMPClause.h:55
clang::CodeGen::CGOpenMPRuntimeGPU::emitReduction
virtual void emitReduction(CodeGenFunction &CGF, SourceLocation Loc, ArrayRef< const Expr * > Privates, ArrayRef< const Expr * > LHSExprs, ArrayRef< const Expr * > RHSExprs, ArrayRef< const Expr * > ReductionOps, ReductionOptionsTy Options) override
Emit a code for reduction clause.
Definition: CGOpenMPRuntimeGPU.cpp:3208
clang::LambdaExpr
A C++ lambda expression, which produces a function object (of unspecified type) that can be invoked l...
Definition: ExprCXX.h:1865
clang::OMPLoopDirective
This is a common base class for loop directives ('omp simd', 'omp for', 'omp for simd' etc....
Definition: StmtOpenMP.h:1002
clang::CudaArch::SM_72
@ SM_72
clang::CharUnits::fromQuantity
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:63
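CharUnits arithmetic stays in character units until explicitly converted; with ASTContext::toBits (indexed above) the conversion is exact. For a typical target where a char is 8 bits:
  CharUnits Four = CharUnits::fromQuantity(4);
  int64_t Bits = Ctx.toBits(Four); // 32
  assert(!Four.isZero() && Four.getQuantity() == 4);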
clang::CodeGen::CodeGenFunction::EmitCastToVoidPtr
llvm::Value * EmitCastToVoidPtr(llvm::Value *value)
Emit a cast to void* in the appropriate address space.
Definition: CGExpr.cpp:54
clang::AS_public
@ AS_public
Definition: Specifiers.h:109
clang::BlockExpr::getBlockDecl
const BlockDecl * getBlockDecl() const
Definition: Expr.h:5982
clang::CodeGen::CGOpenMPRuntimeGPU::emitNumTeamsClause
void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams, const Expr *ThreadLimit, SourceLocation Loc) override
This function ought to emit, in the general case, a call to.
Definition: CGOpenMPRuntimeGPU.cpp:1235
clang::OMPRequiresDecl::clauselists
clauselist_range clauselists()
Definition: DeclOpenMP.h:441
clang::CodeGen::Address::getType
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:99
clang::getOpenMPCaptureRegions
void getOpenMPCaptureRegions(llvm::SmallVectorImpl< OpenMPDirectiveKind > &CaptureRegions, OpenMPDirectiveKind DKind)
Return the captured regions of an OpenMP directive.
Definition: OpenMPKinds.cpp:643
clang::ASTContext
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:208
clang::ASTContext::getSizeType
CanQualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.
Definition: ASTContext.cpp:5823
clang::CodeGen::CodeGenFunction::OMPPrivateScope
The scope used to remap some variables as private in the OpenMP loop body (or other captured region e...
Definition: CodeGenFunction.h:1066
clang::CudaArch::GFX90c
@ GFX90c
clang::CodeGen::CGOpenMPRuntimeGPU::ExecutionMode
ExecutionMode
Defines the execution mode.
Definition: CGOpenMPRuntimeGPU.h:27
clang::CodeGen::CodeGenFunction::EmitRuntimeCall
llvm::CallInst * EmitRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
clang::CodeGen::TEK_Complex
@ TEK_Complex
Definition: CodeGenFunction.h:108
clang::CodeGen::CodeGenTypeCache::Int16Ty
llvm::IntegerType * Int16Ty
Definition: CodeGenTypeCache.h:37
clang::CudaArch::GFX1012
@ GFX1012
clang::CodeGen::CodeGenFunction::getTarget
const TargetInfo & getTarget() const
Definition: CodeGenFunction.h:2024
clang::CudaArch::GFX1103
@ GFX1103
clang::CodeGen::CodeGenFunction::getDebugInfo
CGDebugInfo * getDebugInfo()
Definition: CodeGenFunction.h:1983
clang::ASTContext::getAsVariableArrayType
const VariableArrayType * getAsVariableArrayType(QualType T) const
Definition: ASTContext.h:2693
supportsLightweightRuntime
static bool supportsLightweightRuntime(ASTContext &Ctx, const OMPExecutableDirective &D)
Checks if the construct supports lightweight runtime.
Definition: CGOpenMPRuntimeGPU.cpp:926
clang::CudaArch::GFX1033
@ GFX1033
clang::CudaArch::GFX1031
@ GFX1031
clang::CudaArch::GFX90a
@ GFX90a
clang::CudaArch::SM_21
@ SM_21
clang::Decl::getCanonicalDecl
virtual Decl * getCanonicalDecl()
Retrieves the "canonical" declaration of the given declaration.
Definition: DeclBase.h:910
clang::OMPExecutableDirective::getInnermostCapturedStmt
CapturedStmt * getInnermostCapturedStmt()
Get innermost captured statement for the construct.
Definition: StmtOpenMP.h:542
clang::Stmt::children
child_range children()
Definition: Stmt.cpp:285
clang::IntegerLiteral::Create
static IntegerLiteral * Create(const ASTContext &C, const llvm::APInt &V, QualType type, SourceLocation l)
Returns a new integer literal with value 'V' and type 'type'.
Definition: Expr.cpp:900
clang::CodeGen::CodeGenModule::getModule
llvm::Module & getModule() const
Definition: CodeGenModule.h:704
clang::Type::isLValueReferenceType
bool isLValueReferenceType() const
Definition: Type.h:6764
clang::index::SymbolKind::Field
@ Field
clang::DeclRefExpr::getDecl
ValueDecl * getDecl()
Definition: Expr.h:1295
clang::ImplicitParamDecl
Definition: Decl.h:1613
clang::CodeGen::CGOpenMPRuntime::getDefaultFlagsForBarriers
static unsigned getDefaultFlagsForBarriers(OpenMPDirectiveKind Kind)
Returns default flags for the barriers depending on the directive, for which this barrier is going to ...
Definition: CGOpenMPRuntime.cpp:2503
clang::CR_OpenMP
@ CR_OpenMP
Definition: CapturedStmt.h:19
clang::CudaArch::GFX1032
@ GFX1032
clang::CodeGen::LValue::getType
QualType getType() const
Definition: CGValue.h:278
llvm::SmallString
Definition: LLVM.h:37
clang::getLangASFromTargetAS
LangAS getLangASFromTargetAS(unsigned TargetAS)
Definition: AddressSpaces.h:80
clang::ASTContext::getIntTypeForBitwidth
QualType getIntTypeForBitwidth(unsigned DestWidth, unsigned Signed) const
getIntTypeForBitwidth - sets integer QualTy according to specified details: bitwidth,...
Definition: ASTContext.cpp:11728
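This is the usual way codegen in this file obtains fixed-width integer types such as kmp_int32; a one-line sketch:
  // kmp_int32: the signed 32-bit integer type used for runtime arguments.
  QualType KmpInt32Ty = CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/1);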
clang::VarDecl
Represents a variable declaration or definition.
Definition: Decl.h:874
hasNestedSPMDDirective
static bool hasNestedSPMDDirective(ASTContext &Ctx, const OMPExecutableDirective &D)
Check for inner (nested) SPMD construct, if any.
Definition: CGOpenMPRuntimeGPU.cpp:567
clang::Type::getPointeeType
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition: Type.cpp:625
clang::CodeGen::CodeGenModule::getTypes
CodeGenTypes & getTypes()
Definition: CodeGenModule.h:721
clang::CudaArch::GFX904
@ GFX904
clang::CapturedStmt
This captures a statement into a function.
Definition: Stmt.h:3504
clang::Type::getAsCXXRecordDecl
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition: Type.cpp:1759
clang::TTK_Union
@ TTK_Union
The "union" keyword.
Definition: Type.h:5392
clang::CodeGen::CodeGenFunction::getVLASize
VlaSizePair getVLASize(const VariableArrayType *vla)
Returns an LLVM value that corresponds to the size, in non-variably-sized elements,...
Definition: CodeGenFunction.cpp:2155
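For variably-modified types the element count is an llvm::Value only known at run time; a sketch of turning it into a byte size (CGM.getSize converts a CharUnits quantity to a constant llvm::Value):
  const VariableArrayType *VAT = CGF.getContext().getAsVariableArrayType(Ty);
  auto VlaSize = CGF.getVLASize(VAT);
  // Total bytes = element count * sizeof(element type).
  CharUnits EltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type);
  llvm::Value *Bytes =
      CGF.Builder.CreateNUWMul(VlaSize.NumElts, CGF.CGM.getSize(EltSize));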
clang::Type::isVariablyModifiedType
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition: Type.h:2203
clang::CapturedStmt::Capture
Describes the capture of either a variable, or 'this', or variable-length array type.
Definition: Stmt.h:3517
clang::CudaArch::SM_52
@ SM_52
clang::CodeGen::CGOpenMPRuntime::getDefaultDistScheduleAndChunk
virtual void getDefaultDistScheduleAndChunk(CodeGenFunction &CGF, const OMPLoopDirective &S, OpenMPDistScheduleClauseKind &ScheduleKind, llvm::Value *&Chunk) const
Choose default schedule type and chunk value for the dist_schedule clause.
Definition: CGOpenMPRuntime.h:1782
clang::CodeGen::CGOpenMPRuntime::getIdentTyPointerTy
llvm::Type * getIdentTyPointerTy()
Returns pointer to ident_t type.
Definition: CGOpenMPRuntime.cpp:1484
clang::CodeGen::CGOpenMPRuntime::ReductionOptionsTy
Definition: CGOpenMPRuntime.h:1430
clang::CodeGen::CodeGenFunction::getContext
ASTContext & getContext() const
Definition: CodeGenFunction.h:1982
clang::QualifierCollector
A qualifier set is used to build a set of qualifiers.
Definition: Type.h:6440
clang::CodeGen::CGOpenMPRuntime::getSingleCompoundChild
static const Stmt * getSingleCompoundChild(ASTContext &Ctx, const Stmt *Body)
Checks if the Body is the CompoundStmt and returns its child statement iff there is only one that is ...
Definition: CGOpenMPRuntime.cpp:6501
clang::CodeGen::CodeGenFunction::FinishFunction
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
Definition: CodeGenFunction.cpp:325
clang::CodeGen::Address
An aligned address.
Definition: Address.h:74
castValueToType
static llvm::Value * castValueToType(CodeGenFunction &CGF, llvm::Value *Val, QualType ValTy, QualType CastTy, SourceLocation Loc)
Cast value to the specified type.
Definition: CGOpenMPRuntimeGPU.cpp:1702
clang::CodeGen::CGBuilderTy::CreateGEP
Address CreateGEP(Address Addr, llvm::Value *Index, const llvm::Twine &Name="")
Definition: CGBuilder.h:261
clang::CodeGen::CodeGenFunction::getEvaluationKind
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
Definition: CodeGenFunction.cpp:216
clang::OMPExecutableDirective::hasClausesOfKind
bool hasClausesOfKind() const
Returns true if the current directive has one or more clauses of a specific kind.
Definition: StmtOpenMP.h:482
clang::isOpenMPPrivate
bool isOpenMPPrivate(OpenMPClauseKind Kind)
Checks if the specified clause is one of private clauses like 'private', 'firstprivate',...
Definition: OpenMPKinds.cpp:615
clang::CodeGen::CGOpenMPRuntime::emitUpdateLocation
llvm::Value * emitUpdateLocation(CodeGenFunction &CGF, SourceLocation Loc, unsigned Flags=0)
Emits object of ident_t type with info for source location.
Definition: CGOpenMPRuntime.cpp:1370
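Most runtime calls emitted by this class follow the same shape: build the ident_t location, fetch the thread id, and pass both to EmitRuntimeCall. A condensed sketch using the barrier entry point (getThreadID is the protected CGOpenMPRuntime helper):
  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc),
                         getThreadID(CGF, Loc)};
  CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                          CGM.getModule(), OMPRTL___kmpc_barrier),
                      Args);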
clang::CodeGen::Address::getAddressSpace
unsigned getAddressSpace() const
Return the address space that this address resides in.
Definition: Address.h:110
clang::CastExpr::getCastKind
CastKind getCastKind() const
Definition: Expr.h:3519
clang::CudaArch::SM_61
@ SM_61
clang::isOpenMPTeamsDirective
bool isOpenMPTeamsDirective(OpenMPDirectiveKind DKind)
Checks if the specified directive is a teams-kind directive.
Definition: OpenMPKinds.cpp:568
clang::CodeGen::CodeGenModule::getTarget
const TargetInfo & getTarget() const
Definition: CodeGenModule.h:709
clang::CudaArch::GFX803
@ GFX803
getNVPTXWarpID
static llvm::Value * getNVPTXWarpID(CodeGenFunction &CGF)
Get the id of the warp in the block.
Definition: CGOpenMPRuntimeGPU.cpp:534
clang::CodeGen::CodeGenTypeCache::Int32Ty
llvm::IntegerType * Int32Ty
Definition: CodeGenTypeCache.h:37
clang::CodeGen::CGOpenMPRuntimeGPU::translateParameter
const VarDecl * translateParameter(const FieldDecl *FD, const VarDecl *NativeParam) const override
Translates the native parameter of the outlined function if this is required for the target.
Definition: CGOpenMPRuntimeGPU.cpp:3394
clang::LangAS
LangAS
Defines the address space values used by the address space qualifier of QualType.
Definition: AddressSpaces.h:25
clang::CodeGen::CodeGenTypes::GetFunctionType
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for the given CGFunctionInfo.
Definition: CGCall.cpp:1613
clang::Type::castAs
const T * castAs() const
Member-template castAs<specific type>.
Definition: Type.h:7310
clang::CodeGen::RegionCodeGenTy
Class provides a way to call simple version of codegen for OpenMP region, or an advanced with possibl...
Definition: CGOpenMPRuntime.h:68
clang::CodeGen::CodeGenFunction::StartFunction
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
Definition: CodeGenFunction.cpp:709
clang::CudaArch::LAST
@ LAST
clang::CodeGen::CodeGenFunction::VlaSizePair::NumElts
llvm::Value * NumElts
Definition: CodeGenFunction.h:2784
clang::CodeGen::LValue
LValue - This represents an lvalue reference.
Definition: CGValue.h:171
clang::CodeGen::CodeGenFunction::EHStack
EHScopeStack EHStack
Definition: CodeGenFunction.h:619
clang::TargetOptions::FeatureMap
llvm::StringMap< bool > FeatureMap
The map of which features have been enabled or disabled based on the command line.
Definition: TargetOptions.h:62
clang::TargetInfo::getTargetOpts
TargetOptions & getTargetOpts() const
Retrieve the target options.
Definition: TargetInfo.h:260
clang::CodeGen::CodeGenFunction::MakeNaturalAlignAddrLValue
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T)
Definition: CodeGenFunction.cpp:187
clang::OMPExecutableDirective::getDirectiveKind
OpenMPDirectiveKind getDirectiveKind() const
Definition: StmtOpenMP.h:555
clang::LambdaExpr::isInitCapture
bool isInitCapture(const LambdaCapture *Capture) const
Determine whether one of this lambda's captures is an init-capture.
Definition: ExprCXX.cpp:1213
clang::CodeGen::CGOpenMPRuntimeGPU::processRequiresDirective
void processRequiresDirective(const OMPRequiresDecl *D) override
Perform check on requires decl to ensure that target architecture supports unified addressing.
Definition: CGOpenMPRuntimeGPU.cpp:3886
clang::TagDecl::startDefinition
void startDefinition()
Starts the definition of this tag declaration.
Definition: Decl.cpp:4382
clang::ASTContext::VoidPtrTy
CanQualType VoidPtrTy
Definition: ASTContext.h:1123
clang::Decl::hasAttrs
bool hasAttrs() const
Definition: DeclBase.h:484
clang::CudaArch::SM_80
@ SM_80
clang::syntax::NodeRole::Size
@ Size
clang::CodeGen::Address::getPointer
llvm::Value * getPointer() const
Definition: Address.h:93
clang::CodeGen::CodeGenFunction
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
Definition: CodeGenFunction.h:231
clang::Stmt::IgnoreContainers
Stmt * IgnoreContainers(bool IgnoreCaptured=false)
Skip no-op (attributed, compound) container stmts and skip captured stmt at the top,...
Definition: Stmt.cpp:195
clang::ASTContext::getDeclAlign
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
Definition: ASTContext.cpp:1727
clang::CudaArch::GFX909
@ GFX909
clang::CudaArch::SM_30
@ SM_30
clang::LCK_ByRef
@ LCK_ByRef
Capturing by reference.
Definition: Lambda.h:37
clang::NamedDecl::getIdentifier
IdentifierInfo * getIdentifier() const
Get the identifier that names this declaration, if there is one.
Definition: Decl.h:268
clang::ValueDecl
Represent the declaration of a variable (in which case it is an lvalue), a function (in which case it ...
Definition: Decl.h:674
clang::RecordDecl::completeDefinition
virtual void completeDefinition()
Note that the definition of this type is now complete.
Definition: Decl.cpp:4689
clang::CodeGen::CGOpenMPRuntime::processRequiresDirective
virtual void processRequiresDirective(const OMPRequiresDecl *D)
Perform check on requires decl to ensure that target architecture supports unified addressing.
Definition: CGOpenMPRuntime.cpp:10884
clang::CharUnits::isZero
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition: CharUnits.h:116
clang::DeclContext::addDecl
void addDecl(Decl *D)
Add the declaration D into this context.
Definition: DeclBase.cpp:1572
clang::CodeGen::CodeGenModule
This class organizes the cross-function state that is used while generating LLVM code.
Definition: CodeGenModule.h:275
clang::LambdaExpr::captures
capture_range captures() const
Retrieve this lambda's captures.
Definition: ExprCXX.cpp:1226
clang::BlockExpr
BlockExpr - Adaptor class for mixing a BlockDecl with expressions.
Definition: Expr.h:5970
llvm::ArrayRef
Definition: LLVM.h:34
clang::LangAS::opencl_global
@ opencl_global
clang::Decl::attr_begin
attr_iterator attr_begin() const
Definition: DeclBase.h:505
clang::Decl
Decl - This represents one declaration (or definition), e.g.
Definition: DeclBase.h:83
clang::CudaArch::GFX600
@ GFX600
clang::CodeGen::CodeGenFunction::CGCapturedStmtInfo::getKind
CapturedRegionKind getKind() const
Definition: CodeGenFunction.h:430
clang::Expr::IgnoreParenImpCasts
Expr * IgnoreParenImpCasts() LLVM_READONLY
Skip past any parentheses and implicit casts which might surround this expression until reaching a fi...
Definition: Expr.cpp:2947
clang::RISCV::TypeModifier::LLVM_MARK_AS_BITMASK_ENUM
@ LLVM_MARK_AS_BITMASK_ENUM
clang::OMPExecutableDirective::getAssociatedStmt
const Stmt * getAssociatedStmt() const
Returns statement associated with the directive.
Definition: StmtOpenMP.h:520
CopyOptionsTy::ScratchpadIndex
llvm::Value * ScratchpadIndex
Definition: CGOpenMPRuntimeGPU.cpp:1867
emitListToGlobalReduceFunction
static llvm::Value * emitListToGlobalReduceFunction(CodeGenModule &CGM, ArrayRef< const Expr * > Privates, QualType ReductionArrayTy, SourceLocation Loc, const RecordDecl *TeamReductionRec, const llvm::SmallDenseMap< const ValueDecl *, const FieldDecl * > &VarFieldMap, llvm::Function *ReduceFn)
This function emits a helper that reduces all the reduction variables from the team into the provided...
Definition: CGOpenMPRuntimeGPU.cpp:2665
clang::CodeGen::Address::invalid
static Address invalid()
Definition: Address.h:90
clang::CudaArchToString
const char * CudaArchToString(CudaArch A)
Definition: Cuda.cpp:139
clang::CodeGen::FunctionArgList
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition: CGCall.h:356
StmtVisitor.h
clang::CudaArch::UNKNOWN
@ UNKNOWN
clang::CodeGen::CodeGenFunction::ConvertType
llvm::Type * ConvertType(QualType T)
Definition: CodeGenFunction.cpp:212
clang::CudaArch::SM_32
@ SM_32
clang::OMPExecutableDirective::hasAssociatedStmt
bool hasAssociatedStmt() const
Returns true if directive has associated statement.
Definition: StmtOpenMP.h:517
clang::CodeGen::CGOpenMPRuntime::emitOutlinedFunctionCall
virtual void emitOutlinedFunctionCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn, ArrayRef< llvm::Value * > Args=llvm::None) const
Emits call of the outlined function with the provided arguments, translating these arguments to corre...
Definition: CGOpenMPRuntime.cpp:12137
clang::CodeGen::NormalAndEHCleanup
@ NormalAndEHCleanup
Definition: EHScopeStack.h:86
hasNestedLightweightDirective
static bool hasNestedLightweightDirective(ASTContext &Ctx, const OMPExecutableDirective &D)
Check for inner (nested) lightweight runtime construct, if any.
Definition: CGOpenMPRuntimeGPU.cpp:764
clang::CudaArch::GFX900
@ GFX900
getNVPTXLaneID
static llvm::Value * getNVPTXLaneID(CodeGenFunction &CGF)
Get the id of the current lane in the warp.
Definition: CGOpenMPRuntimeGPU.cpp:545
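Both warp and lane ids are simple bit manipulations of the hardware thread id, parameterized by the target's warp size from the grid values; a sketch condensed from the two definitions referenced here (Bld is the CGBuilderTy and RT the CGOpenMPRuntimeGPU instance):
  unsigned LaneIDBits =
      llvm::Log2_32(CGF.getTarget().getGridValue().GV_Warp_Size);
  // warp id = thread id >> log2(warp size)
  llvm::Value *WarpID =
      Bld.CreateAShr(RT.getGPUThreadID(CGF), LaneIDBits, "nvptx_warp_id");
  // lane id = thread id & (warp size - 1)
  unsigned LaneIDMask = ~0u >> (32u - LaneIDBits);
  llvm::Value *LaneID = Bld.CreateAnd(
      RT.getGPUThreadID(CGF), Bld.getInt32(LaneIDMask), "nvptx_lane_id");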
clang::CodeGen::CGOpenMPRuntimeGPU::getDefaultDistScheduleAndChunk
void getDefaultDistScheduleAndChunk(CodeGenFunction &CGF, const OMPLoopDirective &S, OpenMPDistScheduleClauseKind &ScheduleKind, llvm::Value *&Chunk) const override
Choose a default value for the dist_schedule clause.
Definition: CGOpenMPRuntimeGPU.cpp:3760
clang::CodeGen::Address::getElementType
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:104
clang::CodeGen::CodeGenFunction::MakeAddrLValue
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
Definition: CodeGenFunction.h:2492
clang::CudaArch::GFX801
@ GFX801
clang::ConstStmtVisitor
ConstStmtVisitor - This class implements a simple visitor for Stmt subclasses.
Definition: StmtVisitor.h:193
clang::CodeGen::CGOpenMPRuntime::emitReductionFunction
llvm::Function * emitReductionFunction(SourceLocation Loc, llvm::Type *ArgsElemType, ArrayRef< const Expr * > Privates, ArrayRef< const Expr * > LHSExprs, ArrayRef< const Expr * > RHSExprs, ArrayRef< const Expr * > ReductionOps)
Emits reduction function.
Definition: CGOpenMPRuntime.cpp:5307
clang::CodeGen::CGOpenMPRuntimeGPU::getGPUThreadID
llvm::Value * getGPUThreadID(CodeGenFunction &CGF)
Get the id of the current thread on the GPU.
Definition: CGOpenMPRuntimeGPU.cpp:4015
clang::ASTContext::getTargetInfo
const TargetInfo & getTargetInfo() const
Definition: ASTContext.h:768
clang::CodeGen::LValue::getPointer
llvm::Value * getPointer(CodeGenFunction &CGF) const
Definition: CGValue.h:337
clang::CodeGen::CGBuilderTy::CreateElementBitCast
Address CreateElementBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Cast the element type of the given address to a different type, preserving information like the align...
Definition: CGBuilder.h:165
clang::CodeGen::CGOpenMPRuntimeGPU::EM_NonSPMD
@ EM_NonSPMD
Non-SPMD execution mode (1 master thread, others are workers).
Definition: CGOpenMPRuntimeGPU.h:31
getPrivateItem
static std::pair< ValueDecl *, bool > getPrivateItem(Sema &S, Expr *&RefExpr, SourceLocation &ELoc, SourceRange &ERange, bool AllowArraySection=false)
Definition: SemaOpenMP.cpp:5115
CopyOptionsTy::RemoteLaneOffset
llvm::Value * RemoteLaneOffset
Definition: CGOpenMPRuntimeGPU.cpp:1866
clang::OMPExecutableDirective
This is a basic class for representing single OpenMP executable directive.
Definition: StmtOpenMP.h:266
clang::CudaArch::GFX703
@ GFX703
clang::ObjCPropertyAttribute::Kind
Kind
Definition: DeclObjCCommon.h:22
clang::CodeGen::AlignmentSource::Type
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
clang::DeclStmt
DeclStmt - Adaptor class for mixing declarations with statements and expressions.
Definition: Stmt.h:1294
clang::CodeGen::CGFunctionInfo
CGFunctionInfo - Class to encapsulate the information about a function definition.
Definition: CGFunctionInfo.h:546
clang::isOpenMPLoopBoundSharingDirective
bool isOpenMPLoopBoundSharingDirective(OpenMPDirectiveKind Kind)
Checks if the specified directive kind is one of the composite or combined directives that need loop ...
Definition: OpenMPKinds.cpp:630
clang::PointerType
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition: Type.h:2653
clang::CodeGen::CodeGenFunction::EmitStoreOfComplex
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
Definition: CGExprComplex.cpp:1139
clang::CodeGen::CGOpenMPRuntimeGPU::clear
void clear() override
Definition: CGOpenMPRuntimeGPU.cpp:3966
clang::isOpenMPWorksharingDirective
bool isOpenMPWorksharingDirective(OpenMPDirectiveKind DKind)
Checks if the specified directive is a worksharing directive.
Definition: OpenMPKinds.cpp:505
clang::LangAS::cuda_constant
@ cuda_constant
clang::CudaArch::GFX705
@ GFX705
clang::LangAS::Default
@ Default
Cuda.h
clang::CodeGen::CGBuilderTy::CreateConstArrayGEP
Address CreateConstArrayGEP(Address Addr, uint64_t Index, const llvm::Twine &Name="")
Given addr = [n x T]* ...
Definition: CGBuilder.h:210
clang::CodeGen::CodeGenFunction::CGM
CodeGenModule & CGM
Definition: CodeGenFunction.h:261
clang::TargetInfo::getGridValue
virtual const llvm::omp::GV & getGridValue() const
Definition: TargetInfo.h:1472
clang::ImplicitParamDecl::Create
static ImplicitParamDecl * Create(ASTContext &C, DeclContext *DC, SourceLocation IdLoc, IdentifierInfo *Id, QualType T, ImplicitParamKind ParamKind)
Create implicit parameter.
Definition: Decl.cpp:4950
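Outlined-function parameters are typically synthesized with this helper; a sketch creating the conventional .global_tid. argument (the kmp_int32 pointer type is built inline, and the name follows the usual OpenMP runtime convention):
  ASTContext &Ctx = CGM.getContext();
  QualType KmpInt32PtrTy =
      Ctx.getPointerType(Ctx.getIntTypeForBitwidth(32, /*Signed=*/1));
  ImplicitParamDecl *GtidParam = ImplicitParamDecl::Create(
      Ctx, /*DC=*/nullptr, SourceLocation(), &Ctx.Idents.get(".global_tid."),
      KmpInt32PtrTy, ImplicitParamDecl::Other);
  FunctionArgList Args;
  Args.push_back(GtidParam);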
clang::CodeGen::TEK_Scalar
@ TEK_Scalar
Definition: CodeGenFunction.h:107
clang::ASTContext::getAddrSpaceQualType
QualType getAddrSpaceQualType(QualType T, LangAS AddressSpace) const
Return the uniqued reference to the type for an address space qualified type with the specified type ...
Definition: ASTContext.cpp:3013
clang::CodeGen::CodeGenFunction::CurFn
llvm::Function * CurFn
Definition: CodeGenFunction.h:325
clang::Builtin::ID
ID
Definition: Builtins.h:51
clang::OMPExecutableDirective::getBeginLoc
SourceLocation getBeginLoc() const
Returns starting location of directive kind.
Definition: StmtOpenMP.h:488
clang::UnaryOperator::getSubExpr
Expr * getSubExpr() const
Definition: Expr.h:2210
clang::Expr::IgnoreParens
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:2943
clang
Definition: CalledOnceCheck.h:17
clang::LangOptions::OMPHostIRFile
std::string OMPHostIRFile
Name of the IR file that contains the result of the OpenMP target host code generation.
Definition: LangOptions.h:433
clang::CodeGen::CGOpenMPRuntimeGPU::getParameterAddress
Address getParameterAddress(CodeGenFunction &CGF, const VarDecl *NativeParam, const VarDecl *TargetParam) const override
Gets the address of the native argument based on the address of the target-specific parameter.
Definition: CGOpenMPRuntimeGPU.cpp:3426
emitGlobalToListCopyFunction
static llvm::Value * emitGlobalToListCopyFunction(CodeGenModule &CGM, ArrayRef< const Expr * > Privates, QualType ReductionArrayTy, SourceLocation Loc, const RecordDecl *TeamReductionRec, const llvm::SmallDenseMap< const ValueDecl *, const FieldDecl * > &VarFieldMap)
This function emits a helper that copies all the reduction variables from the team into the provided ...
Definition: CGOpenMPRuntimeGPU.cpp:2762
clang::CodeGen::CodeGenModule::addCompilerUsedGlobal
void addCompilerUsedGlobal(llvm::GlobalValue *GV)
Add a global to a list to be added to the llvm.compiler.used metadata.
Definition: CodeGenModule.cpp:2346
clang::CudaArch::GFX1036
@ GFX1036
clang::CodeGen::CodeGenFunction::EmitAggregateCopy
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)
EmitAggregateCopy - Emit an aggregate copy.
Definition: CGExprAgg.cpp:2056
clang::OpenMPDistScheduleClauseKind
OpenMPDistScheduleClauseKind
OpenMP attributes for 'dist_schedule' clause.
Definition: OpenMPKinds.h:103
clang::CudaArch::GFX902
@ GFX902
clang::ExternalLinkage
@ ExternalLinkage
External linkage, which indicates that the entity can be referred to from other translation units.
Definition: Linkage.h:59
clang::Stmt
Stmt - This represents one statement.
Definition: Stmt.h:69
clang::UnaryOperator::getOpcode
Opcode getOpcode() const
Definition: Expr.h:2205
clang::CodeGen::CGOpenMPRuntime::clear
virtual void clear()
Definition: CGOpenMPRuntime.cpp:1071
clang::CodeGen::CGOpenMPRuntimeGPU::emitCriticalRegion
void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName, const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc, const Expr *Hint=nullptr) override
Emits a critical region.
Definition: CGOpenMPRuntimeGPU.cpp:1632
clang::CodeGen::CGOpenMPRuntimeGPU::emitProcBindClause
virtual void emitProcBindClause(CodeGenFunction &CGF, llvm::omp::ProcBindKind ProcBind, SourceLocation Loc) override
Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid, int proc_bind) to generat...
Definition: CGOpenMPRuntimeGPU.cpp:1219
clang::CodeGen::CGOpenMPRuntimeGPU::emitTeamsOutlinedFunction
llvm::Function * emitTeamsOutlinedFunction(const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) override
Emits inlined function for the specified OpenMP teams.
Definition: CGOpenMPRuntimeGPU.cpp:1317
clang::CudaArch::GFX1101
@ GFX1101
clang::CodeGen::CodeGenFunction::CapturedStmtInfo
CGCapturedStmtInfo * CapturedStmtInfo
Definition: CodeGenFunction.h:476
clang::CodeGen::CGOpenMPRuntime::emitTeamsOutlinedFunction
virtual llvm::Function * emitTeamsOutlinedFunction(const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen)
Emits outlined function for the specified OpenMP teams directive D.
Definition: CGOpenMPRuntime.cpp:1277
clang::CodeGen::CodeGenFunction::EmitLoadOfReferenceLValue
LValue EmitLoadOfReferenceLValue(LValue RefLVal)
Definition: CGExpr.cpp:2562
clang::CudaArch::SM_37
@ SM_37
clang::CudaArch::GFX602
@ GFX602
clang::CudaArch::SM_50
@ SM_50
clang::Expr::getType
QualType getType() const
Definition: Expr.h:141
clang::Attr
Attr - This represents one attribute.
Definition: Attr.h:41
clang::Qualifiers::addAddressSpace
void addAddressSpace(LangAS space)
Definition: Type.h:389
clang::CudaArch::GFX1013
@ GFX1013
clang::CodeGen::RegionCodeGenTy::setAction
void setAction(PrePostActionTy &Action) const
Definition: CGOpenMPRuntime.h:89
clang::OMPRequiresDecl
This represents '#pragma omp requires...' directive.
Definition: DeclOpenMP.h:416
clang::CudaArch
CudaArch
Definition: Cuda.h:46
clang::CodeGen::ApplyDebugLocation::CreateEmpty
static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF)
Set the IRBuilder to not attach debug locations.
Definition: CGDebugInfo.h:847
clang::Decl::getAttrs
AttrVec & getAttrs()
Definition: DeclBase.h:490
clang::CudaArch::Generic
@ Generic
clang::ImplicitCastExpr
ImplicitCastExpr - Allows us to explicitly represent implicit type conversions, which have no direct ...
Definition: Expr.h:3619
clang::CudaArch::GFX810
@ GFX810
clang::CodeGen::CodeGenFunction::EmitLValue
LValue EmitLValue(const Expr *E)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
Definition: CGExpr.cpp:1305
hasStaticScheduling
static bool hasStaticScheduling(const OMPExecutableDirective &D)
Check if the directive is loop-based and either has no schedule clause at all or has static scheduling.
Definition: CGOpenMPRuntimeGPU.cpp:751
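For reference, the kind of user-level construct this predicate accepts, sketched in OpenMP C++ source (N, in, and out are illustrative):
  // Qualifies: loop-based, no 'ordered' clause, and either no schedule
  // clause at all or an explicit schedule(static).
  #pragma omp target teams distribute parallel for schedule(static)
  for (int i = 0; i < N; ++i)
    out[i] = in[i] * 2;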
clang::CudaArch::UNUSED
@ UNUSED
clang::OpenMPDirectiveKind
llvm::omp::Directive OpenMPDirectiveKind
OpenMP directives.
Definition: OpenMPKinds.h:24
clang::ASTContext::getPointerType
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
Definition: ASTContext.cpp:3260
clang::OMPExecutableDirective::clauses
ArrayRef< OMPClause * > clauses() const
Definition: StmtOpenMP.h:572
clang::CharUnits
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
clang::isOpenMPTargetExecutionDirective
bool isOpenMPTargetExecutionDirective(OpenMPDirectiveKind DKind)
Checks if the specified directive is a target code offload directive.
Definition: OpenMPKinds.cpp:544
clang::CodeGen::CGOpenMPRuntimeGPU::emitFunctionProlog
void emitFunctionProlog(CodeGenFunction &CGF, const Decl *D) override
Emits OpenMP-specific function prolog.
Definition: CGOpenMPRuntimeGPU.cpp:3614
clang::CudaArch::SM_62
@ SM_62
llvm::SmallVectorImpl
Definition: Randstruct.h:18
emitInterWarpCopyFunction
static llvm::Value * emitInterWarpCopyFunction(CodeGenModule &CGM, ArrayRef< const Expr * > Privates, QualType ReductionArrayTy, SourceLocation Loc)
This function emits a helper that gathers Reduce lists from the first lane of every active warp to la...
Definition: CGOpenMPRuntimeGPU.cpp:2091
clang::CallExpr::arguments
arg_range arguments()
Definition: Expr.h:3040
clang::SC_None
@ SC_None
Definition: Specifiers.h:235
clang::ValueDecl::getType
QualType getType() const
Definition: Decl.h:685
clang::Type::isIntegerType
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition: Type.h:7069
clang::CodeGen::CodeGenFunction::CreateMemTemp
Address CreateMemTemp(QualType T, const Twine &Name="tmp", Address *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
Definition: CGExpr.cpp:135
clang::Expr
This represents one expression.
Definition: Expr.h:109
clang::OpenMPScheduleClauseKind
OpenMPScheduleClauseKind
OpenMP attributes for 'schedule' clause.
Definition: OpenMPKinds.h:30
clang::CodeGen::CGOpenMPRuntime::CGM
CodeGenModule & CGM
Definition: CGOpenMPRuntime.h:308
clang::CapturedStmt::getCapturedDecl
CapturedDecl * getCapturedDecl()
Retrieve the outlined function declaration.
Definition: Stmt.cpp:1391
clang::ParmVarDecl::Create
static ParmVarDecl * Create(ASTContext &C, DeclContext *DC, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo, StorageClass S, Expr *DefArg)
Definition: Decl.cpp:2810
clang::CodeGen::CodeGenTypeCache::Int8Ty
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
Definition: CodeGenTypeCache.h:37
clang::BlockDecl::Capture
A class which contains all the information about a particular captured value.
Definition: Decl.h:4207
shuffleAndStore
static void shuffleAndStore(CodeGenFunction &CGF, Address SrcAddr, Address DestAddr, QualType ElemType, llvm::Value *Offset, SourceLocation Loc)
Definition: CGOpenMPRuntimeGPU.cpp:1765
clang::OMPExecutableDirective::getCapturedStmt
const CapturedStmt * getCapturedStmt(OpenMPDirectiveKind RegionKind) const
Returns the captured statement associated with the component region within the (combined) directive.
Definition: StmtOpenMP.h:533
clang::CodeGen::CodeGenTypeCache::VoidPtrPtrTy
llvm::PointerType * VoidPtrPtrTy
Definition: CodeGenTypeCache.h:62
getCudaArch
static CudaArch getCudaArch(CodeGenModule &CGM)
Definition: CGOpenMPRuntimeGPU.cpp:3871
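The architecture is recovered from the target feature map rather than stored directly; a condensed sketch of the lookup, mirroring the definition referenced above:
  static CudaArch getCudaArch(CodeGenModule &CGM) {
    if (!CGM.getTarget().hasFeature("ptx"))
      return CudaArch::UNKNOWN;
    for (const auto &Feature : CGM.getTarget().getTargetOpts().FeatureMap)
      if (Feature.getValue()) {
        CudaArch Arch = StringToCudaArch(Feature.getKey());
        if (Arch != CudaArch::UNKNOWN)
          return Arch;
      }
    return CudaArch::UNKNOWN;
  }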
clang::Decl::getLocation
SourceLocation getLocation() const
Definition: DeclBase.h:425
clang::CodeGen::CGOpenMPRuntimeGPU::emitTeamsCall
void emitTeamsCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef< llvm::Value * > CapturedVars) override
Emits code for teams call of the OutlinedFn with variables captured in a record which address is stor...
Definition: CGOpenMPRuntimeGPU.cpp:1502
clang::Decl::isCanonicalDecl
bool isCanonicalDecl() const
Whether this particular Decl is a canonical one.
Definition: DeclBase.h:916
clang::DeclRefExpr
A reference to a declared variable, function, enum, etc.
Definition: Expr.h:1223
clang::CudaArch::SM_75
@ SM_75
clang::RecordDecl
Represents a struct/union/class.
Definition: Decl.h:3901
clang::CodeGen::CGOpenMPRuntime::emitReduction
virtual void emitReduction(CodeGenFunction &CGF, SourceLocation Loc, ArrayRef< const Expr * > Privates, ArrayRef< const Expr * > LHSExprs, ArrayRef< const Expr * > RHSExprs, ArrayRef< const Expr * > ReductionOps, ReductionOptionsTy Options)
Emit a code for reduction clause.
Definition: CGOpenMPRuntime.cpp:5417
clang::CallExpr
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition: Expr.h:2801
clang::CodeGen::CodeGenFunction::EmitBlock
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition: CGStmt.cpp:544
clang::CudaArch::GFX701
@ GFX701
clang::CodeGen::CodeGenFunction::EmitLValueForFieldInitialization
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference,...
Definition: CGExpr.cpp:4526
clang::CodeGen::CodeGenModule::Error
void Error(SourceLocation loc, StringRef error)
Emit a general error that something can't be done.
Definition: CodeGenModule.cpp:1052
clang::CodeGen::CodeGenModule::getLLVMContext
llvm::LLVMContext & getLLVMContext()
Definition: CodeGenModule.h:715
clang::CharUnits::getQuantity
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:179
clang::CudaArch::GFX1030
@ GFX1030
clang::CodeGen::PrePostActionTy
A basic class for pre|post-actions in the advanced codegen sequence for an OpenMP region.
Definition: CGOpenMPRuntime.h:58
clang::ICIS_NoInit
@ ICIS_NoInit
No in-class initializer.
Definition: Specifiers.h:257
clang::specific_attr_iterator
specific_attr_iterator - Iterates over a subrange of an AttrVec, only providing attributes that are o...
Definition: AttrIterator.h:33
CGOpenMPRuntimeGPU.h
clang::CudaArch::GFX700
@ GFX700
clang::CodeGen::CGOpenMPRuntimeGPU::EM_Unknown
@ EM_Unknown
Unknown execution mode (orphaned directive).
Definition: CGOpenMPRuntimeGPU.h:33
clang::CodeGen::CGOpenMPRuntimeGPU::getDefaultLocationReserved2Flags
unsigned getDefaultLocationReserved2Flags() const override
Returns additional flags that can be stored in the reserved_2 field of the default location.
Definition: CGOpenMPRuntimeGPU.cpp:1185
clang::CodeGen::CGOpenMPRuntime::functionFinished
virtual void functionFinished(CodeGenFunction &CGF)
Cleans up references to the objects in finished function.
Definition: CGOpenMPRuntime.cpp:1463
clang::ImplicitParamDecl::Other
@ Other
Other implicit parameter.
Definition: Decl.h:1640
clang::CodeGen::CGOpenMPRuntimeGPU::getAddressOfLocalVariable
Address getAddressOfLocalVariable(CodeGenFunction &CGF, const VarDecl *VD) override
Gets the OpenMP-specific address of the local variable.
Definition: CGOpenMPRuntimeGPU.cpp:3686
clang::CodeGen::CodeGenFunction::EmitBranch
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
Definition: CGStmt.cpp:564
clang::Decl::getDeclContext
DeclContext * getDeclContext()
Definition: DeclBase.h:434
clang::CodeGen::AggValueSlot::DoesNotOverlap
@ DoesNotOverlap
Definition: CGValue.h:553
clang::NamedDecl::getName
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition: Decl.h:274