1 //===----- CGOpenMPRuntime.cpp - Interface to OpenMP Runtimes -------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This provides a class for OpenMP runtime code generation.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CGCXXABI.h"
15 #include "CGCleanup.h"
16 #include "CGOpenMPRuntime.h"
17 #include "CodeGenFunction.h"
19 #include "clang/AST/Decl.h"
20 #include "clang/AST/StmtOpenMP.h"
21 #include "llvm/ADT/ArrayRef.h"
22 #include "llvm/ADT/BitmaskEnum.h"
23 #include "llvm/Bitcode/BitcodeReader.h"
24 #include "llvm/IR/CallSite.h"
25 #include "llvm/IR/DerivedTypes.h"
26 #include "llvm/IR/GlobalValue.h"
27 #include "llvm/IR/Value.h"
28 #include "llvm/Support/Format.h"
29 #include "llvm/Support/raw_ostream.h"
30 #include <cassert>
31 
32 using namespace clang;
33 using namespace CodeGen;
34 
35 namespace {
36 /// \brief Base class for handling code generation inside OpenMP regions.
37 class CGOpenMPRegionInfo : public CodeGenFunction::CGCapturedStmtInfo {
38 public:
39  /// \brief Kinds of OpenMP regions used in codegen.
40  enum CGOpenMPRegionKind {
41  /// \brief Region with outlined function for standalone 'parallel'
42  /// directive.
43  ParallelOutlinedRegion,
44  /// \brief Region with outlined function for standalone 'task' directive.
45  TaskOutlinedRegion,
46  /// \brief Region for constructs that do not require function outlining,
47  /// like 'for', 'sections', 'atomic' etc. directives.
48  InlinedRegion,
49  /// \brief Region with outlined function for standalone 'target' directive.
50  TargetRegion,
51  };
52 
53  CGOpenMPRegionInfo(const CapturedStmt &CS,
54  const CGOpenMPRegionKind RegionKind,
55  const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind,
56  bool HasCancel)
57  : CGCapturedStmtInfo(CS, CR_OpenMP), RegionKind(RegionKind),
58  CodeGen(CodeGen), Kind(Kind), HasCancel(HasCancel) {}
59 
60  CGOpenMPRegionInfo(const CGOpenMPRegionKind RegionKind,
61  const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind,
62  bool HasCancel)
63  : CGCapturedStmtInfo(CR_OpenMP), RegionKind(RegionKind), CodeGen(CodeGen),
64  Kind(Kind), HasCancel(HasCancel) {}
65 
66  /// \brief Get a variable or parameter for storing global thread id
67  /// inside OpenMP construct.
68  virtual const VarDecl *getThreadIDVariable() const = 0;
69 
70  /// \brief Emit the captured statement body.
71  void EmitBody(CodeGenFunction &CGF, const Stmt *S) override;
72 
73  /// \brief Get an LValue for the current ThreadID variable.
74  /// \return LValue for thread id variable. This LValue always has type int32*.
75  virtual LValue getThreadIDVariableLValue(CodeGenFunction &CGF);
76 
77  virtual void emitUntiedSwitch(CodeGenFunction & /*CGF*/) {}
78 
79  CGOpenMPRegionKind getRegionKind() const { return RegionKind; }
80 
81  OpenMPDirectiveKind getDirectiveKind() const { return Kind; }
82 
83  bool hasCancel() const { return HasCancel; }
84 
85  static bool classof(const CGCapturedStmtInfo *Info) {
86  return Info->getKind() == CR_OpenMP;
87  }
88 
89  ~CGOpenMPRegionInfo() override = default;
90 
91 protected:
92  CGOpenMPRegionKind RegionKind;
93  RegionCodeGenTy CodeGen;
94  OpenMPDirectiveKind Kind;
95  bool HasCancel;
96 };
97 
98 /// \brief API for captured statement code generation in OpenMP constructs.
99 class CGOpenMPOutlinedRegionInfo final : public CGOpenMPRegionInfo {
100 public:
101  CGOpenMPOutlinedRegionInfo(const CapturedStmt &CS, const VarDecl *ThreadIDVar,
102  const RegionCodeGenTy &CodeGen,
103  OpenMPDirectiveKind Kind, bool HasCancel,
104  StringRef HelperName)
105  : CGOpenMPRegionInfo(CS, ParallelOutlinedRegion, CodeGen, Kind,
106  HasCancel),
107  ThreadIDVar(ThreadIDVar), HelperName(HelperName) {
108  assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
109  }
110 
111  /// \brief Get a variable or parameter for storing global thread id
112  /// inside OpenMP construct.
113  const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }
114 
115  /// \brief Get the name of the capture helper.
116  StringRef getHelperName() const override { return HelperName; }
117 
118  static bool classof(const CGCapturedStmtInfo *Info) {
119  return CGOpenMPRegionInfo::classof(Info) &&
120  cast<CGOpenMPRegionInfo>(Info)->getRegionKind() ==
121  ParallelOutlinedRegion;
122  }
123 
124 private:
125  /// \brief A variable or parameter storing global thread id for OpenMP
126  /// constructs.
127  const VarDecl *ThreadIDVar;
128  StringRef HelperName;
129 };
130 
131 /// \brief API for captured statement code generation in OpenMP constructs.
132 class CGOpenMPTaskOutlinedRegionInfo final : public CGOpenMPRegionInfo {
133 public:
134  class UntiedTaskActionTy final : public PrePostActionTy {
135  bool Untied;
136  const VarDecl *PartIDVar;
137  const RegionCodeGenTy UntiedCodeGen;
138  llvm::SwitchInst *UntiedSwitch = nullptr;
139 
140  public:
141  UntiedTaskActionTy(bool Tied, const VarDecl *PartIDVar,
142  const RegionCodeGenTy &UntiedCodeGen)
143  : Untied(!Tied), PartIDVar(PartIDVar), UntiedCodeGen(UntiedCodeGen) {}
144  void Enter(CodeGenFunction &CGF) override {
145  if (Untied) {
146  // Emit task switching point.
147  auto PartIdLVal = CGF.EmitLoadOfPointerLValue(
148  CGF.GetAddrOfLocalVar(PartIDVar),
149  PartIDVar->getType()->castAs<PointerType>());
150  auto *Res = CGF.EmitLoadOfScalar(PartIdLVal, PartIDVar->getLocation());
151  auto *DoneBB = CGF.createBasicBlock(".untied.done.");
152  UntiedSwitch = CGF.Builder.CreateSwitch(Res, DoneBB);
153  CGF.EmitBlock(DoneBB);
154  CGF.EmitBranchThroughCleanup(CGF.ReturnBlock);
155  CGF.EmitBlock(CGF.createBasicBlock(".untied.jmp."));
156  UntiedSwitch->addCase(CGF.Builder.getInt32(0),
157  CGF.Builder.GetInsertBlock());
158  emitUntiedSwitch(CGF);
159  }
160  }
161  void emitUntiedSwitch(CodeGenFunction &CGF) const {
162  if (Untied) {
163  auto PartIdLVal = CGF.EmitLoadOfPointerLValue(
164  CGF.GetAddrOfLocalVar(PartIDVar),
165  PartIDVar->getType()->castAs<PointerType>());
166  CGF.EmitStoreOfScalar(CGF.Builder.getInt32(UntiedSwitch->getNumCases()),
167  PartIdLVal);
168  UntiedCodeGen(CGF);
169  CodeGenFunction::JumpDest CurPoint =
170  CGF.getJumpDestInCurrentScope(".untied.next.");
171  CGF.EmitBranchThroughCleanup(CGF.ReturnBlock);
172  CGF.EmitBlock(CGF.createBasicBlock(".untied.jmp."));
173  UntiedSwitch->addCase(CGF.Builder.getInt32(UntiedSwitch->getNumCases()),
174  CGF.Builder.GetInsertBlock());
175  CGF.EmitBranchThroughCleanup(CurPoint);
176  CGF.EmitBlock(CurPoint.getBlock());
177  }
178  }
179  unsigned getNumberOfParts() const { return UntiedSwitch->getNumCases(); }
180  };
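 // Annotation (not part of the original source): for an untied task the
 // action above threads a small state machine through the task body. A rough
 // sketch of the generated control flow, assuming two scheduling points:
 //
 //   switch (*part_id) {          // emitted by Enter()
 //   case 0: goto .untied.jmp.0;  // first entry into the task
 //   case 1: goto .untied.jmp.1;  // resume after the first re-enqueue
 //   default: goto .untied.done.; // no more parts to execute
 //   }
 //
 // Each call to emitUntiedSwitch() stores the next case number into *part_id,
 // re-enqueues the task via UntiedCodeGen, and opens a new .untied.jmp. block,
 // so getNumberOfParts() equals the number of scheduling points seen so far.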
181  CGOpenMPTaskOutlinedRegionInfo(const CapturedStmt &CS,
182  const VarDecl *ThreadIDVar,
183  const RegionCodeGenTy &CodeGen,
184  OpenMPDirectiveKind Kind, bool HasCancel,
185  const UntiedTaskActionTy &Action)
186  : CGOpenMPRegionInfo(CS, TaskOutlinedRegion, CodeGen, Kind, HasCancel),
187  ThreadIDVar(ThreadIDVar), Action(Action) {
188  assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
189  }
190 
191  /// \brief Get a variable or parameter for storing global thread id
192  /// inside OpenMP construct.
193  const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }
194 
195  /// \brief Get an LValue for the current ThreadID variable.
196  LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override;
197 
198  /// \brief Get the name of the capture helper.
199  StringRef getHelperName() const override { return ".omp_outlined."; }
200 
201  void emitUntiedSwitch(CodeGenFunction &CGF) override {
202  Action.emitUntiedSwitch(CGF);
203  }
204 
205  static bool classof(const CGCapturedStmtInfo *Info) {
206  return CGOpenMPRegionInfo::classof(Info) &&
207  cast<CGOpenMPRegionInfo>(Info)->getRegionKind() ==
208  TaskOutlinedRegion;
209  }
210 
211 private:
212  /// \brief A variable or parameter storing global thread id for OpenMP
213  /// constructs.
214  const VarDecl *ThreadIDVar;
215  /// Action for emitting code for untied tasks.
216  const UntiedTaskActionTy &Action;
217 };
218 
219 /// \brief API for inlined captured statement code generation in OpenMP
220 /// constructs.
221 class CGOpenMPInlinedRegionInfo : public CGOpenMPRegionInfo {
222 public:
223  CGOpenMPInlinedRegionInfo(CodeGenFunction::CGCapturedStmtInfo *OldCSI,
224  const RegionCodeGenTy &CodeGen,
225  OpenMPDirectiveKind Kind, bool HasCancel)
226  : CGOpenMPRegionInfo(InlinedRegion, CodeGen, Kind, HasCancel),
227  OldCSI(OldCSI),
228  OuterRegionInfo(dyn_cast_or_null<CGOpenMPRegionInfo>(OldCSI)) {}
229 
230  // \brief Retrieve the value of the context parameter.
231  llvm::Value *getContextValue() const override {
232  if (OuterRegionInfo)
233  return OuterRegionInfo->getContextValue();
234  llvm_unreachable("No context value for inlined OpenMP region");
235  }
236 
237  void setContextValue(llvm::Value *V) override {
238  if (OuterRegionInfo) {
239  OuterRegionInfo->setContextValue(V);
240  return;
241  }
242  llvm_unreachable("No context value for inlined OpenMP region");
243  }
244 
245  /// \brief Lookup the captured field decl for a variable.
246  const FieldDecl *lookup(const VarDecl *VD) const override {
247  if (OuterRegionInfo)
248  return OuterRegionInfo->lookup(VD);
249  // If there is no outer outlined region, there is no need to look it up in
250  // the list of captured variables; we can use the original one.
251  return nullptr;
252  }
253 
254  FieldDecl *getThisFieldDecl() const override {
255  if (OuterRegionInfo)
256  return OuterRegionInfo->getThisFieldDecl();
257  return nullptr;
258  }
259 
260  /// \brief Get a variable or parameter for storing global thread id
261  /// inside OpenMP construct.
262  const VarDecl *getThreadIDVariable() const override {
263  if (OuterRegionInfo)
264  return OuterRegionInfo->getThreadIDVariable();
265  return nullptr;
266  }
267 
268  /// \brief Get an LValue for the current ThreadID variable.
269  LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override {
270  if (OuterRegionInfo)
271  return OuterRegionInfo->getThreadIDVariableLValue(CGF);
272  llvm_unreachable("No LValue for inlined OpenMP construct");
273  }
274 
275  /// \brief Get the name of the capture helper.
276  StringRef getHelperName() const override {
277  if (auto *OuterRegionInfo = getOldCSI())
278  return OuterRegionInfo->getHelperName();
279  llvm_unreachable("No helper name for inlined OpenMP construct");
280  }
281 
282  void emitUntiedSwitch(CodeGenFunction &CGF) override {
283  if (OuterRegionInfo)
284  OuterRegionInfo->emitUntiedSwitch(CGF);
285  }
286 
287  CodeGenFunction::CGCapturedStmtInfo *getOldCSI() const { return OldCSI; }
288 
289  static bool classof(const CGCapturedStmtInfo *Info) {
290  return CGOpenMPRegionInfo::classof(Info) &&
291  cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == InlinedRegion;
292  }
293 
294  ~CGOpenMPInlinedRegionInfo() override = default;
295 
296 private:
297  /// \brief CodeGen info about outer OpenMP region.
298  CodeGenFunction::CGCapturedStmtInfo *OldCSI;
299  CGOpenMPRegionInfo *OuterRegionInfo;
300 };
301 
302 /// \brief API for captured statement code generation in OpenMP target
303 /// constructs. For these captures, implicit parameters are used instead of the
304 /// captured fields. The name of the target region has to be unique in a given
305 /// application so it is provided by the client, because only the client has
306 /// the information to generate that.
307 class CGOpenMPTargetRegionInfo final : public CGOpenMPRegionInfo {
308 public:
309  CGOpenMPTargetRegionInfo(const CapturedStmt &CS,
310  const RegionCodeGenTy &CodeGen, StringRef HelperName)
311  : CGOpenMPRegionInfo(CS, TargetRegion, CodeGen, OMPD_target,
312  /*HasCancel=*/false),
313  HelperName(HelperName) {}
314 
315  /// \brief This is unused for target regions because each starts executing
316  /// with a single thread.
317  const VarDecl *getThreadIDVariable() const override { return nullptr; }
318 
319  /// \brief Get the name of the capture helper.
320  StringRef getHelperName() const override { return HelperName; }
321 
322  static bool classof(const CGCapturedStmtInfo *Info) {
323  return CGOpenMPRegionInfo::classof(Info) &&
324  cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == TargetRegion;
325  }
326 
327 private:
328  StringRef HelperName;
329 };
330 
331 static void EmptyCodeGen(CodeGenFunction &, PrePostActionTy &) {
332  llvm_unreachable("No codegen for expressions");
333 }
334 /// \brief API for generation of expressions captured in an innermost OpenMP
335 /// region.
336 class CGOpenMPInnerExprInfo final : public CGOpenMPInlinedRegionInfo {
337 public:
338  CGOpenMPInnerExprInfo(CodeGenFunction &CGF, const CapturedStmt &CS)
339  : CGOpenMPInlinedRegionInfo(CGF.CapturedStmtInfo, EmptyCodeGen,
340  OMPD_unknown,
341  /*HasCancel=*/false),
342  PrivScope(CGF) {
343  // Make sure the globals captured in the provided statement are local by
344  // using the privatization logic. We assume the same variable is not
345  // captured more than once.
346  for (auto &C : CS.captures()) {
347  if (!C.capturesVariable() && !C.capturesVariableByCopy())
348  continue;
349 
350  const VarDecl *VD = C.getCapturedVar();
351  if (VD->isLocalVarDeclOrParm())
352  continue;
353 
354  DeclRefExpr DRE(const_cast<VarDecl *>(VD),
355  /*RefersToEnclosingVariableOrCapture=*/false,
356  VD->getType().getNonReferenceType(), VK_LValue,
357  C.getLocation());
358  PrivScope.addPrivate(VD, [&CGF, &DRE]() -> Address {
359  return CGF.EmitLValue(&DRE).getAddress();
360  });
361  }
362  (void)PrivScope.Privatize();
363  }
364 
365  /// \brief Lookup the captured field decl for a variable.
366  const FieldDecl *lookup(const VarDecl *VD) const override {
367  if (auto *FD = CGOpenMPInlinedRegionInfo::lookup(VD))
368  return FD;
369  return nullptr;
370  }
371 
372  /// \brief Emit the captured statement body.
373  void EmitBody(CodeGenFunction &CGF, const Stmt *S) override {
374  llvm_unreachable("No body for expressions");
375  }
376 
377  /// \brief Get a variable or parameter for storing global thread id
378  /// inside OpenMP construct.
379  const VarDecl *getThreadIDVariable() const override {
380  llvm_unreachable("No thread id for expressions");
381  }
382 
383  /// \brief Get the name of the capture helper.
384  StringRef getHelperName() const override {
385  llvm_unreachable("No helper name for expressions");
386  }
387 
388  static bool classof(const CGCapturedStmtInfo *Info) { return false; }
389 
390 private:
391  /// Private scope to capture global variables.
392  CodeGenFunction::OMPPrivateScope PrivScope;
393 };
394 
395 /// \brief RAII for emitting code of OpenMP constructs.
396 class InlinedOpenMPRegionRAII {
397  CodeGenFunction &CGF;
398  llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
399  FieldDecl *LambdaThisCaptureField = nullptr;
400  const CodeGen::CGBlockInfo *BlockInfo = nullptr;
401 
402 public:
403  /// \brief Constructs region for combined constructs.
404  /// \param CodeGen Code generation sequence for combined directives. Includes
405  /// a list of functions used for code generation of implicitly inlined
406  /// regions.
407  InlinedOpenMPRegionRAII(CodeGenFunction &CGF, const RegionCodeGenTy &CodeGen,
408  OpenMPDirectiveKind Kind, bool HasCancel)
409  : CGF(CGF) {
410  // Start emission for the construct.
411  CGF.CapturedStmtInfo = new CGOpenMPInlinedRegionInfo(
412  CGF.CapturedStmtInfo, CodeGen, Kind, HasCancel);
413  std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
414  LambdaThisCaptureField = CGF.LambdaThisCaptureField;
415  CGF.LambdaThisCaptureField = nullptr;
416  BlockInfo = CGF.BlockInfo;
417  CGF.BlockInfo = nullptr;
418  }
419 
420  ~InlinedOpenMPRegionRAII() {
421  // Restore original CapturedStmtInfo only if we're done with code emission.
422  auto *OldCSI =
423  cast<CGOpenMPInlinedRegionInfo>(CGF.CapturedStmtInfo)->getOldCSI();
424  delete CGF.CapturedStmtInfo;
425  CGF.CapturedStmtInfo = OldCSI;
426  std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
427  CGF.LambdaThisCaptureField = LambdaThisCaptureField;
428  CGF.BlockInfo = BlockInfo;
429  }
430 };
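// Annotation (not part of the original source): a rough sketch of how this
// RAII is typically used when emitting an inlined directive:
//
//   {
//     InlinedOpenMPRegionRAII Region(CGF, CodeGen, OMPD_for, /*HasCancel=*/false);
//     CGF.CapturedStmtInfo->EmitBody(CGF, /*S=*/nullptr);  // runs CodeGen(CGF)
//   }  // destructor restores the previous CapturedStmtInfo, the lambda
//      // capture fields and the block info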
431 
432 /// \brief Values for bit flags used in the ident_t to describe the fields.
433 /// All enumerated elements are named and described in accordance with the code
434 /// from http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h
435 enum OpenMPLocationFlags : unsigned {
436  /// \brief Use trampoline for internal microtask.
437  OMP_IDENT_IMD = 0x01,
438  /// \brief Use c-style ident structure.
439  OMP_IDENT_KMPC = 0x02,
440  /// \brief Atomic reduction option for kmpc_reduce.
441  OMP_ATOMIC_REDUCE = 0x10,
442  /// \brief Explicit 'barrier' directive.
443  OMP_IDENT_BARRIER_EXPL = 0x20,
444  /// \brief Implicit barrier in code.
445  OMP_IDENT_BARRIER_IMPL = 0x40,
446  /// \brief Implicit barrier in 'for' directive.
447  OMP_IDENT_BARRIER_IMPL_FOR = 0x40,
448  /// \brief Implicit barrier in 'sections' directive.
449  OMP_IDENT_BARRIER_IMPL_SECTIONS = 0xC0,
450  /// \brief Implicit barrier in 'single' directive.
451  OMP_IDENT_BARRIER_IMPL_SINGLE = 0x140,
452  /// Call of __kmp_for_static_init for static loop.
453  OMP_IDENT_WORK_LOOP = 0x200,
454  /// Call of __kmp_for_static_init for sections.
455  OMP_IDENT_WORK_SECTIONS = 0x400,
456  /// Call of __kmp_for_static_init for distribute.
457  OMP_IDENT_WORK_DISTRIBUTE = 0x800,
458  LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/OMP_IDENT_WORK_DISTRIBUTE)
459 };
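// Annotation (not part of the original source): these are bit flags and are
// meant to be OR-ed together when building the "flags" field of an ident_t,
// e.g.
//
//   unsigned Flags = OMP_IDENT_KMPC | OMP_IDENT_BARRIER_IMPL_FOR;
//
// emitUpdateLocation() below always adds OMP_IDENT_KMPC before emitting the
// location.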
460 
461 /// \brief Describes ident structure that describes a source location.
462 /// All descriptions are taken from
463 /// http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h
464 /// Original structure:
465 /// typedef struct ident {
466 /// kmp_int32 reserved_1; /**< might be used in Fortran;
467 /// see above */
468 /// kmp_int32 flags; /**< also f.flags; KMP_IDENT_xxx flags;
469 /// KMP_IDENT_KMPC identifies this union
470 /// member */
471 /// kmp_int32 reserved_2; /**< not really used in Fortran any more;
472 /// see above */
473 ///#if USE_ITT_BUILD
474 /// /* but currently used for storing
475 /// region-specific ITT */
476 /// /* contextual information. */
477 ///#endif /* USE_ITT_BUILD */
478 /// kmp_int32 reserved_3; /**< source[4] in Fortran, do not use for
479 /// C++ */
480 /// char const *psource; /**< String describing the source location.
481 /// The string is composed of semi-colon separated
482 /// fields which describe the source file,
483 /// the function and a pair of line numbers that
484 /// delimit the construct.
485 /// */
486 /// } ident_t;
487 enum IdentFieldIndex {
488  /// \brief might be used in Fortran
489  IdentField_Reserved_1,
490  /// \brief OMP_IDENT_xxx flags; OMP_IDENT_KMPC identifies this union member.
491  IdentField_Flags,
492  /// \brief Not really used in Fortran any more
493  IdentField_Reserved_2,
494  /// \brief Source[4] in Fortran, do not use for C++
495  IdentField_Reserved_3,
496  /// \brief String describing the source location. The string is composed of
497  /// semi-colon separated fields which describe the source file, the function
498  /// and a pair of line numbers that delimit the construct.
499  IdentField_PSource
500 };
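// Annotation (not part of the original source): for the default location
// emitted by getOrCreateDefaultLocation() below, the resulting global is
// roughly
//
//   %ident_t = type { i32, i32, i32, i32, i8* }
//   @.str = private unnamed_addr constant [23 x i8] c";unknown;unknown;0;0;;\00"
//   @0 = private unnamed_addr constant %ident_t
//          { i32 0, i32 2, i32 0, i32 0, i8* getelementptr ... @.str ... }
//
// where the flags field (i32 2) is OMP_IDENT_KMPC.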
501 
502 /// \brief Schedule types for 'omp for' loops (these enumerators are taken from
503 /// the enum sched_type in kmp.h).
504 enum OpenMPSchedType {
505  /// \brief Lower bound for default (unordered) versions.
513  /// static with chunk adjustment (e.g., simd)
515  /// \brief Lower bound for 'ordered' versions.
524  /// \brief dist_schedule types
527  /// Support for OpenMP 4.5 monotonic and nonmonotonic schedule modifiers.
528  /// Set if the monotonic schedule modifier was present.
530  /// Set if the nonmonotonic schedule modifier was present.
532 };
533 
534 enum OpenMPRTLFunction {
535  /// \brief Call to void __kmpc_fork_call(ident_t *loc, kmp_int32 argc,
536  /// kmpc_micro microtask, ...);
538  /// \brief Call to void *__kmpc_threadprivate_cached(ident_t *loc,
539  /// kmp_int32 global_tid, void *data, size_t size, void ***cache);
541  /// \brief Call to void __kmpc_threadprivate_register( ident_t *,
542  /// void *data, kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor);
544  // Call to kmp_int32 __kmpc_global_thread_num(ident_t *loc);
546  // Call to void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
547  // kmp_critical_name *crit);
549  // Call to void __kmpc_critical_with_hint(ident_t *loc, kmp_int32
550  // global_tid, kmp_critical_name *crit, uintptr_t hint);
552  // Call to void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid,
553  // kmp_critical_name *crit);
555  // Call to kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32
556  // global_tid);
558  // Call to void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
560  // Call to void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);
562  // Call to void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
563  // global_tid);
565  // Call to void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
566  // global_tid);
568  // Call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
569  // kmp_int32 num_threads);
571  // Call to void __kmpc_flush(ident_t *loc);
573  // Call to kmp_int32 __kmpc_master(ident_t *, kmp_int32 global_tid);
575  // Call to void __kmpc_end_master(ident_t *, kmp_int32 global_tid);
577  // Call to kmp_int32 __kmpc_omp_taskyield(ident_t *, kmp_int32 global_tid,
578  // int end_part);
580  // Call to kmp_int32 __kmpc_single(ident_t *, kmp_int32 global_tid);
582  // Call to void __kmpc_end_single(ident_t *, kmp_int32 global_tid);
584  // Call to kmp_task_t * __kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
585  // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
586  // kmp_routine_entry_t *task_entry);
588  // Call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t *
589  // new_task);
591  // Call to void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
592  // size_t cpy_size, void *cpy_data, void(*cpy_func)(void *, void *),
593  // kmp_int32 didit);
595  // Call to kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid,
596  // kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void
597  // (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck);
599  // Call to kmp_int32 __kmpc_reduce_nowait(ident_t *loc, kmp_int32
600  // global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
601  // void (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name
602  // *lck);
604  // Call to void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
605  // kmp_critical_name *lck);
607  // Call to void __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
608  // kmp_critical_name *lck);
610  // Call to void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid,
611  // kmp_task_t * new_task);
613  // Call to void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid,
614  // kmp_task_t * new_task);
616  // Call to void __kmpc_ordered(ident_t *loc, kmp_int32 global_tid);
618  // Call to void __kmpc_end_ordered(ident_t *loc, kmp_int32 global_tid);
620  // Call to kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
621  // global_tid);
623  // Call to void __kmpc_taskgroup(ident_t *loc, kmp_int32 global_tid);
625  // Call to void __kmpc_end_taskgroup(ident_t *loc, kmp_int32 global_tid);
627  // Call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
628  // int proc_bind);
630  // Call to kmp_int32 __kmpc_omp_task_with_deps(ident_t *loc_ref, kmp_int32
631  // gtid, kmp_task_t * new_task, kmp_int32 ndeps, kmp_depend_info_t
632  // *dep_list, kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
634  // Call to void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32
635  // gtid, kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32
636  // ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
638  // Call to kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
639  // global_tid, kmp_int32 cncl_kind);
641  // Call to kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
642  // kmp_int32 cncl_kind);
644  // Call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid,
645  // kmp_int32 num_teams, kmp_int32 thread_limit);
647  // Call to void __kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro
648  // microtask, ...);
650  // Call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int
651  // if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int
652  // sched, kmp_uint64 grainsize, void *task_dup);
654  // Call to void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid, kmp_int32
655  // num_dims, struct kmp_dim *dims);
657  // Call to void __kmpc_doacross_fini(ident_t *loc, kmp_int32 gtid);
659  // Call to void __kmpc_doacross_post(ident_t *loc, kmp_int32 gtid, kmp_int64
660  // *vec);
662  // Call to void __kmpc_doacross_wait(ident_t *loc, kmp_int32 gtid, kmp_int64
663  // *vec);
665  // Call to void *__kmpc_task_reduction_init(int gtid, int num_data, void
666  // *data);
668  // Call to void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void
669  // *d);
671 
672  //
673  // Offloading related calls
674  //
675  // Call to int32_t __tgt_target(int64_t device_id, void *host_ptr, int32_t
676  // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
677  // *arg_types);
679  // Call to int32_t __tgt_target_nowait(int64_t device_id, void *host_ptr,
680  // int32_t arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
681  // *arg_types);
683  // Call to int32_t __tgt_target_teams(int64_t device_id, void *host_ptr,
684  // int32_t arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
685  // *arg_types, int32_t num_teams, int32_t thread_limit);
687  // Call to int32_t __tgt_target_teams_nowait(int64_t device_id, void
688  // *host_ptr, int32_t arg_num, void** args_base, void **args, size_t
689  // *arg_sizes, int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
691  // Call to void __tgt_register_lib(__tgt_bin_desc *desc);
693  // Call to void __tgt_unregister_lib(__tgt_bin_desc *desc);
695  // Call to void __tgt_target_data_begin(int64_t device_id, int32_t arg_num,
696  // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
698  // Call to void __tgt_target_data_begin_nowait(int64_t device_id, int32_t
699  // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
700  // *arg_types);
702  // Call to void __tgt_target_data_end(int64_t device_id, int32_t arg_num,
703  // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
705  // Call to void __tgt_target_data_end_nowait(int64_t device_id, int32_t
706  // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
707  // *arg_types);
709  // Call to void __tgt_target_data_update(int64_t device_id, int32_t arg_num,
710  // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
712  // Call to void __tgt_target_data_update_nowait(int64_t device_id, int32_t
713  // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
714  // *arg_types);
716 };
717 
718 /// A basic class for pre- and post-actions used in the advanced codegen
719 /// sequence for an OpenMP region.
720 class CleanupTy final : public EHScopeStack::Cleanup {
721  PrePostActionTy *Action;
722 
723 public:
724  explicit CleanupTy(PrePostActionTy *Action) : Action(Action) {}
725  void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
726  if (!CGF.HaveInsertPoint())
727  return;
728  Action->Exit(CGF);
729  }
730 };
731 
732 } // anonymous namespace
733 
734 void RegionCodeGenTy::operator()(CodeGenFunction &CGF) const {
735  CodeGenFunction::RunCleanupsScope Scope(CGF);
736  if (PrePostAction) {
737  CGF.EHStack.pushCleanup<CleanupTy>(NormalAndEHCleanup, PrePostAction);
738  Callback(CodeGen, CGF, *PrePostAction);
739  } else {
740  PrePostActionTy Action;
741  Callback(CodeGen, CGF, Action);
742  }
743 }
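// Annotation (not part of the original source): the operator() above runs
// the stored callback with the (optional) PrePostActionTy; the action's
// Exit() hook is pushed first as a NormalAndEHCleanup, so it fires even if
// the region body leaves through an exceptional-cleanup path. A rough usage
// sketch:
//
//   RegionCodeGenTy RCG([](CodeGenFunction &CGF, PrePostActionTy &) {
//     /* emit the region body */
//   });
//   RCG.setAction(MyAction);  // optional pre/post action
//   RCG(CGF);                 // pushes Exit() as cleanup, runs the callback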
744 
745 /// Check if the combiner is a call to a UDR combiner and, if so, return the
746 /// UDR decl used for the reduction.
747 static const OMPDeclareReductionDecl *
748 getReductionInit(const Expr *ReductionOp) {
749  if (auto *CE = dyn_cast<CallExpr>(ReductionOp))
750  if (auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
751  if (auto *DRE =
752  dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts()))
753  if (auto *DRD = dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl()))
754  return DRD;
755  return nullptr;
756 }
757 
758 static void emitInitWithReductionInitializer(CodeGenFunction &CGF,
759  const OMPDeclareReductionDecl *DRD,
760  const Expr *InitOp,
761  Address Private, Address Original,
762  QualType Ty) {
763  if (DRD->getInitializer()) {
764  std::pair<llvm::Function *, llvm::Function *> Reduction =
765  CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD);
766  auto *CE = cast<CallExpr>(InitOp);
767  auto *OVE = cast<OpaqueValueExpr>(CE->getCallee());
768  const Expr *LHS = CE->getArg(/*Arg=*/0)->IgnoreParenImpCasts();
769  const Expr *RHS = CE->getArg(/*Arg=*/1)->IgnoreParenImpCasts();
770  auto *LHSDRE = cast<DeclRefExpr>(cast<UnaryOperator>(LHS)->getSubExpr());
771  auto *RHSDRE = cast<DeclRefExpr>(cast<UnaryOperator>(RHS)->getSubExpr());
772  CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
773  PrivateScope.addPrivate(cast<VarDecl>(LHSDRE->getDecl()),
774  [=]() -> Address { return Private; });
775  PrivateScope.addPrivate(cast<VarDecl>(RHSDRE->getDecl()),
776  [=]() -> Address { return Original; });
777  (void)PrivateScope.Privatize();
778  RValue Func = RValue::get(Reduction.second);
779  CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func);
780  CGF.EmitIgnoredExpr(InitOp);
781  } else {
782  llvm::Constant *Init = CGF.CGM.EmitNullConstant(Ty);
783  auto *GV = new llvm::GlobalVariable(
784  CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
785  llvm::GlobalValue::PrivateLinkage, Init, ".init");
786  LValue LV = CGF.MakeNaturalAlignAddrLValue(GV, Ty);
787  RValue InitRVal;
788  switch (CGF.getEvaluationKind(Ty)) {
789  case TEK_Scalar:
790  InitRVal = CGF.EmitLoadOfLValue(LV, DRD->getLocation());
791  break;
792  case TEK_Complex:
793  InitRVal =
794  RValue::getComplex(CGF.EmitLoadOfComplex(LV, DRD->getLocation()));
795  break;
796  case TEK_Aggregate:
797  InitRVal = RValue::getAggregate(LV.getAddress());
798  break;
799  }
800  OpaqueValueExpr OVE(DRD->getLocation(), Ty, VK_RValue);
801  CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE, InitRVal);
802  CGF.EmitAnyExprToMem(&OVE, Private, Ty.getQualifiers(),
803  /*IsInitializer=*/false);
804  }
805 }
806 
807 /// \brief Emit initialization of arrays of complex types.
808 /// \param DestAddr Address of the array.
809 /// \param Type Type of array.
810 /// \param Init Initial expression of array.
811 /// \param SrcAddr Address of the original array.
812 static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
813  QualType Type, bool EmitDeclareReductionInit,
814  const Expr *Init,
815  const OMPDeclareReductionDecl *DRD,
816  Address SrcAddr = Address::invalid()) {
817  // Perform element-by-element initialization.
818  QualType ElementTy;
819 
820  // Drill down to the base element type on both arrays.
821  auto ArrayTy = Type->getAsArrayTypeUnsafe();
822  auto NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, DestAddr);
823  DestAddr =
824  CGF.Builder.CreateElementBitCast(DestAddr, DestAddr.getElementType());
825  if (DRD)
826  SrcAddr =
827  CGF.Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());
828 
829  llvm::Value *SrcBegin = nullptr;
830  if (DRD)
831  SrcBegin = SrcAddr.getPointer();
832  auto DestBegin = DestAddr.getPointer();
833  // Cast from pointer to array type to pointer to single element.
834  auto DestEnd = CGF.Builder.CreateGEP(DestBegin, NumElements);
835  // The basic structure here is a while-do loop.
836  auto BodyBB = CGF.createBasicBlock("omp.arrayinit.body");
837  auto DoneBB = CGF.createBasicBlock("omp.arrayinit.done");
838  auto IsEmpty =
839  CGF.Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arrayinit.isempty");
840  CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
841 
842  // Enter the loop body, making that address the current address.
843  auto EntryBB = CGF.Builder.GetInsertBlock();
844  CGF.EmitBlock(BodyBB);
845 
846  CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);
847 
848  llvm::PHINode *SrcElementPHI = nullptr;
849  Address SrcElementCurrent = Address::invalid();
850  if (DRD) {
851  SrcElementPHI = CGF.Builder.CreatePHI(SrcBegin->getType(), 2,
852  "omp.arraycpy.srcElementPast");
853  SrcElementPHI->addIncoming(SrcBegin, EntryBB);
854  SrcElementCurrent =
855  Address(SrcElementPHI,
856  SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));
857  }
858  llvm::PHINode *DestElementPHI = CGF.Builder.CreatePHI(
859  DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
860  DestElementPHI->addIncoming(DestBegin, EntryBB);
861  Address DestElementCurrent =
862  Address(DestElementPHI,
863  DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));
864 
865  // Emit copy.
866  {
867  CodeGenFunction::RunCleanupsScope InitScope(CGF);
868  if (EmitDeclareReductionInit) {
869  emitInitWithReductionInitializer(CGF, DRD, Init, DestElementCurrent,
870  SrcElementCurrent, ElementTy);
871  } else
872  CGF.EmitAnyExprToMem(Init, DestElementCurrent, ElementTy.getQualifiers(),
873  /*IsInitializer=*/false);
874  }
875 
876  if (DRD) {
877  // Shift the address forward by one element.
878  auto SrcElementNext = CGF.Builder.CreateConstGEP1_32(
879  SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
880  SrcElementPHI->addIncoming(SrcElementNext, CGF.Builder.GetInsertBlock());
881  }
882 
883  // Shift the address forward by one element.
884  auto DestElementNext = CGF.Builder.CreateConstGEP1_32(
885  DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
886  // Check whether we've reached the end.
887  auto Done =
888  CGF.Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
889  CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
890  DestElementPHI->addIncoming(DestElementNext, CGF.Builder.GetInsertBlock());
891 
892  // Done.
893  CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
894 }
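// Annotation (not part of the original source): the helper above lowers the
// initialization to a standard element-wise loop over the flattened array,
// roughly:
//
//   omp.arrayinit.body:                        ; preds = %entry, %body
//     %dst = phi i8* [ %begin, %entry ], [ %dst.next, %body ]
//     ... emit Init (or the UDR initializer) into %dst ...
//     %dst.next = getelementptr %dst, 1
//     %done = icmp eq %dst.next, %end
//     br i1 %done, label %omp.arrayinit.done, label %omp.arrayinit.body
//
// with a parallel PHI for the source element when a declare-reduction
// initializer needs the original (omp_orig) value.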
895 
896 LValue ReductionCodeGen::emitSharedLValue(CodeGenFunction &CGF, const Expr *E) {
897  return CGF.EmitOMPSharedLValue(E);
898 }
899 
900 LValue ReductionCodeGen::emitSharedLValueUB(CodeGenFunction &CGF,
901  const Expr *E) {
902  if (const auto *OASE = dyn_cast<OMPArraySectionExpr>(E))
903  return CGF.EmitOMPArraySectionExpr(OASE, /*IsLowerBound=*/false);
904  return LValue();
905 }
906 
907 void ReductionCodeGen::emitAggregateInitialization(
908  CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal,
909  const OMPDeclareReductionDecl *DRD) {
910  // Emit VarDecl with copy init for arrays.
911  // Get the address of the original variable captured in current
912  // captured region.
913  auto *PrivateVD =
914  cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
915  bool EmitDeclareReductionInit =
916  DRD && (DRD->getInitializer() || !PrivateVD->hasInit());
917  EmitOMPAggregateInit(CGF, PrivateAddr, PrivateVD->getType(),
918  EmitDeclareReductionInit,
919  EmitDeclareReductionInit ? ClausesData[N].ReductionOp
920  : PrivateVD->getInit(),
921  DRD, SharedLVal.getAddress());
922 }
923 
924 ReductionCodeGen::ReductionCodeGen(ArrayRef<const Expr *> Shareds,
925  ArrayRef<const Expr *> Privates,
926  ArrayRef<const Expr *> ReductionOps) {
927  ClausesData.reserve(Shareds.size());
928  SharedAddresses.reserve(Shareds.size());
929  Sizes.reserve(Shareds.size());
930  BaseDecls.reserve(Shareds.size());
931  auto IPriv = Privates.begin();
932  auto IRed = ReductionOps.begin();
933  for (const auto *Ref : Shareds) {
934  ClausesData.emplace_back(Ref, *IPriv, *IRed);
935  std::advance(IPriv, 1);
936  std::advance(IRed, 1);
937  }
938 }
939 
940 void ReductionCodeGen::emitSharedLValue(CodeGenFunction &CGF, unsigned N) {
941  assert(SharedAddresses.size() == N &&
942  "Number of generated lvalues must be exactly N.");
943  LValue First = emitSharedLValue(CGF, ClausesData[N].Ref);
944  LValue Second = emitSharedLValueUB(CGF, ClausesData[N].Ref);
945  SharedAddresses.emplace_back(First, Second);
946 }
947 
948 void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N) {
949  auto *PrivateVD =
950  cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
951  QualType PrivateType = PrivateVD->getType();
952  bool AsArraySection = isa<OMPArraySectionExpr>(ClausesData[N].Ref);
953  if (!PrivateType->isVariablyModifiedType()) {
954  Sizes.emplace_back(
955  CGF.getTypeSize(
956  SharedAddresses[N].first.getType().getNonReferenceType()),
957  nullptr);
958  return;
959  }
960  llvm::Value *Size;
961  llvm::Value *SizeInChars;
962  llvm::Type *ElemType =
963  cast<llvm::PointerType>(SharedAddresses[N].first.getPointer()->getType())
964  ->getElementType();
965  auto *ElemSizeOf = llvm::ConstantExpr::getSizeOf(ElemType);
966  if (AsArraySection) {
967  Size = CGF.Builder.CreatePtrDiff(SharedAddresses[N].second.getPointer(),
968  SharedAddresses[N].first.getPointer());
969  Size = CGF.Builder.CreateNUWAdd(
970  Size, llvm::ConstantInt::get(Size->getType(), /*V=*/1));
971  SizeInChars = CGF.Builder.CreateNUWMul(Size, ElemSizeOf);
972  } else {
973  SizeInChars = CGF.getTypeSize(
974  SharedAddresses[N].first.getType().getNonReferenceType());
975  Size = CGF.Builder.CreateExactUDiv(SizeInChars, ElemSizeOf);
976  }
977  Sizes.emplace_back(SizeInChars, Size);
978  CodeGenFunction::OpaqueValueMapping OpaqueMap(
979  CGF,
980  cast<OpaqueValueExpr>(
981  CGF.getContext().getAsVariableArrayType(PrivateType)->getSizeExpr()),
982  RValue::get(Size));
983  CGF.EmitVariablyModifiedType(PrivateType);
984 }
985 
986 void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N,
987  llvm::Value *Size) {
988  auto *PrivateVD =
989  cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
990  QualType PrivateType = PrivateVD->getType();
991  if (!PrivateType->isVariablyModifiedType()) {
992  assert(!Size && !Sizes[N].second &&
993  "Size should be nullptr for non-variably modified reduction "
994  "items.");
995  return;
996  }
997  CodeGenFunction::OpaqueValueMapping OpaqueMap(
998  CGF,
999  cast<OpaqueValueExpr>(
1000  CGF.getContext().getAsVariableArrayType(PrivateType)->getSizeExpr()),
1001  RValue::get(Size));
1002  CGF.EmitVariablyModifiedType(PrivateType);
1003 }
1004 
1005 void ReductionCodeGen::emitInitialization(
1006  CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal,
1007  llvm::function_ref<bool(CodeGenFunction &)> DefaultInit) {
1008  assert(SharedAddresses.size() > N && "No variable was generated");
1009  auto *PrivateVD =
1010  cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
1011  auto *DRD = getReductionInit(ClausesData[N].ReductionOp);
1012  QualType PrivateType = PrivateVD->getType();
1013  PrivateAddr = CGF.Builder.CreateElementBitCast(
1014  PrivateAddr, CGF.ConvertTypeForMem(PrivateType));
1015  QualType SharedType = SharedAddresses[N].first.getType();
1016  SharedLVal = CGF.MakeAddrLValue(
1017  CGF.Builder.CreateElementBitCast(SharedLVal.getAddress(),
1018  CGF.ConvertTypeForMem(SharedType)),
1019  SharedType, SharedAddresses[N].first.getBaseInfo(),
1020  CGF.CGM.getTBAAInfoForSubobject(SharedAddresses[N].first, SharedType));
1021  if (CGF.getContext().getAsArrayType(PrivateVD->getType())) {
1022  emitAggregateInitialization(CGF, N, PrivateAddr, SharedLVal, DRD);
1023  } else if (DRD && (DRD->getInitializer() || !PrivateVD->hasInit())) {
1024  emitInitWithReductionInitializer(CGF, DRD, ClausesData[N].ReductionOp,
1025  PrivateAddr, SharedLVal.getAddress(),
1026  SharedLVal.getType());
1027  } else if (!DefaultInit(CGF) && PrivateVD->hasInit() &&
1028  !CGF.isTrivialInitializer(PrivateVD->getInit())) {
1029  CGF.EmitAnyExprToMem(PrivateVD->getInit(), PrivateAddr,
1030  PrivateVD->getType().getQualifiers(),
1031  /*IsInitializer=*/false);
1032  }
1033 }
1034 
1035 bool ReductionCodeGen::needCleanups(unsigned N) {
1036  auto *PrivateVD =
1037  cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
1038  QualType PrivateType = PrivateVD->getType();
1039  QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
1040  return DTorKind != QualType::DK_none;
1041 }
1042 
1043 void ReductionCodeGen::emitCleanups(CodeGenFunction &CGF, unsigned N,
1044  Address PrivateAddr) {
1045  auto *PrivateVD =
1046  cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
1047  QualType PrivateType = PrivateVD->getType();
1048  QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
1049  if (needCleanups(N)) {
1050  PrivateAddr = CGF.Builder.CreateElementBitCast(
1051  PrivateAddr, CGF.ConvertTypeForMem(PrivateType));
1052  CGF.pushDestroy(DTorKind, PrivateAddr, PrivateType);
1053  }
1054 }
1055 
1056 static LValue loadToBegin(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
1057  LValue BaseLV) {
1058  BaseTy = BaseTy.getNonReferenceType();
1059  while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
1060  !CGF.getContext().hasSameType(BaseTy, ElTy)) {
1061  if (auto *PtrTy = BaseTy->getAs<PointerType>())
1062  BaseLV = CGF.EmitLoadOfPointerLValue(BaseLV.getAddress(), PtrTy);
1063  else {
1064  LValue RefLVal = CGF.MakeAddrLValue(BaseLV.getAddress(), BaseTy);
1065  BaseLV = CGF.EmitLoadOfReferenceLValue(RefLVal);
1066  }
1067  BaseTy = BaseTy->getPointeeType();
1068  }
1069  return CGF.MakeAddrLValue(
1070  CGF.Builder.CreateElementBitCast(BaseLV.getAddress(),
1071  CGF.ConvertTypeForMem(ElTy)),
1072  BaseLV.getType(), BaseLV.getBaseInfo(),
1073  CGF.CGM.getTBAAInfoForSubobject(BaseLV, BaseLV.getType()));
1074 }
1075 
1076 static Address castToBase(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
1077  llvm::Type *BaseLVType, CharUnits BaseLVAlignment,
1078  llvm::Value *Addr) {
1079  Address Tmp = Address::invalid();
1080  Address TopTmp = Address::invalid();
1081  Address MostTopTmp = Address::invalid();
1082  BaseTy = BaseTy.getNonReferenceType();
1083  while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
1084  !CGF.getContext().hasSameType(BaseTy, ElTy)) {
1085  Tmp = CGF.CreateMemTemp(BaseTy);
1086  if (TopTmp.isValid())
1087  CGF.Builder.CreateStore(Tmp.getPointer(), TopTmp);
1088  else
1089  MostTopTmp = Tmp;
1090  TopTmp = Tmp;
1091  BaseTy = BaseTy->getPointeeType();
1092  }
1093  llvm::Type *Ty = BaseLVType;
1094  if (Tmp.isValid())
1095  Ty = Tmp.getElementType();
1096  Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, Ty);
1097  if (Tmp.isValid()) {
1098  CGF.Builder.CreateStore(Addr, Tmp);
1099  return MostTopTmp;
1100  }
1101  return Address(Addr, BaseLVAlignment);
1102 }
1103 
1104 Address ReductionCodeGen::adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
1105  Address PrivateAddr) {
1106  const DeclRefExpr *DE;
1107  const VarDecl *OrigVD = nullptr;
1108  if (auto *OASE = dyn_cast<OMPArraySectionExpr>(ClausesData[N].Ref)) {
1109  auto *Base = OASE->getBase()->IgnoreParenImpCasts();
1110  while (auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
1111  Base = TempOASE->getBase()->IgnoreParenImpCasts();
1112  while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
1113  Base = TempASE->getBase()->IgnoreParenImpCasts();
1114  DE = cast<DeclRefExpr>(Base);
1115  OrigVD = cast<VarDecl>(DE->getDecl());
1116  } else if (auto *ASE = dyn_cast<ArraySubscriptExpr>(ClausesData[N].Ref)) {
1117  auto *Base = ASE->getBase()->IgnoreParenImpCasts();
1118  while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
1119  Base = TempASE->getBase()->IgnoreParenImpCasts();
1120  DE = cast<DeclRefExpr>(Base);
1121  OrigVD = cast<VarDecl>(DE->getDecl());
1122  }
1123  if (OrigVD) {
1124  BaseDecls.emplace_back(OrigVD);
1125  auto OriginalBaseLValue = CGF.EmitLValue(DE);
1126  LValue BaseLValue =
1127  loadToBegin(CGF, OrigVD->getType(), SharedAddresses[N].first.getType(),
1128  OriginalBaseLValue);
1129  llvm::Value *Adjustment = CGF.Builder.CreatePtrDiff(
1130  BaseLValue.getPointer(), SharedAddresses[N].first.getPointer());
1131  llvm::Value *PrivatePointer =
1132  CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
1133  PrivateAddr.getPointer(),
1134  SharedAddresses[N].first.getAddress().getType());
1135  llvm::Value *Ptr = CGF.Builder.CreateGEP(PrivatePointer, Adjustment);
1136  return castToBase(CGF, OrigVD->getType(),
1137  SharedAddresses[N].first.getType(),
1138  OriginalBaseLValue.getAddress().getType(),
1139  OriginalBaseLValue.getAlignment(), Ptr);
1140  }
1141  BaseDecls.emplace_back(
1142  cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Ref)->getDecl()));
1143  return PrivateAddr;
1144 }
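// Annotation (not part of the original source): for a reduction over an array
// section the private buffer only covers the section, so the returned address
// is rebased with simple pointer arithmetic, e.g. for "reduction(+: a[2:8])":
//
//   Adjustment  = &a[0] - &a[2]            // distance back to the base decl
//   PrivateBase = &priv[0] + Adjustment    // so PrivateBase[2] aliases priv[0]
//
// which lets later code index the private copy with the original subscripts.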
1145 
1146 bool ReductionCodeGen::usesReductionInitializer(unsigned N) const {
1147  auto *DRD = getReductionInit(ClausesData[N].ReductionOp);
1148  return DRD && DRD->getInitializer();
1149 }
1150 
1151 LValue CGOpenMPRegionInfo::getThreadIDVariableLValue(CodeGenFunction &CGF) {
1152  return CGF.EmitLoadOfPointerLValue(
1153  CGF.GetAddrOfLocalVar(getThreadIDVariable()),
1154  getThreadIDVariable()->getType()->castAs<PointerType>());
1155 }
1156 
1157 void CGOpenMPRegionInfo::EmitBody(CodeGenFunction &CGF, const Stmt * /*S*/) {
1158  if (!CGF.HaveInsertPoint())
1159  return;
1160  // 1.2.2 OpenMP Language Terminology
1161  // Structured block - An executable statement with a single entry at the
1162  // top and a single exit at the bottom.
1163  // The point of exit cannot be a branch out of the structured block.
1164  // longjmp() and throw() must not violate the entry/exit criteria.
1165  CGF.EHStack.pushTerminate();
1166  CodeGen(CGF);
1167  CGF.EHStack.popTerminate();
1168 }
1169 
1170 LValue CGOpenMPTaskOutlinedRegionInfo::getThreadIDVariableLValue(
1171  CodeGenFunction &CGF) {
1172  return CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(getThreadIDVariable()),
1173  getThreadIDVariable()->getType(),
1174  AlignmentSource::Decl);
1175 }
1176 
1177 CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM)
1178  : CGM(CGM), OffloadEntriesInfoManager(CGM) {
1179  IdentTy = llvm::StructType::create(
1180  "ident_t", CGM.Int32Ty /* reserved_1 */, CGM.Int32Ty /* flags */,
1181  CGM.Int32Ty /* reserved_2 */, CGM.Int32Ty /* reserved_3 */,
1182  CGM.Int8PtrTy /* psource */);
1183  KmpCriticalNameTy = llvm::ArrayType::get(CGM.Int32Ty, /*NumElements*/ 8);
1184 
1185  loadOffloadInfoMetadata();
1186 }
1187 
1188 void CGOpenMPRuntime::clear() {
1189  InternalVars.clear();
1190 }
1191 
1192 static llvm::Function *
1193 emitCombinerOrInitializer(CodeGenModule &CGM, QualType Ty,
1194  const Expr *CombinerInitializer, const VarDecl *In,
1195  const VarDecl *Out, bool IsCombiner) {
1196  // void .omp_combiner.(Ty *in, Ty *out);
1197  auto &C = CGM.getContext();
1198  QualType PtrTy = C.getPointerType(Ty).withRestrict();
1199  FunctionArgList Args;
1200  ImplicitParamDecl OmpOutParm(C, /*DC=*/nullptr, Out->getLocation(),
1201  /*Id=*/nullptr, PtrTy, ImplicitParamDecl::Other);
1202  ImplicitParamDecl OmpInParm(C, /*DC=*/nullptr, In->getLocation(),
1203  /*Id=*/nullptr, PtrTy, ImplicitParamDecl::Other);
1204  Args.push_back(&OmpOutParm);
1205  Args.push_back(&OmpInParm);
1206  auto &FnInfo =
1207  CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
1208  auto *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
1209  auto *Fn = llvm::Function::Create(
1210  FnTy, llvm::GlobalValue::InternalLinkage,
1211  IsCombiner ? ".omp_combiner." : ".omp_initializer.", &CGM.getModule());
1212  CGM.SetInternalFunctionAttributes(/*D=*/nullptr, Fn, FnInfo);
1213  Fn->removeFnAttr(llvm::Attribute::NoInline);
1214  Fn->removeFnAttr(llvm::Attribute::OptimizeNone);
1215  Fn->addFnAttr(llvm::Attribute::AlwaysInline);
1216  CodeGenFunction CGF(CGM);
1217  // Map "T omp_in;" variable to "*omp_in_parm" value in all expressions.
1218  // Map "T omp_out;" variable to "*omp_out_parm" value in all expressions.
1219  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, In->getLocation(),
1220  Out->getLocation());
1221  CodeGenFunction::OMPPrivateScope Scope(CGF);
1222  Address AddrIn = CGF.GetAddrOfLocalVar(&OmpInParm);
1223  Scope.addPrivate(In, [&CGF, AddrIn, PtrTy]() -> Address {
1224  return CGF.EmitLoadOfPointerLValue(AddrIn, PtrTy->castAs<PointerType>())
1225  .getAddress();
1226  });
1227  Address AddrOut = CGF.GetAddrOfLocalVar(&OmpOutParm);
1228  Scope.addPrivate(Out, [&CGF, AddrOut, PtrTy]() -> Address {
1229  return CGF.EmitLoadOfPointerLValue(AddrOut, PtrTy->castAs<PointerType>())
1230  .getAddress();
1231  });
1232  (void)Scope.Privatize();
1233  if (!IsCombiner && Out->hasInit() &&
1234  !CGF.isTrivialInitializer(Out->getInit())) {
1235  CGF.EmitAnyExprToMem(Out->getInit(), CGF.GetAddrOfLocalVar(Out),
1236  Out->getType().getQualifiers(),
1237  /*IsInitializer=*/true);
1238  }
1239  if (CombinerInitializer)
1240  CGF.EmitIgnoredExpr(CombinerInitializer);
1241  Scope.ForceCleanup();
1242  CGF.FinishFunction();
1243  return Fn;
1244 }
1245 
1246 void CGOpenMPRuntime::emitUserDefinedReduction(
1247  CodeGenFunction *CGF, const OMPDeclareReductionDecl *D) {
1248  if (UDRMap.count(D) > 0)
1249  return;
1250  auto &C = CGM.getContext();
1251  if (!In || !Out) {
1252  In = &C.Idents.get("omp_in");
1253  Out = &C.Idents.get("omp_out");
1254  }
1255  llvm::Function *Combiner = emitCombinerOrInitializer(
1256  CGM, D->getType(), D->getCombiner(), cast<VarDecl>(D->lookup(In).front()),
1257  cast<VarDecl>(D->lookup(Out).front()),
1258  /*IsCombiner=*/true);
1259  llvm::Function *Initializer = nullptr;
1260  if (auto *Init = D->getInitializer()) {
1261  if (!Priv || !Orig) {
1262  Priv = &C.Idents.get("omp_priv");
1263  Orig = &C.Idents.get("omp_orig");
1264  }
1265  Initializer = emitCombinerOrInitializer(
1266  CGM, D->getType(),
1267  D->getInitializerKind() == OMPDeclareReductionDecl::CallInit ? Init
1268  : nullptr,
1269  cast<VarDecl>(D->lookup(Orig).front()),
1270  cast<VarDecl>(D->lookup(Priv).front()),
1271  /*IsCombiner=*/false);
1272  }
1273  UDRMap.insert(std::make_pair(D, std::make_pair(Combiner, Initializer)));
1274  if (CGF) {
1275  auto &Decls = FunctionUDRMap.FindAndConstruct(CGF->CurFn);
1276  Decls.second.push_back(D);
1277  }
1278 }
1279 
1280 std::pair<llvm::Function *, llvm::Function *>
1281 CGOpenMPRuntime::getUserDefinedReduction(const OMPDeclareReductionDecl *D) {
1282  auto I = UDRMap.find(D);
1283  if (I != UDRMap.end())
1284  return I->second;
1285  emitUserDefinedReduction(/*CGF=*/nullptr, D);
1286  return UDRMap.lookup(D);
1287 }
1288 
1289 // Layout information for ident_t.
1290 static CharUnits getIdentAlign(CodeGenModule &CGM) {
1291  return CGM.getPointerAlign();
1292 }
1293 static CharUnits getIdentSize(CodeGenModule &CGM) {
1294  assert((4 * CGM.getPointerSize()).isMultipleOf(CGM.getPointerAlign()));
1295  return CharUnits::fromQuantity(16) + CGM.getPointerSize();
1296 }
1297 static CharUnits getOffsetOfIdentField(IdentFieldIndex Field) {
1298  // All the fields except the last are i32, so this works beautifully.
1299  return unsigned(Field) * CharUnits::fromQuantity(4);
1300 }
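// Annotation (not part of the original source): a quick worked example of the
// offset computation above:
//
//   IdentField_Flags   -> 1 * 4 = offset 4
//   IdentField_PSource -> 4 * 4 = offset 16
//
// which matches getIdentSize() returning 16 plus the pointer size (four i32
// fields followed by the psource pointer).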
1301 static Address createIdentFieldGEP(CodeGenFunction &CGF, Address Addr,
1302  IdentFieldIndex Field,
1303  const llvm::Twine &Name = "") {
1304  auto Offset = getOffsetOfIdentField(Field);
1305  return CGF.Builder.CreateStructGEP(Addr, Field, Offset, Name);
1306 }
1307 
1308 static llvm::Value *emitParallelOrTeamsOutlinedFunction(
1309  CodeGenModule &CGM, const OMPExecutableDirective &D, const CapturedStmt *CS,
1310  const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
1311  const StringRef OutlinedHelperName, const RegionCodeGenTy &CodeGen) {
1312  assert(ThreadIDVar->getType()->isPointerType() &&
1313  "thread id variable must be of type kmp_int32 *");
1314  CodeGenFunction CGF(CGM, true);
1315  bool HasCancel = false;
1316  if (auto *OPD = dyn_cast<OMPParallelDirective>(&D))
1317  HasCancel = OPD->hasCancel();
1318  else if (auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&D))
1319  HasCancel = OPSD->hasCancel();
1320  else if (auto *OPFD = dyn_cast<OMPParallelForDirective>(&D))
1321  HasCancel = OPFD->hasCancel();
1322  else if (auto *OPFD = dyn_cast<OMPTargetParallelForDirective>(&D))
1323  HasCancel = OPFD->hasCancel();
1324  else if (auto *OPFD = dyn_cast<OMPDistributeParallelForDirective>(&D))
1325  HasCancel = OPFD->hasCancel();
1326  else if (auto *OPFD = dyn_cast<OMPTeamsDistributeParallelForDirective>(&D))
1327  HasCancel = OPFD->hasCancel();
1328  else if (auto *OPFD =
1329  dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(&D))
1330  HasCancel = OPFD->hasCancel();
1331  CGOpenMPOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen, InnermostKind,
1332  HasCancel, OutlinedHelperName);
1333  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
1334  return CGF.GenerateOpenMPCapturedStmtFunction(*CS);
1335 }
1336 
1337 llvm::Value *CGOpenMPRuntime::emitParallelOutlinedFunction(
1338  const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1339  OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
1340  const CapturedStmt *CS = D.getCapturedStmt(OMPD_parallel);
1341  return emitParallelOrTeamsOutlinedFunction(
1342  CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen);
1343 }
1344 
1345 llvm::Value *CGOpenMPRuntime::emitTeamsOutlinedFunction(
1346  const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1347  OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
1348  const CapturedStmt *CS = D.getCapturedStmt(OMPD_teams);
1349  return emitParallelOrTeamsOutlinedFunction(
1350  CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen);
1351 }
1352 
1353 llvm::Value *CGOpenMPRuntime::emitTaskOutlinedFunction(
1354  const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1355  const VarDecl *PartIDVar, const VarDecl *TaskTVar,
1356  OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
1357  bool Tied, unsigned &NumberOfParts) {
1358  auto &&UntiedCodeGen = [this, &D, TaskTVar](CodeGenFunction &CGF,
1359  PrePostActionTy &) {
1360  auto *ThreadID = getThreadID(CGF, D.getLocStart());
1361  auto *UpLoc = emitUpdateLocation(CGF, D.getLocStart());
1362  llvm::Value *TaskArgs[] = {
1363  UpLoc, ThreadID,
1364  CGF.EmitLoadOfPointerLValue(CGF.GetAddrOfLocalVar(TaskTVar),
1365  TaskTVar->getType()->castAs<PointerType>())
1366  .getPointer()};
1367  CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_task), TaskArgs);
1368  };
1369  CGOpenMPTaskOutlinedRegionInfo::UntiedTaskActionTy Action(Tied, PartIDVar,
1370  UntiedCodeGen);
1371  CodeGen.setAction(Action);
1372  assert(!ThreadIDVar->getType()->isPointerType() &&
1373  "thread id variable must be of type kmp_int32 for tasks");
1374  const OpenMPDirectiveKind Region =
1375  isOpenMPTaskLoopDirective(D.getDirectiveKind()) ? OMPD_taskloop
1376  : OMPD_task;
1377  auto *CS = D.getCapturedStmt(Region);
1378  auto *TD = dyn_cast<OMPTaskDirective>(&D);
1379  CodeGenFunction CGF(CGM, true);
1380  CGOpenMPTaskOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen,
1381  InnermostKind,
1382  TD ? TD->hasCancel() : false, Action);
1383  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
1384  auto *Res = CGF.GenerateCapturedStmtFunction(*CS);
1385  if (!Tied)
1386  NumberOfParts = Action.getNumberOfParts();
1387  return Res;
1388 }
1389 
1390 Address CGOpenMPRuntime::getOrCreateDefaultLocation(unsigned Flags) {
1391  CharUnits Align = getIdentAlign(CGM);
1392  llvm::Value *Entry = OpenMPDefaultLocMap.lookup(Flags);
1393  if (!Entry) {
1394  if (!DefaultOpenMPPSource) {
1395  // Initialize default location for psource field of ident_t structure of
1396  // all ident_t objects. Format is ";file;function;line;column;;".
1397  // Taken from
1398  // http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp_str.c
1399  DefaultOpenMPPSource =
1400  CGM.GetAddrOfConstantCString(";unknown;unknown;0;0;;").getPointer();
1401  DefaultOpenMPPSource =
1402  llvm::ConstantExpr::getBitCast(DefaultOpenMPPSource, CGM.Int8PtrTy);
1403  }
1404 
1405  ConstantInitBuilder builder(CGM);
1406  auto fields = builder.beginStruct(IdentTy);
1407  fields.addInt(CGM.Int32Ty, 0);
1408  fields.addInt(CGM.Int32Ty, Flags);
1409  fields.addInt(CGM.Int32Ty, 0);
1410  fields.addInt(CGM.Int32Ty, 0);
1411  fields.add(DefaultOpenMPPSource);
1412  auto DefaultOpenMPLocation =
1413  fields.finishAndCreateGlobal("", Align, /*isConstant*/ true,
1414  llvm::GlobalValue::PrivateLinkage);
1415  DefaultOpenMPLocation->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1416 
1417  OpenMPDefaultLocMap[Flags] = Entry = DefaultOpenMPLocation;
1418  }
1419  return Address(Entry, Align);
1420 }
1421 
1422 llvm::Value *CGOpenMPRuntime::emitUpdateLocation(CodeGenFunction &CGF,
1423  SourceLocation Loc,
1424  unsigned Flags) {
1425  Flags |= OMP_IDENT_KMPC;
1426  // If no debug info is generated - return global default location.
1427  if (CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo ||
1428  Loc.isInvalid())
1429  return getOrCreateDefaultLocation(Flags).getPointer();
1430 
1431  assert(CGF.CurFn && "No function in current CodeGenFunction.");
1432 
1433  Address LocValue = Address::invalid();
1434  auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
1435  if (I != OpenMPLocThreadIDMap.end())
1436  LocValue = Address(I->second.DebugLoc, getIdentAlign(CGF.CGM));
1437 
1438  // OpenMPLocThreadIDMap may have null DebugLoc and non-null ThreadID, if
1439  // GetOpenMPThreadID was called before this routine.
1440  if (!LocValue.isValid()) {
1441  // Generate "ident_t .kmpc_loc.addr;"
1442  Address AI = CGF.CreateTempAlloca(IdentTy, getIdentAlign(CGF.CGM),
1443  ".kmpc_loc.addr");
1444  auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
1445  Elem.second.DebugLoc = AI.getPointer();
1446  LocValue = AI;
1447 
1448  CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
1449  CGF.Builder.SetInsertPoint(CGF.AllocaInsertPt);
1450  CGF.Builder.CreateMemCpy(LocValue, getOrCreateDefaultLocation(Flags),
1451  CGM.getSize(getIdentSize(CGF.CGM)));
1452  }
1453 
1454  // char **psource = &.kmpc_loc_<flags>.addr.psource;
1455  Address PSource = createIdentFieldGEP(CGF, LocValue, IdentField_PSource);
1456 
1457  auto OMPDebugLoc = OpenMPDebugLocMap.lookup(Loc.getRawEncoding());
1458  if (OMPDebugLoc == nullptr) {
1459  SmallString<128> Buffer2;
1460  llvm::raw_svector_ostream OS2(Buffer2);
1461  // Build debug location
1462  auto PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
1463  OS2 << ";" << PLoc.getFilename() << ";";
1464  if (const FunctionDecl *FD =
1465  dyn_cast_or_null<FunctionDecl>(CGF.CurFuncDecl)) {
1466  OS2 << FD->getQualifiedNameAsString();
1467  }
1468  OS2 << ";" << PLoc.getLine() << ";" << PLoc.getColumn() << ";;";
1469  OMPDebugLoc = CGF.Builder.CreateGlobalStringPtr(OS2.str());
1470  OpenMPDebugLocMap[Loc.getRawEncoding()] = OMPDebugLoc;
1471  }
1472  // *psource = ";<File>;<Function>;<Line>;<Column>;;";
1473  CGF.Builder.CreateStore(OMPDebugLoc, PSource);
1474 
1475  // Our callers always pass this to a runtime function, so for
1476  // convenience, go ahead and return a naked pointer.
1477  return LocValue.getPointer();
1478 }
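// Annotation (not part of the original source): for a construct at
// test.c:10:3 inside foo(), the string stored into psource above would look
// like
//
//   ";test.c;foo;10;3;;"
//
// (file, function, line and column, semicolon separated, as expected by the
// libomp runtime).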
1479 
1480 llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF,
1481  SourceLocation Loc) {
1482  assert(CGF.CurFn && "No function in current CodeGenFunction.");
1483 
1484  llvm::Value *ThreadID = nullptr;
1485  // Check whether we've already cached a load of the thread id in this
1486  // function.
1487  auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
1488  if (I != OpenMPLocThreadIDMap.end()) {
1489  ThreadID = I->second.ThreadID;
1490  if (ThreadID != nullptr)
1491  return ThreadID;
1492  }
1493  // If exceptions are enabled, do not use parameter to avoid possible crash.
1494  if (!CGF.EHStack.requiresLandingPad() || !CGF.getLangOpts().Exceptions ||
1495  !CGF.getLangOpts().CXXExceptions ||
1496  CGF.Builder.GetInsertBlock() == CGF.AllocaInsertPt->getParent()) {
1497  if (auto *OMPRegionInfo =
1498  dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
1499  if (OMPRegionInfo->getThreadIDVariable()) {
1500  // Check if this is an outlined function with thread id passed as argument.
1501  auto LVal = OMPRegionInfo->getThreadIDVariableLValue(CGF);
1502  ThreadID = CGF.EmitLoadOfScalar(LVal, Loc);
1503  // If value loaded in entry block, cache it and use it everywhere in
1504  // function.
1505  if (CGF.Builder.GetInsertBlock() == CGF.AllocaInsertPt->getParent()) {
1506  auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
1507  Elem.second.ThreadID = ThreadID;
1508  }
1509  return ThreadID;
1510  }
1511  }
1512  }
1513 
1514  // This is not an outlined function region - need to call kmp_int32
1515  // __kmpc_global_thread_num(ident_t *loc).
1516  // Generate thread id value and cache this value for use across the
1517  // function.
1518  CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
1519  CGF.Builder.SetInsertPoint(CGF.AllocaInsertPt);
1520  auto *Call = CGF.Builder.CreateCall(
1521  createRuntimeFunction(OMPRTL__kmpc_global_thread_num),
1522  emitUpdateLocation(CGF, Loc));
1523  Call->setCallingConv(CGF.getRuntimeCC());
1524  auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
1525  Elem.second.ThreadID = Call;
1526  return Call;
1527 }
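// Sketch of the caching behavior above (value names are illustrative): the
// first query in a plain function emits, in the entry block, roughly
//   %gtid = call i32 @__kmpc_global_thread_num(%ident_t* %loc)
// and stores %gtid in OpenMPLocThreadIDMap, so later getThreadID calls for
// the same function reuse the cached value instead of emitting another call.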
1528 
1529 void CGOpenMPRuntime::functionFinished(CodeGenFunction &CGF) {
1530  assert(CGF.CurFn && "No function in current CodeGenFunction.");
1531  if (OpenMPLocThreadIDMap.count(CGF.CurFn))
1532  OpenMPLocThreadIDMap.erase(CGF.CurFn);
1533  if (FunctionUDRMap.count(CGF.CurFn) > 0) {
1534  for(auto *D : FunctionUDRMap[CGF.CurFn]) {
1535  UDRMap.erase(D);
1536  }
1537  FunctionUDRMap.erase(CGF.CurFn);
1538  }
1539 }
1540 
1541 llvm::Type *CGOpenMPRuntime::getIdentTyPointerTy() {
1542  if (!IdentTy) {
1543  }
1544  return llvm::PointerType::getUnqual(IdentTy);
1545 }
1546 
1547 llvm::Type *CGOpenMPRuntime::getKmpc_MicroPointerTy() {
1548  if (!Kmpc_MicroTy) {
1549  // Build void (*kmpc_micro)(kmp_int32 *global_tid, kmp_int32 *bound_tid,...)
1550  llvm::Type *MicroParams[] = {llvm::PointerType::getUnqual(CGM.Int32Ty),
1551  llvm::PointerType::getUnqual(CGM.Int32Ty)};
1552  Kmpc_MicroTy = llvm::FunctionType::get(CGM.VoidTy, MicroParams, true);
1553  }
1554  return llvm::PointerType::getUnqual(Kmpc_MicroTy);
1555 }
1556 
1557 llvm::Constant *
1558 CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
1559  llvm::Constant *RTLFn = nullptr;
1560  switch (static_cast<OpenMPRTLFunction>(Function)) {
1561  case OMPRTL__kmpc_fork_call: {
1562  // Build void __kmpc_fork_call(ident_t *loc, kmp_int32 argc, kmpc_micro
1563  // microtask, ...);
1564  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1565  getKmpc_MicroPointerTy()};
1566  llvm::FunctionType *FnTy =
1567  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ true);
1568  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_fork_call");
1569  break;
1570  }
1571  case OMPRTL__kmpc_global_thread_num: {
1572  // Build kmp_int32 __kmpc_global_thread_num(ident_t *loc);
1573  llvm::Type *TypeParams[] = {getIdentTyPointerTy()};
1574  llvm::FunctionType *FnTy =
1575  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
1576  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_global_thread_num");
1577  break;
1578  }
1579  case OMPRTL__kmpc_threadprivate_cached: {
1580  // Build void *__kmpc_threadprivate_cached(ident_t *loc,
1581  // kmp_int32 global_tid, void *data, size_t size, void ***cache);
1582  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1583  CGM.VoidPtrTy, CGM.SizeTy,
1584  CGM.VoidPtrTy->getPointerTo()->getPointerTo()};
1585  llvm::FunctionType *FnTy =
1586  llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg*/ false);
1587  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_threadprivate_cached");
1588  break;
1589  }
1590  case OMPRTL__kmpc_critical: {
1591  // Build void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
1592  // kmp_critical_name *crit);
1593  llvm::Type *TypeParams[] = {
1594  getIdentTyPointerTy(), CGM.Int32Ty,
1595  llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1596  llvm::FunctionType *FnTy =
1597  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1598  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_critical");
1599  break;
1600  }
1601  case OMPRTL__kmpc_critical_with_hint: {
1602  // Build void __kmpc_critical_with_hint(ident_t *loc, kmp_int32 global_tid,
1603  // kmp_critical_name *crit, uintptr_t hint);
1604  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1605  llvm::PointerType::getUnqual(KmpCriticalNameTy),
1606  CGM.IntPtrTy};
1607  llvm::FunctionType *FnTy =
1608  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1609  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_critical_with_hint");
1610  break;
1611  }
1612  case OMPRTL__kmpc_threadprivate_register: {
1613  // Build void __kmpc_threadprivate_register(ident_t *, void *data,
1614  // kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor);
1615  // typedef void *(*kmpc_ctor)(void *);
1616  auto KmpcCtorTy =
1617  llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
1618  /*isVarArg*/ false)->getPointerTo();
1619  // typedef void *(*kmpc_cctor)(void *, void *);
1620  llvm::Type *KmpcCopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
1621  auto KmpcCopyCtorTy =
1622  llvm::FunctionType::get(CGM.VoidPtrTy, KmpcCopyCtorTyArgs,
1623  /*isVarArg*/ false)->getPointerTo();
1624  // typedef void (*kmpc_dtor)(void *);
1625  auto KmpcDtorTy =
1626  llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy, /*isVarArg*/ false)
1627  ->getPointerTo();
1628  llvm::Type *FnTyArgs[] = {getIdentTyPointerTy(), CGM.VoidPtrTy, KmpcCtorTy,
1629  KmpcCopyCtorTy, KmpcDtorTy};
1630  auto FnTy = llvm::FunctionType::get(CGM.VoidTy, FnTyArgs,
1631  /*isVarArg*/ false);
1632  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_threadprivate_register");
1633  break;
1634  }
1635  case OMPRTL__kmpc_end_critical: {
1636  // Build void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid,
1637  // kmp_critical_name *crit);
1638  llvm::Type *TypeParams[] = {
1639  getIdentTyPointerTy(), CGM.Int32Ty,
1640  llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1641  llvm::FunctionType *FnTy =
1642  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1643  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_critical");
1644  break;
1645  }
1646  case OMPRTL__kmpc_cancel_barrier: {
1647  // Build kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32
1648  // global_tid);
1649  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1650  llvm::FunctionType *FnTy =
1651  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
1652  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_cancel_barrier");
1653  break;
1654  }
1655  case OMPRTL__kmpc_barrier: {
1656  // Build void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
1657  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1658  llvm::FunctionType *FnTy =
1659  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1660  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_barrier");
1661  break;
1662  }
1663  case OMPRTL__kmpc_for_static_fini: {
1664  // Build void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);
1665  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1666  llvm::FunctionType *FnTy =
1667  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1668  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_for_static_fini");
1669  break;
1670  }
1671  case OMPRTL__kmpc_push_num_threads: {
1672  // Build void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
1673  // kmp_int32 num_threads)
1674  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1675  CGM.Int32Ty};
1676  llvm::FunctionType *FnTy =
1677  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1678  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_num_threads");
1679  break;
1680  }
1681  case OMPRTL__kmpc_serialized_parallel: {
1682  // Build void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
1683  // global_tid);
1684  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1685  llvm::FunctionType *FnTy =
1686  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1687  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_serialized_parallel");
1688  break;
1689  }
1690  case OMPRTL__kmpc_end_serialized_parallel: {
1691  // Build void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
1692  // global_tid);
1693  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1694  llvm::FunctionType *FnTy =
1695  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1696  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_serialized_parallel");
1697  break;
1698  }
1699  case OMPRTL__kmpc_flush: {
1700  // Build void __kmpc_flush(ident_t *loc);
1701  llvm::Type *TypeParams[] = {getIdentTyPointerTy()};
1702  llvm::FunctionType *FnTy =
1703  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1704  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_flush");
1705  break;
1706  }
1707  case OMPRTL__kmpc_master: {
1708  // Build kmp_int32 __kmpc_master(ident_t *loc, kmp_int32 global_tid);
1709  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1710  llvm::FunctionType *FnTy =
1711  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1712  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_master");
1713  break;
1714  }
1715  case OMPRTL__kmpc_end_master: {
1716  // Build void __kmpc_end_master(ident_t *loc, kmp_int32 global_tid);
1717  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1718  llvm::FunctionType *FnTy =
1719  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1720  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_master");
1721  break;
1722  }
1723  case OMPRTL__kmpc_omp_taskyield: {
1724  // Build kmp_int32 __kmpc_omp_taskyield(ident_t *, kmp_int32 global_tid,
1725  // int end_part);
1726  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
1727  llvm::FunctionType *FnTy =
1728  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1729  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_taskyield");
1730  break;
1731  }
1732  case OMPRTL__kmpc_single: {
1733  // Build kmp_int32 __kmpc_single(ident_t *loc, kmp_int32 global_tid);
1734  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1735  llvm::FunctionType *FnTy =
1736  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1737  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_single");
1738  break;
1739  }
1740  case OMPRTL__kmpc_end_single: {
1741  // Build void __kmpc_end_single(ident_t *loc, kmp_int32 global_tid);
1742  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1743  llvm::FunctionType *FnTy =
1744  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1745  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_single");
1746  break;
1747  }
1748  case OMPRTL__kmpc_omp_task_alloc: {
1749  // Build kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
1750  // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
1751  // kmp_routine_entry_t *task_entry);
1752  assert(KmpRoutineEntryPtrTy != nullptr &&
1753  "Type kmp_routine_entry_t must be created.");
1754  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty,
1755  CGM.SizeTy, CGM.SizeTy, KmpRoutineEntryPtrTy};
1756  // Return void * and then cast to particular kmp_task_t type.
1757  llvm::FunctionType *FnTy =
1758  llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
1759  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_alloc");
1760  break;
1761  }
1762  case OMPRTL__kmpc_omp_task: {
1763  // Build kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t
1764  // *new_task);
1765  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1766  CGM.VoidPtrTy};
1767  llvm::FunctionType *FnTy =
1768  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1769  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task");
1770  break;
1771  }
1772  case OMPRTL__kmpc_copyprivate: {
1773  // Build void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
1774  // size_t cpy_size, void *cpy_data, void(*cpy_func)(void *, void *),
1775  // kmp_int32 didit);
1776  llvm::Type *CpyTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
1777  auto *CpyFnTy =
1778  llvm::FunctionType::get(CGM.VoidTy, CpyTypeParams, /*isVarArg=*/false);
1779  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.SizeTy,
1780  CGM.VoidPtrTy, CpyFnTy->getPointerTo(),
1781  CGM.Int32Ty};
1782  llvm::FunctionType *FnTy =
1783  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1784  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_copyprivate");
1785  break;
1786  }
1787  case OMPRTL__kmpc_reduce: {
1788  // Build kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid,
1789  // kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void
1790  // (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck);
1791  llvm::Type *ReduceTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
1792  auto *ReduceFnTy = llvm::FunctionType::get(CGM.VoidTy, ReduceTypeParams,
1793  /*isVarArg=*/false);
1794  llvm::Type *TypeParams[] = {
1796  CGM.VoidPtrTy, ReduceFnTy->getPointerTo(),
1797  llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1798  llvm::FunctionType *FnTy =
1799  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1800  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_reduce");
1801  break;
1802  }
1803  case OMPRTL__kmpc_reduce_nowait: {
1804  // Build kmp_int32 __kmpc_reduce_nowait(ident_t *loc, kmp_int32
1805  // global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
1806  // void (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name
1807  // *lck);
1808  llvm::Type *ReduceTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
1809  auto *ReduceFnTy = llvm::FunctionType::get(CGM.VoidTy, ReduceTypeParams,
1810  /*isVarArg=*/false);
1811  llvm::Type *TypeParams[] = {
1813  CGM.VoidPtrTy, ReduceFnTy->getPointerTo(),
1814  llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1815  llvm::FunctionType *FnTy =
1816  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1817  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_reduce_nowait");
1818  break;
1819  }
1820  case OMPRTL__kmpc_end_reduce: {
1821  // Build void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
1822  // kmp_critical_name *lck);
1823  llvm::Type *TypeParams[] = {
1824  getIdentTyPointerTy(), CGM.Int32Ty,
1825  llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1826  llvm::FunctionType *FnTy =
1827  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1828  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_reduce");
1829  break;
1830  }
1831  case OMPRTL__kmpc_end_reduce_nowait: {
1832  // Build __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
1833  // kmp_critical_name *lck);
1834  llvm::Type *TypeParams[] = {
1835  getIdentTyPointerTy(), CGM.Int32Ty,
1836  llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1837  llvm::FunctionType *FnTy =
1838  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1839  RTLFn =
1840  CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_reduce_nowait");
1841  break;
1842  }
1843  case OMPRTL__kmpc_omp_task_begin_if0: {
1844  // Build void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid, kmp_task_t
1845  // *new_task);
1846  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1847  CGM.VoidPtrTy};
1848  llvm::FunctionType *FnTy =
1849  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1850  RTLFn =
1851  CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_begin_if0");
1852  break;
1853  }
1854  case OMPRTL__kmpc_omp_task_complete_if0: {
1855  // Build void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid, kmp_task_t
1856  // *new_task);
1857  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1858  CGM.VoidPtrTy};
1859  llvm::FunctionType *FnTy =
1860  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1861  RTLFn = CGM.CreateRuntimeFunction(FnTy,
1862  /*Name=*/"__kmpc_omp_task_complete_if0");
1863  break;
1864  }
1865  case OMPRTL__kmpc_ordered: {
1866  // Build void __kmpc_ordered(ident_t *loc, kmp_int32 global_tid);
1867  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1868  llvm::FunctionType *FnTy =
1869  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1870  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_ordered");
1871  break;
1872  }
1873  case OMPRTL__kmpc_end_ordered: {
1874  // Build void __kmpc_end_ordered(ident_t *loc, kmp_int32 global_tid);
1875  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1876  llvm::FunctionType *FnTy =
1877  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1878  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_ordered");
1879  break;
1880  }
1881  case OMPRTL__kmpc_omp_taskwait: {
1882  // Build kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32 global_tid);
1883  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1884  llvm::FunctionType *FnTy =
1885  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1886  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_omp_taskwait");
1887  break;
1888  }
1889  case OMPRTL__kmpc_taskgroup: {
1890  // Build void __kmpc_taskgroup(ident_t *loc, kmp_int32 global_tid);
1891  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1892  llvm::FunctionType *FnTy =
1893  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1894  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_taskgroup");
1895  break;
1896  }
1897  case OMPRTL__kmpc_end_taskgroup: {
1898  // Build void __kmpc_end_taskgroup(ident_t *loc, kmp_int32 global_tid);
1899  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1900  llvm::FunctionType *FnTy =
1901  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1902  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_taskgroup");
1903  break;
1904  }
1905  case OMPRTL__kmpc_push_proc_bind: {
1906  // Build void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
1907  // int proc_bind)
1908  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
1909  llvm::FunctionType *FnTy =
1910  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1911  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_proc_bind");
1912  break;
1913  }
1914  case OMPRTL__kmpc_omp_task_with_deps: {
1915  // Build kmp_int32 __kmpc_omp_task_with_deps(ident_t *, kmp_int32 gtid,
1916  // kmp_task_t *new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list,
1917  // kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
1918  llvm::Type *TypeParams[] = {
1919  getIdentTyPointerTy(), CGM.Int32Ty, CGM.VoidPtrTy, CGM.Int32Ty,
1920  CGM.VoidPtrTy, CGM.Int32Ty, CGM.VoidPtrTy};
1921  llvm::FunctionType *FnTy =
1922  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1923  RTLFn =
1924  CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_with_deps");
1925  break;
1926  }
1927  case OMPRTL__kmpc_omp_wait_deps: {
1928  // Build void __kmpc_omp_wait_deps(ident_t *, kmp_int32 gtid,
1929  // kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias,
1930  // kmp_depend_info_t *noalias_dep_list);
1931  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1932  CGM.Int32Ty, CGM.VoidPtrTy,
1933  CGM.Int32Ty, CGM.VoidPtrTy};
1934  llvm::FunctionType *FnTy =
1935  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1936  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_wait_deps");
1937  break;
1938  }
1939  case OMPRTL__kmpc_cancellationpoint: {
1940  // Build kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
1941  // global_tid, kmp_int32 cncl_kind)
1942  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
1943  llvm::FunctionType *FnTy =
1944  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
1945  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_cancellationpoint");
1946  break;
1947  }
1948  case OMPRTL__kmpc_cancel: {
1949  // Build kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
1950  // kmp_int32 cncl_kind)
1951  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
1952  llvm::FunctionType *FnTy =
1953  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
1954  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_cancel");
1955  break;
1956  }
1957  case OMPRTL__kmpc_push_num_teams: {
1958  // Build void __kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid,
1959  // kmp_int32 num_teams, kmp_int32 num_threads)
1960  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty,
1961  CGM.Int32Ty};
1962  llvm::FunctionType *FnTy =
1963  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
1964  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_num_teams");
1965  break;
1966  }
1967  case OMPRTL__kmpc_fork_teams: {
1968  // Build void __kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro
1969  // microtask, ...);
1970  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1971  getKmpc_MicroPointerTy()};
1972  llvm::FunctionType *FnTy =
1973  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ true);
1974  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_fork_teams");
1975  break;
1976  }
1977  case OMPRTL__kmpc_taskloop: {
1978  // Build void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int
1979  // if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int
1980  // sched, kmp_uint64 grainsize, void *task_dup);
1981  llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
1982  CGM.IntTy,
1983  CGM.VoidPtrTy,
1984  CGM.IntTy,
1985  CGM.Int64Ty->getPointerTo(),
1986  CGM.Int64Ty->getPointerTo(),
1987  CGM.Int64Ty,
1988  CGM.IntTy,
1989  CGM.IntTy,
1990  CGM.Int64Ty,
1991  CGM.VoidPtrTy};
1992  llvm::FunctionType *FnTy =
1993  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1994  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_taskloop");
1995  break;
1996  }
1997  case OMPRTL__kmpc_doacross_init: {
1998  // Build void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid, kmp_int32
1999  // num_dims, struct kmp_dim *dims);
2000  llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
2001  CGM.Int32Ty,
2002  CGM.Int32Ty,
2003  CGM.VoidPtrTy};
2004  llvm::FunctionType *FnTy =
2005  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2006  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_init");
2007  break;
2008  }
2009  case OMPRTL__kmpc_doacross_fini: {
2010  // Build void __kmpc_doacross_fini(ident_t *loc, kmp_int32 gtid);
2011  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
2012  llvm::FunctionType *FnTy =
2013  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2014  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_fini");
2015  break;
2016  }
2017  case OMPRTL__kmpc_doacross_post: {
2018  // Build void __kmpc_doacross_post(ident_t *loc, kmp_int32 gtid, kmp_int64
2019  // *vec);
2020  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
2021  CGM.Int64Ty->getPointerTo()};
2022  llvm::FunctionType *FnTy =
2023  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2024  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_post");
2025  break;
2026  }
2027  case OMPRTL__kmpc_doacross_wait: {
2028  // Build void __kmpc_doacross_wait(ident_t *loc, kmp_int32 gtid, kmp_int64
2029  // *vec);
2030  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
2031  CGM.Int64Ty->getPointerTo()};
2032  llvm::FunctionType *FnTy =
2033  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2034  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_wait");
2035  break;
2036  }
2037  case OMPRTL__kmpc_task_reduction_init: {
2038  // Build void *__kmpc_task_reduction_init(int gtid, int num_data, void
2039  // *data);
2040  llvm::Type *TypeParams[] = {CGM.IntTy, CGM.IntTy, CGM.VoidPtrTy};
2041  llvm::FunctionType *FnTy =
2042  llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
2043  RTLFn =
2044  CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_task_reduction_init");
2045  break;
2046  }
2047  case OMPRTL__kmpc_task_reduction_get_th_data: {
2048  // Build void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void
2049  // *d);
2050  llvm::Type *TypeParams[] = {CGM.IntTy, CGM.VoidPtrTy, CGM.VoidPtrTy};
2051  llvm::FunctionType *FnTy =
2052  llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
2053  RTLFn = CGM.CreateRuntimeFunction(
2054  FnTy, /*Name=*/"__kmpc_task_reduction_get_th_data");
2055  break;
2056  }
2057  case OMPRTL__tgt_target: {
2058  // Build int32_t __tgt_target(int64_t device_id, void *host_ptr, int32_t
2059  // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
2060  // *arg_types);
2061  llvm::Type *TypeParams[] = {CGM.Int64Ty,
2062  CGM.VoidPtrTy,
2063  CGM.Int32Ty,
2064  CGM.VoidPtrPtrTy,
2065  CGM.VoidPtrPtrTy,
2066  CGM.SizeTy->getPointerTo(),
2067  CGM.Int64Ty->getPointerTo()};
2068  llvm::FunctionType *FnTy =
2069  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2070  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target");
2071  break;
2072  }
2073  case OMPRTL__tgt_target_nowait: {
2074  // Build int32_t __tgt_target_nowait(int64_t device_id, void *host_ptr,
2075  // int32_t arg_num, void** args_base, void **args, size_t *arg_sizes,
2076  // int64_t *arg_types);
2077  llvm::Type *TypeParams[] = {CGM.Int64Ty,
2078  CGM.VoidPtrTy,
2079  CGM.Int32Ty,
2080  CGM.VoidPtrPtrTy,
2081  CGM.VoidPtrPtrTy,
2082  CGM.SizeTy->getPointerTo(),
2083  CGM.Int64Ty->getPointerTo()};
2084  llvm::FunctionType *FnTy =
2085  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2086  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_nowait");
2087  break;
2088  }
2089  case OMPRTL__tgt_target_teams: {
2090  // Build int32_t __tgt_target_teams(int64_t device_id, void *host_ptr,
2091  // int32_t arg_num, void** args_base, void **args, size_t *arg_sizes,
2092  // int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
2093  llvm::Type *TypeParams[] = {CGM.Int64Ty,
2094  CGM.VoidPtrTy,
2095  CGM.Int32Ty,
2096  CGM.VoidPtrPtrTy,
2097  CGM.VoidPtrPtrTy,
2098  CGM.SizeTy->getPointerTo(),
2099  CGM.Int64Ty->getPointerTo(),
2100  CGM.Int32Ty,
2101  CGM.Int32Ty};
2102  llvm::FunctionType *FnTy =
2103  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2104  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_teams");
2105  break;
2106  }
2107  case OMPRTL__tgt_target_teams_nowait: {
2108  // Build int32_t __tgt_target_teams_nowait(int64_t device_id, void
2109  // *host_ptr, int32_t arg_num, void** args_base, void **args, size_t
2110  // *arg_sizes, int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
2111  llvm::Type *TypeParams[] = {CGM.Int64Ty,
2112  CGM.VoidPtrTy,
2113  CGM.Int32Ty,
2114  CGM.VoidPtrPtrTy,
2115  CGM.VoidPtrPtrTy,
2116  CGM.SizeTy->getPointerTo(),
2117  CGM.Int64Ty->getPointerTo(),
2118  CGM.Int32Ty,
2119  CGM.Int32Ty};
2120  llvm::FunctionType *FnTy =
2121  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2122  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_teams_nowait");
2123  break;
2124  }
2125  case OMPRTL__tgt_register_lib: {
2126  // Build void __tgt_register_lib(__tgt_bin_desc *desc);
2127  QualType ParamTy =
2128  CGM.getContext().getPointerType(getTgtBinaryDescriptorQTy());
2129  llvm::Type *TypeParams[] = {CGM.getTypes().ConvertTypeForMem(ParamTy)};
2130  llvm::FunctionType *FnTy =
2131  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2132  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_register_lib");
2133  break;
2134  }
2135  case OMPRTL__tgt_unregister_lib: {
2136  // Build void __tgt_unregister_lib(__tgt_bin_desc *desc);
2137  QualType ParamTy =
2138  CGM.getContext().getPointerType(getTgtBinaryDescriptorQTy());
2139  llvm::Type *TypeParams[] = {CGM.getTypes().ConvertTypeForMem(ParamTy)};
2140  llvm::FunctionType *FnTy =
2141  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2142  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_unregister_lib");
2143  break;
2144  }
2145  case OMPRTL__tgt_target_data_begin: {
2146  // Build void __tgt_target_data_begin(int64_t device_id, int32_t arg_num,
2147  // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
2148  llvm::Type *TypeParams[] = {CGM.Int64Ty,
2149  CGM.Int32Ty,
2150  CGM.VoidPtrPtrTy,
2151  CGM.VoidPtrPtrTy,
2152  CGM.SizeTy->getPointerTo(),
2153  CGM.Int64Ty->getPointerTo()};
2154  llvm::FunctionType *FnTy =
2155  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2156  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_begin");
2157  break;
2158  }
2159  case OMPRTL__tgt_target_data_begin_nowait: {
2160  // Build void __tgt_target_data_begin_nowait(int64_t device_id, int32_t
2161  // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
2162  // *arg_types);
2163  llvm::Type *TypeParams[] = {CGM.Int64Ty,
2164  CGM.Int32Ty,
2165  CGM.VoidPtrPtrTy,
2166  CGM.VoidPtrPtrTy,
2167  CGM.SizeTy->getPointerTo(),
2168  CGM.Int64Ty->getPointerTo()};
2169  auto *FnTy =
2170  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2171  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_begin_nowait");
2172  break;
2173  }
2174  case OMPRTL__tgt_target_data_end: {
2175  // Build void __tgt_target_data_end(int64_t device_id, int32_t arg_num,
2176  // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
2177  llvm::Type *TypeParams[] = {CGM.Int64Ty,
2178  CGM.Int32Ty,
2179  CGM.VoidPtrPtrTy,
2180  CGM.VoidPtrPtrTy,
2181  CGM.SizeTy->getPointerTo(),
2182  CGM.Int64Ty->getPointerTo()};
2183  llvm::FunctionType *FnTy =
2184  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2185  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_end");
2186  break;
2187  }
2188  case OMPRTL__tgt_target_data_end_nowait: {
2189  // Build void __tgt_target_data_end_nowait(int64_t device_id, int32_t
2190  // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
2191  // *arg_types);
2192  llvm::Type *TypeParams[] = {CGM.Int64Ty,
2193  CGM.Int32Ty,
2194  CGM.VoidPtrPtrTy,
2195  CGM.VoidPtrPtrTy,
2196  CGM.SizeTy->getPointerTo(),
2197  CGM.Int64Ty->getPointerTo()};
2198  auto *FnTy =
2199  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2200  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_end_nowait");
2201  break;
2202  }
2203  case OMPRTL__tgt_target_data_update: {
2204  // Build void __tgt_target_data_update(int64_t device_id, int32_t arg_num,
2205  // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
2206  llvm::Type *TypeParams[] = {CGM.Int64Ty,
2207  CGM.Int32Ty,
2208  CGM.VoidPtrPtrTy,
2209  CGM.VoidPtrPtrTy,
2210  CGM.SizeTy->getPointerTo(),
2211  CGM.Int64Ty->getPointerTo()};
2212  llvm::FunctionType *FnTy =
2213  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2214  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_update");
2215  break;
2216  }
2217  case OMPRTL__tgt_target_data_update_nowait: {
2218  // Build void __tgt_target_data_update_nowait(int64_t device_id, int32_t
2219  // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
2220  // *arg_types);
2221  llvm::Type *TypeParams[] = {CGM.Int64Ty,
2222  CGM.Int32Ty,
2223  CGM.VoidPtrPtrTy,
2224  CGM.VoidPtrPtrTy,
2225  CGM.SizeTy->getPointerTo(),
2226  CGM.Int64Ty->getPointerTo()};
2227  auto *FnTy =
2228  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2229  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_update_nowait");
2230  break;
2231  }
2232  }
2233  assert(RTLFn && "Unable to find OpenMP runtime function");
2234  return RTLFn;
2235 }
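// Typical usage of createRuntimeFunction, as seen in the emit* helpers later
// in this file (sketch): the returned declaration is handed straight to
// EmitRuntimeCall, e.g.
//   llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
//   CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_barrier), Args);
// so each case above only has to spell out the callee's name and type once.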
2236 
2237 llvm::Constant *CGOpenMPRuntime::createForStaticInitFunction(unsigned IVSize,
2238  bool IVSigned) {
2239  assert((IVSize == 32 || IVSize == 64) &&
2240  "IV size is not compatible with the omp runtime");
2241  auto Name = IVSize == 32 ? (IVSigned ? "__kmpc_for_static_init_4"
2242  : "__kmpc_for_static_init_4u")
2243  : (IVSigned ? "__kmpc_for_static_init_8"
2244  : "__kmpc_for_static_init_8u");
2245  auto ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
2246  auto PtrTy = llvm::PointerType::getUnqual(ITy);
2247  llvm::Type *TypeParams[] = {
2248  getIdentTyPointerTy(), // loc
2249  CGM.Int32Ty, // tid
2250  CGM.Int32Ty, // schedtype
2251  llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
2252  PtrTy, // p_lower
2253  PtrTy, // p_upper
2254  PtrTy, // p_stride
2255  ITy, // incr
2256  ITy // chunk
2257  };
2258  llvm::FunctionType *FnTy =
2259  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2260  return CGM.CreateRuntimeFunction(FnTy, Name);
2261 }
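// For example, createForStaticInitFunction(/*IVSize=*/32, /*IVSigned=*/true)
// declares, roughly in C terms:
//   void __kmpc_for_static_init_4(ident_t *loc, kmp_int32 tid,
//                                 kmp_int32 schedtype, kmp_int32 *p_lastiter,
//                                 kmp_int32 *p_lower, kmp_int32 *p_upper,
//                                 kmp_int32 *p_stride, kmp_int32 incr,
//                                 kmp_int32 chunk);
// the _4u/_8/_8u variants differ only in the induction-variable type.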
2262 
2263 llvm::Constant *CGOpenMPRuntime::createDispatchInitFunction(unsigned IVSize,
2264  bool IVSigned) {
2265  assert((IVSize == 32 || IVSize == 64) &&
2266  "IV size is not compatible with the omp runtime");
2267  auto Name =
2268  IVSize == 32
2269  ? (IVSigned ? "__kmpc_dispatch_init_4" : "__kmpc_dispatch_init_4u")
2270  : (IVSigned ? "__kmpc_dispatch_init_8" : "__kmpc_dispatch_init_8u");
2271  auto ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
2272  llvm::Type *TypeParams[] = { getIdentTyPointerTy(), // loc
2273  CGM.Int32Ty, // tid
2274  CGM.Int32Ty, // schedtype
2275  ITy, // lower
2276  ITy, // upper
2277  ITy, // stride
2278  ITy // chunk
2279  };
2280  llvm::FunctionType *FnTy =
2281  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2282  return CGM.CreateRuntimeFunction(FnTy, Name);
2283 }
2284 
2285 llvm::Constant *CGOpenMPRuntime::createDispatchFiniFunction(unsigned IVSize,
2286  bool IVSigned) {
2287  assert((IVSize == 32 || IVSize == 64) &&
2288  "IV size is not compatible with the omp runtime");
2289  auto Name =
2290  IVSize == 32
2291  ? (IVSigned ? "__kmpc_dispatch_fini_4" : "__kmpc_dispatch_fini_4u")
2292  : (IVSigned ? "__kmpc_dispatch_fini_8" : "__kmpc_dispatch_fini_8u");
2293  llvm::Type *TypeParams[] = {
2294  getIdentTyPointerTy(), // loc
2295  CGM.Int32Ty, // tid
2296  };
2297  llvm::FunctionType *FnTy =
2298  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2299  return CGM.CreateRuntimeFunction(FnTy, Name);
2300 }
2301 
2302 llvm::Constant *CGOpenMPRuntime::createDispatchNextFunction(unsigned IVSize,
2303  bool IVSigned) {
2304  assert((IVSize == 32 || IVSize == 64) &&
2305  "IV size is not compatible with the omp runtime");
2306  auto Name =
2307  IVSize == 32
2308  ? (IVSigned ? "__kmpc_dispatch_next_4" : "__kmpc_dispatch_next_4u")
2309  : (IVSigned ? "__kmpc_dispatch_next_8" : "__kmpc_dispatch_next_8u");
2310  auto ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
2311  auto PtrTy = llvm::PointerType::getUnqual(ITy);
2312  llvm::Type *TypeParams[] = {
2313  getIdentTyPointerTy(), // loc
2314  CGM.Int32Ty, // tid
2315  llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
2316  PtrTy, // p_lower
2317  PtrTy, // p_upper
2318  PtrTy // p_stride
2319  };
2320  llvm::FunctionType *FnTy =
2321  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2322  return CGM.CreateRuntimeFunction(FnTy, Name);
2323 }
2324 
2325 llvm::Constant *
2326 CGOpenMPRuntime::getOrCreateThreadPrivateCache(const VarDecl *VD) {
2327  assert(!CGM.getLangOpts().OpenMPUseTLS ||
2328  !CGM.getContext().getTargetInfo().isTLSSupported());
2329  // Lookup the entry, lazily creating it if necessary.
2330  return getOrCreateInternalVariable(CGM.Int8PtrPtrTy,
2331  Twine(CGM.getMangledName(VD)) + ".cache.");
2332 }
2333 
2334 Address CGOpenMPRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
2335  const VarDecl *VD,
2336  Address VDAddr,
2337  SourceLocation Loc) {
2338  if (CGM.getLangOpts().OpenMPUseTLS &&
2339  CGM.getContext().getTargetInfo().isTLSSupported())
2340  return VDAddr;
2341 
2342  auto VarTy = VDAddr.getElementType();
2343  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
2344  CGF.Builder.CreatePointerCast(VDAddr.getPointer(),
2345  CGM.Int8PtrTy),
2346  CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy)),
2347  getOrCreateThreadPrivateCache(VD)};
2348  return Address(CGF.EmitRuntimeCall(
2349  createRuntimeFunction(OMPRTL__kmpc_threadprivate_cached), Args),
2350  VDAddr.getAlignment());
2351 }
2352 
2353 void CGOpenMPRuntime::emitThreadPrivateVarInit(
2354  CodeGenFunction &CGF, Address VDAddr, llvm::Value *Ctor,
2355  llvm::Value *CopyCtor, llvm::Value *Dtor, SourceLocation Loc) {
2356  // Call kmp_int32 __kmpc_global_thread_num(&loc) to init OpenMP runtime
2357  // library.
2358  auto OMPLoc = emitUpdateLocation(CGF, Loc);
2359  CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_global_thread_num),
2360  OMPLoc);
2361  // Call __kmpc_threadprivate_register(&loc, &var, ctor, cctor/*NULL*/, dtor)
2362  // to register constructor/destructor for variable.
2363  llvm::Value *Args[] = {OMPLoc,
2364  CGF.Builder.CreatePointerCast(VDAddr.getPointer(),
2365  CGM.VoidPtrTy),
2366  Ctor, CopyCtor, Dtor};
2367  CGF.EmitRuntimeCall(
2368  createRuntimeFunction(OMPRTL__kmpc_threadprivate_register), Args);
2369 }
2370 
2371 llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
2372  const VarDecl *VD, Address VDAddr, SourceLocation Loc,
2373  bool PerformInit, CodeGenFunction *CGF) {
2374  if (CGM.getLangOpts().OpenMPUseTLS &&
2375  CGM.getContext().getTargetInfo().isTLSSupported())
2376  return nullptr;
2377 
2378  VD = VD->getDefinition(CGM.getContext());
2379  if (VD && ThreadPrivateWithDefinition.count(VD) == 0) {
2380  ThreadPrivateWithDefinition.insert(VD);
2381  QualType ASTTy = VD->getType();
2382 
2383  llvm::Value *Ctor = nullptr, *CopyCtor = nullptr, *Dtor = nullptr;
2384  auto Init = VD->getAnyInitializer();
2385  if (CGM.getLangOpts().CPlusPlus && PerformInit) {
2386  // Generate function that re-emits the declaration's initializer into the
2387  // threadprivate copy of the variable VD
2388  CodeGenFunction CtorCGF(CGM);
2389  FunctionArgList Args;
2390  ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, Loc,
2391  /*Id=*/nullptr, CGM.getContext().VoidPtrTy,
2393  Args.push_back(&Dst);
2394 
2395  auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
2396  CGM.getContext().VoidPtrTy, Args);
2397  auto FTy = CGM.getTypes().GetFunctionType(FI);
2398  auto Fn = CGM.CreateGlobalInitOrDestructFunction(
2399  FTy, ".__kmpc_global_ctor_.", FI, Loc);
2400  CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidPtrTy, Fn, FI,
2401  Args, Loc, Loc);
2402  auto ArgVal = CtorCGF.EmitLoadOfScalar(
2403  CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
2404  CGM.getContext().VoidPtrTy, Dst.getLocation());
2405  Address Arg = Address(ArgVal, VDAddr.getAlignment());
2406  Arg = CtorCGF.Builder.CreateElementBitCast(
2407  Arg, CtorCGF.ConvertTypeForMem(ASTTy));
2408  CtorCGF.EmitAnyExprToMem(Init, Arg, Init->getType().getQualifiers(),
2409  /*IsInitializer=*/true);
2410  ArgVal = CtorCGF.EmitLoadOfScalar(
2411  CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
2412  CGM.getContext().VoidPtrTy, Dst.getLocation());
2413  CtorCGF.Builder.CreateStore(ArgVal, CtorCGF.ReturnValue);
2414  CtorCGF.FinishFunction();
2415  Ctor = Fn;
2416  }
2417  if (VD->getType().isDestructedType() != QualType::DK_none) {
2418  // Generate function that emits destructor call for the threadprivate copy
2419  // of the variable VD
2420  CodeGenFunction DtorCGF(CGM);
2421  FunctionArgList Args;
2422  ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, Loc,
2423  /*Id=*/nullptr, CGM.getContext().VoidPtrTy,
2425  Args.push_back(&Dst);
2426 
2427  auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
2428  CGM.getContext().VoidTy, Args);
2429  auto FTy = CGM.getTypes().GetFunctionType(FI);
2430  auto Fn = CGM.CreateGlobalInitOrDestructFunction(
2431  FTy, ".__kmpc_global_dtor_.", FI, Loc);
2432  auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF);
2433  DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI, Args,
2434  Loc, Loc);
2435  // Create a scope with an artificial location for the body of this function.
2436  auto AL = ApplyDebugLocation::CreateArtificial(DtorCGF);
2437  auto ArgVal = DtorCGF.EmitLoadOfScalar(
2438  DtorCGF.GetAddrOfLocalVar(&Dst),
2439  /*Volatile=*/false, CGM.getContext().VoidPtrTy, Dst.getLocation());
2440  DtorCGF.emitDestroy(Address(ArgVal, VDAddr.getAlignment()), ASTTy,
2441  DtorCGF.getDestroyer(ASTTy.isDestructedType()),
2442  DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
2443  DtorCGF.FinishFunction();
2444  Dtor = Fn;
2445  }
2446  // Do not emit init function if it is not required.
2447  if (!Ctor && !Dtor)
2448  return nullptr;
2449 
2450  llvm::Type *CopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
2451  auto CopyCtorTy =
2452  llvm::FunctionType::get(CGM.VoidPtrTy, CopyCtorTyArgs,
2453  /*isVarArg=*/false)->getPointerTo();
2454  // Copying constructor for the threadprivate variable.
2455  // Must be NULL - reserved by the runtime; it currently requires that this
2456  // parameter always be NULL, otherwise it fires an assertion.
2457  CopyCtor = llvm::Constant::getNullValue(CopyCtorTy);
2458  if (Ctor == nullptr) {
2459  auto CtorTy = llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
2460  /*isVarArg=*/false)->getPointerTo();
2461  Ctor = llvm::Constant::getNullValue(CtorTy);
2462  }
2463  if (Dtor == nullptr) {
2464  auto DtorTy = llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy,
2465  /*isVarArg=*/false)->getPointerTo();
2466  Dtor = llvm::Constant::getNullValue(DtorTy);
2467  }
2468  if (!CGF) {
2469  auto InitFunctionTy =
2470  llvm::FunctionType::get(CGM.VoidTy, /*isVarArg*/ false);
2471  auto InitFunction = CGM.CreateGlobalInitOrDestructFunction(
2472  InitFunctionTy, ".__omp_threadprivate_init_.",
2473  CGM.getTypes().arrangeNullaryFunction());
2474  CodeGenFunction InitCGF(CGM);
2475  FunctionArgList ArgList;
2476  InitCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, InitFunction,
2477  CGM.getTypes().arrangeNullaryFunction(), ArgList,
2478  Loc, Loc);
2479  emitThreadPrivateVarInit(InitCGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
2480  InitCGF.FinishFunction();
2481  return InitFunction;
2482  }
2483  emitThreadPrivateVarInit(*CGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
2484  }
2485  return nullptr;
2486 }
2487 
2488 Address CGOpenMPRuntime::getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
2489  QualType VarType,
2490  StringRef Name) {
2491  llvm::Twine VarName(Name, ".artificial.");
2492  llvm::Type *VarLVType = CGF.ConvertTypeForMem(VarType);
2493  llvm::Value *GAddr = getOrCreateInternalVariable(VarLVType, VarName);
2494  llvm::Value *Args[] = {
2495  emitUpdateLocation(CGF, SourceLocation()),
2496  getThreadID(CGF, SourceLocation()),
2497  CGF.Builder.CreatePointerCast(GAddr, CGM.VoidPtrTy),
2498  CGF.Builder.CreateIntCast(CGF.getTypeSize(VarType), CGM.SizeTy,
2499  /*IsSigned=*/false),
2500  getOrCreateInternalVariable(CGM.VoidPtrPtrTy, VarName + ".cache.")};
2501  return Address(
2502  CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2503  CGF.EmitRuntimeCall(
2504  createRuntimeFunction(OMPRTL__kmpc_threadprivate_cached), Args),
2505  VarLVType->getPointerTo(/*AddrSpace=*/0)),
2506  CGM.getPointerAlign());
2507 }
2508 
2509 /// \brief Emits code for OpenMP 'if' clause using specified \a CodeGen
2510 /// function. Here is the logic:
2511 /// if (Cond) {
2512 /// ThenGen();
2513 /// } else {
2514 /// ElseGen();
2515 /// }
2516 static void emitOMPIfClause(CodeGenFunction &CGF, const Expr *Cond,
2517  const RegionCodeGenTy &ThenGen,
2518  const RegionCodeGenTy &ElseGen) {
2519  CodeGenFunction::LexicalScope ConditionScope(CGF, Cond->getSourceRange());
2520 
2521  // If the condition constant folds and can be elided, try to avoid emitting
2522  // the condition and the dead arm of the if/else.
2523  bool CondConstant;
2524  if (CGF.ConstantFoldsToSimpleInteger(Cond, CondConstant)) {
2525  if (CondConstant)
2526  ThenGen(CGF);
2527  else
2528  ElseGen(CGF);
2529  return;
2530  }
2531 
2532  // Otherwise, the condition did not fold, or we couldn't elide it. Just
2533  // emit the conditional branch.
2534  auto ThenBlock = CGF.createBasicBlock("omp_if.then");
2535  auto ElseBlock = CGF.createBasicBlock("omp_if.else");
2536  auto ContBlock = CGF.createBasicBlock("omp_if.end");
2537  CGF.EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, /*TrueCount=*/0);
2538 
2539  // Emit the 'then' code.
2540  CGF.EmitBlock(ThenBlock);
2541  ThenGen(CGF);
2542  CGF.EmitBranch(ContBlock);
2543  // Emit the 'else' code if present.
2544  // There is no need to emit line number for unconditional branch.
2546  CGF.EmitBlock(ElseBlock);
2547  ElseGen(CGF);
2548  // There is no need to emit line number for unconditional branch.
2550  CGF.EmitBranch(ContBlock);
2551  // Emit the continuation block for code after the if.
2552  CGF.EmitBlock(ContBlock, /*IsFinished=*/true);
2553 }
2554 
2555 void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
2556  llvm::Value *OutlinedFn,
2557  ArrayRef<llvm::Value *> CapturedVars,
2558  const Expr *IfCond) {
2559  if (!CGF.HaveInsertPoint())
2560  return;
2561  auto *RTLoc = emitUpdateLocation(CGF, Loc);
2562  auto &&ThenGen = [OutlinedFn, CapturedVars, RTLoc](CodeGenFunction &CGF,
2563  PrePostActionTy &) {
2564  // Build call __kmpc_fork_call(loc, n, microtask, var1, .., varn);
2565  auto &RT = CGF.CGM.getOpenMPRuntime();
2566  llvm::Value *Args[] = {
2567  RTLoc,
2568  CGF.Builder.getInt32(CapturedVars.size()), // Number of captured vars
2569  CGF.Builder.CreateBitCast(OutlinedFn, RT.getKmpc_MicroPointerTy())};
2570  llvm::SmallVector<llvm::Value *, 16> RealArgs;
2571  RealArgs.append(std::begin(Args), std::end(Args));
2572  RealArgs.append(CapturedVars.begin(), CapturedVars.end());
2573 
2574  auto RTLFn = RT.createRuntimeFunction(OMPRTL__kmpc_fork_call);
2575  CGF.EmitRuntimeCall(RTLFn, RealArgs);
2576  };
2577  auto &&ElseGen = [OutlinedFn, CapturedVars, RTLoc, Loc](CodeGenFunction &CGF,
2578  PrePostActionTy &) {
2579  auto &RT = CGF.CGM.getOpenMPRuntime();
2580  auto ThreadID = RT.getThreadID(CGF, Loc);
2581  // Build calls:
2582  // __kmpc_serialized_parallel(&Loc, GTid);
2583  llvm::Value *Args[] = {RTLoc, ThreadID};
2584  CGF.EmitRuntimeCall(
2585  RT.createRuntimeFunction(OMPRTL__kmpc_serialized_parallel), Args);
2586 
2587  // OutlinedFn(&GTid, &zero, CapturedStruct);
2588  auto ThreadIDAddr = RT.emitThreadIDAddress(CGF, Loc);
2589  Address ZeroAddr =
2590  CGF.CreateTempAlloca(CGF.Int32Ty, CharUnits::fromQuantity(4),
2591  /*Name*/ ".zero.addr");
2592  CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
2593  llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
2594  OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
2595  OutlinedFnArgs.push_back(ZeroAddr.getPointer());
2596  OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
2597  RT.emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
2598 
2599  // __kmpc_end_serialized_parallel(&Loc, GTid);
2600  llvm::Value *EndArgs[] = {RT.emitUpdateLocation(CGF, Loc), ThreadID};
2601  CGF.EmitRuntimeCall(
2602  RT.createRuntimeFunction(OMPRTL__kmpc_end_serialized_parallel),
2603  EndArgs);
2604  };
2605  if (IfCond)
2606  emitOMPIfClause(CGF, IfCond, ThenGen, ElseGen);
2607  else {
2608  RegionCodeGenTy ThenRCG(ThenGen);
2609  ThenRCG(CGF);
2610  }
2611 }
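// In OpenMP terms the two arms above correspond, roughly, to lowering
//   #pragma omp parallel if(cond)
// as
//   cond true:  __kmpc_fork_call(&loc, argc, (kmpc_micro)outlined, captured...);
//   cond false: __kmpc_serialized_parallel(&loc, gtid);
//               outlined(&gtid, &.zero.addr, captured...);
//               __kmpc_end_serialized_parallel(&loc, gtid);
// i.e. the serialized path runs the outlined body on the encountering thread.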
2612 
2613 // If we're inside an (outlined) parallel region, use the region info's
2614 // thread-ID variable (it is passed as the first argument of the outlined
2615 // function as "kmp_int32 *gtid"). Otherwise, if we're not inside a parallel
2616 // region but in regular serial code, get the thread ID by calling kmp_int32
2617 // __kmpc_global_thread_num(ident_t *loc), stash this thread ID in a temporary
2618 // and return the address of that temp.
2619 Address CGOpenMPRuntime::emitThreadIDAddress(CodeGenFunction &CGF,
2620  SourceLocation Loc) {
2621  if (auto *OMPRegionInfo =
2622  dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
2623  if (OMPRegionInfo->getThreadIDVariable())
2624  return OMPRegionInfo->getThreadIDVariableLValue(CGF).getAddress();
2625 
2626  auto ThreadID = getThreadID(CGF, Loc);
2627  auto Int32Ty =
2628  CGF.getContext().getIntTypeForBitwidth(/*DestWidth*/ 32, /*Signed*/ true);
2629  auto ThreadIDTemp = CGF.CreateMemTemp(Int32Ty, /*Name*/ ".threadid_temp.");
2630  CGF.EmitStoreOfScalar(ThreadID,
2631  CGF.MakeAddrLValue(ThreadIDTemp, Int32Ty));
2632 
2633  return ThreadIDTemp;
2634 }
2635 
2636 llvm::Constant *
2637 CGOpenMPRuntime::getOrCreateInternalVariable(llvm::Type *Ty,
2638  const llvm::Twine &Name) {
2639  SmallString<256> Buffer;
2640  llvm::raw_svector_ostream Out(Buffer);
2641  Out << Name;
2642  auto RuntimeName = Out.str();
2643  auto &Elem = *InternalVars.insert(std::make_pair(RuntimeName, nullptr)).first;
2644  if (Elem.second) {
2645  assert(Elem.second->getType()->getPointerElementType() == Ty &&
2646  "OMP internal variable has different type than requested");
2647  return &*Elem.second;
2648  }
2649 
2650  return Elem.second = new llvm::GlobalVariable(
2651  CGM.getModule(), Ty, /*IsConstant*/ false,
2652  llvm::GlobalValue::CommonLinkage, llvm::Constant::getNullValue(Ty),
2653  Elem.first());
2654 }
2655 
2656 llvm::Value *CGOpenMPRuntime::getCriticalRegionLock(StringRef CriticalName) {
2657  llvm::Twine Name(".gomp_critical_user_", CriticalName);
2658  return getOrCreateInternalVariable(KmpCriticalNameTy, Name.concat(".var"));
2659 }
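// Example: getCriticalRegionLock("foo") returns the internal global
// ".gomp_critical_user_foo.var" of type kmp_critical_name, created on first
// use by getOrCreateInternalVariable above with common linkage and a zero
// initializer, so every translation unit naming the same critical region
// ends up sharing one lock object.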
2660 
2661 namespace {
2662 /// Common pre(post)-action for different OpenMP constructs.
2663 class CommonActionTy final : public PrePostActionTy {
2664  llvm::Value *EnterCallee;
2665  ArrayRef<llvm::Value *> EnterArgs;
2666  llvm::Value *ExitCallee;
2667  ArrayRef<llvm::Value *> ExitArgs;
2668  bool Conditional;
2669  llvm::BasicBlock *ContBlock = nullptr;
2670 
2671 public:
2672  CommonActionTy(llvm::Value *EnterCallee, ArrayRef<llvm::Value *> EnterArgs,
2673  llvm::Value *ExitCallee, ArrayRef<llvm::Value *> ExitArgs,
2674  bool Conditional = false)
2675  : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
2676  ExitArgs(ExitArgs), Conditional(Conditional) {}
2677  void Enter(CodeGenFunction &CGF) override {
2678  llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs);
2679  if (Conditional) {
2680  llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes);
2681  auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
2682  ContBlock = CGF.createBasicBlock("omp_if.end");
2683  // Generate the branch (If-stmt)
2684  CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
2685  CGF.EmitBlock(ThenBlock);
2686  }
2687  }
2688  void Done(CodeGenFunction &CGF) {
2689  // Emit the rest of blocks/branches
2690  CGF.EmitBranch(ContBlock);
2691  CGF.EmitBlock(ContBlock, true);
2692  }
2693  void Exit(CodeGenFunction &CGF) override {
2694  CGF.EmitRuntimeCall(ExitCallee, ExitArgs);
2695  }
2696 };
2697 } // anonymous namespace
2698 
2699 void CGOpenMPRuntime::emitCriticalRegion(CodeGenFunction &CGF,
2700  StringRef CriticalName,
2701  const RegionCodeGenTy &CriticalOpGen,
2702  SourceLocation Loc, const Expr *Hint) {
2703  // __kmpc_critical[_with_hint](ident_t *, gtid, Lock[, hint]);
2704  // CriticalOpGen();
2705  // __kmpc_end_critical(ident_t *, gtid, Lock);
2706  // Prepare arguments and build a call to __kmpc_critical
2707  if (!CGF.HaveInsertPoint())
2708  return;
2709  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
2710  getCriticalRegionLock(CriticalName)};
2711  llvm::SmallVector<llvm::Value *, 4> EnterArgs(std::begin(Args),
2712  std::end(Args));
2713  if (Hint) {
2714  EnterArgs.push_back(CGF.Builder.CreateIntCast(
2715  CGF.EmitScalarExpr(Hint), CGM.IntPtrTy, /*isSigned=*/false));
2716  }
2717  CommonActionTy Action(
2718  createRuntimeFunction(Hint ? OMPRTL__kmpc_critical_with_hint
2719  : OMPRTL__kmpc_critical),
2720  EnterArgs, createRuntimeFunction(OMPRTL__kmpc_end_critical), Args);
2721  CriticalOpGen.setAction(Action);
2722  emitInlinedDirective(CGF, OMPD_critical, CriticalOpGen);
2723 }
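// Sketch of the sequence this emits for '#pragma omp critical (name)':
//   __kmpc_critical(&loc, gtid, &.gomp_critical_user_name.var);
//   <body of the critical region>
//   __kmpc_end_critical(&loc, gtid, &.gomp_critical_user_name.var);
// with __kmpc_critical_with_hint(..., hint) used instead on entry when a
// hint clause is present (the lock comes from getCriticalRegionLock above).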
2724 
2725 void CGOpenMPRuntime::emitMasterRegion(CodeGenFunction &CGF,
2726  const RegionCodeGenTy &MasterOpGen,
2727  SourceLocation Loc) {
2728  if (!CGF.HaveInsertPoint())
2729  return;
2730  // if(__kmpc_master(ident_t *, gtid)) {
2731  // MasterOpGen();
2732  // __kmpc_end_master(ident_t *, gtid);
2733  // }
2734  // Prepare arguments and build a call to __kmpc_master
2735  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
2736  CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_master), Args,
2737  createRuntimeFunction(OMPRTL__kmpc_end_master), Args,
2738  /*Conditional=*/true);
2739  MasterOpGen.setAction(Action);
2740  emitInlinedDirective(CGF, OMPD_master, MasterOpGen);
2741  Action.Done(CGF);
2742 }
2743 
2744 void CGOpenMPRuntime::emitTaskyieldCall(CodeGenFunction &CGF,
2745  SourceLocation Loc) {
2746  if (!CGF.HaveInsertPoint())
2747  return;
2748  // Build call __kmpc_omp_taskyield(loc, thread_id, 0);
2749  llvm::Value *Args[] = {
2750  emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
2751  llvm::ConstantInt::get(CGM.IntTy, /*V=*/0, /*isSigned=*/true)};
2752  CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_taskyield), Args);
2753  if (auto *Region = dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
2754  Region->emitUntiedSwitch(CGF);
2755 }
2756 
2757 void CGOpenMPRuntime::emitTaskgroupRegion(CodeGenFunction &CGF,
2758  const RegionCodeGenTy &TaskgroupOpGen,
2759  SourceLocation Loc) {
2760  if (!CGF.HaveInsertPoint())
2761  return;
2762  // __kmpc_taskgroup(ident_t *, gtid);
2763  // TaskgroupOpGen();
2764  // __kmpc_end_taskgroup(ident_t *, gtid);
2765  // Prepare arguments and build a call to __kmpc_taskgroup
2766  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
2767  CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_taskgroup), Args,
2768  createRuntimeFunction(OMPRTL__kmpc_end_taskgroup),
2769  Args);
2770  TaskgroupOpGen.setAction(Action);
2771  emitInlinedDirective(CGF, OMPD_taskgroup, TaskgroupOpGen);
2772 }
2773 
2774 /// Given an array of pointers to variables, project the address of a
2775 /// given variable.
2776 static Address emitAddrOfVarFromArray(CodeGenFunction &CGF, Address Array,
2777  unsigned Index, const VarDecl *Var) {
2778  // Pull out the pointer to the variable.
2779  Address PtrAddr =
2780  CGF.Builder.CreateConstArrayGEP(Array, Index, CGF.getPointerSize());
2781  llvm::Value *Ptr = CGF.Builder.CreateLoad(PtrAddr);
2782 
2783  Address Addr = Address(Ptr, CGF.getContext().getDeclAlign(Var));
2784  Addr = CGF.Builder.CreateElementBitCast(
2785  Addr, CGF.ConvertTypeForMem(Var->getType()));
2786  return Addr;
2787 }
2788 
2789 static llvm::Value *emitCopyprivateCopyFunction(
2790  CodeGenModule &CGM, llvm::Type *ArgsType,
2791  ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs,
2792  ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps,
2793  SourceLocation Loc) {
2794  auto &C = CGM.getContext();
2795  // void copy_func(void *LHSArg, void *RHSArg);
2796  FunctionArgList Args;
2797  ImplicitParamDecl LHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
2799  ImplicitParamDecl RHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
2801  Args.push_back(&LHSArg);
2802  Args.push_back(&RHSArg);
2803  auto &CGFI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2804  auto *Fn = llvm::Function::Create(
2806  ".omp.copyprivate.copy_func", &CGM.getModule());
2807  CGM.SetInternalFunctionAttributes(/*D=*/nullptr, Fn, CGFI);
2808  CodeGenFunction CGF(CGM);
2809  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2810  // Dest = (void*[n])(LHSArg);
2811  // Src = (void*[n])(RHSArg);
2812  Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2813  CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
2814  ArgsType), CGF.getPointerAlign());
2815  Address RHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2816  CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
2817  ArgsType), CGF.getPointerAlign());
2818  // *(Type0*)Dst[0] = *(Type0*)Src[0];
2819  // *(Type1*)Dst[1] = *(Type1*)Src[1];
2820  // ...
2821  // *(Typen*)Dst[n] = *(Typen*)Src[n];
2822  for (unsigned I = 0, E = AssignmentOps.size(); I < E; ++I) {
2823  auto DestVar = cast<VarDecl>(cast<DeclRefExpr>(DestExprs[I])->getDecl());
2824  Address DestAddr = emitAddrOfVarFromArray(CGF, LHS, I, DestVar);
2825 
2826  auto SrcVar = cast<VarDecl>(cast<DeclRefExpr>(SrcExprs[I])->getDecl());
2827  Address SrcAddr = emitAddrOfVarFromArray(CGF, RHS, I, SrcVar);
2828 
2829  auto *VD = cast<DeclRefExpr>(CopyprivateVars[I])->getDecl();
2830  QualType Type = VD->getType();
2831  CGF.EmitOMPCopy(Type, DestAddr, SrcAddr, DestVar, SrcVar, AssignmentOps[I]);
2832  }
2833  CGF.FinishFunction();
2834  return Fn;
2835 }
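// The generated helper has, roughly, the shape (T0..Tn and the names are
// illustrative):
//   void .omp.copyprivate.copy_func(void *lhs, void *rhs) {
//     void **dst = (void **)lhs, **src = (void **)rhs;
//     *(T0 *)dst[0] = *(T0 *)src[0];  // one assignment per copyprivate var,
//     ...                             // emitted via the AssignmentOps exprs
//   }
// which __kmpc_copyprivate uses to broadcast the executing thread's values to
// the other threads of the team.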
2836 
2837 void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF,
2838  const RegionCodeGenTy &SingleOpGen,
2839  SourceLocation Loc,
2840  ArrayRef<const Expr *> CopyprivateVars,
2841  ArrayRef<const Expr *> SrcExprs,
2842  ArrayRef<const Expr *> DstExprs,
2843  ArrayRef<const Expr *> AssignmentOps) {
2844  if (!CGF.HaveInsertPoint())
2845  return;
2846  assert(CopyprivateVars.size() == SrcExprs.size() &&
2847  CopyprivateVars.size() == DstExprs.size() &&
2848  CopyprivateVars.size() == AssignmentOps.size());
2849  auto &C = CGM.getContext();
2850  // int32 did_it = 0;
2851  // if(__kmpc_single(ident_t *, gtid)) {
2852  // SingleOpGen();
2853  // __kmpc_end_single(ident_t *, gtid);
2854  // did_it = 1;
2855  // }
2856  // call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>,
2857  // <copy_func>, did_it);
2858 
2859  Address DidIt = Address::invalid();
2860  if (!CopyprivateVars.empty()) {
2861  // int32 did_it = 0;
2862  auto KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
2863  DidIt = CGF.CreateMemTemp(KmpInt32Ty, ".omp.copyprivate.did_it");
2864  CGF.Builder.CreateStore(CGF.Builder.getInt32(0), DidIt);
2865  }
2866  // Prepare arguments and build a call to __kmpc_single
2867  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
2868  CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_single), Args,
2869  createRuntimeFunction(OMPRTL__kmpc_end_single), Args,
2870  /*Conditional=*/true);
2871  SingleOpGen.setAction(Action);
2872  emitInlinedDirective(CGF, OMPD_single, SingleOpGen);
2873  if (DidIt.isValid()) {
2874  // did_it = 1;
2875  CGF.Builder.CreateStore(CGF.Builder.getInt32(1), DidIt);
2876  }
2877  Action.Done(CGF);
2878  // call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>,
2879  // <copy_func>, did_it);
2880  if (DidIt.isValid()) {
2881  llvm::APInt ArraySize(/*unsigned int numBits=*/32, CopyprivateVars.size());
2882  auto CopyprivateArrayTy =
2883  C.getConstantArrayType(C.VoidPtrTy, ArraySize, ArrayType::Normal,
2884  /*IndexTypeQuals=*/0);
2885  // Create a list of all private variables for copyprivate.
2886  Address CopyprivateList =
2887  CGF.CreateMemTemp(CopyprivateArrayTy, ".omp.copyprivate.cpr_list");
2888  for (unsigned I = 0, E = CopyprivateVars.size(); I < E; ++I) {
2889  Address Elem = CGF.Builder.CreateConstArrayGEP(
2890  CopyprivateList, I, CGF.getPointerSize());
2891  CGF.Builder.CreateStore(
2892  CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2893  CGF.EmitLValue(CopyprivateVars[I]).getPointer(), CGF.VoidPtrTy),
2894  Elem);
2895  }
2896  // Build function that copies private values from single region to all other
2897  // threads in the corresponding parallel region.
2898  auto *CpyFn = emitCopyprivateCopyFunction(
2899  CGM, CGF.ConvertTypeForMem(CopyprivateArrayTy)->getPointerTo(),
2900  CopyprivateVars, SrcExprs, DstExprs, AssignmentOps, Loc);
2901  auto *BufSize = CGF.getTypeSize(CopyprivateArrayTy);
2902  Address CL =
2903  CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(CopyprivateList,
2904  CGF.VoidPtrTy);
2905  auto *DidItVal = CGF.Builder.CreateLoad(DidIt);
2906  llvm::Value *Args[] = {
2907  emitUpdateLocation(CGF, Loc), // ident_t *<loc>
2908  getThreadID(CGF, Loc), // i32 <gtid>
2909  BufSize, // size_t <buf_size>
2910  CL.getPointer(), // void *<copyprivate list>
2911  CpyFn, // void (*) (void *, void *) <copy_func>
2912  DidItVal // i32 did_it
2913  };
2914  CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_copyprivate), Args);
2915  }
2916 }
2917 
2918 void CGOpenMPRuntime::emitOrderedRegion(CodeGenFunction &CGF,
2919  const RegionCodeGenTy &OrderedOpGen,
2920  SourceLocation Loc, bool IsThreads) {
2921  if (!CGF.HaveInsertPoint())
2922  return;
2923  // __kmpc_ordered(ident_t *, gtid);
2924  // OrderedOpGen();
2925  // __kmpc_end_ordered(ident_t *, gtid);
2926  // Prepare arguments and build a call to __kmpc_ordered
2927  if (IsThreads) {
2928  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
2929  CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_ordered), Args,
2930  createRuntimeFunction(OMPRTL__kmpc_end_ordered),
2931  Args);
2932  OrderedOpGen.setAction(Action);
2933  emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
2934  return;
2935  }
2936  emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
2937 }
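// Source-level sketch of the construct lowered above (illustrative only): with
// the 'threads' form, each ordered block is bracketed by __kmpc_ordered and
// __kmpc_end_ordered so bodies run in iteration order.
//   void ordered_loop(int *out, int n) {
//   #pragma omp parallel for ordered schedule(dynamic)
//     for (int i = 0; i < n; ++i) {
//   #pragma omp ordered
//       out[i] = i; // body executed in iteration order
//     }
//   }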
2938 
2939 void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
2940  OpenMPDirectiveKind Kind, bool EmitChecks,
2941  bool ForceSimpleCall) {
2942  if (!CGF.HaveInsertPoint())
2943  return;
2944  // Build call __kmpc_cancel_barrier(loc, thread_id);
2945  // Build call __kmpc_barrier(loc, thread_id);
2946  unsigned Flags;
2947  if (Kind == OMPD_for)
2948  Flags = OMP_IDENT_BARRIER_IMPL_FOR;
2949  else if (Kind == OMPD_sections)
2950  Flags = OMP_IDENT_BARRIER_IMPL_SECTIONS;
2951  else if (Kind == OMPD_single)
2952  Flags = OMP_IDENT_BARRIER_IMPL_SINGLE;
2953  else if (Kind == OMPD_barrier)
2954  Flags = OMP_IDENT_BARRIER_EXPL;
2955  else
2956  Flags = OMP_IDENT_BARRIER_IMPL;
2957  // Build call __kmpc_cancel_barrier(loc, thread_id) or __kmpc_barrier(loc,
2958  // thread_id);
2959  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
2960  getThreadID(CGF, Loc)};
2961  if (auto *OMPRegionInfo =
2962  dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
2963  if (!ForceSimpleCall && OMPRegionInfo->hasCancel()) {
2964  auto *Result = CGF.EmitRuntimeCall(
2965  createRuntimeFunction(OMPRTL__kmpc_cancel_barrier), Args);
2966  if (EmitChecks) {
2967  // if (__kmpc_cancel_barrier()) {
2968  // exit from construct;
2969  // }
2970  auto *ExitBB = CGF.createBasicBlock(".cancel.exit");
2971  auto *ContBB = CGF.createBasicBlock(".cancel.continue");
2972  auto *Cmp = CGF.Builder.CreateIsNotNull(Result);
2973  CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
2974  CGF.EmitBlock(ExitBB);
2975  // exit from construct;
2976  auto CancelDestination =
2977  CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
2978  CGF.EmitBranchThroughCleanup(CancelDestination);
2979  CGF.EmitBlock(ContBB, /*IsFinished=*/true);
2980  }
2981  return;
2982  }
2983  }
2984  CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_barrier), Args);
2985 }
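// Sketch of the two barrier flavours distinguished by the flags above
// (illustrative): the implicit barrier at the end of a worksharing loop gets
// an OMP_IDENT_BARRIER_IMPL_* flag, an explicit barrier gets
// OMP_IDENT_BARRIER_EXPL, and inside a cancellable region the call becomes
// __kmpc_cancel_barrier whose result is tested to exit the construct.
//   void barriers(int *a, int n) {
//   #pragma omp parallel
//     {
//   #pragma omp for        // implicit barrier -> OMP_IDENT_BARRIER_IMPL_FOR
//       for (int i = 0; i < n; ++i)
//         a[i] = i;
//   #pragma omp barrier    // explicit barrier -> OMP_IDENT_BARRIER_EXPL
//     }
//   }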
2986 
2987 /// \brief Map the OpenMP loop schedule to the runtime enumeration.
2988 static OpenMPSchedType getRuntimeSchedule(OpenMPScheduleClauseKind ScheduleKind,
2989  bool Chunked, bool Ordered) {
2990  switch (ScheduleKind) {
2991  case OMPC_SCHEDULE_static:
2992  return Chunked ? (Ordered ? OMP_ord_static_chunked : OMP_sch_static_chunked)
2993  : (Ordered ? OMP_ord_static : OMP_sch_static);
2994  case OMPC_SCHEDULE_dynamic:
2995  return Ordered ? OMP_ord_dynamic_chunked : OMP_sch_dynamic_chunked;
2996  case OMPC_SCHEDULE_guided:
2997  return Ordered ? OMP_ord_guided_chunked : OMP_sch_guided_chunked;
2998  case OMPC_SCHEDULE_runtime:
2999  return Ordered ? OMP_ord_runtime : OMP_sch_runtime;
3000  case OMPC_SCHEDULE_auto:
3001  return Ordered ? OMP_ord_auto : OMP_sch_auto;
3002  case OMPC_SCHEDULE_unknown:
3003  assert(!Chunked && "chunk was specified but schedule kind not known");
3004  return Ordered ? OMP_ord_static : OMP_sch_static;
3005  }
3006  llvm_unreachable("Unexpected runtime schedule");
3007 }
3008 
3009 /// \brief Map the OpenMP distribute schedule to the runtime enumeration.
3010 static OpenMPSchedType
3011 getRuntimeSchedule(OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) {
3012  // only static is allowed for dist_schedule
3013  return Chunked ? OMP_dist_sch_static_chunked : OMP_dist_sch_static;
3014 }
3015 
3016 bool CGOpenMPRuntime::isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind,
3017  bool Chunked) const {
3018  auto Schedule = getRuntimeSchedule(ScheduleKind, Chunked, /*Ordered=*/false);
3019  return Schedule == OMP_sch_static;
3020 }
3021 
3022 bool CGOpenMPRuntime::isStaticNonchunked(
3023  OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const {
3024  auto Schedule = getRuntimeSchedule(ScheduleKind, Chunked);
3025  return Schedule == OMP_dist_sch_static;
3026 }
3027 
3028 
3029 bool CGOpenMPRuntime::isDynamic(OpenMPScheduleClauseKind ScheduleKind) const {
3030  auto Schedule =
3031  getRuntimeSchedule(ScheduleKind, /*Chunked=*/false, /*Ordered=*/false);
3032  assert(Schedule != OMP_sch_static_chunked && "cannot be chunked here");
3033  return Schedule != OMP_sch_static;
3034 }
3035 
3036 static int addMonoNonMonoModifier(OpenMPSchedType Schedule,
3037  OpenMPScheduleClauseModifier M1,
3038  OpenMPScheduleClauseModifier M2) {
3039  int Modifier = 0;
3040  switch (M1) {
3041  case OMPC_SCHEDULE_MODIFIER_monotonic:
3042  Modifier = OMP_sch_modifier_monotonic;
3043  break;
3044  case OMPC_SCHEDULE_MODIFIER_nonmonotonic:
3045  Modifier = OMP_sch_modifier_nonmonotonic;
3046  break;
3047  case OMPC_SCHEDULE_MODIFIER_simd:
3048  if (Schedule == OMP_sch_static_chunked)
3049  Schedule = OMP_sch_static_balanced_chunked;
3050  break;
3051  case OMPC_SCHEDULE_MODIFIER_last:
3052  case OMPC_SCHEDULE_MODIFIER_unknown:
3053  break;
3054  }
3055  switch (M2) {
3056  case OMPC_SCHEDULE_MODIFIER_monotonic:
3057  Modifier = OMP_sch_modifier_monotonic;
3058  break;
3059  case OMPC_SCHEDULE_MODIFIER_nonmonotonic:
3060  Modifier = OMP_sch_modifier_nonmonotonic;
3061  break;
3062  case OMPC_SCHEDULE_MODIFIER_simd:
3063  if (Schedule == OMP_sch_static_chunked)
3064  Schedule = OMP_sch_static_balanced_chunked;
3065  break;
3066  case OMPC_SCHEDULE_MODIFIER_last:
3067  case OMPC_SCHEDULE_MODIFIER_unknown:
3068  break;
3069  }
3070  return Schedule | Modifier;
3071 }
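// Worked example of the encoding above (a sketch): the modifier is simply
// OR-ed into the schedule value passed to the runtime, e.g.
//   addMonoNonMonoModifier(OMP_sch_dynamic_chunked,
//                          OMPC_SCHEDULE_MODIFIER_nonmonotonic,
//                          OMPC_SCHEDULE_MODIFIER_unknown)
//     == (OMP_sch_dynamic_chunked | OMP_sch_modifier_nonmonotonic)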
3072 
3073 void CGOpenMPRuntime::emitForDispatchInit(
3074  CodeGenFunction &CGF, SourceLocation Loc,
3075  const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned,
3076  bool Ordered, const DispatchRTInput &DispatchValues) {
3077  if (!CGF.HaveInsertPoint())
3078  return;
3079  OpenMPSchedType Schedule = getRuntimeSchedule(
3080  ScheduleKind.Schedule, DispatchValues.Chunk != nullptr, Ordered);
3081  assert(Ordered ||
3082  (Schedule != OMP_sch_static && Schedule != OMP_sch_static_chunked &&
3083  Schedule != OMP_ord_static && Schedule != OMP_ord_static_chunked &&
3084  Schedule != OMP_sch_static_balanced_chunked));
3085  // Call __kmpc_dispatch_init(
3086  // ident_t *loc, kmp_int32 tid, kmp_int32 schedule,
3087  // kmp_int[32|64] lower, kmp_int[32|64] upper,
3088  // kmp_int[32|64] stride, kmp_int[32|64] chunk);
3089 
3090  // If the Chunk was not specified in the clause, use the default value 1.
3091  llvm::Value *Chunk = DispatchValues.Chunk ? DispatchValues.Chunk
3092  : CGF.Builder.getIntN(IVSize, 1);
3093  llvm::Value *Args[] = {
3094  emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
3095  CGF.Builder.getInt32(addMonoNonMonoModifier(
3096  Schedule, ScheduleKind.M1, ScheduleKind.M2)), // Schedule type
3097  DispatchValues.LB, // Lower
3098  DispatchValues.UB, // Upper
3099  CGF.Builder.getIntN(IVSize, 1), // Stride
3100  Chunk // Chunk
3101  };
3102  CGF.EmitRuntimeCall(createDispatchInitFunction(IVSize, IVSigned), Args);
3103 }
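// User-level sketch of a loop that takes this dynamic-dispatch path
// (illustrative, assuming a 32-bit signed induction variable so the
// __kmpc_dispatch_init_4 entry point is selected): init is called once with
// the schedule, bounds and chunk; chunks are then pulled via dispatch_next.
//   void scale(float *a, int n) {
//   #pragma omp parallel for schedule(dynamic, 4)
//     for (int i = 0; i < n; ++i)
//       a[i] *= 2.0f;
//   }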
3104 
3105 static void emitForStaticInitCall(
3106  CodeGenFunction &CGF, llvm::Value *UpdateLocation, llvm::Value *ThreadId,
3107  llvm::Constant *ForStaticInitFunction, OpenMPSchedType Schedule,
3108  OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
3109  const CGOpenMPRuntime::StaticRTInput &Values) {
3110  if (!CGF.HaveInsertPoint())
3111  return;
3112 
3113  assert(!Values.Ordered);
3114  assert(Schedule == OMP_sch_static || Schedule == OMP_sch_static_chunked ||
3115  Schedule == OMP_sch_static_balanced_chunked ||
3116  Schedule == OMP_ord_static || Schedule == OMP_ord_static_chunked ||
3117  Schedule == OMP_dist_sch_static ||
3118  Schedule == OMP_dist_sch_static_chunked);
3119 
3120  // Call __kmpc_for_static_init(
3121  // ident_t *loc, kmp_int32 tid, kmp_int32 schedtype,
3122  // kmp_int32 *p_lastiter, kmp_int[32|64] *p_lower,
3123  // kmp_int[32|64] *p_upper, kmp_int[32|64] *p_stride,
3124  // kmp_int[32|64] incr, kmp_int[32|64] chunk);
3125  llvm::Value *Chunk = Values.Chunk;
3126  if (Chunk == nullptr) {
3127  assert((Schedule == OMP_sch_static || Schedule == OMP_ord_static ||
3128  Schedule == OMP_dist_sch_static) &&
3129  "expected static non-chunked schedule");
3130  // If the Chunk was not specified in the clause, use the default value 1.
3131  Chunk = CGF.Builder.getIntN(Values.IVSize, 1);
3132  } else {
3133  assert((Schedule == OMP_sch_static_chunked ||
3134  Schedule == OMP_sch_static_balanced_chunked ||
3135  Schedule == OMP_ord_static_chunked ||
3136  Schedule == OMP_dist_sch_static_chunked) &&
3137  "expected static chunked schedule");
3138  }
3139  llvm::Value *Args[] = {
3140  UpdateLocation,
3141  ThreadId,
3142  CGF.Builder.getInt32(addMonoNonMonoModifier(Schedule, M1,
3143  M2)), // Schedule type
3144  Values.IL.getPointer(), // &isLastIter
3145  Values.LB.getPointer(), // &LB
3146  Values.UB.getPointer(), // &UB
3147  Values.ST.getPointer(), // &Stride
3148  CGF.Builder.getIntN(Values.IVSize, 1), // Incr
3149  Chunk // Chunk
3150  };
3151  CGF.EmitRuntimeCall(ForStaticInitFunction, Args);
3152 }
3153 
3154 void CGOpenMPRuntime::emitForStaticInit(CodeGenFunction &CGF,
3155  SourceLocation Loc,
3156  OpenMPDirectiveKind DKind,
3157  const OpenMPScheduleTy &ScheduleKind,
3158  const StaticRTInput &Values) {
3159  OpenMPSchedType ScheduleNum = getRuntimeSchedule(
3160  ScheduleKind.Schedule, Values.Chunk != nullptr, Values.Ordered);
3161  assert(isOpenMPWorksharingDirective(DKind) &&
3162  "Expected loop-based or sections-based directive.");
3163  auto *UpdatedLocation = emitUpdateLocation(CGF, Loc,
3164  isOpenMPLoopDirective(DKind)
3165  ? OMP_IDENT_WORK_LOOP
3166  : OMP_IDENT_WORK_SECTIONS);
3167  auto *ThreadId = getThreadID(CGF, Loc);
3168  auto *StaticInitFunction =
3169  createForStaticInitFunction(Values.IVSize, Values.IVSigned);
3170  emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
3171  ScheduleNum, ScheduleKind.M1, ScheduleKind.M2, Values);
3172 }
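// User-level sketch of a statically scheduled worksharing loop served by the
// __kmpc_for_static_init_* call emitted above (illustrative): the runtime
// fills in this thread's lower/upper bound and stride, the thread executes
// only that subrange, and emitForStaticFinish() below emits the matching
// __kmpc_for_static_fini call.
//   void add_one(float *a, int n) {
//   #pragma omp parallel for schedule(static)
//     for (int i = 0; i < n; ++i)
//       a[i] += 1.0f;
//   }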
3173 
3174 void CGOpenMPRuntime::emitDistributeStaticInit(
3175  CodeGenFunction &CGF, SourceLocation Loc,
3176  OpenMPDistScheduleClauseKind SchedKind,
3177  const CGOpenMPRuntime::StaticRTInput &Values) {
3178  OpenMPSchedType ScheduleNum =
3179  getRuntimeSchedule(SchedKind, Values.Chunk != nullptr);
3180  auto *UpdatedLocation =
3181  emitUpdateLocation(CGF, Loc, OMP_IDENT_WORK_DISTRIBUTE);
3182  auto *ThreadId = getThreadID(CGF, Loc);
3183  auto *StaticInitFunction =
3184  createForStaticInitFunction(Values.IVSize, Values.IVSigned);
3185  emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
3186  ScheduleNum, OMPC_SCHEDULE_MODIFIER_unknown,
3187  OMPC_SCHEDULE_MODIFIER_unknown, Values);
3188 }
3189 
3190 void CGOpenMPRuntime::emitForStaticFinish(CodeGenFunction &CGF,
3191  SourceLocation Loc,
3192  OpenMPDirectiveKind DKind) {
3193  if (!CGF.HaveInsertPoint())
3194  return;
3195  // Call __kmpc_for_static_fini(ident_t *loc, kmp_int32 tid);
3196  llvm::Value *Args[] = {
3197  emitUpdateLocation(CGF, Loc,
3198  isOpenMPDistributeDirective(DKind)
3199  ? OMP_IDENT_WORK_DISTRIBUTE
3200  : isOpenMPLoopDirective(DKind)
3201  ? OMP_IDENT_WORK_LOOP
3202  : OMP_IDENT_WORK_SECTIONS),
3203  getThreadID(CGF, Loc)};
3204  CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_for_static_fini),
3205  Args);
3206 }
3207 
3208 void CGOpenMPRuntime::emitForOrderedIterationEnd(CodeGenFunction &CGF,
3209  SourceLocation Loc,
3210  unsigned IVSize,
3211  bool IVSigned) {
3212  if (!CGF.HaveInsertPoint())
3213  return;
3214  // Call __kmpc_for_dynamic_fini_(4|8)[u](ident_t *loc, kmp_int32 tid);
3215  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
3216  CGF.EmitRuntimeCall(createDispatchFiniFunction(IVSize, IVSigned), Args);
3217 }
3218 
3219 llvm::Value *CGOpenMPRuntime::emitForNext(CodeGenFunction &CGF,
3220  SourceLocation Loc, unsigned IVSize,
3221  bool IVSigned, Address IL,
3222  Address LB, Address UB,
3223  Address ST) {
3224  // Call __kmpc_dispatch_next(
3225  // ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
3226  // kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
3227  // kmp_int[32|64] *p_stride);
3228  llvm::Value *Args[] = {
3229  emitUpdateLocation(CGF, Loc),
3230  getThreadID(CGF, Loc),
3231  IL.getPointer(), // &isLastIter
3232  LB.getPointer(), // &Lower
3233  UB.getPointer(), // &Upper
3234  ST.getPointer() // &Stride
3235  };
3236  llvm::Value *Call =
3237  CGF.EmitRuntimeCall(createDispatchNextFunction(IVSize, IVSigned), Args);
3238  return CGF.EmitScalarConversion(
3239  Call, CGF.getContext().getIntTypeForBitwidth(32, /* Signed */ true),
3240  CGF.getContext().BoolTy, Loc);
3241 }
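// Sketch of how the returned i1 is typically used by the caller for dynamic
// schedules (a simplified outline, not a verbatim copy of the loop codegen):
//   while (__kmpc_dispatch_next_4(&loc, gtid, &last, &lb, &ub, &st)) {
//     for (iv = lb; iv <= ub; iv += st)
//       <loop body>;
//   }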
3242 
3243 void CGOpenMPRuntime::emitNumThreadsClause(CodeGenFunction &CGF,
3244  llvm::Value *NumThreads,
3245  SourceLocation Loc) {
3246  if (!CGF.HaveInsertPoint())
3247  return;
3248  // Build call __kmpc_push_num_threads(&loc, global_tid, num_threads)
3249  llvm::Value *Args[] = {
3250  emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
3251  CGF.Builder.CreateIntCast(NumThreads, CGF.Int32Ty, /*isSigned*/ true)};
3252  CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_push_num_threads),
3253  Args);
3254 }
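// Sketch of the clause this lowers (illustrative): the num_threads expression
// is evaluated, cast to i32 and pushed via __kmpc_push_num_threads just before
// the parallel region is forked.
//   void run(int nthreads) {
//   #pragma omp parallel num_threads(nthreads)
//     {
//       /* parallel work */
//     }
//   }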
3255 
3256 void CGOpenMPRuntime::emitProcBindClause(CodeGenFunction &CGF,
3257  OpenMPProcBindClauseKind ProcBind,
3258  SourceLocation Loc) {
3259  if (!CGF.HaveInsertPoint())
3260  return;
3261  // Constants for proc bind value accepted by the runtime.
3262  enum ProcBindTy {
3263  ProcBindFalse = 0,
3264  ProcBindTrue,
3265  ProcBindMaster,
3266  ProcBindClose,
3267  ProcBindSpread,
3268  ProcBindIntel,
3269  ProcBindDefault
3270  } RuntimeProcBind;
3271  switch (ProcBind) {
3272  case OMPC_PROC_BIND_master:
3273  RuntimeProcBind = ProcBindMaster;
3274  break;
3275  case OMPC_PROC_BIND_close:
3276  RuntimeProcBind = ProcBindClose;
3277  break;
3278  case OMPC_PROC_BIND_spread:
3279  RuntimeProcBind = ProcBindSpread;
3280  break;
3281  case OMPC_PROC_BIND_unknown:
3282  llvm_unreachable("Unsupported proc_bind value.");
3283  }
3284  // Build call __kmpc_push_proc_bind(&loc, global_tid, proc_bind)
3285  llvm::Value *Args[] = {
3286  emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
3287  llvm::ConstantInt::get(CGM.IntTy, RuntimeProcBind, /*isSigned=*/true)};
3288  CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_push_proc_bind), Args);
3289 }
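// Sketch of the corresponding clause (illustrative): proc_bind(master|close|
// spread) maps to the ProcBindTy constant above and is pushed via
// __kmpc_push_proc_bind before the fork.
//   void run_bound(void) {
//   #pragma omp parallel proc_bind(close)
//     {
//       /* threads placed close to the master thread's place */
//     }
//   }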
3290 
3291 void CGOpenMPRuntime::emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *>,
3292  SourceLocation Loc) {
3293  if (!CGF.HaveInsertPoint())
3294  return;
3295  // Build call void __kmpc_flush(ident_t *loc)
3296  CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_flush),
3297  emitUpdateLocation(CGF, Loc));
3298 }
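// Sketch of the directive this lowers (illustrative): the optional item list
// is ignored by the runtime entry point, which takes only the source location.
//   void publish(int *flag) {
//     *flag = 1;
//   #pragma omp flush   // lowered to a single __kmpc_flush(&<loc>) call
//   }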
3299 
3300 namespace {
3301 /// \brief Indexes of fields for type kmp_task_t.
3302 enum KmpTaskTFields {
3303  /// \brief List of shared variables.
3304  KmpTaskTShareds,
3305  /// \brief Task routine.
3306  KmpTaskTRoutine,
3307  /// \brief Partition id for the untied tasks.
3308  KmpTaskTPartId,
3309  /// Function with call of destructors for private variables.
3310  Data1,
3311  /// Task priority.
3312  Data2,
3313  /// (Taskloops only) Lower bound.
3314  KmpTaskTLowerBound,
3315  /// (Taskloops only) Upper bound.
3316  KmpTaskTUpperBound,
3317  /// (Taskloops only) Stride.
3318  KmpTaskTStride,
3319  /// (Taskloops only) Is last iteration flag.
3320  KmpTaskTLastIter,
3321  /// (Taskloops only) Reduction data.
3322  KmpTaskTReductions,
3323 };
3324 } // anonymous namespace
3325 
3326 bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::empty() const {
3327  // FIXME: Add other entries type when they become supported.
3328  return OffloadEntriesTargetRegion.empty();
3329 }
3330 
3331 /// \brief Initialize target region entry.
3332 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
3333  initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
3334  StringRef ParentName, unsigned LineNum,
3335  unsigned Order) {
3336  assert(CGM.getLangOpts().OpenMPIsDevice && "Initialization of entries is "
3337  "only required for the device "
3338  "code generation.");
3339  OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum] =
3340  OffloadEntryInfoTargetRegion(Order, /*Addr=*/nullptr, /*ID=*/nullptr,
3341  /*Flags=*/0);
3342  ++OffloadingEntriesNum;
3343 }
3344 
3345 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
3346  registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
3347  StringRef ParentName, unsigned LineNum,
3348  llvm::Constant *Addr, llvm::Constant *ID,
3349  int32_t Flags) {
3350  // If we are emitting code for a target, the entry is already initialized;
3351  // it only has to be registered.
3352  if (CGM.getLangOpts().OpenMPIsDevice) {
3353  assert(hasTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum) &&
3354  "Entry must exist.");
3355  auto &Entry =
3356  OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum];
3357  assert(Entry.isValid() && "Entry not initialized!");
3358  Entry.setAddress(Addr);
3359  Entry.setID(ID);
3360  Entry.setFlags(Flags);
3361  return;
3362  } else {
3363  OffloadEntryInfoTargetRegion Entry(OffloadingEntriesNum++, Addr, ID, Flags);
3364  OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum] = Entry;
3365  }
3366 }
3367 
3368 bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::hasTargetRegionEntryInfo(
3369  unsigned DeviceID, unsigned FileID, StringRef ParentName,
3370  unsigned LineNum) const {
3371  auto PerDevice = OffloadEntriesTargetRegion.find(DeviceID);
3372  if (PerDevice == OffloadEntriesTargetRegion.end())
3373  return false;
3374  auto PerFile = PerDevice->second.find(FileID);
3375  if (PerFile == PerDevice->second.end())
3376  return false;
3377  auto PerParentName = PerFile->second.find(ParentName);
3378  if (PerParentName == PerFile->second.end())
3379  return false;
3380  auto PerLine = PerParentName->second.find(LineNum);
3381  if (PerLine == PerParentName->second.end())
3382  return false;
3383  // Fail if this entry is already registered.
3384  if (PerLine->second.getAddress() || PerLine->second.getID())
3385  return false;
3386  return true;
3387 }
3388 
3389 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::actOnTargetRegionEntriesInfo(
3390  const OffloadTargetRegionEntryInfoActTy &Action) {
3391  // Scan all target region entries and perform the provided action.
3392  for (auto &D : OffloadEntriesTargetRegion)
3393  for (auto &F : D.second)
3394  for (auto &P : F.second)
3395  for (auto &L : P.second)
3396  Action(D.first, F.first, P.first(), L.first, L.second);
3397 }
3398 
3399 /// \brief Create a Ctor/Dtor-like function whose body is emitted through
3400 /// \a Codegen. This is used to emit the two functions that register and
3401 /// unregister the descriptor of the current compilation unit.
3402 static llvm::Function *
3404  const RegionCodeGenTy &Codegen) {
3405  auto &C = CGM.getContext();
3406  FunctionArgList Args;
3407  ImplicitParamDecl DummyPtr(C, C.VoidPtrTy, ImplicitParamDecl::Other);
3408  Args.push_back(&DummyPtr);
3409 
3410  CodeGenFunction CGF(CGM);
3411  // Disable debug info for the global (de-)initializers because they are not
3412  // part of any particular construct.
3413  CGF.disableDebugInfo();
3414  auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3415  auto FTy = CGM.getTypes().GetFunctionType(FI);
3416  auto *Fn = CGM.CreateGlobalInitOrDestructFunction(FTy, Name, FI);
3417  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FI, Args);
3418  Codegen(CGF);
3419  CGF.FinishFunction();
3420  return Fn;
3421 }
3422 
3423 llvm::Function *
3425  // If we don't have entries or if we are emitting code for the device, we
3426  // don't need to do anything.
3427  if (CGM.getLangOpts().OpenMPIsDevice || OffloadEntriesInfoManager.empty())
3428  return nullptr;
3429 
3430  auto &M = CGM.getModule();
3431  auto &C = CGM.getContext();
3432 
3433  // Get list of devices we care about
3434  auto &Devices = CGM.getLangOpts().OMPTargetTriples;
3435 
3436  // We should be creating an offloading descriptor only if there are devices
3437  // specified.
3438  assert(!Devices.empty() && "No OpenMP offloading devices??");
3439 
3440  // Create the external variables that will point to the begin and end of the
3441  // host entries section. These will be defined by the linker.
3442  auto *OffloadEntryTy =
3443  CGM.getTypes().ConvertTypeForMem(getTgtOffloadEntryQTy());
3444  llvm::GlobalVariable *HostEntriesBegin = new llvm::GlobalVariable(
3445  M, OffloadEntryTy, /*isConstant=*/true,
3446  llvm::GlobalValue::ExternalLinkage, /*Initializer=*/nullptr,
3447  ".omp_offloading.entries_begin");
3448  llvm::GlobalVariable *HostEntriesEnd = new llvm::GlobalVariable(
3449  M, OffloadEntryTy, /*isConstant=*/true,
3450  llvm::GlobalValue::ExternalLinkage, /*Initializer=*/nullptr,
3451  ".omp_offloading.entries_end");
3452 
3453  // Create all device images
3454  auto *DeviceImageTy = cast<llvm::StructType>(
3455  CGM.getTypes().ConvertTypeForMem(getTgtDeviceImageQTy()));
3456  ConstantInitBuilder DeviceImagesBuilder(CGM);
3457  auto DeviceImagesEntries = DeviceImagesBuilder.beginArray(DeviceImageTy);
3458 
3459  for (unsigned i = 0; i < Devices.size(); ++i) {
3460  StringRef T = Devices[i].getTriple();
3461  auto *ImgBegin = new llvm::GlobalVariable(
3462  M, CGM.Int8Ty, /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage,
3463  /*Initializer=*/nullptr,
3464  Twine(".omp_offloading.img_start.") + Twine(T));
3465  auto *ImgEnd = new llvm::GlobalVariable(
3466  M, CGM.Int8Ty, /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage,
3467  /*Initializer=*/nullptr, Twine(".omp_offloading.img_end.") + Twine(T));
3468 
3469  auto Dev = DeviceImagesEntries.beginStruct(DeviceImageTy);
3470  Dev.add(ImgBegin);
3471  Dev.add(ImgEnd);
3472  Dev.add(HostEntriesBegin);
3473  Dev.add(HostEntriesEnd);
3474  Dev.finishAndAddTo(DeviceImagesEntries);
3475  }
3476 
3477  // Create device images global array.
3478  llvm::GlobalVariable *DeviceImages =
3479  DeviceImagesEntries.finishAndCreateGlobal(".omp_offloading.device_images",
3480  CGM.getPointerAlign(),
3481  /*isConstant=*/true);
3482  DeviceImages->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3483 
3484  // This is a Zero array to be used in the creation of the constant expressions
3485  llvm::Constant *Index[] = {llvm::Constant::getNullValue(CGM.Int32Ty),
3486  llvm::Constant::getNullValue(CGM.Int32Ty)};
3487 
3488  // Create the target region descriptor.
3489  auto *BinaryDescriptorTy = cast<llvm::StructType>(
3490  CGM.getTypes().ConvertTypeForMem(getTgtBinaryDescriptorQTy()));
3491  ConstantInitBuilder DescBuilder(CGM);
3492  auto DescInit = DescBuilder.beginStruct(BinaryDescriptorTy);
3493  DescInit.addInt(CGM.Int32Ty, Devices.size());
3494  DescInit.add(llvm::ConstantExpr::getGetElementPtr(DeviceImages->getValueType(),
3495  DeviceImages,
3496  Index));
3497  DescInit.add(HostEntriesBegin);
3498  DescInit.add(HostEntriesEnd);
3499 
3500  auto *Desc = DescInit.finishAndCreateGlobal(".omp_offloading.descriptor",
3501  CGM.getPointerAlign(),
3502  /*isConstant=*/true);
3503 
3504  // Emit code to register or unregister the descriptor at execution
3505  // startup or closing, respectively.
3506 
3507  // Create a variable to drive the registration and unregistration of the
3508  // descriptor, so we can reuse the logic that emits Ctors and Dtors.
3509  auto *IdentInfo = &C.Idents.get(".omp_offloading.reg_unreg_var");
3510  ImplicitParamDecl RegUnregVar(C, C.getTranslationUnitDecl(), SourceLocation(),
3511  IdentInfo, C.CharTy, ImplicitParamDecl::Other);
3512 
3513  auto *UnRegFn = createOffloadingBinaryDescriptorFunction(
3514  CGM, ".omp_offloading.descriptor_unreg",
3515  [&](CodeGenFunction &CGF, PrePostActionTy &) {
3517  Desc);
3518  });
3519  auto *RegFn = createOffloadingBinaryDescriptorFunction(
3520  CGM, ".omp_offloading.descriptor_reg",
3521  [&](CodeGenFunction &CGF, PrePostActionTy &) {
3523  Desc);
3524  CGM.getCXXABI().registerGlobalDtor(CGF, RegUnregVar, UnRegFn, Desc);
3525  });
3526  if (CGM.supportsCOMDAT()) {
3527  // It is sufficient to call registration function only once, so create a
3528  // COMDAT group for registration/unregistration functions and associated
3529  // data. That would reduce startup time and code size. Registration
3530  // function serves as a COMDAT group key.
3531  auto ComdatKey = M.getOrInsertComdat(RegFn->getName());
3532  RegFn->setLinkage(llvm::GlobalValue::LinkOnceAnyLinkage);
3533  RegFn->setVisibility(llvm::GlobalValue::HiddenVisibility);
3534  RegFn->setComdat(ComdatKey);
3535  UnRegFn->setComdat(ComdatKey);
3536  DeviceImages->setComdat(ComdatKey);
3537  Desc->setComdat(ComdatKey);
3538  }
3539  return RegFn;
3540 }
3541 
3542 void CGOpenMPRuntime::createOffloadEntry(llvm::Constant *ID,
3543  llvm::Constant *Addr, uint64_t Size,
3544  int32_t Flags) {
3545  StringRef Name = Addr->getName();
3546  auto *TgtOffloadEntryType = cast<llvm::StructType>(
3547  CGM.getTypes().ConvertTypeForMem(getTgtOffloadEntryQTy()));
3548  llvm::LLVMContext &C = CGM.getModule().getContext();
3549  llvm::Module &M = CGM.getModule();
3550 
3551  // Make sure the address has the right type.
3552  llvm::Constant *AddrPtr = llvm::ConstantExpr::getBitCast(ID, CGM.VoidPtrTy);
3553 
3554  // Create constant string with the name.
3555  llvm::Constant *StrPtrInit = llvm::ConstantDataArray::getString(C, Name);
3556 
3557  llvm::GlobalVariable *Str =
3558  new llvm::GlobalVariable(M, StrPtrInit->getType(), /*isConstant=*/true,
3560  ".omp_offloading.entry_name");
3561  Str->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3562  llvm::Constant *StrPtr = llvm::ConstantExpr::getBitCast(Str, CGM.Int8PtrTy);
3563 
3564  // We can't have any padding between symbols, so we need to have 1-byte
3565  // alignment.
3566  auto Align = CharUnits::fromQuantity(1);
3567 
3568  // Create the entry struct.
3569  ConstantInitBuilder EntryBuilder(CGM);
3570  auto EntryInit = EntryBuilder.beginStruct(TgtOffloadEntryType);
3571  EntryInit.add(AddrPtr);
3572  EntryInit.add(StrPtr);
3573  EntryInit.addInt(CGM.SizeTy, Size);
3574  EntryInit.addInt(CGM.Int32Ty, Flags);
3575  EntryInit.addInt(CGM.Int32Ty, 0);
3576  llvm::GlobalVariable *Entry = EntryInit.finishAndCreateGlobal(
3577  Twine(".omp_offloading.entry.") + Name, Align,
3578  /*constant*/ true, llvm::GlobalValue::ExternalLinkage);
3579 
3580  // The entry has to be created in the section the linker expects it to be.
3581  Entry->setSection(".omp_offloading.entries");
3582 }
3583 
3584 void CGOpenMPRuntime::createOffloadEntriesAndInfoMetadata() {
3585  // Emit the offloading entries and metadata so that the device codegen side
3586  // can easily figure out what to emit. The produced metadata looks like
3587  // this:
3588  //
3589  // !omp_offload.info = !{!1, ...}
3590  //
3591  // Right now we only generate metadata for functions that contain target
3592  // regions.
3593 
3594  // If we do not have entries, we don't need to do anything.
3595  if (OffloadEntriesInfoManager.empty())
3596  return;
3597 
3598  llvm::Module &M = CGM.getModule();
3599  llvm::LLVMContext &C = M.getContext();
3601  OrderedEntries(OffloadEntriesInfoManager.size());
3602 
3603  // Create the offloading info metadata node.
3604  llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("omp_offload.info");
3605 
3606  // Auxiliary methods to create metadata values and strings.
3607  auto getMDInt = [&](unsigned v) {
3608  return llvm::ConstantAsMetadata::get(
3609  llvm::ConstantInt::get(llvm::Type::getInt32Ty(C), v));
3610  };
3611 
3612  auto getMDString = [&](StringRef v) { return llvm::MDString::get(C, v); };
3613 
3614  // Create function that emits metadata for each target region entry;
3615  auto &&TargetRegionMetadataEmitter = [&](
3616  unsigned DeviceID, unsigned FileID, StringRef ParentName, unsigned Line,
3619  // Generate metadata for target regions. Each entry of this metadata
3620  // contains:
3621  // - Entry 0 -> Kind of this type of metadata (0).
3622  // - Entry 1 -> Device ID of the file where the entry was identified.
3623  // - Entry 2 -> File ID of the file where the entry was identified.
3624  // - Entry 3 -> Mangled name of the function where the entry was identified.
3625  // - Entry 4 -> Line in the file where the entry was identified.
3626  // - Entry 5 -> Order the entry was created.
3627  // The first element of the metadata node is the kind.
3628  Ops.push_back(getMDInt(E.getKind()));
3629  Ops.push_back(getMDInt(DeviceID));
3630  Ops.push_back(getMDInt(FileID));
3631  Ops.push_back(getMDString(ParentName));
3632  Ops.push_back(getMDInt(Line));
3633  Ops.push_back(getMDInt(E.getOrder()));
3634 
3635  // Save this entry in the right position of the ordered entries array.
3636  OrderedEntries[E.getOrder()] = &E;
3637 
3638  // Add metadata to the named metadata node.
3639  MD->addOperand(llvm::MDNode::get(C, Ops));
3640  };
3641 
3642  OffloadEntriesInfoManager.actOnTargetRegionEntriesInfo(
3643  TargetRegionMetadataEmitter);
3644 
3645  for (auto *E : OrderedEntries) {
3646  assert(E && "All ordered entries must exist!");
3647  if (auto *CE =
3648  dyn_cast<OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion>(
3649  E)) {
3650  assert(CE->getID() && CE->getAddress() &&
3651  "Entry ID and Addr are invalid!");
3652  createOffloadEntry(CE->getID(), CE->getAddress(), /*Size=*/0);
3653  } else
3654  llvm_unreachable("Unsupported entry kind.");
3655  }
3656 }
3657 
3658 /// \brief Loads all the offload entries information from the host IR
3659 /// metadata.
3661  // If we are in target mode, load the metadata from the host IR. This code has
3662  // to match the metadata creation in createOffloadEntriesAndInfoMetadata().
3663 
3664  if (!CGM.getLangOpts().OpenMPIsDevice)
3665  return;
3666 
3667  if (CGM.getLangOpts().OMPHostIRFile.empty())
3668  return;
3669 
3670  auto Buf = llvm::MemoryBuffer::getFile(CGM.getLangOpts().OMPHostIRFile);
3671  if (Buf.getError())
3672  return;
3673 
3674  llvm::LLVMContext C;
3675  auto ME = expectedToErrorOrAndEmitErrors(
3676  C, llvm::parseBitcodeFile(Buf.get()->getMemBufferRef(), C));
3677 
3678  if (ME.getError())
3679  return;
3680 
3681  llvm::NamedMDNode *MD = ME.get()->getNamedMetadata("omp_offload.info");
3682  if (!MD)
3683  return;
3684 
3685  for (auto I : MD->operands()) {
3686  llvm::MDNode *MN = cast<llvm::MDNode>(I);
3687 
3688  auto getMDInt = [&](unsigned Idx) {
3689  llvm::ConstantAsMetadata *V =
3690  cast<llvm::ConstantAsMetadata>(MN->getOperand(Idx));
3691  return cast<llvm::ConstantInt>(V->getValue())->getZExtValue();
3692  };
3693 
3694  auto getMDString = [&](unsigned Idx) {
3695  llvm::MDString *V = cast<llvm::MDString>(MN->getOperand(Idx));
3696  return V->getString();
3697  };
3698 
3699  switch (getMDInt(0)) {
3700  default:
3701  llvm_unreachable("Unexpected metadata!");
3702  break;
3706  /*DeviceID=*/getMDInt(1), /*FileID=*/getMDInt(2),
3707  /*ParentName=*/getMDString(3), /*Line=*/getMDInt(4),
3708  /*Order=*/getMDInt(5));
3709  break;
3710  }
3711  }
3712 }
3713 
3714 void CGOpenMPRuntime::emitKmpRoutineEntryT(QualType KmpInt32Ty) {
3715  if (!KmpRoutineEntryPtrTy) {
3716  // Build typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *); type.
3717  auto &C = CGM.getContext();
3718  QualType KmpRoutineEntryTyArgs[] = {KmpInt32Ty, C.VoidPtrTy};
3719  FunctionProtoType::ExtProtoInfo EPI;
3720  KmpRoutineEntryPtrQTy = C.getPointerType(
3721  C.getFunctionType(KmpInt32Ty, KmpRoutineEntryTyArgs, EPI));
3722  KmpRoutineEntryPtrTy = CGM.getTypes().ConvertType(KmpRoutineEntryPtrQTy);
3723  }
3724 }
3725 
3726 static FieldDecl *addFieldToRecordDecl(ASTContext &C, DeclContext *DC,
3727  QualType FieldTy) {
3728  auto *Field = FieldDecl::Create(
3729  C, DC, SourceLocation(), SourceLocation(), /*Id=*/nullptr, FieldTy,
3730  C.getTrivialTypeSourceInfo(FieldTy, SourceLocation()),
3731  /*BW=*/nullptr, /*Mutable=*/false, /*InitStyle=*/ICIS_NoInit);
3732  Field->setAccess(AS_public);
3733  DC->addDecl(Field);
3734  return Field;
3735 }
3736 
3737 QualType CGOpenMPRuntime::getTgtOffloadEntryQTy() {
3738 
3739  // Make sure the type of the entry is already created. This is the type we
3740  // have to create:
3741  // struct __tgt_offload_entry{
3742  // void *addr; // Pointer to the offload entry info.
3743  // // (function or global)
3744  // char *name; // Name of the function or global.
3745  // size_t size; // Size of the entry info (0 if it is a function).
3746  // int32_t flags; // Flags associated with the entry, e.g. 'link'.
3747  // int32_t reserved; // Reserved, to use by the runtime library.
3748  // };
3749  if (TgtOffloadEntryQTy.isNull()) {
3750  ASTContext &C = CGM.getContext();
3751  auto *RD = C.buildImplicitRecord("__tgt_offload_entry");
3752  RD->startDefinition();
3753  addFieldToRecordDecl(C, RD, C.VoidPtrTy);
3754  addFieldToRecordDecl(C, RD, C.getPointerType(C.CharTy));
3755  addFieldToRecordDecl(C, RD, C.getSizeType());
3756  addFieldToRecordDecl(
3757  C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
3758  addFieldToRecordDecl(
3759  C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
3760  RD->completeDefinition();
3761  RD->addAttr(PackedAttr::CreateImplicit(C));
3762  TgtOffloadEntryQTy = C.getRecordType(RD);
3763  }
3764  return TgtOffloadEntryQTy;
3765 }
3766 
3767 QualType CGOpenMPRuntime::getTgtDeviceImageQTy() {
3768  // These are the types we need to build:
3769  // struct __tgt_device_image{
3770  // void *ImageStart; // Pointer to the target code start.
3771  // void *ImageEnd; // Pointer to the target code end.
3772  // // We also add the host entries to the device image, as it may be useful
3773  // // for the target runtime to have access to that information.
3774  // __tgt_offload_entry *EntriesBegin; // Begin of the table with all
3775  // // the entries.
3776  // __tgt_offload_entry *EntriesEnd; // End of the table with all the
3777  // // entries (non inclusive).
3778  // };
3779  if (TgtDeviceImageQTy.isNull()) {
3780  ASTContext &C = CGM.getContext();
3781  auto *RD = C.buildImplicitRecord("__tgt_device_image");
3782  RD->startDefinition();
3783  addFieldToRecordDecl(C, RD, C.VoidPtrTy);
3784  addFieldToRecordDecl(C, RD, C.VoidPtrTy);
3785  addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
3786  addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
3787  RD->completeDefinition();
3788  TgtDeviceImageQTy = C.getRecordType(RD);
3789  }
3790  return TgtDeviceImageQTy;
3791 }
3792 
3793 QualType CGOpenMPRuntime::getTgtBinaryDescriptorQTy() {
3794  // struct __tgt_bin_desc{
3795  // int32_t NumDevices; // Number of devices supported.
3796  // __tgt_device_image *DeviceImages; // Arrays of device images
3797  // // (one per device).
3798  // __tgt_offload_entry *EntriesBegin; // Begin of the table with all the
3799  // // entries.
3800  // __tgt_offload_entry *EntriesEnd; // End of the table with all the
3801  // // entries (non inclusive).
3802  // };
3803  if (TgtBinaryDescriptorQTy.isNull()) {
3804  ASTContext &C = CGM.getContext();
3805  auto *RD = C.buildImplicitRecord("__tgt_bin_desc");
3806  RD->startDefinition();
3807  addFieldToRecordDecl(
3808  C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
3809  addFieldToRecordDecl(C, RD, C.getPointerType(getTgtDeviceImageQTy()));
3810  addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
3811  addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
3812  RD->completeDefinition();
3813  TgtBinaryDescriptorQTy = C.getRecordType(RD);
3814  }
3815  return TgtBinaryDescriptorQTy;
3816 }
3817 
3818 namespace {
3819 struct PrivateHelpersTy {
3820  PrivateHelpersTy(const VarDecl *Original, const VarDecl *PrivateCopy,
3821  const VarDecl *PrivateElemInit)
3822  : Original(Original), PrivateCopy(PrivateCopy),
3823  PrivateElemInit(PrivateElemInit) {}
3824  const VarDecl *Original;
3825  const VarDecl *PrivateCopy;
3826  const VarDecl *PrivateElemInit;
3827 };
3828 typedef std::pair<CharUnits /*Align*/, PrivateHelpersTy> PrivateDataTy;
3829 } // anonymous namespace
3830 
3831 static RecordDecl *
3832 createPrivatesRecordDecl(CodeGenModule &CGM, ArrayRef<PrivateDataTy> Privates) {
3833  if (!Privates.empty()) {
3834  auto &C = CGM.getContext();
3835  // Build struct .kmp_privates_t. {
3836  // /* private vars */
3837  // };
3838  auto *RD = C.buildImplicitRecord(".kmp_privates.t");
3839  RD->startDefinition();
3840  for (auto &&Pair : Privates) {
3841  auto *VD = Pair.second.Original;
3842  auto Type = VD->getType();
3843  Type = Type.getNonReferenceType();
3844  auto *FD = addFieldToRecordDecl(C, RD, Type);
3845  if (VD->hasAttrs()) {
3846  for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
3847  E(VD->getAttrs().end());
3848  I != E; ++I)
3849  FD->addAttr(*I);
3850  }
3851  }
3852  RD->completeDefinition();
3853  return RD;
3854  }
3855  return nullptr;
3856 }
3857 
3858 static RecordDecl *
3859 createKmpTaskTRecordDecl(CodeGenModule &CGM, OpenMPDirectiveKind Kind,
3860  QualType KmpInt32Ty,
3861  QualType KmpRoutineEntryPointerQTy) {
3862  auto &C = CGM.getContext();
3863  // Build struct kmp_task_t {
3864  // void * shareds;
3865  // kmp_routine_entry_t routine;
3866  // kmp_int32 part_id;
3867  // kmp_cmplrdata_t data1;
3868  // kmp_cmplrdata_t data2;
3869  // For taskloops additional fields:
3870  // kmp_uint64 lb;
3871  // kmp_uint64 ub;
3872  // kmp_int64 st;
3873  // kmp_int32 liter;
3874  // void * reductions;
3875  // };
3876  auto *UD = C.buildImplicitRecord("kmp_cmplrdata_t", TTK_Union);
3877  UD->startDefinition();
3878  addFieldToRecordDecl(C, UD, KmpInt32Ty);
3879  addFieldToRecordDecl(C, UD, KmpRoutineEntryPointerQTy);
3880  UD->completeDefinition();
3881  QualType KmpCmplrdataTy = C.getRecordType(UD);
3882  auto *RD = C.buildImplicitRecord("kmp_task_t");
3883  RD->startDefinition();
3884  addFieldToRecordDecl(C, RD, C.VoidPtrTy);
3885  addFieldToRecordDecl(C, RD, KmpRoutineEntryPointerQTy);
3886  addFieldToRecordDecl(C, RD, KmpInt32Ty);
3887  addFieldToRecordDecl(C, RD, KmpCmplrdataTy);
3888  addFieldToRecordDecl(C, RD, KmpCmplrdataTy);
3889  if (isOpenMPTaskLoopDirective(Kind)) {
3890  QualType KmpUInt64Ty =
3891  CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0);
3892  QualType KmpInt64Ty =
3893  CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
3894  addFieldToRecordDecl(C, RD, KmpUInt64Ty);
3895  addFieldToRecordDecl(C, RD, KmpUInt64Ty);
3896  addFieldToRecordDecl(C, RD, KmpInt64Ty);
3897  addFieldToRecordDecl(C, RD, KmpInt32Ty);
3898  addFieldToRecordDecl(C, RD, C.VoidPtrTy);
3899  }
3900  RD->completeDefinition();
3901  return RD;
3902 }
3903 
3904 static RecordDecl *
3905 createKmpTaskTWithPrivatesRecordDecl(CodeGenModule &CGM, QualType KmpTaskTQTy,
3906  ArrayRef<PrivateDataTy> Privates) {
3907  auto &C = CGM.getContext();
3908  // Build struct kmp_task_t_with_privates {
3909  // kmp_task_t task_data;
3910  // .kmp_privates_t. privates;
3911  // };
3912  auto *RD = C.buildImplicitRecord("kmp_task_t_with_privates");
3913  RD->startDefinition();
3914  addFieldToRecordDecl(C, RD, KmpTaskTQTy);
3915  if (auto *PrivateRD = createPrivatesRecordDecl(CGM, Privates)) {
3916  addFieldToRecordDecl(C, RD, C.getRecordType(PrivateRD));
3917  }
3918  RD->completeDefinition();
3919  return RD;
3920 }
3921 
3922 /// \brief Emit a proxy function which accepts kmp_task_t as the second
3923 /// argument.
3924 /// \code
3925 /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
3926 /// TaskFunction(gtid, tt->part_id, &tt->privates, task_privates_map, tt,
3927 /// For taskloops:
3928 /// tt->task_data.lb, tt->task_data.ub, tt->task_data.st, tt->task_data.liter,
3929 /// tt->reductions, tt->shareds);
3930 /// return 0;
3931 /// }
3932 /// \endcode
3933 static llvm::Value *
3934 emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
3935  OpenMPDirectiveKind Kind, QualType KmpInt32Ty,
3936  QualType KmpTaskTWithPrivatesPtrQTy,
3937  QualType KmpTaskTWithPrivatesQTy, QualType KmpTaskTQTy,
3938  QualType SharedsPtrTy, llvm::Value *TaskFunction,
3939  llvm::Value *TaskPrivatesMap) {
3940  auto &C = CGM.getContext();
3941  FunctionArgList Args;
3942  ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty,
3944  ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3945  KmpTaskTWithPrivatesPtrQTy.withRestrict(),
3947  Args.push_back(&GtidArg);
3948  Args.push_back(&TaskTypeArg);
3949  auto &TaskEntryFnInfo =
3950  CGM.getTypes().arrangeBuiltinFunctionDeclaration(KmpInt32Ty, Args);
3951  auto *TaskEntryTy = CGM.getTypes().GetFunctionType(TaskEntryFnInfo);
3952  auto *TaskEntry =
3954  ".omp_task_entry.", &CGM.getModule());
3955  CGM.SetInternalFunctionAttributes(/*D=*/nullptr, TaskEntry, TaskEntryFnInfo);
3956  CodeGenFunction CGF(CGM);
3957  CGF.StartFunction(GlobalDecl(), KmpInt32Ty, TaskEntry, TaskEntryFnInfo, Args,
3958  Loc, Loc);
3959 
3960  // TaskFunction(gtid, tt->task_data.part_id, &tt->privates, task_privates_map,
3961  // tt,
3962  // For taskloops:
3963  // tt->task_data.lb, tt->task_data.ub, tt->task_data.st, tt->task_data.liter,
3964  // tt->task_data.shareds);
3965  auto *GtidParam = CGF.EmitLoadOfScalar(
3966  CGF.GetAddrOfLocalVar(&GtidArg), /*Volatile=*/false, KmpInt32Ty, Loc);
3967  LValue TDBase = CGF.EmitLoadOfPointerLValue(
3968  CGF.GetAddrOfLocalVar(&TaskTypeArg),
3969  KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
3970  auto *KmpTaskTWithPrivatesQTyRD =
3971  cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
3972  LValue Base =
3973  CGF.EmitLValueForField(TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
3974  auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
3975  auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
3976  auto PartIdLVal = CGF.EmitLValueForField(Base, *PartIdFI);
3977  auto *PartidParam = PartIdLVal.getPointer();
3978 
3979  auto SharedsFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTShareds);
3980  auto SharedsLVal = CGF.EmitLValueForField(Base, *SharedsFI);
3981  auto *SharedsParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3982  CGF.EmitLoadOfScalar(SharedsLVal, Loc),
3983  CGF.ConvertTypeForMem(SharedsPtrTy));
3984 
3985  auto PrivatesFI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin(), 1);
3986  llvm::Value *PrivatesParam;
3987  if (PrivatesFI != KmpTaskTWithPrivatesQTyRD->field_end()) {
3988  auto PrivatesLVal = CGF.EmitLValueForField(TDBase, *PrivatesFI);
3989  PrivatesParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3990  PrivatesLVal.getPointer(), CGF.VoidPtrTy);
3991  } else
3992  PrivatesParam = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
3993 
3994  llvm::Value *CommonArgs[] = {GtidParam, PartidParam, PrivatesParam,
3995  TaskPrivatesMap,
3996  CGF.Builder
3998  TDBase.getAddress(), CGF.VoidPtrTy)
3999  .getPointer()};
4000  SmallVector<llvm::Value *, 16> CallArgs(std::begin(CommonArgs),
4001  std::end(CommonArgs));
4002  if (isOpenMPTaskLoopDirective(Kind)) {
4003  auto LBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound);
4004  auto LBLVal = CGF.EmitLValueForField(Base, *LBFI);
4005  auto *LBParam = CGF.EmitLoadOfScalar(LBLVal, Loc);
4006  auto UBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound);
4007  auto UBLVal = CGF.EmitLValueForField(Base, *UBFI);
4008  auto *UBParam = CGF.EmitLoadOfScalar(UBLVal, Loc);
4009  auto StFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTStride);
4010  auto StLVal = CGF.EmitLValueForField(Base, *StFI);
4011  auto *StParam = CGF.EmitLoadOfScalar(StLVal, Loc);
4012  auto LIFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLastIter);
4013  auto LILVal = CGF.EmitLValueForField(Base, *LIFI);
4014  auto *LIParam = CGF.EmitLoadOfScalar(LILVal, Loc);
4015  auto RFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTReductions);
4016  auto RLVal = CGF.EmitLValueForField(Base, *RFI);
4017  auto *RParam = CGF.EmitLoadOfScalar(RLVal, Loc);
4018  CallArgs.push_back(LBParam);
4019  CallArgs.push_back(UBParam);
4020  CallArgs.push_back(StParam);
4021  CallArgs.push_back(LIParam);
4022  CallArgs.push_back(RParam);
4023  }
4024  CallArgs.push_back(SharedsParam);
4025 
4026  CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, Loc, TaskFunction,
4027  CallArgs);
4028  CGF.EmitStoreThroughLValue(
4029  RValue::get(CGF.Builder.getInt32(/*C=*/0)),
4030  CGF.MakeAddrLValue(CGF.ReturnValue, KmpInt32Ty));
4031  CGF.FinishFunction();
4032  return TaskEntry;
4033 }
4034 
4035 static llvm::Value *emitDestructorsFunction(CodeGenModule &CGM,
4036  SourceLocation Loc,
4037  QualType KmpInt32Ty,
4038  QualType KmpTaskTWithPrivatesPtrQTy,
4039  QualType KmpTaskTWithPrivatesQTy) {
4040  auto &C = CGM.getContext();
4041  FunctionArgList Args;
4042  ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty,
4044  ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4045  KmpTaskTWithPrivatesPtrQTy.withRestrict(),
4047  Args.push_back(&GtidArg);
4048  Args.push_back(&TaskTypeArg);
4049  auto &DestructorFnInfo =
4050  CGM.getTypes().arrangeBuiltinFunctionDeclaration(KmpInt32Ty, Args);
4051  auto *DestructorFnTy = CGM.getTypes().GetFunctionType(DestructorFnInfo);
4052  auto *DestructorFn =
4054  ".omp_task_destructor.", &CGM.getModule());
4055  CGM.SetInternalFunctionAttributes(/*D=*/nullptr, DestructorFn,
4056  DestructorFnInfo);
4057  CodeGenFunction CGF(CGM);
4058  CGF.StartFunction(GlobalDecl(), KmpInt32Ty, DestructorFn, DestructorFnInfo,
4059  Args, Loc, Loc);
4060 
4062  CGF.GetAddrOfLocalVar(&TaskTypeArg),
4063  KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
4064  auto *KmpTaskTWithPrivatesQTyRD =
4065  cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
4066  auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
4067  Base = CGF.EmitLValueForField(Base, *FI);
4068  for (auto *Field :
4069  cast<RecordDecl>(FI->getType()->getAsTagDecl())->fields()) {
4070  if (auto DtorKind = Field->getType().isDestructedType()) {
4071  auto FieldLValue = CGF.EmitLValueForField(Base, Field);
4072  CGF.pushDestroy(DtorKind, FieldLValue.getAddress(), Field->getType());
4073  }
4074  }
4075  CGF.FinishFunction();
4076  return DestructorFn;
4077 }
4078 
4079 /// \brief Emit a privates mapping function for correct handling of private and
4080 /// firstprivate variables.
4081 /// \code
4082 /// void .omp_task_privates_map.(const .privates. *noalias privs, <ty1>
4083 /// **noalias priv1,..., <tyn> **noalias privn) {
4084 /// *priv1 = &.privates.priv1;
4085 /// ...;
4086 /// *privn = &.privates.privn;
4087 /// }
4088 /// \endcode
4089 static llvm::Value *
4090 emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
4091  ArrayRef<const Expr *> PrivateVars,
4092  ArrayRef<const Expr *> FirstprivateVars,
4093  ArrayRef<const Expr *> LastprivateVars,
4094  QualType PrivatesQTy,
4095  ArrayRef<PrivateDataTy> Privates) {
4096  auto &C = CGM.getContext();
4097  FunctionArgList Args;
4098  ImplicitParamDecl TaskPrivatesArg(
4099  C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4100  C.getPointerType(PrivatesQTy).withConst().withRestrict(),
4102  Args.push_back(&TaskPrivatesArg);
4103  llvm::DenseMap<const VarDecl *, unsigned> PrivateVarsPos;
4104  unsigned Counter = 1;
4105  for (auto *E: PrivateVars) {
4106  Args.push_back(ImplicitParamDecl::Create(
4107  C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4108  C.getPointerType(C.getPointerType(E->getType()))
4109  .withConst()
4110  .withRestrict(),
4112  auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4113  PrivateVarsPos[VD] = Counter;
4114  ++Counter;
4115  }
4116  for (auto *E : FirstprivateVars) {
4117  Args.push_back(ImplicitParamDecl::Create(
4118  C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4119  C.getPointerType(C.getPointerType(E->getType()))
4120  .withConst()
4121  .withRestrict(),
4123  auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4124  PrivateVarsPos[VD] = Counter;
4125  ++Counter;
4126  }
4127  for (auto *E: LastprivateVars) {
4128  Args.push_back(ImplicitParamDecl::Create(
4129  C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4130  C.getPointerType(C.getPointerType(E->getType()))
4131  .withConst()
4132  .withRestrict(),
4134  auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4135  PrivateVarsPos[VD] = Counter;
4136  ++Counter;
4137  }
4138  auto &TaskPrivatesMapFnInfo =
4139  CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
4140  auto *TaskPrivatesMapTy =
4141  CGM.getTypes().GetFunctionType(TaskPrivatesMapFnInfo);
4142  auto *TaskPrivatesMap = llvm::Function::Create(
4143  TaskPrivatesMapTy, llvm::GlobalValue::InternalLinkage,
4144  ".omp_task_privates_map.", &CGM.getModule());
4145  CGM.SetInternalFunctionAttributes(/*D=*/nullptr, TaskPrivatesMap,
4146  TaskPrivatesMapFnInfo);
4147  TaskPrivatesMap->removeFnAttr(llvm::Attribute::NoInline);
4148  TaskPrivatesMap->removeFnAttr(llvm::Attribute::OptimizeNone);
4149  TaskPrivatesMap->addFnAttr(llvm::Attribute::AlwaysInline);
4150  CodeGenFunction CGF(CGM);
4151  CGF.StartFunction(GlobalDecl(), C.VoidTy, TaskPrivatesMap,
4152  TaskPrivatesMapFnInfo, Args, Loc, Loc);
4153 
4154  // *privi = &.privates.privi;
4156  CGF.GetAddrOfLocalVar(&TaskPrivatesArg),
4157  TaskPrivatesArg.getType()->castAs<PointerType>());
4158  auto *PrivatesQTyRD = cast<RecordDecl>(PrivatesQTy->getAsTagDecl());
4159  Counter = 0;
4160  for (auto *Field : PrivatesQTyRD->fields()) {
4161  auto FieldLVal = CGF.EmitLValueForField(Base, Field);
4162  auto *VD = Args[PrivateVarsPos[Privates[Counter].second.Original]];
4163  auto RefLVal = CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
4164  auto RefLoadLVal = CGF.EmitLoadOfPointerLValue(
4165  RefLVal.getAddress(), RefLVal.getType()->castAs<PointerType>());
4166  CGF.EmitStoreOfScalar(FieldLVal.getPointer(), RefLoadLVal);
4167  ++Counter;
4168  }
4169  CGF.FinishFunction();
4170  return TaskPrivatesMap;
4171 }
4172 
4173 static bool stable_sort_comparator(const PrivateDataTy P1,
4174  const PrivateDataTy P2) {
4175  return P1.first > P2.first;
4176 }
4177 
4178 /// Emit initialization for private variables in task-based directives.
4179 static void emitPrivatesInit(CodeGenFunction &CGF,
4180  const OMPExecutableDirective &D,
4181  Address KmpTaskSharedsPtr, LValue TDBase,
4182  const RecordDecl *KmpTaskTWithPrivatesQTyRD,
4183  QualType SharedsTy, QualType SharedsPtrTy,
4184  const OMPTaskDataTy &Data,
4185  ArrayRef<PrivateDataTy> Privates, bool ForDup) {
4186  auto &C = CGF.getContext();
4187  auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
4188  LValue PrivatesBase = CGF.EmitLValueForField(TDBase, *FI);
4190  ? OMPD_taskloop
4191  : OMPD_task;
4192  const CapturedStmt &CS = *D.getCapturedStmt(Kind);
4193  CodeGenFunction::CGCapturedStmtInfo CapturesInfo(CS);
4194  LValue SrcBase;
4195  bool IsTargetTask =
4198  // For target-based directives skip 3 firstprivate arrays BasePointersArray,
4199  // PointersArray and SizesArray. The original variables for these arrays are
4200  // not captured and we get their addresses explicitly.
4201  if ((!IsTargetTask && !Data.FirstprivateVars.empty()) ||
4202  (IsTargetTask && KmpTaskSharedsPtr.isValid())) {
4203  SrcBase = CGF.MakeAddrLValue(
4205  KmpTaskSharedsPtr, CGF.ConvertTypeForMem(SharedsPtrTy)),
4206  SharedsTy);
4207  }
4208  FI = cast<RecordDecl>(FI->getType()->getAsTagDecl())->field_begin();
4209  for (auto &&Pair : Privates) {
4210  auto *VD = Pair.second.PrivateCopy;
4211  auto *Init = VD->getAnyInitializer();
4212  if (Init && (!ForDup || (isa<CXXConstructExpr>(Init) &&
4213  !CGF.isTrivialInitializer(Init)))) {
4214  LValue PrivateLValue = CGF.EmitLValueForField(PrivatesBase, *FI);
4215  if (auto *Elem = Pair.second.PrivateElemInit) {
4216  auto *OriginalVD = Pair.second.Original;
4217  // Check if the variable is the target-based BasePointersArray,
4218  // PointersArray or SizesArray.
4219  LValue SharedRefLValue;
4220  QualType Type = OriginalVD->getType();
4221  auto *SharedField = CapturesInfo.lookup(OriginalVD);
4222  if (IsTargetTask && !SharedField) {
4223  assert(isa<ImplicitParamDecl>(OriginalVD) &&
4224  isa<CapturedDecl>(OriginalVD->getDeclContext()) &&
4225  cast<CapturedDecl>(OriginalVD->getDeclContext())
4226  ->getNumParams() == 0 &&
4227  isa<TranslationUnitDecl>(
4228  cast<CapturedDecl>(OriginalVD->getDeclContext())
4229  ->getDeclContext()) &&
4230  "Expected artificial target data variable.");
4231  SharedRefLValue =
4232  CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(OriginalVD), Type);
4233  } else {
4234  SharedRefLValue = CGF.EmitLValueForField(SrcBase, SharedField);
4235  SharedRefLValue = CGF.MakeAddrLValue(
4236  Address(SharedRefLValue.getPointer(), C.getDeclAlign(OriginalVD)),
4237  SharedRefLValue.getType(), LValueBaseInfo(AlignmentSource::Decl),
4238  SharedRefLValue.getTBAAInfo());
4239  }
4240  if (Type->isArrayType()) {
4241  // Initialize firstprivate array.
4242  if (!isa<CXXConstructExpr>(Init) || CGF.isTrivialInitializer(Init)) {
4243  // Perform simple memcpy.
4244  CGF.EmitAggregateAssign(PrivateLValue, SharedRefLValue, Type);
4245  } else {
4246  // Initialize firstprivate array using element-by-element
4247  // initialization.
4249  PrivateLValue.getAddress(), SharedRefLValue.getAddress(), Type,
4250  [&CGF, Elem, Init, &CapturesInfo](Address DestElement,
4251  Address SrcElement) {
4252  // Clean up any temporaries needed by the initialization.
4253  CodeGenFunction::OMPPrivateScope InitScope(CGF);
4254  InitScope.addPrivate(
4255  Elem, [SrcElement]() -> Address { return SrcElement; });
4256  (void)InitScope.Privatize();
4257  // Emit initialization for single element.
4259  CGF, &CapturesInfo);
4260  CGF.EmitAnyExprToMem(Init, DestElement,
4261  Init->getType().getQualifiers(),
4262  /*IsInitializer=*/false);
4263  });
4264  }
4265  } else {
4266  CodeGenFunction::OMPPrivateScope InitScope(CGF);
4267  InitScope.addPrivate(Elem, [SharedRefLValue]() -> Address {
4268  return SharedRefLValue.getAddress();
4269  });
4270  (void)InitScope.Privatize();
4271  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CapturesInfo);
4272  CGF.EmitExprAsInit(Init, VD, PrivateLValue,
4273  /*capturedByInit=*/false);
4274  }
4275  } else
4276  CGF.EmitExprAsInit(Init, VD, PrivateLValue, /*capturedByInit=*/false);
4277  }
4278  ++FI;
4279  }
4280 }
4281 
4282 /// Check if duplication function is required for taskloops.
4283 static bool checkInitIsRequired(CodeGenFunction &CGF,
4284  ArrayRef<PrivateDataTy> Privates) {
4285  bool InitRequired = false;
4286  for (auto &&Pair : Privates) {
4287  auto *VD = Pair.second.PrivateCopy;
4288  auto *Init = VD->getAnyInitializer();
4289  InitRequired = InitRequired || (Init && isa<CXXConstructExpr>(Init) &&
4290  !CGF.isTrivialInitializer(Init));
4291  }
4292  return InitRequired;
4293 }
4294 
4295 
4296 /// Emit task_dup function (for initialization of
4297 /// private/firstprivate/lastprivate vars and last_iter flag)
4298 /// \code
4299 /// void __task_dup_entry(kmp_task_t *task_dst, const kmp_task_t *task_src, int
4300 /// lastpriv) {
4301 /// // setup lastprivate flag
4302 /// task_dst->last = lastpriv;
4303 /// // could be constructor calls here...
4304 /// }
4305 /// \endcode
4306 static llvm::Value *
4307 emitTaskDupFunction(CodeGenModule &CGM, SourceLocation Loc,
4308  const OMPExecutableDirective &D,
4309  QualType KmpTaskTWithPrivatesPtrQTy,
4310  const RecordDecl *KmpTaskTWithPrivatesQTyRD,
4311  const RecordDecl *KmpTaskTQTyRD, QualType SharedsTy,
4312  QualType SharedsPtrTy, const OMPTaskDataTy &Data,
4313  ArrayRef<PrivateDataTy> Privates, bool WithLastIter) {
4314  auto &C = CGM.getContext();
4315  FunctionArgList Args;
4316  ImplicitParamDecl DstArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4317  KmpTaskTWithPrivatesPtrQTy,
4319  ImplicitParamDecl SrcArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4320  KmpTaskTWithPrivatesPtrQTy,
4322  ImplicitParamDecl LastprivArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
4324  Args.push_back(&DstArg);
4325  Args.push_back(&SrcArg);
4326  Args.push_back(&LastprivArg);
4327  auto &TaskDupFnInfo =
4328  CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
4329  auto *TaskDupTy = CGM.getTypes().GetFunctionType(TaskDupFnInfo);
4330  auto *TaskDup =
4332  ".omp_task_dup.", &CGM.getModule());
4333  CGM.SetInternalFunctionAttributes(/*D=*/nullptr, TaskDup, TaskDupFnInfo);
4334  CodeGenFunction CGF(CGM);
4335  CGF.StartFunction(GlobalDecl(), C.VoidTy, TaskDup, TaskDupFnInfo, Args, Loc,
4336  Loc);
4337 
4338  LValue TDBase = CGF.EmitLoadOfPointerLValue(
4339  CGF.GetAddrOfLocalVar(&DstArg),
4340  KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
4341  // task_dst->liter = lastpriv;
4342  if (WithLastIter) {
4343  auto LIFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLastIter);
4345  TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
4346  LValue LILVal = CGF.EmitLValueForField(Base, *LIFI);
4347  llvm::Value *Lastpriv = CGF.EmitLoadOfScalar(
4348  CGF.GetAddrOfLocalVar(&LastprivArg), /*Volatile=*/false, C.IntTy, Loc);
4349  CGF.EmitStoreOfScalar(Lastpriv, LILVal);
4350  }
4351 
4352  // Emit initial values for private copies (if any).
4353  assert(!Privates.empty());
4354  Address KmpTaskSharedsPtr = Address::invalid();
4355  if (!Data.FirstprivateVars.empty()) {
4356  LValue TDBase = CGF.EmitLoadOfPointerLValue(
4357  CGF.GetAddrOfLocalVar(&SrcArg),
4358  KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
4360  TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
4361  KmpTaskSharedsPtr = Address(
4363  Base, *std::next(KmpTaskTQTyRD->field_begin(),
4364  KmpTaskTShareds)),
4365  Loc),
4366  CGF.getNaturalTypeAlignment(SharedsTy));
4367  }
4368  emitPrivatesInit(CGF, D, KmpTaskSharedsPtr, TDBase, KmpTaskTWithPrivatesQTyRD,
4369  SharedsTy, SharedsPtrTy, Data, Privates, /*ForDup=*/true);
4370  CGF.FinishFunction();
4371  return TaskDup;
4372 }
4373 
4374 /// Checks if destructor function is required to be generated.
4375 /// \return true if cleanups are required, false otherwise.
4376 static bool
4377 checkDestructorsRequired(const RecordDecl *KmpTaskTWithPrivatesQTyRD) {
4378  bool NeedsCleanup = false;
4379  auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
4380  auto *PrivateRD = cast<RecordDecl>(FI->getType()->getAsTagDecl());
4381  for (auto *FD : PrivateRD->fields()) {
4382  NeedsCleanup = NeedsCleanup || FD->getType().isDestructedType();
4383  if (NeedsCleanup)
4384  break;
4385  }
4386  return NeedsCleanup;
4387 }
4388 
4389 CGOpenMPRuntime::TaskResultTy
4390 CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
4391  const OMPExecutableDirective &D,
4392  llvm::Value *TaskFunction, QualType SharedsTy,
4393  Address Shareds, const OMPTaskDataTy &Data) {
4394  auto &C = CGM.getContext();
4396  // Aggregate privates and sort them by the alignment.
4397  auto I = Data.PrivateCopies.begin();
4398  for (auto *E : Data.PrivateVars) {
4399  auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4400  Privates.push_back(std::make_pair(
4401  C.getDeclAlign(VD),
4402  PrivateHelpersTy(VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
4403  /*PrivateElemInit=*/nullptr)));
4404  ++I;
4405  }
4406  I = Data.FirstprivateCopies.begin();
4407  auto IElemInitRef = Data.FirstprivateInits.begin();
4408  for (auto *E : Data.FirstprivateVars) {
4409  auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4410  Privates.push_back(std::make_pair(
4411  C.getDeclAlign(VD),
4412  PrivateHelpersTy(
4413  VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
4414  cast<VarDecl>(cast<DeclRefExpr>(*IElemInitRef)->getDecl()))));
4415  ++I;
4416  ++IElemInitRef;
4417  }
4418  I = Data.LastprivateCopies.begin();
4419  for (auto *E : Data.LastprivateVars) {
4420  auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4421  Privates.push_back(std::make_pair(
4422  C.getDeclAlign(VD),
4423  PrivateHelpersTy(VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
4424  /*PrivateElemInit=*/nullptr)));
4425  ++I;
4426  }
4427  std::stable_sort(Privates.begin(), Privates.end(), stable_sort_comparator);
4428  auto KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
4429  // Build type kmp_routine_entry_t (if not built yet).
4430  emitKmpRoutineEntryT(KmpInt32Ty);
4431  // Build type kmp_task_t (if not built yet).
4433  if (SavedKmpTaskloopTQTy.isNull()) {
4435  CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
4436  }
4438  } else {
4439  assert((D.getDirectiveKind() == OMPD_task ||
4442  "Expected taskloop, task or target directive");
4443  if (SavedKmpTaskTQTy.isNull()) {
4444  SavedKmpTaskTQTy = C.getRecordType(createKmpTaskTRecordDecl(
4445  CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
4446  }
4448  }
4449  auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
4450  // Build particular struct kmp_task_t for the given task.
4451  auto *KmpTaskTWithPrivatesQTyRD =
4453  auto KmpTaskTWithPrivatesQTy = C.getRecordType(KmpTaskTWithPrivatesQTyRD);
4454  QualType KmpTaskTWithPrivatesPtrQTy =
4455  C.getPointerType(KmpTaskTWithPrivatesQTy);
4456  auto *KmpTaskTWithPrivatesTy = CGF.ConvertType(KmpTaskTWithPrivatesQTy);
4457  auto *KmpTaskTWithPrivatesPtrTy = KmpTaskTWithPrivatesTy->getPointerTo();
4458  auto *KmpTaskTWithPrivatesTySize = CGF.getTypeSize(KmpTaskTWithPrivatesQTy);
4459  QualType SharedsPtrTy = C.getPointerType(SharedsTy);
4460 
4461  // Emit initial values for private copies (if any).
4462  llvm::Value *TaskPrivatesMap = nullptr;
4463  auto *TaskPrivatesMapTy =
4464  std::next(cast<llvm::Function>(TaskFunction)->arg_begin(), 3)->getType();
4465  if (!Privates.empty()) {
4466  auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
4467  TaskPrivatesMap = emitTaskPrivateMappingFunction(
4468  CGM, Loc, Data.PrivateVars, Data.FirstprivateVars, Data.LastprivateVars,
4469  FI->getType(), Privates);
4470  TaskPrivatesMap = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4471  TaskPrivatesMap, TaskPrivatesMapTy);
4472  } else {
4473  TaskPrivatesMap = llvm::ConstantPointerNull::get(
4474  cast<llvm::PointerType>(TaskPrivatesMapTy));
4475  }
4476  // Build a proxy function kmp_int32 .omp_task_entry.(kmp_int32 gtid,
4477  // kmp_task_t *tt);
4478  auto *TaskEntry = emitProxyTaskFunction(
4479  CGM, Loc, D.getDirectiveKind(), KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy,
4480  KmpTaskTWithPrivatesQTy, KmpTaskTQTy, SharedsPtrTy, TaskFunction,
4481  TaskPrivatesMap);
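  // The proxy is what the runtime later invokes through the task_entry field;
  // it unpacks the task's shareds and the privates map and forwards them to
  // the outlined TaskFunction.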
4482 
4483  // Build call kmp_task_t * __kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
4484  // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
4485  // kmp_routine_entry_t *task_entry);
4486  // Task flags. Format is taken from
4487  // http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h,
4488  // description of kmp_tasking_flags struct.
4489  enum {
4490  TiedFlag = 0x1,
4491  FinalFlag = 0x2,
4492  DestructorsFlag = 0x8,
4493  PriorityFlag = 0x20
4494  };
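  // These bit values mirror the low bits of kmp_tasking_flags described in the
  // runtime's kmp.h (see the link above); the flags are OR'ed together, e.g. a
  // tied task with a priority clause is encoded as TiedFlag | PriorityFlag == 0x21.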
4495  unsigned Flags = Data.Tied ? TiedFlag : 0;
4496  bool NeedsCleanup = false;
4497  if (!Privates.empty()) {
4498  NeedsCleanup = checkDestructorsRequired(KmpTaskTWithPrivatesQTyRD);
4499  if (NeedsCleanup)
4500  Flags = Flags | DestructorsFlag;
4501  }
4502  if (Data.Priority.getInt())
4503  Flags = Flags | PriorityFlag;
4504  auto *TaskFlags =
4505  Data.Final.getPointer()
4506  ? CGF.Builder.CreateSelect(Data.Final.getPointer(),
4507  CGF.Builder.getInt32(FinalFlag),
4508  CGF.Builder.getInt32(/*C=*/0))
4509  : CGF.Builder.getInt32(Data.Final.getInt() ? FinalFlag : 0);
4510  TaskFlags = CGF.Builder.CreateOr(TaskFlags, CGF.Builder.getInt32(Flags));
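  // The 'final' clause may be a compile-time constant (carried in getInt()) or
  // a runtime expression (carried in getPointer()); in the latter case the
  // final bit is chosen with a select at run time and then OR'ed with the
  // statically known flags computed above.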
4511  auto *SharedsSize = CGM.getSize(C.getTypeSizeInChars(SharedsTy));
4512  llvm::Value *AllocArgs[] = {emitUpdateLocation(CGF, Loc),
4513  getThreadID(CGF, Loc), TaskFlags,
4514  KmpTaskTWithPrivatesTySize, SharedsSize,
4515  CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4516  TaskEntry, KmpRoutineEntryPtrTy)};
4517  auto *NewTask = CGF.EmitRuntimeCall(
4518  createRuntimeFunction(OMPRTL__kmpc_omp_task_alloc), AllocArgs);
4519  auto *NewTaskNewTaskTTy = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4520  NewTask, KmpTaskTWithPrivatesPtrTy);
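  // __kmpc_omp_task_alloc returns a kmp_task_t * whose allocation is
  // sizeof_kmp_task_t bytes, i.e. large enough for the kmp_task_t header plus
  // the trailing privates record, so the result is recast to the
  // privates-augmented pointer type before its fields are filled in.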
4521  LValue Base = CGF.MakeNaturalAlignAddrLValue(NewTaskNewTaskTTy,
4522  KmpTaskTWithPrivatesQTy);
4523  LValue TDBase =
4524  CGF.EmitLValueForField(Base, *KmpTaskTWithPrivatesQTyRD->field_begin());
4525  // Fill the data in the resulting kmp_task_t record.
4526  // Copy shareds if there are any.
4527  Address KmpTaskSharedsPtr = Address::invalid();
4528  if (!SharedsTy->getAsStructureType()->getDecl()->field_empty()) {
4529  KmpTaskSharedsPtr =
4530  Address(CGF.EmitLoadOfScalar(
4531  CGF.EmitLValueForField(
4532  TDBase, *std::next(KmpTaskTQTyRD->field_begin(),
4533  KmpTaskTShareds)),
4534  Loc),
4535  CGF.getNaturalTypeAlignment(SharedsTy));
4536  LValue Dest = CGF.MakeAddrLValue(KmpTaskSharedsPtr, SharedsTy);
4537  LValue Src = CGF.MakeAddrLValue(Shareds, SharedsTy);
4538  CGF.EmitAggregateCopy(Dest, Src, SharedsTy);
4539  }
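  // The captured shareds are copied wholesale into the task-owned shareds
  // block, so a deferred task does not keep pointers into the parent's stack
  // frame for this data.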
4540  // Emit initial values for private copies (if any).
4541  TaskResultTy Result;
4542  if (!Privates.empty()) {
4543  emitPrivatesInit(CGF, D, KmpTaskSharedsPtr, Base, KmpTaskTWithPrivatesQTyRD,
4544  SharedsTy, SharedsPtrTy, Data, Privates,
4545  /*ForDup=*/false);
4546  if (isOpenMPTaskLoopDirective(D.getDirectiveKind()) &&
4547  (!Data.LastprivateVars.empty() || checkInitIsRequired(CGF, Privates))) {
4548  Result.TaskDupFn = emitTaskDupFunction(
4549  CGM, Loc, D, KmpTaskTWithPrivatesPtrQTy, KmpTaskTWithPrivatesQTyRD,
4550  KmpTaskTQTyRD, SharedsTy, SharedsPtrTy, Data, Privates,
4551  /*WithLastIter=*/!Data.LastprivateVars.empty());
4552  }
4553  }
4554  // Fields of union "kmp_cmplrdata_t" for destructors and priority.
4555  enum { Priority = 0, Destructors = 1 };
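  // kmp_task_t carries two kmp_cmplrdata_t unions: data1 holds the destructor
  // thunk when the privates need cleanup, and data2 holds the priority value
  // when a priority clause is present.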
4556  // Provide pointer to function with destructors for privates.
4557  auto FI = std::next(KmpTaskTQTyRD->field_begin(), Data1);
4558  auto *KmpCmplrdataUD = (*FI)->getType()->getAsUnionType()->getDecl();
4559  if (NeedsCleanup) {
4560  llvm::Value *DestructorFn = emitDestructorsFunction(
4561  CGM, Loc, KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy,
4562  KmpTaskTWithPrivatesQTy);
4563  LValue Data1LV = CGF.EmitLValueForField(TDBase, *FI);
4564  LValue DestructorsLV = CGF.EmitLValueForField(
4565  Data1LV, *std::next(KmpCmplrdataUD->field_begin(), Destructors));
4566  CGF.EmitStoreOfScalar(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4567  DestructorFn, KmpRoutineEntryPtrTy),
4568  DestructorsLV);
4569  }
4570  // Set priority.
4571  if (Data.Priority.getInt()) {
4572  LValue Data2LV = CGF.EmitLValueForField(
4573  TDBase, *std::next(KmpTaskTQTyRD->field_begin(), Data2));
4574  LValue PriorityLV = CGF.EmitLValueForField(
4575  Data2LV, *std::next(KmpCmplrdataUD->field_begin(), Priority));
4576  CGF.EmitStoreOfScalar(Data.Priority.getPointer(), PriorityLV);
4577  }
4578  Result.NewTask = NewTask;
4579  Result.TaskEntry = TaskEntry;
4580  Result.NewTaskNewTaskTTy = NewTaskNewTaskTTy;
4581  Result.TDBase = TDBase;
4582  Result.KmpTaskTQTyRD = KmpTaskTQTyRD;
4583  return Result;
4584 }
4585 
4586 void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
4587  const OMPExecutableDirective &D,
4588  llvm::Value *TaskFunction,
4589  QualType SharedsTy, Address Shareds,
4590  const Expr *IfCond,
4591  const OMPTaskDataTy &Data) {
4592  if (!CGF.HaveInsertPoint())
4593  return;
4594 
4595  TaskResultTy Result =
4596  emitTaskInit(CGF, Loc, D, TaskFunction, SharedsTy, Shareds, Data);
4597  llvm::Value *NewTask = Result.NewTask;
4598  llvm::Value *TaskEntry = Result.TaskEntry;
4599  llvm::Value *NewTaskNewTaskTTy = Result.NewTaskNewTaskTTy;
4600  LValue TDBase = Result.TDBase;
4601  RecordDecl *KmpTaskTQTyRD = Result.KmpTaskTQTyRD;
4602  auto &C = CGM.getContext();
4603  // Process list of dependences.
4604  Address DependenciesArray = Address::invalid();
4605  unsigned NumDependencies = Data.Dependences.size();
4606  if (NumDependencies) {
4607  // Dependence kind for RTL.
4608  enum RTLDependenceKindTy { DepIn = 0x01, DepInOut = 0x3 };
4609  enum RTLDependInfoFieldsTy { BaseAddr, Len, Flags };
4610  RecordDecl *KmpDependInfoRD;
4611  QualType FlagsTy =
4612  C.getIntTypeForBitwidth(C.getTypeSize(C.BoolTy), /*Signed=*/false);
4613  llvm::Type *LLVMFlagsTy = CGF.ConvertTypeForMem(FlagsTy);
4614  if (KmpDependInfoTy.isNull()) {
4615  KmpDependInfoRD = C.buildImplicitRecord("kmp_depend_info");
4616  KmpDependInfoRD->startDefinition();
4617  addFieldToRecordDecl(C, KmpDependInfoRD, C.getIntPtrType());
4618  addFieldToRecordDecl(C, KmpDependInfoRD, C.getSizeType());
4619  addFieldToRecordDecl(C, KmpDependInfoRD, FlagsTy);
4620  KmpDependInfoRD->completeDefinition();
4621  KmpDependInfoTy = C.getRecordType(KmpDependInfoRD);
4622  } else
4623  KmpDependInfoRD = cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
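  // The implicit record built here corresponds to the runtime's dependence
  // descriptor: an intptr_t base address, a size_t length, and a bool-sized
  // unsigned flags field, roughly
  //   struct kmp_depend_info { intptr_t base_addr; size_t len; unsigned char flags; };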
4624  CharUnits DependencySize = C.getTypeSizeInChars(KmpDependInfoTy);
4625  // Define type kmp_depend_info[<Dependences.size()>];
4626  QualType KmpDependInfoArrayTy = C.getConstantArrayType(
4627  KmpDependInfoTy, llvm::APInt(/*numBits=*/64, NumDependencies),
4628  ArrayType::Normal, /*IndexTypeQuals=*/0);
4629  // kmp_depend_info[<Dependences.size()>] deps;
4630  DependenciesArray =
4631  CGF.CreateMemTemp(KmpDependInfoArrayTy, ".dep.arr.addr");
4632  for (unsigned i = 0; i < NumDependencies; ++i) {
4633  const Expr *E = Data.Dependences[i].second;
4634  auto Addr = CGF.EmitLValue(E);
4635  llvm::Value *Size;
4636  QualType Ty = E->getType();
4637  if (auto *ASE = dyn_cast<OMPArraySectionExpr>(E->IgnoreParenImpCasts())) {
4638  LValue UpAddrLVal =
4639  CGF.EmitOMPArraySectionExpr(ASE, /*LowerBound=*/false);
4640  llvm::Value *UpAddr =
4641  CGF.Builder.CreateConstGEP1_32(UpAddrLVal.getPointer(), /*Idx0=*/1);
4642  llvm::Value *LowIntPtr =
4643  CGF.Builder.CreatePtrToInt(Addr.getPointer(), CGM.SizeTy);
4644  llvm::Value *UpIntPtr = CGF.Builder.CreatePtrToInt(UpAddr, CGM.SizeTy);
4645  Size = CGF.Builder.CreateNUWSub(UpIntPtr, LowIntPtr);
4646  } else
4647  Size = CGF.getTypeSize(Ty);
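  // For an array section the length is computed in bytes as the distance from
  // the section's base address to one element past its upper bound; for any
  // other expression the static size of its type is used.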
4648  auto Base = CGF.MakeAddrLValue(
4649  CGF.Builder.CreateConstArrayGEP(DependenciesArray, i, DependencySize),
4650  KmpDependInfoTy);
4651  // deps[i].base_addr = &<Dependences[i].second>;
4652  auto BaseAddrLVal = CGF.EmitLValueForField(
4653  Base, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
4654  CGF.EmitStoreOfScalar(
4655  CGF.Builder.CreatePtrToInt(Addr.getPointer(), CGF.IntPtrTy),
4656  BaseAddrLVal);
4657  // deps[i].len = sizeof(<Dependences[i].second>);
4658  auto LenLVal = CGF.EmitLValueForField(
4659  Base, *std::next(KmpDependInfoRD->field_begin(), Len));
4660  CGF.EmitStoreOfScalar(Size, LenLVal);
4661  // deps[i].flags = <Dependences[i].first>;
4662  RTLDependenceKindTy DepKind;
4663  switch (Data.Dependences[i].first) {
4664  case OMPC_DEPEND_in:
4665  DepKind = DepIn;
4666  break;
4667  // Out and InOut dependencies must use the same code.
4668  case OMPC_DEPEND_out:
4669  case OMPC_DEPEND_inout:
4670  DepKind = DepInOut;
4671  break;
4672  case OMPC_DEPEND_source:
4673  case OMPC_DEPEND_sink:
4674  case OMPC_DEPEND_unknown:
4675  llvm_unreachable("Unknown task dependence type");
4676  }
4677  auto FlagsLVal = CGF.EmitLValueForField(
4678  Base, *std::next(KmpDependInfoRD->field_begin(), Flags));
4679  CGF.EmitStoreOfScalar(llvm::ConstantInt::get(LLVMFlagsTy, DepKind),
4680  FlagsLVal);
4681  }
4682  DependenciesArray = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4683  CGF.Builder.CreateStructGEP(DependenciesArray, 0, CharUnits::Zero()),
4684  CGF.VoidPtrTy);
4685  }
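  // DependenciesArray now addresses deps[0], decayed to a void *, as expected
  // by the dependence-aware entry points used below.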
4686 
4687  // NOTE: routine and part_id fields are initialized by __kmpc_omp_task_alloc()
4688  // libcall.
4689  // Build kmp_int32 __kmpc_omp_task_with_deps(ident_t *, kmp_int32 gtid,
4690  // kmp_task_t *new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list,
4691  // kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list) if the
4692  // dependence list is not empty.
4693  auto *ThreadID = getThreadID(CGF, Loc);
4694  auto *UpLoc = emitUpdateLocation(CGF, Loc);
4695  llvm::Value *TaskArgs[] = { UpLoc, ThreadID, NewTask };
4696  llvm::Value *DepTaskArgs[7];
4697  if (NumDependencies) {
4698  DepTaskArgs[0] = UpLoc;
4699  DepTaskArgs[1] = ThreadID;
4700  DepTaskArgs[2] = NewTask;
4701  DepTaskArgs[3] = CGF.Builder.getInt32(NumDependencies);
4702  DepTaskArgs[4] = DependenciesArray.getPointer();
4703  DepTaskArgs[5] = CGF.Builder.getInt32(0);
4704  DepTaskArgs[6] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
4705  }
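  // The argument order matches the __kmpc_omp_task_with_deps signature quoted
  // above: (loc, gtid, new_task, ndeps, dep_list, ndeps_noalias,
  // noalias_dep_list); no noalias dependences are emitted here, hence the
  // trailing 0 / null pair.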
4706  auto &&ThenCodeGen = [this, &Data, TDBase, KmpTaskTQTyRD, NumDependencies,
4707  &TaskArgs,
4708  &DepTaskArgs](CodeGenFunction &CGF, PrePostActionTy &) {
4709  if (!Data.Tied) {
4710  auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
4711  auto PartIdLVal = CGF.EmitLValueForField(TDBase, *PartIdFI);
4712  CGF.EmitStoreOfScalar(CGF.Builder.getInt32(0), PartIdLVal);
4713  }
4714  if (NumDependencies) {
4715  CGF.EmitRuntimeCall(
4716  createRuntimeFunction(OMPRTL__kmpc_omp_task_with_deps), DepTaskArgs);
4717  } else {
4718  CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_task),
4719  TaskArgs);
4720  }
4721  // Check if the parent region is untied and build the return for the untied task.
4722  if (auto *Region =
4723  dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
4724  Region->emitUntiedSwitch(CGF);
4725  };
4726 
4727  llvm::Value *DepWaitTaskArgs[6];
4728  if (NumDependencies) {
4729  DepWaitTaskArgs[0] = UpLoc;
4730  DepWaitTaskArgs[1] = ThreadID;
4731  DepWaitTaskArgs[2] = CGF.Builder.getInt32(NumDependencies);
4732  DepWaitTaskArgs[3] = DependenciesArray.getPointer();
4733  DepWaitTaskArgs[4] = CGF.Builder.getInt32(0);