clang  10.0.0svn
CGOpenMPRuntime.cpp
Go to the documentation of this file.
1 //===----- CGOpenMPRuntime.cpp - Interface to OpenMP Runtimes -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This provides a class for OpenMP runtime code generation.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "CGCXXABI.h"
14 #include "CGCleanup.h"
15 #include "CGOpenMPRuntime.h"
16 #include "CGRecordLayout.h"
17 #include "CodeGenFunction.h"
19 #include "clang/AST/Decl.h"
20 #include "clang/AST/StmtOpenMP.h"
22 #include "llvm/ADT/ArrayRef.h"
23 #include "llvm/Bitcode/BitcodeReader.h"
24 #include "llvm/IR/DerivedTypes.h"
25 #include "llvm/IR/GlobalValue.h"
26 #include "llvm/IR/Value.h"
27 #include "llvm/Support/Format.h"
28 #include "llvm/Support/raw_ostream.h"
29 #include <cassert>
30 
31 using namespace clang;
32 using namespace CodeGen;
33 
34 namespace {
35 /// Base class for handling code generation inside OpenMP regions.
36 class CGOpenMPRegionInfo : public CodeGenFunction::CGCapturedStmtInfo {
37 public:
38  /// Kinds of OpenMP regions used in codegen.
39  enum CGOpenMPRegionKind {
40  /// Region with outlined function for standalone 'parallel'
41  /// directive.
42  ParallelOutlinedRegion,
43  /// Region with outlined function for standalone 'task' directive.
44  TaskOutlinedRegion,
45  /// Region for constructs that do not require function outlining,
46  /// like 'for', 'sections', 'atomic' etc. directives.
47  InlinedRegion,
48  /// Region with outlined function for standalone 'target' directive.
49  TargetRegion,
50  };
51 
52  CGOpenMPRegionInfo(const CapturedStmt &CS,
53  const CGOpenMPRegionKind RegionKind,
55  bool HasCancel)
56  : CGCapturedStmtInfo(CS, CR_OpenMP), RegionKind(RegionKind),
57  CodeGen(CodeGen), Kind(Kind), HasCancel(HasCancel) {}
58 
59  CGOpenMPRegionInfo(const CGOpenMPRegionKind RegionKind,
61  bool HasCancel)
62  : CGCapturedStmtInfo(CR_OpenMP), RegionKind(RegionKind), CodeGen(CodeGen),
63  Kind(Kind), HasCancel(HasCancel) {}
64 
65  /// Get a variable or parameter for storing global thread id
66  /// inside OpenMP construct.
67  virtual const VarDecl *getThreadIDVariable() const = 0;
68 
69  /// Emit the captured statement body.
70  void EmitBody(CodeGenFunction &CGF, const Stmt *S) override;
71 
72  /// Get an LValue for the current ThreadID variable.
73  /// \return LValue for thread id variable. This LValue always has type int32*.
74  virtual LValue getThreadIDVariableLValue(CodeGenFunction &CGF);
75 
76  virtual void emitUntiedSwitch(CodeGenFunction & /*CGF*/) {}
77 
78  CGOpenMPRegionKind getRegionKind() const { return RegionKind; }
79 
80  OpenMPDirectiveKind getDirectiveKind() const { return Kind; }
81 
82  bool hasCancel() const { return HasCancel; }
83 
84  static bool classof(const CGCapturedStmtInfo *Info) {
85  return Info->getKind() == CR_OpenMP;
86  }
87 
88  ~CGOpenMPRegionInfo() override = default;
89 
90 protected:
91  CGOpenMPRegionKind RegionKind;
92  RegionCodeGenTy CodeGen;
94  bool HasCancel;
95 };
96 
97 /// API for captured statement code generation in OpenMP constructs.
98 class CGOpenMPOutlinedRegionInfo final : public CGOpenMPRegionInfo {
99 public:
100  CGOpenMPOutlinedRegionInfo(const CapturedStmt &CS, const VarDecl *ThreadIDVar,
101  const RegionCodeGenTy &CodeGen,
102  OpenMPDirectiveKind Kind, bool HasCancel,
103  StringRef HelperName)
104  : CGOpenMPRegionInfo(CS, ParallelOutlinedRegion, CodeGen, Kind,
105  HasCancel),
106  ThreadIDVar(ThreadIDVar), HelperName(HelperName) {
107  assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
108  }
109 
110  /// Get a variable or parameter for storing global thread id
111  /// inside OpenMP construct.
112  const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }
113 
114  /// Get the name of the capture helper.
115  StringRef getHelperName() const override { return HelperName; }
116 
117  static bool classof(const CGCapturedStmtInfo *Info) {
118  return CGOpenMPRegionInfo::classof(Info) &&
119  cast<CGOpenMPRegionInfo>(Info)->getRegionKind() ==
120  ParallelOutlinedRegion;
121  }
122 
123 private:
124  /// A variable or parameter storing global thread id for OpenMP
125  /// constructs.
126  const VarDecl *ThreadIDVar;
127  StringRef HelperName;
128 };
129 
130 /// API for captured statement code generation in OpenMP constructs.
131 class CGOpenMPTaskOutlinedRegionInfo final : public CGOpenMPRegionInfo {
132 public:
133  class UntiedTaskActionTy final : public PrePostActionTy {
134  bool Untied;
135  const VarDecl *PartIDVar;
136  const RegionCodeGenTy UntiedCodeGen;
137  llvm::SwitchInst *UntiedSwitch = nullptr;
138 
139  public:
140  UntiedTaskActionTy(bool Tied, const VarDecl *PartIDVar,
141  const RegionCodeGenTy &UntiedCodeGen)
142  : Untied(!Tied), PartIDVar(PartIDVar), UntiedCodeGen(UntiedCodeGen) {}
143  void Enter(CodeGenFunction &CGF) override {
144  if (Untied) {
145  // Emit task switching point.
146  LValue PartIdLVal = CGF.EmitLoadOfPointerLValue(
147  CGF.GetAddrOfLocalVar(PartIDVar),
148  PartIDVar->getType()->castAs<PointerType>());
149  llvm::Value *Res =
150  CGF.EmitLoadOfScalar(PartIdLVal, PartIDVar->getLocation());
151  llvm::BasicBlock *DoneBB = CGF.createBasicBlock(".untied.done.");
152  UntiedSwitch = CGF.Builder.CreateSwitch(Res, DoneBB);
153  CGF.EmitBlock(DoneBB);
155  CGF.EmitBlock(CGF.createBasicBlock(".untied.jmp."));
156  UntiedSwitch->addCase(CGF.Builder.getInt32(0),
157  CGF.Builder.GetInsertBlock());
158  emitUntiedSwitch(CGF);
159  }
160  }
161  void emitUntiedSwitch(CodeGenFunction &CGF) const {
162  if (Untied) {
163  LValue PartIdLVal = CGF.EmitLoadOfPointerLValue(
164  CGF.GetAddrOfLocalVar(PartIDVar),
165  PartIDVar->getType()->castAs<PointerType>());
166  CGF.EmitStoreOfScalar(CGF.Builder.getInt32(UntiedSwitch->getNumCases()),
167  PartIdLVal);
168  UntiedCodeGen(CGF);
169  CodeGenFunction::JumpDest CurPoint =
170  CGF.getJumpDestInCurrentScope(".untied.next.");
172  CGF.EmitBlock(CGF.createBasicBlock(".untied.jmp."));
173  UntiedSwitch->addCase(CGF.Builder.getInt32(UntiedSwitch->getNumCases()),
174  CGF.Builder.GetInsertBlock());
175  CGF.EmitBranchThroughCleanup(CurPoint);
176  CGF.EmitBlock(CurPoint.getBlock());
177  }
178  }
179  unsigned getNumberOfParts() const { return UntiedSwitch->getNumCases(); }
180  };
181  CGOpenMPTaskOutlinedRegionInfo(const CapturedStmt &CS,
182  const VarDecl *ThreadIDVar,
183  const RegionCodeGenTy &CodeGen,
184  OpenMPDirectiveKind Kind, bool HasCancel,
185  const UntiedTaskActionTy &Action)
186  : CGOpenMPRegionInfo(CS, TaskOutlinedRegion, CodeGen, Kind, HasCancel),
187  ThreadIDVar(ThreadIDVar), Action(Action) {
188  assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
189  }
190 
191  /// Get a variable or parameter for storing global thread id
192  /// inside OpenMP construct.
193  const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }
194 
195  /// Get an LValue for the current ThreadID variable.
196  LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override;
197 
198  /// Get the name of the capture helper.
199  StringRef getHelperName() const override { return ".omp_outlined."; }
200 
201  void emitUntiedSwitch(CodeGenFunction &CGF) override {
202  Action.emitUntiedSwitch(CGF);
203  }
204 
205  static bool classof(const CGCapturedStmtInfo *Info) {
206  return CGOpenMPRegionInfo::classof(Info) &&
207  cast<CGOpenMPRegionInfo>(Info)->getRegionKind() ==
208  TaskOutlinedRegion;
209  }
210 
211 private:
212  /// A variable or parameter storing global thread id for OpenMP
213  /// constructs.
214  const VarDecl *ThreadIDVar;
215  /// Action for emitting code for untied tasks.
216  const UntiedTaskActionTy &Action;
217 };
218 
219 /// API for inlined captured statement code generation in OpenMP
220 /// constructs.
221 class CGOpenMPInlinedRegionInfo : public CGOpenMPRegionInfo {
222 public:
223  CGOpenMPInlinedRegionInfo(CodeGenFunction::CGCapturedStmtInfo *OldCSI,
224  const RegionCodeGenTy &CodeGen,
225  OpenMPDirectiveKind Kind, bool HasCancel)
226  : CGOpenMPRegionInfo(InlinedRegion, CodeGen, Kind, HasCancel),
227  OldCSI(OldCSI),
228  OuterRegionInfo(dyn_cast_or_null<CGOpenMPRegionInfo>(OldCSI)) {}
229 
230  // Retrieve the value of the context parameter.
231  llvm::Value *getContextValue() const override {
232  if (OuterRegionInfo)
233  return OuterRegionInfo->getContextValue();
234  llvm_unreachable("No context value for inlined OpenMP region");
235  }
236 
237  void setContextValue(llvm::Value *V) override {
238  if (OuterRegionInfo) {
239  OuterRegionInfo->setContextValue(V);
240  return;
241  }
242  llvm_unreachable("No context value for inlined OpenMP region");
243  }
244 
245  /// Lookup the captured field decl for a variable.
246  const FieldDecl *lookup(const VarDecl *VD) const override {
247  if (OuterRegionInfo)
248  return OuterRegionInfo->lookup(VD);
249  // If there is no outer outlined region,no need to lookup in a list of
250  // captured variables, we can use the original one.
251  return nullptr;
252  }
253 
254  FieldDecl *getThisFieldDecl() const override {
255  if (OuterRegionInfo)
256  return OuterRegionInfo->getThisFieldDecl();
257  return nullptr;
258  }
259 
260  /// Get a variable or parameter for storing global thread id
261  /// inside OpenMP construct.
262  const VarDecl *getThreadIDVariable() const override {
263  if (OuterRegionInfo)
264  return OuterRegionInfo->getThreadIDVariable();
265  return nullptr;
266  }
267 
268  /// Get an LValue for the current ThreadID variable.
269  LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override {
270  if (OuterRegionInfo)
271  return OuterRegionInfo->getThreadIDVariableLValue(CGF);
272  llvm_unreachable("No LValue for inlined OpenMP construct");
273  }
274 
275  /// Get the name of the capture helper.
276  StringRef getHelperName() const override {
277  if (auto *OuterRegionInfo = getOldCSI())
278  return OuterRegionInfo->getHelperName();
279  llvm_unreachable("No helper name for inlined OpenMP construct");
280  }
281 
282  void emitUntiedSwitch(CodeGenFunction &CGF) override {
283  if (OuterRegionInfo)
284  OuterRegionInfo->emitUntiedSwitch(CGF);
285  }
286 
287  CodeGenFunction::CGCapturedStmtInfo *getOldCSI() const { return OldCSI; }
288 
289  static bool classof(const CGCapturedStmtInfo *Info) {
290  return CGOpenMPRegionInfo::classof(Info) &&
291  cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == InlinedRegion;
292  }
293 
294  ~CGOpenMPInlinedRegionInfo() override = default;
295 
296 private:
297  /// CodeGen info about outer OpenMP region.
299  CGOpenMPRegionInfo *OuterRegionInfo;
300 };
301 
302 /// API for captured statement code generation in OpenMP target
303 /// constructs. For this captures, implicit parameters are used instead of the
304 /// captured fields. The name of the target region has to be unique in a given
305 /// application so it is provided by the client, because only the client has
306 /// the information to generate that.
307 class CGOpenMPTargetRegionInfo final : public CGOpenMPRegionInfo {
308 public:
309  CGOpenMPTargetRegionInfo(const CapturedStmt &CS,
310  const RegionCodeGenTy &CodeGen, StringRef HelperName)
311  : CGOpenMPRegionInfo(CS, TargetRegion, CodeGen, OMPD_target,
312  /*HasCancel=*/false),
313  HelperName(HelperName) {}
314 
315  /// This is unused for target regions because each starts executing
316  /// with a single thread.
317  const VarDecl *getThreadIDVariable() const override { return nullptr; }
318 
319  /// Get the name of the capture helper.
320  StringRef getHelperName() const override { return HelperName; }
321 
322  static bool classof(const CGCapturedStmtInfo *Info) {
323  return CGOpenMPRegionInfo::classof(Info) &&
324  cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == TargetRegion;
325  }
326 
327 private:
328  StringRef HelperName;
329 };
330 
331 static void EmptyCodeGen(CodeGenFunction &, PrePostActionTy &) {
332  llvm_unreachable("No codegen for expressions");
333 }
334 /// API for generation of expressions captured in a innermost OpenMP
335 /// region.
336 class CGOpenMPInnerExprInfo final : public CGOpenMPInlinedRegionInfo {
337 public:
338  CGOpenMPInnerExprInfo(CodeGenFunction &CGF, const CapturedStmt &CS)
339  : CGOpenMPInlinedRegionInfo(CGF.CapturedStmtInfo, EmptyCodeGen,
340  OMPD_unknown,
341  /*HasCancel=*/false),
342  PrivScope(CGF) {
343  // Make sure the globals captured in the provided statement are local by
344  // using the privatization logic. We assume the same variable is not
345  // captured more than once.
346  for (const auto &C : CS.captures()) {
347  if (!C.capturesVariable() && !C.capturesVariableByCopy())
348  continue;
349 
350  const VarDecl *VD = C.getCapturedVar();
351  if (VD->isLocalVarDeclOrParm())
352  continue;
353 
354  DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(VD),
355  /*RefersToEnclosingVariableOrCapture=*/false,
357  C.getLocation());
358  PrivScope.addPrivate(
359  VD, [&CGF, &DRE]() { return CGF.EmitLValue(&DRE).getAddress(); });
360  }
361  (void)PrivScope.Privatize();
362  }
363 
364  /// Lookup the captured field decl for a variable.
365  const FieldDecl *lookup(const VarDecl *VD) const override {
366  if (const FieldDecl *FD = CGOpenMPInlinedRegionInfo::lookup(VD))
367  return FD;
368  return nullptr;
369  }
370 
371  /// Emit the captured statement body.
372  void EmitBody(CodeGenFunction &CGF, const Stmt *S) override {
373  llvm_unreachable("No body for expressions");
374  }
375 
376  /// Get a variable or parameter for storing global thread id
377  /// inside OpenMP construct.
378  const VarDecl *getThreadIDVariable() const override {
379  llvm_unreachable("No thread id for expressions");
380  }
381 
382  /// Get the name of the capture helper.
383  StringRef getHelperName() const override {
384  llvm_unreachable("No helper name for expressions");
385  }
386 
387  static bool classof(const CGCapturedStmtInfo *Info) { return false; }
388 
389 private:
390  /// Private scope to capture global variables.
392 };
393 
394 /// RAII for emitting code of OpenMP constructs.
395 class InlinedOpenMPRegionRAII {
396  CodeGenFunction &CGF;
397  llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
398  FieldDecl *LambdaThisCaptureField = nullptr;
399  const CodeGen::CGBlockInfo *BlockInfo = nullptr;
400 
401 public:
402  /// Constructs region for combined constructs.
403  /// \param CodeGen Code generation sequence for combined directives. Includes
404  /// a list of functions used for code generation of implicitly inlined
405  /// regions.
406  InlinedOpenMPRegionRAII(CodeGenFunction &CGF, const RegionCodeGenTy &CodeGen,
407  OpenMPDirectiveKind Kind, bool HasCancel)
408  : CGF(CGF) {
409  // Start emission for the construct.
410  CGF.CapturedStmtInfo = new CGOpenMPInlinedRegionInfo(
411  CGF.CapturedStmtInfo, CodeGen, Kind, HasCancel);
412  std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
413  LambdaThisCaptureField = CGF.LambdaThisCaptureField;
414  CGF.LambdaThisCaptureField = nullptr;
415  BlockInfo = CGF.BlockInfo;
416  CGF.BlockInfo = nullptr;
417  }
418 
419  ~InlinedOpenMPRegionRAII() {
420  // Restore original CapturedStmtInfo only if we're done with code emission.
421  auto *OldCSI =
422  cast<CGOpenMPInlinedRegionInfo>(CGF.CapturedStmtInfo)->getOldCSI();
423  delete CGF.CapturedStmtInfo;
424  CGF.CapturedStmtInfo = OldCSI;
425  std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
426  CGF.LambdaThisCaptureField = LambdaThisCaptureField;
427  CGF.BlockInfo = BlockInfo;
428  }
429 };
430 
431 /// Values for bit flags used in the ident_t to describe the fields.
432 /// All enumeric elements are named and described in accordance with the code
433 /// from https://github.com/llvm/llvm-project/blob/master/openmp/runtime/src/kmp.h
434 enum OpenMPLocationFlags : unsigned {
435  /// Use trampoline for internal microtask.
436  OMP_IDENT_IMD = 0x01,
437  /// Use c-style ident structure.
438  OMP_IDENT_KMPC = 0x02,
439  /// Atomic reduction option for kmpc_reduce.
440  OMP_ATOMIC_REDUCE = 0x10,
441  /// Explicit 'barrier' directive.
442  OMP_IDENT_BARRIER_EXPL = 0x20,
443  /// Implicit barrier in code.
444  OMP_IDENT_BARRIER_IMPL = 0x40,
445  /// Implicit barrier in 'for' directive.
446  OMP_IDENT_BARRIER_IMPL_FOR = 0x40,
447  /// Implicit barrier in 'sections' directive.
448  OMP_IDENT_BARRIER_IMPL_SECTIONS = 0xC0,
449  /// Implicit barrier in 'single' directive.
450  OMP_IDENT_BARRIER_IMPL_SINGLE = 0x140,
451  /// Call of __kmp_for_static_init for static loop.
452  OMP_IDENT_WORK_LOOP = 0x200,
453  /// Call of __kmp_for_static_init for sections.
454  OMP_IDENT_WORK_SECTIONS = 0x400,
455  /// Call of __kmp_for_static_init for distribute.
456  OMP_IDENT_WORK_DISTRIBUTE = 0x800,
457  LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/OMP_IDENT_WORK_DISTRIBUTE)
458 };
459 
460 namespace {
462 /// Values for bit flags for marking which requires clauses have been used.
464  /// flag undefined.
465  OMP_REQ_UNDEFINED = 0x000,
466  /// no requires clause present.
467  OMP_REQ_NONE = 0x001,
468  /// reverse_offload clause.
469  OMP_REQ_REVERSE_OFFLOAD = 0x002,
470  /// unified_address clause.
471  OMP_REQ_UNIFIED_ADDRESS = 0x004,
472  /// unified_shared_memory clause.
473  OMP_REQ_UNIFIED_SHARED_MEMORY = 0x008,
474  /// dynamic_allocators clause.
475  OMP_REQ_DYNAMIC_ALLOCATORS = 0x010,
476  LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/OMP_REQ_DYNAMIC_ALLOCATORS)
477 };
478 
480  /// Device ID if the device was not defined, runtime should get it
481  /// from environment variables in the spec.
482  OMP_DEVICEID_UNDEF = -1,
483 };
484 } // anonymous namespace
485 
/// Describes ident structure that describes a source location.
/// All descriptions are taken from
/// https://github.com/llvm/llvm-project/blob/master/openmp/runtime/src/kmp.h
/// Original structure:
/// typedef struct ident {
///    kmp_int32 reserved_1;   /**<  might be used in Fortran;
///                                  see above  */
///    kmp_int32 flags;        /**<  also f.flags; KMP_IDENT_xxx flags;
///                                  KMP_IDENT_KMPC identifies this union
///                                  member  */
///    kmp_int32 reserved_2;   /**<  not really used in Fortran any more;
///                                  see above */
///#if USE_ITT_BUILD
///                            /*  but currently used for storing
///                                region-specific ITT */
///                            /*  contextual information. */
///#endif /* USE_ITT_BUILD */
///    kmp_int32 reserved_3;   /**< source[4] in Fortran, do not use for
///                                 C++  */
///    char const *psource;    /**< String describing the source location.
///                            The string is composed of semi-colon separated
///                            fields which describe the source file,
///                            the function and a pair of line numbers that
///                            delimit the construct.
///                             */
/// } ident_t;
enum IdentFieldIndex {
  /// might be used in Fortran
  IdentField_Reserved_1,
  /// OMP_IDENT_xxx flags; OMP_IDENT_KMPC identifies this union member.
  IdentField_Flags,
  /// Not really used in Fortran any more
  IdentField_Reserved_2,
  /// Source[4] in Fortran, do not use for C++
  IdentField_Reserved_3,
  /// String describing the source location. The string is composed of
  /// semi-colon separated fields which describe the source file, the function
  /// and a pair of line numbers that delimit the construct.
  IdentField_PSource
};

/// Schedule types for 'omp for' loops (these enumerators are taken from
/// the enum sched_type in kmp.h).
enum OpenMPSchedType {
  /// Lower bound for default (unordered) versions.
  OMP_sch_lower = 32,
  OMP_sch_static_chunked = 33,
  OMP_sch_static = 34,
  OMP_sch_dynamic_chunked = 35,
  OMP_sch_guided_chunked = 36,
  OMP_sch_runtime = 37,
  OMP_sch_auto = 38,
  /// static with chunk adjustment (e.g., simd)
  OMP_sch_static_balanced_chunked = 45,
  /// Lower bound for 'ordered' versions.
  OMP_ord_lower = 64,
  OMP_ord_static_chunked = 65,
  OMP_ord_static = 66,
  OMP_ord_dynamic_chunked = 67,
  OMP_ord_guided_chunked = 68,
  OMP_ord_runtime = 69,
  OMP_ord_auto = 70,
  OMP_sch_default = OMP_sch_static,
  /// dist_schedule types
  OMP_dist_sch_static_chunked = 91,
  OMP_dist_sch_static = 92,
  /// Support for OpenMP 4.5 monotonic and nonmonotonic schedule modifiers.
  /// Set if the monotonic schedule modifier was present.
  OMP_sch_modifier_monotonic = (1 << 29),
  /// Set if the nonmonotonic schedule modifier was present.
  OMP_sch_modifier_nonmonotonic = (1 << 30),
};
558 
/// Identifiers for the OpenMP (libomp/libomptarget) runtime entry points that
/// this file may need to declare and call.
enum OpenMPRTLFunction {
  /// Call to void __kmpc_fork_call(ident_t *loc, kmp_int32 argc,
  /// kmpc_micro microtask, ...);
  OMPRTL__kmpc_fork_call,
  /// Call to void *__kmpc_threadprivate_cached(ident_t *loc,
  /// kmp_int32 global_tid, void *data, size_t size, void ***cache);
  OMPRTL__kmpc_threadprivate_cached,
  /// Call to void __kmpc_threadprivate_register( ident_t *,
  /// void *data, kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor);
  OMPRTL__kmpc_threadprivate_register,
  // Call to __kmpc_int32 kmpc_global_thread_num(ident_t *loc);
  OMPRTL__kmpc_global_thread_num,
  // Call to void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
  // kmp_critical_name *crit);
  OMPRTL__kmpc_critical,
  // Call to void __kmpc_critical_with_hint(ident_t *loc, kmp_int32
  // global_tid, kmp_critical_name *crit, uintptr_t hint);
  OMPRTL__kmpc_critical_with_hint,
  // Call to void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid,
  // kmp_critical_name *crit);
  OMPRTL__kmpc_end_critical,
  // Call to kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32
  // global_tid);
  OMPRTL__kmpc_cancel_barrier,
  // Call to void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
  OMPRTL__kmpc_barrier,
  // Call to void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);
  OMPRTL__kmpc_for_static_fini,
  // Call to void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
  // global_tid);
  OMPRTL__kmpc_serialized_parallel,
  // Call to void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
  // global_tid);
  OMPRTL__kmpc_end_serialized_parallel,
  // Call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
  // kmp_int32 num_threads);
  OMPRTL__kmpc_push_num_threads,
  // Call to void __kmpc_flush(ident_t *loc);
  OMPRTL__kmpc_flush,
  // Call to kmp_int32 __kmpc_master(ident_t *, kmp_int32 global_tid);
  OMPRTL__kmpc_master,
  // Call to void __kmpc_end_master(ident_t *, kmp_int32 global_tid);
  OMPRTL__kmpc_end_master,
  // Call to kmp_int32 __kmpc_omp_taskyield(ident_t *, kmp_int32 global_tid,
  // int end_part);
  OMPRTL__kmpc_omp_taskyield,
  // Call to kmp_int32 __kmpc_single(ident_t *, kmp_int32 global_tid);
  OMPRTL__kmpc_single,
  // Call to void __kmpc_end_single(ident_t *, kmp_int32 global_tid);
  OMPRTL__kmpc_end_single,
  // Call to kmp_task_t * __kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
  // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
  // kmp_routine_entry_t *task_entry);
  OMPRTL__kmpc_omp_task_alloc,
  // Call to kmp_task_t * __kmpc_omp_target_task_alloc(ident_t *,
  // kmp_int32 gtid, kmp_int32 flags, size_t sizeof_kmp_task_t,
  // size_t sizeof_shareds, kmp_routine_entry_t *task_entry,
  // kmp_int64 device_id);
  OMPRTL__kmpc_omp_target_task_alloc,
  // Call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t *
  // new_task);
  OMPRTL__kmpc_omp_task,
  // Call to void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
  // size_t cpy_size, void *cpy_data, void(*cpy_func)(void *, void *),
  // kmp_int32 didit);
  OMPRTL__kmpc_copyprivate,
  // Call to kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid,
  // kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void
  // (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck);
  OMPRTL__kmpc_reduce,
  // Call to kmp_int32 __kmpc_reduce_nowait(ident_t *loc, kmp_int32
  // global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
  // void (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name
  // *lck);
  OMPRTL__kmpc_reduce_nowait,
  // Call to void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
  // kmp_critical_name *lck);
  OMPRTL__kmpc_end_reduce,
  // Call to void __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
  // kmp_critical_name *lck);
  OMPRTL__kmpc_end_reduce_nowait,
  // Call to void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid,
  // kmp_task_t * new_task);
  OMPRTL__kmpc_omp_task_begin_if0,
  // Call to void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid,
  // kmp_task_t * new_task);
  OMPRTL__kmpc_omp_task_complete_if0,
  // Call to void __kmpc_ordered(ident_t *loc, kmp_int32 global_tid);
  OMPRTL__kmpc_ordered,
  // Call to void __kmpc_end_ordered(ident_t *loc, kmp_int32 global_tid);
  OMPRTL__kmpc_end_ordered,
  // Call to kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
  // global_tid);
  OMPRTL__kmpc_omp_taskwait,
  // Call to void __kmpc_taskgroup(ident_t *loc, kmp_int32 global_tid);
  OMPRTL__kmpc_taskgroup,
  // Call to void __kmpc_end_taskgroup(ident_t *loc, kmp_int32 global_tid);
  OMPRTL__kmpc_end_taskgroup,
  // Call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
  // int proc_bind);
  OMPRTL__kmpc_push_proc_bind,
  // Call to kmp_int32 __kmpc_omp_task_with_deps(ident_t *loc_ref, kmp_int32
  // gtid, kmp_task_t * new_task, kmp_int32 ndeps, kmp_depend_info_t
  // *dep_list, kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
  OMPRTL__kmpc_omp_task_with_deps,
  // Call to void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32
  // gtid, kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32
  // ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
  OMPRTL__kmpc_omp_wait_deps,
  // Call to kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
  // global_tid, kmp_int32 cncl_kind);
  OMPRTL__kmpc_cancellationpoint,
  // Call to kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
  // kmp_int32 cncl_kind);
  OMPRTL__kmpc_cancel,
  // Call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid,
  // kmp_int32 num_teams, kmp_int32 thread_limit);
  OMPRTL__kmpc_push_num_teams,
  // Call to void __kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro
  // microtask, ...);
  OMPRTL__kmpc_fork_teams,
  // Call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int
  // if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int
  // sched, kmp_uint64 grainsize, void *task_dup);
  OMPRTL__kmpc_taskloop,
  // Call to void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid, kmp_int32
  // num_dims, struct kmp_dim *dims);
  OMPRTL__kmpc_doacross_init,
  // Call to void __kmpc_doacross_fini(ident_t *loc, kmp_int32 gtid);
  OMPRTL__kmpc_doacross_fini,
  // Call to void __kmpc_doacross_post(ident_t *loc, kmp_int32 gtid, kmp_int64
  // *vec);
  OMPRTL__kmpc_doacross_post,
  // Call to void __kmpc_doacross_wait(ident_t *loc, kmp_int32 gtid, kmp_int64
  // *vec);
  OMPRTL__kmpc_doacross_wait,
  // Call to void *__kmpc_task_reduction_init(int gtid, int num_data, void
  // *data);
  OMPRTL__kmpc_task_reduction_init,
  // Call to void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void
  // *d);
  OMPRTL__kmpc_task_reduction_get_th_data,
  // Call to void *__kmpc_alloc(int gtid, size_t sz, omp_allocator_handle_t al);
  OMPRTL__kmpc_alloc,
  // Call to void __kmpc_free(int gtid, void *ptr, omp_allocator_handle_t al);
  OMPRTL__kmpc_free,

  //
  // Offloading related calls
  //
  // Call to void __kmpc_push_target_tripcount(int64_t device_id, kmp_uint64
  // size);
  OMPRTL__kmpc_push_target_tripcount,
  // Call to int32_t __tgt_target(int64_t device_id, void *host_ptr, int32_t
  // arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
  // *arg_types);
  OMPRTL__tgt_target,
  // Call to int32_t __tgt_target_nowait(int64_t device_id, void *host_ptr,
  // int32_t arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
  // *arg_types);
  OMPRTL__tgt_target_nowait,
  // Call to int32_t __tgt_target_teams(int64_t device_id, void *host_ptr,
  // int32_t arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
  // *arg_types, int32_t num_teams, int32_t thread_limit);
  OMPRTL__tgt_target_teams,
  // Call to int32_t __tgt_target_teams_nowait(int64_t device_id, void
  // *host_ptr, int32_t arg_num, void** args_base, void **args, int64_t
  // *arg_sizes, int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
  OMPRTL__tgt_target_teams_nowait,
  // Call to void __tgt_register_requires(int64_t flags);
  OMPRTL__tgt_register_requires,
  // Call to void __tgt_register_lib(__tgt_bin_desc *desc);
  OMPRTL__tgt_register_lib,
  // Call to void __tgt_unregister_lib(__tgt_bin_desc *desc);
  OMPRTL__tgt_unregister_lib,
  // Call to void __tgt_target_data_begin(int64_t device_id, int32_t arg_num,
  // void** args_base, void **args, int64_t *arg_sizes, int64_t *arg_types);
  OMPRTL__tgt_target_data_begin,
  // Call to void __tgt_target_data_begin_nowait(int64_t device_id, int32_t
  // arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
  // *arg_types);
  OMPRTL__tgt_target_data_begin_nowait,
  // Call to void __tgt_target_data_end(int64_t device_id, int32_t arg_num,
  // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
  OMPRTL__tgt_target_data_end,
  // Call to void __tgt_target_data_end_nowait(int64_t device_id, int32_t
  // arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
  // *arg_types);
  OMPRTL__tgt_target_data_end_nowait,
  // Call to void __tgt_target_data_update(int64_t device_id, int32_t arg_num,
  // void** args_base, void **args, int64_t *arg_sizes, int64_t *arg_types);
  OMPRTL__tgt_target_data_update,
  // Call to void __tgt_target_data_update_nowait(int64_t device_id, int32_t
  // arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
  // *arg_types);
  OMPRTL__tgt_target_data_update_nowait,
  // Call to int64_t __tgt_mapper_num_components(void *rt_mapper_handle);
  OMPRTL__tgt_mapper_num_components,
  // Call to void __tgt_push_mapper_component(void *rt_mapper_handle, void
  // *base, void *begin, int64_t size, int64_t type);
  OMPRTL__tgt_push_mapper_component,
};
761 
762 /// A basic class for pre|post-action for advanced codegen sequence for OpenMP
763 /// region.
764 class CleanupTy final : public EHScopeStack::Cleanup {
765  PrePostActionTy *Action;
766 
767 public:
768  explicit CleanupTy(PrePostActionTy *Action) : Action(Action) {}
769  void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
770  if (!CGF.HaveInsertPoint())
771  return;
772  Action->Exit(CGF);
773  }
774 };
775 
776 } // anonymous namespace
777 
780  if (PrePostAction) {
781  CGF.EHStack.pushCleanup<CleanupTy>(NormalAndEHCleanup, PrePostAction);
782  Callback(CodeGen, CGF, *PrePostAction);
783  } else {
784  PrePostActionTy Action;
785  Callback(CodeGen, CGF, Action);
786  }
787 }
788 
789 /// Check if the combiner is a call to UDR combiner and if it is so return the
790 /// UDR decl used for reduction.
791 static const OMPDeclareReductionDecl *
792 getReductionInit(const Expr *ReductionOp) {
793  if (const auto *CE = dyn_cast<CallExpr>(ReductionOp))
794  if (const auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
795  if (const auto *DRE =
796  dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts()))
797  if (const auto *DRD = dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl()))
798  return DRD;
799  return nullptr;
800 }
801 
803  const OMPDeclareReductionDecl *DRD,
804  const Expr *InitOp,
805  Address Private, Address Original,
806  QualType Ty) {
807  if (DRD->getInitializer()) {
808  std::pair<llvm::Function *, llvm::Function *> Reduction =
809  CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD);
810  const auto *CE = cast<CallExpr>(InitOp);
811  const auto *OVE = cast<OpaqueValueExpr>(CE->getCallee());
812  const Expr *LHS = CE->getArg(/*Arg=*/0)->IgnoreParenImpCasts();
813  const Expr *RHS = CE->getArg(/*Arg=*/1)->IgnoreParenImpCasts();
814  const auto *LHSDRE =
815  cast<DeclRefExpr>(cast<UnaryOperator>(LHS)->getSubExpr());
816  const auto *RHSDRE =
817  cast<DeclRefExpr>(cast<UnaryOperator>(RHS)->getSubExpr());
818  CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
819  PrivateScope.addPrivate(cast<VarDecl>(LHSDRE->getDecl()),
820  [=]() { return Private; });
821  PrivateScope.addPrivate(cast<VarDecl>(RHSDRE->getDecl()),
822  [=]() { return Original; });
823  (void)PrivateScope.Privatize();
824  RValue Func = RValue::get(Reduction.second);
825  CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func);
826  CGF.EmitIgnoredExpr(InitOp);
827  } else {
828  llvm::Constant *Init = CGF.CGM.EmitNullConstant(Ty);
829  std::string Name = CGF.CGM.getOpenMPRuntime().getName({"init"});
830  auto *GV = new llvm::GlobalVariable(
831  CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
832  llvm::GlobalValue::PrivateLinkage, Init, Name);
833  LValue LV = CGF.MakeNaturalAlignAddrLValue(GV, Ty);
834  RValue InitRVal;
835  switch (CGF.getEvaluationKind(Ty)) {
836  case TEK_Scalar:
837  InitRVal = CGF.EmitLoadOfLValue(LV, DRD->getLocation());
838  break;
839  case TEK_Complex:
840  InitRVal =
842  break;
843  case TEK_Aggregate:
844  InitRVal = RValue::getAggregate(LV.getAddress());
845  break;
846  }
847  OpaqueValueExpr OVE(DRD->getLocation(), Ty, VK_RValue);
848  CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE, InitRVal);
849  CGF.EmitAnyExprToMem(&OVE, Private, Ty.getQualifiers(),
850  /*IsInitializer=*/false);
851  }
852 }
853 
/// Emit initialization of arrays of complex types.
/// Performs element-by-element initialization of the destination array via an
/// IR-level while-do loop, optionally driven by a user-defined 'declare
/// reduction' initializer that also reads the matching element of the
/// original (source) array.
/// \param DestAddr Address of the array.
/// \param Type Type of array.
/// \param EmitDeclareReductionInit True when \p Init must be emitted through
/// emitInitWithReductionInitializer rather than as a plain initializer
/// expression.
/// \param Init Initial expression of array.
/// \param DRD The 'declare reduction' declaration, if any; when present the
/// source array is walked in lock-step with the destination.
/// \param SrcAddr Address of the original array.
static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
                                 QualType Type, bool EmitDeclareReductionInit,
                                 const Expr *Init,
                                 const OMPDeclareReductionDecl *DRD,
                                 Address SrcAddr = Address::invalid()) {
  // Perform element-by-element initialization.
  QualType ElementTy;

  // Drill down to the base element type on both arrays.
  const ArrayType *ArrayTy = Type->getAsArrayTypeUnsafe();
  llvm::Value *NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, DestAddr);
  DestAddr =
      CGF.Builder.CreateElementBitCast(DestAddr, DestAddr.getElementType());
  if (DRD)
    SrcAddr =
        CGF.Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());

  // The source array is only walked for user-defined reductions.
  llvm::Value *SrcBegin = nullptr;
  if (DRD)
    SrcBegin = SrcAddr.getPointer();
  llvm::Value *DestBegin = DestAddr.getPointer();
  // Cast from pointer to array type to pointer to single element.
  llvm::Value *DestEnd = CGF.Builder.CreateGEP(DestBegin, NumElements);
  // The basic structure here is a while-do loop.
  llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.arrayinit.body");
  llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.arrayinit.done");
  llvm::Value *IsEmpty =
      CGF.Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arrayinit.isempty");
  // Skip the loop entirely when the array is empty.
  CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);

  // Enter the loop body, making that address the current address.
  llvm::BasicBlock *EntryBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(BodyBB);

  CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);

  // PHI nodes track the current source/destination element; their back-edge
  // incoming values are added below once the loop-latch block is known.
  llvm::PHINode *SrcElementPHI = nullptr;
  Address SrcElementCurrent = Address::invalid();
  if (DRD) {
    SrcElementPHI = CGF.Builder.CreatePHI(SrcBegin->getType(), 2,
                                          "omp.arraycpy.srcElementPast");
    SrcElementPHI->addIncoming(SrcBegin, EntryBB);
    SrcElementCurrent =
        Address(SrcElementPHI,
                SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));
  }
  llvm::PHINode *DestElementPHI = CGF.Builder.CreatePHI(
      DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
  DestElementPHI->addIncoming(DestBegin, EntryBB);
  Address DestElementCurrent =
      Address(DestElementPHI,
              DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  // Emit copy.
  {
    // Scope ensures any per-element temporaries are cleaned up before the
    // pointers are advanced.
    CodeGenFunction::RunCleanupsScope InitScope(CGF);
    if (EmitDeclareReductionInit) {
      emitInitWithReductionInitializer(CGF, DRD, Init, DestElementCurrent,
                                       SrcElementCurrent, ElementTy);
    } else
      CGF.EmitAnyExprToMem(Init, DestElementCurrent, ElementTy.getQualifiers(),
                           /*IsInitializer=*/false);
  }

  if (DRD) {
    // Shift the address forward by one element.
    llvm::Value *SrcElementNext = CGF.Builder.CreateConstGEP1_32(
        SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
    SrcElementPHI->addIncoming(SrcElementNext, CGF.Builder.GetInsertBlock());
  }

  // Shift the address forward by one element.
  llvm::Value *DestElementNext = CGF.Builder.CreateConstGEP1_32(
      DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
  // Check whether we've reached the end.
  llvm::Value *Done =
      CGF.Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
  CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
  DestElementPHI->addIncoming(DestElementNext, CGF.Builder.GetInsertBlock());

  // Done.
  CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
}
942 
/// Emit the lvalue of the shared (original) variable referenced by \p E.
LValue ReductionCodeGen::emitSharedLValue(CodeGenFunction &CGF, const Expr *E) {
  return CGF.EmitOMPSharedLValue(E);
}
946 
947 LValue ReductionCodeGen::emitSharedLValueUB(CodeGenFunction &CGF,
948  const Expr *E) {
949  if (const auto *OASE = dyn_cast<OMPArraySectionExpr>(E))
950  return CGF.EmitOMPArraySectionExpr(OASE, /*IsLowerBound=*/false);
951  return LValue();
952 }
953 
954 void ReductionCodeGen::emitAggregateInitialization(
955  CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal,
956  const OMPDeclareReductionDecl *DRD) {
957  // Emit VarDecl with copy init for arrays.
958  // Get the address of the original variable captured in current
959  // captured region.
960  const auto *PrivateVD =
961  cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
962  bool EmitDeclareReductionInit =
963  DRD && (DRD->getInitializer() || !PrivateVD->hasInit());
964  EmitOMPAggregateInit(CGF, PrivateAddr, PrivateVD->getType(),
965  EmitDeclareReductionInit,
966  EmitDeclareReductionInit ? ClausesData[N].ReductionOp
967  : PrivateVD->getInit(),
968  DRD, SharedLVal.getAddress());
969 }
970 
973  ArrayRef<const Expr *> ReductionOps) {
974  ClausesData.reserve(Shareds.size());
975  SharedAddresses.reserve(Shareds.size());
976  Sizes.reserve(Shareds.size());
977  BaseDecls.reserve(Shareds.size());
978  auto IPriv = Privates.begin();
979  auto IRed = ReductionOps.begin();
980  for (const Expr *Ref : Shareds) {
981  ClausesData.emplace_back(Ref, *IPriv, *IRed);
982  std::advance(IPriv, 1);
983  std::advance(IRed, 1);
984  }
985 }
986 
987 void ReductionCodeGen::emitSharedLValue(CodeGenFunction &CGF, unsigned N) {
988  assert(SharedAddresses.size() == N &&
989  "Number of generated lvalues must be exactly N.");
990  LValue First = emitSharedLValue(CGF, ClausesData[N].Ref);
991  LValue Second = emitSharedLValueUB(CGF, ClausesData[N].Ref);
992  SharedAddresses.emplace_back(First, Second);
993 }
994 
996  const auto *PrivateVD =
997  cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
998  QualType PrivateType = PrivateVD->getType();
999  bool AsArraySection = isa<OMPArraySectionExpr>(ClausesData[N].Ref);
1000  if (!PrivateType->isVariablyModifiedType()) {
1001  Sizes.emplace_back(
1002  CGF.getTypeSize(
1003  SharedAddresses[N].first.getType().getNonReferenceType()),
1004  nullptr);
1005  return;
1006  }
1007  llvm::Value *Size;
1008  llvm::Value *SizeInChars;
1009  auto *ElemType =
1010  cast<llvm::PointerType>(SharedAddresses[N].first.getPointer()->getType())
1011  ->getElementType();
1012  auto *ElemSizeOf = llvm::ConstantExpr::getSizeOf(ElemType);
1013  if (AsArraySection) {
1014  Size = CGF.Builder.CreatePtrDiff(SharedAddresses[N].second.getPointer(),
1015  SharedAddresses[N].first.getPointer());
1016  Size = CGF.Builder.CreateNUWAdd(
1017  Size, llvm::ConstantInt::get(Size->getType(), /*V=*/1));
1018  SizeInChars = CGF.Builder.CreateNUWMul(Size, ElemSizeOf);
1019  } else {
1020  SizeInChars = CGF.getTypeSize(
1021  SharedAddresses[N].first.getType().getNonReferenceType());
1022  Size = CGF.Builder.CreateExactUDiv(SizeInChars, ElemSizeOf);
1023  }
1024  Sizes.emplace_back(SizeInChars, Size);
1026  CGF,
1027  cast<OpaqueValueExpr>(
1028  CGF.getContext().getAsVariableArrayType(PrivateType)->getSizeExpr()),
1029  RValue::get(Size));
1030  CGF.EmitVariablyModifiedType(PrivateType);
1031 }
1032 
1034  llvm::Value *Size) {
1035  const auto *PrivateVD =
1036  cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
1037  QualType PrivateType = PrivateVD->getType();
1038  if (!PrivateType->isVariablyModifiedType()) {
1039  assert(!Size && !Sizes[N].second &&
1040  "Size should be nullptr for non-variably modified reduction "
1041  "items.");
1042  return;
1043  }
1045  CGF,
1046  cast<OpaqueValueExpr>(
1047  CGF.getContext().getAsVariableArrayType(PrivateType)->getSizeExpr()),
1048  RValue::get(Size));
1049  CGF.EmitVariablyModifiedType(PrivateType);
1050 }
1051 
1053  CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal,
1054  llvm::function_ref<bool(CodeGenFunction &)> DefaultInit) {
1055  assert(SharedAddresses.size() > N && "No variable was generated");
1056  const auto *PrivateVD =
1057  cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
1058  const OMPDeclareReductionDecl *DRD =
1059  getReductionInit(ClausesData[N].ReductionOp);
1060  QualType PrivateType = PrivateVD->getType();
1061  PrivateAddr = CGF.Builder.CreateElementBitCast(
1062  PrivateAddr, CGF.ConvertTypeForMem(PrivateType));
1063  QualType SharedType = SharedAddresses[N].first.getType();
1064  SharedLVal = CGF.MakeAddrLValue(
1065  CGF.Builder.CreateElementBitCast(SharedLVal.getAddress(),
1066  CGF.ConvertTypeForMem(SharedType)),
1067  SharedType, SharedAddresses[N].first.getBaseInfo(),
1068  CGF.CGM.getTBAAInfoForSubobject(SharedAddresses[N].first, SharedType));
1069  if (CGF.getContext().getAsArrayType(PrivateVD->getType())) {
1070  emitAggregateInitialization(CGF, N, PrivateAddr, SharedLVal, DRD);
1071  } else if (DRD && (DRD->getInitializer() || !PrivateVD->hasInit())) {
1072  emitInitWithReductionInitializer(CGF, DRD, ClausesData[N].ReductionOp,
1073  PrivateAddr, SharedLVal.getAddress(),
1074  SharedLVal.getType());
1075  } else if (!DefaultInit(CGF) && PrivateVD->hasInit() &&
1076  !CGF.isTrivialInitializer(PrivateVD->getInit())) {
1077  CGF.EmitAnyExprToMem(PrivateVD->getInit(), PrivateAddr,
1078  PrivateVD->getType().getQualifiers(),
1079  /*IsInitializer=*/false);
1080  }
1081 }
1082 
1084  const auto *PrivateVD =
1085  cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
1086  QualType PrivateType = PrivateVD->getType();
1087  QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
1088  return DTorKind != QualType::DK_none;
1089 }
1090 
1092  Address PrivateAddr) {
1093  const auto *PrivateVD =
1094  cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
1095  QualType PrivateType = PrivateVD->getType();
1096  QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
1097  if (needCleanups(N)) {
1098  PrivateAddr = CGF.Builder.CreateElementBitCast(
1099  PrivateAddr, CGF.ConvertTypeForMem(PrivateType));
1100  CGF.pushDestroy(DTorKind, PrivateAddr, PrivateType);
1101  }
1102 }
1103 
1105  LValue BaseLV) {
1106  BaseTy = BaseTy.getNonReferenceType();
1107  while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
1108  !CGF.getContext().hasSameType(BaseTy, ElTy)) {
1109  if (const auto *PtrTy = BaseTy->getAs<PointerType>()) {
1110  BaseLV = CGF.EmitLoadOfPointerLValue(BaseLV.getAddress(), PtrTy);
1111  } else {
1112  LValue RefLVal = CGF.MakeAddrLValue(BaseLV.getAddress(), BaseTy);
1113  BaseLV = CGF.EmitLoadOfReferenceLValue(RefLVal);
1114  }
1115  BaseTy = BaseTy->getPointeeType();
1116  }
1117  return CGF.MakeAddrLValue(
1119  CGF.ConvertTypeForMem(ElTy)),
1120  BaseLV.getType(), BaseLV.getBaseInfo(),
1121  CGF.CGM.getTBAAInfoForSubobject(BaseLV, BaseLV.getType()));
1122 }
1123 
1125  llvm::Type *BaseLVType, CharUnits BaseLVAlignment,
1126  llvm::Value *Addr) {
1127  Address Tmp = Address::invalid();
1128  Address TopTmp = Address::invalid();
1129  Address MostTopTmp = Address::invalid();
1130  BaseTy = BaseTy.getNonReferenceType();
1131  while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
1132  !CGF.getContext().hasSameType(BaseTy, ElTy)) {
1133  Tmp = CGF.CreateMemTemp(BaseTy);
1134  if (TopTmp.isValid())
1135  CGF.Builder.CreateStore(Tmp.getPointer(), TopTmp);
1136  else
1137  MostTopTmp = Tmp;
1138  TopTmp = Tmp;
1139  BaseTy = BaseTy->getPointeeType();
1140  }
1141  llvm::Type *Ty = BaseLVType;
1142  if (Tmp.isValid())
1143  Ty = Tmp.getElementType();
1144  Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, Ty);
1145  if (Tmp.isValid()) {
1146  CGF.Builder.CreateStore(Addr, Tmp);
1147  return MostTopTmp;
1148  }
1149  return Address(Addr, BaseLVAlignment);
1150 }
1151 
1152 static const VarDecl *getBaseDecl(const Expr *Ref, const DeclRefExpr *&DE) {
1153  const VarDecl *OrigVD = nullptr;
1154  if (const auto *OASE = dyn_cast<OMPArraySectionExpr>(Ref)) {
1155  const Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
1156  while (const auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
1157  Base = TempOASE->getBase()->IgnoreParenImpCasts();
1158  while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
1159  Base = TempASE->getBase()->IgnoreParenImpCasts();
1160  DE = cast<DeclRefExpr>(Base);
1161  OrigVD = cast<VarDecl>(DE->getDecl());
1162  } else if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Ref)) {
1163  const Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
1164  while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
1165  Base = TempASE->getBase()->IgnoreParenImpCasts();
1166  DE = cast<DeclRefExpr>(Base);
1167  OrigVD = cast<VarDecl>(DE->getDecl());
1168  }
1169  return OrigVD;
1170 }
1171 
1173  Address PrivateAddr) {
1174  const DeclRefExpr *DE;
1175  if (const VarDecl *OrigVD = ::getBaseDecl(ClausesData[N].Ref, DE)) {
1176  BaseDecls.emplace_back(OrigVD);
1177  LValue OriginalBaseLValue = CGF.EmitLValue(DE);
1178  LValue BaseLValue =
1179  loadToBegin(CGF, OrigVD->getType(), SharedAddresses[N].first.getType(),
1180  OriginalBaseLValue);
1181  llvm::Value *Adjustment = CGF.Builder.CreatePtrDiff(
1182  BaseLValue.getPointer(), SharedAddresses[N].first.getPointer());
1183  llvm::Value *PrivatePointer =
1185  PrivateAddr.getPointer(),
1186  SharedAddresses[N].first.getAddress().getType());
1187  llvm::Value *Ptr = CGF.Builder.CreateGEP(PrivatePointer, Adjustment);
1188  return castToBase(CGF, OrigVD->getType(),
1189  SharedAddresses[N].first.getType(),
1190  OriginalBaseLValue.getAddress().getType(),
1191  OriginalBaseLValue.getAlignment(), Ptr);
1192  }
1193  BaseDecls.emplace_back(
1194  cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Ref)->getDecl()));
1195  return PrivateAddr;
1196 }
1197 
1199  const OMPDeclareReductionDecl *DRD =
1200  getReductionInit(ClausesData[N].ReductionOp);
1201  return DRD && DRD->getInitializer();
1202 }
1203 
1204 LValue CGOpenMPRegionInfo::getThreadIDVariableLValue(CodeGenFunction &CGF) {
1205  return CGF.EmitLoadOfPointerLValue(
1206  CGF.GetAddrOfLocalVar(getThreadIDVariable()),
1207  getThreadIDVariable()->getType()->castAs<PointerType>());
1208 }
1209 
// Emit the region body inside a terminate scope so that any exception
// escaping the structured block aborts instead of unwinding out of it.
void CGOpenMPRegionInfo::EmitBody(CodeGenFunction &CGF, const Stmt * /*S*/) {
  if (!CGF.HaveInsertPoint())
    return;
  // 1.2.2 OpenMP Language Terminology
  // Structured block - An executable statement with a single entry at the
  // top and a single exit at the bottom.
  // The point of exit cannot be a branch out of the structured block.
  // longjmp() and throw() must not violate the entry/exit criteria.
  CGF.EHStack.pushTerminate();
  CodeGen(CGF);
  CGF.EHStack.popTerminate();
}
1222 
1223 LValue CGOpenMPTaskOutlinedRegionInfo::getThreadIDVariableLValue(
1224  CodeGenFunction &CGF) {
1225  return CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(getThreadIDVariable()),
1226  getThreadIDVariable()->getType(),
1228 }
1229 
1231  QualType FieldTy) {
1232  auto *Field = FieldDecl::Create(
1233  C, DC, SourceLocation(), SourceLocation(), /*Id=*/nullptr, FieldTy,
1235  /*BW=*/nullptr, /*Mutable=*/false, /*InitStyle=*/ICIS_NoInit);
1236  Field->setAccess(AS_public);
1237  DC->addDecl(Field);
1238  return Field;
1239 }
1240 
1241 CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator,
1242  StringRef Separator)
1243  : CGM(CGM), FirstSeparator(FirstSeparator), Separator(Separator),
1245  ASTContext &C = CGM.getContext();
1246  RecordDecl *RD = C.buildImplicitRecord("ident_t");
1247  QualType KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
1248  RD->startDefinition();
1249  // reserved_1
1250  addFieldToRecordDecl(C, RD, KmpInt32Ty);
1251  // flags
1252  addFieldToRecordDecl(C, RD, KmpInt32Ty);
1253  // reserved_2
1254  addFieldToRecordDecl(C, RD, KmpInt32Ty);
1255  // reserved_3
1256  addFieldToRecordDecl(C, RD, KmpInt32Ty);
1257  // psource
1258  addFieldToRecordDecl(C, RD, C.VoidPtrTy);
1259  RD->completeDefinition();
1260  IdentQTy = C.getRecordType(RD);
1261  IdentTy = CGM.getTypes().ConvertRecordDeclType(RD);
1262  KmpCriticalNameTy = llvm::ArrayType::get(CGM.Int32Ty, /*NumElements*/ 8);
1263 
1265 }
1266 
1268  const GlobalDecl &OldGD,
1269  llvm::GlobalValue *OrigAddr,
1270  bool IsForDefinition) {
1271  // Emit at least a definition for the aliasee if the the address of the
1272  // original function is requested.
1273  if (IsForDefinition || OrigAddr)
1274  (void)CGM.GetAddrOfGlobal(NewGD);
1275  StringRef NewMangledName = CGM.getMangledName(NewGD);
1276  llvm::GlobalValue *Addr = CGM.GetGlobalValue(NewMangledName);
1277  if (Addr && !Addr->isDeclaration()) {
1278  const auto *D = cast<FunctionDecl>(OldGD.getDecl());
1279  const CGFunctionInfo &FI = CGM.getTypes().arrangeGlobalDeclaration(OldGD);
1281 
1282  // Create a reference to the named value. This ensures that it is emitted
1283  // if a deferred decl.
1284  llvm::GlobalValue::LinkageTypes LT = CGM.getFunctionLinkage(OldGD);
1285 
1286  // Create the new alias itself, but don't set a name yet.
1287  auto *GA =
1288  llvm::GlobalAlias::create(DeclTy, 0, LT, "", Addr, &CGM.getModule());
1289 
1290  if (OrigAddr) {
1291  assert(OrigAddr->isDeclaration() && "Expected declaration");
1292 
1293  GA->takeName(OrigAddr);
1294  OrigAddr->replaceAllUsesWith(
1295  llvm::ConstantExpr::getBitCast(GA, OrigAddr->getType()));
1296  OrigAddr->eraseFromParent();
1297  } else {
1298  GA->setName(CGM.getMangledName(OldGD));
1299  }
1300 
1301  // Set attributes which are particular to an alias; this is a
1302  // specialization of the attributes which may be set on a global function.
1303  if (D->hasAttr<WeakAttr>() || D->hasAttr<WeakRefAttr>() ||
1304  D->isWeakImported())
1305  GA->setLinkage(llvm::Function::WeakAnyLinkage);
1306 
1307  CGM.SetCommonAttributes(OldGD, GA);
1308  return true;
1309  }
1310  return false;
1311 }
1312 
1313 void CGOpenMPRuntime::clear() {
1314  InternalVars.clear();
1315  // Clean non-target variable declarations possibly used only in debug info.
1316  for (const auto &Data : EmittedNonTargetVariables) {
1317  if (!Data.getValue().pointsToAliveValue())
1318  continue;
1319  auto *GV = dyn_cast<llvm::GlobalVariable>(Data.getValue());
1320  if (!GV)
1321  continue;
1322  if (!GV->isDeclaration() || GV->getNumUses() > 0)
1323  continue;
1324  GV->eraseFromParent();
1325  }
1326  // Emit aliases for the deferred aliasees.
1327  for (const auto &Pair : DeferredVariantFunction) {
1328  StringRef MangledName = CGM.getMangledName(Pair.second.second);
1329  llvm::GlobalValue *Addr = CGM.GetGlobalValue(MangledName);
1330  // If not able to emit alias, just emit original declaration.
1331  (void)tryEmitDeclareVariant(Pair.second.first, Pair.second.second, Addr,
1332  /*IsForDefinition=*/false);
1333  }
1334 }
1335 
1336 std::string CGOpenMPRuntime::getName(ArrayRef<StringRef> Parts) const {
1337  SmallString<128> Buffer;
1338  llvm::raw_svector_ostream OS(Buffer);
1339  StringRef Sep = FirstSeparator;
1340  for (StringRef Part : Parts) {
1341  OS << Sep << Part;
1342  Sep = Separator;
1343  }
1344  return OS.str();
1345 }
1346 
1347 static llvm::Function *
1349  const Expr *CombinerInitializer, const VarDecl *In,
1350  const VarDecl *Out, bool IsCombiner) {
1351  // void .omp_combiner.(Ty *in, Ty *out);
1352  ASTContext &C = CGM.getContext();
1353  QualType PtrTy = C.getPointerType(Ty).withRestrict();
1354  FunctionArgList Args;
1355  ImplicitParamDecl OmpOutParm(C, /*DC=*/nullptr, Out->getLocation(),
1356  /*Id=*/nullptr, PtrTy, ImplicitParamDecl::Other);
1357  ImplicitParamDecl OmpInParm(C, /*DC=*/nullptr, In->getLocation(),
1358  /*Id=*/nullptr, PtrTy, ImplicitParamDecl::Other);
1359  Args.push_back(&OmpOutParm);
1360  Args.push_back(&OmpInParm);
1361  const CGFunctionInfo &FnInfo =
1363  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
1364  std::string Name = CGM.getOpenMPRuntime().getName(
1365  {IsCombiner ? "omp_combiner" : "omp_initializer", ""});
1367  Name, &CGM.getModule());
1368  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
1369  if (CGM.getLangOpts().Optimize) {
1370  Fn->removeFnAttr(llvm::Attribute::NoInline);
1371  Fn->removeFnAttr(llvm::Attribute::OptimizeNone);
1372  Fn->addFnAttr(llvm::Attribute::AlwaysInline);
1373  }
1374  CodeGenFunction CGF(CGM);
1375  // Map "T omp_in;" variable to "*omp_in_parm" value in all expressions.
1376  // Map "T omp_out;" variable to "*omp_out_parm" value in all expressions.
1377  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, In->getLocation(),
1378  Out->getLocation());
1380  Address AddrIn = CGF.GetAddrOfLocalVar(&OmpInParm);
1381  Scope.addPrivate(In, [&CGF, AddrIn, PtrTy]() {
1382  return CGF.EmitLoadOfPointerLValue(AddrIn, PtrTy->castAs<PointerType>())
1383  .getAddress();
1384  });
1385  Address AddrOut = CGF.GetAddrOfLocalVar(&OmpOutParm);
1386  Scope.addPrivate(Out, [&CGF, AddrOut, PtrTy]() {
1387  return CGF.EmitLoadOfPointerLValue(AddrOut, PtrTy->castAs<PointerType>())
1388  .getAddress();
1389  });
1390  (void)Scope.Privatize();
1391  if (!IsCombiner && Out->hasInit() &&
1392  !CGF.isTrivialInitializer(Out->getInit())) {
1393  CGF.EmitAnyExprToMem(Out->getInit(), CGF.GetAddrOfLocalVar(Out),
1394  Out->getType().getQualifiers(),
1395  /*IsInitializer=*/true);
1396  }
1397  if (CombinerInitializer)
1398  CGF.EmitIgnoredExpr(CombinerInitializer);
1399  Scope.ForceCleanup();
1400  CGF.FinishFunction();
1401  return Fn;
1402 }
1403 
1405  CodeGenFunction *CGF, const OMPDeclareReductionDecl *D) {
1406  if (UDRMap.count(D) > 0)
1407  return;
1408  llvm::Function *Combiner = emitCombinerOrInitializer(
1409  CGM, D->getType(), D->getCombiner(),
1410  cast<VarDecl>(cast<DeclRefExpr>(D->getCombinerIn())->getDecl()),
1411  cast<VarDecl>(cast<DeclRefExpr>(D->getCombinerOut())->getDecl()),
1412  /*IsCombiner=*/true);
1413  llvm::Function *Initializer = nullptr;
1414  if (const Expr *Init = D->getInitializer()) {
1415  Initializer = emitCombinerOrInitializer(
1416  CGM, D->getType(),
1418  : nullptr,
1419  cast<VarDecl>(cast<DeclRefExpr>(D->getInitOrig())->getDecl()),
1420  cast<VarDecl>(cast<DeclRefExpr>(D->getInitPriv())->getDecl()),
1421  /*IsCombiner=*/false);
1422  }
1423  UDRMap.try_emplace(D, Combiner, Initializer);
1424  if (CGF) {
1425  auto &Decls = FunctionUDRMap.FindAndConstruct(CGF->CurFn);
1426  Decls.second.push_back(D);
1427  }
1428 }
1429 
1430 std::pair<llvm::Function *, llvm::Function *>
1432  auto I = UDRMap.find(D);
1433  if (I != UDRMap.end())
1434  return I->second;
1435  emitUserDefinedReduction(/*CGF=*/nullptr, D);
1436  return UDRMap.lookup(D);
1437 }
1438 
1440  CodeGenModule &CGM, const OMPExecutableDirective &D, const CapturedStmt *CS,
1441  const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
1442  const StringRef OutlinedHelperName, const RegionCodeGenTy &CodeGen) {
1443  assert(ThreadIDVar->getType()->isPointerType() &&
1444  "thread id variable must be of type kmp_int32 *");
1445  CodeGenFunction CGF(CGM, true);
1446  bool HasCancel = false;
1447  if (const auto *OPD = dyn_cast<OMPParallelDirective>(&D))
1448  HasCancel = OPD->hasCancel();
1449  else if (const auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&D))
1450  HasCancel = OPSD->hasCancel();
1451  else if (const auto *OPFD = dyn_cast<OMPParallelForDirective>(&D))
1452  HasCancel = OPFD->hasCancel();
1453  else if (const auto *OPFD = dyn_cast<OMPTargetParallelForDirective>(&D))
1454  HasCancel = OPFD->hasCancel();
1455  else if (const auto *OPFD = dyn_cast<OMPDistributeParallelForDirective>(&D))
1456  HasCancel = OPFD->hasCancel();
1457  else if (const auto *OPFD =
1458  dyn_cast<OMPTeamsDistributeParallelForDirective>(&D))
1459  HasCancel = OPFD->hasCancel();
1460  else if (const auto *OPFD =
1461  dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(&D))
1462  HasCancel = OPFD->hasCancel();
1463  CGOpenMPOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen, InnermostKind,
1464  HasCancel, OutlinedHelperName);
1465  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
1466  return CGF.GenerateOpenMPCapturedStmtFunction(*CS);
1467 }
1468 
1470  const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1471  OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
1472  const CapturedStmt *CS = D.getCapturedStmt(OMPD_parallel);
1474  CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen);
1475 }
1476 
1478  const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1479  OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
1480  const CapturedStmt *CS = D.getCapturedStmt(OMPD_teams);
1482  CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen);
1483 }
1484 
1486  const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1487  const VarDecl *PartIDVar, const VarDecl *TaskTVar,
1488  OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
1489  bool Tied, unsigned &NumberOfParts) {
1490  auto &&UntiedCodeGen = [this, &D, TaskTVar](CodeGenFunction &CGF,
1491  PrePostActionTy &) {
1492  llvm::Value *ThreadID = getThreadID(CGF, D.getBeginLoc());
1493  llvm::Value *UpLoc = emitUpdateLocation(CGF, D.getBeginLoc());
1494  llvm::Value *TaskArgs[] = {
1495  UpLoc, ThreadID,
1496  CGF.EmitLoadOfPointerLValue(CGF.GetAddrOfLocalVar(TaskTVar),
1497  TaskTVar->getType()->castAs<PointerType>())
1498  .getPointer()};
1499  CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_task), TaskArgs);
1500  };
1501  CGOpenMPTaskOutlinedRegionInfo::UntiedTaskActionTy Action(Tied, PartIDVar,
1502  UntiedCodeGen);
1503  CodeGen.setAction(Action);
1504  assert(!ThreadIDVar->getType()->isPointerType() &&
1505  "thread id variable must be of type kmp_int32 for tasks");
1506  const OpenMPDirectiveKind Region =
1507  isOpenMPTaskLoopDirective(D.getDirectiveKind()) ? OMPD_taskloop
1508  : OMPD_task;
1509  const CapturedStmt *CS = D.getCapturedStmt(Region);
1510  const auto *TD = dyn_cast<OMPTaskDirective>(&D);
1511  CodeGenFunction CGF(CGM, true);
1512  CGOpenMPTaskOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen,
1513  InnermostKind,
1514  TD ? TD->hasCancel() : false, Action);
1515  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
1516  llvm::Function *Res = CGF.GenerateCapturedStmtFunction(*CS);
1517  if (!Tied)
1518  NumberOfParts = Action.getNumberOfParts();
1519  return Res;
1520 }
1521 
1523  const RecordDecl *RD, const CGRecordLayout &RL,
1524  ArrayRef<llvm::Constant *> Data) {
1525  llvm::StructType *StructTy = RL.getLLVMType();
1526  unsigned PrevIdx = 0;
1527  ConstantInitBuilder CIBuilder(CGM);
1528  auto DI = Data.begin();
1529  for (const FieldDecl *FD : RD->fields()) {
1530  unsigned Idx = RL.getLLVMFieldNo(FD);
1531  // Fill the alignment.
1532  for (unsigned I = PrevIdx; I < Idx; ++I)
1533  Fields.add(llvm::Constant::getNullValue(StructTy->getElementType(I)));
1534  PrevIdx = Idx + 1;
1535  Fields.add(*DI);
1536  ++DI;
1537  }
1538 }
1539 
1540 template <class... As>
1541 static llvm::GlobalVariable *
1543  ArrayRef<llvm::Constant *> Data, const Twine &Name,
1544  As &&... Args) {
1545  const auto *RD = cast<RecordDecl>(Ty->getAsTagDecl());
1546  const CGRecordLayout &RL = CGM.getTypes().getCGRecordLayout(RD);
1547  ConstantInitBuilder CIBuilder(CGM);
1548  ConstantStructBuilder Fields = CIBuilder.beginStruct(RL.getLLVMType());
1549  buildStructValue(Fields, CGM, RD, RL, Data);
1550  return Fields.finishAndCreateGlobal(
1551  Name, CGM.getContext().getAlignOfGlobalVarInChars(Ty), IsConstant,
1552  std::forward<As>(Args)...);
1553 }
1554 
1555 template <typename T>
1556 static void
1558  ArrayRef<llvm::Constant *> Data,
1559  T &Parent) {
1560  const auto *RD = cast<RecordDecl>(Ty->getAsTagDecl());
1561  const CGRecordLayout &RL = CGM.getTypes().getCGRecordLayout(RD);
1562  ConstantStructBuilder Fields = Parent.beginStruct(RL.getLLVMType());
1563  buildStructValue(Fields, CGM, RD, RL, Data);
1564  Fields.finishAndAddTo(Parent);
1565 }
1566 
// Return (creating and caching on first use) the global ident_t object for
// the given flags; its psource field points at the default
// ";unknown;unknown;0;0;;" location string.
Address CGOpenMPRuntime::getOrCreateDefaultLocation(unsigned Flags) {
  CharUnits Align = CGM.getContext().getTypeAlignInChars(IdentQTy);
  unsigned Reserved2Flags = getDefaultLocationReserved2Flags();
  FlagsTy FlagsKey(Flags, Reserved2Flags);
  llvm::Value *Entry = OpenMPDefaultLocMap.lookup(FlagsKey);
  if (!Entry) {
    if (!DefaultOpenMPPSource) {
      // Initialize default location for psource field of ident_t structure of
      // all ident_t objects. Format is ";file;function;line;column;;".
      // Taken from
      // https://github.com/llvm/llvm-project/blob/master/openmp/runtime/src/kmp_str.cpp
      DefaultOpenMPPSource =
          CGM.GetAddrOfConstantCString(";unknown;unknown;0;0;;").getPointer();
      DefaultOpenMPPSource =
          llvm::ConstantExpr::getBitCast(DefaultOpenMPPSource, CGM.Int8PtrTy);
    }

    // Field order matches the ident_t record: reserved_1, flags, reserved_2,
    // reserved_3, psource.
    llvm::Constant *Data[] = {
        llvm::ConstantInt::getNullValue(CGM.Int32Ty),
        llvm::ConstantInt::get(CGM.Int32Ty, Flags),
        llvm::ConstantInt::get(CGM.Int32Ty, Reserved2Flags),
        llvm::ConstantInt::getNullValue(CGM.Int32Ty), DefaultOpenMPPSource};
    llvm::GlobalValue *DefaultOpenMPLocation =
        createGlobalStruct(CGM, IdentQTy, isDefaultLocationConstant(), Data, "",
                           llvm::GlobalValue::PrivateLinkage);
    // The runtime never compares ident_t addresses, so allow merging.
    DefaultOpenMPLocation->setUnnamedAddr(
        llvm::GlobalValue::UnnamedAddr::Global);

    OpenMPDefaultLocMap[FlagsKey] = Entry = DefaultOpenMPLocation;
  }
  return Address(Entry, Align);
}
1599 
1601  bool AtCurrentPoint) {
1602  auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
1603  assert(!Elem.second.ServiceInsertPt && "Insert point is set already.");
1604 
1605  llvm::Value *Undef = llvm::UndefValue::get(CGF.Int32Ty);
1606  if (AtCurrentPoint) {
1607  Elem.second.ServiceInsertPt = new llvm::BitCastInst(
1608  Undef, CGF.Int32Ty, "svcpt", CGF.Builder.GetInsertBlock());
1609  } else {
1610  Elem.second.ServiceInsertPt =
1611  new llvm::BitCastInst(Undef, CGF.Int32Ty, "svcpt");
1612  Elem.second.ServiceInsertPt->insertAfter(CGF.AllocaInsertPt);
1613  }
1614 }
1615 
1617  auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
1618  if (Elem.second.ServiceInsertPt) {
1619  llvm::Instruction *Ptr = Elem.second.ServiceInsertPt;
1620  Elem.second.ServiceInsertPt = nullptr;
1621  Ptr->eraseFromParent();
1622  }
1623 }
1624 
1626  SourceLocation Loc,
1627  unsigned Flags) {
1628  Flags |= OMP_IDENT_KMPC;
1629  // If no debug info is generated - return global default location.
1630  if (CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo ||
1631  Loc.isInvalid())
1632  return getOrCreateDefaultLocation(Flags).getPointer();
1633 
1634  assert(CGF.CurFn && "No function in current CodeGenFunction.");
1635 
1636  CharUnits Align = CGM.getContext().getTypeAlignInChars(IdentQTy);
1637  Address LocValue = Address::invalid();
1638  auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
1639  if (I != OpenMPLocThreadIDMap.end())
1640  LocValue = Address(I->second.DebugLoc, Align);
1641 
1642  // OpenMPLocThreadIDMap may have null DebugLoc and non-null ThreadID, if
1643  // GetOpenMPThreadID was called before this routine.
1644  if (!LocValue.isValid()) {
1645  // Generate "ident_t .kmpc_loc.addr;"
1646  Address AI = CGF.CreateMemTemp(IdentQTy, ".kmpc_loc.addr");
1647  auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
1648  Elem.second.DebugLoc = AI.getPointer();
1649  LocValue = AI;
1650 
1651  if (!Elem.second.ServiceInsertPt)
1653  CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
1654  CGF.Builder.SetInsertPoint(Elem.second.ServiceInsertPt);
1655  CGF.Builder.CreateMemCpy(LocValue, getOrCreateDefaultLocation(Flags),
1656  CGF.getTypeSize(IdentQTy));
1657  }
1658 
1659  // char **psource = &.kmpc_loc_<flags>.addr.psource;
1660  LValue Base = CGF.MakeAddrLValue(LocValue, IdentQTy);
1661  auto Fields = cast<RecordDecl>(IdentQTy->getAsTagDecl())->field_begin();
1662  LValue PSource =
1663  CGF.EmitLValueForField(Base, *std::next(Fields, IdentField_PSource));
1664 
1665  llvm::Value *OMPDebugLoc = OpenMPDebugLocMap.lookup(Loc.getRawEncoding());
1666  if (OMPDebugLoc == nullptr) {
1667  SmallString<128> Buffer2;
1668  llvm::raw_svector_ostream OS2(Buffer2);
1669  // Build debug location
1671  OS2 << ";" << PLoc.getFilename() << ";";
1672  if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CGF.CurFuncDecl))
1673  OS2 << FD->getQualifiedNameAsString();
1674  OS2 << ";" << PLoc.getLine() << ";" << PLoc.getColumn() << ";;";
1675  OMPDebugLoc = CGF.Builder.CreateGlobalStringPtr(OS2.str());
1676  OpenMPDebugLocMap[Loc.getRawEncoding()] = OMPDebugLoc;
1677  }
1678  // *psource = ";<File>;<Function>;<Line>;<Column>;;";
1679  CGF.EmitStoreOfScalar(OMPDebugLoc, PSource);
1680 
1681  // Our callers always pass this to a runtime function, so for
1682  // convenience, go ahead and return a naked pointer.
1683  return LocValue.getPointer();
1684 }
1685 
1687  SourceLocation Loc) {
1688  assert(CGF.CurFn && "No function in current CodeGenFunction.");
1689 
1690  llvm::Value *ThreadID = nullptr;
1691  // Check whether we've already cached a load of the thread id in this
1692  // function.
1693  auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
1694  if (I != OpenMPLocThreadIDMap.end()) {
1695  ThreadID = I->second.ThreadID;
1696  if (ThreadID != nullptr)
1697  return ThreadID;
1698  }
1699  // If exceptions are enabled, do not use parameter to avoid possible crash.
1700  if (auto *OMPRegionInfo =
1701  dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
1702  if (OMPRegionInfo->getThreadIDVariable()) {
1703  // Check if this an outlined function with thread id passed as argument.
1704  LValue LVal = OMPRegionInfo->getThreadIDVariableLValue(CGF);
1705  llvm::BasicBlock *TopBlock = CGF.AllocaInsertPt->getParent();
1706  if (!CGF.EHStack.requiresLandingPad() || !CGF.getLangOpts().Exceptions ||
1707  !CGF.getLangOpts().CXXExceptions ||
1708  CGF.Builder.GetInsertBlock() == TopBlock ||
1709  !isa<llvm::Instruction>(LVal.getPointer()) ||
1710  cast<llvm::Instruction>(LVal.getPointer())->getParent() == TopBlock ||
1711  cast<llvm::Instruction>(LVal.getPointer())->getParent() ==
1712  CGF.Builder.GetInsertBlock()) {
1713  ThreadID = CGF.EmitLoadOfScalar(LVal, Loc);
1714  // If value loaded in entry block, cache it and use it everywhere in
1715  // function.
1716  if (CGF.Builder.GetInsertBlock() == TopBlock) {
1717  auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
1718  Elem.second.ThreadID = ThreadID;
1719  }
1720  return ThreadID;
1721  }
1722  }
1723  }
1724 
1725  // This is not an outlined function region - need to call __kmpc_int32
1726  // kmpc_global_thread_num(ident_t *loc).
1727  // Generate thread id value and cache this value for use across the
1728  // function.
1729  auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
1730  if (!Elem.second.ServiceInsertPt)
1732  CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
1733  CGF.Builder.SetInsertPoint(Elem.second.ServiceInsertPt);
1734  llvm::CallInst *Call = CGF.Builder.CreateCall(
1736  emitUpdateLocation(CGF, Loc));
1737  Call->setCallingConv(CGF.getRuntimeCC());
1738  Elem.second.ThreadID = Call;
1739  return Call;
1740 }
1741 
1743  assert(CGF.CurFn && "No function in current CodeGenFunction.");
1744  if (OpenMPLocThreadIDMap.count(CGF.CurFn)) {
1746  OpenMPLocThreadIDMap.erase(CGF.CurFn);
1747  }
1748  if (FunctionUDRMap.count(CGF.CurFn) > 0) {
1749  for(auto *D : FunctionUDRMap[CGF.CurFn])
1750  UDRMap.erase(D);
1751  FunctionUDRMap.erase(CGF.CurFn);
1752  }
1753  auto I = FunctionUDMMap.find(CGF.CurFn);
1754  if (I != FunctionUDMMap.end()) {
1755  for(auto *D : I->second)
1756  UDMMap.erase(D);
1757  FunctionUDMMap.erase(I);
1758  }
1759 }
1760 
1762  return IdentTy->getPointerTo();
1763 }
1764 
1766  if (!Kmpc_MicroTy) {
1767  // Build void (*kmpc_micro)(kmp_int32 *global_tid, kmp_int32 *bound_tid,...)
1768  llvm::Type *MicroParams[] = {llvm::PointerType::getUnqual(CGM.Int32Ty),
1769  llvm::PointerType::getUnqual(CGM.Int32Ty)};
1770  Kmpc_MicroTy = llvm::FunctionType::get(CGM.VoidTy, MicroParams, true);
1771  }
1772  return llvm::PointerType::getUnqual(Kmpc_MicroTy);
1773 }
1774 
1775 llvm::FunctionCallee CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
1776  llvm::FunctionCallee RTLFn = nullptr;
1777  switch (static_cast<OpenMPRTLFunction>(Function)) {
1778  case OMPRTL__kmpc_fork_call: {
1779  // Build void __kmpc_fork_call(ident_t *loc, kmp_int32 argc, kmpc_micro
1780  // microtask, ...);
1781  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1783  auto *FnTy =
1784  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ true);
1785  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_fork_call");
1786  if (auto *F = dyn_cast<llvm::Function>(RTLFn.getCallee())) {
1787  if (!F->hasMetadata(llvm::LLVMContext::MD_callback)) {
1788  llvm::LLVMContext &Ctx = F->getContext();
1789  llvm::MDBuilder MDB(Ctx);
1790  // Annotate the callback behavior of the __kmpc_fork_call:
1791  // - The callback callee is argument number 2 (microtask).
1792  // - The first two arguments of the callback callee are unknown (-1).
1793  // - All variadic arguments to the __kmpc_fork_call are passed to the
1794  // callback callee.
1795  F->addMetadata(
1796  llvm::LLVMContext::MD_callback,
1797  *llvm::MDNode::get(Ctx, {MDB.createCallbackEncoding(
1798  2, {-1, -1},
1799  /* VarArgsArePassed */ true)}));
1800  }
1801  }
1802  break;
1803  }
1805  // Build kmp_int32 __kmpc_global_thread_num(ident_t *loc);
1806  llvm::Type *TypeParams[] = {getIdentTyPointerTy()};
1807  auto *FnTy =
1808  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
1809  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_global_thread_num");
1810  break;
1811  }
1813  // Build void *__kmpc_threadprivate_cached(ident_t *loc,
1814  // kmp_int32 global_tid, void *data, size_t size, void ***cache);
1815  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1817  CGM.VoidPtrTy->getPointerTo()->getPointerTo()};
1818  auto *FnTy =
1819  llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg*/ false);
1820  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_threadprivate_cached");
1821  break;
1822  }
1823  case OMPRTL__kmpc_critical: {
1824  // Build void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
1825  // kmp_critical_name *crit);
1826  llvm::Type *TypeParams[] = {
1828  llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1829  auto *FnTy =
1830  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1831  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_critical");
1832  break;
1833  }
1835  // Build void __kmpc_critical_with_hint(ident_t *loc, kmp_int32 global_tid,
1836  // kmp_critical_name *crit, uintptr_t hint);
1837  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1838  llvm::PointerType::getUnqual(KmpCriticalNameTy),
1839  CGM.IntPtrTy};
1840  auto *FnTy =
1841  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1842  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_critical_with_hint");
1843  break;
1844  }
1846  // Build void __kmpc_threadprivate_register(ident_t *, void *data,
1847  // kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor);
1848  // typedef void *(*kmpc_ctor)(void *);
1849  auto *KmpcCtorTy =
1850  llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
1851  /*isVarArg*/ false)->getPointerTo();
1852  // typedef void *(*kmpc_cctor)(void *, void *);
1853  llvm::Type *KmpcCopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
1854  auto *KmpcCopyCtorTy =
1855  llvm::FunctionType::get(CGM.VoidPtrTy, KmpcCopyCtorTyArgs,
1856  /*isVarArg*/ false)
1857  ->getPointerTo();
1858  // typedef void (*kmpc_dtor)(void *);
1859  auto *KmpcDtorTy =
1860  llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy, /*isVarArg*/ false)
1861  ->getPointerTo();
1862  llvm::Type *FnTyArgs[] = {getIdentTyPointerTy(), CGM.VoidPtrTy, KmpcCtorTy,
1863  KmpcCopyCtorTy, KmpcDtorTy};
1864  auto *FnTy = llvm::FunctionType::get(CGM.VoidTy, FnTyArgs,
1865  /*isVarArg*/ false);
1866  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_threadprivate_register");
1867  break;
1868  }
1870  // Build void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid,
1871  // kmp_critical_name *crit);
1872  llvm::Type *TypeParams[] = {
1874  llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1875  auto *FnTy =
1876  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1877  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_critical");
1878  break;
1879  }
1881  // Build kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32
1882  // global_tid);
1883  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1884  auto *FnTy =
1885  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
1886  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_cancel_barrier");
1887  break;
1888  }
1889  case OMPRTL__kmpc_barrier: {
1890  // Build void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
1891  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1892  auto *FnTy =
1893  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1894  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_barrier");
1895  break;
1896  }
1898  // Build void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);
1899  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1900  auto *FnTy =
1901  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1902  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_for_static_fini");
1903  break;
1904  }
1906  // Build void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
1907  // kmp_int32 num_threads)
1908  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1909  CGM.Int32Ty};
1910  auto *FnTy =
1911  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1912  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_num_threads");
1913  break;
1914  }
1916  // Build void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
1917  // global_tid);
1918  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1919  auto *FnTy =
1920  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1921  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_serialized_parallel");
1922  break;
1923  }
1925  // Build void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
1926  // global_tid);
1927  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1928  auto *FnTy =
1929  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1930  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_serialized_parallel");
1931  break;
1932  }
1933  case OMPRTL__kmpc_flush: {
1934  // Build void __kmpc_flush(ident_t *loc);
1935  llvm::Type *TypeParams[] = {getIdentTyPointerTy()};
1936  auto *FnTy =
1937  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1938  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_flush");
1939  break;
1940  }
1941  case OMPRTL__kmpc_master: {
1942  // Build kmp_int32 __kmpc_master(ident_t *loc, kmp_int32 global_tid);
1943  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1944  auto *FnTy =
1945  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1946  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_master");
1947  break;
1948  }
1949  case OMPRTL__kmpc_end_master: {
1950  // Build void __kmpc_end_master(ident_t *loc, kmp_int32 global_tid);
1951  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1952  auto *FnTy =
1953  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1954  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_master");
1955  break;
1956  }
1958  // Build kmp_int32 __kmpc_omp_taskyield(ident_t *, kmp_int32 global_tid,
1959  // int end_part);
1960  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
1961  auto *FnTy =
1962  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1963  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_taskyield");
1964  break;
1965  }
1966  case OMPRTL__kmpc_single: {
1967  // Build kmp_int32 __kmpc_single(ident_t *loc, kmp_int32 global_tid);
1968  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1969  auto *FnTy =
1970  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1971  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_single");
1972  break;
1973  }
1974  case OMPRTL__kmpc_end_single: {
1975  // Build void __kmpc_end_single(ident_t *loc, kmp_int32 global_tid);
1976  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1977  auto *FnTy =
1978  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1979  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_single");
1980  break;
1981  }
1983  // Build kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
1984  // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
1985  // kmp_routine_entry_t *task_entry);
1986  assert(KmpRoutineEntryPtrTy != nullptr &&
1987  "Type kmp_routine_entry_t must be created.");
1988  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty,
1989  CGM.SizeTy, CGM.SizeTy, KmpRoutineEntryPtrTy};
1990  // Return void * and then cast to particular kmp_task_t type.
1991  auto *FnTy =
1992  llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
1993  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_alloc");
1994  break;
1995  }
1997  // Build kmp_task_t *__kmpc_omp_target_task_alloc(ident_t *, kmp_int32 gtid,
1998  // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
1999  // kmp_routine_entry_t *task_entry, kmp_int64 device_id);
2000  assert(KmpRoutineEntryPtrTy != nullptr &&
2001  "Type kmp_routine_entry_t must be created.");
2002  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty,
2003  CGM.SizeTy, CGM.SizeTy, KmpRoutineEntryPtrTy,
2004  CGM.Int64Ty};
2005  // Return void * and then cast to particular kmp_task_t type.
2006  auto *FnTy =
2007  llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
2008  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_target_task_alloc");
2009  break;
2010  }
2011  case OMPRTL__kmpc_omp_task: {
2012  // Build kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t
2013  // *new_task);
2014  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
2015  CGM.VoidPtrTy};
2016  auto *FnTy =
2017  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
2018  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task");
2019  break;
2020  }
2021  case OMPRTL__kmpc_copyprivate: {
2022  // Build void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
2023  // size_t cpy_size, void *cpy_data, void(*cpy_func)(void *, void *),
2024  // kmp_int32 didit);
2025  llvm::Type *CpyTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
2026  auto *CpyFnTy =
2027  llvm::FunctionType::get(CGM.VoidTy, CpyTypeParams, /*isVarArg=*/false);
2028  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.SizeTy,
2029  CGM.VoidPtrTy, CpyFnTy->getPointerTo(),
2030  CGM.Int32Ty};
2031  auto *FnTy =
2032  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2033  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_copyprivate");
2034  break;
2035  }
2036  case OMPRTL__kmpc_reduce: {
2037  // Build kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid,
2038  // kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void
2039  // (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck);
2040  llvm::Type *ReduceTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
2041  auto *ReduceFnTy = llvm::FunctionType::get(CGM.VoidTy, ReduceTypeParams,
2042  /*isVarArg=*/false);
2043  llvm::Type *TypeParams[] = {
2045  CGM.VoidPtrTy, ReduceFnTy->getPointerTo(),
2046  llvm::PointerType::getUnqual(KmpCriticalNameTy)};
2047  auto *FnTy =
2048  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
2049  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_reduce");
2050  break;
2051  }
2053  // Build kmp_int32 __kmpc_reduce_nowait(ident_t *loc, kmp_int32
2054  // global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
2055  // void (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name
2056  // *lck);
2057  llvm::Type *ReduceTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
2058  auto *ReduceFnTy = llvm::FunctionType::get(CGM.VoidTy, ReduceTypeParams,
2059  /*isVarArg=*/false);
2060  llvm::Type *TypeParams[] = {
2062  CGM.VoidPtrTy, ReduceFnTy->getPointerTo(),
2063  llvm::PointerType::getUnqual(KmpCriticalNameTy)};
2064  auto *FnTy =
2065  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
2066  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_reduce_nowait");
2067  break;
2068  }
2069  case OMPRTL__kmpc_end_reduce: {
2070  // Build void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
2071  // kmp_critical_name *lck);
2072  llvm::Type *TypeParams[] = {
2074  llvm::PointerType::getUnqual(KmpCriticalNameTy)};
2075  auto *FnTy =
2076  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2077  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_reduce");
2078  break;
2079  }
2081  // Build __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
2082  // kmp_critical_name *lck);
2083  llvm::Type *TypeParams[] = {
2085  llvm::PointerType::getUnqual(KmpCriticalNameTy)};
2086  auto *FnTy =
2087  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2088  RTLFn =
2089  CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_reduce_nowait");
2090  break;
2091  }
2093  // Build void __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t
2094  // *new_task);
2095  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
2096  CGM.VoidPtrTy};
2097  auto *FnTy =
2098  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2099  RTLFn =
2100  CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_begin_if0");
2101  break;
2102  }
2104  // Build void __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t
2105  // *new_task);
2106  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
2107  CGM.VoidPtrTy};
2108  auto *FnTy =
2109  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2110  RTLFn = CGM.CreateRuntimeFunction(FnTy,
2111  /*Name=*/"__kmpc_omp_task_complete_if0");
2112  break;
2113  }
2114  case OMPRTL__kmpc_ordered: {
2115  // Build void __kmpc_ordered(ident_t *loc, kmp_int32 global_tid);
2116  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
2117  auto *FnTy =
2118  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2119  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_ordered");
2120  break;
2121  }
2122  case OMPRTL__kmpc_end_ordered: {
2123  // Build void __kmpc_end_ordered(ident_t *loc, kmp_int32 global_tid);
2124  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
2125  auto *FnTy =
2126  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2127  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_ordered");
2128  break;
2129  }
2131  // Build kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32 global_tid);
2132  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
2133  auto *FnTy =
2134  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
2135  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_omp_taskwait");
2136  break;
2137  }
2138  case OMPRTL__kmpc_taskgroup: {
2139  // Build void __kmpc_taskgroup(ident_t *loc, kmp_int32 global_tid);
2140  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
2141  auto *FnTy =
2142  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2143  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_taskgroup");
2144  break;
2145  }
2147  // Build void __kmpc_end_taskgroup(ident_t *loc, kmp_int32 global_tid);
2148  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
2149  auto *FnTy =
2150  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2151  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_taskgroup");
2152  break;
2153  }
2155  // Build void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
2156  // int proc_bind)
2157  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
2158  auto *FnTy =
2159  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2160  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_proc_bind");
2161  break;
2162  }
2164  // Build kmp_int32 __kmpc_omp_task_with_deps(ident_t *, kmp_int32 gtid,
2165  // kmp_task_t *new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list,
2166  // kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
2167  llvm::Type *TypeParams[] = {
2170  auto *FnTy =
2171  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
2172  RTLFn =
2173  CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_with_deps");
2174  break;
2175  }
2177  // Build void __kmpc_omp_wait_deps(ident_t *, kmp_int32 gtid,
2178  // kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias,
2179  // kmp_depend_info_t *noalias_dep_list);
2180  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
2183  auto *FnTy =
2184  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2185  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_wait_deps");
2186  break;
2187  }
2189  // Build kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
2190  // global_tid, kmp_int32 cncl_kind)
2191  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
2192  auto *FnTy =
2193  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2194  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_cancellationpoint");
2195  break;
2196  }
2197  case OMPRTL__kmpc_cancel: {
2198  // Build kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
2199  // kmp_int32 cncl_kind)
2200  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
2201  auto *FnTy =
2202  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2203  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_cancel");
2204  break;
2205  }
2207  // Build void kmpc_push_num_teams (ident_t loc, kmp_int32 global_tid,
2208  // kmp_int32 num_teams, kmp_int32 num_threads)
2209  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty,
2210  CGM.Int32Ty};
2211  auto *FnTy =
2212  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2213  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_num_teams");
2214  break;
2215  }
2216  case OMPRTL__kmpc_fork_teams: {
2217  // Build void __kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro
2218  // microtask, ...);
2219  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
2221  auto *FnTy =
2222  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ true);
2223  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_fork_teams");
2224  if (auto *F = dyn_cast<llvm::Function>(RTLFn.getCallee())) {
2225  if (!F->hasMetadata(llvm::LLVMContext::MD_callback)) {
2226  llvm::LLVMContext &Ctx = F->getContext();
2227  llvm::MDBuilder MDB(Ctx);
2228  // Annotate the callback behavior of the __kmpc_fork_teams:
2229  // - The callback callee is argument number 2 (microtask).
2230  // - The first two arguments of the callback callee are unknown (-1).
2231  // - All variadic arguments to the __kmpc_fork_teams are passed to the
2232  // callback callee.
2233  F->addMetadata(
2234  llvm::LLVMContext::MD_callback,
2235  *llvm::MDNode::get(Ctx, {MDB.createCallbackEncoding(
2236  2, {-1, -1},
2237  /* VarArgsArePassed */ true)}));
2238  }
2239  }
2240  break;
2241  }
2242  case OMPRTL__kmpc_taskloop: {
2243  // Build void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int
2244  // if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int
2245  // sched, kmp_uint64 grainsize, void *task_dup);
2246  llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
2247  CGM.IntTy,
2248  CGM.VoidPtrTy,
2249  CGM.IntTy,
2250  CGM.Int64Ty->getPointerTo(),
2251  CGM.Int64Ty->getPointerTo(),
2252  CGM.Int64Ty,
2253  CGM.IntTy,
2254  CGM.IntTy,
2255  CGM.Int64Ty,
2256  CGM.VoidPtrTy};
2257  auto *FnTy =
2258  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2259  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_taskloop");
2260  break;
2261  }
2263  // Build void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid, kmp_int32
2264  // num_dims, struct kmp_dim *dims);
2265  llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
2266  CGM.Int32Ty,
2267  CGM.Int32Ty,
2268  CGM.VoidPtrTy};
2269  auto *FnTy =
2270  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2271  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_init");
2272  break;
2273  }
2275  // Build void __kmpc_doacross_fini(ident_t *loc, kmp_int32 gtid);
2276  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
2277  auto *FnTy =
2278  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2279  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_fini");
2280  break;
2281  }
2283  // Build void __kmpc_doacross_post(ident_t *loc, kmp_int32 gtid, kmp_int64
2284  // *vec);
2285  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
2286  CGM.Int64Ty->getPointerTo()};
2287  auto *FnTy =
2288  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2289  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_post");
2290  break;
2291  }
2293  // Build void __kmpc_doacross_wait(ident_t *loc, kmp_int32 gtid, kmp_int64
2294  // *vec);
2295  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
2296  CGM.Int64Ty->getPointerTo()};
2297  auto *FnTy =
2298  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2299  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_wait");
2300  break;
2301  }
2303  // Build void *__kmpc_task_reduction_init(int gtid, int num_data, void
2304  // *data);
2305  llvm::Type *TypeParams[] = {CGM.IntTy, CGM.IntTy, CGM.VoidPtrTy};
2306  auto *FnTy =
2307  llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
2308  RTLFn =
2309  CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_task_reduction_init");
2310  break;
2311  }
2313  // Build void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void
2314  // *d);
2315  llvm::Type *TypeParams[] = {CGM.IntTy, CGM.VoidPtrTy, CGM.VoidPtrTy};
2316  auto *FnTy =
2317  llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
2318  RTLFn = CGM.CreateRuntimeFunction(
2319  FnTy, /*Name=*/"__kmpc_task_reduction_get_th_data");
2320  break;
2321  }
2322  case OMPRTL__kmpc_alloc: {
2323  // Build to void *__kmpc_alloc(int gtid, size_t sz, omp_allocator_handle_t
2324  // al); omp_allocator_handle_t type is void *.
2325  llvm::Type *TypeParams[] = {CGM.IntTy, CGM.SizeTy, CGM.VoidPtrTy};
2326  auto *FnTy =
2327  llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
2328  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_alloc");
2329  break;
2330  }
2331  case OMPRTL__kmpc_free: {
2332  // Build to void __kmpc_free(int gtid, void *ptr, omp_allocator_handle_t
2333  // al); omp_allocator_handle_t type is void *.
2334  llvm::Type *TypeParams[] = {CGM.IntTy, CGM.VoidPtrTy, CGM.VoidPtrTy};
2335  auto *FnTy =
2336  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2337  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_free");
2338  break;
2339  }
2341  // Build void __kmpc_push_target_tripcount(int64_t device_id, kmp_uint64
2342  // size);
2343  llvm::Type *TypeParams[] = {CGM.Int64Ty, CGM.Int64Ty};
2344  llvm::FunctionType *FnTy =
2345  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2346  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_target_tripcount");
2347  break;
2348  }
2349  case OMPRTL__tgt_target: {
2350  // Build int32_t __tgt_target(int64_t device_id, void *host_ptr, int32_t
2351  // arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
2352  // *arg_types);
2353  llvm::Type *TypeParams[] = {CGM.Int64Ty,
2354  CGM.VoidPtrTy,
2355  CGM.Int32Ty,
2356  CGM.VoidPtrPtrTy,
2357  CGM.VoidPtrPtrTy,
2358  CGM.Int64Ty->getPointerTo(),
2359  CGM.Int64Ty->getPointerTo()};
2360  auto *FnTy =
2361  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2362  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target");
2363  break;
2364  }
2366  // Build int32_t __tgt_target_nowait(int64_t device_id, void *host_ptr,
2367  // int32_t arg_num, void** args_base, void **args, int64_t *arg_sizes,
2368  // int64_t *arg_types);
2369  llvm::Type *TypeParams[] = {CGM.Int64Ty,
2370  CGM.VoidPtrTy,
2371  CGM.Int32Ty,
2372  CGM.VoidPtrPtrTy,
2373  CGM.VoidPtrPtrTy,
2374  CGM.Int64Ty->getPointerTo(),
2375  CGM.Int64Ty->getPointerTo()};
2376  auto *FnTy =
2377  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2378  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_nowait");
2379  break;
2380  }
2381  case OMPRTL__tgt_target_teams: {
2382  // Build int32_t __tgt_target_teams(int64_t device_id, void *host_ptr,
2383  // int32_t arg_num, void** args_base, void **args, int64_t *arg_sizes,
2384  // int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
2385  llvm::Type *TypeParams[] = {CGM.Int64Ty,
2386  CGM.VoidPtrTy,
2387  CGM.Int32Ty,
2388  CGM.VoidPtrPtrTy,
2389  CGM.VoidPtrPtrTy,
2390  CGM.Int64Ty->getPointerTo(),
2391  CGM.Int64Ty->getPointerTo(),
2392  CGM.Int32Ty,
2393  CGM.Int32Ty};
2394  auto *FnTy =
2395  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2396  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_teams");
2397  break;
2398  }
2400  // Build int32_t __tgt_target_teams_nowait(int64_t device_id, void
2401  // *host_ptr, int32_t arg_num, void** args_base, void **args, int64_t
2402  // *arg_sizes, int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
2403  llvm::Type *TypeParams[] = {CGM.Int64Ty,
2404  CGM.VoidPtrTy,
2405  CGM.Int32Ty,
2406  CGM.VoidPtrPtrTy,
2407  CGM.VoidPtrPtrTy,
2408  CGM.Int64Ty->getPointerTo(),
2409  CGM.Int64Ty->getPointerTo(),
2410  CGM.Int32Ty,
2411  CGM.Int32Ty};
2412  auto *FnTy =
2413  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2414  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_teams_nowait");
2415  break;
2416  }
2418  // Build void __tgt_register_requires(int64_t flags);
2419  llvm::Type *TypeParams[] = {CGM.Int64Ty};
2420  auto *FnTy =
2421  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2422  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_register_requires");
2423  break;
2424  }
2425  case OMPRTL__tgt_register_lib: {
2426  // Build void __tgt_register_lib(__tgt_bin_desc *desc);
2427  QualType ParamTy =
2429  llvm::Type *TypeParams[] = {CGM.getTypes().ConvertTypeForMem(ParamTy)};
2430  auto *FnTy =
2431  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2432  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_register_lib");
2433  break;
2434  }
2436  // Build void __tgt_unregister_lib(__tgt_bin_desc *desc);
2437  QualType ParamTy =
2439  llvm::Type *TypeParams[] = {CGM.getTypes().ConvertTypeForMem(ParamTy)};
2440  auto *FnTy =
2441  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2442  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_unregister_lib");
2443  break;
2444  }
2446  // Build void __tgt_target_data_begin(int64_t device_id, int32_t arg_num,
2447  // void** args_base, void **args, int64_t *arg_sizes, int64_t *arg_types);
2448  llvm::Type *TypeParams[] = {CGM.Int64Ty,
2449  CGM.Int32Ty,
2450  CGM.VoidPtrPtrTy,
2451  CGM.VoidPtrPtrTy,
2452  CGM.Int64Ty->getPointerTo(),
2453  CGM.Int64Ty->getPointerTo()};
2454  auto *FnTy =
2455  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2456  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_begin");
2457  break;
2458  }
2460  // Build void __tgt_target_data_begin_nowait(int64_t device_id, int32_t
2461  // arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
2462  // *arg_types);
2463  llvm::Type *TypeParams[] = {CGM.Int64Ty,
2464  CGM.Int32Ty,
2465  CGM.VoidPtrPtrTy,
2466  CGM.VoidPtrPtrTy,
2467  CGM.Int64Ty->getPointerTo(),
2468  CGM.Int64Ty->getPointerTo()};
2469  auto *FnTy =
2470  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2471  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_begin_nowait");
2472  break;
2473  }
2475  // Build void __tgt_target_data_end(int64_t device_id, int32_t arg_num,
2476  // void** args_base, void **args, int64_t *arg_sizes, int64_t *arg_types);
2477  llvm::Type *TypeParams[] = {CGM.Int64Ty,
2478  CGM.Int32Ty,
2479  CGM.VoidPtrPtrTy,
2480  CGM.VoidPtrPtrTy,
2481  CGM.Int64Ty->getPointerTo(),
2482  CGM.Int64Ty->getPointerTo()};
2483  auto *FnTy =
2484  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2485  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_end");
2486  break;
2487  }
2489  // Build void __tgt_target_data_end_nowait(int64_t device_id, int32_t
2490  // arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
2491  // *arg_types);
2492  llvm::Type *TypeParams[] = {CGM.Int64Ty,
2493  CGM.Int32Ty,
2494  CGM.VoidPtrPtrTy,
2495  CGM.VoidPtrPtrTy,
2496  CGM.Int64Ty->getPointerTo(),
2497  CGM.Int64Ty->getPointerTo()};
2498  auto *FnTy =
2499  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2500  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_end_nowait");
2501  break;
2502  }
2504  // Build void __tgt_target_data_update(int64_t device_id, int32_t arg_num,
2505  // void** args_base, void **args, int64_t *arg_sizes, int64_t *arg_types);
2506  llvm::Type *TypeParams[] = {CGM.Int64Ty,
2507  CGM.Int32Ty,
2508  CGM.VoidPtrPtrTy,
2509  CGM.VoidPtrPtrTy,
2510  CGM.Int64Ty->getPointerTo(),
2511  CGM.Int64Ty->getPointerTo()};
2512  auto *FnTy =
2513  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2514  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_update");
2515  break;
2516  }
2518  // Build void __tgt_target_data_update_nowait(int64_t device_id, int32_t
2519  // arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
2520  // *arg_types);
2521  llvm::Type *TypeParams[] = {CGM.Int64Ty,
2522  CGM.Int32Ty,
2523  CGM.VoidPtrPtrTy,
2524  CGM.VoidPtrPtrTy,
2525  CGM.Int64Ty->getPointerTo(),
2526  CGM.Int64Ty->getPointerTo()};
2527  auto *FnTy =
2528  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2529  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_update_nowait");
2530  break;
2531  }
2533  // Build int64_t __tgt_mapper_num_components(void *rt_mapper_handle);
2534  llvm::Type *TypeParams[] = {CGM.VoidPtrTy};
2535  auto *FnTy =
2536  llvm::FunctionType::get(CGM.Int64Ty, TypeParams, /*isVarArg*/ false);
2537  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_mapper_num_components");
2538  break;
2539  }
2541  // Build void __tgt_push_mapper_component(void *rt_mapper_handle, void
2542  // *base, void *begin, int64_t size, int64_t type);
2543  llvm::Type *TypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy, CGM.VoidPtrTy,
2544  CGM.Int64Ty, CGM.Int64Ty};
2545  auto *FnTy =
2546  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2547  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_push_mapper_component");
2548  break;
2549  }
2550  }
2551  assert(RTLFn && "Unable to find OpenMP runtime function");
2552  return RTLFn;
2553 }
2554 
2555 llvm::FunctionCallee
2556 CGOpenMPRuntime::createForStaticInitFunction(unsigned IVSize, bool IVSigned) {
2557  assert((IVSize == 32 || IVSize == 64) &&
2558  "IV size is not compatible with the omp runtime");
2559  StringRef Name = IVSize == 32 ? (IVSigned ? "__kmpc_for_static_init_4"
2560  : "__kmpc_for_static_init_4u")
2561  : (IVSigned ? "__kmpc_for_static_init_8"
2562  : "__kmpc_for_static_init_8u");
2563  llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
2564  auto *PtrTy = llvm::PointerType::getUnqual(ITy);
2565  llvm::Type *TypeParams[] = {
2566  getIdentTyPointerTy(), // loc
2567  CGM.Int32Ty, // tid
2568  CGM.Int32Ty, // schedtype
2569  llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
2570  PtrTy, // p_lower
2571  PtrTy, // p_upper
2572  PtrTy, // p_stride
2573  ITy, // incr
2574  ITy // chunk
2575  };
2576  auto *FnTy =
2577  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2578  return CGM.CreateRuntimeFunction(FnTy, Name);
2579 }
2580 
2581 llvm::FunctionCallee
2582 CGOpenMPRuntime::createDispatchInitFunction(unsigned IVSize, bool IVSigned) {
2583  assert((IVSize == 32 || IVSize == 64) &&
2584  "IV size is not compatible with the omp runtime");
2585  StringRef Name =
2586  IVSize == 32
2587  ? (IVSigned ? "__kmpc_dispatch_init_4" : "__kmpc_dispatch_init_4u")
2588  : (IVSigned ? "__kmpc_dispatch_init_8" : "__kmpc_dispatch_init_8u");
2589  llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
2590  llvm::Type *TypeParams[] = { getIdentTyPointerTy(), // loc
2591  CGM.Int32Ty, // tid
2592  CGM.Int32Ty, // schedtype
2593  ITy, // lower
2594  ITy, // upper
2595  ITy, // stride
2596  ITy // chunk
2597  };
2598  auto *FnTy =
2599  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2600  return CGM.CreateRuntimeFunction(FnTy, Name);
2601 }
2602 
2603 llvm::FunctionCallee
2604 CGOpenMPRuntime::createDispatchFiniFunction(unsigned IVSize, bool IVSigned) {
2605  assert((IVSize == 32 || IVSize == 64) &&
2606  "IV size is not compatible with the omp runtime");
2607  StringRef Name =
2608  IVSize == 32
2609  ? (IVSigned ? "__kmpc_dispatch_fini_4" : "__kmpc_dispatch_fini_4u")
2610  : (IVSigned ? "__kmpc_dispatch_fini_8" : "__kmpc_dispatch_fini_8u");
2611  llvm::Type *TypeParams[] = {
2612  getIdentTyPointerTy(), // loc
2613  CGM.Int32Ty, // tid
2614  };
2615  auto *FnTy =
2616  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2617  return CGM.CreateRuntimeFunction(FnTy, Name);
2618 }
2619 
2620 llvm::FunctionCallee
2621 CGOpenMPRuntime::createDispatchNextFunction(unsigned IVSize, bool IVSigned) {
2622  assert((IVSize == 32 || IVSize == 64) &&
2623  "IV size is not compatible with the omp runtime");
2624  StringRef Name =
2625  IVSize == 32
2626  ? (IVSigned ? "__kmpc_dispatch_next_4" : "__kmpc_dispatch_next_4u")
2627  : (IVSigned ? "__kmpc_dispatch_next_8" : "__kmpc_dispatch_next_8u");
2628  llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
2629  auto *PtrTy = llvm::PointerType::getUnqual(ITy);
2630  llvm::Type *TypeParams[] = {
2631  getIdentTyPointerTy(), // loc
2632  CGM.Int32Ty, // tid
2633  llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
2634  PtrTy, // p_lower
2635  PtrTy, // p_upper
2636  PtrTy // p_stride
2637  };
2638  auto *FnTy =
2639  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2640  return CGM.CreateRuntimeFunction(FnTy, Name);
2641 }
2642 
2643 /// Obtain information that uniquely identifies a target entry. This
2644 /// consists of the file and device IDs as well as line number associated with
2645 /// the relevant entry source location.
// NOTE(review): doxygen capture — the function's signature line (source line
// 2646) and the line that introduces `SM` (2649, presumably a SourceManager
// taken from the ASTContext — TODO confirm) are missing from this excerpt.
2647  unsigned &DeviceID, unsigned &FileID,
2648  unsigned &LineNum) {
2650 
2651  // The loc should be always valid and have a file ID (the user cannot use
2652  // #pragma directives in macros)
2653 
2654  assert(Loc.isValid() && "Source location is expected to be always valid.");
2655 
2656  PresumedLoc PLoc = SM.getPresumedLoc(Loc);
2657  assert(PLoc.isValid() && "Source location is expected to be always valid.");
2658 
// Identify the file via its filesystem UniqueID; a stat failure is reported
// as a diagnostic rather than aborting.
2659  llvm::sys::fs::UniqueID ID;
2660  if (auto EC = llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID))
2661  SM.getDiagnostics().Report(diag::err_cannot_open_file)
2662  << PLoc.getFilename() << EC.message();
2663 
// Out-parameters: device/file come from the UniqueID, line from the
// presumed location.
2664  DeviceID = ID.getDevice();
2665  FileID = ID.getFile();
2666  LineNum = PLoc.getLine();
2667 }
2668 
// Returns the address of a "_decl_tgt_ref_ptr" reference pointer for a
// declare-target variable, creating the weak global lazily on first use.
// NOTE(review): doxygen capture — the signature line (2669) and several
// continuation lines (2672, 2676, 2683, 2693) are missing from this excerpt.
2670  if (CGM.getLangOpts().OpenMPSimd)
2671  return Address::invalid();
2673  OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
2674  if (Res && (*Res == OMPDeclareTargetDeclAttr::MT_Link ||
2675  (*Res == OMPDeclareTargetDeclAttr::MT_To &&
// Build the mangled name of the reference pointer; internal symbols get a
// file-id suffix so they do not collide across translation units.
2677  SmallString<64> PtrName;
2678  {
2679  llvm::raw_svector_ostream OS(PtrName);
2680  OS << CGM.getMangledName(GlobalDecl(VD));
2681  if (!VD->isExternallyVisible()) {
2682  unsigned DeviceID, FileID, Line;
2684  VD->getCanonicalDecl()->getBeginLoc(),
2685  DeviceID, FileID, Line);
2686  OS << llvm::format("_%x", FileID);
2687  }
2688  OS << "_decl_tgt_ref_ptr";
2689  }
// Create the pointer global only if it does not already exist in the module.
2690  llvm::Value *Ptr = CGM.getModule().getNamedValue(PtrName);
2691  if (!Ptr) {
2692  QualType PtrTy = CGM.getContext().getPointerType(VD->getType());
2694  PtrName);
2695 
2696  auto *GV = cast<llvm::GlobalVariable>(Ptr);
2697  GV->setLinkage(llvm::GlobalValue::WeakAnyLinkage);
2698 
// On the host the pointer is initialized to the variable's address; on the
// device the initializer is left to the offload runtime.
2699  if (!CGM.getLangOpts().OpenMPIsDevice)
2700  GV->setInitializer(CGM.GetAddrOfGlobal(VD));
2701  registerTargetGlobalVariable(VD, cast<llvm::Constant>(Ptr));
2702  }
2703  return Address(Ptr, CGM.getContext().getDeclAlign(VD));
2704  }
2705  return Address::invalid();
2706 }
2707 
// Lazily creates (or looks up) the per-variable "$cache" internal global used
// by __kmpc_threadprivate_cached.
// NOTE(review): doxygen capture — the function-name line (2709) and two
// continuation lines (2711, 2714) are missing from this excerpt.
2708 llvm::Constant *
2710  assert(!CGM.getLangOpts().OpenMPUseTLS ||
2712  // Lookup the entry, lazily creating it if necessary.
2713  std::string Suffix = getName({"cache", ""});
2715  CGM.Int8PtrPtrTy, Twine(CGM.getMangledName(VD)).concat(Suffix));
2716 }
2717 
// Returns the address of the calling thread's copy of a threadprivate
// variable, obtained via a runtime call; falls back to the original address
// when native TLS is used.
// NOTE(review): doxygen capture — the signature line (2718) and continuation
// lines (2723, 2730, 2731, 2733) are missing from this excerpt.
2719  const VarDecl *VD,
2720  Address VDAddr,
2721  SourceLocation Loc) {
2722  if (CGM.getLangOpts().OpenMPUseTLS &&
2724  return VDAddr;
2725 
2726  llvm::Type *VarTy = VDAddr.getElementType();
// Arguments: location, thread id, variable address cast to i8*, plus trailing
// arguments on the missing lines (presumably size and cache — TODO confirm).
2727  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
2728  CGF.Builder.CreatePointerCast(VDAddr.getPointer(),
2729  CGM.Int8PtrTy),
2732  return Address(CGF.EmitRuntimeCall(
2734  VDAddr.getAlignment());
2735 }
2736 
// Emits the runtime calls that initialize a threadprivate variable: first an
// OpenMP runtime initialization, then registration of its ctor/cctor/dtor.
// NOTE(review): doxygen capture — the function-name line (2737) and
// continuation lines (2743, 2751) are missing from this excerpt.
2738  CodeGenFunction &CGF, Address VDAddr, llvm::Value *Ctor,
2739  llvm::Value *CopyCtor, llvm::Value *Dtor, SourceLocation Loc) {
2740  // Call kmp_int32 __kmpc_global_thread_num(&loc) to init OpenMP runtime
2741  // library.
2742  llvm::Value *OMPLoc = emitUpdateLocation(CGF, Loc);
2744  OMPLoc);
2745  // Call __kmpc_threadprivate_register(&loc, &var, ctor, cctor/*NULL*/, dtor)
2746  // to register constructor/destructor for variable.
2747  llvm::Value *Args[] = {
2748  OMPLoc, CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.VoidPtrTy),
2749  Ctor, CopyCtor, Dtor};
2750  CGF.EmitRuntimeCall(
2752 }
2753 
// Emits (once per mangled name) the constructor/destructor machinery for a
// threadprivate variable definition and registers it with the runtime,
// either inside an existing CGF or via a standalone init function.
// NOTE(review): doxygen capture — the function-name line (2754) and a few
// continuation lines (2758, 2774, 2807) are missing from this excerpt.
2755  const VarDecl *VD, Address VDAddr, SourceLocation Loc,
2756  bool PerformInit, CodeGenFunction *CGF) {
2757  if (CGM.getLangOpts().OpenMPUseTLS &&
2759  return nullptr;
2760 
// Emit only for the variable's definition, and only the first time this
// mangled name is seen.
2761  VD = VD->getDefinition(CGM.getContext());
2762  if (VD && ThreadPrivateWithDefinition.insert(CGM.getMangledName(VD)).second) {
2763  QualType ASTTy = VD->getType();
2764 
2765  llvm::Value *Ctor = nullptr, *CopyCtor = nullptr, *Dtor = nullptr;
2766  const Expr *Init = VD->getAnyInitializer();
2767  if (CGM.getLangOpts().CPlusPlus && PerformInit) {
2768  // Generate function that re-emits the declaration's initializer into the
2769  // threadprivate copy of the variable VD
2770  CodeGenFunction CtorCGF(CGM);
2771  FunctionArgList Args;
2772  ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, Loc,
2773  /*Id=*/nullptr, CGM.getContext().VoidPtrTy,
2775  Args.push_back(&Dst);
2776 
2777  const auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
2778  CGM.getContext().VoidPtrTy, Args);
2779  llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
2780  std::string Name = getName({"__kmpc_global_ctor_", ""});
2781  llvm::Function *Fn =
2782  CGM.CreateGlobalInitOrDestructFunction(FTy, Name, FI, Loc);
2783  CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidPtrTy, Fn, FI,
2784  Args, Loc, Loc);
2785  llvm::Value *ArgVal = CtorCGF.EmitLoadOfScalar(
2786  CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
2787  CGM.getContext().VoidPtrTy, Dst.getLocation());
2788  Address Arg = Address(ArgVal, VDAddr.getAlignment());
2789  Arg = CtorCGF.Builder.CreateElementBitCast(
2790  Arg, CtorCGF.ConvertTypeForMem(ASTTy));
2791  CtorCGF.EmitAnyExprToMem(Init, Arg, Init->getType().getQualifiers(),
2792  /*IsInitializer=*/true);
// The ctor returns the destination pointer it was handed.
2793  ArgVal = CtorCGF.EmitLoadOfScalar(
2794  CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
2795  CGM.getContext().VoidPtrTy, Dst.getLocation());
2796  CtorCGF.Builder.CreateStore(ArgVal, CtorCGF.ReturnValue);
2797  CtorCGF.FinishFunction();
2798  Ctor = Fn;
2799  }
2800  if (VD->getType().isDestructedType() != QualType::DK_none) {
2801  // Generate function that emits destructor call for the threadprivate copy
2802  // of the variable VD
2803  CodeGenFunction DtorCGF(CGM);
2804  FunctionArgList Args;
2805  ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, Loc,
2806  /*Id=*/nullptr, CGM.getContext().VoidPtrTy,
2808  Args.push_back(&Dst);
2809 
2810  const auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
2811  CGM.getContext().VoidTy, Args);
2812  llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
2813  std::string Name = getName({"__kmpc_global_dtor_", ""});
2814  llvm::Function *Fn =
2815  CGM.CreateGlobalInitOrDestructFunction(FTy, Name, FI, Loc);
2816  auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF);
2817  DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI, Args,
2818  Loc, Loc);
2819  // Create a scope with an artificial location for the body of this function.
2820  auto AL = ApplyDebugLocation::CreateArtificial(DtorCGF);
2821  llvm::Value *ArgVal = DtorCGF.EmitLoadOfScalar(
2822  DtorCGF.GetAddrOfLocalVar(&Dst),
2823  /*Volatile=*/false, CGM.getContext().VoidPtrTy, Dst.getLocation());
2824  DtorCGF.emitDestroy(Address(ArgVal, VDAddr.getAlignment()), ASTTy,
2825  DtorCGF.getDestroyer(ASTTy.isDestructedType()),
2826  DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
2827  DtorCGF.FinishFunction();
2828  Dtor = Fn;
2829  }
2830  // Do not emit init function if it is not required.
2831  if (!Ctor && !Dtor)
2832  return nullptr;
2833 
2834  llvm::Type *CopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
2835  auto *CopyCtorTy = llvm::FunctionType::get(CGM.VoidPtrTy, CopyCtorTyArgs,
2836  /*isVarArg=*/false)
2837  ->getPointerTo();
2838  // Copying constructor for the threadprivate variable.
2839  // Must be NULL - reserved by runtime, but currently it requires that this
2840  // parameter is always NULL. Otherwise it fires assertion.
2841  CopyCtor = llvm::Constant::getNullValue(CopyCtorTy);
// Missing ctor/dtor slots are filled with typed null function pointers so
// the register call always receives all five arguments.
2842  if (Ctor == nullptr) {
2843  auto *CtorTy = llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
2844  /*isVarArg=*/false)
2845  ->getPointerTo();
2846  Ctor = llvm::Constant::getNullValue(CtorTy);
2847  }
2848  if (Dtor == nullptr) {
2849  auto *DtorTy = llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy,
2850  /*isVarArg=*/false)
2851  ->getPointerTo();
2852  Dtor = llvm::Constant::getNullValue(DtorTy);
2853  }
// No enclosing CGF: synthesize a standalone init function, emit the
// registration into it, and hand it back to the caller.
2854  if (!CGF) {
2855  auto *InitFunctionTy =
2856  llvm::FunctionType::get(CGM.VoidTy, /*isVarArg*/ false);
2857  std::string Name = getName({"__omp_threadprivate_init_", ""});
2858  llvm::Function *InitFunction = CGM.CreateGlobalInitOrDestructFunction(
2859  InitFunctionTy, Name, CGM.getTypes().arrangeNullaryFunction());
2860  CodeGenFunction InitCGF(CGM);
2861  FunctionArgList ArgList;
2862  InitCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, InitFunction,
2863  CGM.getTypes().arrangeNullaryFunction(), ArgList,
2864  Loc, Loc);
2865  emitThreadPrivateVarInit(InitCGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
2866  InitCGF.FinishFunction();
2867  return InitFunction;
2868  }
2869  emitThreadPrivateVarInit(*CGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
2870  }
2871  return nullptr;
2872 }
2873 
// Emits and registers offload ctor/dtor entries for a declare-target variable
// definition; returns whether this is a device compilation.
// NOTE(review): doxygen capture — the function-name line (2874) and several
// continuation lines (2880, 2884, 2892, 2916, 2942, 2944, 2954, 2980, 2982)
// are missing from this excerpt.
2875  llvm::GlobalVariable *Addr,
2876  bool PerformInit) {
2877  if (CGM.getLangOpts().OMPTargetTriples.empty() &&
2878  !CGM.getLangOpts().OpenMPIsDevice)
2879  return false;
2881  OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
2882  if (!Res || *Res == OMPDeclareTargetDeclAttr::MT_Link ||
2883  (*Res == OMPDeclareTargetDeclAttr::MT_To &&
2885  return CGM.getLangOpts().OpenMPIsDevice;
// Emit only once per definition (tracked by mangled name).
2886  VD = VD->getDefinition(CGM.getContext());
2887  if (VD && !DeclareTargetWithDefinition.insert(CGM.getMangledName(VD)).second)
2888  return CGM.getLangOpts().OpenMPIsDevice;
2889 
2890  QualType ASTTy = VD->getType();
2891 
2893  // Produce the unique prefix to identify the new target regions. We use
2894  // the source location of the variable declaration which we know to not
2895  // conflict with any target region.
2896  unsigned DeviceID;
2897  unsigned FileID;
2898  unsigned Line;
2899  getTargetEntryUniqueInfo(CGM.getContext(), Loc, DeviceID, FileID, Line);
2900  SmallString<128> Buffer, Out;
2901  {
2902  llvm::raw_svector_ostream OS(Buffer);
2903  OS << "__omp_offloading_" << llvm::format("_%x", DeviceID)
2904  << llvm::format("_%x_", FileID) << VD->getName() << "_l" << Line;
2905  }
2906 
2907  const Expr *Init = VD->getAnyInitializer();
2908  if (CGM.getLangOpts().CPlusPlus && PerformInit) {
2909  llvm::Constant *Ctor;
2910  llvm::Constant *ID;
2911  if (CGM.getLangOpts().OpenMPIsDevice) {
2912  // Generate function that re-emits the declaration's initializer into
2913  // the threadprivate copy of the variable VD
2914  CodeGenFunction CtorCGF(CGM);
2915 
2917  llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
2918  llvm::Function *Fn = CGM.CreateGlobalInitOrDestructFunction(
2919  FTy, Twine(Buffer, "_ctor"), FI, Loc);
2920  auto NL = ApplyDebugLocation::CreateEmpty(CtorCGF);
2921  CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI,
2922  FunctionArgList(), Loc, Loc);
2923  auto AL = ApplyDebugLocation::CreateArtificial(CtorCGF);
2924  CtorCGF.EmitAnyExprToMem(Init,
2925  Address(Addr, CGM.getContext().getDeclAlign(VD)),
2926  Init->getType().getQualifiers(),
2927  /*IsInitializer=*/true);
2928  CtorCGF.FinishFunction();
2929  Ctor = Fn;
2930  ID = llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
2931  CGM.addUsedGlobal(cast<llvm::GlobalValue>(Ctor));
2932  } else {
// Host side: emit a private placeholder global that serves as the entry ID.
2933  Ctor = new llvm::GlobalVariable(
2934  CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
2935  llvm::GlobalValue::PrivateLinkage,
2936  llvm::Constant::getNullValue(CGM.Int8Ty), Twine(Buffer, "_ctor"));
2937  ID = Ctor;
2938  }
2939 
2940  // Register the information for the entry associated with the constructor.
2941  Out.clear();
2943  DeviceID, FileID, Twine(Buffer, "_ctor").toStringRef(Out), Line, Ctor,
2945  }
2946  if (VD->getType().isDestructedType() != QualType::DK_none) {
2947  llvm::Constant *Dtor;
2948  llvm::Constant *ID;
2949  if (CGM.getLangOpts().OpenMPIsDevice) {
2950  // Generate function that emits destructor call for the threadprivate
2951  // copy of the variable VD
2952  CodeGenFunction DtorCGF(CGM);
2953 
2955  llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
2956  llvm::Function *Fn = CGM.CreateGlobalInitOrDestructFunction(
2957  FTy, Twine(Buffer, "_dtor"), FI, Loc);
2958  auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF);
2959  DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI,
2960  FunctionArgList(), Loc, Loc);
2961  // Create a scope with an artificial location for the body of this
2962  // function.
2963  auto AL = ApplyDebugLocation::CreateArtificial(DtorCGF);
2964  DtorCGF.emitDestroy(Address(Addr, CGM.getContext().getDeclAlign(VD)),
2965  ASTTy, DtorCGF.getDestroyer(ASTTy.isDestructedType()),
2966  DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
2967  DtorCGF.FinishFunction();
2968  Dtor = Fn;
2969  ID = llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
2970  CGM.addUsedGlobal(cast<llvm::GlobalValue>(Dtor));
2971  } else {
2972  Dtor = new llvm::GlobalVariable(
2973  CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
2974  llvm::GlobalValue::PrivateLinkage,
2975  llvm::Constant::getNullValue(CGM.Int8Ty), Twine(Buffer, "_dtor"));
2976  ID = Dtor;
2977  }
2978  // Register the information for the entry associated with the destructor.
2979  Out.clear();
2981  DeviceID, FileID, Twine(Buffer, "_dtor").toStringRef(Out), Line, Dtor,
2983  }
2984  return CGM.getLangOpts().OpenMPIsDevice;
2985 }
2986 
// Returns the thread-local address of a compiler-generated ("artificial")
// threadprivate value, backed by an internal global and a runtime cache.
// NOTE(review): doxygen capture — the signature line (2987) and continuation
// lines (2996, 2998, 3001, 3004, 3006) are missing from this excerpt.
2988  QualType VarType,
2989  StringRef Name) {
2990  std::string Suffix = getName({"artificial", ""});
2991  std::string CacheSuffix = getName({"cache", ""});
2992  llvm::Type *VarLVType = CGF.ConvertTypeForMem(VarType);
// Backing storage: an internal global named "<Name>$artificial$".
2993  llvm::Value *GAddr =
2994  getOrCreateInternalVariable(VarLVType, Twine(Name).concat(Suffix));
2995  llvm::Value *Args[] = {
2997  getThreadID(CGF, SourceLocation()),
2999  CGF.Builder.CreateIntCast(CGF.getTypeSize(VarType), CGM.SizeTy,
3000  /*isSigned=*/false),
3002  CGM.VoidPtrPtrTy, Twine(Name).concat(Suffix).concat(CacheSuffix))};
3003  return Address(
3005  CGF.EmitRuntimeCall(
3007  VarLVType->getPointerTo(/*AddrSpace=*/0)),
3008  CGM.getPointerAlign());
3009 }
3010 
// Emits an if/else for an OpenMP 'if' clause condition, constant-folding the
// branch away when the condition is a compile-time constant.
// NOTE(review): doxygen capture — the signature line (3011) and two lines in
// the else-emission path (3040, 3044) are missing from this excerpt.
3012  const RegionCodeGenTy &ThenGen,
3013  const RegionCodeGenTy &ElseGen) {
3014  CodeGenFunction::LexicalScope ConditionScope(CGF, Cond->getSourceRange());
3015 
3016  // If the condition constant folds and can be elided, try to avoid emitting
3017  // the condition and the dead arm of the if/else.
3018  bool CondConstant;
3019  if (CGF.ConstantFoldsToSimpleInteger(Cond, CondConstant)) {
3020  if (CondConstant)
3021  ThenGen(CGF);
3022  else
3023  ElseGen(CGF);
3024  return;
3025  }
3026 
3027  // Otherwise, the condition did not fold, or we couldn't elide it. Just
3028  // emit the conditional branch.
3029  llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("omp_if.then");
3030  llvm::BasicBlock *ElseBlock = CGF.createBasicBlock("omp_if.else");
3031  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("omp_if.end");
3032  CGF.EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, /*TrueCount=*/0);
3033 
3034  // Emit the 'then' code.
3035  CGF.EmitBlock(ThenBlock);
3036  ThenGen(CGF);
3037  CGF.EmitBranch(ContBlock);
3038  // Emit the 'else' code if present.
3039  // There is no need to emit line number for unconditional branch.
3041  CGF.EmitBlock(ElseBlock);
3042  ElseGen(CGF);
3043  // There is no need to emit line number for unconditional branch.
3045  CGF.EmitBranch(ContBlock);
3046  // Emit the continuation block for code after the if.
3047  CGF.EmitBlock(ContBlock, /*IsFinished=*/true);
3048 }
3049 
// Emits a 'parallel' region call: __kmpc_fork_call with the outlined function
// when the if-clause holds, or a serialized-parallel sequence otherwise.
// NOTE(review): doxygen capture — the signature line (3050) and one line in
// ThenGen (3065, presumably the declaration of RealArgs — TODO confirm) are
// missing from this excerpt.
3051  llvm::Function *OutlinedFn,
3052  ArrayRef<llvm::Value *> CapturedVars,
3053  const Expr *IfCond) {
3054  if (!CGF.HaveInsertPoint())
3055  return;
3056  llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
3057  auto &&ThenGen = [OutlinedFn, CapturedVars, RTLoc](CodeGenFunction &CGF,
3058  PrePostActionTy &) {
3059  // Build call __kmpc_fork_call(loc, n, microtask, var1, .., varn);
3060  CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
3061  llvm::Value *Args[] = {
3062  RTLoc,
3063  CGF.Builder.getInt32(CapturedVars.size()), // Number of captured vars
3064  CGF.Builder.CreateBitCast(OutlinedFn, RT.getKmpc_MicroPointerTy())};
3066  RealArgs.append(std::begin(Args), std::end(Args));
3067  RealArgs.append(CapturedVars.begin(), CapturedVars.end());
3068 
3069  llvm::FunctionCallee RTLFn =
3070  RT.createRuntimeFunction(OMPRTL__kmpc_fork_call);
3071  CGF.EmitRuntimeCall(RTLFn, RealArgs);
3072  };
3073  auto &&ElseGen = [OutlinedFn, CapturedVars, RTLoc, Loc](CodeGenFunction &CGF,
3074  PrePostActionTy &) {
3075  CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
3076  llvm::Value *ThreadID = RT.getThreadID(CGF, Loc);
3077  // Build calls:
3078  // __kmpc_serialized_parallel(&Loc, GTid);
3079  llvm::Value *Args[] = {RTLoc, ThreadID};
3080  CGF.EmitRuntimeCall(
3081  RT.createRuntimeFunction(OMPRTL__kmpc_serialized_parallel), Args);
3082 
3083  // OutlinedFn(&GTid, &zero_bound, CapturedStruct);
3084  Address ThreadIDAddr = RT.emitThreadIDAddress(CGF, Loc);
3085  Address ZeroAddrBound =
3086  CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
3087  /*Name=*/".bound.zero.addr");
3088  CGF.InitTempAlloca(ZeroAddrBound, CGF.Builder.getInt32(/*C*/ 0));
3089  llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
3090  // ThreadId for serialized parallels is 0.
3091  OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
3092  OutlinedFnArgs.push_back(ZeroAddrBound.getPointer());
3093  OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
3094  RT.emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
3095 
3096  // __kmpc_end_serialized_parallel(&Loc, GTid);
3097  llvm::Value *EndArgs[] = {RT.emitUpdateLocation(CGF, Loc), ThreadID};
3098  CGF.EmitRuntimeCall(
3099  RT.createRuntimeFunction(OMPRTL__kmpc_end_serialized_parallel),
3100  EndArgs);
3101  };
// No if-clause: the parallel path is emitted unconditionally.
3102  if (IfCond) {
3103  emitOMPIfClause(CGF, IfCond, ThenGen, ElseGen);
3104  } else {
3105  RegionCodeGenTy ThenRCG(ThenGen);
3106  ThenRCG(CGF);
3107  }
3108 }
3109 
3110 // If we're inside an (outlined) parallel region, use the region info's
3111 // thread-ID variable (it is passed in a first argument of the outlined function
3112 // as "kmp_int32 *gtid"). Otherwise, if we're not inside parallel region, but in
3113 // regular serial code region, get thread ID by calling kmp_int32
3114 // kmpc_global_thread_num(ident_t *loc), stash this thread ID in a temporary and
3115 // return the address of that temp.
// NOTE(review): doxygen capture — the signature line (3116) and one blank/
// intervening line (3122) are missing from this excerpt.
3117  SourceLocation Loc) {
3118  if (auto *OMPRegionInfo =
3119  dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
3120  if (OMPRegionInfo->getThreadIDVariable())
3121  return OMPRegionInfo->getThreadIDVariableLValue(CGF).getAddress();
// Fall back: materialize the thread ID into a fresh i32 temporary and return
// the temporary's address.
3123  llvm::Value *ThreadID = getThreadID(CGF, Loc);
3124  QualType Int32Ty =
3125  CGF.getContext().getIntTypeForBitwidth(/*DestWidth*/ 32, /*Signed*/ true);
3126  Address ThreadIDTemp = CGF.CreateMemTemp(Int32Ty, /*Name*/ ".threadid_temp.");
3127  CGF.EmitStoreOfScalar(ThreadID,
3128  CGF.MakeAddrLValue(ThreadIDTemp, Int32Ty));
3129 
3130  return ThreadIDTemp;
3131 }
3132 
// Returns a module-level internal global with the given name, creating it
// with common linkage and a zero initializer on first request.
// NOTE(review): doxygen capture — the signature line (3133) is missing from
// this excerpt.
3134  llvm::Type *Ty, const llvm::Twine &Name, unsigned AddressSpace) {
3135  SmallString<256> Buffer;
3136  llvm::raw_svector_ostream Out(Buffer);
3137  Out << Name;
3138  StringRef RuntimeName = Out.str();
// try_emplace: a non-null mapped value means the variable already exists; its
// element type must then match the requested type.
3139  auto &Elem = *InternalVars.try_emplace(RuntimeName, nullptr).first;
3140  if (Elem.second) {
3141  assert(Elem.second->getType()->getPointerElementType() == Ty &&
3142  "OMP internal variable has different type than requested");
3143  return &*Elem.second;
3144  }
3145 
3146  return Elem.second = new llvm::GlobalVariable(
3147  CGM.getModule(), Ty, /*IsConstant*/ false,
3148  llvm::GlobalValue::CommonLinkage, llvm::Constant::getNullValue(Ty),
3149  Elem.first(), /*InsertBefore=*/nullptr,
3150  llvm::GlobalValue::NotThreadLocal, AddressSpace);
3151 }
3152 
// Returns the internal "gomp_critical_user_<name>.var" lock variable for a
// named critical region.
// NOTE(review): doxygen capture — the signature line (3153) is missing from
// this excerpt.
3154  std::string Prefix = Twine("gomp_critical_user_", CriticalName).str();
3155  std::string Name = getName({Prefix, "var"});
3156  return getOrCreateInternalVariable(KmpCriticalNameTy, Name);
3157 }
3158 
3159 namespace {
3160 /// Common pre(post)-action for different OpenMP constructs.
3161 class CommonActionTy final : public PrePostActionTy {
3162  llvm::FunctionCallee EnterCallee;
3163  ArrayRef<llvm::Value *> EnterArgs;
3164  llvm::FunctionCallee ExitCallee;
3165  ArrayRef<llvm::Value *> ExitArgs;
3166  bool Conditional;
3167  llvm::BasicBlock *ContBlock = nullptr;
3168 
3169 public:
3170  CommonActionTy(llvm::FunctionCallee EnterCallee,
3171  ArrayRef<llvm::Value *> EnterArgs,
3172  llvm::FunctionCallee ExitCallee,
3173  ArrayRef<llvm::Value *> ExitArgs, bool Conditional = false)
3174  : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
3175  ExitArgs(ExitArgs), Conditional(Conditional) {}
3176  void Enter(CodeGenFunction &CGF) override {
3177  llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs);
3178  if (Conditional) {
3179  llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes);
3180  auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
3181  ContBlock = CGF.createBasicBlock("omp_if.end");
3182  // Generate the branch (If-stmt)
3183  CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
3184  CGF.EmitBlock(ThenBlock);
3185  }
3186  }
3187  void Done(CodeGenFunction &CGF) {
3188  // Emit the rest of blocks/branches
3189  CGF.EmitBranch(ContBlock);
3190  CGF.EmitBlock(ContBlock, true);
3191  }
3192  void Exit(CodeGenFunction &CGF) override {
3193  CGF.EmitRuntimeCall(ExitCallee, ExitArgs);
3194  }
3195 };
3196 } // anonymous namespace
3197 
// Emits a 'critical' region guarded by __kmpc_critical[/_with_hint] and
// __kmpc_end_critical around the inlined body.
// NOTE(review): doxygen capture — the signature line (3198) and the
// CommonActionTy constructor-argument lines (3217-3219) are missing from this
// excerpt.
3199  StringRef CriticalName,
3200  const RegionCodeGenTy &CriticalOpGen,
3201  SourceLocation Loc, const Expr *Hint) {
3202  // __kmpc_critical[_with_hint](ident_t *, gtid, Lock[, hint]);
3203  // CriticalOpGen();
3204  // __kmpc_end_critical(ident_t *, gtid, Lock);
3205  // Prepare arguments and build a call to __kmpc_critical
3206  if (!CGF.HaveInsertPoint())
3207  return;
3208  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
3209  getCriticalRegionLock(CriticalName)};
3210  llvm::SmallVector<llvm::Value *, 4> EnterArgs(std::begin(Args),
3211  std::end(Args));
// A 'hint' clause appends one extra argument for the _with_hint entry point.
3212  if (Hint) {
3213  EnterArgs.push_back(CGF.Builder.CreateIntCast(
3214  CGF.EmitScalarExpr(Hint), CGM.IntPtrTy, /*isSigned=*/false));
3215  }
3216  CommonActionTy Action(
3220  CriticalOpGen.setAction(Action);
3221  emitInlinedDirective(CGF, OMPD_critical, CriticalOpGen);
3222 }
3223 
// Emits a 'master' region: the body runs only when __kmpc_master returns
// non-zero (Conditional action), closed by __kmpc_end_master.
// NOTE(review): doxygen capture — the signature line (3224) and the
// end-callee argument line (3236) are missing from this excerpt.
3225  const RegionCodeGenTy &MasterOpGen,
3226  SourceLocation Loc) {
3227  if (!CGF.HaveInsertPoint())
3228  return;
3229  // if(__kmpc_master(ident_t *, gtid)) {
3230  // MasterOpGen();
3231  // __kmpc_end_master(ident_t *, gtid);
3232  // }
3233  // Prepare arguments and build a call to __kmpc_master
3234  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
3235  CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_master), Args,
3237  /*Conditional=*/true);
3238  MasterOpGen.setAction(Action);
3239  emitInlinedDirective(CGF, OMPD_master, MasterOpGen);
// Done() closes the conditional branch opened by the Conditional Enter.
3240  Action.Done(CGF);
3241 }
3242 
// Emits a __kmpc_omp_taskyield call for a 'taskyield' directive.
// NOTE(review): doxygen capture — the signature line (3243) and the
// EmitRuntimeCall line (3251) are missing from this excerpt.
3244  SourceLocation Loc) {
3245  if (!CGF.HaveInsertPoint())
3246  return;
3247  // Build call __kmpc_omp_taskyield(loc, thread_id, 0);
3248  llvm::Value *Args[] = {
3249  emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
3250  llvm::ConstantInt::get(CGM.IntTy, /*V=*/0, /*isSigned=*/true)};
// Inside an untied task, a yield point also needs a resume switch.
3252  if (auto *Region = dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
3253  Region->emitUntiedSwitch(CGF);
3254 }
3255 
// Emits a 'taskgroup' region bracketed by __kmpc_taskgroup and
// __kmpc_end_taskgroup.
// NOTE(review): doxygen capture — the signature line (3256) and the
// end-callee argument line (3267) are missing from this excerpt.
3257  const RegionCodeGenTy &TaskgroupOpGen,
3258  SourceLocation Loc) {
3259  if (!CGF.HaveInsertPoint())
3260  return;
3261  // __kmpc_taskgroup(ident_t *, gtid);
3262  // TaskgroupOpGen();
3263  // __kmpc_end_taskgroup(ident_t *, gtid);
3264  // Prepare arguments and build a call to __kmpc_taskgroup
3265  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
3266  CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_taskgroup), Args,
3268  Args);
3269  TaskgroupOpGen.setAction(Action);
3270  emitInlinedDirective(CGF, OMPD_taskgroup, TaskgroupOpGen);
3271 }
3272 
3273 /// Given an array of pointers to variables, project the address of a
3274 /// given variable.
// NOTE(review): doxygen capture — the signature line (3275) is missing from
// this excerpt.
3276  unsigned Index, const VarDecl *Var) {
3277  // Pull out the pointer to the variable.
3278  Address PtrAddr = CGF.Builder.CreateConstArrayGEP(Array, Index);
3279  llvm::Value *Ptr = CGF.Builder.CreateLoad(PtrAddr);
3280 
// Re-type the loaded pointer to the variable's declared element type and
// alignment.
3281  Address Addr = Address(Ptr, CGF.getContext().getDeclAlign(Var));
3282  Addr = CGF.Builder.CreateElementBitCast(
3283  Addr, CGF.ConvertTypeForMem(Var->getType()));
3284  return Addr;
3285 }
3286 
// Builds the internal copy function used by 'copyprivate': it receives two
// void* arrays (destinations and sources) and emits one element-wise copy
// per copyprivate variable using the provided assignment expressions.
// NOTE(review): doxygen capture — the function-name line (3287) and several
// continuation lines (3296, 3298, 3302, 3306, 3314, 3317) are missing from
// this excerpt.
3288  CodeGenModule &CGM, llvm::Type *ArgsType,
3289  ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs,
3290  ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps,
3291  SourceLocation Loc) {
3292  ASTContext &C = CGM.getContext();
3293  // void copy_func(void *LHSArg, void *RHSArg);
3294  FunctionArgList Args;
3295  ImplicitParamDecl LHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
3297  ImplicitParamDecl RHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
3299  Args.push_back(&LHSArg);
3300  Args.push_back(&RHSArg);
3301  const auto &CGFI =
3303  std::string Name =
3304  CGM.getOpenMPRuntime().getName({"omp", "copyprivate", "copy_func"});
3305  auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
3307  &CGM.getModule());
3308  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3309  Fn->setDoesNotRecurse();
3310  CodeGenFunction CGF(CGM);
3311  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3312  // Dest = (void*[n])(LHSArg);
3313  // Src = (void*[n])(RHSArg);
3315  CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
3316  ArgsType), CGF.getPointerAlign());
3318  CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
3319  ArgsType), CGF.getPointerAlign());
3320  // *(Type0*)Dst[0] = *(Type0*)Src[0];
3321  // *(Type1*)Dst[1] = *(Type1*)Src[1];
3322  // ...
3323  // *(Typen*)Dst[n] = *(Typen*)Src[n];
3324  for (unsigned I = 0, E = AssignmentOps.size(); I < E; ++I) {
3325  const auto *DestVar =
3326  cast<VarDecl>(cast<DeclRefExpr>(DestExprs[I])->getDecl());
3327  Address DestAddr = emitAddrOfVarFromArray(CGF, LHS, I, DestVar);
3328 
3329  const auto *SrcVar =
3330  cast<VarDecl>(cast<DeclRefExpr>(SrcExprs[I])->getDecl());
3331  Address SrcAddr = emitAddrOfVarFromArray(CGF, RHS, I, SrcVar);
3332 
3333  const auto *VD = cast<DeclRefExpr>(CopyprivateVars[I])->getDecl();
3334  QualType Type = VD->getType();
3335  CGF.EmitOMPCopy(Type, DestAddr, SrcAddr, DestVar, SrcVar, AssignmentOps[I]);
3336  }
3337  CGF.FinishFunction();
3338  return Fn;
3339 }
3340 
                                      const RegionCodeGenTy &SingleOpGen,
                                      SourceLocation Loc,
                                      ArrayRef<const Expr *> CopyprivateVars,
                                      ArrayRef<const Expr *> SrcExprs,
                                      ArrayRef<const Expr *> DstExprs,
                                      ArrayRef<const Expr *> AssignmentOps) {
  if (!CGF.HaveInsertPoint())
    return;
  // The four copyprivate arrays are parallel; one entry per variable.
  assert(CopyprivateVars.size() == SrcExprs.size() &&
         CopyprivateVars.size() == DstExprs.size() &&
         CopyprivateVars.size() == AssignmentOps.size());
  ASTContext &C = CGM.getContext();
  // int32 did_it = 0;
  // if(__kmpc_single(ident_t *, gtid)) {
  //   SingleOpGen();
  //   __kmpc_end_single(ident_t *, gtid);
  //   did_it = 1;
  // }
  // call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>,
  // <copy_func>, did_it);

  Address DidIt = Address::invalid();
  if (!CopyprivateVars.empty()) {
    // int32 did_it = 0;
    QualType KmpInt32Ty =
        C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
    DidIt = CGF.CreateMemTemp(KmpInt32Ty, ".omp.copyprivate.did_it");
    CGF.Builder.CreateStore(CGF.Builder.getInt32(0), DidIt);
  }
  // Prepare arguments and build a call to __kmpc_single
  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
  // Conditional action: the inlined region body is guarded by the result of
  // __kmpc_single (see the pseudo-code above).
  CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_single), Args,
                        /*Conditional=*/true);
  SingleOpGen.setAction(Action);
  emitInlinedDirective(CGF, OMPD_single, SingleOpGen);
  if (DidIt.isValid()) {
    // did_it = 1;
    CGF.Builder.CreateStore(CGF.Builder.getInt32(1), DidIt);
  }
  Action.Done(CGF);
  // call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>,
  // <copy_func>, did_it);
  if (DidIt.isValid()) {
    llvm::APInt ArraySize(/*unsigned int numBits=*/32, CopyprivateVars.size());
    QualType CopyprivateArrayTy = C.getConstantArrayType(
        C.VoidPtrTy, ArraySize, nullptr, ArrayType::Normal,
        /*IndexTypeQuals=*/0);
    // Create a list of all private variables for copyprivate.
    Address CopyprivateList =
        CGF.CreateMemTemp(CopyprivateArrayTy, ".omp.copyprivate.cpr_list");
    for (unsigned I = 0, E = CopyprivateVars.size(); I < E; ++I) {
      Address Elem = CGF.Builder.CreateConstArrayGEP(CopyprivateList, I);
      CGF.Builder.CreateStore(
              CGF.EmitLValue(CopyprivateVars[I]).getPointer(), CGF.VoidPtrTy),
          Elem);
    }
    // Build function that copies private values from single region to all
    // other threads in the corresponding parallel region.
        CGM, CGF.ConvertTypeForMem(CopyprivateArrayTy)->getPointerTo(),
        CopyprivateVars, SrcExprs, DstExprs, AssignmentOps, Loc);
    llvm::Value *BufSize = CGF.getTypeSize(CopyprivateArrayTy);
    Address CL =
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(CopyprivateList,
                                                        CGF.VoidPtrTy);
    llvm::Value *DidItVal = CGF.Builder.CreateLoad(DidIt);
    llvm::Value *Args[] = {
        emitUpdateLocation(CGF, Loc), // ident_t *<loc>
        getThreadID(CGF, Loc),        // i32 <gtid>
        BufSize,                      // size_t <buf_size>
        CL.getPointer(),              // void *<copyprivate list>
        CpyFn,                        // void (*) (void *, void *) <copy_func>
        DidItVal                      // i32 did_it
    };
  }
}
3421 
                                        const RegionCodeGenTy &OrderedOpGen,
                                        SourceLocation Loc, bool IsThreads) {
  if (!CGF.HaveInsertPoint())
    return;
  // __kmpc_ordered(ident_t *, gtid);
  // OrderedOpGen();
  // __kmpc_end_ordered(ident_t *, gtid);
  // Prepare arguments and build a call to __kmpc_ordered
  if (IsThreads) {
    llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
    CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_ordered), Args,
                          Args);
    OrderedOpGen.setAction(Action);
    emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
    return;
  }
  // Not a 'threads' ordered region: emit the body inline with no runtime
  // entry/exit calls.
  emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
}
3442 
  // Map the directive kind onto the ident_t barrier flag encoded in the
  // location descriptor; anything unrecognized gets the generic implicit
  // barrier flag.
  unsigned Flags;
  if (Kind == OMPD_for)
    Flags = OMP_IDENT_BARRIER_IMPL_FOR;
  else if (Kind == OMPD_sections)
    Flags = OMP_IDENT_BARRIER_IMPL_SECTIONS;
  else if (Kind == OMPD_single)
    Flags = OMP_IDENT_BARRIER_IMPL_SINGLE;
  else if (Kind == OMPD_barrier)
    Flags = OMP_IDENT_BARRIER_EXPL;
  else
    Flags = OMP_IDENT_BARRIER_IMPL;
  return Flags;
}
3457 
    CodeGenFunction &CGF, const OMPLoopDirective &S,
    OpenMPScheduleClauseKind &ScheduleKind, const Expr *&ChunkExpr) const {
  // Check if the loop directive is actually a doacross loop directive. In this
  // case choose static, 1 schedule.
  if (llvm::any_of(
          [](const OMPOrderedClause *C) { return C->getNumForLoops(); })) {
    ScheduleKind = OMPC_SCHEDULE_static;
    // Chunk size is 1 in this case.
    llvm::APInt ChunkSize(32, 1);
    // Synthesize an unsigned 32-bit integer literal '1' as the chunk
    // expression.
    ChunkExpr = IntegerLiteral::Create(
        CGF.getContext(), ChunkSize,
        CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
        SourceLocation());
  }
}
3475 
                                      OpenMPDirectiveKind Kind, bool EmitChecks,
                                      bool ForceSimpleCall) {
  if (!CGF.HaveInsertPoint())
    return;
  // Build call __kmpc_cancel_barrier(loc, thread_id);
  // Build call __kmpc_barrier(loc, thread_id);
  unsigned Flags = getDefaultFlagsForBarriers(Kind);
  // Build call __kmpc_cancel_barrier(loc, thread_id) or __kmpc_barrier(loc,
  // thread_id);
  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
                         getThreadID(CGF, Loc)};
  // Inside a cancellable OpenMP region (unless a simple barrier is forced),
  // use the cancellation-aware barrier and branch out of the construct when
  // it reports cancellation.
  if (auto *OMPRegionInfo =
          dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
    if (!ForceSimpleCall && OMPRegionInfo->hasCancel()) {
      llvm::Value *Result = CGF.EmitRuntimeCall(
      if (EmitChecks) {
        // if (__kmpc_cancel_barrier()) {
        //   exit from construct;
        // }
        llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit");
        llvm::BasicBlock *ContBB = CGF.createBasicBlock(".cancel.continue");
        llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result);
        CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
        CGF.EmitBlock(ExitBB);
        // exit from construct;
        CodeGenFunction::JumpDest CancelDestination =
            CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
        CGF.EmitBranchThroughCleanup(CancelDestination);
        CGF.EmitBlock(ContBB, /*IsFinished=*/true);
      }
      return;
    }
  }
}
3513 
/// Map the OpenMP loop schedule to the runtime enumeration.
/// The chosen value also encodes whether the loop carries an ordered clause
/// (OMP_ord_*) and whether a chunk was specified (*_chunked).
                                      bool Chunked, bool Ordered) {
  switch (ScheduleKind) {
  case OMPC_SCHEDULE_static:
    return Chunked ? (Ordered ? OMP_ord_static_chunked : OMP_sch_static_chunked)
                   : (Ordered ? OMP_ord_static : OMP_sch_static);
  case OMPC_SCHEDULE_dynamic:
  case OMPC_SCHEDULE_guided:
  case OMPC_SCHEDULE_runtime:
    return Ordered ? OMP_ord_runtime : OMP_sch_runtime;
  case OMPC_SCHEDULE_auto:
    return Ordered ? OMP_ord_auto : OMP_sch_auto;
  case OMPC_SCHEDULE_unknown:
    // No schedule clause: default to static.
    assert(!Chunked && "chunk was specified but schedule kind not known");
    return Ordered ? OMP_ord_static : OMP_sch_static;
  }
  llvm_unreachable("Unexpected runtime schedule");
}
3535 
/// Map the OpenMP distribute schedule to the runtime enumeration
/// (OMP_dist_sch_static or its chunked variant).
static OpenMPSchedType
  // Only static is allowed for dist_schedule.
}
3542 
                                          bool Chunked) const {
  // True only for plain non-chunked, non-ordered 'static'.
  OpenMPSchedType Schedule =
      getRuntimeSchedule(ScheduleKind, Chunked, /*Ordered=*/false);
  return Schedule == OMP_sch_static;
}
3549 
    OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const {
  // Distribute variant: true only for non-chunked dist_schedule(static).
  OpenMPSchedType Schedule = getRuntimeSchedule(ScheduleKind, Chunked);
  return Schedule == OMP_dist_sch_static;
}
3555 
                                       bool Chunked) const {
  // True only for chunked, non-ordered 'static'.
  OpenMPSchedType Schedule =
      getRuntimeSchedule(ScheduleKind, Chunked, /*Ordered=*/false);
  return Schedule == OMP_sch_static_chunked;
}
3562 
    OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const {
  // Distribute variant: true only for chunked dist_schedule(static, chunk).
  OpenMPSchedType Schedule = getRuntimeSchedule(ScheduleKind, Chunked);
  return Schedule == OMP_dist_sch_static_chunked;
}
3568 
  // Anything other than plain static requires the dynamic dispatch codegen
  // path (dispatch_init/dispatch_next).
  OpenMPSchedType Schedule =
      getRuntimeSchedule(ScheduleKind, /*Chunked=*/false, /*Ordered=*/false);
  assert(Schedule != OMP_sch_static_chunked && "cannot be chunked here");
  return Schedule != OMP_sch_static;
}
3575 
  // Translate the two schedule-clause modifiers into the runtime's
  // kmp modifier bits and OR them into the schedule value.
  int Modifier = 0;
  switch (M1) {
  case OMPC_SCHEDULE_MODIFIER_monotonic:
    Modifier = OMP_sch_modifier_monotonic;
    break;
  case OMPC_SCHEDULE_MODIFIER_nonmonotonic:
    Modifier = OMP_sch_modifier_nonmonotonic;
    break;
  case OMPC_SCHEDULE_MODIFIER_simd:
    if (Schedule == OMP_sch_static_chunked)
    break;
    break;
  }
  switch (M2) {
  case OMPC_SCHEDULE_MODIFIER_monotonic:
    Modifier = OMP_sch_modifier_monotonic;
    break;
  case OMPC_SCHEDULE_MODIFIER_nonmonotonic:
    Modifier = OMP_sch_modifier_nonmonotonic;
    break;
  case OMPC_SCHEDULE_MODIFIER_simd:
    if (Schedule == OMP_sch_static_chunked)
    break;
    break;
  }
  // OpenMP 5.0, 2.9.2 Worksharing-Loop Construct, Description.
  // If the static schedule kind is specified or if the ordered clause is
  // specified, and if the nonmonotonic modifier is not specified, the effect is
  // as if the monotonic modifier is specified. Otherwise, unless the monotonic
  // modifier is specified, the effect is as if the nonmonotonic modifier is
  // specified.
  if (CGM.getLangOpts().OpenMP >= 50 && Modifier == 0) {
    if (!(Schedule == OMP_sch_static_chunked || Schedule == OMP_sch_static ||
          Schedule == OMP_sch_static_balanced_chunked ||
          Schedule == OMP_ord_static_chunked || Schedule == OMP_ord_static))
      Modifier = OMP_sch_modifier_nonmonotonic;
  }
  return Schedule | Modifier;
}
3624 
    CodeGenFunction &CGF, SourceLocation Loc,
    const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned,
    bool Ordered, const DispatchRTInput &DispatchValues) {
  if (!CGF.HaveInsertPoint())
    return;
      ScheduleKind.Schedule, DispatchValues.Chunk != nullptr, Ordered);
  // Static non-ordered schedules must go through the static-init path
  // instead of dispatch-init.
  assert(Ordered ||
         (Schedule != OMP_sch_static && Schedule != OMP_sch_static_chunked &&
          Schedule != OMP_ord_static && Schedule != OMP_ord_static_chunked &&
          Schedule != OMP_sch_static_balanced_chunked));
  // Call __kmpc_dispatch_init(
  //          ident_t *loc, kmp_int32 tid, kmp_int32 schedule,
  //          kmp_int[32|64] lower, kmp_int[32|64] upper,
  //          kmp_int[32|64] stride, kmp_int[32|64] chunk);

  // If the Chunk was not specified in the clause - use default value 1.
  llvm::Value *Chunk = DispatchValues.Chunk ? DispatchValues.Chunk
                                            : CGF.Builder.getIntN(IVSize, 1);
  llvm::Value *Args[] = {
      emitUpdateLocation(CGF, Loc),
      getThreadID(CGF, Loc),
      CGF.Builder.getInt32(addMonoNonMonoModifier(
          CGM, Schedule, ScheduleKind.M1, ScheduleKind.M2)), // Schedule type
      DispatchValues.LB,              // Lower
      DispatchValues.UB,              // Upper
      CGF.Builder.getIntN(IVSize, 1), // Stride
      Chunk                           // Chunk
  };
  CGF.EmitRuntimeCall(createDispatchInitFunction(IVSize, IVSigned), Args);
}
3657 
    CodeGenFunction &CGF, llvm::Value *UpdateLocation, llvm::Value *ThreadId,
    llvm::FunctionCallee ForStaticInitFunction, OpenMPSchedType Schedule,
    const CGOpenMPRuntime::StaticRTInput &Values) {
  if (!CGF.HaveInsertPoint())
    return;

  // This helper is only used for the (non-ordered) static schedule family.
  assert(!Values.Ordered);
  assert(Schedule == OMP_sch_static || Schedule == OMP_sch_static_chunked ||
         Schedule == OMP_sch_static_balanced_chunked ||
         Schedule == OMP_ord_static || Schedule == OMP_ord_static_chunked ||
         Schedule == OMP_dist_sch_static ||
         Schedule == OMP_dist_sch_static_chunked);

  // Call __kmpc_for_static_init(
  //          ident_t *loc, kmp_int32 tid, kmp_int32 schedtype,
  //          kmp_int32 *p_lastiter, kmp_int[32|64] *p_lower,
  //          kmp_int[32|64] *p_upper, kmp_int[32|64] *p_stride,
  //          kmp_int[32|64] incr, kmp_int[32|64] chunk);
  llvm::Value *Chunk = Values.Chunk;
  if (Chunk == nullptr) {
    assert((Schedule == OMP_sch_static || Schedule == OMP_ord_static ||
            Schedule == OMP_dist_sch_static) &&
           "expected static non-chunked schedule");
    // If the Chunk was not specified in the clause - use default value 1.
    Chunk = CGF.Builder.getIntN(Values.IVSize, 1);
  } else {
    assert((Schedule == OMP_sch_static_chunked ||
            Schedule == OMP_sch_static_balanced_chunked ||
            Schedule == OMP_ord_static_chunked ||
            Schedule == OMP_dist_sch_static_chunked) &&
           "expected static chunked schedule");
  }
  llvm::Value *Args[] = {
      UpdateLocation,
      ThreadId,
      CGF.Builder.getInt32(addMonoNonMonoModifier(CGF.CGM, Schedule, M1,
                                                  M2)), // Schedule type
      Values.IL.getPointer(),                // &isLastIter
      Values.LB.getPointer(),                // &LB
      Values.UB.getPointer(),                // &UB
      Values.ST.getPointer(),                // &Stride
      CGF.Builder.getIntN(Values.IVSize, 1), // Incr
      Chunk                                  // Chunk
  };
  CGF.EmitRuntimeCall(ForStaticInitFunction, Args);
}
3706 
                                        SourceLocation Loc,
                                        OpenMPDirectiveKind DKind,
                                        const OpenMPScheduleTy &ScheduleKind,
                                        const StaticRTInput &Values) {
  OpenMPSchedType ScheduleNum = getRuntimeSchedule(
      ScheduleKind.Schedule, Values.Chunk != nullptr, Values.Ordered);
  assert(isOpenMPWorksharingDirective(DKind) &&
         "Expected loop-based or sections-based directive.");
  // Tag the location descriptor with the work kind (loop vs. sections).
  llvm::Value *UpdatedLocation = emitUpdateLocation(CGF, Loc,
                                                    isOpenMPLoopDirective(DKind)
                                                        ? OMP_IDENT_WORK_LOOP
                                                        : OMP_IDENT_WORK_SECTIONS);
  llvm::Value *ThreadId = getThreadID(CGF, Loc);
  llvm::FunctionCallee StaticInitFunction =
  emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
                        ScheduleNum, ScheduleKind.M1, ScheduleKind.M2, Values);
}
3726 
    CodeGenFunction &CGF, SourceLocation Loc,
    OpenMPDistScheduleClauseKind SchedKind,
    const CGOpenMPRuntime::StaticRTInput &Values) {
  OpenMPSchedType ScheduleNum =
      getRuntimeSchedule(SchedKind, Values.Chunk != nullptr);
  llvm::Value *UpdatedLocation =
      emitUpdateLocation(CGF, Loc, OMP_IDENT_WORK_DISTRIBUTE);
  llvm::Value *ThreadId = getThreadID(CGF, Loc);
  llvm::FunctionCallee StaticInitFunction =
      createForStaticInitFunction(Values.IVSize, Values.IVSigned);
  // dist_schedule carries no monotonic/nonmonotonic modifiers; pass
  // 'unknown' placeholders.
  emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
                        ScheduleNum, OMPC_SCHEDULE_MODIFIER_unknown,
}
3742 
                                          SourceLocation Loc,
                                          OpenMPDirectiveKind DKind) {
  if (!CGF.HaveInsertPoint())
    return;
  // Call __kmpc_for_static_fini(ident_t *loc, kmp_int32 tid);
  // The location flag records which work kind is being finished
  // (distribute / loop / sections).
  llvm::Value *Args[] = {
      emitUpdateLocation(CGF, Loc,
                             ? OMP_IDENT_WORK_DISTRIBUTE
                             : isOpenMPLoopDirective(DKind)
                                   ? OMP_IDENT_WORK_LOOP
                                   : OMP_IDENT_WORK_SECTIONS),
      getThreadID(CGF, Loc)};
      Args);
}
3760 
                                                 SourceLocation Loc,
                                                 unsigned IVSize,
                                                 bool IVSigned) {
  if (!CGF.HaveInsertPoint())
    return;
  // Call __kmpc_for_dynamic_fini_(4|8)[u](ident_t *loc, kmp_int32 tid);
  // the suffix is selected from the IV size/signedness by
  // createDispatchFiniFunction.
  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
  CGF.EmitRuntimeCall(createDispatchFiniFunction(IVSize, IVSigned), Args);
}
3771 
                                          SourceLocation Loc, unsigned IVSize,
                                          bool IVSigned, Address IL,
                                          Address LB, Address UB,
                                          Address ST) {
  // Call __kmpc_dispatch_next(
  //          ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
  //          kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
  //          kmp_int[32|64] *p_stride);
  llvm::Value *Args[] = {
      emitUpdateLocation(CGF, Loc),
      getThreadID(CGF, Loc),
      IL.getPointer(), // &isLastIter
      LB.getPointer(), // &Lower
      UB.getPointer(), // &Upper
      ST.getPointer()  // &Stride
  };
  llvm::Value *Call =
      CGF.EmitRuntimeCall(createDispatchNextFunction(IVSize, IVSigned), Args);
  // Convert the runtime's signed 32-bit result into a bool for the caller.
  return CGF.EmitScalarConversion(
      Call, CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/1),
      CGF.getContext().BoolTy, Loc);
}
3795 
                                              llvm::Value *NumThreads,
                                              SourceLocation Loc) {
  if (!CGF.HaveInsertPoint())
    return;
  // Build call __kmpc_push_num_threads(&loc, global_tid, num_threads)
  llvm::Value *Args[] = {
      emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
      // The runtime takes a signed 32-bit thread count.
      CGF.Builder.CreateIntCast(NumThreads, CGF.Int32Ty, /*isSigned*/ true)};
      Args);
}
3808 
                                            OpenMPProcBindClauseKind ProcBind,
                                            SourceLocation Loc) {
  if (!CGF.HaveInsertPoint())
    return;
  // Constants for proc bind value accepted by the runtime.
  enum ProcBindTy {
    ProcBindFalse = 0,
    ProcBindTrue,
    ProcBindMaster,
    ProcBindClose,
    ProcBindSpread,
    ProcBindIntel,
    ProcBindDefault
  } RuntimeProcBind;
  // Map the clause value onto the runtime's enumeration.
  switch (ProcBind) {
  case OMPC_PROC_BIND_master:
    RuntimeProcBind = ProcBindMaster;
    break;
  case OMPC_PROC_BIND_close:
    RuntimeProcBind = ProcBindClose;
    break;
  case OMPC_PROC_BIND_spread:
    RuntimeProcBind = ProcBindSpread;
    break;
    llvm_unreachable("Unsupported proc_bind value.");
  }
  // Build call __kmpc_push_proc_bind(&loc, global_tid, proc_bind)
  llvm::Value *Args[] = {
      emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
      llvm::ConstantInt::get(CGM.IntTy, RuntimeProcBind, /*isSigned=*/true)};
}
3843 
void CGOpenMPRuntime::emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *>,
                                SourceLocation Loc) {
  if (!CGF.HaveInsertPoint())
    return;
  // Build call void __kmpc_flush(ident_t *loc)
  // The flushed-variable list parameter is intentionally unnamed and unused:
  // the runtime flush is not list-specific.
      emitUpdateLocation(CGF, Loc));
}
3852 
namespace {
/// Indexes of fields for type kmp_task_t.
/// NOTE(review): enumerator order defines the field indexes used by codegen;
/// presumably it must mirror the runtime's kmp_task_t record layout — confirm
/// before reordering.
  /// List of shared variables.
  KmpTaskTShareds,
  /// Task routine.
  KmpTaskTRoutine,
  /// Partition id for the untied tasks.
  KmpTaskTPartId,
  /// Function with call of destructors for private variables.
  Data1,
  /// Task priority.
  Data2,
  /// (Taskloops only) Lower bound.
  KmpTaskTLowerBound,
  /// (Taskloops only) Upper bound.
  KmpTaskTUpperBound,
  /// (Taskloops only) Stride.
  KmpTaskTStride,
  /// (Taskloops only) Is last iteration flag.
  KmpTaskTLastIter,
  /// (Taskloops only) Reduction data.
  KmpTaskTReductions,
};
} // anonymous namespace
3878 
3879 bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::empty() const {
3880  return OffloadEntriesTargetRegion.empty() &&
3881  OffloadEntriesDeviceGlobalVar.empty();
3882 }
3883 
3884 /// Initialize target region entry.
3885 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
3886  initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
3887  StringRef ParentName, unsigned LineNum,
3888  unsigned Order) {
3889  assert(CGM.getLangOpts().OpenMPIsDevice && "Initialization of entries is "
3890  "only required for the device "
3891  "code generation.");
3892  OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum] =
3893  OffloadEntryInfoTargetRegion(Order, /*Addr=*/nullptr, /*ID=*/nullptr,
3894  OMPTargetRegionEntryTargetRegion);
3895  ++OffloadingEntriesNum;
3896 }
3897 
void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
    registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
                                  StringRef ParentName, unsigned LineNum,
                                  llvm::Constant *Addr, llvm::Constant *ID,
                                  OMPTargetRegionEntryKind Flags) {
  // If we are emitting code for a target, the entry is already initialized,
  // only has to be registered.
  if (CGM.getLangOpts().OpenMPIsDevice) {
    if (!hasTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum)) {
      // No matching initialized entry: report a custom diagnostic and bail
      // out instead of creating one.
      unsigned DiagID = CGM.getDiags().getCustomDiagID(
          "Unable to find target region on line '%0' in the device code.");
      CGM.getDiags().Report(DiagID) << LineNum;
      return;
    }
    auto &Entry =
        OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum];
    assert(Entry.isValid() && "Entry not initialized!");
    Entry.setAddress(Addr);
    Entry.setID(ID);
    Entry.setFlags(Flags);
  } else {
    // Host side: create a brand-new entry with the next sequential order.
    OffloadEntryInfoTargetRegion Entry(OffloadingEntriesNum, Addr, ID, Flags);
    OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum] = Entry;
    ++OffloadingEntriesNum;
  }
}
3925 
3926 bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::hasTargetRegionEntryInfo(
3927  unsigned DeviceID, unsigned FileID, StringRef ParentName,
3928  unsigned LineNum) const {
3929  auto PerDevice = OffloadEntriesTargetRegion.find(DeviceID);
3930  if (PerDevice == OffloadEntriesTargetRegion.end())
3931  return false;
3932  auto PerFile = PerDevice->second.find(FileID);
3933  if (PerFile == PerDevice->second.end())
3934  return false;
3935  auto PerParentName = PerFile->second.find(ParentName);
3936  if (PerParentName == PerFile->second.end())
3937  return false;
3938  auto PerLine = PerParentName->second.find(LineNum);
3939  if (PerLine == PerParentName->second.end())
3940  return false;
3941  // Fail if this entry is already registered.
3942  if (PerLine->second.getAddress() || PerLine->second.getID())
3943  return false;
3944  return true;
3945 }
3946 
3947 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::actOnTargetRegionEntriesInfo(
3948  const OffloadTargetRegionEntryInfoActTy &Action) {
3949  // Scan all target region entries and perform the provided action.
3950  for (const auto &D : OffloadEntriesTargetRegion)
3951  for (const auto &F : D.second)
3952  for (const auto &P : F.second)
3953  for (const auto &L : P.second)
3954  Action(D.first, F.first, P.first(), L.first, L.second);
3955 }
3956 
3957 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
3958  initializeDeviceGlobalVarEntryInfo(StringRef Name,
3959  OMPTargetGlobalVarEntryKind Flags,
3960  unsigned Order) {
3961  assert(CGM.getLangOpts().OpenMPIsDevice && "Initialization of entries is "
3962  "only required for the device "
3963  "code generation.");
3964  OffloadEntriesDeviceGlobalVar.try_emplace(Name, Order, Flags);
3965  ++OffloadingEntriesNum;
3966 }
3967 
void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
    registerDeviceGlobalVarEntryInfo(StringRef VarName, llvm::Constant *Addr,
                                     CharUnits VarSize,
                                     OMPTargetGlobalVarEntryKind Flags,
                                     llvm::GlobalValue::LinkageTypes Linkage) {
  if (CGM.getLangOpts().OpenMPIsDevice) {
    // Device side: the entry was pre-initialized; fill in the details.
    // NOTE(review): operator[] inserts a default entry when the name is
    // absent; presumably such an entry fails the isValid() assert below in
    // +asserts builds — confirm against the entry type's definition.
    auto &Entry = OffloadEntriesDeviceGlobalVar[VarName];
    assert(Entry.isValid() && Entry.getFlags() == Flags &&
           "Entry not initialized!");
    assert((!Entry.getAddress() || Entry.getAddress() == Addr) &&
           "Resetting with the new address.");
    if (Entry.getAddress() && hasDeviceGlobalVarEntryInfo(VarName)) {
      // Already registered: only upgrade a zero size (no definition seen
      // yet) to the real size/linkage.
      if (Entry.getVarSize().isZero()) {
        Entry.setVarSize(VarSize);
        Entry.setLinkage(Linkage);
      }
      return;
    }
    Entry.setVarSize(VarSize);
    Entry.setLinkage(Linkage);
    Entry.setAddress(Addr);
  } else {
    // Host side: on repeat registration only a zero size may be updated;
    // otherwise create a fresh entry with the next sequential order.
    if (hasDeviceGlobalVarEntryInfo(VarName)) {
      auto &Entry = OffloadEntriesDeviceGlobalVar[VarName];
      assert(Entry.isValid() && Entry.getFlags() == Flags &&
             "Entry not initialized!");
      assert((!Entry.getAddress() || Entry.getAddress() == Addr) &&
             "Resetting with the new address.");
      if (Entry.getVarSize().isZero()) {
        Entry.setVarSize(VarSize);
        Entry.setLinkage(Linkage);
      }
      return;
    }
    OffloadEntriesDeviceGlobalVar.try_emplace(
        VarName, OffloadingEntriesNum, Addr, VarSize, Flags, Linkage);
    ++OffloadingEntriesNum;
  }
}
4007 
4008 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
4009  actOnDeviceGlobalVarEntriesInfo(
4010  const OffloadDeviceGlobalVarEntryInfoActTy &Action) {
4011  // Scan all target region entries and perform the provided action.
4012  for (const auto &E : OffloadEntriesDeviceGlobalVar)
4013  Action(E.getKey(), E.getValue());
4014 }
4015 
    llvm::Constant *ID, llvm::Constant *Addr, uint64_t Size, int32_t Flags,
    llvm::GlobalValue::LinkageTypes Linkage) {
  StringRef Name = Addr->getName();
  llvm::Module &M = CGM.getModule();
  llvm::LLVMContext &C = M.getContext();

  // Create constant string with the name.
  llvm::Constant *StrPtrInit = llvm::ConstantDataArray::getString(C, Name);

  std::string StringName = getName({"omp_offloading", "entry_name"});
  auto *Str = new llvm::GlobalVariable(
      M, StrPtrInit->getType(), /*isConstant=*/true,
      llvm::GlobalValue::InternalLinkage, StrPtrInit, StringName);
  Str->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  // Descriptor fields: address/ID, entry name, size, flags, reserved (0).
  llvm::Constant *Data[] = {llvm::ConstantExpr::getBitCast(ID, CGM.VoidPtrTy),
                            llvm::ConstantExpr::getBitCast(Str, CGM.Int8PtrTy),
                            llvm::ConstantInt::get(CGM.SizeTy, Size),
                            llvm::ConstantInt::get(CGM.Int32Ty, Flags),
                            llvm::ConstantInt::get(CGM.Int32Ty, 0)};
  std::string EntryName = getName({"omp_offloading", "entry", ""});
  llvm::GlobalVariable *Entry = createGlobalStruct(
      CGM, getTgtOffloadEntryQTy(), /*IsConstant=*/true, Data,
      Twine(EntryName).concat(Name), llvm::GlobalValue::WeakAnyLinkage);

  // The entry has to be created in the section the linker expects it to be.
  Entry->setSection("omp_offloading_entries");
}
4045 
4047  // Emit the offloading entries and metadata so that the device codegen side
4048  // can easily figure out what to emit. The produced metadata looks like
4049  // this:
4050  //
4051  // !omp_offload.info = !{!1, ...}
4052  //
4053  // Right now we only generate metadata for function that contain target
4054  // regions.
4055 
4056  // If we are in simd mode or there are no entries, we don't need to do
4057  // anything.
4058  if (CGM.getLangOpts().OpenMPSimd || OffloadEntriesInfoManager.empty())
4059  return;
4060 
4061  llvm::Module &M = CGM.getModule();
4062  llvm::LLVMContext &C = M.getContext();
4064  SourceLocation, StringRef>,
4065  16>
4066  OrderedEntries(OffloadEntriesInfoManager.size());
4067  llvm::SmallVector<StringRef, 16> ParentFunctions(
4069 
4070  // Auxiliary methods to create metadata values and strings.
4071  auto &&GetMDInt = [this](unsigned V) {
4072  return llvm::ConstantAsMetadata::get(
4073  llvm::ConstantInt::get(CGM.Int32Ty, V));
4074  };
4075 
4076  auto &&GetMDString = [&C](StringRef V) { return llvm::MDString::get(C, V); };
4077 
4078  // Create the offloading info metadata node.
4079  llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("omp_offload.info");
4080 
4081  // Create function that emits metadata for each target region entry;
4082  auto &&TargetRegionMetadataEmitter =
4083  [this, &C, MD, &OrderedEntries, &ParentFunctions, &GetMDInt,
4084  &GetMDString](
4085  unsigned DeviceID, unsigned FileID, StringRef ParentName,
4086  unsigned Line,
4088  // Generate metadata for target regions. Each entry of this metadata
4089  // contains:
4090  // - Entry 0 -> Kind of this type of metadata (0).
4091  // - Entry 1 -> Device ID of the file where the entry was identified.
4092  // - Entry 2 -> File ID of the file where the entry was identified.
4093  // - Entry 3 -> Mangled name of the function where the entry was
4094  // identified.
4095  // - Entry 4 -> Line in the file where the entry was identified.
4096  // - Entry 5 -> Order the entry was created.
4097  // The first element of the metadata node is the kind.
4098  llvm::Metadata *Ops[] = {GetMDInt(E.getKind()), GetMDInt(DeviceID),
4099  GetMDInt(FileID), GetMDString(ParentName),
4100  GetMDInt(Line), GetMDInt(E.getOrder())};
4101 
4102  SourceLocation Loc;
4103  for (auto I = CGM.getContext().getSourceManager().fileinfo_begin(),
4105  I != E; ++I) {
4106  if (I->getFirst()->getUniqueID().getDevice() == DeviceID &&
4107  I->getFirst()->getUniqueID().getFile() == FileID) {
4109  I->getFirst(), Line, 1);
4110  break;
4111  }
4112  }
4113  // Save this entry in the right position of the ordered entries array.
4114  OrderedEntries[E.getOrder()] = std::make_tuple(&E, Loc, ParentName);
4115  ParentFunctions[E.getOrder()] = ParentName;
4116 
4117  // Add metadata to the named metadata node.
4118  MD->addOperand(llvm::MDNode::get(C, Ops));
4119  };
4120 
4122  TargetRegionMetadataEmitter);
4123 
4124  // Create function that emits metadata for each device global variable entry;
4125  auto &&DeviceGlobalVarMetadataEmitter =
4126  [&C, &OrderedEntries, &GetMDInt, &GetMDString,
4127  MD](StringRef MangledName,
4129  &E) {
4130  // Generate metadata for global variables. Each entry of this metadata
4131  // contains:
4132  // - Entry 0 -> Kind of this type of metadata (1).
4133  // - Entry 1 -> Mangled name of the variable.
4134  // - Entry 2 -> Declare target kind.
4135  // - Entry 3 -> Order the entry was created.
4136  // The first element of the metadata node is the kind.
4137  llvm::Metadata *Ops[] = {
4138  GetMDInt(E.getKind()), GetMDString(MangledName),
4139  GetMDInt(E.getFlags()), GetMDInt(E.getOrder())};
4140 
4141  // Save this entry in the right position of the ordered entries array.
4142  OrderedEntries[E.getOrder()] =
4143  std::make_tuple(&E, SourceLocation(), MangledName);
4144 
4145  // Add metadata to the named metadata node.
4146  MD->addOperand(llvm::MDNode::get(C, Ops));
4147  };
4148 
4150  DeviceGlobalVarMetadataEmitter);
4151 
4152  for (const auto &E : OrderedEntries) {
4153  assert(std::get<0>(E) && "All ordered entries must exist!");
4154  if (const auto *CE =
4155  dyn_cast<OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion>(
4156  std::get<0>(E))) {
4157  if (!CE->getID() || !CE->getAddress()) {
4158  // Do not blame the entry if the parent funtion is not emitted.
4159  StringRef FnName = ParentFunctions[CE->getOrder()];
4160  if (!CGM.GetGlobalValue(FnName))
4161  continue;
4162  unsigned DiagID = CGM.getDiags().getCustomDiagID(
4164  "Offloading entry for target region in %0 is incorrect: either the "
4165  "address or the ID is invalid.");
4166  CGM.getDiags().Report(std::get<1>(E), DiagID) << FnName;
4167  continue;
4168  }
4169  createOffloadEntry(CE->getID(), CE->getAddress(), /*Size=*/0,
4170  CE->getFlags(), llvm::GlobalValue::WeakAnyLinkage);
4171  } else if (const auto *CE = dyn_cast<OffloadEntriesInfoManagerTy::
4172  OffloadEntryInfoDeviceGlobalVar>(
4173  std::get<0>(E))) {
4176  CE->getFlags());
4177  switch (Flags) {
4179  if (CGM.getLangOpts().OpenMPIsDevice &&
4180  CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())
4181  continue;
4182  if (!CE->getAddress()) {
4183  unsigned DiagID = CGM.getDiags().getCustomDiagID(
4184  DiagnosticsEngine::Error, "Offloading entry for declare target "
4185  "variable %0 is incorrect: the "
4186  "address is invalid.");
4187  CGM.getDiags().Report(std::get<1>(E), DiagID) << std::get<2>(E);
4188  continue;
4189  }
4190  // The variable has no definition - no need to add the entry.
4191  if (CE->getVarSize().isZero())
4192  continue;
4193  break;
4194  }
4196  assert(((CGM.getLangOpts().OpenMPIsDevice && !CE->getAddress()) ||
4197  (!CGM.getLangOpts().OpenMPIsDevice && CE->getAddress())) &&
4198  "Declaret target link address is set.");
4199  if (CGM.getLangOpts().OpenMPIsDevice)
4200  continue;
4201  if (!CE->getAddress()) {
4202  unsigned DiagID = CGM.getDiags().getCustomDiagID(
4204  "Offloading entry for declare target variable is incorrect: the "
4205  "address is invalid.");
4206  CGM.getDiags().Report(DiagID);
4207  continue;
4208  }
4209  break;
4210  }
4211  createOffloadEntry(CE->getAddress(), CE->getAddress(),
4212  CE->getVarSize().getQuantity(), Flags,
4213  CE->getLinkage());
4214  } else {
4215  llvm_unreachable("Unsupported entry kind.");
4216  }
4217  }
4218 }
4219 
4220 /// Loads all the offload entries information from the host IR
4221 /// metadata.
// NOTE: runs only during device compilation; it re-registers every entry
// recorded by the host pass so device codegen can assign the same entry
// ordering the host did.
4223  // If we are in target mode, load the metadata from the host IR. This code has
4224  // to match the metadata creation in createOffloadEntriesAndInfoMetadata().
4225 
 // Nothing to do on the host side.
4226  if (!CGM.getLangOpts().OpenMPIsDevice)
4227  return;
4228 
 // No host IR file was provided on the command line.
4229  if (CGM.getLangOpts().OMPHostIRFile.empty())
4230  return;
4231 
 // An unreadable host IR file is reported as a hard error.
4232  auto Buf = llvm::MemoryBuffer::getFile(CGM.getLangOpts().OMPHostIRFile);
4233  if (auto EC = Buf.getError()) {
4234  CGM.getDiags().Report(diag::err_cannot_open_file)
4235  << CGM.getLangOpts().OMPHostIRFile << EC.message();
4236  return;
4237  }
4238 
 // Parse the host bitcode into a local context; only the named metadata is
 // consumed and the module is discarded when this function returns.
4239  llvm::LLVMContext C;
4240  auto ME = expectedToErrorOrAndEmitErrors(
4241  C, llvm::parseBitcodeFile(Buf.get()->getMemBufferRef(), C));
4242 
4243  if (auto EC = ME.getError()) {
4244  unsigned DiagID = CGM.getDiags().getCustomDiagID(
4245  DiagnosticsEngine::Error, "Unable to parse host IR file '%0':'%1'");
4246  CGM.getDiags().Report(DiagID)
4247  << CGM.getLangOpts().OMPHostIRFile << EC.message();
4248  return;
4249  }
4250 
 // A host module without offloading metadata contributes no entries.
4251  llvm::NamedMDNode *MD = ME.get()->getNamedMetadata("omp_offload.info");
4252  if (!MD)
4253  return;
4254 
4255  for (llvm::MDNode *MN : MD->operands()) {
 // Helper: read operand Idx of the current node as an integer constant.
4256  auto &&GetMDInt = [MN](unsigned Idx) {
4257  auto *V = cast<llvm::ConstantAsMetadata>(MN->getOperand(Idx));
4258  return cast<llvm::ConstantInt>(V->getValue())->getZExtValue();
4259  };
4260 
 // Helper: read operand Idx of the current node as a string.
4261  auto &&GetMDString = [MN](unsigned Idx) {
4262  auto *V = cast<llvm::MDString>(MN->getOperand(Idx));
4263  return V->getString();
4264  };
4265 
 // Operand 0 encodes the entry kind; the operand layout must mirror the
 // emitter lambdas in createOffloadEntriesAndInfoMetadata().
4266  switch (GetMDInt(0)) {
4267  default:
4268  llvm_unreachable("Unexpected metadata!");
4269  break;
 // Target-region entry: {Kind, DeviceID, FileID, ParentName, Line, Order}.
4273  /*DeviceID=*/GetMDInt(1), /*FileID=*/GetMDInt(2),
4274  /*ParentName=*/GetMDString(3), /*Line=*/GetMDInt(4),
4275  /*Order=*/GetMDInt(5));
4276  break;
 // Declare-target variable entry: {Kind, MangledName, Flags, Order}.
4280  /*MangledName=*/GetMDString(1),
4281  static_cast<OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind>(
4282  /*Flags=*/GetMDInt(2)),
4283  /*Order=*/GetMDInt(3));
4284  break;
4285  }
4286  }
4287 }
4288 
// Lazily materializes the task entry callback type,
// kmp_int32 (*kmp_routine_entry_t)(kmp_int32, void *), caching both the AST
// type (KmpRoutineEntryPtrQTy) and its IR lowering (KmpRoutineEntryPtrTy).
4290  if (!KmpRoutineEntryPtrTy) {
4291  // Build typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *); type.
4292  ASTContext &C = CGM.getContext();
4293  QualType KmpRoutineEntryTyArgs[] = {KmpInt32Ty, C.VoidPtrTy};
4295  KmpRoutineEntryPtrQTy = C.getPointerType(
4296  C.getFunctionType(KmpInt32Ty, KmpRoutineEntryTyArgs, EPI));
4297  KmpRoutineEntryPtrTy = CGM.getTypes().ConvertType(KmpRoutineEntryPtrQTy);
4298  }
4299 }
4300 
// Lazily builds (and caches in TgtOffloadEntryQTy) the AST record type for
// __tgt_offload_entry; the packed attribute below keeps the layout free of
// padding — presumably to match the offload runtime's struct (confirm).
4302  // Make sure the type of the entry is already created. This is the type we
4303  // have to create:
4304  // struct __tgt_offload_entry{
4305  // void *addr; // Pointer to the offload entry info.
4306  // // (function or global)
4307  // char *name; // Name of the function or global.
4308  // size_t size; // Size of the entry info (0 if it is a function).
4309  // int32_t flags; // Flags associated with the entry, e.g. 'link'.
4310  // int32_t reserved; // Reserved, to use by the runtime library.
4311  // };
4312  if (TgtOffloadEntryQTy.isNull()) {
4313  ASTContext &C = CGM.getContext();
4314  RecordDecl *RD = C.buildImplicitRecord("__tgt_offload_entry");
4315  RD->startDefinition();
4316  addFieldToRecordDecl(C, RD, C.VoidPtrTy); // addr
4318  addFieldToRecordDecl(C, RD, C.getSizeType()); // size
4320  C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true)); // flags
4322  C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true)); // reserved
4323  RD->completeDefinition();
4324  RD->addAttr(PackedAttr::CreateImplicit(C));
4326  }
4327  return TgtOffloadEntryQTy;
4328 }
4329 
// Lazily builds (and caches in TgtDeviceImageQTy) the AST record type for
// __tgt_device_image: one device code image plus its host entry-table bounds.
4331  // These are the types we need to build:
4332  // struct __tgt_device_image{
4333  // void *ImageStart; // Pointer to the target code start.
4334  // void *ImageEnd; // Pointer to the target code end.
4335  // // We also add the host entries to the device image, as it may be useful
4336  // // for the target runtime to have access to that information.
4337  // __tgt_offload_entry *EntriesBegin; // Begin of the table with all
4338  // // the entries.
4339  // __tgt_offload_entry *EntriesEnd; // End of the table with all the
4340  // // entries (non inclusive).
4341  // };
4342  if (TgtDeviceImageQTy.isNull()) {
4343  ASTContext &C = CGM.getContext();
4344  RecordDecl *RD = C.buildImplicitRecord("__tgt_device_image");
4345  RD->startDefinition();
4346  addFieldToRecordDecl(C, RD, C.VoidPtrTy); // ImageStart
4347  addFieldToRecordDecl(C, RD, C.VoidPtrTy); // ImageEnd
4350  RD->completeDefinition();
4352  }
4353  return TgtDeviceImageQTy;
4354 }
4355 
// Lazily builds (and caches in TgtBinaryDescriptorQTy) the AST record type
// for __tgt_bin_desc, which groups the device images with the host entry
// table (layout documented below).
4357  // struct __tgt_bin_desc{
4358  // int32_t NumDevices; // Number of devices supported.
4359  // __tgt_device_image *DeviceImages; // Arrays of device images
4360  // // (one per device).
4361  // __tgt_offload_entry *EntriesBegin; // Begin of the table with all the
4362  // // entries.
4363  // __tgt_offload_entry *EntriesEnd; // End of the table with all the
4364  // // entries (non inclusive).
4365  // };
4367  ASTContext &C = CGM.getContext();
4368  RecordDecl *RD = C.buildImplicitRecord("__tgt_bin_desc");
4369  RD->startDefinition();
4371  C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true)); // NumDevices
4375  RD->completeDefinition();
4377  }
4378  return TgtBinaryDescriptorQTy;
4379 }
4380 
4381 namespace {
4382 struct PrivateHelpersTy {
4383  PrivateHelpersTy(const VarDecl *Original, const VarDecl *PrivateCopy,
4384  const VarDecl *PrivateElemInit)
4385  : Original(Original), PrivateCopy(PrivateCopy),
4386  PrivateElemInit(PrivateElemInit) {}
4387  const VarDecl *Original;
4388  const VarDecl *PrivateCopy;
4389  const VarDecl *PrivateElemInit;
4390 };
4391 typedef std::pair<CharUnits /*Align*/, PrivateHelpersTy> PrivateDataTy;
4392 } // anonymous namespace
4393 
// Builds the implicit record ".kmp_privates.t" with one field per privatized
// variable (the non-reference type of the original VarDecl). Returns nullptr
// when there is nothing to privatize.
4394 static RecordDecl *
4396  if (!Privates.empty()) {
4397  ASTContext &C = CGM.getContext();
4398  // Build struct .kmp_privates_t. {
4399  // /* private vars */
4400  // };
4401  RecordDecl *RD = C.buildImplicitRecord(".kmp_privates.t");
4402  RD->startDefinition();
4403  for (const auto &Pair : Privates) {
4404  const VarDecl *VD = Pair.second.Original;
4405  QualType Type = VD->getType().getNonReferenceType();
4406  FieldDecl *FD = addFieldToRecordDecl(C, RD, Type);
 // Copy any AlignedAttr from the original variable onto the field so the
 // private copy keeps the required alignment.
4407  if (VD->hasAttrs()) {
4408  for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
4409  E(VD->getAttrs().end());
4410  I != E; ++I)
4411  FD->addAttr(*I);
4412  }
4413  }
4414  RD->completeDefinition();
4415  return RD;
4416  }
4417  return nullptr;
4418 }
4419 
// Builds the implicit record type for the runtime task descriptor kmp_task_t
// (layout documented in the comment below). Taskloop directives get five
// extra trailing fields: lb, ub, st, liter and reductions.
4420 static RecordDecl *
4422  QualType KmpInt32Ty,
4423  QualType KmpRoutineEntryPointerQTy) {
4424  ASTContext &C = CGM.getContext();
4425  // Build struct kmp_task_t {
4426  // void * shareds;
4427  // kmp_routine_entry_t routine;
4428  // kmp_int32 part_id;
4429  // kmp_cmplrdata_t data1;
4430  // kmp_cmplrdata_t data2;
4431  // For taskloops additional fields:
4432  // kmp_uint64 lb;
4433  // kmp_uint64 ub;
4434  // kmp_int64 st;
4435  // kmp_int32 liter;
4436  // void * reductions;
4437  // };
 // kmp_cmplrdata_t is a union of a kmp_int32 and a routine pointer.
4438  RecordDecl *UD = C.buildImplicitRecord("kmp_cmplrdata_t", TTK_Union);
4439  UD->startDefinition();
4440  addFieldToRecordDecl(C, UD, KmpInt32Ty);
4441  addFieldToRecordDecl(C, UD, KmpRoutineEntryPointerQTy);
4442  UD->completeDefinition();
4443  QualType KmpCmplrdataTy = C.getRecordType(UD);
4444  RecordDecl *RD = C.buildImplicitRecord("kmp_task_t");
4445  RD->startDefinition();
4446  addFieldToRecordDecl(C, RD, C.VoidPtrTy); // shareds
4447  addFieldToRecordDecl(C, RD, KmpRoutineEntryPointerQTy); // routine
4448  addFieldToRecordDecl(C, RD, KmpInt32Ty); // part_id
4449  addFieldToRecordDecl(C, RD, KmpCmplrdataTy); // data1
4450  addFieldToRecordDecl(C, RD, KmpCmplrdataTy); // data2
4451  if (isOpenMPTaskLoopDirective(Kind)) {
4452  QualType KmpUInt64Ty =
4453  CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0);
4454  QualType KmpInt64Ty =
4455  CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
4456  addFieldToRecordDecl(C, RD, KmpUInt64Ty); // lb
4457  addFieldToRecordDecl(C, RD, KmpUInt64Ty); // ub
4458  addFieldToRecordDecl(C, RD, KmpInt64Ty); // st
4459  addFieldToRecordDecl(C, RD, KmpInt32Ty); // liter
4460  addFieldToRecordDecl(C, RD, C.VoidPtrTy); // reductions
4461  }
4462  RD->completeDefinition();
4463  return RD;
4464 }
4465 
// Wraps kmp_task_t together with the task's privates block:
// struct kmp_task_t_with_privates { kmp_task_t task_data; <privates>; }.
// The privates field is omitted entirely when there are no private vars.
4466 static RecordDecl *
4468  ArrayRef<PrivateDataTy> Privates) {
4469  ASTContext &C = CGM.getContext();
4470  // Build struct kmp_task_t_with_privates {
4471  // kmp_task_t task_data;
4472  // .kmp_privates_t. privates;
4473  // };
4474  RecordDecl *RD = C.buildImplicitRecord("kmp_task_t_with_privates");
4475  RD->startDefinition();
4476  addFieldToRecordDecl(C, RD, KmpTaskTQTy);
 // createPrivatesRecordDecl returns nullptr for an empty Privates list, in
 // which case the record carries only the kmp_task_t header.
4477  if (const RecordDecl *PrivateRD = createPrivatesRecordDecl(CGM, Privates))
4478  addFieldToRecordDecl(C, RD, C.getRecordType(PrivateRD));
4479  RD->completeDefinition();
4480  return RD;
4481 }
4482 
4483 /// Emit a proxy function which accepts kmp_task_t as the second
4484 /// argument.
4485 /// \code
4486 /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
4487 /// TaskFunction(gtid, tt->part_id, &tt->privates, task_privates_map, tt,
4488 /// For taskloops:
4489 /// tt->task_data.lb, tt->task_data.ub, tt->task_data.st, tt->task_data.liter,
4490 /// tt->reductions, tt->shareds);
4491 /// return 0;
4492 /// }
4493 /// \endcode
4494 static llvm::Function *
4496  OpenMPDirectiveKind Kind, QualType KmpInt32Ty,
4497  QualType KmpTaskTWithPrivatesPtrQTy,
4498  QualType KmpTaskTWithPrivatesQTy, QualType KmpTaskTQTy,
4499  QualType SharedsPtrTy, llvm::Function *TaskFunction,
4500  llvm::Value *TaskPrivatesMap) {
4501  ASTContext &C = CGM.getContext();
 // Proxy signature: kmp_int32 (kmp_int32 gtid, kmp_task_t_with_privates *tt).
4502  FunctionArgList Args;
4503  ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty,
4505  ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4506  KmpTaskTWithPrivatesPtrQTy.withRestrict(),
4508  Args.push_back(&GtidArg);
4509  Args.push_back(&TaskTypeArg);
4510  const auto &TaskEntryFnInfo =
4511  CGM.getTypes().arrangeBuiltinFunctionDeclaration(KmpInt32Ty, Args);
4512  llvm::FunctionType *TaskEntryTy =
4513  CGM.getTypes().GetFunctionType(TaskEntryFnInfo);
4514  std::string Name = CGM.getOpenMPRuntime().getName({"omp_task_entry", ""});
4515  auto *TaskEntry = llvm::Function::Create(
4516  TaskEntryTy, llvm::GlobalValue::InternalLinkage, Name, &CGM.getModule());
4517  CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskEntry, TaskEntryFnInfo);
4518  TaskEntry->setDoesNotRecurse();
4519  CodeGenFunction CGF(CGM);
4520  CGF.StartFunction(GlobalDecl(), KmpInt32Ty, TaskEntry, TaskEntryFnInfo, Args,
4521  Loc, Loc);
4522 
4523  // TaskFunction(gtid, tt->task_data.part_id, &tt->privates, task_privates_map,
4524  // tt,
4525  // For taskloops:
4526  // tt->task_data.lb, tt->task_data.ub, tt->task_data.st, tt->task_data.liter,
4527  // tt->task_data.shareds);
4528  llvm::Value *GtidParam = CGF.EmitLoadOfScalar(
4529  CGF.GetAddrOfLocalVar(&GtidArg), /*Volatile=*/false, KmpInt32Ty, Loc);
 // Dereference the task argument; the first field of the wrapper record is
 // the kmp_task_t header whose fields are read below.
4530  LValue TDBase = CGF.EmitLoadOfPointerLValue(
4531  CGF.GetAddrOfLocalVar(&TaskTypeArg),
4532  KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
4533  const auto *KmpTaskTWithPrivatesQTyRD =
4534  cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
4535  LValue Base =
4536  CGF.EmitLValueForField(TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
4537  const auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
 // part_id is passed as an address (pointer to the field, not its value).
4538  auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
4539  LValue PartIdLVal = CGF.EmitLValueForField(Base, *PartIdFI);
4540  llvm::Value *PartidParam = PartIdLVal.getPointer();
4541 
4542  auto SharedsFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTShareds);
4543  LValue SharedsLVal = CGF.EmitLValueForField(Base, *SharedsFI);
4545  CGF.EmitLoadOfScalar(SharedsLVal, Loc),
4546  CGF.ConvertTypeForMem(SharedsPtrTy));
4547 
 // Pass a null privates pointer when the task has no privates block (the
 // wrapper record then has only the kmp_task_t field).
4548  auto PrivatesFI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin(), 1);
4549  llvm::Value *PrivatesParam;
4550  if (PrivatesFI != KmpTaskTWithPrivatesQTyRD->field_end()) {
4551  LValue PrivatesLVal = CGF.EmitLValueForField(TDBase, *PrivatesFI);
4552  PrivatesParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4553  PrivatesLVal.getPointer(), CGF.VoidPtrTy);
4554  } else {
4555  PrivatesParam = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
4556  }
4557 
4558  llvm::Value *CommonArgs[] = {GtidParam, PartidParam, PrivatesParam,
4559  TaskPrivatesMap,
4560  CGF.Builder
4562  TDBase.getAddress(), CGF.VoidPtrTy)
4563  .getPointer()};
4564  SmallVector<llvm::Value *, 16> CallArgs(std::begin(CommonArgs),
4565  std::end(CommonArgs));
 // Taskloop entries additionally receive lb/ub/st/liter/reductions loaded
 // from the task descriptor.
4566  if (isOpenMPTaskLoopDirective(Kind)) {
4567  auto LBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound);
4568  LValue LBLVal = CGF.EmitLValueForField(Base, *LBFI);
4569  llvm::Value *LBParam = CGF.EmitLoadOfScalar(LBLVal, Loc);
4570  auto UBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound);
4571  LValue UBLVal = CGF.EmitLValueForField(Base, *UBFI);
4572  llvm::Value *UBParam = CGF.EmitLoadOfScalar(UBLVal, Loc);
4573  auto StFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTStride);
4574  LValue StLVal = CGF.EmitLValueForField(Base, *StFI);
4575  llvm::Value *StParam = CGF.EmitLoadOfScalar(StLVal, Loc);
4576  auto LIFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLastIter);
4577  LValue LILVal = CGF.EmitLValueForField(Base, *LIFI);
4578  llvm::Value *LIParam = CGF.EmitLoadOfScalar(LILVal, Loc);
4579  auto RFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTReductions);
4580  LValue RLVal = CGF.EmitLValueForField(Base, *RFI);
4581  llvm::Value *RParam = CGF.EmitLoadOfScalar(RLVal, Loc);
4582  CallArgs.push_back(LBParam);
4583  CallArgs.push_back(UBParam);
4584  CallArgs.push_back(StParam);
4585  CallArgs.push_back(LIParam);
4586  CallArgs.push_back(RParam);
4587  }
4588  CallArgs.push_back(SharedsParam);
4589 
4590  CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, Loc, TaskFunction,
4591  CallArgs);
 // The proxy itself always returns 0.
4592  CGF.EmitStoreThroughLValue(RValue::get(CGF.Builder.getInt32(/*C=*/0)),
4593  CGF.MakeAddrLValue(CGF.ReturnValue, KmpInt32Ty));
4594  CGF.FinishFunction();
4595  return TaskEntry;
4596 }
4597 
4599  SourceLocation Loc,
4600  QualType KmpInt32Ty,
4601  QualType KmpTaskTWithPrivatesPtrQTy,
4602  QualType KmpTaskTWithPrivatesQTy) {
4603  ASTContext &C = CGM.getContext();
4604  FunctionArgList Args;
4605  ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty,
4607  ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4608  KmpTaskTWithPrivatesPtrQTy.withRestrict(),
4610  Args.push_back(&GtidArg);
4611  Args.push_back(&TaskTypeArg);
4612  const auto &DestructorFnInfo =
4613  CGM.getTypes().arrangeBuiltinFunctionDeclaration(KmpInt32Ty, Args);
4614  llvm::FunctionType *DestructorFnTy =
4615  CGM.getTypes().GetFunctionType(DestructorFnInfo);
4616  std::string Name =
4617  CGM.getOpenMPRuntime().getName({"omp_task_destructor", ""});
4618  auto *DestructorFn =
4620  Name, &CGM.getModule());
4621  CGM.SetInternalFunctionAttributes(GlobalDecl(), DestructorFn,
4622  DestructorFnInfo);
4623  DestructorFn->setDoesNotRecurse();
4624  CodeGenFunction CGF(CGM);
4625  CGF.StartFunction(GlobalDecl(), KmpInt32Ty, DestructorFn, DestructorFnInfo,
4626  Args, Loc, Loc);
4627 
4629  CGF.GetAddrOfLocalVar(&TaskTypeArg),
4630  KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
4631  const auto *KmpTaskTWithPrivatesQTyRD =
4632  cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
4633  auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
4634  Base = CGF.EmitLValueForField(Base, *FI);
4635  for (const auto *Field :
4636  cast<RecordDecl>(FI->getType()->getAsTagDecl())->fields()) {
4637  if (QualType::DestructionKind DtorKind =
4638  Field->getType().isDestructedType()) {
4639  LValue FieldLValue = CGF.EmitLValueForField(Base, Field);
4640  CGF.pushDestroy(DtorKind, FieldLValue.getAddress(), Field->getType());
4641  }
4642  }
4643  CGF.FinishFunction();
4644  return DestructorFn;
4645 }
4646 
4647 /// Emit a privates mapping function for correct handling of private and
4648 /// firstprivate variables.
4649 /// \code
4650 /// void .omp_task_privates_map.(const .privates. *noalias privs, <ty1>