1 //===----- CGOpenMPRuntime.cpp - Interface to OpenMP Runtimes -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This provides a class for OpenMP runtime code generation.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "CGCXXABI.h"
14 #include "CGCleanup.h"
15 #include "CGOpenMPRuntime.h"
16 #include "CGRecordLayout.h"
17 #include "CodeGenFunction.h"
19 #include "clang/AST/Decl.h"
20 #include "clang/AST/StmtOpenMP.h"
22 #include "llvm/ADT/ArrayRef.h"
23 #include "llvm/Bitcode/BitcodeReader.h"
24 #include "llvm/IR/DerivedTypes.h"
25 #include "llvm/IR/GlobalValue.h"
26 #include "llvm/IR/Value.h"
27 #include "llvm/Support/Format.h"
28 #include "llvm/Support/raw_ostream.h"
29 #include <cassert>
30 
31 using namespace clang;
32 using namespace CodeGen;
33 
34 namespace {
35 /// Base class for handling code generation inside OpenMP regions.
36 class CGOpenMPRegionInfo : public CodeGenFunction::CGCapturedStmtInfo {
37 public:
38  /// Kinds of OpenMP regions used in codegen.
39  enum CGOpenMPRegionKind {
40  /// Region with outlined function for standalone 'parallel'
41  /// directive.
42  ParallelOutlinedRegion,
43  /// Region with outlined function for standalone 'task' directive.
44  TaskOutlinedRegion,
45  /// Region for constructs that do not require function outlining,
46  /// like 'for', 'sections', 'atomic' etc. directives.
47  InlinedRegion,
48  /// Region with outlined function for standalone 'target' directive.
49  TargetRegion,
50  };
51 
52  CGOpenMPRegionInfo(const CapturedStmt &CS,
53  const CGOpenMPRegionKind RegionKind,
54  const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind,
55  bool HasCancel)
56  : CGCapturedStmtInfo(CS, CR_OpenMP), RegionKind(RegionKind),
57  CodeGen(CodeGen), Kind(Kind), HasCancel(HasCancel) {}
58 
59  CGOpenMPRegionInfo(const CGOpenMPRegionKind RegionKind,
60  const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind,
61  bool HasCancel)
62  : CGCapturedStmtInfo(CR_OpenMP), RegionKind(RegionKind), CodeGen(CodeGen),
63  Kind(Kind), HasCancel(HasCancel) {}
64 
65  /// Get a variable or parameter for storing global thread id
66  /// inside OpenMP construct.
67  virtual const VarDecl *getThreadIDVariable() const = 0;
68 
69  /// Emit the captured statement body.
70  void EmitBody(CodeGenFunction &CGF, const Stmt *S) override;
71 
72  /// Get an LValue for the current ThreadID variable.
73  /// \return LValue for thread id variable. This LValue always has type int32*.
74  virtual LValue getThreadIDVariableLValue(CodeGenFunction &CGF);
75 
76  virtual void emitUntiedSwitch(CodeGenFunction & /*CGF*/) {}
77 
78  CGOpenMPRegionKind getRegionKind() const { return RegionKind; }
79 
80  OpenMPDirectiveKind getDirectiveKind() const { return Kind; }
81 
82  bool hasCancel() const { return HasCancel; }
83 
84  static bool classof(const CGCapturedStmtInfo *Info) {
85  return Info->getKind() == CR_OpenMP;
86  }
87 
88  ~CGOpenMPRegionInfo() override = default;
89 
90 protected:
91  CGOpenMPRegionKind RegionKind;
92  RegionCodeGenTy CodeGen;
93  OpenMPDirectiveKind Kind;
94  bool HasCancel;
95 };
96 
97 /// API for captured statement code generation in OpenMP constructs.
98 class CGOpenMPOutlinedRegionInfo final : public CGOpenMPRegionInfo {
99 public:
100  CGOpenMPOutlinedRegionInfo(const CapturedStmt &CS, const VarDecl *ThreadIDVar,
101  const RegionCodeGenTy &CodeGen,
102  OpenMPDirectiveKind Kind, bool HasCancel,
103  StringRef HelperName)
104  : CGOpenMPRegionInfo(CS, ParallelOutlinedRegion, CodeGen, Kind,
105  HasCancel),
106  ThreadIDVar(ThreadIDVar), HelperName(HelperName) {
107  assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
108  }
109 
110  /// Get a variable or parameter for storing global thread id
111  /// inside OpenMP construct.
112  const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }
113 
114  /// Get the name of the capture helper.
115  StringRef getHelperName() const override { return HelperName; }
116 
117  static bool classof(const CGCapturedStmtInfo *Info) {
118  return CGOpenMPRegionInfo::classof(Info) &&
119  cast<CGOpenMPRegionInfo>(Info)->getRegionKind() ==
120  ParallelOutlinedRegion;
121  }
122 
123 private:
124  /// A variable or parameter storing global thread id for OpenMP
125  /// constructs.
126  const VarDecl *ThreadIDVar;
127  StringRef HelperName;
128 };
129 
130 /// API for captured statement code generation in OpenMP constructs.
131 class CGOpenMPTaskOutlinedRegionInfo final : public CGOpenMPRegionInfo {
132 public:
133  class UntiedTaskActionTy final : public PrePostActionTy {
134  bool Untied;
135  const VarDecl *PartIDVar;
136  const RegionCodeGenTy UntiedCodeGen;
137  llvm::SwitchInst *UntiedSwitch = nullptr;
138 
139  public:
140  UntiedTaskActionTy(bool Tied, const VarDecl *PartIDVar,
141  const RegionCodeGenTy &UntiedCodeGen)
142  : Untied(!Tied), PartIDVar(PartIDVar), UntiedCodeGen(UntiedCodeGen) {}
143  void Enter(CodeGenFunction &CGF) override {
144  if (Untied) {
145  // Emit task switching point.
146  LValue PartIdLVal = CGF.EmitLoadOfPointerLValue(
147  CGF.GetAddrOfLocalVar(PartIDVar),
148  PartIDVar->getType()->castAs<PointerType>());
149  llvm::Value *Res =
150  CGF.EmitLoadOfScalar(PartIdLVal, PartIDVar->getLocation());
151  llvm::BasicBlock *DoneBB = CGF.createBasicBlock(".untied.done.");
152  UntiedSwitch = CGF.Builder.CreateSwitch(Res, DoneBB);
153  CGF.EmitBlock(DoneBB);
154  CGF.EmitBranchThroughCleanup(CGF.ReturnBlock);
155  CGF.EmitBlock(CGF.createBasicBlock(".untied.jmp."));
156  UntiedSwitch->addCase(CGF.Builder.getInt32(0),
157  CGF.Builder.GetInsertBlock());
158  emitUntiedSwitch(CGF);
159  }
160  }
161  void emitUntiedSwitch(CodeGenFunction &CGF) const {
162  if (Untied) {
163  LValue PartIdLVal = CGF.EmitLoadOfPointerLValue(
164  CGF.GetAddrOfLocalVar(PartIDVar),
165  PartIDVar->getType()->castAs<PointerType>());
166  CGF.EmitStoreOfScalar(CGF.Builder.getInt32(UntiedSwitch->getNumCases()),
167  PartIdLVal);
168  UntiedCodeGen(CGF);
169  CodeGenFunction::JumpDest CurPoint =
170  CGF.getJumpDestInCurrentScope(".untied.next.");
171  CGF.EmitBranch(CGF.ReturnBlock.getBlock());
172  CGF.EmitBlock(CGF.createBasicBlock(".untied.jmp."));
173  UntiedSwitch->addCase(CGF.Builder.getInt32(UntiedSwitch->getNumCases()),
174  CGF.Builder.GetInsertBlock());
175  CGF.EmitBranchThroughCleanup(CurPoint);
176  CGF.EmitBlock(CurPoint.getBlock());
177  }
178  }
179  unsigned getNumberOfParts() const { return UntiedSwitch->getNumCases(); }
180  };
181  CGOpenMPTaskOutlinedRegionInfo(const CapturedStmt &CS,
182  const VarDecl *ThreadIDVar,
183  const RegionCodeGenTy &CodeGen,
184  OpenMPDirectiveKind Kind, bool HasCancel,
185  const UntiedTaskActionTy &Action)
186  : CGOpenMPRegionInfo(CS, TaskOutlinedRegion, CodeGen, Kind, HasCancel),
187  ThreadIDVar(ThreadIDVar), Action(Action) {
188  assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
189  }
190 
191  /// Get a variable or parameter for storing global thread id
192  /// inside OpenMP construct.
193  const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }
194 
195  /// Get an LValue for the current ThreadID variable.
196  LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override;
197 
198  /// Get the name of the capture helper.
199  StringRef getHelperName() const override { return ".omp_outlined."; }
200 
201  void emitUntiedSwitch(CodeGenFunction &CGF) override {
202  Action.emitUntiedSwitch(CGF);
203  }
204 
205  static bool classof(const CGCapturedStmtInfo *Info) {
206  return CGOpenMPRegionInfo::classof(Info) &&
207  cast<CGOpenMPRegionInfo>(Info)->getRegionKind() ==
208  TaskOutlinedRegion;
209  }
210 
211 private:
212  /// A variable or parameter storing global thread id for OpenMP
213  /// constructs.
214  const VarDecl *ThreadIDVar;
215  /// Action for emitting code for untied tasks.
216  const UntiedTaskActionTy &Action;
217 };
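// Illustrative sketch (not from this file): for an untied task such as
//
//   #pragma omp task untied
//   { foo(); /* scheduling point */ bar(); }
//
// the outlined task function is conceptually re-entered with an increasing
// part id, and the switch created in UntiedTaskActionTy::Enter() dispatches
// to the matching resume point:
//
//   switch (*part_id) {
//   case 0: foo(); *part_id = 1; /* re-enqueue task */ return;
//   case 1: bar(); break;
//   }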
218 
219 /// API for inlined captured statement code generation in OpenMP
220 /// constructs.
221 class CGOpenMPInlinedRegionInfo : public CGOpenMPRegionInfo {
222 public:
223  CGOpenMPInlinedRegionInfo(CodeGenFunction::CGCapturedStmtInfo *OldCSI,
224  const RegionCodeGenTy &CodeGen,
225  OpenMPDirectiveKind Kind, bool HasCancel)
226  : CGOpenMPRegionInfo(InlinedRegion, CodeGen, Kind, HasCancel),
227  OldCSI(OldCSI),
228  OuterRegionInfo(dyn_cast_or_null<CGOpenMPRegionInfo>(OldCSI)) {}
229 
230  // Retrieve the value of the context parameter.
231  llvm::Value *getContextValue() const override {
232  if (OuterRegionInfo)
233  return OuterRegionInfo->getContextValue();
234  llvm_unreachable("No context value for inlined OpenMP region");
235  }
236 
237  void setContextValue(llvm::Value *V) override {
238  if (OuterRegionInfo) {
239  OuterRegionInfo->setContextValue(V);
240  return;
241  }
242  llvm_unreachable("No context value for inlined OpenMP region");
243  }
244 
245  /// Lookup the captured field decl for a variable.
246  const FieldDecl *lookup(const VarDecl *VD) const override {
247  if (OuterRegionInfo)
248  return OuterRegionInfo->lookup(VD);
249  // If there is no outer outlined region, there is no need to look it up in
250  // the list of captured variables; we can use the original one.
251  return nullptr;
252  }
253 
254  FieldDecl *getThisFieldDecl() const override {
255  if (OuterRegionInfo)
256  return OuterRegionInfo->getThisFieldDecl();
257  return nullptr;
258  }
259 
260  /// Get a variable or parameter for storing global thread id
261  /// inside OpenMP construct.
262  const VarDecl *getThreadIDVariable() const override {
263  if (OuterRegionInfo)
264  return OuterRegionInfo->getThreadIDVariable();
265  return nullptr;
266  }
267 
268  /// Get an LValue for the current ThreadID variable.
269  LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override {
270  if (OuterRegionInfo)
271  return OuterRegionInfo->getThreadIDVariableLValue(CGF);
272  llvm_unreachable("No LValue for inlined OpenMP construct");
273  }
274 
275  /// Get the name of the capture helper.
276  StringRef getHelperName() const override {
277  if (auto *OuterRegionInfo = getOldCSI())
278  return OuterRegionInfo->getHelperName();
279  llvm_unreachable("No helper name for inlined OpenMP construct");
280  }
281 
282  void emitUntiedSwitch(CodeGenFunction &CGF) override {
283  if (OuterRegionInfo)
284  OuterRegionInfo->emitUntiedSwitch(CGF);
285  }
286 
287  CodeGenFunction::CGCapturedStmtInfo *getOldCSI() const { return OldCSI; }
288 
289  static bool classof(const CGCapturedStmtInfo *Info) {
290  return CGOpenMPRegionInfo::classof(Info) &&
291  cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == InlinedRegion;
292  }
293 
294  ~CGOpenMPInlinedRegionInfo() override = default;
295 
296 private:
297  /// CodeGen info about outer OpenMP region.
298  CodeGenFunction::CGCapturedStmtInfo *OldCSI;
299  CGOpenMPRegionInfo *OuterRegionInfo;
300 };
301 
302 /// API for captured statement code generation in OpenMP target
303 /// constructs. For these captures, implicit parameters are used instead of the
304 /// captured fields. The name of the target region has to be unique in a given
305 /// application, so it is provided by the client, because only the client has
306 /// the information needed to generate it.
307 class CGOpenMPTargetRegionInfo final : public CGOpenMPRegionInfo {
308 public:
309  CGOpenMPTargetRegionInfo(const CapturedStmt &CS,
310  const RegionCodeGenTy &CodeGen, StringRef HelperName)
311  : CGOpenMPRegionInfo(CS, TargetRegion, CodeGen, OMPD_target,
312  /*HasCancel=*/false),
313  HelperName(HelperName) {}
314 
315  /// This is unused for target regions because each starts executing
316  /// with a single thread.
317  const VarDecl *getThreadIDVariable() const override { return nullptr; }
318 
319  /// Get the name of the capture helper.
320  StringRef getHelperName() const override { return HelperName; }
321 
322  static bool classof(const CGCapturedStmtInfo *Info) {
323  return CGOpenMPRegionInfo::classof(Info) &&
324  cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == TargetRegion;
325  }
326 
327 private:
328  StringRef HelperName;
329 };
330 
331 static void EmptyCodeGen(CodeGenFunction &, PrePostActionTy &) {
332  llvm_unreachable("No codegen for expressions");
333 }
334 /// API for generation of expressions captured in an innermost OpenMP
335 /// region.
336 class CGOpenMPInnerExprInfo final : public CGOpenMPInlinedRegionInfo {
337 public:
338  CGOpenMPInnerExprInfo(CodeGenFunction &CGF, const CapturedStmt &CS)
339  : CGOpenMPInlinedRegionInfo(CGF.CapturedStmtInfo, EmptyCodeGen,
340  OMPD_unknown,
341  /*HasCancel=*/false),
342  PrivScope(CGF) {
343  // Make sure the globals captured in the provided statement are local by
344  // using the privatization logic. We assume the same variable is not
345  // captured more than once.
346  for (const auto &C : CS.captures()) {
347  if (!C.capturesVariable() && !C.capturesVariableByCopy())
348  continue;
349 
350  const VarDecl *VD = C.getCapturedVar();
351  if (VD->isLocalVarDeclOrParm())
352  continue;
353 
354  DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(VD),
355  /*RefersToEnclosingVariableOrCapture=*/false,
356  VD->getType().getNonReferenceType(), VK_LValue,
357  C.getLocation());
358  PrivScope.addPrivate(
359  VD, [&CGF, &DRE]() { return CGF.EmitLValue(&DRE).getAddress(); });
360  }
361  (void)PrivScope.Privatize();
362  }
363 
364  /// Lookup the captured field decl for a variable.
365  const FieldDecl *lookup(const VarDecl *VD) const override {
366  if (const FieldDecl *FD = CGOpenMPInlinedRegionInfo::lookup(VD))
367  return FD;
368  return nullptr;
369  }
370 
371  /// Emit the captured statement body.
372  void EmitBody(CodeGenFunction &CGF, const Stmt *S) override {
373  llvm_unreachable("No body for expressions");
374  }
375 
376  /// Get a variable or parameter for storing global thread id
377  /// inside OpenMP construct.
378  const VarDecl *getThreadIDVariable() const override {
379  llvm_unreachable("No thread id for expressions");
380  }
381 
382  /// Get the name of the capture helper.
383  StringRef getHelperName() const override {
384  llvm_unreachable("No helper name for expressions");
385  }
386 
387  static bool classof(const CGCapturedStmtInfo *Info) { return false; }
388 
389 private:
390  /// Private scope to capture global variables.
391  CodeGenFunction::OMPPrivateScope PrivScope;
392 };
393 
394 /// RAII for emitting code of OpenMP constructs.
395 class InlinedOpenMPRegionRAII {
396  CodeGenFunction &CGF;
397  llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
398  FieldDecl *LambdaThisCaptureField = nullptr;
399  const CodeGen::CGBlockInfo *BlockInfo = nullptr;
400 
401 public:
402  /// Constructs region for combined constructs.
403  /// \param CodeGen Code generation sequence for combined directives. Includes
404  /// a list of functions used for code generation of implicitly inlined
405  /// regions.
406  InlinedOpenMPRegionRAII(CodeGenFunction &CGF, const RegionCodeGenTy &CodeGen,
407  OpenMPDirectiveKind Kind, bool HasCancel)
408  : CGF(CGF) {
409  // Start emission for the construct.
410  CGF.CapturedStmtInfo = new CGOpenMPInlinedRegionInfo(
411  CGF.CapturedStmtInfo, CodeGen, Kind, HasCancel);
412  std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
413  LambdaThisCaptureField = CGF.LambdaThisCaptureField;
414  CGF.LambdaThisCaptureField = nullptr;
415  BlockInfo = CGF.BlockInfo;
416  CGF.BlockInfo = nullptr;
417  }
418 
419  ~InlinedOpenMPRegionRAII() {
420  // Restore original CapturedStmtInfo only if we're done with code emission.
421  auto *OldCSI =
422  cast<CGOpenMPInlinedRegionInfo>(CGF.CapturedStmtInfo)->getOldCSI();
423  delete CGF.CapturedStmtInfo;
424  CGF.CapturedStmtInfo = OldCSI;
425  std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
426  CGF.LambdaThisCaptureField = LambdaThisCaptureField;
427  CGF.BlockInfo = BlockInfo;
428  }
429 };
430 
431 /// Values for bit flags used in the ident_t to describe the fields.
432 /// All enumerated elements are named and described in accordance with the code
433 /// from https://github.com/llvm/llvm-project/blob/master/openmp/runtime/src/kmp.h
434 enum OpenMPLocationFlags : unsigned {
435  /// Use trampoline for internal microtask.
436  OMP_IDENT_IMD = 0x01,
437  /// Use c-style ident structure.
438  OMP_IDENT_KMPC = 0x02,
439  /// Atomic reduction option for kmpc_reduce.
440  OMP_ATOMIC_REDUCE = 0x10,
441  /// Explicit 'barrier' directive.
442  OMP_IDENT_BARRIER_EXPL = 0x20,
443  /// Implicit barrier in code.
444  OMP_IDENT_BARRIER_IMPL = 0x40,
445  /// Implicit barrier in 'for' directive.
446  OMP_IDENT_BARRIER_IMPL_FOR = 0x40,
447  /// Implicit barrier in 'sections' directive.
448  OMP_IDENT_BARRIER_IMPL_SECTIONS = 0xC0,
449  /// Implicit barrier in 'single' directive.
450  OMP_IDENT_BARRIER_IMPL_SINGLE = 0x140,
451  /// Call of __kmp_for_static_init for static loop.
452  OMP_IDENT_WORK_LOOP = 0x200,
453  /// Call of __kmp_for_static_init for sections.
454  OMP_IDENT_WORK_SECTIONS = 0x400,
455  /// Call of __kmp_for_static_init for distribute.
456  OMP_IDENT_WORK_DISTRIBUTE = 0x800,
457  LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/OMP_IDENT_WORK_DISTRIBUTE)
458 };
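// A small worked example (not from this file): emitUpdateLocation() below
// always ORs OMP_IDENT_KMPC into the flags, so the ident_t emitted for the
// implicit barrier of a 'for' directive carries
//   OMP_IDENT_KMPC | OMP_IDENT_BARRIER_IMPL_FOR == 0x02 | 0x40 == 0x42
// in its 'flags' field, matching the values defined in kmp.h.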
459 
460 namespace {
462 /// Values for bit flags for marking which requires clauses have been used.
463 enum OpenMPOffloadingRequiresDirFlags : int64_t {
464  /// flag undefined.
465  OMP_REQ_UNDEFINED = 0x000,
466  /// no requires clause present.
467  OMP_REQ_NONE = 0x001,
468  /// reverse_offload clause.
469  OMP_REQ_REVERSE_OFFLOAD = 0x002,
470  /// unified_address clause.
471  OMP_REQ_UNIFIED_ADDRESS = 0x004,
472  /// unified_shared_memory clause.
473  OMP_REQ_UNIFIED_SHARED_MEMORY = 0x008,
474  /// dynamic_allocators clause.
475  OMP_REQ_DYNAMIC_ALLOCATORS = 0x010,
476  LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/OMP_REQ_DYNAMIC_ALLOCATORS)
477 };
478 
479 enum OpenMPOffloadingReservedDeviceIDs {
480  /// Device ID used if the device was not defined; the runtime should get it
481  /// from environment variables, as described in the spec.
482  OMP_DEVICEID_UNDEF = -1,
483 };
484 } // anonymous namespace
485 
486 /// Describes ident structure that describes a source location.
487 /// All descriptions are taken from
488 /// https://github.com/llvm/llvm-project/blob/master/openmp/runtime/src/kmp.h
489 /// Original structure:
490 /// typedef struct ident {
491 /// kmp_int32 reserved_1; /**< might be used in Fortran;
492 /// see above */
493 /// kmp_int32 flags; /**< also f.flags; KMP_IDENT_xxx flags;
494 /// KMP_IDENT_KMPC identifies this union
495 /// member */
496 /// kmp_int32 reserved_2; /**< not really used in Fortran any more;
497 /// see above */
498 ///#if USE_ITT_BUILD
499 /// /* but currently used for storing
500 /// region-specific ITT */
501 /// /* contextual information. */
502 ///#endif /* USE_ITT_BUILD */
503 /// kmp_int32 reserved_3; /**< source[4] in Fortran, do not use for
504 /// C++ */
505 /// char const *psource; /**< String describing the source location.
506 /// The string is composed of semi-colon separated
507 /// fields which describe the source file,
508 /// the function and a pair of line numbers that
509 /// delimit the construct.
510 /// */
511 /// } ident_t;
512 enum IdentFieldIndex {
513  /// might be used in Fortran
514  IdentField_Reserved_1,
515  /// OMP_IDENT_xxx flags; OMP_IDENT_KMPC identifies this union member.
516  IdentField_Flags,
517  /// Not really used in Fortran any more
518  IdentField_Reserved_2,
519  /// Source[4] in Fortran, do not use for C++
520  IdentField_Reserved_3,
521  /// String describing the source location. The string is composed of
522  /// semi-colon separated fields which describe the source file, the function
523  /// and a pair of line numbers that delimit the construct.
524  IdentField_PSource
525 };
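// For illustration (values assumed from the defaults built below, not a
// literal dump): the default location produced by getOrCreateDefaultLocation()
// corresponds to an ident_t of the form
//   { /*reserved_1=*/0, /*flags=*/OMP_IDENT_KMPC, /*reserved_2=*/0,
//     /*reserved_3=*/0, /*psource=*/";unknown;unknown;0;0;;" }
// where psource follows the ";file;function;line;column;;" format.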
526 
527 /// Schedule types for 'omp for' loops (these enumerators are taken from
528 /// the enum sched_type in kmp.h).
530  /// Lower bound for default (unordered) versions.
538  /// static with chunk adjustment (e.g., simd)
540  /// Lower bound for 'ordered' versions.
549  /// dist_schedule types
552  /// Support for OpenMP 4.5 monotonic and nonmonotonic schedule modifiers.
553  /// Set if the monotonic schedule modifier was present.
555  /// Set if the nonmonotonic schedule modifier was present.
557 };
558 
560  /// Call to void __kmpc_fork_call(ident_t *loc, kmp_int32 argc,
561  /// kmpc_micro microtask, ...);
563  /// Call to void *__kmpc_threadprivate_cached(ident_t *loc,
564  /// kmp_int32 global_tid, void *data, size_t size, void ***cache);
566  /// Call to void __kmpc_threadprivate_register( ident_t *,
567  /// void *data, kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor);
569  // Call to __kmpc_int32 kmpc_global_thread_num(ident_t *loc);
571  // Call to void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
572  // kmp_critical_name *crit);
574  // Call to void __kmpc_critical_with_hint(ident_t *loc, kmp_int32
575  // global_tid, kmp_critical_name *crit, uintptr_t hint);
577  // Call to void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid,
578  // kmp_critical_name *crit);
580  // Call to kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32
581  // global_tid);
583  // Call to void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
585  // Call to void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);
587  // Call to void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
588  // global_tid);
590  // Call to void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
591  // global_tid);
593  // Call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
594  // kmp_int32 num_threads);
596  // Call to void __kmpc_flush(ident_t *loc);
598  // Call to kmp_int32 __kmpc_master(ident_t *, kmp_int32 global_tid);
600  // Call to void __kmpc_end_master(ident_t *, kmp_int32 global_tid);
602  // Call to kmp_int32 __kmpc_omp_taskyield(ident_t *, kmp_int32 global_tid,
603  // int end_part);
605  // Call to kmp_int32 __kmpc_single(ident_t *, kmp_int32 global_tid);
607  // Call to void __kmpc_end_single(ident_t *, kmp_int32 global_tid);
609  // Call to kmp_task_t * __kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
610  // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
611  // kmp_routine_entry_t *task_entry);
613  // Call to kmp_task_t * __kmpc_omp_target_task_alloc(ident_t *,
614  // kmp_int32 gtid, kmp_int32 flags, size_t sizeof_kmp_task_t,
615  // size_t sizeof_shareds, kmp_routine_entry_t *task_entry,
616  // kmp_int64 device_id);
618  // Call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t *
619  // new_task);
621  // Call to void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
622  // size_t cpy_size, void *cpy_data, void(*cpy_func)(void *, void *),
623  // kmp_int32 didit);
625  // Call to kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid,
626  // kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void
627  // (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck);
629  // Call to kmp_int32 __kmpc_reduce_nowait(ident_t *loc, kmp_int32
630  // global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
631  // void (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name
632  // *lck);
634  // Call to void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
635  // kmp_critical_name *lck);
637  // Call to void __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
638  // kmp_critical_name *lck);
640  // Call to void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid,
641  // kmp_task_t * new_task);
643  // Call to void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid,
644  // kmp_task_t * new_task);
646  // Call to void __kmpc_ordered(ident_t *loc, kmp_int32 global_tid);
648  // Call to void __kmpc_end_ordered(ident_t *loc, kmp_int32 global_tid);
650  // Call to kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
651  // global_tid);
653  // Call to void __kmpc_taskgroup(ident_t *loc, kmp_int32 global_tid);
655  // Call to void __kmpc_end_taskgroup(ident_t *loc, kmp_int32 global_tid);
657  // Call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
658  // int proc_bind);
660  // Call to kmp_int32 __kmpc_omp_task_with_deps(ident_t *loc_ref, kmp_int32
661  // gtid, kmp_task_t * new_task, kmp_int32 ndeps, kmp_depend_info_t
662  // *dep_list, kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
664  // Call to void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32
665  // gtid, kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32
666  // ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
668  // Call to kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
669  // global_tid, kmp_int32 cncl_kind);
671  // Call to kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
672  // kmp_int32 cncl_kind);
674  // Call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid,
675  // kmp_int32 num_teams, kmp_int32 thread_limit);
677  // Call to void __kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro
678  // microtask, ...);
680  // Call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int
681  // if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int
682  // sched, kmp_uint64 grainsize, void *task_dup);
684  // Call to void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid, kmp_int32
685  // num_dims, struct kmp_dim *dims);
687  // Call to void __kmpc_doacross_fini(ident_t *loc, kmp_int32 gtid);
689  // Call to void __kmpc_doacross_post(ident_t *loc, kmp_int32 gtid, kmp_int64
690  // *vec);
692  // Call to void __kmpc_doacross_wait(ident_t *loc, kmp_int32 gtid, kmp_int64
693  // *vec);
695  // Call to void *__kmpc_task_reduction_init(int gtid, int num_data, void
696  // *data);
698  // Call to void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void
699  // *d);
701  // Call to void *__kmpc_alloc(int gtid, size_t sz, omp_allocator_handle_t al);
703  // Call to void __kmpc_free(int gtid, void *ptr, omp_allocator_handle_t al);
705 
706  //
707  // Offloading related calls
708  //
709  // Call to void __kmpc_push_target_tripcount(int64_t device_id, kmp_uint64
710  // size);
712  // Call to int32_t __tgt_target(int64_t device_id, void *host_ptr, int32_t
713  // arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
714  // *arg_types);
716  // Call to int32_t __tgt_target_nowait(int64_t device_id, void *host_ptr,
717  // int32_t arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
718  // *arg_types);
720  // Call to int32_t __tgt_target_teams(int64_t device_id, void *host_ptr,
721  // int32_t arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
722  // *arg_types, int32_t num_teams, int32_t thread_limit);
724  // Call to int32_t __tgt_target_teams_nowait(int64_t device_id, void
725  // *host_ptr, int32_t arg_num, void** args_base, void **args, int64_t
726  // *arg_sizes, int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
728  // Call to void __tgt_register_requires(int64_t flags);
730  // Call to void __tgt_register_lib(__tgt_bin_desc *desc);
732  // Call to void __tgt_unregister_lib(__tgt_bin_desc *desc);
734  // Call to void __tgt_target_data_begin(int64_t device_id, int32_t arg_num,
735  // void** args_base, void **args, int64_t *arg_sizes, int64_t *arg_types);
737  // Call to void __tgt_target_data_begin_nowait(int64_t device_id, int32_t
738  // arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
739  // *arg_types);
741  // Call to void __tgt_target_data_end(int64_t device_id, int32_t arg_num,
742  // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
744  // Call to void __tgt_target_data_end_nowait(int64_t device_id, int32_t
745  // arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
746  // *arg_types);
748  // Call to void __tgt_target_data_update(int64_t device_id, int32_t arg_num,
749  // void** args_base, void **args, int64_t *arg_sizes, int64_t *arg_types);
751  // Call to void __tgt_target_data_update_nowait(int64_t device_id, int32_t
752  // arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
753  // *arg_types);
755  // Call to int64_t __tgt_mapper_num_components(void *rt_mapper_handle);
757  // Call to void __tgt_push_mapper_component(void *rt_mapper_handle, void
758  // *base, void *begin, int64_t size, int64_t type);
760 };
761 
762 /// A basic class for a pre- or post-action used in advanced codegen sequences
763 /// for OpenMP regions.
764 class CleanupTy final : public EHScopeStack::Cleanup {
765  PrePostActionTy *Action;
766 
767 public:
768  explicit CleanupTy(PrePostActionTy *Action) : Action(Action) {}
769  void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
770  if (!CGF.HaveInsertPoint())
771  return;
772  Action->Exit(CGF);
773  }
774 };
775 
776 } // anonymous namespace
777 
778 void RegionCodeGenTy::operator()(CodeGenFunction &CGF) const {
779  CodeGenFunction::RunCleanupsScope Scope(CGF);
780  if (PrePostAction) {
781  CGF.EHStack.pushCleanup<CleanupTy>(NormalAndEHCleanup, PrePostAction);
782  Callback(CodeGen, CGF, *PrePostAction);
783  } else {
784  PrePostActionTy Action;
785  Callback(CodeGen, CGF, Action);
786  }
787 }
788 
789 /// Check if the combiner is a call to a UDR combiner and, if so, return the
790 /// UDR decl used for the reduction.
791 static const OMPDeclareReductionDecl *
792 getReductionInit(const Expr *ReductionOp) {
793  if (const auto *CE = dyn_cast<CallExpr>(ReductionOp))
794  if (const auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
795  if (const auto *DRE =
796  dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts()))
797  if (const auto *DRD = dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl()))
798  return DRD;
799  return nullptr;
800 }
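// A source-level example (illustrative only) of what this helper recognizes:
//
//   #pragma omp declare reduction(mymin : int :
//       omp_out = omp_out < omp_in ? omp_out : omp_in)
//       initializer(omp_priv = 2147483647)
//   #pragma omp parallel for reduction(mymin : m)
//
// Here the reduction operation is represented as a call whose callee is an
// OpaqueValueExpr bound to a DeclRefExpr naming the OMPDeclareReductionDecl
// for 'mymin', which is exactly the chain the dyn_casts above unwrap.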
801 
802 static void emitInitWithReductionInitializer(CodeGenFunction &CGF,
803  const OMPDeclareReductionDecl *DRD,
804  const Expr *InitOp,
805  Address Private, Address Original,
806  QualType Ty) {
807  if (DRD->getInitializer()) {
808  std::pair<llvm::Function *, llvm::Function *> Reduction =
809  CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD);
810  const auto *CE = cast<CallExpr>(InitOp);
811  const auto *OVE = cast<OpaqueValueExpr>(CE->getCallee());
812  const Expr *LHS = CE->getArg(/*Arg=*/0)->IgnoreParenImpCasts();
813  const Expr *RHS = CE->getArg(/*Arg=*/1)->IgnoreParenImpCasts();
814  const auto *LHSDRE =
815  cast<DeclRefExpr>(cast<UnaryOperator>(LHS)->getSubExpr());
816  const auto *RHSDRE =
817  cast<DeclRefExpr>(cast<UnaryOperator>(RHS)->getSubExpr());
818  CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
819  PrivateScope.addPrivate(cast<VarDecl>(LHSDRE->getDecl()),
820  [=]() { return Private; });
821  PrivateScope.addPrivate(cast<VarDecl>(RHSDRE->getDecl()),
822  [=]() { return Original; });
823  (void)PrivateScope.Privatize();
824  RValue Func = RValue::get(Reduction.second);
825  CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func);
826  CGF.EmitIgnoredExpr(InitOp);
827  } else {
828  llvm::Constant *Init = CGF.CGM.EmitNullConstant(Ty);
829  std::string Name = CGF.CGM.getOpenMPRuntime().getName({"init"});
830  auto *GV = new llvm::GlobalVariable(
831  CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
832  llvm::GlobalValue::PrivateLinkage, Init, Name);
833  LValue LV = CGF.MakeNaturalAlignAddrLValue(GV, Ty);
834  RValue InitRVal;
835  switch (CGF.getEvaluationKind(Ty)) {
836  case TEK_Scalar:
837  InitRVal = CGF.EmitLoadOfLValue(LV, DRD->getLocation());
838  break;
839  case TEK_Complex:
840  InitRVal =
841  RValue::getComplex(CGF.EmitLoadOfComplex(LV, DRD->getLocation()));
842  break;
843  case TEK_Aggregate:
844  InitRVal = RValue::getAggregate(LV.getAddress());
845  break;
846  }
847  OpaqueValueExpr OVE(DRD->getLocation(), Ty, VK_RValue);
848  CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE, InitRVal);
849  CGF.EmitAnyExprToMem(&OVE, Private, Ty.getQualifiers(),
850  /*IsInitializer=*/false);
851  }
852 }
853 
854 /// Emit initialization of arrays of complex types.
855 /// \param DestAddr Address of the array.
856 /// \param Type Type of array.
857 /// \param Init Initial expression of array.
858 /// \param SrcAddr Address of the original array.
859 static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
860  QualType Type, bool EmitDeclareReductionInit,
861  const Expr *Init,
862  const OMPDeclareReductionDecl *DRD,
863  Address SrcAddr = Address::invalid()) {
864  // Perform element-by-element initialization.
865  QualType ElementTy;
866 
867  // Drill down to the base element type on both arrays.
868  const ArrayType *ArrayTy = Type->getAsArrayTypeUnsafe();
869  llvm::Value *NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, DestAddr);
870  DestAddr =
871  CGF.Builder.CreateElementBitCast(DestAddr, DestAddr.getElementType());
872  if (DRD)
873  SrcAddr =
874  CGF.Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());
875 
876  llvm::Value *SrcBegin = nullptr;
877  if (DRD)
878  SrcBegin = SrcAddr.getPointer();
879  llvm::Value *DestBegin = DestAddr.getPointer();
880  // Cast from pointer to array type to pointer to single element.
881  llvm::Value *DestEnd = CGF.Builder.CreateGEP(DestBegin, NumElements);
882  // The basic structure here is a while-do loop.
883  llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.arrayinit.body");
884  llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.arrayinit.done");
885  llvm::Value *IsEmpty =
886  CGF.Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arrayinit.isempty");
887  CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
888 
889  // Enter the loop body, making that address the current address.
890  llvm::BasicBlock *EntryBB = CGF.Builder.GetInsertBlock();
891  CGF.EmitBlock(BodyBB);
892 
893  CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);
894 
895  llvm::PHINode *SrcElementPHI = nullptr;
896  Address SrcElementCurrent = Address::invalid();
897  if (DRD) {
898  SrcElementPHI = CGF.Builder.CreatePHI(SrcBegin->getType(), 2,
899  "omp.arraycpy.srcElementPast");
900  SrcElementPHI->addIncoming(SrcBegin, EntryBB);
901  SrcElementCurrent =
902  Address(SrcElementPHI,
903  SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));
904  }
905  llvm::PHINode *DestElementPHI = CGF.Builder.CreatePHI(
906  DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
907  DestElementPHI->addIncoming(DestBegin, EntryBB);
908  Address DestElementCurrent =
909  Address(DestElementPHI,
910  DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));
911 
912  // Emit copy.
913  {
914  CodeGenFunction::RunCleanupsScope InitScope(CGF);
915  if (EmitDeclareReductionInit) {
916  emitInitWithReductionInitializer(CGF, DRD, Init, DestElementCurrent,
917  SrcElementCurrent, ElementTy);
918  } else
919  CGF.EmitAnyExprToMem(Init, DestElementCurrent, ElementTy.getQualifiers(),
920  /*IsInitializer=*/false);
921  }
922 
923  if (DRD) {
924  // Shift the address forward by one element.
925  llvm::Value *SrcElementNext = CGF.Builder.CreateConstGEP1_32(
926  SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
927  SrcElementPHI->addIncoming(SrcElementNext, CGF.Builder.GetInsertBlock());
928  }
929 
930  // Shift the address forward by one element.
931  llvm::Value *DestElementNext = CGF.Builder.CreateConstGEP1_32(
932  DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
933  // Check whether we've reached the end.
934  llvm::Value *Done =
935  CGF.Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
936  CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
937  DestElementPHI->addIncoming(DestElementNext, CGF.Builder.GetInsertBlock());
938 
939  // Done.
940  CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
941 }
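// Roughly, the control flow emitted above looks like this (an illustration,
// not the exact IR):
//
//   entry:   isempty = (dest.begin == dest.end); br isempty ? done : body
//   omp.arrayinit.body:
//     dest = phi [dest.begin, entry], [dest.next, body]
//     ...initialize one element (default init or UDR initializer)...
//     dest.next = dest + 1; br (dest.next == dest.end) ? done : body
//   omp.arrayinit.done: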
942 
943 LValue ReductionCodeGen::emitSharedLValue(CodeGenFunction &CGF, const Expr *E) {
944  return CGF.EmitOMPSharedLValue(E);
945 }
946 
947 LValue ReductionCodeGen::emitSharedLValueUB(CodeGenFunction &CGF,
948  const Expr *E) {
949  if (const auto *OASE = dyn_cast<OMPArraySectionExpr>(E))
950  return CGF.EmitOMPArraySectionExpr(OASE, /*IsLowerBound=*/false);
951  return LValue();
952 }
953 
954 void ReductionCodeGen::emitAggregateInitialization(
955  CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal,
956  const OMPDeclareReductionDecl *DRD) {
957  // Emit VarDecl with copy init for arrays.
958  // Get the address of the original variable captured in current
959  // captured region.
960  const auto *PrivateVD =
961  cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
962  bool EmitDeclareReductionInit =
963  DRD && (DRD->getInitializer() || !PrivateVD->hasInit());
964  EmitOMPAggregateInit(CGF, PrivateAddr, PrivateVD->getType(),
965  EmitDeclareReductionInit,
966  EmitDeclareReductionInit ? ClausesData[N].ReductionOp
967  : PrivateVD->getInit(),
968  DRD, SharedLVal.getAddress());
969 }
970 
971 ReductionCodeGen::ReductionCodeGen(ArrayRef<const Expr *> Shareds,
972  ArrayRef<const Expr *> Privates,
973  ArrayRef<const Expr *> ReductionOps) {
974  ClausesData.reserve(Shareds.size());
975  SharedAddresses.reserve(Shareds.size());
976  Sizes.reserve(Shareds.size());
977  BaseDecls.reserve(Shareds.size());
978  auto IPriv = Privates.begin();
979  auto IRed = ReductionOps.begin();
980  for (const Expr *Ref : Shareds) {
981  ClausesData.emplace_back(Ref, *IPriv, *IRed);
982  std::advance(IPriv, 1);
983  std::advance(IRed, 1);
984  }
985 }
986 
987 void ReductionCodeGen::emitSharedLValue(CodeGenFunction &CGF, unsigned N) {
988  assert(SharedAddresses.size() == N &&
989  "Number of generated lvalues must be exactly N.");
990  LValue First = emitSharedLValue(CGF, ClausesData[N].Ref);
991  LValue Second = emitSharedLValueUB(CGF, ClausesData[N].Ref);
992  SharedAddresses.emplace_back(First, Second);
993 }
994 
995 void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N) {
996  const auto *PrivateVD =
997  cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
998  QualType PrivateType = PrivateVD->getType();
999  bool AsArraySection = isa<OMPArraySectionExpr>(ClausesData[N].Ref);
1000  if (!PrivateType->isVariablyModifiedType()) {
1001  Sizes.emplace_back(
1002  CGF.getTypeSize(
1003  SharedAddresses[N].first.getType().getNonReferenceType()),
1004  nullptr);
1005  return;
1006  }
1007  llvm::Value *Size;
1008  llvm::Value *SizeInChars;
1009  auto *ElemType =
1010  cast<llvm::PointerType>(SharedAddresses[N].first.getPointer()->getType())
1011  ->getElementType();
1012  auto *ElemSizeOf = llvm::ConstantExpr::getSizeOf(ElemType);
1013  if (AsArraySection) {
1014  Size = CGF.Builder.CreatePtrDiff(SharedAddresses[N].second.getPointer(),
1015  SharedAddresses[N].first.getPointer());
1016  Size = CGF.Builder.CreateNUWAdd(
1017  Size, llvm::ConstantInt::get(Size->getType(), /*V=*/1));
1018  SizeInChars = CGF.Builder.CreateNUWMul(Size, ElemSizeOf);
1019  } else {
1020  SizeInChars = CGF.getTypeSize(
1021  SharedAddresses[N].first.getType().getNonReferenceType());
1022  Size = CGF.Builder.CreateExactUDiv(SizeInChars, ElemSizeOf);
1023  }
1024  Sizes.emplace_back(SizeInChars, Size);
1025  CodeGenFunction::OpaqueValueMapping OpaqueMap(
1026  CGF,
1027  cast<OpaqueValueExpr>(
1028  CGF.getContext().getAsVariableArrayType(PrivateType)->getSizeExpr()),
1029  RValue::get(Size));
1030  CGF.EmitVariablyModifiedType(PrivateType);
1031 }
1032 
1033 void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N,
1034  llvm::Value *Size) {
1035  const auto *PrivateVD =
1036  cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
1037  QualType PrivateType = PrivateVD->getType();
1038  if (!PrivateType->isVariablyModifiedType()) {
1039  assert(!Size && !Sizes[N].second &&
1040  "Size should be nullptr for non-variably modified reduction "
1041  "items.");
1042  return;
1043  }
1044  CodeGenFunction::OpaqueValueMapping OpaqueMap(
1045  CGF,
1046  cast<OpaqueValueExpr>(
1047  CGF.getContext().getAsVariableArrayType(PrivateType)->getSizeExpr()),
1048  RValue::get(Size));
1049  CGF.EmitVariablyModifiedType(PrivateType);
1050 }
1051 
1052 void ReductionCodeGen::emitInitialization(
1053  CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal,
1054  llvm::function_ref<bool(CodeGenFunction &)> DefaultInit) {
1055  assert(SharedAddresses.size() > N && "No variable was generated");
1056  const auto *PrivateVD =
1057  cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
1058  const OMPDeclareReductionDecl *DRD =
1059  getReductionInit(ClausesData[N].ReductionOp);
1060  QualType PrivateType = PrivateVD->getType();
1061  PrivateAddr = CGF.Builder.CreateElementBitCast(
1062  PrivateAddr, CGF.ConvertTypeForMem(PrivateType));
1063  QualType SharedType = SharedAddresses[N].first.getType();
1064  SharedLVal = CGF.MakeAddrLValue(
1065  CGF.Builder.CreateElementBitCast(SharedLVal.getAddress(),
1066  CGF.ConvertTypeForMem(SharedType)),
1067  SharedType, SharedAddresses[N].first.getBaseInfo(),
1068  CGF.CGM.getTBAAInfoForSubobject(SharedAddresses[N].first, SharedType));
1069  if (CGF.getContext().getAsArrayType(PrivateVD->getType())) {
1070  emitAggregateInitialization(CGF, N, PrivateAddr, SharedLVal, DRD);
1071  } else if (DRD && (DRD->getInitializer() || !PrivateVD->hasInit())) {
1072  emitInitWithReductionInitializer(CGF, DRD, ClausesData[N].ReductionOp,
1073  PrivateAddr, SharedLVal.getAddress(),
1074  SharedLVal.getType());
1075  } else if (!DefaultInit(CGF) && PrivateVD->hasInit() &&
1076  !CGF.isTrivialInitializer(PrivateVD->getInit())) {
1077  CGF.EmitAnyExprToMem(PrivateVD->getInit(), PrivateAddr,
1078  PrivateVD->getType().getQualifiers(),
1079  /*IsInitializer=*/false);
1080  }
1081 }
1082 
1083 bool ReductionCodeGen::needCleanups(unsigned N) {
1084  const auto *PrivateVD =
1085  cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
1086  QualType PrivateType = PrivateVD->getType();
1087  QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
1088  return DTorKind != QualType::DK_none;
1089 }
1090 
1091 void ReductionCodeGen::emitCleanups(CodeGenFunction &CGF, unsigned N,
1092  Address PrivateAddr) {
1093  const auto *PrivateVD =
1094  cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
1095  QualType PrivateType = PrivateVD->getType();
1096  QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
1097  if (needCleanups(N)) {
1098  PrivateAddr = CGF.Builder.CreateElementBitCast(
1099  PrivateAddr, CGF.ConvertTypeForMem(PrivateType));
1100  CGF.pushDestroy(DTorKind, PrivateAddr, PrivateType);
1101  }
1102 }
1103 
1104 static LValue loadToBegin(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
1105  LValue BaseLV) {
1106  BaseTy = BaseTy.getNonReferenceType();
1107  while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
1108  !CGF.getContext().hasSameType(BaseTy, ElTy)) {
1109  if (const auto *PtrTy = BaseTy->getAs<PointerType>()) {
1110  BaseLV = CGF.EmitLoadOfPointerLValue(BaseLV.getAddress(), PtrTy);
1111  } else {
1112  LValue RefLVal = CGF.MakeAddrLValue(BaseLV.getAddress(), BaseTy);
1113  BaseLV = CGF.EmitLoadOfReferenceLValue(RefLVal);
1114  }
1115  BaseTy = BaseTy->getPointeeType();
1116  }
1117  return CGF.MakeAddrLValue(
1118  CGF.Builder.CreateElementBitCast(BaseLV.getAddress(),
1119  CGF.ConvertTypeForMem(ElTy)),
1120  BaseLV.getType(), BaseLV.getBaseInfo(),
1121  CGF.CGM.getTBAAInfoForSubobject(BaseLV, BaseLV.getType()));
1122 }
1123 
1124 static Address castToBase(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
1125  llvm::Type *BaseLVType, CharUnits BaseLVAlignment,
1126  llvm::Value *Addr) {
1127  Address Tmp = Address::invalid();
1128  Address TopTmp = Address::invalid();
1129  Address MostTopTmp = Address::invalid();
1130  BaseTy = BaseTy.getNonReferenceType();
1131  while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
1132  !CGF.getContext().hasSameType(BaseTy, ElTy)) {
1133  Tmp = CGF.CreateMemTemp(BaseTy);
1134  if (TopTmp.isValid())
1135  CGF.Builder.CreateStore(Tmp.getPointer(), TopTmp);
1136  else
1137  MostTopTmp = Tmp;
1138  TopTmp = Tmp;
1139  BaseTy = BaseTy->getPointeeType();
1140  }
1141  llvm::Type *Ty = BaseLVType;
1142  if (Tmp.isValid())
1143  Ty = Tmp.getElementType();
1144  Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, Ty);
1145  if (Tmp.isValid()) {
1146  CGF.Builder.CreateStore(Addr, Tmp);
1147  return MostTopTmp;
1148  }
1149  return Address(Addr, BaseLVAlignment);
1150 }
1151 
1152 static const VarDecl *getBaseDecl(const Expr *Ref, const DeclRefExpr *&DE) {
1153  const VarDecl *OrigVD = nullptr;
1154  if (const auto *OASE = dyn_cast<OMPArraySectionExpr>(Ref)) {
1155  const Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
1156  while (const auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
1157  Base = TempOASE->getBase()->IgnoreParenImpCasts();
1158  while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
1159  Base = TempASE->getBase()->IgnoreParenImpCasts();
1160  DE = cast<DeclRefExpr>(Base);
1161  OrigVD = cast<VarDecl>(DE->getDecl());
1162  } else if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Ref)) {
1163  const Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
1164  while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
1165  Base = TempASE->getBase()->IgnoreParenImpCasts();
1166  DE = cast<DeclRefExpr>(Base);
1167  OrigVD = cast<VarDecl>(DE->getDecl());
1168  }
1169  return OrigVD;
1170 }
1171 
1172 Address ReductionCodeGen::adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
1173  Address PrivateAddr) {
1174  const DeclRefExpr *DE;
1175  if (const VarDecl *OrigVD = ::getBaseDecl(ClausesData[N].Ref, DE)) {
1176  BaseDecls.emplace_back(OrigVD);
1177  LValue OriginalBaseLValue = CGF.EmitLValue(DE);
1178  LValue BaseLValue =
1179  loadToBegin(CGF, OrigVD->getType(), SharedAddresses[N].first.getType(),
1180  OriginalBaseLValue);
1181  llvm::Value *Adjustment = CGF.Builder.CreatePtrDiff(
1182  BaseLValue.getPointer(), SharedAddresses[N].first.getPointer());
1183  llvm::Value *PrivatePointer =
1184  CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
1185  PrivateAddr.getPointer(),
1186  SharedAddresses[N].first.getAddress().getType());
1187  llvm::Value *Ptr = CGF.Builder.CreateGEP(PrivatePointer, Adjustment);
1188  return castToBase(CGF, OrigVD->getType(),
1189  SharedAddresses[N].first.getType(),
1190  OriginalBaseLValue.getAddress().getType(),
1191  OriginalBaseLValue.getAlignment(), Ptr);
1192  }
1193  BaseDecls.emplace_back(
1194  cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Ref)->getDecl()));
1195  return PrivateAddr;
1196 }
1197 
1198 bool ReductionCodeGen::usesReductionInitializer(unsigned N) const {
1199  const OMPDeclareReductionDecl *DRD =
1200  getReductionInit(ClausesData[N].ReductionOp);
1201  return DRD && DRD->getInitializer();
1202 }
1203 
1204 LValue CGOpenMPRegionInfo::getThreadIDVariableLValue(CodeGenFunction &CGF) {
1205  return CGF.EmitLoadOfPointerLValue(
1206  CGF.GetAddrOfLocalVar(getThreadIDVariable()),
1207  getThreadIDVariable()->getType()->castAs<PointerType>());
1208 }
1209 
1210 void CGOpenMPRegionInfo::EmitBody(CodeGenFunction &CGF, const Stmt * /*S*/) {
1211  if (!CGF.HaveInsertPoint())
1212  return;
1213  // 1.2.2 OpenMP Language Terminology
1214  // Structured block - An executable statement with a single entry at the
1215  // top and a single exit at the bottom.
1216  // The point of exit cannot be a branch out of the structured block.
1217  // longjmp() and throw() must not violate the entry/exit criteria.
1218  CGF.EHStack.pushTerminate();
1219  CodeGen(CGF);
1220  CGF.EHStack.popTerminate();
1221 }
1222 
1223 LValue CGOpenMPTaskOutlinedRegionInfo::getThreadIDVariableLValue(
1224  CodeGenFunction &CGF) {
1225  return CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(getThreadIDVariable()),
1226  getThreadIDVariable()->getType(),
1227  AlignmentSource::Decl);
1228 }
1229 
1230 static FieldDecl *addFieldToRecordDecl(ASTContext &C, DeclContext *DC,
1231  QualType FieldTy) {
1232  auto *Field = FieldDecl::Create(
1233  C, DC, SourceLocation(), SourceLocation(), /*Id=*/nullptr, FieldTy,
1234  C.getTrivialTypeSourceInfo(FieldTy, SourceLocation()),
1235  /*BW=*/nullptr, /*Mutable=*/false, /*InitStyle=*/ICIS_NoInit);
1236  Field->setAccess(AS_public);
1237  DC->addDecl(Field);
1238  return Field;
1239 }
1240 
1241 CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator,
1242  StringRef Separator)
1243  : CGM(CGM), FirstSeparator(FirstSeparator), Separator(Separator),
1244  OffloadEntriesInfoManager(CGM) {
1245  ASTContext &C = CGM.getContext();
1246  RecordDecl *RD = C.buildImplicitRecord("ident_t");
1247  QualType KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
1248  RD->startDefinition();
1249  // reserved_1
1250  addFieldToRecordDecl(C, RD, KmpInt32Ty);
1251  // flags
1252  addFieldToRecordDecl(C, RD, KmpInt32Ty);
1253  // reserved_2
1254  addFieldToRecordDecl(C, RD, KmpInt32Ty);
1255  // reserved_3
1256  addFieldToRecordDecl(C, RD, KmpInt32Ty);
1257  // psource
1258  addFieldToRecordDecl(C, RD, C.VoidPtrTy);
1259  RD->completeDefinition();
1260  IdentQTy = C.getRecordType(RD);
1261  IdentTy = CGM.getTypes().ConvertRecordDeclType(RD);
1262  KmpCriticalNameTy = llvm::ArrayType::get(CGM.Int32Ty, /*NumElements*/ 8);
1263 
1264  loadOffloadInfoMetadata();
1265 }
1266 
1267 void CGOpenMPRuntime::clear() {
1268  InternalVars.clear();
1269  // Clean non-target variable declarations possibly used only in debug info.
1270  for (const auto &Data : EmittedNonTargetVariables) {
1271  if (!Data.getValue().pointsToAliveValue())
1272  continue;
1273  auto *GV = dyn_cast<llvm::GlobalVariable>(Data.getValue());
1274  if (!GV)
1275  continue;
1276  if (!GV->isDeclaration() || GV->getNumUses() > 0)
1277  continue;
1278  GV->eraseFromParent();
1279  }
1280 }
1281 
1282 std::string CGOpenMPRuntime::getName(ArrayRef<StringRef> Parts) const {
1283  SmallString<128> Buffer;
1284  llvm::raw_svector_ostream OS(Buffer);
1285  StringRef Sep = FirstSeparator;
1286  for (StringRef Part : Parts) {
1287  OS << Sep << Part;
1288  Sep = Separator;
1289  }
1290  return OS.str();
1291 }
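// Usage sketch (assuming the host runtime's default "." separators passed to
// the constructor above): getName({"omp_combiner", ""}) produces
// ".omp_combiner." and getName({"init"}) produces ".init", which is how the
// helper and global names used elsewhere in this file are formed.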
1292 
1293 static llvm::Function *
1294 emitCombinerOrInitializer(CodeGenModule &CGM, QualType Ty,
1295  const Expr *CombinerInitializer, const VarDecl *In,
1296  const VarDecl *Out, bool IsCombiner) {
1297  // void .omp_combiner.(Ty *in, Ty *out);
1298  ASTContext &C = CGM.getContext();
1299  QualType PtrTy = C.getPointerType(Ty).withRestrict();
1300  FunctionArgList Args;
1301  ImplicitParamDecl OmpOutParm(C, /*DC=*/nullptr, Out->getLocation(),
1302  /*Id=*/nullptr, PtrTy, ImplicitParamDecl::Other);
1303  ImplicitParamDecl OmpInParm(C, /*DC=*/nullptr, In->getLocation(),
1304  /*Id=*/nullptr, PtrTy, ImplicitParamDecl::Other);
1305  Args.push_back(&OmpOutParm);
1306  Args.push_back(&OmpInParm);
1307  const CGFunctionInfo &FnInfo =
1308  CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
1309  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
1310  std::string Name = CGM.getOpenMPRuntime().getName(
1311  {IsCombiner ? "omp_combiner" : "omp_initializer", ""});
1312  auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
1313  Name, &CGM.getModule());
1314  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
1315  if (CGM.getLangOpts().Optimize) {
1316  Fn->removeFnAttr(llvm::Attribute::NoInline);
1317  Fn->removeFnAttr(llvm::Attribute::OptimizeNone);
1318  Fn->addFnAttr(llvm::Attribute::AlwaysInline);
1319  }
1320  CodeGenFunction CGF(CGM);
1321  // Map "T omp_in;" variable to "*omp_in_parm" value in all expressions.
1322  // Map "T omp_out;" variable to "*omp_out_parm" value in all expressions.
1323  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, In->getLocation(),
1324  Out->getLocation());
1325  CodeGenFunction::OMPPrivateScope Scope(CGF);
1326  Address AddrIn = CGF.GetAddrOfLocalVar(&OmpInParm);
1327  Scope.addPrivate(In, [&CGF, AddrIn, PtrTy]() {
1328  return CGF.EmitLoadOfPointerLValue(AddrIn, PtrTy->castAs<PointerType>())
1329  .getAddress();
1330  });
1331  Address AddrOut = CGF.GetAddrOfLocalVar(&OmpOutParm);
1332  Scope.addPrivate(Out, [&CGF, AddrOut, PtrTy]() {
1333  return CGF.EmitLoadOfPointerLValue(AddrOut, PtrTy->castAs<PointerType>())
1334  .getAddress();
1335  });
1336  (void)Scope.Privatize();
1337  if (!IsCombiner && Out->hasInit() &&
1338  !CGF.isTrivialInitializer(Out->getInit())) {
1339  CGF.EmitAnyExprToMem(Out->getInit(), CGF.GetAddrOfLocalVar(Out),
1340  Out->getType().getQualifiers(),
1341  /*IsInitializer=*/true);
1342  }
1343  if (CombinerInitializer)
1344  CGF.EmitIgnoredExpr(CombinerInitializer);
1345  Scope.ForceCleanup();
1346  CGF.FinishFunction();
1347  return Fn;
1348 }
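// Sketch of the generated helper (illustrative; the real body is emitted as
// LLVM IR): for "#pragma omp declare reduction(+ : T : omp_out += omp_in)"
// the combiner built here behaves like
//
//   static void .omp_combiner.(T *__restrict omp_out, T *__restrict omp_in) {
//     *omp_out += *omp_in; // combiner expression with omp_in/omp_out mapped
//   }                      // onto the dereferenced parameters
//
// and the initializer helper evaluates the initializer clause with
// omp_priv/omp_orig mapped onto its two pointer parameters in the same way.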
1349 
1350 void CGOpenMPRuntime::emitUserDefinedReduction(
1351  CodeGenFunction *CGF, const OMPDeclareReductionDecl *D) {
1352  if (UDRMap.count(D) > 0)
1353  return;
1354  llvm::Function *Combiner = emitCombinerOrInitializer(
1355  CGM, D->getType(), D->getCombiner(),
1356  cast<VarDecl>(cast<DeclRefExpr>(D->getCombinerIn())->getDecl()),
1357  cast<VarDecl>(cast<DeclRefExpr>(D->getCombinerOut())->getDecl()),
1358  /*IsCombiner=*/true);
1359  llvm::Function *Initializer = nullptr;
1360  if (const Expr *Init = D->getInitializer()) {
1361  Initializer = emitCombinerOrInitializer(
1362  CGM, D->getType(),
1364  : nullptr,
1365  cast<VarDecl>(cast<DeclRefExpr>(D->getInitOrig())->getDecl()),
1366  cast<VarDecl>(cast<DeclRefExpr>(D->getInitPriv())->getDecl()),
1367  /*IsCombiner=*/false);
1368  }
1369  UDRMap.try_emplace(D, Combiner, Initializer);
1370  if (CGF) {
1371  auto &Decls = FunctionUDRMap.FindAndConstruct(CGF->CurFn);
1372  Decls.second.push_back(D);
1373  }
1374 }
1375 
1376 std::pair<llvm::Function *, llvm::Function *>
1377 CGOpenMPRuntime::getUserDefinedReduction(const OMPDeclareReductionDecl *D) {
1378  auto I = UDRMap.find(D);
1379  if (I != UDRMap.end())
1380  return I->second;
1381  emitUserDefinedReduction(/*CGF=*/nullptr, D);
1382  return UDRMap.lookup(D);
1383 }
1384 
1385 static llvm::Function *emitParallelOrTeamsOutlinedFunction(
1386  CodeGenModule &CGM, const OMPExecutableDirective &D, const CapturedStmt *CS,
1387  const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
1388  const StringRef OutlinedHelperName, const RegionCodeGenTy &CodeGen) {
1389  assert(ThreadIDVar->getType()->isPointerType() &&
1390  "thread id variable must be of type kmp_int32 *");
1391  CodeGenFunction CGF(CGM, true);
1392  bool HasCancel = false;
1393  if (const auto *OPD = dyn_cast<OMPParallelDirective>(&D))
1394  HasCancel = OPD->hasCancel();
1395  else if (const auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&D))
1396  HasCancel = OPSD->hasCancel();
1397  else if (const auto *OPFD = dyn_cast<OMPParallelForDirective>(&D))
1398  HasCancel = OPFD->hasCancel();
1399  else if (const auto *OPFD = dyn_cast<OMPTargetParallelForDirective>(&D))
1400  HasCancel = OPFD->hasCancel();
1401  else if (const auto *OPFD = dyn_cast<OMPDistributeParallelForDirective>(&D))
1402  HasCancel = OPFD->hasCancel();
1403  else if (const auto *OPFD =
1404  dyn_cast<OMPTeamsDistributeParallelForDirective>(&D))
1405  HasCancel = OPFD->hasCancel();
1406  else if (const auto *OPFD =
1407  dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(&D))
1408  HasCancel = OPFD->hasCancel();
1409  CGOpenMPOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen, InnermostKind,
1410  HasCancel, OutlinedHelperName);
1411  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
1412  return CGF.GenerateOpenMPCapturedStmtFunction(*CS);
1413 }
1414 
1415 llvm::Function *CGOpenMPRuntime::emitParallelOutlinedFunction(
1416  const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1417  OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
1418  const CapturedStmt *CS = D.getCapturedStmt(OMPD_parallel);
1419  return emitParallelOrTeamsOutlinedFunction(
1420  CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen);
1421 }
1422 
1423 llvm::Function *CGOpenMPRuntime::emitTeamsOutlinedFunction(
1424  const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1425  OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
1426  const CapturedStmt *CS = D.getCapturedStmt(OMPD_teams);
1427  return emitParallelOrTeamsOutlinedFunction(
1428  CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen);
1429 }
1430 
1431 llvm::Function *CGOpenMPRuntime::emitTaskOutlinedFunction(
1432  const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1433  const VarDecl *PartIDVar, const VarDecl *TaskTVar,
1434  OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
1435  bool Tied, unsigned &NumberOfParts) {
1436  auto &&UntiedCodeGen = [this, &D, TaskTVar](CodeGenFunction &CGF,
1437  PrePostActionTy &) {
1438  llvm::Value *ThreadID = getThreadID(CGF, D.getBeginLoc());
1439  llvm::Value *UpLoc = emitUpdateLocation(CGF, D.getBeginLoc());
1440  llvm::Value *TaskArgs[] = {
1441  UpLoc, ThreadID,
1442  CGF.EmitLoadOfPointerLValue(CGF.GetAddrOfLocalVar(TaskTVar),
1443  TaskTVar->getType()->castAs<PointerType>())
1444  .getPointer()};
1445  CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_task), TaskArgs);
1446  };
1447  CGOpenMPTaskOutlinedRegionInfo::UntiedTaskActionTy Action(Tied, PartIDVar,
1448  UntiedCodeGen);
1449  CodeGen.setAction(Action);
1450  assert(!ThreadIDVar->getType()->isPointerType() &&
1451  "thread id variable must be of type kmp_int32 for tasks");
1452  const OpenMPDirectiveKind Region =
1453  isOpenMPTaskLoopDirective(D.getDirectiveKind()) ? OMPD_taskloop
1454  : OMPD_task;
1455  const CapturedStmt *CS = D.getCapturedStmt(Region);
1456  const auto *TD = dyn_cast<OMPTaskDirective>(&D);
1457  CodeGenFunction CGF(CGM, true);
1458  CGOpenMPTaskOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen,
1459  InnermostKind,
1460  TD ? TD->hasCancel() : false, Action);
1461  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
1462  llvm::Function *Res = CGF.GenerateCapturedStmtFunction(*CS);
1463  if (!Tied)
1464  NumberOfParts = Action.getNumberOfParts();
1465  return Res;
1466 }
1467 
1468 static void buildStructValue(ConstantStructBuilder &Fields, CodeGenModule &CGM,
1469  const RecordDecl *RD, const CGRecordLayout &RL,
1470  ArrayRef<llvm::Constant *> Data) {
1471  llvm::StructType *StructTy = RL.getLLVMType();
1472  unsigned PrevIdx = 0;
1473  ConstantInitBuilder CIBuilder(CGM);
1474  auto DI = Data.begin();
1475  for (const FieldDecl *FD : RD->fields()) {
1476  unsigned Idx = RL.getLLVMFieldNo(FD);
1477  // Fill the alignment.
1478  for (unsigned I = PrevIdx; I < Idx; ++I)
1479  Fields.add(llvm::Constant::getNullValue(StructTy->getElementType(I)));
1480  PrevIdx = Idx + 1;
1481  Fields.add(*DI);
1482  ++DI;
1483  }
1484 }
1485 
1486 template <class... As>
1487 static llvm::GlobalVariable *
1488 createGlobalStruct(CodeGenModule &CGM, QualType Ty, bool IsConstant,
1489  ArrayRef<llvm::Constant *> Data, const Twine &Name,
1490  As &&... Args) {
1491  const auto *RD = cast<RecordDecl>(Ty->getAsTagDecl());
1492  const CGRecordLayout &RL = CGM.getTypes().getCGRecordLayout(RD);
1493  ConstantInitBuilder CIBuilder(CGM);
1494  ConstantStructBuilder Fields = CIBuilder.beginStruct(RL.getLLVMType());
1495  buildStructValue(Fields, CGM, RD, RL, Data);
1496  return Fields.finishAndCreateGlobal(
1497  Name, CGM.getContext().getAlignOfGlobalVarInChars(Ty), IsConstant,
1498  std::forward<As>(Args)...);
1499 }
1500 
1501 template <typename T>
1502 static void
1503 createConstantGlobalStructAndAddToParent(CodeGenModule &CGM, QualType Ty,
1504  ArrayRef<llvm::Constant *> Data,
1505  T &Parent) {
1506  const auto *RD = cast<RecordDecl>(Ty->getAsTagDecl());
1507  const CGRecordLayout &RL = CGM.getTypes().getCGRecordLayout(RD);
1508  ConstantStructBuilder Fields = Parent.beginStruct(RL.getLLVMType());
1509  buildStructValue(Fields, CGM, RD, RL, Data);
1510  Fields.finishAndAddTo(Parent);
1511 }
1512 
1513 Address CGOpenMPRuntime::getOrCreateDefaultLocation(unsigned Flags) {
1514  CharUnits Align = CGM.getContext().getTypeAlignInChars(IdentQTy);
1515  unsigned Reserved2Flags = getDefaultLocationReserved2Flags();
1516  FlagsTy FlagsKey(Flags, Reserved2Flags);
1517  llvm::Value *Entry = OpenMPDefaultLocMap.lookup(FlagsKey);
1518  if (!Entry) {
1519  if (!DefaultOpenMPPSource) {
1520  // Initialize default location for psource field of ident_t structure of
1521  // all ident_t objects. Format is ";file;function;line;column;;".
1522  // Taken from
1523  // https://github.com/llvm/llvm-project/blob/master/openmp/runtime/src/kmp_str.cpp
1524  DefaultOpenMPPSource =
1525  CGM.GetAddrOfConstantCString(";unknown;unknown;0;0;;").getPointer();
1526  DefaultOpenMPPSource =
1527  llvm::ConstantExpr::getBitCast(DefaultOpenMPPSource, CGM.Int8PtrTy);
1528  }
1529 
1530  llvm::Constant *Data[] = {
1531  llvm::ConstantInt::getNullValue(CGM.Int32Ty),
1532  llvm::ConstantInt::get(CGM.Int32Ty, Flags),
1533  llvm::ConstantInt::get(CGM.Int32Ty, Reserved2Flags),
1534  llvm::ConstantInt::getNullValue(CGM.Int32Ty), DefaultOpenMPPSource};
1535  llvm::GlobalValue *DefaultOpenMPLocation =
1536  createGlobalStruct(CGM, IdentQTy, isDefaultLocationConstant(), Data, "",
1537  llvm::GlobalValue::PrivateLinkage);
1538  DefaultOpenMPLocation->setUnnamedAddr(
1539  llvm::GlobalValue::UnnamedAddr::Global);
1540 
1541  OpenMPDefaultLocMap[FlagsKey] = Entry = DefaultOpenMPLocation;
1542  }
1543  return Address(Entry, Align);
1544 }
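// Illustrative sketch (an assumption about the emitted constant, not code from
// the upstream file): the default location built above is an ident_t global of
// roughly this shape, cached per (Flags, Reserved2Flags) key:
//   ident_t .kmpc_default_loc = {
//     0,                        // reserved_1
//     Flags,                    // flags (already include OMP_IDENT_KMPC)
//     Reserved2Flags,           // reserved_2
//     0,                        // reserved_3
//     ";unknown;unknown;0;0;;"  // psource
//   };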
1545 
1546 void CGOpenMPRuntime::setLocThreadIdInsertPt(CodeGenFunction &CGF,
1547  bool AtCurrentPoint) {
1548  auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
1549  assert(!Elem.second.ServiceInsertPt && "Insert point is set already.");
1550 
1551  llvm::Value *Undef = llvm::UndefValue::get(CGF.Int32Ty);
1552  if (AtCurrentPoint) {
1553  Elem.second.ServiceInsertPt = new llvm::BitCastInst(
1554  Undef, CGF.Int32Ty, "svcpt", CGF.Builder.GetInsertBlock());
1555  } else {
1556  Elem.second.ServiceInsertPt =
1557  new llvm::BitCastInst(Undef, CGF.Int32Ty, "svcpt");
1558  Elem.second.ServiceInsertPt->insertAfter(CGF.AllocaInsertPt);
1559  }
1560 }
1561 
1562 void CGOpenMPRuntime::clearLocThreadIdInsertPt(CodeGenFunction &CGF) {
1563  auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
1564  if (Elem.second.ServiceInsertPt) {
1565  llvm::Instruction *Ptr = Elem.second.ServiceInsertPt;
1566  Elem.second.ServiceInsertPt = nullptr;
1567  Ptr->eraseFromParent();
1568  }
1569 }
1570 
1571 llvm::Value *CGOpenMPRuntime::emitUpdateLocation(CodeGenFunction &CGF,
1572  SourceLocation Loc,
1573  unsigned Flags) {
1574  Flags |= OMP_IDENT_KMPC;
1575  // If no debug info is generated - return global default location.
1576  if (CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo ||
1577  Loc.isInvalid())
1578  return getOrCreateDefaultLocation(Flags).getPointer();
1579 
1580  assert(CGF.CurFn && "No function in current CodeGenFunction.");
1581 
1582  CharUnits Align = CGM.getContext().getTypeAlignInChars(IdentQTy);
1583  Address LocValue = Address::invalid();
1584  auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
1585  if (I != OpenMPLocThreadIDMap.end())
1586  LocValue = Address(I->second.DebugLoc, Align);
1587 
1588  // OpenMPLocThreadIDMap may have null DebugLoc and non-null ThreadID, if
1589  // GetOpenMPThreadID was called before this routine.
1590  if (!LocValue.isValid()) {
1591  // Generate "ident_t .kmpc_loc.addr;"
1592  Address AI = CGF.CreateMemTemp(IdentQTy, ".kmpc_loc.addr");
1593  auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
1594  Elem.second.DebugLoc = AI.getPointer();
1595  LocValue = AI;
1596 
1597  if (!Elem.second.ServiceInsertPt)
1598  setLocThreadIdInsertPt(CGF);
1599  CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
1600  CGF.Builder.SetInsertPoint(Elem.second.ServiceInsertPt);
1601  CGF.Builder.CreateMemCpy(LocValue, getOrCreateDefaultLocation(Flags),
1602  CGF.getTypeSize(IdentQTy));
1603  }
1604 
1605  // char **psource = &.kmpc_loc_<flags>.addr.psource;
1606  LValue Base = CGF.MakeAddrLValue(LocValue, IdentQTy);
1607  auto Fields = cast<RecordDecl>(IdentQTy->getAsTagDecl())->field_begin();
1608  LValue PSource =
1609  CGF.EmitLValueForField(Base, *std::next(Fields, IdentField_PSource));
1610 
1611  llvm::Value *OMPDebugLoc = OpenMPDebugLocMap.lookup(Loc.getRawEncoding());
1612  if (OMPDebugLoc == nullptr) {
1613  SmallString<128> Buffer2;
1614  llvm::raw_svector_ostream OS2(Buffer2);
1615  // Build debug location
1616  PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
1617  OS2 << ";" << PLoc.getFilename() << ";";
1618  if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CGF.CurFuncDecl))
1619  OS2 << FD->getQualifiedNameAsString();
1620  OS2 << ";" << PLoc.getLine() << ";" << PLoc.getColumn() << ";;";
1621  OMPDebugLoc = CGF.Builder.CreateGlobalStringPtr(OS2.str());
1622  OpenMPDebugLocMap[Loc.getRawEncoding()] = OMPDebugLoc;
1623  }
1624  // *psource = ";<File>;<Function>;<Line>;<Column>;;";
1625  CGF.EmitStoreOfScalar(OMPDebugLoc, PSource);
1626 
1627  // Our callers always pass this to a runtime function, so for
1628  // convenience, go ahead and return a naked pointer.
1629  return LocValue.getPointer();
1630 }
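// Worked example (illustrative): with debug info enabled, a directive at line 12,
// column 3 of foo.c inside function bar() makes the code above emit
//   char **psource = &.kmpc_loc.addr.psource;
//   *psource = ";foo.c;bar;12;3;;";
// so the runtime can report the construct's source position.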
1631 
1632 llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF,
1633  SourceLocation Loc) {
1634  assert(CGF.CurFn && "No function in current CodeGenFunction.");
1635 
1636  llvm::Value *ThreadID = nullptr;
1637  // Check whether we've already cached a load of the thread id in this
1638  // function.
1639  auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
1640  if (I != OpenMPLocThreadIDMap.end()) {
1641  ThreadID = I->second.ThreadID;
1642  if (ThreadID != nullptr)
1643  return ThreadID;
1644  }
1645  // If exceptions are enabled, do not use parameter to avoid possible crash.
1646  if (!CGF.EHStack.requiresLandingPad() || !CGF.getLangOpts().Exceptions ||
1647  !CGF.getLangOpts().CXXExceptions ||
1648  CGF.Builder.GetInsertBlock() == CGF.AllocaInsertPt->getParent()) {
1649  if (auto *OMPRegionInfo =
1650  dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
1651  if (OMPRegionInfo->getThreadIDVariable()) {
1652  // Check if this is an outlined function with the thread id passed as argument.
1653  LValue LVal = OMPRegionInfo->getThreadIDVariableLValue(CGF);
1654  ThreadID = CGF.EmitLoadOfScalar(LVal, Loc);
1655  // If value loaded in entry block, cache it and use it everywhere in
1656  // function.
1657  if (CGF.Builder.GetInsertBlock() == CGF.AllocaInsertPt->getParent()) {
1658  auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
1659  Elem.second.ThreadID = ThreadID;
1660  }
1661  return ThreadID;
1662  }
1663  }
1664  }
1665 
1666  // This is not an outlined function region - need to call kmp_int32
1667  // __kmpc_global_thread_num(ident_t *loc).
1668  // Generate thread id value and cache this value for use across the
1669  // function.
1670  auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
1671  if (!Elem.second.ServiceInsertPt)
1672  setLocThreadIdInsertPt(CGF);
1673  CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
1674  CGF.Builder.SetInsertPoint(Elem.second.ServiceInsertPt);
1675  llvm::CallInst *Call = CGF.Builder.CreateCall(
1676  createRuntimeFunction(OMPRTL__kmpc_global_thread_num),
1677  emitUpdateLocation(CGF, Loc));
1678  Call->setCallingConv(CGF.getRuntimeCC());
1679  Elem.second.ThreadID = Call;
1680  return Call;
1681 }
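// Illustrative IR sketch (assumption): outside an outlined region the thread id
// is materialized once per function at the service insertion point, roughly as
//   %tid = call i32 @__kmpc_global_thread_num(%struct.ident_t* %loc)
// and the call is cached in OpenMPLocThreadIDMap so later queries in the same
// function reuse it instead of emitting another runtime call.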
1682 
1683 void CGOpenMPRuntime::functionFinished(CodeGenFunction &CGF) {
1684  assert(CGF.CurFn && "No function in current CodeGenFunction.");
1685  if (OpenMPLocThreadIDMap.count(CGF.CurFn)) {
1686  clearLocThreadIdInsertPt(CGF);
1687  OpenMPLocThreadIDMap.erase(CGF.CurFn);
1688  }
1689  if (FunctionUDRMap.count(CGF.CurFn) > 0) {
1690  for(auto *D : FunctionUDRMap[CGF.CurFn])
1691  UDRMap.erase(D);
1692  FunctionUDRMap.erase(CGF.CurFn);
1693  }
1694  auto I = FunctionUDMMap.find(CGF.CurFn);
1695  if (I != FunctionUDMMap.end()) {
1696  for(auto *D : I->second)
1697  UDMMap.erase(D);
1698  FunctionUDMMap.erase(I);
1699  }
1700 }
1701 
1702 llvm::Type *CGOpenMPRuntime::getIdentTyPointerTy() {
1703  return IdentTy->getPointerTo();
1704 }
1705 
1706 llvm::Type *CGOpenMPRuntime::getKmpc_MicroPointerTy() {
1707  if (!Kmpc_MicroTy) {
1708  // Build void (*kmpc_micro)(kmp_int32 *global_tid, kmp_int32 *bound_tid,...)
1709  llvm::Type *MicroParams[] = {llvm::PointerType::getUnqual(CGM.Int32Ty),
1710  llvm::PointerType::getUnqual(CGM.Int32Ty)};
1711  Kmpc_MicroTy = llvm::FunctionType::get(CGM.VoidTy, MicroParams, true);
1712  }
1713  return llvm::PointerType::getUnqual(Kmpc_MicroTy);
1714 }
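// Sketch of the kmpc_micro contract assumed above: an outlined parallel body is
// callable through the variadic-compatible prototype
//   void microtask(kmp_int32 *global_tid, kmp_int32 *bound_tid, /*captured vars*/...);
// which is the callee shape __kmpc_fork_call expects for its microtask argument.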
1715 
1716 llvm::FunctionCallee CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
1717  llvm::FunctionCallee RTLFn = nullptr;
1718  switch (static_cast<OpenMPRTLFunction>(Function)) {
1719  case OMPRTL__kmpc_fork_call: {
1720  // Build void __kmpc_fork_call(ident_t *loc, kmp_int32 argc, kmpc_micro
1721  // microtask, ...);
1722  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1723  getKmpc_MicroPointerTy()};
1724  auto *FnTy =
1725  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ true);
1726  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_fork_call");
1727  if (auto *F = dyn_cast<llvm::Function>(RTLFn.getCallee())) {
1728  if (!F->hasMetadata(llvm::LLVMContext::MD_callback)) {
1729  llvm::LLVMContext &Ctx = F->getContext();
1730  llvm::MDBuilder MDB(Ctx);
1731  // Annotate the callback behavior of the __kmpc_fork_call:
1732  // - The callback callee is argument number 2 (microtask).
1733  // - The first two arguments of the callback callee are unknown (-1).
1734  // - All variadic arguments to the __kmpc_fork_call are passed to the
1735  // callback callee.
1736  F->addMetadata(
1737  llvm::LLVMContext::MD_callback,
1738  *llvm::MDNode::get(Ctx, {MDB.createCallbackEncoding(
1739  2, {-1, -1},
1740  /* VarArgsArePassed */ true)}));
1741  }
1742  }
1743  break;
1744  }
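// Illustrative lowering, not taken from the upstream file: a directive such as
//   #pragma omp parallel
//   { body(a); }
// is conceptually emitted as
//   __kmpc_fork_call(&loc, /*argc=*/1, (kmpc_micro)&outlined_body, &a);
// and the callback metadata added above tells the optimizer that the variadic
// arguments are forwarded to the outlined microtask.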
1745  case OMPRTL__kmpc_global_thread_num: {
1746  // Build kmp_int32 __kmpc_global_thread_num(ident_t *loc);
1747  llvm::Type *TypeParams[] = {getIdentTyPointerTy()};
1748  auto *FnTy =
1749  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
1750  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_global_thread_num");
1751  break;
1752  }
1753  case OMPRTL__kmpc_threadprivate_cached: {
1754  // Build void *__kmpc_threadprivate_cached(ident_t *loc,
1755  // kmp_int32 global_tid, void *data, size_t size, void ***cache);
1756  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1757  CGM.VoidPtrTy, CGM.SizeTy,
1758  CGM.VoidPtrTy->getPointerTo()->getPointerTo()};
1759  auto *FnTy =
1760  llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg*/ false);
1761  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_threadprivate_cached");
1762  break;
1763  }
1764  case OMPRTL__kmpc_critical: {
1765  // Build void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
1766  // kmp_critical_name *crit);
1767  llvm::Type *TypeParams[] = {
1768  getIdentTyPointerTy(), CGM.Int32Ty,
1769  llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1770  auto *FnTy =
1771  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1772  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_critical");
1773  break;
1774  }
1775  case OMPRTL__kmpc_critical_with_hint: {
1776  // Build void __kmpc_critical_with_hint(ident_t *loc, kmp_int32 global_tid,
1777  // kmp_critical_name *crit, uintptr_t hint);
1778  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1779  llvm::PointerType::getUnqual(KmpCriticalNameTy),
1780  CGM.IntPtrTy};
1781  auto *FnTy =
1782  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1783  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_critical_with_hint");
1784  break;
1785  }
1786  case OMPRTL__kmpc_threadprivate_register: {
1787  // Build void __kmpc_threadprivate_register(ident_t *, void *data,
1788  // kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor);
1789  // typedef void *(*kmpc_ctor)(void *);
1790  auto *KmpcCtorTy =
1791  llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
1792  /*isVarArg*/ false)->getPointerTo();
1793  // typedef void *(*kmpc_cctor)(void *, void *);
1794  llvm::Type *KmpcCopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
1795  auto *KmpcCopyCtorTy =
1796  llvm::FunctionType::get(CGM.VoidPtrTy, KmpcCopyCtorTyArgs,
1797  /*isVarArg*/ false)
1798  ->getPointerTo();
1799  // typedef void (*kmpc_dtor)(void *);
1800  auto *KmpcDtorTy =
1801  llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy, /*isVarArg*/ false)
1802  ->getPointerTo();
1803  llvm::Type *FnTyArgs[] = {getIdentTyPointerTy(), CGM.VoidPtrTy, KmpcCtorTy,
1804  KmpcCopyCtorTy, KmpcDtorTy};
1805  auto *FnTy = llvm::FunctionType::get(CGM.VoidTy, FnTyArgs,
1806  /*isVarArg*/ false);
1807  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_threadprivate_register");
1808  break;
1809  }
1810  case OMPRTL__kmpc_end_critical: {
1811  // Build void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid,
1812  // kmp_critical_name *crit);
1813  llvm::Type *TypeParams[] = {
1814  getIdentTyPointerTy(), CGM.Int32Ty,
1815  llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1816  auto *FnTy =
1817  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1818  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_critical");
1819  break;
1820  }
1821  case OMPRTL__kmpc_cancel_barrier: {
1822  // Build kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32
1823  // global_tid);
1824  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1825  auto *FnTy =
1826  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
1827  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_cancel_barrier");
1828  break;
1829  }
1830  case OMPRTL__kmpc_barrier: {
1831  // Build void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
1832  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1833  auto *FnTy =
1834  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1835  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_barrier");
1836  break;
1837  }
1838  case OMPRTL__kmpc_for_static_fini: {
1839  // Build void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);
1840  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1841  auto *FnTy =
1842  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1843  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_for_static_fini");
1844  break;
1845  }
1846  case OMPRTL__kmpc_push_num_threads: {
1847  // Build void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
1848  // kmp_int32 num_threads)
1849  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1850  CGM.Int32Ty};
1851  auto *FnTy =
1852  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1853  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_num_threads");
1854  break;
1855  }
1856  case OMPRTL__kmpc_serialized_parallel: {
1857  // Build void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
1858  // global_tid);
1859  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1860  auto *FnTy =
1861  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1862  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_serialized_parallel");
1863  break;
1864  }
1865  case OMPRTL__kmpc_end_serialized_parallel: {
1866  // Build void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
1867  // global_tid);
1868  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1869  auto *FnTy =
1870  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1871  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_serialized_parallel");
1872  break;
1873  }
1874  case OMPRTL__kmpc_flush: {
1875  // Build void __kmpc_flush(ident_t *loc);
1876  llvm::Type *TypeParams[] = {getIdentTyPointerTy()};
1877  auto *FnTy =
1878  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1879  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_flush");
1880  break;
1881  }
1882  case OMPRTL__kmpc_master: {
1883  // Build kmp_int32 __kmpc_master(ident_t *loc, kmp_int32 global_tid);
1884  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1885  auto *FnTy =
1886  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1887  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_master");
1888  break;
1889  }
1890  case OMPRTL__kmpc_end_master: {
1891  // Build void __kmpc_end_master(ident_t *loc, kmp_int32 global_tid);
1892  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1893  auto *FnTy =
1894  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1895  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_master");
1896  break;
1897  }
1898  case OMPRTL__kmpc_omp_taskyield: {
1899  // Build kmp_int32 __kmpc_omp_taskyield(ident_t *, kmp_int32 global_tid,
1900  // int end_part);
1901  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
1902  auto *FnTy =
1903  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1904  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_taskyield");
1905  break;
1906  }
1907  case OMPRTL__kmpc_single: {
1908  // Build kmp_int32 __kmpc_single(ident_t *loc, kmp_int32 global_tid);
1909  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1910  auto *FnTy =
1911  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1912  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_single");
1913  break;
1914  }
1915  case OMPRTL__kmpc_end_single: {
1916  // Build void __kmpc_end_single(ident_t *loc, kmp_int32 global_tid);
1917  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1918  auto *FnTy =
1919  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1920  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_single");
1921  break;
1922  }
1923  case OMPRTL__kmpc_omp_task_alloc: {
1924  // Build kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
1925  // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
1926  // kmp_routine_entry_t *task_entry);
1927  assert(KmpRoutineEntryPtrTy != nullptr &&
1928  "Type kmp_routine_entry_t must be created.");
1929  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty,
1930  CGM.SizeTy, CGM.SizeTy, KmpRoutineEntryPtrTy};
1931  // Return void * and then cast to particular kmp_task_t type.
1932  auto *FnTy =
1933  llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
1934  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_alloc");
1935  break;
1936  }
1937  case OMPRTL__kmpc_omp_target_task_alloc: {
1938  // Build kmp_task_t *__kmpc_omp_target_task_alloc(ident_t *, kmp_int32 gtid,
1939  // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
1940  // kmp_routine_entry_t *task_entry, kmp_int64 device_id);
1941  assert(KmpRoutineEntryPtrTy != nullptr &&
1942  "Type kmp_routine_entry_t must be created.");
1943  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty,
1944  CGM.SizeTy, CGM.SizeTy, KmpRoutineEntryPtrTy,
1945  CGM.Int64Ty};
1946  // Return void * and then cast to particular kmp_task_t type.
1947  auto *FnTy =
1948  llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
1949  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_target_task_alloc");
1950  break;
1951  }
1952  case OMPRTL__kmpc_omp_task: {
1953  // Build kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t
1954  // *new_task);
1955  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1956  CGM.VoidPtrTy};
1957  auto *FnTy =
1958  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1959  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task");
1960  break;
1961  }
1962  case OMPRTL__kmpc_copyprivate: {
1963  // Build void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
1964  // size_t cpy_size, void *cpy_data, void(*cpy_func)(void *, void *),
1965  // kmp_int32 didit);
1966  llvm::Type *CpyTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
1967  auto *CpyFnTy =
1968  llvm::FunctionType::get(CGM.VoidTy, CpyTypeParams, /*isVarArg=*/false);
1969  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.SizeTy,
1970  CGM.VoidPtrTy, CpyFnTy->getPointerTo(),
1971  CGM.Int32Ty};
1972  auto *FnTy =
1973  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1974  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_copyprivate");
1975  break;
1976  }
1977  case OMPRTL__kmpc_reduce: {
1978  // Build kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid,
1979  // kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void
1980  // (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck);
1981  llvm::Type *ReduceTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
1982  auto *ReduceFnTy = llvm::FunctionType::get(CGM.VoidTy, ReduceTypeParams,
1983  /*isVarArg=*/false);
1984  llvm::Type *TypeParams[] = {
1985  getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty, CGM.SizeTy,
1986  CGM.VoidPtrTy, ReduceFnTy->getPointerTo(),
1987  llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1988  auto *FnTy =
1989  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1990  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_reduce");
1991  break;
1992  }
1993  case OMPRTL__kmpc_reduce_nowait: {
1994  // Build kmp_int32 __kmpc_reduce_nowait(ident_t *loc, kmp_int32
1995  // global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
1996  // void (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name
1997  // *lck);
1998  llvm::Type *ReduceTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
1999  auto *ReduceFnTy = llvm::FunctionType::get(CGM.VoidTy, ReduceTypeParams,
2000  /*isVarArg=*/false);
2001  llvm::Type *TypeParams[] = {
2002  getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty, CGM.SizeTy,
2003  CGM.VoidPtrTy, ReduceFnTy->getPointerTo(),
2004  llvm::PointerType::getUnqual(KmpCriticalNameTy)};
2005  auto *FnTy =
2006  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
2007  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_reduce_nowait");
2008  break;
2009  }
2010  case OMPRTL__kmpc_end_reduce: {
2011  // Build void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
2012  // kmp_critical_name *lck);
2013  llvm::Type *TypeParams[] = {
2014  getIdentTyPointerTy(), CGM.Int32Ty,
2015  llvm::PointerType::getUnqual(KmpCriticalNameTy)};
2016  auto *FnTy =
2017  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2018  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_reduce");
2019  break;
2020  }
2021  case OMPRTL__kmpc_end_reduce_nowait: {
2022  // Build __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
2023  // kmp_critical_name *lck);
2024  llvm::Type *TypeParams[] = {
2025  getIdentTyPointerTy(), CGM.Int32Ty,
2026  llvm::PointerType::getUnqual(KmpCriticalNameTy)};
2027  auto *FnTy =
2028  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2029  RTLFn =
2030  CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_reduce_nowait");
2031  break;
2032  }
2033  case OMPRTL__kmpc_omp_task_begin_if0: {
2034  // Build void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid, kmp_task_t
2035  // *new_task);
2036  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
2037  CGM.VoidPtrTy};
2038  auto *FnTy =
2039  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2040  RTLFn =
2041  CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_begin_if0");
2042  break;
2043  }
2044  case OMPRTL__kmpc_omp_task_complete_if0: {
2045  // Build void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid, kmp_task_t
2046  // *new_task);
2047  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
2048  CGM.VoidPtrTy};
2049  auto *FnTy =
2050  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2051  RTLFn = CGM.CreateRuntimeFunction(FnTy,
2052  /*Name=*/"__kmpc_omp_task_complete_if0");
2053  break;
2054  }
2055  case OMPRTL__kmpc_ordered: {
2056  // Build void __kmpc_ordered(ident_t *loc, kmp_int32 global_tid);
2057  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
2058  auto *FnTy =
2059  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2060  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_ordered");
2061  break;
2062  }
2063  case OMPRTL__kmpc_end_ordered: {
2064  // Build void __kmpc_end_ordered(ident_t *loc, kmp_int32 global_tid);
2065  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
2066  auto *FnTy =
2067  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2068  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_ordered");
2069  break;
2070  }
2071  case OMPRTL__kmpc_omp_taskwait: {
2072  // Build kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32 global_tid);
2073  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
2074  auto *FnTy =
2075  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
2076  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_omp_taskwait");
2077  break;
2078  }
2079  case OMPRTL__kmpc_taskgroup: {
2080  // Build void __kmpc_taskgroup(ident_t *loc, kmp_int32 global_tid);
2081  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
2082  auto *FnTy =
2083  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2084  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_taskgroup");
2085  break;
2086  }
2087  case OMPRTL__kmpc_end_taskgroup: {
2088  // Build void __kmpc_end_taskgroup(ident_t *loc, kmp_int32 global_tid);
2089  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
2090  auto *FnTy =
2091  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2092  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_taskgroup");
2093  break;
2094  }
2095  case OMPRTL__kmpc_push_proc_bind: {
2096  // Build void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
2097  // int proc_bind)
2098  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
2099  auto *FnTy =
2100  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2101  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_proc_bind");
2102  break;
2103  }
2104  case OMPRTL__kmpc_omp_task_with_deps: {
2105  // Build kmp_int32 __kmpc_omp_task_with_deps(ident_t *, kmp_int32 gtid,
2106  // kmp_task_t *new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list,
2107  // kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
2108  llvm::Type *TypeParams[] = {
2109  getIdentTyPointerTy(), CGM.Int32Ty, CGM.VoidPtrTy, CGM.Int32Ty,
2110  CGM.VoidPtrTy, CGM.Int32Ty, CGM.VoidPtrTy};
2111  auto *FnTy =
2112  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
2113  RTLFn =
2114  CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_with_deps");
2115  break;
2116  }
2117  case OMPRTL__kmpc_omp_wait_deps: {
2118  // Build void __kmpc_omp_wait_deps(ident_t *, kmp_int32 gtid,
2119  // kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias,
2120  // kmp_depend_info_t *noalias_dep_list);
2121  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
2122  CGM.Int32Ty, CGM.VoidPtrTy,
2123  CGM.Int32Ty, CGM.VoidPtrTy};
2124  auto *FnTy =
2125  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2126  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_wait_deps");
2127  break;
2128  }
2129  case OMPRTL__kmpc_cancellationpoint: {
2130  // Build kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
2131  // global_tid, kmp_int32 cncl_kind)
2132  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
2133  auto *FnTy =
2134  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2135  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_cancellationpoint");
2136  break;
2137  }
2138  case OMPRTL__kmpc_cancel: {
2139  // Build kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
2140  // kmp_int32 cncl_kind)
2141  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
2142  auto *FnTy =
2143  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2144  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_cancel");
2145  break;
2146  }
2147  case OMPRTL__kmpc_push_num_teams: {
2148  // Build void __kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid,
2149  // kmp_int32 num_teams, kmp_int32 num_threads)
2150  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty,
2151  CGM.Int32Ty};
2152  auto *FnTy =
2153  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2154  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_num_teams");
2155  break;
2156  }
2157  case OMPRTL__kmpc_fork_teams: {
2158  // Build void __kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro
2159  // microtask, ...);
2160  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
2161  getKmpc_MicroPointerTy()};
2162  auto *FnTy =
2163  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ true);
2164  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_fork_teams");
2165  if (auto *F = dyn_cast<llvm::Function>(RTLFn.getCallee())) {
2166  if (!F->hasMetadata(llvm::LLVMContext::MD_callback)) {
2167  llvm::LLVMContext &Ctx = F->getContext();
2168  llvm::MDBuilder MDB(Ctx);
2169  // Annotate the callback behavior of the __kmpc_fork_teams:
2170  // - The callback callee is argument number 2 (microtask).
2171  // - The first two arguments of the callback callee are unknown (-1).
2172  // - All variadic arguments to the __kmpc_fork_teams are passed to the
2173  // callback callee.
2174  F->addMetadata(
2175  llvm::LLVMContext::MD_callback,
2176  *llvm::MDNode::get(Ctx, {MDB.createCallbackEncoding(
2177  2, {-1, -1},
2178  /* VarArgsArePassed */ true)}));
2179  }
2180  }
2181  break;
2182  }
2183  case OMPRTL__kmpc_taskloop: {
2184  // Build void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int
2185  // if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int
2186  // sched, kmp_uint64 grainsize, void *task_dup);
2187  llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
2188  CGM.IntTy,
2189  CGM.VoidPtrTy,
2190  CGM.IntTy,
2191  CGM.Int64Ty->getPointerTo(),
2192  CGM.Int64Ty->getPointerTo(),
2193  CGM.Int64Ty,
2194  CGM.IntTy,
2195  CGM.IntTy,
2196  CGM.Int64Ty,
2197  CGM.VoidPtrTy};
2198  auto *FnTy =
2199  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2200  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_taskloop");
2201  break;
2202  }
2203  case OMPRTL__kmpc_doacross_init: {
2204  // Build void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid, kmp_int32
2205  // num_dims, struct kmp_dim *dims);
2206  llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
2207  CGM.Int32Ty,
2208  CGM.Int32Ty,
2209  CGM.VoidPtrTy};
2210  auto *FnTy =
2211  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2212  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_init");
2213  break;
2214  }
2215  case OMPRTL__kmpc_doacross_fini: {
2216  // Build void __kmpc_doacross_fini(ident_t *loc, kmp_int32 gtid);
2217  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
2218  auto *FnTy =
2219  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2220  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_fini");
2221  break;
2222  }
2223  case OMPRTL__kmpc_doacross_post: {
2224  // Build void __kmpc_doacross_post(ident_t *loc, kmp_int32 gtid, kmp_int64
2225  // *vec);
2226  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
2227  CGM.Int64Ty->getPointerTo()};
2228  auto *FnTy =
2229  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2230  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_post");
2231  break;
2232  }
2233  case OMPRTL__kmpc_doacross_wait: {
2234  // Build void __kmpc_doacross_wait(ident_t *loc, kmp_int32 gtid, kmp_int64
2235  // *vec);
2236  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
2237  CGM.Int64Ty->getPointerTo()};
2238  auto *FnTy =
2239  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2240  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_wait");
2241  break;
2242  }
2243  case OMPRTL__kmpc_task_reduction_init: {
2244  // Build void *__kmpc_task_reduction_init(int gtid, int num_data, void
2245  // *data);
2246  llvm::Type *TypeParams[] = {CGM.IntTy, CGM.IntTy, CGM.VoidPtrTy};
2247  auto *FnTy =
2248  llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
2249  RTLFn =
2250  CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_task_reduction_init");
2251  break;
2252  }
2253  case OMPRTL__kmpc_task_reduction_get_th_data: {
2254  // Build void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void
2255  // *d);
2256  llvm::Type *TypeParams[] = {CGM.IntTy, CGM.VoidPtrTy, CGM.VoidPtrTy};
2257  auto *FnTy =
2258  llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
2259  RTLFn = CGM.CreateRuntimeFunction(
2260  FnTy, /*Name=*/"__kmpc_task_reduction_get_th_data");
2261  break;
2262  }
2263  case OMPRTL__kmpc_alloc: {
2264  // Build to void *__kmpc_alloc(int gtid, size_t sz, omp_allocator_handle_t
2265  // al); omp_allocator_handle_t type is void *.
2266  llvm::Type *TypeParams[] = {CGM.IntTy, CGM.SizeTy, CGM.VoidPtrTy};
2267  auto *FnTy =
2268  llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
2269  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_alloc");
2270  break;
2271  }
2272  case OMPRTL__kmpc_free: {
2273  // Build to void __kmpc_free(int gtid, void *ptr, omp_allocator_handle_t
2274  // al); omp_allocator_handle_t type is void *.
2275  llvm::Type *TypeParams[] = {CGM.IntTy, CGM.VoidPtrTy, CGM.VoidPtrTy};
2276  auto *FnTy =
2277  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2278  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_free");
2279  break;
2280  }
2281  case OMPRTL__kmpc_push_target_tripcount: {
2282  // Build void __kmpc_push_target_tripcount(int64_t device_id, kmp_uint64
2283  // size);
2284  llvm::Type *TypeParams[] = {CGM.Int64Ty, CGM.Int64Ty};
2285  llvm::FunctionType *FnTy =
2286  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2287  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_target_tripcount");
2288  break;
2289  }
2290  case OMPRTL__tgt_target: {
2291  // Build int32_t __tgt_target(int64_t device_id, void *host_ptr, int32_t
2292  // arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
2293  // *arg_types);
2294  llvm::Type *TypeParams[] = {CGM.Int64Ty,
2295  CGM.VoidPtrTy,
2296  CGM.Int32Ty,
2297  CGM.VoidPtrPtrTy,
2298  CGM.VoidPtrPtrTy,
2299  CGM.Int64Ty->getPointerTo(),
2300  CGM.Int64Ty->getPointerTo()};
2301  auto *FnTy =
2302  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2303  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target");
2304  break;
2305  }
2306  case OMPRTL__tgt_target_nowait: {
2307  // Build int32_t __tgt_target_nowait(int64_t device_id, void *host_ptr,
2308  // int32_t arg_num, void** args_base, void **args, int64_t *arg_sizes,
2309  // int64_t *arg_types);
2310  llvm::Type *TypeParams[] = {CGM.Int64Ty,
2311  CGM.VoidPtrTy,
2312  CGM.Int32Ty,
2313  CGM.VoidPtrPtrTy,
2314  CGM.VoidPtrPtrTy,
2315  CGM.Int64Ty->getPointerTo(),
2316  CGM.Int64Ty->getPointerTo()};
2317  auto *FnTy =
2318  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2319  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_nowait");
2320  break;
2321  }
2322  case OMPRTL__tgt_target_teams: {
2323  // Build int32_t __tgt_target_teams(int64_t device_id, void *host_ptr,
2324  // int32_t arg_num, void** args_base, void **args, int64_t *arg_sizes,
2325  // int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
2326  llvm::Type *TypeParams[] = {CGM.Int64Ty,
2327  CGM.VoidPtrTy,
2328  CGM.Int32Ty,
2329  CGM.VoidPtrPtrTy,
2330  CGM.VoidPtrPtrTy,
2331  CGM.Int64Ty->getPointerTo(),
2332  CGM.Int64Ty->getPointerTo(),
2333  CGM.Int32Ty,
2334  CGM.Int32Ty};
2335  auto *FnTy =
2336  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2337  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_teams");
2338  break;
2339  }
2340  case OMPRTL__tgt_target_teams_nowait: {
2341  // Build int32_t __tgt_target_teams_nowait(int64_t device_id, void
2342  // *host_ptr, int32_t arg_num, void** args_base, void **args, int64_t
2343  // *arg_sizes, int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
2344  llvm::Type *TypeParams[] = {CGM.Int64Ty,
2345  CGM.VoidPtrTy,
2346  CGM.Int32Ty,
2347  CGM.VoidPtrPtrTy,
2348  CGM.VoidPtrPtrTy,
2349  CGM.Int64Ty->getPointerTo(),
2350  CGM.Int64Ty->getPointerTo(),
2351  CGM.Int32Ty,
2352  CGM.Int32Ty};
2353  auto *FnTy =
2354  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2355  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_teams_nowait");
2356  break;
2357  }
2358  case OMPRTL__tgt_register_requires: {
2359  // Build void __tgt_register_requires(int64_t flags);
2360  llvm::Type *TypeParams[] = {CGM.Int64Ty};
2361  auto *FnTy =
2362  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2363  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_register_requires");
2364  break;
2365  }
2366  case OMPRTL__tgt_register_lib: {
2367  // Build void __tgt_register_lib(__tgt_bin_desc *desc);
2368  QualType ParamTy =
2369  CGM.getContext().getPointerType(getTgtBinaryDescriptorQTy());
2370  llvm::Type *TypeParams[] = {CGM.getTypes().ConvertTypeForMem(ParamTy)};
2371  auto *FnTy =
2372  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2373  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_register_lib");
2374  break;
2375  }
2376  case OMPRTL__tgt_unregister_lib: {
2377  // Build void __tgt_unregister_lib(__tgt_bin_desc *desc);
2378  QualType ParamTy =
2379  CGM.getContext().getPointerType(getTgtBinaryDescriptorQTy());
2380  llvm::Type *TypeParams[] = {CGM.getTypes().ConvertTypeForMem(ParamTy)};
2381  auto *FnTy =
2382  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2383  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_unregister_lib");
2384  break;
2385  }
2386  case OMPRTL__tgt_target_data_begin: {
2387  // Build void __tgt_target_data_begin(int64_t device_id, int32_t arg_num,
2388  // void** args_base, void **args, int64_t *arg_sizes, int64_t *arg_types);
2389  llvm::Type *TypeParams[] = {CGM.Int64Ty,
2390  CGM.Int32Ty,
2391  CGM.VoidPtrPtrTy,
2392  CGM.VoidPtrPtrTy,
2393  CGM.Int64Ty->getPointerTo(),
2394  CGM.Int64Ty->getPointerTo()};
2395  auto *FnTy =
2396  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2397  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_begin");
2398  break;
2399  }
2400  case OMPRTL__tgt_target_data_begin_nowait: {
2401  // Build void __tgt_target_data_begin_nowait(int64_t device_id, int32_t
2402  // arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
2403  // *arg_types);
2404  llvm::Type *TypeParams[] = {CGM.Int64Ty,
2405  CGM.Int32Ty,
2406  CGM.VoidPtrPtrTy,
2407  CGM.VoidPtrPtrTy,
2408  CGM.Int64Ty->getPointerTo(),
2409  CGM.Int64Ty->getPointerTo()};
2410  auto *FnTy =
2411  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2412  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_begin_nowait");
2413  break;
2414  }
2415  case OMPRTL__tgt_target_data_end: {
2416  // Build void __tgt_target_data_end(int64_t device_id, int32_t arg_num,
2417  // void** args_base, void **args, int64_t *arg_sizes, int64_t *arg_types);
2418  llvm::Type *TypeParams[] = {CGM.Int64Ty,
2419  CGM.Int32Ty,
2420  CGM.VoidPtrPtrTy,
2421  CGM.VoidPtrPtrTy,
2422  CGM.Int64Ty->getPointerTo(),
2423  CGM.Int64Ty->getPointerTo()};
2424  auto *FnTy =
2425  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2426  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_end");
2427  break;
2428  }
2429  case OMPRTL__tgt_target_data_end_nowait: {
2430  // Build void __tgt_target_data_end_nowait(int64_t device_id, int32_t
2431  // arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
2432  // *arg_types);
2433  llvm::Type *TypeParams[] = {CGM.Int64Ty,
2434  CGM.Int32Ty,
2435  CGM.VoidPtrPtrTy,
2436  CGM.VoidPtrPtrTy,
2437  CGM.Int64Ty->getPointerTo(),
2438  CGM.Int64Ty->getPointerTo()};
2439  auto *FnTy =
2440  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2441  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_end_nowait");
2442  break;
2443  }
2444  case OMPRTL__tgt_target_data_update: {
2445  // Build void __tgt_target_data_update(int64_t device_id, int32_t arg_num,
2446  // void** args_base, void **args, int64_t *arg_sizes, int64_t *arg_types);
2447  llvm::Type *TypeParams[] = {CGM.Int64Ty,
2448  CGM.Int32Ty,
2449  CGM.VoidPtrPtrTy,
2450  CGM.VoidPtrPtrTy,
2451  CGM.Int64Ty->getPointerTo(),
2452  CGM.Int64Ty->getPointerTo()};
2453  auto *FnTy =
2454  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2455  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_update");
2456  break;
2457  }
2458  case OMPRTL__tgt_target_data_update_nowait: {
2459  // Build void __tgt_target_data_update_nowait(int64_t device_id, int32_t
2460  // arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
2461  // *arg_types);
2462  llvm::Type *TypeParams[] = {CGM.Int64Ty,
2463  CGM.Int32Ty,
2464  CGM.VoidPtrPtrTy,
2465  CGM.VoidPtrPtrTy,
2466  CGM.Int64Ty->getPointerTo(),
2467  CGM.Int64Ty->getPointerTo()};
2468  auto *FnTy =
2469  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2470  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_update_nowait");
2471  break;
2472  }
2473  case OMPRTL__tgt_mapper_num_components: {
2474  // Build int64_t __tgt_mapper_num_components(void *rt_mapper_handle);
2475  llvm::Type *TypeParams[] = {CGM.VoidPtrTy};
2476  auto *FnTy =
2477  llvm::FunctionType::get(CGM.Int64Ty, TypeParams, /*isVarArg*/ false);
2478  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_mapper_num_components");
2479  break;
2480  }
2481  case OMPRTL__tgt_push_mapper_component: {
2482  // Build void __tgt_push_mapper_component(void *rt_mapper_handle, void
2483  // *base, void *begin, int64_t size, int64_t type);
2484  llvm::Type *TypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy, CGM.VoidPtrTy,
2485  CGM.Int64Ty, CGM.Int64Ty};
2486  auto *FnTy =
2487  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2488  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_push_mapper_component");
2489  break;
2490  }
2491  }
2492  assert(RTLFn && "Unable to find OpenMP runtime function");
2493  return RTLFn;
2494 }
2495 
2496 llvm::FunctionCallee
2497 CGOpenMPRuntime::createForStaticInitFunction(unsigned IVSize, bool IVSigned) {
2498  assert((IVSize == 32 || IVSize == 64) &&
2499  "IV size is not compatible with the omp runtime");
2500  StringRef Name = IVSize == 32 ? (IVSigned ? "__kmpc_for_static_init_4"
2501  : "__kmpc_for_static_init_4u")
2502  : (IVSigned ? "__kmpc_for_static_init_8"
2503  : "__kmpc_for_static_init_8u");
2504  llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
2505  auto *PtrTy = llvm::PointerType::getUnqual(ITy);
2506  llvm::Type *TypeParams[] = {
2507  getIdentTyPointerTy(), // loc
2508  CGM.Int32Ty, // tid
2509  CGM.Int32Ty, // schedtype
2510  llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
2511  PtrTy, // p_lower
2512  PtrTy, // p_upper
2513  PtrTy, // p_stride
2514  ITy, // incr
2515  ITy // chunk
2516  };
2517  auto *FnTy =
2518  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2519  return CGM.CreateRuntimeFunction(FnTy, Name);
2520 }
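// Illustrative use of the callee created above (assumption): a statically
// scheduled loop with a signed 32-bit induction variable is emitted roughly as
//   __kmpc_for_static_init_4(&loc, tid, /*schedtype=*/kmp_sch_static,
//                            &lastiter, &lb, &ub, &stride, /*incr=*/1, /*chunk=*/1);
//   ... run iterations [lb, ub] ...
//   __kmpc_for_static_fini(&loc, tid);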
2521 
2522 llvm::FunctionCallee
2523 CGOpenMPRuntime::createDispatchInitFunction(unsigned IVSize, bool IVSigned) {
2524  assert((IVSize == 32 || IVSize == 64) &&
2525  "IV size is not compatible with the omp runtime");
2526  StringRef Name =
2527  IVSize == 32
2528  ? (IVSigned ? "__kmpc_dispatch_init_4" : "__kmpc_dispatch_init_4u")
2529  : (IVSigned ? "__kmpc_dispatch_init_8" : "__kmpc_dispatch_init_8u");
2530  llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
2531  llvm::Type *TypeParams[] = { getIdentTyPointerTy(), // loc
2532  CGM.Int32Ty, // tid
2533  CGM.Int32Ty, // schedtype
2534  ITy, // lower
2535  ITy, // upper
2536  ITy, // stride
2537  ITy // chunk
2538  };
2539  auto *FnTy =
2540  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2541  return CGM.CreateRuntimeFunction(FnTy, Name);
2542 }
2543 
2544 llvm::FunctionCallee
2545 CGOpenMPRuntime::createDispatchFiniFunction(unsigned IVSize, bool IVSigned) {
2546  assert((IVSize == 32 || IVSize == 64) &&
2547  "IV size is not compatible with the omp runtime");
2548  StringRef Name =
2549  IVSize == 32
2550  ? (IVSigned ? "__kmpc_dispatch_fini_4" : "__kmpc_dispatch_fini_4u")
2551  : (IVSigned ? "__kmpc_dispatch_fini_8" : "__kmpc_dispatch_fini_8u");
2552  llvm::Type *TypeParams[] = {
2553  getIdentTyPointerTy(), // loc
2554  CGM.Int32Ty, // tid
2555  };
2556  auto *FnTy =
2557  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2558  return CGM.CreateRuntimeFunction(FnTy, Name);
2559 }
2560 
2561 llvm::FunctionCallee
2562 CGOpenMPRuntime::createDispatchNextFunction(unsigned IVSize, bool IVSigned) {
2563  assert((IVSize == 32 || IVSize == 64) &&
2564  "IV size is not compatible with the omp runtime");
2565  StringRef Name =
2566  IVSize == 32
2567  ? (IVSigned ? "__kmpc_dispatch_next_4" : "__kmpc_dispatch_next_4u")
2568  : (IVSigned ? "__kmpc_dispatch_next_8" : "__kmpc_dispatch_next_8u");
2569  llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
2570  auto *PtrTy = llvm::PointerType::getUnqual(ITy);
2571  llvm::Type *TypeParams[] = {
2572  getIdentTyPointerTy(), // loc
2573  CGM.Int32Ty, // tid
2574  llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
2575  PtrTy, // p_lower
2576  PtrTy, // p_upper
2577  PtrTy // p_stride
2578  };
2579  auto *FnTy =
2580  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2581  return CGM.CreateRuntimeFunction(FnTy, Name);
2582 }
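// Illustrative dispatch pattern (assumption): dynamically scheduled loops pair
// the helpers above roughly as
//   __kmpc_dispatch_init_4(&loc, tid, schedtype, lb, ub, stride, chunk);
//   while (__kmpc_dispatch_next_4(&loc, tid, &lastiter, &lb, &ub, &stride)) {
//     // execute iterations [lb, ub]
//   }
// with __kmpc_dispatch_fini_4 emitted only for the schedules that require it.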
2583 
2584 /// Obtain information that uniquely identifies a target entry. This
2585 /// consists of the file and device IDs as well as line number associated with
2586 /// the relevant entry source location.
2587 static void getTargetEntryUniqueInfo(ASTContext &C, SourceLocation Loc,
2588  unsigned &DeviceID, unsigned &FileID,
2589  unsigned &LineNum) {
2590  SourceManager &SM = C.getSourceManager();
2591 
2592  // The loc should be always valid and have a file ID (the user cannot use
2593  // #pragma directives in macros)
2594 
2595  assert(Loc.isValid() && "Source location is expected to be always valid.");
2596 
2597  PresumedLoc PLoc = SM.getPresumedLoc(Loc);
2598  assert(PLoc.isValid() && "Source location is expected to be always valid.");
2599 
2600  llvm::sys::fs::UniqueID ID;
2601  if (auto EC = llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID))
2602  SM.getDiagnostics().Report(diag::err_cannot_open_file)
2603  << PLoc.getFilename() << EC.message();
2604 
2605  DeviceID = ID.getDevice();
2606  FileID = ID.getFile();
2607  LineNum = PLoc.getLine();
2608 }
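// Worked example (illustrative values): an entry declared at line 42 of a file
// whose inode is 0x1c on device 0x801 yields DeviceID=0x801, FileID=0x1c and
// LineNum=42, which later surfaces in offload entry names of the form
//   __omp_offloading_801_1c_<name>_l42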
2609 
2610 Address CGOpenMPRuntime::getAddrOfDeclareTargetVar(const VarDecl *VD) {
2611  if (CGM.getLangOpts().OpenMPSimd)
2612  return Address::invalid();
2613  llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
2614  OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
2615  if (Res && (*Res == OMPDeclareTargetDeclAttr::MT_Link ||
2616  (*Res == OMPDeclareTargetDeclAttr::MT_To &&
2617  HasRequiresUnifiedSharedMemory))) {
2618  SmallString<64> PtrName;
2619  {
2620  llvm::raw_svector_ostream OS(PtrName);
2621  OS << CGM.getMangledName(GlobalDecl(VD));
2622  if (!VD->isExternallyVisible()) {
2623  unsigned DeviceID, FileID, Line;
2624  getTargetEntryUniqueInfo(CGM.getContext(),
2625  VD->getCanonicalDecl()->getBeginLoc(),
2626  DeviceID, FileID, Line);
2627  OS << llvm::format("_%x", FileID);
2628  }
2629  OS << "_decl_tgt_ref_ptr";
2630  }
2631  llvm::Value *Ptr = CGM.getModule().getNamedValue(PtrName);
2632  if (!Ptr) {
2633  QualType PtrTy = CGM.getContext().getPointerType(VD->getType());
2634  Ptr = getOrCreateInternalVariable(CGM.getTypes().ConvertTypeForMem(PtrTy),
2635  PtrName);
2636 
2637  auto *GV = cast<llvm::GlobalVariable>(Ptr);
2638  GV->setLinkage(llvm::GlobalValue::WeakAnyLinkage);
2639 
2640  if (!CGM.getLangOpts().OpenMPIsDevice)
2641  GV->setInitializer(CGM.GetAddrOfGlobal(VD));
2642  registerTargetGlobalVariable(VD, cast<llvm::Constant>(Ptr));
2643  }
2644  return Address(Ptr, CGM.getContext().getDeclAlign(VD));
2645  }
2646  return Address::invalid();
2647 }
2648 
2649 llvm::Constant *
2650 CGOpenMPRuntime::getOrCreateThreadPrivateCache(const VarDecl *VD) {
2651  assert(!CGM.getLangOpts().OpenMPUseTLS ||
2652  !CGM.getContext().getTargetInfo().isTLSSupported());
2653  // Lookup the entry, lazily creating it if necessary.
2654  std::string Suffix = getName({"cache", ""});
2655  return getOrCreateInternalVariable(
2656  CGM.Int8PtrPtrTy, Twine(CGM.getMangledName(VD)).concat(Suffix));
2657 }
2658 
2659 Address CGOpenMPRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
2660  const VarDecl *VD,
2661  Address VDAddr,
2662  SourceLocation Loc) {
2663  if (CGM.getLangOpts().OpenMPUseTLS &&
2664  CGM.getContext().getTargetInfo().isTLSSupported())
2665  return VDAddr;
2666 
2667  llvm::Type *VarTy = VDAddr.getElementType();
2668  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
2669  CGF.Builder.CreatePointerCast(VDAddr.getPointer(),
2670  CGM.Int8PtrTy),
2671  CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy)),
2672  getOrCreateThreadPrivateCache(VD)};
2673  return Address(CGF.EmitRuntimeCall(
2674  createRuntimeFunction(OMPRTL__kmpc_threadprivate_cached), Args),
2675  VDAddr.getAlignment());
2676 }
2677 
2678 void CGOpenMPRuntime::emitThreadPrivateVarInit(
2679  CodeGenFunction &CGF, Address VDAddr, llvm::Value *Ctor,
2680  llvm::Value *CopyCtor, llvm::Value *Dtor, SourceLocation Loc) {
2681  // Call kmp_int32 __kmpc_global_thread_num(&loc) to init OpenMP runtime
2682  // library.
2683  llvm::Value *OMPLoc = emitUpdateLocation(CGF, Loc);
2684  CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_global_thread_num),
2685  OMPLoc);
2686  // Call __kmpc_threadprivate_register(&loc, &var, ctor, cctor/*NULL*/, dtor)
2687  // to register constructor/destructor for variable.
2688  llvm::Value *Args[] = {
2689  OMPLoc, CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.VoidPtrTy),
2690  Ctor, CopyCtor, Dtor};
2691  CGF.EmitRuntimeCall(
2692  createRuntimeFunction(OMPRTL__kmpc_threadprivate_register), Args);
2693 }
2694 
2695 llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
2696  const VarDecl *VD, Address VDAddr, SourceLocation Loc,
2697  bool PerformInit, CodeGenFunction *CGF) {
2698  if (CGM.getLangOpts().OpenMPUseTLS &&
2699  CGM.getContext().getTargetInfo().isTLSSupported())
2700  return nullptr;
2701 
2702  VD = VD->getDefinition(CGM.getContext());
2703  if (VD && ThreadPrivateWithDefinition.insert(CGM.getMangledName(VD)).second) {
2704  QualType ASTTy = VD->getType();
2705 
2706  llvm::Value *Ctor = nullptr, *CopyCtor = nullptr, *Dtor = nullptr;
2707  const Expr *Init = VD->getAnyInitializer();
2708  if (CGM.getLangOpts().CPlusPlus && PerformInit) {
2709  // Generate function that re-emits the declaration's initializer into the
2710  // threadprivate copy of the variable VD
2711  CodeGenFunction CtorCGF(CGM);
2712  FunctionArgList Args;
2713  ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, Loc,
2714  /*Id=*/nullptr, CGM.getContext().VoidPtrTy,
2715  ImplicitParamDecl::Other);
2716  Args.push_back(&Dst);
2717 
2718  const auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
2719  CGM.getContext().VoidPtrTy, Args);
2720  llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
2721  std::string Name = getName({"__kmpc_global_ctor_", ""});
2722  llvm::Function *Fn =
2723  CGM.CreateGlobalInitOrDestructFunction(FTy, Name, FI, Loc);
2724  CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidPtrTy, Fn, FI,
2725  Args, Loc, Loc);
2726  llvm::Value *ArgVal = CtorCGF.EmitLoadOfScalar(
2727  CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
2728  CGM.getContext().VoidPtrTy, Dst.getLocation());
2729  Address Arg = Address(ArgVal, VDAddr.getAlignment());
2730  Arg = CtorCGF.Builder.CreateElementBitCast(
2731  Arg, CtorCGF.ConvertTypeForMem(ASTTy));
2732  CtorCGF.EmitAnyExprToMem(Init, Arg, Init->getType().getQualifiers(),
2733  /*IsInitializer=*/true);
2734  ArgVal = CtorCGF.EmitLoadOfScalar(
2735  CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
2736  CGM.getContext().VoidPtrTy, Dst.getLocation());
2737  CtorCGF.Builder.CreateStore(ArgVal, CtorCGF.ReturnValue);
2738  CtorCGF.FinishFunction();
2739  Ctor = Fn;
2740  }
2741  if (VD->getType().isDestructedType() != QualType::DK_none) {
2742  // Generate function that emits destructor call for the threadprivate copy
2743  // of the variable VD
2744  CodeGenFunction DtorCGF(CGM);
2745  FunctionArgList Args;
2746  ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, Loc,
2747  /*Id=*/nullptr, CGM.getContext().VoidPtrTy,
2748  ImplicitParamDecl::Other);
2749  Args.push_back(&Dst);
2750 
2751  const auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
2752  CGM.getContext().VoidTy, Args);
2753  llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
2754  std::string Name = getName({"__kmpc_global_dtor_", ""});
2755  llvm::Function *Fn =
2756  CGM.CreateGlobalInitOrDestructFunction(FTy, Name, FI, Loc);
2757  auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF);
2758  DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI, Args,
2759  Loc, Loc);
2760  // Create a scope with an artificial location for the body of this function.
2761  auto AL = ApplyDebugLocation::CreateArtificial(DtorCGF);
2762  llvm::Value *ArgVal = DtorCGF.EmitLoadOfScalar(
2763  DtorCGF.GetAddrOfLocalVar(&Dst),
2764  /*Volatile=*/false, CGM.getContext().VoidPtrTy, Dst.getLocation());
2765  DtorCGF.emitDestroy(Address(ArgVal, VDAddr.getAlignment()), ASTTy,
2766  DtorCGF.getDestroyer(ASTTy.isDestructedType()),
2767  DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
2768  DtorCGF.FinishFunction();
2769  Dtor = Fn;
2770  }
2771  // Do not emit init function if it is not required.
2772  if (!Ctor && !Dtor)
2773  return nullptr;
2774 
2775  llvm::Type *CopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
2776  auto *CopyCtorTy = llvm::FunctionType::get(CGM.VoidPtrTy, CopyCtorTyArgs,
2777  /*isVarArg=*/false)
2778  ->getPointerTo();
2779  // Copying constructor for the threadprivate variable.
2780  // Must be NULL: this parameter is reserved by the runtime, which currently
2781  // requires it to be NULL and asserts otherwise.
2782  CopyCtor = llvm::Constant::getNullValue(CopyCtorTy);
2783  if (Ctor == nullptr) {
2784  auto *CtorTy = llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
2785  /*isVarArg=*/false)
2786  ->getPointerTo();
2787  Ctor = llvm::Constant::getNullValue(CtorTy);
2788  }
2789  if (Dtor == nullptr) {
2790  auto *DtorTy = llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy,
2791  /*isVarArg=*/false)
2792  ->getPointerTo();
2793  Dtor = llvm::Constant::getNullValue(DtorTy);
2794  }
2795  if (!CGF) {
2796  auto *InitFunctionTy =
2797  llvm::FunctionType::get(CGM.VoidTy, /*isVarArg*/ false);
2798  std::string Name = getName({"__omp_threadprivate_init_", ""});
2799  llvm::Function *InitFunction = CGM.CreateGlobalInitOrDestructFunction(
2800  InitFunctionTy, Name, CGM.getTypes().arrangeNullaryFunction());
2801  CodeGenFunction InitCGF(CGM);
2802  FunctionArgList ArgList;
2803  InitCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, InitFunction,
2804  CGM.getTypes().arrangeNullaryFunction(), ArgList,
2805  Loc, Loc);
2806  emitThreadPrivateVarInit(InitCGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
2807  InitCGF.FinishFunction();
2808  return InitFunction;
2809  }
2810  emitThreadPrivateVarInit(*CGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
2811  }
2812  return nullptr;
2813 }
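// Illustrative note, not part of the upstream file: for
//   static S x(1);
//   #pragma omp threadprivate(x)
// the function above synthesizes __kmpc_global_ctor_/__kmpc_global_dtor_ helpers
// that run S::S and S::~S on the threadprivate copy and registers them via
//   __kmpc_threadprivate_register(&loc, &x, ctor, /*cctor=*/NULL, dtor);
// emitted either into the supplied CodeGenFunction or into a standalone
// __omp_threadprivate_init_ initializer when no CGF is passed.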
2814 
2815 bool CGOpenMPRuntime::emitDeclareTargetVarDefinition(const VarDecl *VD,
2816  llvm::GlobalVariable *Addr,
2817  bool PerformInit) {
2818  llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
2819  OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
2820  if (!Res || *Res == OMPDeclareTargetDeclAttr::MT_Link ||
2821  (*Res == OMPDeclareTargetDeclAttr::MT_To &&
2822  HasRequiresUnifiedSharedMemory))
2823  return CGM.getLangOpts().OpenMPIsDevice;
2824  VD = VD->getDefinition(CGM.getContext());
2825  if (VD && !DeclareTargetWithDefinition.insert(CGM.getMangledName(VD)).second)
2826  return CGM.getLangOpts().OpenMPIsDevice;
2827 
2828  QualType ASTTy = VD->getType();
2829 
2830  SourceLocation Loc = VD->getCanonicalDecl()->getBeginLoc();
2831  // Produce the unique prefix to identify the new target regions. We use
2832  // the source location of the variable declaration which we know to not
2833  // conflict with any target region.
2834  unsigned DeviceID;
2835  unsigned FileID;
2836  unsigned Line;
2837  getTargetEntryUniqueInfo(CGM.getContext(), Loc, DeviceID, FileID, Line);
2838  SmallString<128> Buffer, Out;
2839  {
2840  llvm::raw_svector_ostream OS(Buffer);
2841  OS << "__omp_offloading_" << llvm::format("_%x", DeviceID)
2842  << llvm::format("_%x_", FileID) << VD->getName() << "_l" << Line;
2843  }
2844 
2845  const Expr *Init = VD->getAnyInitializer();
2846  if (CGM.getLangOpts().CPlusPlus && PerformInit) {
2847  llvm::Constant *Ctor;
2848  llvm::Constant *ID;
2849  if (CGM.getLangOpts().OpenMPIsDevice) {
2850  // Generate function that re-emits the declaration's initializer into
2851  // the threadprivate copy of the variable VD
2852  CodeGenFunction CtorCGF(CGM);
2853 
2854  const auto &FI = CGM.getTypes().arrangeNullaryFunction();
2855  llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
2856  llvm::Function *Fn = CGM.CreateGlobalInitOrDestructFunction(
2857  FTy, Twine(Buffer, "_ctor"), FI, Loc);
2858  auto NL = ApplyDebugLocation::CreateEmpty(CtorCGF);
2859  CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI,
2860  FunctionArgList(), Loc, Loc);
2861  auto AL = ApplyDebugLocation::CreateArtificial(CtorCGF);
2862  CtorCGF.EmitAnyExprToMem(Init,
2863  Address(Addr, CGM.getContext().getDeclAlign(VD)),
2864  Init->getType().getQualifiers(),
2865  /*IsInitializer=*/true);
2866  CtorCGF.FinishFunction();
2867  Ctor = Fn;
2868  ID = llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
2869  CGM.addUsedGlobal(cast<llvm::GlobalValue>(Ctor));
2870  } else {
2871  Ctor = new llvm::GlobalVariable(
2872  CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
2873  llvm::GlobalValue::PrivateLinkage,
2874  llvm::Constant::getNullValue(CGM.Int8Ty), Twine(Buffer, "_ctor"));
2875  ID = Ctor;
2876  }
2877 
2878  // Register the information for the entry associated with the constructor.
2879  Out.clear();
2880  OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
2881  DeviceID, FileID, Twine(Buffer, "_ctor").toStringRef(Out), Line, Ctor,
2882  ID, OffloadEntriesInfoManagerTy::OMPTargetRegionEntryCtor);
2883  }
2884  if (VD->getType().isDestructedType() != QualType::DK_none) {
2885  llvm::Constant *Dtor;
2886  llvm::Constant *ID;
2887  if (CGM.getLangOpts().OpenMPIsDevice) {
2888  // Generate function that emits destructor call for the threadprivate
2889  // copy of the variable VD
2890  CodeGenFunction DtorCGF(CGM);
2891 
2892  const auto &FI = CGM.getTypes().arrangeNullaryFunction();
2893  llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
2894  llvm::Function *Fn = CGM.CreateGlobalInitOrDestructFunction(
2895  FTy, Twine(Buffer, "_dtor"), FI, Loc);
2896  auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF);
2897  DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI,
2898  FunctionArgList(), Loc, Loc);
2899  // Create a scope with an artificial location for the body of this
2900  // function.
2901  auto AL = ApplyDebugLocation::CreateArtificial(DtorCGF);
2902  DtorCGF.emitDestroy(Address(Addr, CGM.getContext().getDeclAlign(VD)),
2903  ASTTy, DtorCGF.getDestroyer(ASTTy.isDestructedType()),
2904  DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
2905  DtorCGF.FinishFunction();
2906  Dtor = Fn;
2907  ID = llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
2908  CGM.addUsedGlobal(cast<llvm::GlobalValue>(Dtor));
2909  } else {
2910  Dtor = new llvm::GlobalVariable(
2911  CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
2912  llvm::GlobalValue::PrivateLinkage,
2913  llvm::Constant::getNullValue(CGM.Int8Ty), Twine(Buffer, "_dtor"));
2914  ID = Dtor;
2915  }
2916  // Register the information for the entry associated with the destructor.
2917  Out.clear();
2918  OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
2919  DeviceID, FileID, Twine(Buffer, "_dtor").toStringRef(Out), Line, Dtor,
2920  ID, OffloadEntriesInfoManagerTy::OMPTargetRegionEntryDtor);
2921  }
2922  return CGM.getLangOpts().OpenMPIsDevice;
2923 }
2924 
2925 Address CGOpenMPRuntime::getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
2926  QualType VarType,
2927  StringRef Name) {
2928  std::string Suffix = getName({"artificial", ""});
2929  std::string CacheSuffix = getName({"cache", ""});
2930  llvm::Type *VarLVType = CGF.ConvertTypeForMem(VarType);
2931  llvm::Value *GAddr =
2932  getOrCreateInternalVariable(VarLVType, Twine(Name).concat(Suffix));
2933  llvm::Value *Args[] = {
2935  getThreadID(CGF, SourceLocation()),
2937  CGF.Builder.CreateIntCast(CGF.getTypeSize(VarType), CGM.SizeTy,
2938  /*isSigned=*/false),
2940  CGM.VoidPtrPtrTy, Twine(Name).concat(Suffix).concat(CacheSuffix))};
2941  return Address(
2943  CGF.EmitRuntimeCall(
2945  VarLVType->getPointerTo(/*AddrSpace=*/0)),
2946  CGM.getPointerAlign());
2947 }
2948 
2950  const RegionCodeGenTy &ThenGen,
2951  const RegionCodeGenTy &ElseGen) {
2952  CodeGenFunction::LexicalScope ConditionScope(CGF, Cond->getSourceRange());
2953 
2954  // If the condition constant folds and can be elided, try to avoid emitting
2955  // the condition and the dead arm of the if/else.
2956  bool CondConstant;
2957  if (CGF.ConstantFoldsToSimpleInteger(Cond, CondConstant)) {
2958  if (CondConstant)
2959  ThenGen(CGF);
2960  else
2961  ElseGen(CGF);
2962  return;
2963  }
2964 
2965  // Otherwise, the condition did not fold, or we couldn't elide it. Just
2966  // emit the conditional branch.
2967  llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("omp_if.then");
2968  llvm::BasicBlock *ElseBlock = CGF.createBasicBlock("omp_if.else");
2969  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("omp_if.end");
2970  CGF.EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, /*TrueCount=*/0);
2971 
2972  // Emit the 'then' code.
2973  CGF.EmitBlock(ThenBlock);
2974  ThenGen(CGF);
2975  CGF.EmitBranch(ContBlock);
2976  // Emit the 'else' code if present.
2977  // There is no need to emit line number for unconditional branch.
2979  CGF.EmitBlock(ElseBlock);
2980  ElseGen(CGF);
2981  // There is no need to emit line number for unconditional branch.
2983  CGF.EmitBranch(ContBlock);
2984  // Emit the continuation block for code after the if.
2985  CGF.EmitBlock(ContBlock, /*IsFinished=*/true);
2986 }
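/// For illustration only (hypothetical input): for a directive such as
/// \code
/// #pragma omp parallel if(0)
/// \endcode
/// the condition constant-folds to false, so only the ElseGen arm is emitted
/// and no omp_if.then/omp_if.else/omp_if.end blocks are created; a
/// non-constant condition produces the three-block CFG built above.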
2987 
2989  llvm::Function *OutlinedFn,
2990  ArrayRef<llvm::Value *> CapturedVars,
2991  const Expr *IfCond) {
2992  if (!CGF.HaveInsertPoint())
2993  return;
2994  llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
2995  auto &&ThenGen = [OutlinedFn, CapturedVars, RTLoc](CodeGenFunction &CGF,
2996  PrePostActionTy &) {
2997  // Build call __kmpc_fork_call(loc, n, microtask, var1, .., varn);
2998  CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
2999  llvm::Value *Args[] = {
3000  RTLoc,
3001  CGF.Builder.getInt32(CapturedVars.size()), // Number of captured vars
3002  CGF.Builder.CreateBitCast(OutlinedFn, RT.getKmpc_MicroPointerTy())};
3004  RealArgs.append(std::begin(Args), std::end(Args));
3005  RealArgs.append(CapturedVars.begin(), CapturedVars.end());
3006 
3007  llvm::FunctionCallee RTLFn =
3008  RT.createRuntimeFunction(OMPRTL__kmpc_fork_call);
3009  CGF.EmitRuntimeCall(RTLFn, RealArgs);
3010  };
3011  auto &&ElseGen = [OutlinedFn, CapturedVars, RTLoc, Loc](CodeGenFunction &CGF,
3012  PrePostActionTy &) {
3013  CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
3014  llvm::Value *ThreadID = RT.getThreadID(CGF, Loc);
3015  // Build calls:
3016  // __kmpc_serialized_parallel(&Loc, GTid);
3017  llvm::Value *Args[] = {RTLoc, ThreadID};
3018  CGF.EmitRuntimeCall(
3019  RT.createRuntimeFunction(OMPRTL__kmpc_serialized_parallel), Args);
3020 
3021  // OutlinedFn(&GTid, &zero, CapturedStruct);
3022  Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
3023  /*Name*/ ".zero.addr");
3024  CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
3025  llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
3026  // ThreadId for serialized parallels is 0.
3027  OutlinedFnArgs.push_back(ZeroAddr.getPointer());
3028  OutlinedFnArgs.push_back(ZeroAddr.getPointer());
3029  OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
3030  RT.emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
3031 
3032  // __kmpc_end_serialized_parallel(&Loc, GTid);
3033  llvm::Value *EndArgs[] = {RT.emitUpdateLocation(CGF, Loc), ThreadID};
3034  CGF.EmitRuntimeCall(
3035  RT.createRuntimeFunction(OMPRTL__kmpc_end_serialized_parallel),
3036  EndArgs);
3037  };
3038  if (IfCond) {
3039  emitOMPIfClause(CGF, IfCond, ThenGen, ElseGen);
3040  } else {
3041  RegionCodeGenTy ThenRCG(ThenGen);
3042  ThenRCG(CGF);
3043  }
3044 }
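/// A rough sketch of the two lowerings produced above, assuming the standard
/// libomp entry points (argument names are illustrative only):
/// \code
/// // if-clause true or absent:
/// __kmpc_fork_call(&loc, /*argc=*/N, (kmpc_micro)outlined_fn, var1, ..., varN);
///
/// // if-clause false (serialized parallel):
/// __kmpc_serialized_parallel(&loc, gtid);
/// kmp_int32 zero = 0;
/// outlined_fn(&zero, &zero, var1, ..., varN); // thread id of a serialized
///                                             // parallel region is 0
/// __kmpc_end_serialized_parallel(&loc, gtid);
/// \endcode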
3045 
3046 // If we're inside an (outlined) parallel region, use the region info's
3047 // thread-ID variable (it is passed as the first argument of the outlined
3048 // function, as "kmp_int32 *gtid"). Otherwise, if we're not inside a parallel
3049 // region but in regular serial code, get the thread ID by calling kmp_int32
3050 // __kmpc_global_thread_num(ident_t *loc), stash this thread ID in a temporary
3051 // and return the address of that temp.
3053  SourceLocation Loc) {
3054  if (auto *OMPRegionInfo =
3055  dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
3056  if (OMPRegionInfo->getThreadIDVariable())
3057  return OMPRegionInfo->getThreadIDVariableLValue(CGF).getAddress();
3058 
3059  llvm::Value *ThreadID = getThreadID(CGF, Loc);
3060  QualType Int32Ty =
3061  CGF.getContext().getIntTypeForBitwidth(/*DestWidth*/ 32, /*Signed*/ true);
3062  Address ThreadIDTemp = CGF.CreateMemTemp(Int32Ty, /*Name*/ ".threadid_temp.");
3063  CGF.EmitStoreOfScalar(ThreadID,
3064  CGF.MakeAddrLValue(ThreadIDTemp, Int32Ty));
3065 
3066  return ThreadIDTemp;
3067 }
3068 
3070  llvm::Type *Ty, const llvm::Twine &Name, unsigned AddressSpace) {
3071  SmallString<256> Buffer;
3072  llvm::raw_svector_ostream Out(Buffer);
3073  Out << Name;
3074  StringRef RuntimeName = Out.str();
3075  auto &Elem = *InternalVars.try_emplace(RuntimeName, nullptr).first;
3076  if (Elem.second) {
3077  assert(Elem.second->getType()->getPointerElementType() == Ty &&
3078  "OMP internal variable has different type than requested");
3079  return &*Elem.second;
3080  }
3081 
3082  return Elem.second = new llvm::GlobalVariable(
3083  CGM.getModule(), Ty, /*IsConstant*/ false,
3084  llvm::GlobalValue::CommonLinkage, llvm::Constant::getNullValue(Ty),
3085  Elem.first(), /*InsertBefore=*/nullptr,
3086  llvm::GlobalValue::NotThreadLocal, AddressSpace);
3087 }
3088 
3090  std::string Prefix = Twine("gomp_critical_user_", CriticalName).str();
3091  std::string Name = getName({Prefix, "var"});
3092  return getOrCreateInternalVariable(KmpCriticalNameTy, Name);
3093 }
3094 
3095 namespace {
3096 /// Common pre(post)-action for different OpenMP constructs.
3097 class CommonActionTy final : public PrePostActionTy {
3098  llvm::FunctionCallee EnterCallee;
3099  ArrayRef<llvm::Value *> EnterArgs;
3100  llvm::FunctionCallee ExitCallee;
3101  ArrayRef<llvm::Value *> ExitArgs;
3102  bool Conditional;
3103  llvm::BasicBlock *ContBlock = nullptr;
3104 
3105 public:
3106  CommonActionTy(llvm::FunctionCallee EnterCallee,
3107  ArrayRef<llvm::Value *> EnterArgs,
3108  llvm::FunctionCallee ExitCallee,
3109  ArrayRef<llvm::Value *> ExitArgs, bool Conditional = false)
3110  : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
3111  ExitArgs(ExitArgs), Conditional(Conditional) {}
3112  void Enter(CodeGenFunction &CGF) override {
3113  llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs);
3114  if (Conditional) {
3115  llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes);
3116  auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
3117  ContBlock = CGF.createBasicBlock("omp_if.end");
3118  // Generate the branch (If-stmt)
3119  CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
3120  CGF.EmitBlock(ThenBlock);
3121  }
3122  }
3123  void Done(CodeGenFunction &CGF) {
3124  // Emit the rest of the blocks/branches.
3125  CGF.EmitBranch(ContBlock);
3126  CGF.EmitBlock(ContBlock, true);
3127  }
3128  void Exit(CodeGenFunction &CGF) override {
3129  CGF.EmitRuntimeCall(ExitCallee, ExitArgs);
3130  }
3131 };
3132 } // anonymous namespace
3133 
3135  StringRef CriticalName,
3136  const RegionCodeGenTy &CriticalOpGen,
3137  SourceLocation Loc, const Expr *Hint) {
3138  // __kmpc_critical[_with_hint](ident_t *, gtid, Lock[, hint]);
3139  // CriticalOpGen();
3140  // __kmpc_end_critical(ident_t *, gtid, Lock);
3141  // Prepare arguments and build a call to __kmpc_critical
3142  if (!CGF.HaveInsertPoint())
3143  return;
3144  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
3145  getCriticalRegionLock(CriticalName)};
3146  llvm::SmallVector<llvm::Value *, 4> EnterArgs(std::begin(Args),
3147  std::end(Args));
3148  if (Hint) {
3149  EnterArgs.push_back(CGF.Builder.CreateIntCast(
3150  CGF.EmitScalarExpr(Hint), CGM.IntPtrTy, /*isSigned=*/false));
3151  }
3152  CommonActionTy Action(
3156  CriticalOpGen.setAction(Action);
3157  emitInlinedDirective(CGF, OMPD_critical, CriticalOpGen);
3158 }
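/// Sketch of the emitted sequence for a named critical region, assuming the
/// standard libomp entry points (Lock is the internal kmp_critical_name
/// variable returned by getCriticalRegionLock):
/// \code
/// __kmpc_critical(&loc, gtid, &Lock);       // or __kmpc_critical_with_hint(
///                                           //   &loc, gtid, &Lock, hint)
/// // critical body
/// __kmpc_end_critical(&loc, gtid, &Lock);
/// \endcode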
3159 
3161  const RegionCodeGenTy &MasterOpGen,
3162  SourceLocation Loc) {
3163  if (!CGF.HaveInsertPoint())
3164  return;
3165  // if(__kmpc_master(ident_t *, gtid)) {
3166  // MasterOpGen();
3167  // __kmpc_end_master(ident_t *, gtid);
3168  // }
3169  // Prepare arguments and build a call to __kmpc_master
3170  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
3171  CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_master), Args,
3173  /*Conditional=*/true);
3174  MasterOpGen.setAction(Action);
3175  emitInlinedDirective(CGF, OMPD_master, MasterOpGen);
3176  Action.Done(CGF);
3177 }
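/// With Conditional=true the CommonActionTy above produces exactly the shape
/// sketched in the comment; roughly:
/// \code
/// if (__kmpc_master(&loc, gtid)) {   // Enter(): call + branch on the result
///   // master body
///   __kmpc_end_master(&loc, gtid);   // Exit()
/// }                                  // Done(): emits the continuation block
/// \endcode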
3178 
3180  SourceLocation Loc) {
3181  if (!CGF.HaveInsertPoint())
3182  return;
3183  // Build call __kmpc_omp_taskyield(loc, thread_id, 0);
3184  llvm::Value *Args[] = {
3185  emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
3186  llvm::ConstantInt::get(CGM.IntTy, /*V=*/0, /*isSigned=*/true)};
3188  if (auto *Region = dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
3189  Region->emitUntiedSwitch(CGF);
3190 }
3191 
3193  const RegionCodeGenTy &TaskgroupOpGen,
3194  SourceLocation Loc) {
3195  if (!CGF.HaveInsertPoint())
3196  return;
3197  // __kmpc_taskgroup(ident_t *, gtid);
3198  // TaskgroupOpGen();
3199  // __kmpc_end_taskgroup(ident_t *, gtid);
3200  // Prepare arguments and build a call to __kmpc_taskgroup
3201  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
3202  CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_taskgroup), Args,
3204  Args);
3205  TaskgroupOpGen.setAction(Action);
3206  emitInlinedDirective(CGF, OMPD_taskgroup, TaskgroupOpGen);
3207 }
3208 
3209 /// Given an array of pointers to variables, project the address of a
3210 /// given variable.
3212  unsigned Index, const VarDecl *Var) {
3213  // Pull out the pointer to the variable.
3214  Address PtrAddr = CGF.Builder.CreateConstArrayGEP(Array, Index);
3215  llvm::Value *Ptr = CGF.Builder.CreateLoad(PtrAddr);
3216 
3217  Address Addr = Address(Ptr, CGF.getContext().getDeclAlign(Var));
3218  Addr = CGF.Builder.CreateElementBitCast(
3219  Addr, CGF.ConvertTypeForMem(Var->getType()));
3220  return Addr;
3221 }
3222 
3224  CodeGenModule &CGM, llvm::Type *ArgsType,
3225  ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs,
3226  ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps,
3227  SourceLocation Loc) {
3228  ASTContext &C = CGM.getContext();
3229  // void copy_func(void *LHSArg, void *RHSArg);
3230  FunctionArgList Args;
3231  ImplicitParamDecl LHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
3233  ImplicitParamDecl RHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
3235  Args.push_back(&LHSArg);
3236  Args.push_back(&RHSArg);
3237  const auto &CGFI =
3239  std::string Name =
3240  CGM.getOpenMPRuntime().getName({"omp", "copyprivate", "copy_func"});
3241  auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
3243  &CGM.getModule());
3244  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3245  Fn->setDoesNotRecurse();
3246  CodeGenFunction CGF(CGM);
3247  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3248  // Dest = (void*[n])(LHSArg);
3249  // Src = (void*[n])(RHSArg);
3251  CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
3252  ArgsType), CGF.getPointerAlign());
3254  CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
3255  ArgsType), CGF.getPointerAlign());
3256  // *(Type0*)Dst[0] = *(Type0*)Src[0];
3257  // *(Type1*)Dst[1] = *(Type1*)Src[1];
3258  // ...
3259  // *(Typen*)Dst[n] = *(Typen*)Src[n];
3260  for (unsigned I = 0, E = AssignmentOps.size(); I < E; ++I) {
3261  const auto *DestVar =
3262  cast<VarDecl>(cast<DeclRefExpr>(DestExprs[I])->getDecl());
3263  Address DestAddr = emitAddrOfVarFromArray(CGF, LHS, I, DestVar);
3264 
3265  const auto *SrcVar =
3266  cast<VarDecl>(cast<DeclRefExpr>(SrcExprs[I])->getDecl());
3267  Address SrcAddr = emitAddrOfVarFromArray(CGF, RHS, I, SrcVar);
3268 
3269  const auto *VD = cast<DeclRefExpr>(CopyprivateVars[I])->getDecl();
3270  QualType Type = VD->getType();
3271  CGF.EmitOMPCopy(Type, DestAddr, SrcAddr, DestVar, SrcVar, AssignmentOps[I]);
3272  }
3273  CGF.FinishFunction();
3274  return Fn;
3275 }
3276 
3278  const RegionCodeGenTy &SingleOpGen,
3279  SourceLocation Loc,
3280  ArrayRef<const Expr *> CopyprivateVars,
3281  ArrayRef<const Expr *> SrcExprs,
3282  ArrayRef<const Expr *> DstExprs,
3283  ArrayRef<const Expr *> AssignmentOps) {
3284  if (!CGF.HaveInsertPoint())
3285  return;
3286  assert(CopyprivateVars.size() == SrcExprs.size() &&
3287  CopyprivateVars.size() == DstExprs.size() &&
3288  CopyprivateVars.size() == AssignmentOps.size());
3289  ASTContext &C = CGM.getContext();
3290  // int32 did_it = 0;
3291  // if(__kmpc_single(ident_t *, gtid)) {
3292  // SingleOpGen();
3293  // __kmpc_end_single(ident_t *, gtid);
3294  // did_it = 1;
3295  // }
3296  // call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>,
3297  // <copy_func>, did_it);
3298 
3299  Address DidIt = Address::invalid();
3300  if (!CopyprivateVars.empty()) {
3301  // int32 did_it = 0;
3302  QualType KmpInt32Ty =
3303  C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
3304  DidIt = CGF.CreateMemTemp(KmpInt32Ty, ".omp.copyprivate.did_it");
3305  CGF.Builder.CreateStore(CGF.Builder.getInt32(0), DidIt);
3306  }
3307  // Prepare arguments and build a call to __kmpc_single
3308  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
3309  CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_single), Args,
3311  /*Conditional=*/true);
3312  SingleOpGen.setAction(Action);
3313  emitInlinedDirective(CGF, OMPD_single, SingleOpGen);
3314  if (DidIt.isValid()) {
3315  // did_it = 1;
3316  CGF.Builder.CreateStore(CGF.Builder.getInt32(1), DidIt);
3317  }
3318  Action.Done(CGF);
3319  // call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>,
3320  // <copy_func>, did_it);
3321  if (DidIt.isValid()) {
3322  llvm::APInt ArraySize(/*unsigned int numBits=*/32, CopyprivateVars.size());
3323  QualType CopyprivateArrayTy =
3324  C.getConstantArrayType(C.VoidPtrTy, ArraySize, ArrayType::Normal,
3325  /*IndexTypeQuals=*/0);
3326  // Create a list of all private variables for copyprivate.
3327  Address CopyprivateList =
3328  CGF.CreateMemTemp(CopyprivateArrayTy, ".omp.copyprivate.cpr_list");
3329  for (unsigned I = 0, E = CopyprivateVars.size(); I < E; ++I) {
3330  Address Elem = CGF.Builder.CreateConstArrayGEP(CopyprivateList, I);
3331  CGF.Builder.CreateStore(
3333  CGF.EmitLValue(CopyprivateVars[I]).getPointer(), CGF.VoidPtrTy),
3334  Elem);
3335  }
3336  // Build a function that copies private values from the single region to all
3337  // other threads in the corresponding parallel region.
3339  CGM, CGF.ConvertTypeForMem(CopyprivateArrayTy)->getPointerTo(),
3340  CopyprivateVars, SrcExprs, DstExprs, AssignmentOps, Loc);
3341  llvm::Value *BufSize = CGF.getTypeSize(CopyprivateArrayTy);
3342  Address CL =
3343  CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(CopyprivateList,
3344  CGF.VoidPtrTy);
3345  llvm::Value *DidItVal = CGF.Builder.CreateLoad(DidIt);
3346  llvm::Value *Args[] = {
3347  emitUpdateLocation(CGF, Loc), // ident_t *<loc>
3348  getThreadID(CGF, Loc), // i32 <gtid>
3349  BufSize, // size_t <buf_size>
3350  CL.getPointer(), // void *<copyprivate list>
3351  CpyFn, // void (*) (void *, void *) <copy_func>
3352  DidItVal // i32 did_it
3353  };
3355  }
3356 }
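/// Putting the pieces together, a 'single' with copyprivate variables is
/// lowered roughly as follows (standard libomp entry points assumed):
/// \code
/// kmp_int32 did_it = 0;
/// if (__kmpc_single(&loc, gtid)) {
///   // single body
///   __kmpc_end_single(&loc, gtid);
///   did_it = 1;
/// }
/// __kmpc_copyprivate(&loc, gtid, buf_size, cpr_list, copy_func, did_it);
/// \endcode
/// where cpr_list is the array of pointers to the copyprivate variables and
/// copy_func is the copy helper built above.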
3357 
3359  const RegionCodeGenTy &OrderedOpGen,
3360  SourceLocation Loc, bool IsThreads) {
3361  if (!CGF.HaveInsertPoint())
3362  return;
3363  // __kmpc_ordered(ident_t *, gtid);
3364  // OrderedOpGen();
3365  // __kmpc_end_ordered(ident_t *, gtid);
3366  // Prepare arguments and build a call to __kmpc_ordered
3367  if (IsThreads) {
3368  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
3369  CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_ordered), Args,
3371  Args);
3372  OrderedOpGen.setAction(Action);
3373  emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
3374  return;
3375  }
3376  emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
3377 }
3378 
3380  unsigned Flags;
3381  if (Kind == OMPD_for)
3382  Flags = OMP_IDENT_BARRIER_IMPL_FOR;
3383  else if (Kind == OMPD_sections)
3384  Flags = OMP_IDENT_BARRIER_IMPL_SECTIONS;
3385  else if (Kind == OMPD_single)
3386  Flags = OMP_IDENT_BARRIER_IMPL_SINGLE;
3387  else if (Kind == OMPD_barrier)
3388  Flags = OMP_IDENT_BARRIER_EXPL;
3389  else
3390  Flags = OMP_IDENT_BARRIER_IMPL;
3391  return Flags;
3392 }
3393 
3395  CodeGenFunction &CGF, const OMPLoopDirective &S,
3396  OpenMPScheduleClauseKind &ScheduleKind, const Expr *&ChunkExpr) const {
3397  // Check if the loop directive is actually a doacross loop directive. In this
3398  // case, choose the static, 1 schedule.
3399  if (llvm::any_of(
3401  [](const OMPOrderedClause *C) { return C->getNumForLoops(); })) {
3402  ScheduleKind = OMPC_SCHEDULE_static;
3403  // Chunk size is 1 in this case.
3404  llvm::APInt ChunkSize(32, 1);
3405  ChunkExpr = IntegerLiteral::Create(
3406  CGF.getContext(), ChunkSize,
3407  CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
3408  SourceLocation());
3409  }
3410 }
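/// For illustration (hypothetical source): a doacross loop such as
/// \code
/// #pragma omp for ordered(2)
/// \endcode
/// carries an 'ordered' clause with a loop count, so the default schedule
/// chosen here is static with a chunk size of 1.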
3411 
3413  OpenMPDirectiveKind Kind, bool EmitChecks,
3414  bool ForceSimpleCall) {
3415  if (!CGF.HaveInsertPoint())
3416  return;
3417  // Build call __kmpc_cancel_barrier(loc, thread_id);
3418  // Build call __kmpc_barrier(loc, thread_id);
3419  unsigned Flags = getDefaultFlagsForBarriers(Kind);
3420  // Build call __kmpc_cancel_barrier(loc, thread_id) or __kmpc_barrier(loc,
3421  // thread_id);
3422  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
3423  getThreadID(CGF, Loc)};
3424  if (auto *OMPRegionInfo =
3425  dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
3426  if (!ForceSimpleCall && OMPRegionInfo->hasCancel()) {
3427  llvm::Value *Result = CGF.EmitRuntimeCall(
3429  if (EmitChecks) {
3430  // if (__kmpc_cancel_barrier()) {
3431  // exit from construct;
3432  // }
3433  llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit");
3434  llvm::BasicBlock *ContBB = CGF.createBasicBlock(".cancel.continue");
3435  llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result);
3436  CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
3437  CGF.EmitBlock(ExitBB);
3438  // exit from construct;
3439  CodeGenFunction::JumpDest CancelDestination =
3440  CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
3441  CGF.EmitBranchThroughCleanup(CancelDestination);
3442  CGF.EmitBlock(ContBB, /*IsFinished=*/true);
3443  }
3444  return;
3445  }
3446  }
3448 }
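/// Inside a cancellable region the barrier result is checked; a rough sketch
/// of the emitted control flow:
/// \code
/// if (__kmpc_cancel_barrier(&loc, gtid)) {
///   // .cancel.exit: branch through cleanups to the cancellation destination
///   // of the enclosing construct
/// }
/// // .cancel.continue
/// // Otherwise a plain __kmpc_barrier(&loc, gtid) is emitted.
/// \endcode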
3449 
3450 /// Map the OpenMP loop schedule to the runtime enumeration.
3452  bool Chunked, bool Ordered) {
3453  switch (ScheduleKind) {
3454  case OMPC_SCHEDULE_static:
3455  return Chunked ? (Ordered ? OMP_ord_static_chunked : OMP_sch_static_chunked)
3456  : (Ordered ? OMP_ord_static : OMP_sch_static);
3457  case OMPC_SCHEDULE_dynamic:
3459  case OMPC_SCHEDULE_guided:
3461  case OMPC_SCHEDULE_runtime:
3462  return Ordered ? OMP_ord_runtime : OMP_sch_runtime;
3463  case OMPC_SCHEDULE_auto:
3464  return Ordered ? OMP_ord_auto : OMP_sch_auto;
3465  case OMPC_SCHEDULE_unknown:
3466  assert(!Chunked && "chunk was specified but schedule kind not known");
3467  return Ordered ? OMP_ord_static : OMP_sch_static;
3468  }
3469  llvm_unreachable("Unexpected runtime schedule");
3470 }
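/// Examples of the mapping (non-exhaustive; names as used in this file):
/// \code
/// schedule(static)             -> OMP_sch_static
/// schedule(static, N)          -> OMP_sch_static_chunked
/// schedule(static) + ordered   -> OMP_ord_static
/// schedule(runtime)            -> OMP_sch_runtime
/// schedule(auto)               -> OMP_sch_auto
/// \endcode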
3471 
3472 /// Map the OpenMP distribute schedule to the runtime enumeration.
3473 static OpenMPSchedType
3475  // Only static is allowed for dist_schedule.
3477 }
3478 
3480  bool Chunked) const {
3481  OpenMPSchedType Schedule =
3482  getRuntimeSchedule(ScheduleKind, Chunked, /*Ordered=*/false);
3483  return Schedule == OMP_sch_static;
3484 }
3485 
3487  OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const {
3488  OpenMPSchedType Schedule = getRuntimeSchedule(ScheduleKind, Chunked);
3489  return Schedule == OMP_dist_sch_static;
3490 }
3491 
3493  bool Chunked) const {
3494  OpenMPSchedType Schedule =
3495  getRuntimeSchedule(ScheduleKind, Chunked, /*Ordered=*/false);
3496  return Schedule == OMP_sch_static_chunked;
3497 }
3498 
3500  OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const {
3501  OpenMPSchedType Schedule = getRuntimeSchedule(ScheduleKind, Chunked);
3502  return Schedule == OMP_dist_sch_static_chunked;
3503 }
3504 
3506  OpenMPSchedType Schedule =
3507  getRuntimeSchedule(ScheduleKind, /*Chunked=*/false, /*Ordered=*/false);
3508  assert(Schedule != OMP_sch_static_chunked && "cannot be chunked here");
3509  return Schedule != OMP_sch_static;
3510 }
3511 
3515  int Modifier = 0;
3516  switch (M1) {
3517  case OMPC_SCHEDULE_MODIFIER_monotonic:
3518  Modifier = OMP_sch_modifier_monotonic;
3519  break;
3520  case OMPC_SCHEDULE_MODIFIER_nonmonotonic:
3521  Modifier = OMP_sch_modifier_nonmonotonic;
3522  break;
3523  case OMPC_SCHEDULE_MODIFIER_simd:
3524  if (Schedule == OMP_sch_static_chunked)
3526  break;
3529  break;
3530  }
3531  switch (M2) {
3532  case OMPC_SCHEDULE_MODIFIER_monotonic:
3533  Modifier = OMP_sch_modifier_monotonic;
3534  break;
3535  case OMPC_SCHEDULE_MODIFIER_nonmonotonic:
3536  Modifier = OMP_sch_modifier_nonmonotonic;
3537  break;
3538  case OMPC_SCHEDULE_MODIFIER_simd:
3539  if (Schedule == OMP_sch_static_chunked)
3541  break;
3544  break;
3545  }
3546  return Schedule | Modifier;
3547 }
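/// The selected modifier bit is simply OR-ed into the schedule value passed to
/// the runtime; for example (enumerator names as used in this file):
/// \code
/// // schedule(monotonic: static, N)
/// int Sched = addMonoNonMonoModifier(OMP_sch_static_chunked,
///                                    OMPC_SCHEDULE_MODIFIER_monotonic,
///                                    OMPC_SCHEDULE_MODIFIER_unknown);
/// // Sched == (OMP_sch_static_chunked | OMP_sch_modifier_monotonic)
/// \endcode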
3548 
3550  CodeGenFunction &CGF, SourceLocation Loc,
3551  const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned,
3552  bool Ordered, const DispatchRTInput &DispatchValues) {
3553  if (!CGF.HaveInsertPoint())
3554  return;
3556  ScheduleKind.Schedule, DispatchValues.Chunk != nullptr, Ordered);
3557  assert(Ordered ||
3558  (Schedule != OMP_sch_static && Schedule != OMP_sch_static_chunked &&
3559  Schedule != OMP_ord_static && Schedule != OMP_ord_static_chunked &&
3560  Schedule != OMP_sch_static_balanced_chunked));
3561  // Call __kmpc_dispatch_init(
3562  // ident_t *loc, kmp_int32 tid, kmp_int32 schedule,
3563  // kmp_int[32|64] lower, kmp_int[32|64] upper,
3564  // kmp_int[32|64] stride, kmp_int[32|64] chunk);
3565 
3566  // If the Chunk was not specified in the clause, use the default value 1.
3567  llvm::Value *Chunk = DispatchValues.Chunk ? DispatchValues.Chunk
3568  : CGF.Builder.getIntN(IVSize, 1);
3569  llvm::Value *Args[] = {
3570  emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
3571  CGF.Builder.getInt32(addMonoNonMonoModifier(
3572  Schedule, ScheduleKind.M1, ScheduleKind.M2)), // Schedule type
3573  DispatchValues.LB, // Lower
3574  DispatchValues.UB, // Upper
3575  CGF.Builder.getIntN(IVSize, 1), // Stride
3576  Chunk // Chunk
3577  };
3578  CGF.EmitRuntimeCall(createDispatchInitFunction(IVSize, IVSigned), Args);
3579 }
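/// For a 32-bit signed induction variable this reduces to a single runtime
/// call; sketch (the _4/_4u/_8/_8u suffix is selected from IVSize/IVSigned):
/// \code
/// __kmpc_dispatch_init_4(&loc, gtid, schedule_with_modifiers,
///                        lb, ub, /*stride=*/1, chunk /* 1 if unspecified */);
/// \endcode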
3580 
3582  CodeGenFunction &CGF, llvm::Value *UpdateLocation, llvm::Value *ThreadId,
3583  llvm::FunctionCallee ForStaticInitFunction, OpenMPSchedType Schedule,
3585  const CGOpenMPRuntime::StaticRTInput &Values) {
3586  if (!CGF.HaveInsertPoint())
3587  return;
3588 
3589  assert(!Values.Ordered);
3590  assert(Schedule == OMP_sch_static || Schedule == OMP_sch_static_chunked ||
3591  Schedule == OMP_sch_static_balanced_chunked ||
3592  Schedule == OMP_ord_static || Schedule == OMP_ord_static_chunked ||
3593  Schedule == OMP_dist_sch_static ||
3594  Schedule == OMP_dist_sch_static_chunked);
3595 
3596  // Call __kmpc_for_static_init(
3597  // ident_t *loc, kmp_int32 tid, kmp_int32 schedtype,
3598  // kmp_int32 *p_lastiter, kmp_int[32|64] *p_lower,
3599  // kmp_int[32|64] *p_upper, kmp_int[32|64] *p_stride,
3600  // kmp_int[32|64] incr, kmp_int[32|64] chunk);
3601  llvm::Value *Chunk = Values.Chunk;
3602  if (Chunk == nullptr) {
3603  assert((Schedule == OMP_sch_static || Schedule == OMP_ord_static ||
3604  Schedule == OMP_dist_sch_static) &&
3605  "expected static non-chunked schedule");
3606  // If the Chunk was not specified in the clause, use the default value 1.
3607  Chunk = CGF.Builder.getIntN(Values.IVSize, 1);
3608  } else {
3609  assert((Schedule == OMP_sch_static_chunked ||
3610  Schedule == OMP_sch_static_balanced_chunked ||
3611  Schedule == OMP_ord_static_chunked ||
3612  Schedule == OMP_dist_sch_static_chunked) &&
3613  "expected static chunked schedule");
3614  }
3615  llvm::Value *Args[] = {
3616  UpdateLocation,
3617  ThreadId,
3618  CGF.Builder.getInt32(addMonoNonMonoModifier(Schedule, M1,
3619  M2)), // Schedule type
3620  Values.IL.getPointer(), // &isLastIter
3621  Values.LB.getPointer(), // &LB
3622  Values.UB.getPointer(), // &UB
3623  Values.ST.getPointer(), // &Stride
3624  CGF.Builder.getIntN(Values.IVSize, 1), // Incr
3625  Chunk // Chunk
3626  };
3627  CGF.EmitRuntimeCall(ForStaticInitFunction, Args);
3628 }
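/// Sketch of the emitted call for a 32-bit induction variable; the pointer
/// arguments are in/out values updated by the runtime:
/// \code
/// __kmpc_for_static_init_4(&loc, gtid, schedule_with_modifiers,
///                          &is_last, &lb, &ub, &stride,
///                          /*incr=*/1, chunk /* 1 for non-chunked schedules */);
/// \endcode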
3629 
3631  SourceLocation Loc,
3632  OpenMPDirectiveKind DKind,
3633  const OpenMPScheduleTy &ScheduleKind,
3634  const StaticRTInput &Values) {
3635  OpenMPSchedType ScheduleNum = getRuntimeSchedule(
3636  ScheduleKind.Schedule, Values.Chunk != nullptr, Values.Ordered);
3637  assert(isOpenMPWorksharingDirective(DKind) &&
3638  "Expected loop-based or sections-based directive.");
3639  llvm::Value *UpdatedLocation = emitUpdateLocation(CGF, Loc,
3640  isOpenMPLoopDirective(DKind)
3641  ? OMP_IDENT_WORK_LOOP
3642  : OMP_IDENT_WORK_SECTIONS);
3643  llvm::Value *ThreadId = getThreadID(CGF, Loc);
3644  llvm::FunctionCallee StaticInitFunction =
3646  emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
3647  ScheduleNum, ScheduleKind.M1, ScheduleKind.M2, Values);
3648 }
3649 
3651  CodeGenFunction &CGF, SourceLocation Loc,
3652  OpenMPDistScheduleClauseKind SchedKind,
3653  const CGOpenMPRuntime::StaticRTInput &Values) {
3654  OpenMPSchedType ScheduleNum =
3655  getRuntimeSchedule(SchedKind, Values.Chunk != nullptr);
3656  llvm::Value *UpdatedLocation =
3657  emitUpdateLocation(CGF, Loc, OMP_IDENT_WORK_DISTRIBUTE);
3658  llvm::Value *ThreadId = getThreadID(CGF, Loc);
3659  llvm::FunctionCallee StaticInitFunction =
3660  createForStaticInitFunction(Values.IVSize, Values.IVSigned);
3661  emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
3662  ScheduleNum, OMPC_SCHEDULE_MODIFIER_unknown,
3664 }
3665 
3667  SourceLocation Loc,
3668  OpenMPDirectiveKind DKind) {
3669  if (!CGF.HaveInsertPoint())
3670  return;
3671  // Call __kmpc_for_static_fini(ident_t *loc, kmp_int32 tid);
3672  llvm::Value *Args[] = {
3673  emitUpdateLocation(CGF, Loc,
3675  ? OMP_IDENT_WORK_DISTRIBUTE
3676  : isOpenMPLoopDirective(DKind)
3677  ? OMP_IDENT_WORK_LOOP
3678  : OMP_IDENT_WORK_SECTIONS),
3679  getThreadID(CGF, Loc)};
3681  Args);
3682 }
3683 
3685  SourceLocation Loc,
3686  unsigned IVSize,
3687  bool IVSigned) {
3688  if (!CGF.HaveInsertPoint())
3689  return;
3690  // Call __kmpc_for_dynamic_fini_(4|8)[u](ident_t *loc, kmp_int32 tid);
3691  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
3692  CGF.EmitRuntimeCall(createDispatchFiniFunction(IVSize, IVSigned), Args);
3693 }
3694 
3696  SourceLocation Loc, unsigned IVSize,
3697  bool IVSigned, Address IL,
3698  Address LB, Address UB,
3699  Address ST) {
3700  // Call __kmpc_dispatch_next(
3701  // ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
3702  // kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
3703  // kmp_int[32|64] *p_stride);
3704  llvm::Value *Args[] = {
3705  emitUpdateLocation(CGF, Loc),
3706  getThreadID(CGF, Loc),
3707  IL.getPointer(), // &isLastIter
3708  LB.getPointer(), // &Lower
3709  UB.getPointer(), // &Upper
3710  ST.getPointer() // &Stride
3711  };
3712  llvm::Value *Call =
3713  CGF.EmitRuntimeCall(createDispatchNextFunction(IVSize, IVSigned), Args);
3714  return CGF.EmitScalarConversion(
3715  Call, CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/1),
3716  CGF.getContext().BoolTy, Loc);
3717 }
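/// A dynamically scheduled loop typically drives its iteration space off the
/// result of this call, roughly:
/// \code
/// while (__kmpc_dispatch_next_4(&loc, gtid, &is_last, &lb, &ub, &stride)) {
///   // execute iterations lb..ub of the normalized loop
/// }
/// \endcode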
3718 
3720  llvm::Value *NumThreads,
3721  SourceLocation Loc) {
3722  if (!CGF.HaveInsertPoint())
3723  return;
3724  // Build call __kmpc_push_num_threads(&loc, global_tid, num_threads)
3725  llvm::Value *Args[] = {
3726  emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
3727  CGF.Builder.CreateIntCast(NumThreads, CGF.Int32Ty, /*isSigned*/ true)};
3729  Args);
3730 }
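/// For illustration: with '#pragma omp parallel num_threads(N)' this call is
/// emitted before the fork of the parallel region, roughly:
/// \code
/// __kmpc_push_num_threads(&loc, gtid, N);
/// __kmpc_fork_call(&loc, ...);
/// \endcode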
3731 
3733  OpenMPProcBindClauseKind ProcBind,
3734  SourceLocation Loc) {
3735  if (!CGF.HaveInsertPoint())
3736  return;
3737  // Constants for proc bind value accepted by the runtime.
3738  enum ProcBindTy {
3739  ProcBindFalse = 0,
3740  ProcBindTrue,
3741  ProcBindMaster,
3742  ProcBindClose,
3743  ProcBindSpread,
3744  ProcBindIntel,
3745  ProcBindDefault
3746  } RuntimeProcBind;
3747  switch (ProcBind) {
3748  case OMPC_PROC_BIND_master:
3749  RuntimeProcBind = ProcBindMaster;
3750  break;
3751  case OMPC_PROC_BIND_close:
3752  RuntimeProcBind = ProcBindClose;
3753  break;
3754  case OMPC_PROC_BIND_spread:
3755  RuntimeProcBind = ProcBindSpread;
3756  break;
3758  llvm_unreachable("Unsupported proc_bind value.");
3759  }
3760  // Build call __kmpc_push_proc_bind(&loc, global_tid, proc_bind)
3761  llvm::Value *Args[] = {
3762  emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
3763  llvm::ConstantInt::get(CGM.IntTy, RuntimeProcBind, /*isSigned=*/true)};
3765 }
3766 
3767 void CGOpenMPRuntime::emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *>,
3768  SourceLocation Loc) {
3769  if (!CGF.HaveInsertPoint())
3770  return;
3771  // Build call void __kmpc_flush(ident_t *loc)
3773  emitUpdateLocation(CGF, Loc));
3774 }
3775 
3776 namespace {
3777 /// Indexes of fields for type kmp_task_t.
3779  /// List of shared variables.
3780  KmpTaskTShareds,
3781  /// Task routine.
3782  KmpTaskTRoutine,
3783  /// Partition id for the untied tasks.
3784  KmpTaskTPartId,
3785  /// Function with call of destructors for private variables.
3786  Data1,
3787  /// Task priority.
3788  Data2,
3789  /// (Taskloops only) Lower bound.
3790  KmpTaskTLowerBound,
3791  /// (Taskloops only) Upper bound.
3792  KmpTaskTUpperBound,
3793  /// (Taskloops only) Stride.
3794  KmpTaskTStride,
3795  /// (Taskloops only) Is last iteration flag.
3796  KmpTaskTLastIter,
3797  /// (Taskloops only) Reduction data.
3798  KmpTaskTReductions,
3799 };
3800 } // anonymous namespace
3801 
3802 bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::empty() const {
3803  return OffloadEntriesTargetRegion.empty() &&
3804  OffloadEntriesDeviceGlobalVar.empty();
3805 }
3806 
3807 /// Initialize target region entry.
3808 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
3809  initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
3810  StringRef ParentName, unsigned LineNum,
3811  unsigned Order) {
3812  assert(CGM.getLangOpts().OpenMPIsDevice && "Initialization of entries is "
3813  "only required for the device "
3814  "code generation.");
3815  OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum] =
3816  OffloadEntryInfoTargetRegion(Order, /*Addr=*/nullptr, /*ID=*/nullptr,
3817  OMPTargetRegionEntryTargetRegion);
3818  ++OffloadingEntriesNum;
3819 }
3820 
3821 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
3822  registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
3823  StringRef ParentName, unsigned LineNum,
3824  llvm::Constant *Addr, llvm::Constant *ID,
3825  OMPTargetRegionEntryKind Flags) {
3826  // If we are emitting code for a target, the entry is already initialized;
3827  // it only has to be registered.
3828  if (CGM.getLangOpts().OpenMPIsDevice) {
3829  if (!hasTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum)) {
3830  unsigned DiagID = CGM.getDiags().getCustomDiagID(
3832  "Unable to find target region on line '%0' in the device code.");
3833  CGM.getDiags().Report(DiagID) << LineNum;
3834  return;
3835  }
3836  auto &Entry =
3837  OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum];
3838  assert(Entry.isValid() && "Entry not initialized!");
3839  Entry.setAddress(Addr);
3840  Entry.setID(ID);
3841  Entry.setFlags(Flags);
3842  } else {
3843  OffloadEntryInfoTargetRegion Entry(OffloadingEntriesNum, Addr, ID, Flags);
3844  OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum] = Entry;
3845  ++OffloadingEntriesNum;
3846  }
3847 }
3848 
3849 bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::hasTargetRegionEntryInfo(
3850  unsigned DeviceID, unsigned FileID, StringRef ParentName,
3851  unsigned LineNum) const {
3852  auto PerDevice = OffloadEntriesTargetRegion.find(DeviceID);
3853  if (PerDevice == OffloadEntriesTargetRegion.end())
3854  return false;
3855  auto PerFile = PerDevice->second.find(FileID);
3856  if (PerFile == PerDevice->second.end())
3857  return false;
3858  auto PerParentName = PerFile->second.find(ParentName);
3859  if (PerParentName == PerFile->second.end())
3860  return false;
3861  auto PerLine = PerParentName->second.find(LineNum);
3862  if (PerLine == PerParentName->second.end())
3863  return false;
3864  // Fail if this entry is already registered.
3865  if (PerLine->second.getAddress() || PerLine->second.getID())
3866  return false;
3867  return true;
3868 }
3869 
3870 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::actOnTargetRegionEntriesInfo(
3871  const OffloadTargetRegionEntryInfoActTy &Action) {
3872  // Scan all target region entries and perform the provided action.
3873  for (const auto &D : OffloadEntriesTargetRegion)
3874  for (const auto &F : D.second)
3875  for (const auto &P : F.second)
3876  for (const auto &L : P.second)
3877  Action(D.first, F.first, P.first(), L.first, L.second);
3878 }
3879 
3880 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
3881  initializeDeviceGlobalVarEntryInfo(StringRef Name,
3882  OMPTargetGlobalVarEntryKind Flags,
3883  unsigned Order) {
3884  assert(CGM.getLangOpts().OpenMPIsDevice && "Initialization of entries is "
3885  "only required for the device "
3886  "code generation.");
3887  OffloadEntriesDeviceGlobalVar.try_emplace(Name, Order, Flags);
3888  ++OffloadingEntriesNum;
3889 }
3890 
3891 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
3892  registerDeviceGlobalVarEntryInfo(StringRef VarName, llvm::Constant *Addr,
3893  CharUnits VarSize,
3894  OMPTargetGlobalVarEntryKind Flags,
3895  llvm::GlobalValue::LinkageTypes Linkage) {
3896  if (CGM.getLangOpts().OpenMPIsDevice) {
3897  auto &Entry = OffloadEntriesDeviceGlobalVar[VarName];
3898  assert(Entry.isValid() && Entry.getFlags() == Flags &&
3899  "Entry not initialized!");
3900  assert((!Entry.getAddress() || Entry.getAddress() == Addr) &&
3901  "Resetting with the new address.");
3902  if (Entry.getAddress() && hasDeviceGlobalVarEntryInfo(VarName)) {
3903  if (Entry.getVarSize().isZero()) {
3904  Entry.setVarSize(VarSize);
3905  Entry.setLinkage(Linkage);
3906  }
3907  return;
3908  }
3909  Entry.setVarSize(VarSize);
3910  Entry.setLinkage(Linkage);
3911  Entry.setAddress(Addr);
3912  } else {
3913  if (hasDeviceGlobalVarEntryInfo(VarName)) {
3914  auto &Entry = OffloadEntriesDeviceGlobalVar[VarName];
3915  assert(Entry.isValid() && Entry.getFlags() == Flags &&
3916  "Entry not initialized!");
3917  assert((!Entry.getAddress() || Entry.getAddress() == Addr) &&
3918  "Resetting with the new address.");
3919  if (Entry.getVarSize().isZero()) {
3920  Entry.setVarSize(VarSize);
3921  Entry.setLinkage(Linkage);
3922  }
3923  return;
3924  }
3925  OffloadEntriesDeviceGlobalVar.try_emplace(
3926  VarName, OffloadingEntriesNum, Addr, VarSize, Flags, Linkage);
3927  ++OffloadingEntriesNum;
3928  }
3929 }
3930 
3931 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
3932  actOnDeviceGlobalVarEntriesInfo(
3933  const OffloadDeviceGlobalVarEntryInfoActTy &Action) {
3934  // Scan all device global variable entries and perform the provided action.
3935  for (const auto &E : OffloadEntriesDeviceGlobalVar)
3936  Action(E.getKey(), E.getValue());
3937 }
3938 
3939 llvm::Function *
3941  // If we don't have entries or if we are emitting code for the device, we
3942  // don't need to do anything.
3943  if (CGM.getLangOpts().OpenMPIsDevice || OffloadEntriesInfoManager.empty())
3944  return nullptr;
3945 
3946  llvm::Module &M = CGM.getModule();
3947  ASTContext &C = CGM.getContext();
3948 
3949  // Get the list of devices we care about.
3950  const std::vector<llvm::Triple> &Devices = CGM.getLangOpts().OMPTargetTriples;
3951 
3952  // We should be creating an offloading descriptor only if there are devices
3953  // specified.
3954  assert(!Devices.empty() && "No OpenMP offloading devices??");
3955 
3956  // Create the external variables that will point to the begin and end of the
3957  // host entries section. These will be defined by the linker.
3958  llvm::Type *OffloadEntryTy =
3960  std::string EntriesBeginName = getName({"omp_offloading", "entries_begin"});
3961  auto *HostEntriesBegin = new llvm::GlobalVariable(
3962  M, OffloadEntryTy, /*isConstant=*/true,
3963  llvm::GlobalValue::ExternalLinkage, /*Initializer=*/nullptr,
3964  EntriesBeginName);
3965  std::string EntriesEndName = getName({"omp_offloading", "entries_end"});
3966  auto *HostEntriesEnd =
3967  new llvm::GlobalVariable(M, OffloadEntryTy, /*isConstant=*/true,
3969  /*Initializer=*/nullptr, EntriesEndName);
3970 
3971  // Create all device images
3972  auto *DeviceImageTy = cast<llvm::StructType>(
3974  ConstantInitBuilder DeviceImagesBuilder(CGM);
3975  ConstantArrayBuilder DeviceImagesEntries =
3976  DeviceImagesBuilder.beginArray(DeviceImageTy);
3977 
3978  for (const llvm::Triple &Device : Devices) {
3979  StringRef T = Device.getTriple();
3980  std::string BeginName = getName({"omp_offloading", "img_start", ""});
3981  auto *ImgBegin = new llvm::GlobalVariable(
3982  M, CGM.Int8Ty, /*isConstant=*/true,
3983  llvm::GlobalValue::ExternalWeakLinkage,
3984  /*Initializer=*/nullptr, Twine(BeginName).concat(T));
3985  std::string EndName = getName({"omp_offloading", "img_end", ""});
3986  auto *ImgEnd = new llvm::GlobalVariable(
3987  M, CGM.Int8Ty, /*isConstant=*/true,
3988  llvm::GlobalValue::ExternalWeakLinkage,
3989  /*Initializer=*/nullptr, Twine(EndName).concat(T));
3990 
3991  llvm::Constant *Data[] = {ImgBegin, ImgEnd, HostEntriesBegin,
3992  HostEntriesEnd};
3994  DeviceImagesEntries);
3995  }
3996 
3997  // Create device images global array.
3998  std::string ImagesName = getName({"omp_offloading", "device_images"});
3999  llvm::GlobalVariable *DeviceImages =
4000  DeviceImagesEntries.finishAndCreateGlobal(ImagesName,
4001  CGM.getPointerAlign(),
4002  /*isConstant=*/true);
4003  DeviceImages->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
4004 
4005  // This is a zero array to be used in the creation of the constant expressions.
4006  llvm::Constant *Index[] = {llvm::Constant::getNullValue(CGM.Int32Ty),
4007  llvm::Constant::getNullValue(CGM.Int32Ty)};
4008 
4009  // Create the target region descriptor.
4010  llvm::Constant *Data[] = {
4011  llvm::ConstantInt::get(CGM.Int32Ty, Devices.size()),
4012  llvm::ConstantExpr::getGetElementPtr(DeviceImages->getValueType(),
4013  DeviceImages, Index),
4014  HostEntriesBegin, HostEntriesEnd};
4015  std::string Descriptor = getName({"omp_offloading", "descriptor"});
4016  llvm::GlobalVariable *Desc = createGlobalStruct(
4017  CGM, getTgtBinaryDescriptorQTy(), /*IsConstant=*/true, Data, Descriptor);
4018 
4019  // Emit code to register or unregister the descriptor at execution
4020  // startup or closing, respectively.
4021 
4022  llvm::Function *UnRegFn;
4023  {
4024  FunctionArgList Args;
4026  Args.push_back(&DummyPtr);
4027 
4028  CodeGenFunction CGF(CGM);
4029  // Disable debug info for global (de-)initializers because they are not part
4030  // of any particular construct.
4031  CGF.disableDebugInfo();
4032  const auto &FI =
4034  llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
4035  std::string UnregName = getName({"omp_offloading", "descriptor_unreg"});
4036  UnRegFn = CGM.CreateGlobalInitOrDestructFunction(FTy, UnregName, FI);
4037  CGF.StartFunction(GlobalDecl(), C.VoidTy, UnRegFn, FI, Args);
4039  Desc);
4040  CGF.FinishFunction();
4041  }
4042  llvm::Function *RegFn;
4043  {
4044  CodeGenFunction CGF(CGM);
4045  // Disable debug info for global (de-)initializers because they are not part
4046  // of any particular construct.
4047  CGF.disableDebugInfo();
4048  const auto &FI = CGM.getTypes().arrangeNullaryFunction();
4049  llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
4050 
4051  // Encode offload target triples into the registration function name. It
4052  // will serve as a comdat key for the registration/unregistration code for
4053  // this particular combination of offloading targets.
4054  SmallVector<StringRef, 4U> RegFnNameParts(Devices.size() + 2U);
4055  RegFnNameParts[0] = "omp_offloading";
4056  RegFnNameParts[1] = "descriptor_reg";
4057  llvm::transform(Devices, std::next(RegFnNameParts.begin(), 2),
4058  [](const llvm::Triple &T) -> const std::string& {
4059  return T.getTriple();
4060  });
4061  llvm::sort(std::next(RegFnNameParts.begin(), 2), RegFnNameParts.end());
4062  std::string Descriptor = getName(RegFnNameParts);
4063  RegFn = CGM.CreateGlobalInitOrDestructFunction(FTy, Descriptor, FI);
4064  CGF.StartFunction(GlobalDecl(), C.VoidTy, RegFn, FI, FunctionArgList());
4066  // Create a variable to drive the registration and unregistration of the
4067  // descriptor, so we can reuse the logic that emits Ctors and Dtors.
4068  ImplicitParamDecl RegUnregVar(C, C.getTranslationUnitDecl(),
4069  SourceLocation(), nullptr, C.CharTy,
4071  CGM.getCXXABI().registerGlobalDtor(CGF, RegUnregVar, UnRegFn, Desc);
4072  CGF.FinishFunction();
4073  }
4074  if (CGM.supportsCOMDAT()) {
4075  // It is sufficient to call the registration function only once, so create a
4076  // COMDAT group for the registration/unregistration functions and associated
4077  // data. That reduces startup time and code size. The registration
4078  // function serves as the COMDAT group key.
4079  llvm::Comdat *ComdatKey = M.getOrInsertComdat(RegFn->getName());
4080  RegFn->setLinkage(llvm::GlobalValue::LinkOnceAnyLinkage);
4081  RegFn->setVisibility(llvm::GlobalValue::HiddenVisibility);
4082  RegFn->setComdat(ComdatKey);
4083  UnRegFn->setComdat(ComdatKey);
4084  DeviceImages->setComdat(ComdatKey);
4085  Desc->setComdat(ComdatKey);
4086  }
4087  return RegFn;
4088 }
4089 
4091  llvm::Constant *ID, llvm::Constant *Addr, uint64_t Size, int32_t Flags,
4092  llvm::GlobalValue::LinkageTypes Linkage) {
4093  StringRef Name = Addr->getName();
4094  llvm::Module &M = CGM.getModule();
4095  llvm::LLVMContext &C = M.getContext();
4096 
4097  // Create constant string with the name.
4098  llvm::Constant *StrPtrInit = llvm::ConstantDataArray::getString(C, Name);
4099 
4100  std::string StringName = getName({"omp_offloading", "entry_name"});
4101  auto *Str = new llvm::GlobalVariable(
4102  M, StrPtrInit->getType(), /*isConstant=*/true,
4103  llvm::GlobalValue::InternalLinkage, StrPtrInit, StringName);
4104  Str->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
4105 
4106  llvm::Constant *Data[] = {llvm::ConstantExpr::getBitCast(ID, CGM.VoidPtrTy),
4107  llvm::ConstantExpr::getBitCast(Str, CGM.Int8PtrTy),
4108  llvm::ConstantInt::get(CGM.SizeTy, Size),
4109  llvm::ConstantInt::get(CGM.Int32Ty, Flags),
4110  llvm::ConstantInt::get(CGM.Int32Ty, 0)};
4111  std::string EntryName = getName({"omp_offloading", "entry", ""});
4112  llvm::GlobalVariable *Entry = createGlobalStruct(
4113  CGM, getTgtOffloadEntryQTy(), /*IsConstant=*/true, Data,
4114  Twine(EntryName).concat(Name), llvm::GlobalValue::WeakAnyLinkage);
4115 
4116  // The entry has to be created in the section the linker expects it to be.
4117  std::string Section = getName({"omp_offloading", "entries"});
4118  Entry->setSection(Section);
4119 }
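/// Conceptually each entry is a small constant record with weak linkage placed
/// in the dedicated offloading entries section, so the linker-defined
/// begin/end symbols bracket all of them; in C terms (sketch):
/// \code
/// struct __tgt_offload_entry Entry = {
///   /*addr=*/ID, /*name=*/NameString, /*size=*/Size,
///   /*flags=*/Flags, /*reserved=*/0
/// };
/// \endcode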
4120 
4122  // Emit the offloading entries and metadata so that the device codegen side
4123  // can easily figure out what to emit. The produced metadata looks like
4124  // this:
4125  //
4126  // !omp_offload.info = !{!1, ...}
4127  //
4128  // Right now we only generate metadata for functions that contain target
4129  // regions.
4130 
4131  // If we do not have entries, we don't need to do anything.
4133  return;
4134 
4135  llvm::Module &M = CGM.getModule();
4136  llvm::LLVMContext &C = M.getContext();
4138  OrderedEntries(OffloadEntriesInfoManager.size());
4139  llvm::SmallVector<StringRef, 16> ParentFunctions(
4141 
4142  // Auxiliary methods to create metadata values and strings.
4143  auto &&GetMDInt = [this](unsigned V) {
4144  return llvm::ConstantAsMetadata::get(
4145  llvm::ConstantInt::get(CGM.Int32Ty, V));
4146  };
4147 
4148  auto &&GetMDString = [&C](StringRef V) { return llvm::MDString::get(C, V); };
4149 
4150  // Create the offloading info metadata node.
4151  llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("omp_offload.info");
4152 
4153  // Create a function that emits metadata for each target region entry.
4154  auto &&TargetRegionMetadataEmitter =
4155  [&C, MD, &OrderedEntries, &ParentFunctions, &GetMDInt, &GetMDString](
4156  unsigned DeviceID, unsigned FileID, StringRef ParentName,
4157  unsigned Line,
4159  // Generate metadata for target regions. Each entry of this metadata
4160  // contains:
4161  // - Entry 0 -> Kind of this type of metadata (0).
4162  // - Entry 1 -> Device ID of the file where the entry was identified.
4163  // - Entry 2 -> File ID of the file where the entry was identified.
4164  // - Entry 3 -> Mangled name of the function where the entry was
4165  // identified.
4166  // - Entry 4 -> Line in the file where the entry was identified.
4167  // - Entry 5 -> Order the entry was created.
4168  // The first element of the metadata node is the kind.
4169  llvm::Metadata *Ops[] = {GetMDInt(E.getKind()), GetMDInt(DeviceID),
4170  GetMDInt(FileID), GetMDString(ParentName),
4171  GetMDInt(Line), GetMDInt(E.getOrder())};
4172 
4173  // Save this entry in the right position of the ordered entries array.
4174  OrderedEntries[E.getOrder()] = &E;
4175  ParentFunctions[E.getOrder()] = ParentName;
4176 
4177  // Add metadata to the named metadata node.
4178  MD->addOperand(llvm::MDNode::get(C, Ops));
4179  };
4180 
4182  TargetRegionMetadataEmitter);
4183 
4184  // Create a function that emits metadata for each device global variable entry.
4185  auto &&DeviceGlobalVarMetadataEmitter =
4186  [&C, &OrderedEntries, &GetMDInt, &GetMDString,
4187  MD](StringRef MangledName,
4189  &E) {
4190  // Generate metadata for global variables. Each entry of this metadata
4191  // contains:
4192  // - Entry 0 -> Kind of this type of metadata (1).
4193  // - Entry 1 -> Mangled name of the variable.
4194  // - Entry 2 -> Declare target kind.
4195  // - Entry 3 -> Order the entry was created.
4196  // The first element of the metadata node is the kind.
4197  llvm::Metadata *Ops[] = {
4198  GetMDInt(E.getKind()), GetMDString(MangledName),
4199  GetMDInt(E.getFlags()), GetMDInt(E.getOrder())};
4200 
4201  // Save this entry in the right position of the ordered entries array.
4202  OrderedEntries[E.getOrder()] = &E;
4203 
4204  // Add metadata to the named metadata node.
4205  MD->addOperand(llvm::MDNode::get(C, Ops));
4206  };
4207 
4209  DeviceGlobalVarMetadataEmitter);
4210 
4211  for (const auto *E : OrderedEntries) {
4212  assert(E && "All ordered entries must exist!");
4213  if (const auto *CE =
4214  dyn_cast<OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion>(
4215  E)) {
4216  if (!CE->getID() || !CE->getAddress()) {
4217  // Do not blame the entry if the parent function is not emitted.
4218  StringRef FnName = ParentFunctions[CE->getOrder()];
4219  if (!CGM.GetGlobalValue(FnName))
4220  continue;
4221  unsigned DiagID = CGM.getDiags().getCustomDiagID(
4223  "Offloading entry for target region is incorrect: either the "
4224  "address or the ID is invalid.");
4225  CGM.getDiags().Report(DiagID);
4226  continue;
4227  }
4228  createOffloadEntry(CE->getID(), CE->getAddress(), /*Size=*/0,
4229  CE->getFlags(), llvm::GlobalValue::WeakAnyLinkage);
4230  } else if (const auto *CE =
4231  dyn_cast<OffloadEntriesInfoManagerTy::
4232  OffloadEntryInfoDeviceGlobalVar>(E)) {
4235  CE->getFlags());
4236  switch (Flags) {
4238  if (CGM.getLangOpts().OpenMPIsDevice &&
4239  CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())
4240  continue;
4241  if (!CE->getAddress()) {
4242  unsigned DiagID = CGM.getDiags().getCustomDiagID(
4244  "Offloading entry for declare target variable is incorrect: the "
4245  "address is invalid.");
4246  CGM.getDiags().Report(DiagID);
4247  continue;
4248  }
4249  // The variable has no definition - no need to add the entry.
4250  if (CE->getVarSize().isZero())
4251  continue;
4252  break;
4253  }
4255  assert(((CGM.getLangOpts().OpenMPIsDevice && !CE->getAddress()) ||
4256  (!CGM.getLangOpts().OpenMPIsDevice && CE->getAddress())) &&
4257  "Declaret target link address is set.");
4258  if (CGM.getLangOpts().OpenMPIsDevice)
4259  continue;
4260  if (!CE->getAddress()) {
4261  unsigned DiagID = CGM.getDiags().getCustomDiagID(
4263  "Offloading entry for declare target variable is incorrect: the "
4264  "address is invalid.");
4265  CGM.getDiags().Report(DiagID);
4266  continue;
4267  }
4268  break;
4269  }
4270  createOffloadEntry(CE->getAddress(), CE->getAddress(),
4271  CE->getVarSize().getQuantity(), Flags,
4272  CE->getLinkage());
4273  } else {
4274  llvm_unreachable("Unsupported entry kind.");
4275  }
4276  }
4277 }
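/// The generated named metadata contains one node per entry; illustrative
/// shape only, with the operand meanings documented in the lambdas above:
/// \code
/// !omp_offload.info = !{!0, !1}
/// !0 = !{i32 0, i32 <device-id>, i32 <file-id>, !"<parent fn>", i32 <line>, i32 <order>}
/// !1 = !{i32 1, !"<mangled var name>", i32 <declare-target flags>, i32 <order>}
/// \endcode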
4278 
4279 /// Loads all the offload entries information from the host IR
4280 /// metadata.
4282  // If we are in target mode, load the metadata from the host IR. This code has
4283  // to match the metadata creation in createOffloadEntriesAndInfoMetadata().
4284 
4285  if (!CGM.getLangOpts().OpenMPIsDevice)
4286  return;
4287 
4288  if (CGM.getLangOpts().OMPHostIRFile.empty())
4289  return;
4290 
4291  auto Buf = llvm::MemoryBuffer::getFile(CGM.getLangOpts().OMPHostIRFile);
4292  if (auto EC = Buf.getError()) {
4293  CGM.getDiags().Report(diag::err_cannot_open_file)
4294  << CGM.getLangOpts().OMPHostIRFile << EC.message();
4295  return;
4296  }
4297 
4298  llvm::LLVMContext C;
4299  auto ME = expectedToErrorOrAndEmitErrors(
4300  C, llvm::parseBitcodeFile(Buf.get()->getMemBufferRef(), C));
4301 
4302  if (auto EC = ME.getError()) {
4303  unsigned DiagID = CGM.getDiags().getCustomDiagID(
4304  DiagnosticsEngine::Error, "Unable to parse host IR file '%0':'%1'");
4305  CGM.getDiags().Report(DiagID)
4306  << CGM.getLangOpts().OMPHostIRFile << EC.message();
4307  return;
4308  }
4309 
4310  llvm::NamedMDNode *MD = ME.get()->getNamedMetadata("omp_offload.info");
4311  if (!MD)
4312  return;
4313 
4314  for (llvm::MDNode *MN : MD->operands()) {
4315  auto &&GetMDInt = [MN](unsigned Idx) {
4316  auto *V = cast<llvm::ConstantAsMetadata>(MN->getOperand(Idx));
4317  return cast<llvm::ConstantInt>(V->getValue())->getZExtValue();
4318  };
4319 
4320  auto &&GetMDString = [MN](unsigned Idx) {
4321  auto *V = cast<llvm::MDString>(MN->getOperand(Idx));
4322  return V->getString();
4323  };
4324 
4325  switch (GetMDInt(0)) {
4326  default:
4327  llvm_unreachable("Unexpected metadata!");
4328  break;
4332  /*DeviceID=*/GetMDInt(1), /*FileID=*/GetMDInt(2),
4333  /*ParentName=*/GetMDString(3), /*Line=*/GetMDInt(4),
4334  /*Order=*/GetMDInt(5));
4335  break;
4339  /*MangledName=*/GetMDString(1),
4340  static_cast<OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind>(
4341  /*Flags=*/GetMDInt(2)),
4342  /*Order=*/GetMDInt(3));
4343  break;
4344  }
4345  }
4346 }
4347 
4349  if (!KmpRoutineEntryPtrTy) {
4350  // Build typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *); type.
4351  ASTContext &C = CGM.getContext();
4352  QualType KmpRoutineEntryTyArgs[] = {KmpInt32Ty, C.VoidPtrTy};
4354  KmpRoutineEntryPtrQTy = C.getPointerType(
4355  C.getFunctionType(KmpInt32Ty, KmpRoutineEntryTyArgs, EPI));
4356  KmpRoutineEntryPtrTy = CGM.getTypes().ConvertType(KmpRoutineEntryPtrQTy);
4357  }
4358 }
4359 
4361  // Make sure the type of the entry is already created. This is the type we
4362  // have to create:
4363  // struct __tgt_offload_entry{
4364  // void *addr; // Pointer to the offload entry info.
4365  // // (function or global)
4366  // char *name; // Name of the function or global.
4367  // size_t size; // Size of the entry info (0 if it is a function).
4368  // int32_t flags; // Flags associated with the entry, e.g. 'link'.
4369  // int32_t reserved; // Reserved, to use by the runtime library.
4370  // };
4371  if (TgtOffloadEntryQTy.isNull()) {
4372  ASTContext &C = CGM.getContext();
4373  RecordDecl *RD = C.buildImplicitRecord("__tgt_offload_entry");
4374  RD->startDefinition();
4375  addFieldToRecordDecl(C, RD, C.VoidPtrTy);
4377  addFieldToRecordDecl(C, RD, C.getSizeType());
4379  C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
4381  C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
4382  RD->completeDefinition();
4383  RD->addAttr(PackedAttr::CreateImplicit(C));
4385  }
4386  return TgtOffloadEntryQTy;
4387 }
4388 
4390  // These are the types we need to build:
4391  // struct __tgt_device_image{
4392  // void *ImageStart; // Pointer to the target code start.
4393  // void *ImageEnd; // Pointer to the target code end.
4394  // // We also add the host entries to the device image, as it may be useful
4395  // // for the target runtime to have access to that information.
4396  // __tgt_offload_entry *EntriesBegin; // Begin of the table with all
4397  // // the entries.
4398  // __tgt_offload_entry *EntriesEnd; // End of the table with all the
4399  // // entries (non inclusive).
4400  // };
4401  if (TgtDeviceImageQTy.isNull()) {
4402  ASTContext &C = CGM.getContext();
4403  RecordDecl *RD = C.buildImplicitRecord("__tgt_device_image");
4404  RD->startDefinition();
4405  addFieldToRecordDecl(C, RD, C.VoidPtrTy);
4406  addFieldToRecordDecl(C, RD, C.VoidPtrTy);
4409  RD->completeDefinition();
4411  }
4412  return TgtDeviceImageQTy;
4413 }
4414 
4416  // struct __tgt_bin_desc{
4417  // int32_t NumDevices; // Number of devices supported.
4418  // __tgt_device_image *DeviceImages; // Arrays of device images
4419  // // (one per device).
4420  // __tgt_offload_entry *EntriesBegin; // Begin of the table with all the
4421  // // entries.
4422  // __tgt_offload_entry *EntriesEnd; // End of the table with all the
4423  // // entries (non inclusive).
4424  // };
4426  ASTContext &C = CGM.getContext();
4427  RecordDecl *RD = C.buildImplicitRecord("__tgt_bin_desc");
4428  RD->startDefinition();
4430  C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
4434  RD->completeDefinition();
4436  }
4437  return TgtBinaryDescriptorQTy;
4438 }
4439 
4440 namespace {
4441 struct PrivateHelpersTy {
4442  PrivateHelpersTy(const VarDecl *Original, const VarDecl *PrivateCopy,
4443  const VarDecl *PrivateElemInit)
4444  : Original(Original), PrivateCopy(PrivateCopy),
4445  PrivateElemInit(PrivateElemInit) {}
4446  const VarDecl *Original;
4447  const VarDecl *PrivateCopy;
4448  const VarDecl *PrivateElemInit;
4449 };
4450 typedef std::pair<CharUnits /*Align*/, PrivateHelpersTy> PrivateDataTy;
4451 } // anonymous namespace
4452 
4453 static RecordDecl *
4455  if (!Privates.empty()) {
4456  ASTContext &C = CGM.getContext();
4457  // Build struct .kmp_privates_t. {
4458  // /* private vars */
4459  // };
4460  RecordDecl *RD = C.buildImplicitRecord(".kmp_privates.t");
4461  RD->startDefinition();
4462  for (const auto &Pair : Privates) {
4463  const VarDecl *VD = Pair.second.Original;
4464  QualType Type = VD->getType().getNonReferenceType();
4465  FieldDecl *FD = addFieldToRecordDecl(C, RD, Type);
4466  if (VD->hasAttrs()) {
4467  for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
4468  E(VD->getAttrs().end());
4469  I != E; ++I)
4470  FD->addAttr(*I);
4471  }
4472  }
4473  RD->completeDefinition();
4474  return RD;
4475  }
4476  return nullptr;
4477 }
4478 
4479 static RecordDecl *
4481  QualType KmpInt32Ty,
4482  QualType KmpRoutineEntryPointerQTy) {
4483  ASTContext &C = CGM.getContext();
4484  // Build struct kmp_task_t {
4485  // void * shareds;
4486  // kmp_routine_entry_t routine;
4487  // kmp_int32 part_id;
4488  // kmp_cmplrdata_t data1;
4489  // kmp_cmplrdata_t data2;
4490  // For taskloops additional fields:
4491  // kmp_uint64 lb;
4492  // kmp_uint64 ub;
4493  // kmp_int64 st;
4494  // kmp_int32 liter;
4495  // void * reductions;
4496  // };
4497  RecordDecl *UD = C.buildImplicitRecord("kmp_cmplrdata_t", TTK_Union);
4498  UD->startDefinition();
4499  addFieldToRecordDecl(C, UD, KmpInt32Ty);
4500  addFieldToRecordDecl(C, UD, KmpRoutineEntryPointerQTy);
4501  UD->completeDefinition();
4502  QualType KmpCmplrdataTy = C.getRecordType(UD);
4503  RecordDecl *RD = C.buildImplicitRecord("kmp_task_t");
4504  RD->startDefinition();
4505  addFieldToRecordDecl(C, RD, C.VoidPtrTy);
4506  addFieldToRecordDecl(C, RD, KmpRoutineEntryPointerQTy);
4507  addFieldToRecordDecl(C, RD, KmpInt32Ty);
4508  addFieldToRecordDecl(C, RD, KmpCmplrdataTy);
4509  addFieldToRecordDecl(C, RD, KmpCmplrdataTy);
4510  if (isOpenMPTaskLoopDirective(Kind)) {
4511  QualType KmpUInt64Ty =
4512  CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0);
4513  QualType KmpInt64Ty =
4514  CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
4515  addFieldToRecordDecl(C, RD, KmpUInt64Ty);
4516  addFieldToRecordDecl(C, RD, KmpUInt64Ty);
4517  addFieldToRecordDecl(C, RD, KmpInt64Ty);
4518  addFieldToRecordDecl(C, RD, KmpInt32Ty);
4519  addFieldToRecordDecl(C, RD, C.VoidPtrTy);
4520  }
4521  RD->completeDefinition();
4522  return RD;
4523 }
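// [Editor's illustrative sketch, not part of the upstream source.] The
// kmp_cmplrdata_t union built above mirrors the libomp runtime type; the field
// names below come from the runtime headers and are assumptions here, since
// this codegen only fixes the member types:
//
//   typedef union kmp_cmplrdata {
//     kmp_int32           priority;     // used for the 'priority' clause
//     kmp_routine_entry_t destructors;  // destructor thunk for the task
//   } kmp_cmplrdata_t;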
4524 
4525 static RecordDecl *
4526 createKmpTaskTWithPrivatesRecordDecl(CodeGenModule &CGM, QualType KmpTaskTQTy,
4527  ArrayRef<PrivateDataTy> Privates) {
4528  ASTContext &C = CGM.getContext();
4529  // Build struct kmp_task_t_with_privates {
4530  // kmp_task_t task_data;
4531  // .kmp_privates_t. privates;
4532  // };
4533  RecordDecl *RD = C.buildImplicitRecord("kmp_task_t_with_privates");
4534  RD->startDefinition();
4535  addFieldToRecordDecl(C, RD, KmpTaskTQTy);
4536  if (const RecordDecl *PrivateRD = createPrivatesRecordDecl(CGM, Privates))
4537  addFieldToRecordDecl(C, RD, C.getRecordType(PrivateRD));
4538  RD->completeDefinition();
4539  return RD;
4540 }
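// [Editor's note, not part of the upstream source.] Because 'task_data' is
// always field 0 and the optional privates record is field 1,
// emitProxyTaskFunction below locates the privates block with
// std::next(field_begin(), 1); when no privates record was created, that field
// does not exist and a null 'void*' is passed to the task function instead.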
4541 
4542 /// Emit a proxy function which accepts kmp_task_t as the second
4543 /// argument.
4544 /// \code
4545 /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
4546 /// TaskFunction(gtid, tt->part_id, &tt->privates, task_privates_map, tt,
4547 /// For taskloops:
4548 /// tt->task_data.lb, tt->task_data.ub, tt->task_data.st, tt->task_data.liter,
4549 /// tt->reductions, tt->shareds);
4550 /// return 0;
4551 /// }
4552 /// \endcode
4553 static llvm::Function *
4554 emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
4555  OpenMPDirectiveKind Kind, QualType KmpInt32Ty,
4556  QualType KmpTaskTWithPrivatesPtrQTy,
4557  QualType KmpTaskTWithPrivatesQTy, QualType KmpTaskTQTy,
4558  QualType SharedsPtrTy, llvm::Function *TaskFunction,
4559  llvm::Value *TaskPrivatesMap) {
4560  ASTContext &C = CGM.getContext();
4561  FunctionArgList Args;
4562  ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty,
4563  ImplicitParamDecl::Other);
4564  ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4565  KmpTaskTWithPrivatesPtrQTy.withRestrict(),
4566  ImplicitParamDecl::Other);
4567  Args.push_back(&GtidArg);
4568  Args.push_back(&TaskTypeArg);
4569  const auto &TaskEntryFnInfo =
4570  CGM.getTypes().arrangeBuiltinFunctionDeclaration(KmpInt32Ty, Args);
4571  llvm::FunctionType *TaskEntryTy =
4572  CGM.getTypes().GetFunctionType(TaskEntryFnInfo);
4573  std::string Name = CGM.getOpenMPRuntime().getName({"omp_task_entry", ""});
4574  auto *TaskEntry = llvm::Function::Create(
4575  TaskEntryTy, llvm::GlobalValue::InternalLinkage, Name, &CGM.getModule());
4576  CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskEntry, TaskEntryFnInfo);
4577  TaskEntry->setDoesNotRecurse();
4578  CodeGenFunction CGF(CGM);
4579  CGF.StartFunction(GlobalDecl(), KmpInt32Ty, TaskEntry, TaskEntryFnInfo, Args,
4580  Loc, Loc);
4581 
4582  // TaskFunction(gtid, tt->task_data.part_id, &tt->privates, task_privates_map,
4583  // tt,
4584  // For taskloops:
4585  // tt->task_data.lb, tt->task_data.ub, tt->task_data.st, tt->task_data.liter,
4586  // tt->task_data.shareds);
4587  llvm::Value *GtidParam = CGF.EmitLoadOfScalar(
4588  CGF.GetAddrOfLocalVar(&GtidArg), /*Volatile=*/false, KmpInt32Ty, Loc);
4589  LValue TDBase = CGF.EmitLoadOfPointerLValue(
4590  CGF.GetAddrOfLocalVar(&TaskTypeArg),
4591  KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
4592  const auto *KmpTaskTWithPrivatesQTyRD =
4593  cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
4594  LValue Base =
4595  CGF.EmitLValueForField(TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
4596  const auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
4597  auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
4598  LValue PartIdLVal = CGF.EmitLValueForField(Base, *PartIdFI);
4599  llvm::Value *PartidParam = PartIdLVal.getPointer();
4600 
4601  auto SharedsFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTShareds);
4602  LValue SharedsLVal = CGF.EmitLValueForField(Base, *SharedsFI);
4603  llvm::Value *SharedsParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4604  CGF.EmitLoadOfScalar(SharedsLVal, Loc),
4605  CGF.ConvertTypeForMem(SharedsPtrTy));
4606 
4607  auto PrivatesFI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin(), 1);
4608  llvm::Value *PrivatesParam;
4609  if (PrivatesFI != KmpTaskTWithPrivatesQTyRD->field_end()) {
4610  LValue PrivatesLVal = CGF.EmitLValueForField(TDBase, *PrivatesFI);
4611  PrivatesParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4612  PrivatesLVal.getPointer(), CGF.VoidPtrTy);
4613  } else {
4614  PrivatesParam = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
4615  }
4616 
4617  llvm::Value *CommonArgs[] = {GtidParam, PartidParam, PrivatesParam,
4618  TaskPrivatesMap,
4619  CGF.Builder
4620  .CreatePointerBitCastOrAddrSpaceCast(
4621  TDBase.getAddress(), CGF.VoidPtrTy)
4622  .getPointer()};
4623  SmallVector<llvm::Value *, 16> CallArgs(std::begin(CommonArgs),
4624  std::end(CommonArgs));
4625  if (isOpenMPTaskLoopDirective(Kind)) {
4626  auto LBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound);
4627  LValue LBLVal = CGF.EmitLValueForField(Base, *LBFI);
4628  llvm::Value *LBParam = CGF.EmitLoadOfScalar(LBLVal, Loc);
4629  auto UBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound);
4630  LValue UBLVal = CGF.EmitLValueForField(Base, *UBFI);
4631  llvm::Value *UBParam = CGF.EmitLoadOfScalar(UBLVal, Loc);
4632  auto StFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTStride);
4633  LValue StLVal = CGF.EmitLValueForField(Base, *StFI);
4634  llvm::Value *StParam = CGF.EmitLoadOfScalar(StLVal, Loc);
4635  auto LIFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLastIter);
4636  LValue LILVal = CGF.EmitLValueForField(Base, *LIFI);
4637  llvm::Value *LIParam = CGF.EmitLoadOfScalar(LILVal, Loc);
4638  auto RFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTReductions);
4639  LValue RLVal = CGF.EmitLValueForField(Base, *RFI);
4640  llvm::Value *RParam = CGF.EmitLoadOfScalar(RLVal, Loc);
4641  CallArgs.push_back(LBParam);
4642  CallArgs.push_back(UBParam);
4643  CallArgs.push_back(StParam);
4644  CallArgs.push_back(LIParam);
4645  CallArgs.push_back(RParam);
4646  }
4647  CallArgs.push_back(SharedsParam);
4648 
4649  CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, Loc, TaskFunction,
4650  CallArgs);
4651  CGF.EmitStoreThroughLValue(RValue::get(CGF.Builder.getInt32(/*C=*/0)),
4652  CGF.MakeAddrLValue(CGF.ReturnValue, KmpInt32Ty));