clang 9.0.0svn
CGOpenMPRuntime.cpp
1 //===----- CGOpenMPRuntime.cpp - Interface to OpenMP Runtimes -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This provides a class for OpenMP runtime code generation.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "CGCXXABI.h"
14 #include "CGCleanup.h"
15 #include "CGOpenMPRuntime.h"
16 #include "CGRecordLayout.h"
17 #include "CodeGenFunction.h"
18 #include "clang/CodeGen/ConstantInitBuilder.h"
19 #include "clang/AST/Decl.h"
20 #include "clang/AST/StmtOpenMP.h"
21 #include "clang/Basic/BitmaskEnum.h"
22 #include "llvm/ADT/ArrayRef.h"
23 #include "llvm/Bitcode/BitcodeReader.h"
24 #include "llvm/IR/DerivedTypes.h"
25 #include "llvm/IR/GlobalValue.h"
26 #include "llvm/IR/Value.h"
27 #include "llvm/Support/Format.h"
28 #include "llvm/Support/raw_ostream.h"
29 #include <cassert>
30 
31 using namespace clang;
32 using namespace CodeGen;
33 
34 namespace {
35 /// Base class for handling code generation inside OpenMP regions.
36 class CGOpenMPRegionInfo : public CodeGenFunction::CGCapturedStmtInfo {
37 public:
38  /// Kinds of OpenMP regions used in codegen.
39  enum CGOpenMPRegionKind {
40  /// Region with outlined function for standalone 'parallel'
41  /// directive.
42  ParallelOutlinedRegion,
43  /// Region with outlined function for standalone 'task' directive.
44  TaskOutlinedRegion,
45  /// Region for constructs that do not require function outlining,
46  /// like 'for', 'sections', 'atomic' etc. directives.
47  InlinedRegion,
48  /// Region with outlined function for standalone 'target' directive.
49  TargetRegion,
50  };
51 
52  CGOpenMPRegionInfo(const CapturedStmt &CS,
53  const CGOpenMPRegionKind RegionKind,
54  const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind,
55  bool HasCancel)
56  : CGCapturedStmtInfo(CS, CR_OpenMP), RegionKind(RegionKind),
57  CodeGen(CodeGen), Kind(Kind), HasCancel(HasCancel) {}
58 
59  CGOpenMPRegionInfo(const CGOpenMPRegionKind RegionKind,
60  const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind,
61  bool HasCancel)
62  : CGCapturedStmtInfo(CR_OpenMP), RegionKind(RegionKind), CodeGen(CodeGen),
63  Kind(Kind), HasCancel(HasCancel) {}
64 
65  /// Get a variable or parameter for storing global thread id
66  /// inside OpenMP construct.
67  virtual const VarDecl *getThreadIDVariable() const = 0;
68 
69  /// Emit the captured statement body.
70  void EmitBody(CodeGenFunction &CGF, const Stmt *S) override;
71 
72  /// Get an LValue for the current ThreadID variable.
73  /// \return LValue for thread id variable. This LValue always has type int32*.
74  virtual LValue getThreadIDVariableLValue(CodeGenFunction &CGF);
75 
76  virtual void emitUntiedSwitch(CodeGenFunction & /*CGF*/) {}
77 
78  CGOpenMPRegionKind getRegionKind() const { return RegionKind; }
79 
80  OpenMPDirectiveKind getDirectiveKind() const { return Kind; }
81 
82  bool hasCancel() const { return HasCancel; }
83 
84  static bool classof(const CGCapturedStmtInfo *Info) {
85  return Info->getKind() == CR_OpenMP;
86  }
87 
88  ~CGOpenMPRegionInfo() override = default;
89 
90 protected:
91  CGOpenMPRegionKind RegionKind;
92  RegionCodeGenTy CodeGen;
93  OpenMPDirectiveKind Kind;
94  bool HasCancel;
95 };
96 
97 /// API for captured statement code generation in OpenMP constructs.
98 class CGOpenMPOutlinedRegionInfo final : public CGOpenMPRegionInfo {
99 public:
100  CGOpenMPOutlinedRegionInfo(const CapturedStmt &CS, const VarDecl *ThreadIDVar,
101  const RegionCodeGenTy &CodeGen,
102  OpenMPDirectiveKind Kind, bool HasCancel,
103  StringRef HelperName)
104  : CGOpenMPRegionInfo(CS, ParallelOutlinedRegion, CodeGen, Kind,
105  HasCancel),
106  ThreadIDVar(ThreadIDVar), HelperName(HelperName) {
107  assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
108  }
109 
110  /// Get a variable or parameter for storing global thread id
111  /// inside OpenMP construct.
112  const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }
113 
114  /// Get the name of the capture helper.
115  StringRef getHelperName() const override { return HelperName; }
116 
117  static bool classof(const CGCapturedStmtInfo *Info) {
118  return CGOpenMPRegionInfo::classof(Info) &&
119  cast<CGOpenMPRegionInfo>(Info)->getRegionKind() ==
120  ParallelOutlinedRegion;
121  }
122 
123 private:
124  /// A variable or parameter storing global thread id for OpenMP
125  /// constructs.
126  const VarDecl *ThreadIDVar;
127  StringRef HelperName;
128 };
129 
130 /// API for captured statement code generation in OpenMP constructs.
131 class CGOpenMPTaskOutlinedRegionInfo final : public CGOpenMPRegionInfo {
132 public:
133  class UntiedTaskActionTy final : public PrePostActionTy {
134  bool Untied;
135  const VarDecl *PartIDVar;
136  const RegionCodeGenTy UntiedCodeGen;
137  llvm::SwitchInst *UntiedSwitch = nullptr;
138 
139  public:
140  UntiedTaskActionTy(bool Tied, const VarDecl *PartIDVar,
141  const RegionCodeGenTy &UntiedCodeGen)
142  : Untied(!Tied), PartIDVar(PartIDVar), UntiedCodeGen(UntiedCodeGen) {}
143  void Enter(CodeGenFunction &CGF) override {
144  if (Untied) {
145  // Emit task switching point.
146  LValue PartIdLVal = CGF.EmitLoadOfPointerLValue(
147  CGF.GetAddrOfLocalVar(PartIDVar),
148  PartIDVar->getType()->castAs<PointerType>());
149  llvm::Value *Res =
150  CGF.EmitLoadOfScalar(PartIdLVal, PartIDVar->getLocation());
151  llvm::BasicBlock *DoneBB = CGF.createBasicBlock(".untied.done.");
152  UntiedSwitch = CGF.Builder.CreateSwitch(Res, DoneBB);
153  CGF.EmitBlock(DoneBB);
154  CGF.EmitBranchThroughCleanup(CGF.ReturnBlock);
155  CGF.EmitBlock(CGF.createBasicBlock(".untied.jmp."));
156  UntiedSwitch->addCase(CGF.Builder.getInt32(0),
157  CGF.Builder.GetInsertBlock());
158  emitUntiedSwitch(CGF);
159  }
160  }
161  void emitUntiedSwitch(CodeGenFunction &CGF) const {
162  if (Untied) {
163  LValue PartIdLVal = CGF.EmitLoadOfPointerLValue(
164  CGF.GetAddrOfLocalVar(PartIDVar),
165  PartIDVar->getType()->castAs<PointerType>());
166  CGF.EmitStoreOfScalar(CGF.Builder.getInt32(UntiedSwitch->getNumCases()),
167  PartIdLVal);
168  UntiedCodeGen(CGF);
169  CodeGenFunction::JumpDest CurPoint =
170  CGF.getJumpDestInCurrentScope(".untied.next.");
171  CGF.EmitBranchThroughCleanup(CGF.ReturnBlock);
172  CGF.EmitBlock(CGF.createBasicBlock(".untied.jmp."));
173  UntiedSwitch->addCase(CGF.Builder.getInt32(UntiedSwitch->getNumCases()),
174  CGF.Builder.GetInsertBlock());
175  CGF.EmitBranchThroughCleanup(CurPoint);
176  CGF.EmitBlock(CurPoint.getBlock());
177  }
178  }
179  unsigned getNumberOfParts() const { return UntiedSwitch->getNumCases(); }
180  };
181  CGOpenMPTaskOutlinedRegionInfo(const CapturedStmt &CS,
182  const VarDecl *ThreadIDVar,
183  const RegionCodeGenTy &CodeGen,
184  OpenMPDirectiveKind Kind, bool HasCancel,
185  const UntiedTaskActionTy &Action)
186  : CGOpenMPRegionInfo(CS, TaskOutlinedRegion, CodeGen, Kind, HasCancel),
187  ThreadIDVar(ThreadIDVar), Action(Action) {
188  assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
189  }
190 
191  /// Get a variable or parameter for storing global thread id
192  /// inside OpenMP construct.
193  const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }
194 
195  /// Get an LValue for the current ThreadID variable.
196  LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override;
197 
198  /// Get the name of the capture helper.
199  StringRef getHelperName() const override { return ".omp_outlined."; }
200 
201  void emitUntiedSwitch(CodeGenFunction &CGF) override {
202  Action.emitUntiedSwitch(CGF);
203  }
204 
205  static bool classof(const CGCapturedStmtInfo *Info) {
206  return CGOpenMPRegionInfo::classof(Info) &&
207  cast<CGOpenMPRegionInfo>(Info)->getRegionKind() ==
208  TaskOutlinedRegion;
209  }
210 
211 private:
212  /// A variable or parameter storing global thread id for OpenMP
213  /// constructs.
214  const VarDecl *ThreadIDVar;
215  /// Action for emitting code for untied tasks.
216  const UntiedTaskActionTy &Action;
217 };
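// Illustration (editorial, not in the upstream file): UntiedTaskActionTy
// implements task switching for untied tasks. The part id stored in the task
// selects, through a switch emitted at task entry, which ".untied.jmp." block
// to resume at. A rough C-level sketch of the generated shape, with purely
// illustrative names:
//
//   void task_entry(kmp_int32 gtid, kmp_task_t *task) {
//     switch (*task->part_id) {      // default destination: ".untied.done."
//     case 0: goto jmp0;             // one case per task scheduling point
//     case 1: goto jmp1;
//     }
//   jmp0: /* first part  */ *task->part_id = 1; /* re-enqueue */ return;
//   jmp1: /* second part */ ...
//   }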
218 
219 /// API for inlined captured statement code generation in OpenMP
220 /// constructs.
221 class CGOpenMPInlinedRegionInfo : public CGOpenMPRegionInfo {
222 public:
223  CGOpenMPInlinedRegionInfo(CodeGenFunction::CGCapturedStmtInfo *OldCSI,
224  const RegionCodeGenTy &CodeGen,
225  OpenMPDirectiveKind Kind, bool HasCancel)
226  : CGOpenMPRegionInfo(InlinedRegion, CodeGen, Kind, HasCancel),
227  OldCSI(OldCSI),
228  OuterRegionInfo(dyn_cast_or_null<CGOpenMPRegionInfo>(OldCSI)) {}
229 
230  // Retrieve the value of the context parameter.
231  llvm::Value *getContextValue() const override {
232  if (OuterRegionInfo)
233  return OuterRegionInfo->getContextValue();
234  llvm_unreachable("No context value for inlined OpenMP region");
235  }
236 
237  void setContextValue(llvm::Value *V) override {
238  if (OuterRegionInfo) {
239  OuterRegionInfo->setContextValue(V);
240  return;
241  }
242  llvm_unreachable("No context value for inlined OpenMP region");
243  }
244 
245  /// Lookup the captured field decl for a variable.
246  const FieldDecl *lookup(const VarDecl *VD) const override {
247  if (OuterRegionInfo)
248  return OuterRegionInfo->lookup(VD);
249  // If there is no outer outlined region, there is no need to look the
250  // variable up in the list of captured variables; we can use the original one.
251  return nullptr;
252  }
253 
254  FieldDecl *getThisFieldDecl() const override {
255  if (OuterRegionInfo)
256  return OuterRegionInfo->getThisFieldDecl();
257  return nullptr;
258  }
259 
260  /// Get a variable or parameter for storing global thread id
261  /// inside OpenMP construct.
262  const VarDecl *getThreadIDVariable() const override {
263  if (OuterRegionInfo)
264  return OuterRegionInfo->getThreadIDVariable();
265  return nullptr;
266  }
267 
268  /// Get an LValue for the current ThreadID variable.
269  LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override {
270  if (OuterRegionInfo)
271  return OuterRegionInfo->getThreadIDVariableLValue(CGF);
272  llvm_unreachable("No LValue for inlined OpenMP construct");
273  }
274 
275  /// Get the name of the capture helper.
276  StringRef getHelperName() const override {
277  if (auto *OuterRegionInfo = getOldCSI())
278  return OuterRegionInfo->getHelperName();
279  llvm_unreachable("No helper name for inlined OpenMP construct");
280  }
281 
282  void emitUntiedSwitch(CodeGenFunction &CGF) override {
283  if (OuterRegionInfo)
284  OuterRegionInfo->emitUntiedSwitch(CGF);
285  }
286 
287  CodeGenFunction::CGCapturedStmtInfo *getOldCSI() const { return OldCSI; }
288 
289  static bool classof(const CGCapturedStmtInfo *Info) {
290  return CGOpenMPRegionInfo::classof(Info) &&
291  cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == InlinedRegion;
292  }
293 
294  ~CGOpenMPInlinedRegionInfo() override = default;
295 
296 private:
297  /// CodeGen info about outer OpenMP region.
298  CodeGenFunction::CGCapturedStmtInfo *OldCSI;
299  CGOpenMPRegionInfo *OuterRegionInfo;
300 };
301 
302 /// API for captured statement code generation in OpenMP target
303 /// constructs. For these captures, implicit parameters are used instead of the
304 /// captured fields. The name of the target region has to be unique in a given
305 /// application, so it is provided by the client, because only the client has
306 /// the information needed to generate it.
307 class CGOpenMPTargetRegionInfo final : public CGOpenMPRegionInfo {
308 public:
309  CGOpenMPTargetRegionInfo(const CapturedStmt &CS,
310  const RegionCodeGenTy &CodeGen, StringRef HelperName)
311  : CGOpenMPRegionInfo(CS, TargetRegion, CodeGen, OMPD_target,
312  /*HasCancel=*/false),
313  HelperName(HelperName) {}
314 
315  /// This is unused for target regions because each starts executing
316  /// with a single thread.
317  const VarDecl *getThreadIDVariable() const override { return nullptr; }
318 
319  /// Get the name of the capture helper.
320  StringRef getHelperName() const override { return HelperName; }
321 
322  static bool classof(const CGCapturedStmtInfo *Info) {
323  return CGOpenMPRegionInfo::classof(Info) &&
324  cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == TargetRegion;
325  }
326 
327 private:
328  StringRef HelperName;
329 };
330 
331 static void EmptyCodeGen(CodeGenFunction &, PrePostActionTy &) {
332  llvm_unreachable("No codegen for expressions");
333 }
334 /// API for generation of expressions captured in an innermost OpenMP
335 /// region.
336 class CGOpenMPInnerExprInfo final : public CGOpenMPInlinedRegionInfo {
337 public:
338  CGOpenMPInnerExprInfo(CodeGenFunction &CGF, const CapturedStmt &CS)
339  : CGOpenMPInlinedRegionInfo(CGF.CapturedStmtInfo, EmptyCodeGen,
340  OMPD_unknown,
341  /*HasCancel=*/false),
342  PrivScope(CGF) {
343  // Make sure the globals captured in the provided statement are local by
344  // using the privatization logic. We assume the same variable is not
345  // captured more than once.
346  for (const auto &C : CS.captures()) {
347  if (!C.capturesVariable() && !C.capturesVariableByCopy())
348  continue;
349 
350  const VarDecl *VD = C.getCapturedVar();
351  if (VD->isLocalVarDeclOrParm())
352  continue;
353 
354  DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(VD),
355  /*RefersToEnclosingVariableOrCapture=*/false,
356  VD->getType().getNonReferenceType(), VK_LValue,
357  C.getLocation());
358  PrivScope.addPrivate(
359  VD, [&CGF, &DRE]() { return CGF.EmitLValue(&DRE).getAddress(); });
360  }
361  (void)PrivScope.Privatize();
362  }
363 
364  /// Lookup the captured field decl for a variable.
365  const FieldDecl *lookup(const VarDecl *VD) const override {
366  if (const FieldDecl *FD = CGOpenMPInlinedRegionInfo::lookup(VD))
367  return FD;
368  return nullptr;
369  }
370 
371  /// Emit the captured statement body.
372  void EmitBody(CodeGenFunction &CGF, const Stmt *S) override {
373  llvm_unreachable("No body for expressions");
374  }
375 
376  /// Get a variable or parameter for storing global thread id
377  /// inside OpenMP construct.
378  const VarDecl *getThreadIDVariable() const override {
379  llvm_unreachable("No thread id for expressions");
380  }
381 
382  /// Get the name of the capture helper.
383  StringRef getHelperName() const override {
384  llvm_unreachable("No helper name for expressions");
385  }
386 
387  static bool classof(const CGCapturedStmtInfo *Info) { return false; }
388 
389 private:
390  /// Private scope to capture global variables.
391  CodeGenFunction::OMPPrivateScope PrivScope;
392 };
393 
394 /// RAII for emitting code of OpenMP constructs.
395 class InlinedOpenMPRegionRAII {
396  CodeGenFunction &CGF;
397  llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
398  FieldDecl *LambdaThisCaptureField = nullptr;
399  const CodeGen::CGBlockInfo *BlockInfo = nullptr;
400 
401 public:
402  /// Constructs region for combined constructs.
403  /// \param CodeGen Code generation sequence for combined directives. Includes
404  /// a list of functions used for code generation of implicitly inlined
405  /// regions.
406  InlinedOpenMPRegionRAII(CodeGenFunction &CGF, const RegionCodeGenTy &CodeGen,
407  OpenMPDirectiveKind Kind, bool HasCancel)
408  : CGF(CGF) {
409  // Start emission for the construct.
410  CGF.CapturedStmtInfo = new CGOpenMPInlinedRegionInfo(
411  CGF.CapturedStmtInfo, CodeGen, Kind, HasCancel);
412  std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
413  LambdaThisCaptureField = CGF.LambdaThisCaptureField;
414  CGF.LambdaThisCaptureField = nullptr;
415  BlockInfo = CGF.BlockInfo;
416  CGF.BlockInfo = nullptr;
417  }
418 
419  ~InlinedOpenMPRegionRAII() {
420  // Restore original CapturedStmtInfo only if we're done with code emission.
421  auto *OldCSI =
422  cast<CGOpenMPInlinedRegionInfo>(CGF.CapturedStmtInfo)->getOldCSI();
423  delete CGF.CapturedStmtInfo;
424  CGF.CapturedStmtInfo = OldCSI;
425  std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
426  CGF.LambdaThisCaptureField = LambdaThisCaptureField;
427  CGF.BlockInfo = BlockInfo;
428  }
429 };
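// Illustration (editorial, not in the upstream file): typical use of the RAII
// above when emitting a directive that is not outlined, e.g. 'critical' or
// 'single' (a sketch; the real call sites appear later in this file):
//
//   {
//     InlinedOpenMPRegionRAII Region(CGF, CodeGen, OMPD_critical,
//                                    /*HasCancel=*/false);
//     CGF.EmitStmt(Body);  // emitted with the inlined region info installed
//   } // original CapturedStmtInfo, lambda captures and BlockInfo restored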
430 
431 /// Values for bit flags used in the ident_t to describe the fields.
432 /// All enum elements are named and described in accordance with the code
433 /// from https://github.com/llvm/llvm-project/blob/master/openmp/runtime/src/kmp.h
434 enum OpenMPLocationFlags : unsigned {
435  /// Use trampoline for internal microtask.
436  OMP_IDENT_IMD = 0x01,
437  /// Use c-style ident structure.
438  OMP_IDENT_KMPC = 0x02,
439  /// Atomic reduction option for kmpc_reduce.
440  OMP_ATOMIC_REDUCE = 0x10,
441  /// Explicit 'barrier' directive.
442  OMP_IDENT_BARRIER_EXPL = 0x20,
443  /// Implicit barrier in code.
444  OMP_IDENT_BARRIER_IMPL = 0x40,
445  /// Implicit barrier in 'for' directive.
446  OMP_IDENT_BARRIER_IMPL_FOR = 0x40,
447  /// Implicit barrier in 'sections' directive.
448  OMP_IDENT_BARRIER_IMPL_SECTIONS = 0xC0,
449  /// Implicit barrier in 'single' directive.
450  OMP_IDENT_BARRIER_IMPL_SINGLE = 0x140,
451  /// Call of __kmp_for_static_init for static loop.
452  OMP_IDENT_WORK_LOOP = 0x200,
453  /// Call of __kmp_for_static_init for sections.
454  OMP_IDENT_WORK_SECTIONS = 0x400,
455  /// Call of __kmp_for_static_init for distribute.
456  OMP_IDENT_WORK_DISTRIBUTE = 0x800,
457  LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/OMP_IDENT_WORK_DISTRIBUTE)
458 };
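// Illustration (editorial, not in the upstream file): these values form a
// bitmask. emitUpdateLocation() below always ORs in OMP_IDENT_KMPC, and a
// barrier would additionally set one of the barrier kinds, for example
//   OMP_IDENT_KMPC | OMP_IDENT_BARRIER_IMPL_FOR == 0x02 | 0x40 == 0x42,
// which is what ends up in the 'flags' field of the emitted ident_t constant.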
459 
460 namespace {
462 /// Values for bit flags for marking which requires clauses have been used.
463 enum OpenMPOffloadingRequiresDirFlags : int64_t {
464  /// flag undefined.
465  OMP_REQ_UNDEFINED = 0x000,
466  /// no requires clause present.
467  OMP_REQ_NONE = 0x001,
468  /// reverse_offload clause.
469  OMP_REQ_REVERSE_OFFLOAD = 0x002,
470  /// unified_address clause.
471  OMP_REQ_UNIFIED_ADDRESS = 0x004,
472  /// unified_shared_memory clause.
473  OMP_REQ_UNIFIED_SHARED_MEMORY = 0x008,
474  /// dynamic_allocators clause.
475  OMP_REQ_DYNAMIC_ALLOCATORS = 0x010,
476  LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/OMP_REQ_DYNAMIC_ALLOCATORS)
477 };
478 
479 enum OpenMPOffloadingReservedDeviceIDs {
480  /// Device ID if the device was not defined, runtime should get it
481  /// from environment variables in the spec.
482  OMP_DEVICEID_UNDEF = -1,
483 };
484 } // anonymous namespace
485 
486 /// Describes ident structure that describes a source location.
487 /// All descriptions are taken from
488 /// https://github.com/llvm/llvm-project/blob/master/openmp/runtime/src/kmp.h
489 /// Original structure:
490 /// typedef struct ident {
491 /// kmp_int32 reserved_1; /**< might be used in Fortran;
492 /// see above */
493 /// kmp_int32 flags; /**< also f.flags; KMP_IDENT_xxx flags;
494 /// KMP_IDENT_KMPC identifies this union
495 /// member */
496 /// kmp_int32 reserved_2; /**< not really used in Fortran any more;
497 /// see above */
498 ///#if USE_ITT_BUILD
499 /// /* but currently used for storing
500 /// region-specific ITT */
501 /// /* contextual information. */
502 ///#endif /* USE_ITT_BUILD */
503 /// kmp_int32 reserved_3; /**< source[4] in Fortran, do not use for
504 /// C++ */
505 /// char const *psource; /**< String describing the source location.
506 /// The string is composed of semi-colon separated
507 /// fields which describe the source file,
508 /// the function and a pair of line numbers that
509 /// delimit the construct.
510 /// */
511 /// } ident_t;
512 enum IdentFieldIndex {
513  /// might be used in Fortran
514  IdentField_Reserved_1,
515  /// OMP_IDENT_xxx flags; OMP_IDENT_KMPC identifies this union member.
516  IdentField_Flags,
517  /// Not really used in Fortran any more
518  IdentField_Reserved_2,
519  /// Source[4] in Fortran, do not use for C++
520  IdentField_Reserved_3,
521  /// String describing the source location. The string is composed of
522  /// semi-colon separated fields which describe the source file, the function
523  /// and a pair of line numbers that delimit the construct.
524  IdentField_PSource
525 };
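// Illustration (editorial, not in the upstream file): the psource field holds
// a ";file;function;line;column;;" string; getOrCreateDefaultLocation() below
// falls back to ";unknown;unknown;0;0;;" when no debug info is emitted. With
// debug info, a construct at line 10, column 3 of foo.c inside bar() would be
// described as ";foo.c;bar;10;3;;" (illustrative values).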
526 
527 /// Schedule types for 'omp for' loops (these enumerators are taken from
528 /// the enum sched_type in kmp.h).
529 enum OpenMPSchedType {
530  /// Lower bound for default (unordered) versions.
531  OMP_sch_lower = 32,
532  OMP_sch_static_chunked = 33,
533  OMP_sch_static = 34,
534  OMP_sch_dynamic_chunked = 35,
535  OMP_sch_guided_chunked = 36,
536  OMP_sch_runtime = 37,
537  OMP_sch_auto = 38,
538  /// static with chunk adjustment (e.g., simd)
539  OMP_sch_static_balanced_chunked = 45,
540  /// Lower bound for 'ordered' versions.
541  OMP_ord_lower = 64,
542  OMP_ord_static_chunked = 65,
543  OMP_ord_static = 66,
544  OMP_ord_dynamic_chunked = 67,
545  OMP_ord_guided_chunked = 68,
546  OMP_ord_runtime = 69,
547  OMP_ord_auto = 70,
548  OMP_sch_default = OMP_sch_static,
549  /// dist_schedule types
550  OMP_dist_sch_static_chunked = 91,
551  OMP_dist_sch_static = 92,
552  /// Support for OpenMP 4.5 monotonic and nonmonotonic schedule modifiers.
553  /// Set if the monotonic schedule modifier was present.
554  OMP_sch_modifier_monotonic = (1 << 29),
555  /// Set if the nonmonotonic schedule modifier was present.
556  OMP_sch_modifier_nonmonotonic = (1 << 30),
557 };
558 
559 enum OpenMPRTLFunction {
560  /// Call to void __kmpc_fork_call(ident_t *loc, kmp_int32 argc,
561  /// kmpc_micro microtask, ...);
562  OMPRTL__kmpc_fork_call,
563  /// Call to void *__kmpc_threadprivate_cached(ident_t *loc,
564  /// kmp_int32 global_tid, void *data, size_t size, void ***cache);
565  OMPRTL__kmpc_threadprivate_cached,
566  /// Call to void __kmpc_threadprivate_register( ident_t *,
567  /// void *data, kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor);
568  OMPRTL__kmpc_threadprivate_register,
569  // Call to __kmpc_int32 kmpc_global_thread_num(ident_t *loc);
570  OMPRTL__kmpc_global_thread_num,
571  // Call to void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
572  // kmp_critical_name *crit);
573  OMPRTL__kmpc_critical,
574  // Call to void __kmpc_critical_with_hint(ident_t *loc, kmp_int32
575  // global_tid, kmp_critical_name *crit, uintptr_t hint);
576  OMPRTL__kmpc_critical_with_hint,
577  // Call to void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid,
578  // kmp_critical_name *crit);
579  OMPRTL__kmpc_end_critical,
580  // Call to kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32
581  // global_tid);
582  OMPRTL__kmpc_cancel_barrier,
583  // Call to void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
584  OMPRTL__kmpc_barrier,
585  // Call to void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);
586  OMPRTL__kmpc_for_static_fini,
587  // Call to void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
588  // global_tid);
589  OMPRTL__kmpc_serialized_parallel,
590  // Call to void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
591  // global_tid);
592  OMPRTL__kmpc_end_serialized_parallel,
593  // Call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
594  // kmp_int32 num_threads);
595  OMPRTL__kmpc_push_num_threads,
596  // Call to void __kmpc_flush(ident_t *loc);
597  OMPRTL__kmpc_flush,
598  // Call to kmp_int32 __kmpc_master(ident_t *, kmp_int32 global_tid);
599  OMPRTL__kmpc_master,
600  // Call to void __kmpc_end_master(ident_t *, kmp_int32 global_tid);
601  OMPRTL__kmpc_end_master,
602  // Call to kmp_int32 __kmpc_omp_taskyield(ident_t *, kmp_int32 global_tid,
603  // int end_part);
604  OMPRTL__kmpc_omp_taskyield,
605  // Call to kmp_int32 __kmpc_single(ident_t *, kmp_int32 global_tid);
606  OMPRTL__kmpc_single,
607  // Call to void __kmpc_end_single(ident_t *, kmp_int32 global_tid);
608  OMPRTL__kmpc_end_single,
609  // Call to kmp_task_t * __kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
610  // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
611  // kmp_routine_entry_t *task_entry);
612  OMPRTL__kmpc_omp_task_alloc,
613  // Call to kmp_task_t * __kmpc_omp_target_task_alloc(ident_t *,
614  // kmp_int32 gtid, kmp_int32 flags, size_t sizeof_kmp_task_t,
615  // size_t sizeof_shareds, kmp_routine_entry_t *task_entry,
616  // kmp_int64 device_id);
617  OMPRTL__kmpc_omp_target_task_alloc,
618  // Call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t *
619  // new_task);
620  OMPRTL__kmpc_omp_task,
621  // Call to void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
622  // size_t cpy_size, void *cpy_data, void(*cpy_func)(void *, void *),
623  // kmp_int32 didit);
624  OMPRTL__kmpc_copyprivate,
625  // Call to kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid,
626  // kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void
627  // (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck);
628  OMPRTL__kmpc_reduce,
629  // Call to kmp_int32 __kmpc_reduce_nowait(ident_t *loc, kmp_int32
630  // global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
631  // void (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name
632  // *lck);
633  OMPRTL__kmpc_reduce_nowait,
634  // Call to void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
635  // kmp_critical_name *lck);
636  OMPRTL__kmpc_end_reduce,
637  // Call to void __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
638  // kmp_critical_name *lck);
639  OMPRTL__kmpc_end_reduce_nowait,
640  // Call to void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid,
641  // kmp_task_t * new_task);
642  OMPRTL__kmpc_omp_task_begin_if0,
643  // Call to void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid,
644  // kmp_task_t * new_task);
645  OMPRTL__kmpc_omp_task_complete_if0,
646  // Call to void __kmpc_ordered(ident_t *loc, kmp_int32 global_tid);
647  OMPRTL__kmpc_ordered,
648  // Call to void __kmpc_end_ordered(ident_t *loc, kmp_int32 global_tid);
649  OMPRTL__kmpc_end_ordered,
650  // Call to kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
651  // global_tid);
652  OMPRTL__kmpc_omp_taskwait,
653  // Call to void __kmpc_taskgroup(ident_t *loc, kmp_int32 global_tid);
654  OMPRTL__kmpc_taskgroup,
655  // Call to void __kmpc_end_taskgroup(ident_t *loc, kmp_int32 global_tid);
656  OMPRTL__kmpc_end_taskgroup,
657  // Call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
658  // int proc_bind);
659  OMPRTL__kmpc_push_proc_bind,
660  // Call to kmp_int32 __kmpc_omp_task_with_deps(ident_t *loc_ref, kmp_int32
661  // gtid, kmp_task_t * new_task, kmp_int32 ndeps, kmp_depend_info_t
662  // *dep_list, kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
663  OMPRTL__kmpc_omp_task_with_deps,
664  // Call to void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32
665  // gtid, kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32
666  // ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
667  OMPRTL__kmpc_omp_wait_deps,
668  // Call to kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
669  // global_tid, kmp_int32 cncl_kind);
670  OMPRTL__kmpc_cancellationpoint,
671  // Call to kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
672  // kmp_int32 cncl_kind);
673  OMPRTL__kmpc_cancel,
674  // Call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid,
675  // kmp_int32 num_teams, kmp_int32 thread_limit);
676  OMPRTL__kmpc_push_num_teams,
677  // Call to void __kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro
678  // microtask, ...);
679  OMPRTL__kmpc_fork_teams,
680  // Call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int
681  // if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int
682  // sched, kmp_uint64 grainsize, void *task_dup);
683  OMPRTL__kmpc_taskloop,
684  // Call to void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid, kmp_int32
685  // num_dims, struct kmp_dim *dims);
686  OMPRTL__kmpc_doacross_init,
687  // Call to void __kmpc_doacross_fini(ident_t *loc, kmp_int32 gtid);
688  OMPRTL__kmpc_doacross_fini,
689  // Call to void __kmpc_doacross_post(ident_t *loc, kmp_int32 gtid, kmp_int64
690  // *vec);
691  OMPRTL__kmpc_doacross_post,
692  // Call to void __kmpc_doacross_wait(ident_t *loc, kmp_int32 gtid, kmp_int64
693  // *vec);
694  OMPRTL__kmpc_doacross_wait,
695  // Call to void *__kmpc_task_reduction_init(int gtid, int num_data, void
696  // *data);
697  OMPRTL__kmpc_task_reduction_init,
698  // Call to void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void
699  // *d);
700  OMPRTL__kmpc_task_reduction_get_th_data,
701  // Call to void *__kmpc_alloc(int gtid, size_t sz, omp_allocator_handle_t al);
702  OMPRTL__kmpc_alloc,
703  // Call to void __kmpc_free(int gtid, void *ptr, omp_allocator_handle_t al);
704  OMPRTL__kmpc_free,
705 
706  //
707  // Offloading related calls
708  //
709  // Call to void __kmpc_push_target_tripcount(int64_t device_id, kmp_uint64
710  // size);
711  OMPRTL__kmpc_push_target_tripcount,
712  // Call to int32_t __tgt_target(int64_t device_id, void *host_ptr, int32_t
713  // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
714  // *arg_types);
715  OMPRTL__tgt_target,
716  // Call to int32_t __tgt_target_nowait(int64_t device_id, void *host_ptr,
717  // int32_t arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
718  // *arg_types);
719  OMPRTL__tgt_target_nowait,
720  // Call to int32_t __tgt_target_teams(int64_t device_id, void *host_ptr,
721  // int32_t arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
722  // *arg_types, int32_t num_teams, int32_t thread_limit);
723  OMPRTL__tgt_target_teams,
724  // Call to int32_t __tgt_target_teams_nowait(int64_t device_id, void
725  // *host_ptr, int32_t arg_num, void** args_base, void **args, size_t
726  // *arg_sizes, int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
727  OMPRTL__tgt_target_teams_nowait,
728  // Call to void __tgt_register_requires(int64_t flags);
729  OMPRTL__tgt_register_requires,
730  // Call to void __tgt_register_lib(__tgt_bin_desc *desc);
731  OMPRTL__tgt_register_lib,
732  // Call to void __tgt_unregister_lib(__tgt_bin_desc *desc);
733  OMPRTL__tgt_unregister_lib,
734  // Call to void __tgt_target_data_begin(int64_t device_id, int32_t arg_num,
735  // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
736  OMPRTL__tgt_target_data_begin,
737  // Call to void __tgt_target_data_begin_nowait(int64_t device_id, int32_t
738  // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
739  // *arg_types);
740  OMPRTL__tgt_target_data_begin_nowait,
741  // Call to void __tgt_target_data_end(int64_t device_id, int32_t arg_num,
742  // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
743  OMPRTL__tgt_target_data_end,
744  // Call to void __tgt_target_data_end_nowait(int64_t device_id, int32_t
745  // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
746  // *arg_types);
747  OMPRTL__tgt_target_data_end_nowait,
748  // Call to void __tgt_target_data_update(int64_t device_id, int32_t arg_num,
749  // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
750  OMPRTL__tgt_target_data_update,
751  // Call to void __tgt_target_data_update_nowait(int64_t device_id, int32_t
752  // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
753  // *arg_types);
754  OMPRTL__tgt_target_data_update_nowait,
755 };
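// Illustration (editorial, not in the upstream file): each enumerator selects
// a libomp/libomptarget entry point. The callee is created lazily and calls
// are emitted with the pattern already used for __kmpc_omp_task further down
// in this file:
//
//   CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_task),
//                       TaskArgs);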
756 
757 /// A basic class for pre|post-action for advanced codegen sequence for OpenMP
758 /// region.
759 class CleanupTy final : public EHScopeStack::Cleanup {
760  PrePostActionTy *Action;
761 
762 public:
763  explicit CleanupTy(PrePostActionTy *Action) : Action(Action) {}
764  void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
765  if (!CGF.HaveInsertPoint())
766  return;
767  Action->Exit(CGF);
768  }
769 };
770 
771 } // anonymous namespace
772 
773 void RegionCodeGenTy::operator()(CodeGenFunction &CGF) const {
774  CodeGenFunction::RunCleanupsScope Scope(CGF);
775  if (PrePostAction) {
776  CGF.EHStack.pushCleanup<CleanupTy>(NormalAndEHCleanup, PrePostAction);
777  Callback(CodeGen, CGF, *PrePostAction);
778  } else {
779  PrePostActionTy Action;
780  Callback(CodeGen, CGF, Action);
781  }
782 }
783 
784 /// Check if the combiner is a call to UDR combiner and if it is so return the
785 /// UDR decl used for reduction.
786 static const OMPDeclareReductionDecl *
787 getReductionInit(const Expr *ReductionOp) {
788  if (const auto *CE = dyn_cast<CallExpr>(ReductionOp))
789  if (const auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
790  if (const auto *DRE =
791  dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts()))
792  if (const auto *DRD = dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl()))
793  return DRD;
794  return nullptr;
795 }
796 
797 static void emitInitWithReductionInitializer(CodeGenFunction &CGF,
798  const OMPDeclareReductionDecl *DRD,
799  const Expr *InitOp,
800  Address Private, Address Original,
801  QualType Ty) {
802  if (DRD->getInitializer()) {
803  std::pair<llvm::Function *, llvm::Function *> Reduction =
804  CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD);
805  const auto *CE = cast<CallExpr>(InitOp);
806  const auto *OVE = cast<OpaqueValueExpr>(CE->getCallee());
807  const Expr *LHS = CE->getArg(/*Arg=*/0)->IgnoreParenImpCasts();
808  const Expr *RHS = CE->getArg(/*Arg=*/1)->IgnoreParenImpCasts();
809  const auto *LHSDRE =
810  cast<DeclRefExpr>(cast<UnaryOperator>(LHS)->getSubExpr());
811  const auto *RHSDRE =
812  cast<DeclRefExpr>(cast<UnaryOperator>(RHS)->getSubExpr());
813  CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
814  PrivateScope.addPrivate(cast<VarDecl>(LHSDRE->getDecl()),
815  [=]() { return Private; });
816  PrivateScope.addPrivate(cast<VarDecl>(RHSDRE->getDecl()),
817  [=]() { return Original; });
818  (void)PrivateScope.Privatize();
819  RValue Func = RValue::get(Reduction.second);
820  CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func);
821  CGF.EmitIgnoredExpr(InitOp);
822  } else {
823  llvm::Constant *Init = CGF.CGM.EmitNullConstant(Ty);
824  std::string Name = CGF.CGM.getOpenMPRuntime().getName({"init"});
825  auto *GV = new llvm::GlobalVariable(
826  CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
827  llvm::GlobalValue::PrivateLinkage, Init, Name);
828  LValue LV = CGF.MakeNaturalAlignAddrLValue(GV, Ty);
829  RValue InitRVal;
830  switch (CGF.getEvaluationKind(Ty)) {
831  case TEK_Scalar:
832  InitRVal = CGF.EmitLoadOfLValue(LV, DRD->getLocation());
833  break;
834  case TEK_Complex:
835  InitRVal =
836  RValue::getComplex(CGF.EmitLoadOfComplex(LV, DRD->getLocation()));
837  break;
838  case TEK_Aggregate:
839  InitRVal = RValue::getAggregate(LV.getAddress());
840  break;
841  }
842  OpaqueValueExpr OVE(DRD->getLocation(), Ty, VK_RValue);
843  CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE, InitRVal);
844  CGF.EmitAnyExprToMem(&OVE, Private, Ty.getQualifiers(),
845  /*IsInitializer=*/false);
846  }
847 }
848 
849 /// Emit initialization of arrays of complex types.
850 /// \param DestAddr Address of the array.
851 /// \param Type Type of array.
852 /// \param Init Initial expression of array.
853 /// \param SrcAddr Address of the original array.
854 static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
855  QualType Type, bool EmitDeclareReductionInit,
856  const Expr *Init,
857  const OMPDeclareReductionDecl *DRD,
858  Address SrcAddr = Address::invalid()) {
859  // Perform element-by-element initialization.
860  QualType ElementTy;
861 
862  // Drill down to the base element type on both arrays.
863  const ArrayType *ArrayTy = Type->getAsArrayTypeUnsafe();
864  llvm::Value *NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, DestAddr);
865  DestAddr =
866  CGF.Builder.CreateElementBitCast(DestAddr, DestAddr.getElementType());
867  if (DRD)
868  SrcAddr =
869  CGF.Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());
870 
871  llvm::Value *SrcBegin = nullptr;
872  if (DRD)
873  SrcBegin = SrcAddr.getPointer();
874  llvm::Value *DestBegin = DestAddr.getPointer();
875  // Cast from pointer to array type to pointer to single element.
876  llvm::Value *DestEnd = CGF.Builder.CreateGEP(DestBegin, NumElements);
877  // The basic structure here is a while-do loop.
878  llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.arrayinit.body");
879  llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.arrayinit.done");
880  llvm::Value *IsEmpty =
881  CGF.Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arrayinit.isempty");
882  CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
883 
884  // Enter the loop body, making that address the current address.
885  llvm::BasicBlock *EntryBB = CGF.Builder.GetInsertBlock();
886  CGF.EmitBlock(BodyBB);
887 
888  CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);
889 
890  llvm::PHINode *SrcElementPHI = nullptr;
891  Address SrcElementCurrent = Address::invalid();
892  if (DRD) {
893  SrcElementPHI = CGF.Builder.CreatePHI(SrcBegin->getType(), 2,
894  "omp.arraycpy.srcElementPast");
895  SrcElementPHI->addIncoming(SrcBegin, EntryBB);
896  SrcElementCurrent =
897  Address(SrcElementPHI,
898  SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));
899  }
900  llvm::PHINode *DestElementPHI = CGF.Builder.CreatePHI(
901  DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
902  DestElementPHI->addIncoming(DestBegin, EntryBB);
903  Address DestElementCurrent =
904  Address(DestElementPHI,
905  DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));
906 
907  // Emit copy.
908  {
909  CodeGenFunction::RunCleanupsScope InitScope(CGF);
910  if (EmitDeclareReductionInit) {
911  emitInitWithReductionInitializer(CGF, DRD, Init, DestElementCurrent,
912  SrcElementCurrent, ElementTy);
913  } else
914  CGF.EmitAnyExprToMem(Init, DestElementCurrent, ElementTy.getQualifiers(),
915  /*IsInitializer=*/false);
916  }
917 
918  if (DRD) {
919  // Shift the address forward by one element.
920  llvm::Value *SrcElementNext = CGF.Builder.CreateConstGEP1_32(
921  SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
922  SrcElementPHI->addIncoming(SrcElementNext, CGF.Builder.GetInsertBlock());
923  }
924 
925  // Shift the address forward by one element.
926  llvm::Value *DestElementNext = CGF.Builder.CreateConstGEP1_32(
927  DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
928  // Check whether we've reached the end.
929  llvm::Value *Done =
930  CGF.Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
931  CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
932  DestElementPHI->addIncoming(DestElementNext, CGF.Builder.GetInsertBlock());
933 
934  // Done.
935  CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
936 }
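// Illustration (editorial, not in the upstream file): the blocks emitted above
// ("omp.arrayinit.body"/"omp.arrayinit.done") correspond roughly to this C
// sketch, where init() is either the declare-reduction initializer (when DRD
// is non-null) or the plain element initializer:
//
//   T *dest = DestBegin, *end = DestBegin + NumElements, *src = SrcBegin;
//   if (dest != end) {                       // "omp.arrayinit.isempty"
//     do {
//       init(*dest /*, *src when DRD */);
//       ++dest; if (DRD) ++src;
//     } while (dest != end);                 // "omp.arraycpy.done" check
//   }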
937 
938 LValue ReductionCodeGen::emitSharedLValue(CodeGenFunction &CGF, const Expr *E) {
939  return CGF.EmitOMPSharedLValue(E);
940 }
941 
942 LValue ReductionCodeGen::emitSharedLValueUB(CodeGenFunction &CGF,
943  const Expr *E) {
944  if (const auto *OASE = dyn_cast<OMPArraySectionExpr>(E))
945  return CGF.EmitOMPArraySectionExpr(OASE, /*IsLowerBound=*/false);
946  return LValue();
947 }
948 
949 void ReductionCodeGen::emitAggregateInitialization(
950  CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal,
951  const OMPDeclareReductionDecl *DRD) {
952  // Emit VarDecl with copy init for arrays.
953  // Get the address of the original variable captured in current
954  // captured region.
955  const auto *PrivateVD =
956  cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
957  bool EmitDeclareReductionInit =
958  DRD && (DRD->getInitializer() || !PrivateVD->hasInit());
959  EmitOMPAggregateInit(CGF, PrivateAddr, PrivateVD->getType(),
960  EmitDeclareReductionInit,
961  EmitDeclareReductionInit ? ClausesData[N].ReductionOp
962  : PrivateVD->getInit(),
963  DRD, SharedLVal.getAddress());
964 }
965 
966 ReductionCodeGen::ReductionCodeGen(ArrayRef<const Expr *> Shareds,
967  ArrayRef<const Expr *> Privates,
968  ArrayRef<const Expr *> ReductionOps) {
969  ClausesData.reserve(Shareds.size());
970  SharedAddresses.reserve(Shareds.size());
971  Sizes.reserve(Shareds.size());
972  BaseDecls.reserve(Shareds.size());
973  auto IPriv = Privates.begin();
974  auto IRed = ReductionOps.begin();
975  for (const Expr *Ref : Shareds) {
976  ClausesData.emplace_back(Ref, *IPriv, *IRed);
977  std::advance(IPriv, 1);
978  std::advance(IRed, 1);
979  }
980 }
981 
982 void ReductionCodeGen::emitSharedLValue(CodeGenFunction &CGF, unsigned N) {
983  assert(SharedAddresses.size() == N &&
984  "Number of generated lvalues must be exactly N.");
985  LValue First = emitSharedLValue(CGF, ClausesData[N].Ref);
986  LValue Second = emitSharedLValueUB(CGF, ClausesData[N].Ref);
987  SharedAddresses.emplace_back(First, Second);
988 }
989 
990 void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N) {
991  const auto *PrivateVD =
992  cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
993  QualType PrivateType = PrivateVD->getType();
994  bool AsArraySection = isa<OMPArraySectionExpr>(ClausesData[N].Ref);
995  if (!PrivateType->isVariablyModifiedType()) {
996  Sizes.emplace_back(
997  CGF.getTypeSize(
998  SharedAddresses[N].first.getType().getNonReferenceType()),
999  nullptr);
1000  return;
1001  }
1002  llvm::Value *Size;
1003  llvm::Value *SizeInChars;
1004  auto *ElemType =
1005  cast<llvm::PointerType>(SharedAddresses[N].first.getPointer()->getType())
1006  ->getElementType();
1007  auto *ElemSizeOf = llvm::ConstantExpr::getSizeOf(ElemType);
1008  if (AsArraySection) {
1009  Size = CGF.Builder.CreatePtrDiff(SharedAddresses[N].second.getPointer(),
1010  SharedAddresses[N].first.getPointer());
1011  Size = CGF.Builder.CreateNUWAdd(
1012  Size, llvm::ConstantInt::get(Size->getType(), /*V=*/1));
1013  SizeInChars = CGF.Builder.CreateNUWMul(Size, ElemSizeOf);
1014  } else {
1015  SizeInChars = CGF.getTypeSize(
1016  SharedAddresses[N].first.getType().getNonReferenceType());
1017  Size = CGF.Builder.CreateExactUDiv(SizeInChars, ElemSizeOf);
1018  }
1019  Sizes.emplace_back(SizeInChars, Size);
1020  CodeGenFunction::OpaqueValueMapping OpaqueMap(
1021  CGF,
1022  cast<OpaqueValueExpr>(
1023  CGF.getContext().getAsVariableArrayType(PrivateType)->getSizeExpr()),
1024  RValue::get(Size));
1025  CGF.EmitVariablyModifiedType(PrivateType);
1026 }
1027 
1028 void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N,
1029  llvm::Value *Size) {
1030  const auto *PrivateVD =
1031  cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
1032  QualType PrivateType = PrivateVD->getType();
1033  if (!PrivateType->isVariablyModifiedType()) {
1034  assert(!Size && !Sizes[N].second &&
1035  "Size should be nullptr for non-variably modified reduction "
1036  "items.");
1037  return;
1038  }
1039  CodeGenFunction::OpaqueValueMapping OpaqueMap(
1040  CGF,
1041  cast<OpaqueValueExpr>(
1042  CGF.getContext().getAsVariableArrayType(PrivateType)->getSizeExpr()),
1043  RValue::get(Size));
1044  CGF.EmitVariablyModifiedType(PrivateType);
1045 }
1046 
1047 void ReductionCodeGen::emitInitialization(
1048  CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal,
1049  llvm::function_ref<bool(CodeGenFunction &)> DefaultInit) {
1050  assert(SharedAddresses.size() > N && "No variable was generated");
1051  const auto *PrivateVD =
1052  cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
1053  const OMPDeclareReductionDecl *DRD =
1054  getReductionInit(ClausesData[N].ReductionOp);
1055  QualType PrivateType = PrivateVD->getType();
1056  PrivateAddr = CGF.Builder.CreateElementBitCast(
1057  PrivateAddr, CGF.ConvertTypeForMem(PrivateType));
1058  QualType SharedType = SharedAddresses[N].first.getType();
1059  SharedLVal = CGF.MakeAddrLValue(
1060  CGF.Builder.CreateElementBitCast(SharedLVal.getAddress(),
1061  CGF.ConvertTypeForMem(SharedType)),
1062  SharedType, SharedAddresses[N].first.getBaseInfo(),
1063  CGF.CGM.getTBAAInfoForSubobject(SharedAddresses[N].first, SharedType));
1064  if (CGF.getContext().getAsArrayType(PrivateVD->getType())) {
1065  emitAggregateInitialization(CGF, N, PrivateAddr, SharedLVal, DRD);
1066  } else if (DRD && (DRD->getInitializer() || !PrivateVD->hasInit())) {
1067  emitInitWithReductionInitializer(CGF, DRD, ClausesData[N].ReductionOp,
1068  PrivateAddr, SharedLVal.getAddress(),
1069  SharedLVal.getType());
1070  } else if (!DefaultInit(CGF) && PrivateVD->hasInit() &&
1071  !CGF.isTrivialInitializer(PrivateVD->getInit())) {
1072  CGF.EmitAnyExprToMem(PrivateVD->getInit(), PrivateAddr,
1073  PrivateVD->getType().getQualifiers(),
1074  /*IsInitializer=*/false);
1075  }
1076 }
1077 
1078 bool ReductionCodeGen::needCleanups(unsigned N) {
1079  const auto *PrivateVD =
1080  cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
1081  QualType PrivateType = PrivateVD->getType();
1082  QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
1083  return DTorKind != QualType::DK_none;
1084 }
1085 
1086 void ReductionCodeGen::emitCleanups(CodeGenFunction &CGF, unsigned N,
1087  Address PrivateAddr) {
1088  const auto *PrivateVD =
1089  cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
1090  QualType PrivateType = PrivateVD->getType();
1091  QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
1092  if (needCleanups(N)) {
1093  PrivateAddr = CGF.Builder.CreateElementBitCast(
1094  PrivateAddr, CGF.ConvertTypeForMem(PrivateType));
1095  CGF.pushDestroy(DTorKind, PrivateAddr, PrivateType);
1096  }
1097 }
1098 
1099 static LValue loadToBegin(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
1100  LValue BaseLV) {
1101  BaseTy = BaseTy.getNonReferenceType();
1102  while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
1103  !CGF.getContext().hasSameType(BaseTy, ElTy)) {
1104  if (const auto *PtrTy = BaseTy->getAs<PointerType>()) {
1105  BaseLV = CGF.EmitLoadOfPointerLValue(BaseLV.getAddress(), PtrTy);
1106  } else {
1107  LValue RefLVal = CGF.MakeAddrLValue(BaseLV.getAddress(), BaseTy);
1108  BaseLV = CGF.EmitLoadOfReferenceLValue(RefLVal);
1109  }
1110  BaseTy = BaseTy->getPointeeType();
1111  }
1112  return CGF.MakeAddrLValue(
1113  CGF.Builder.CreateElementBitCast(BaseLV.getAddress(),
1114  CGF.ConvertTypeForMem(ElTy)),
1115  BaseLV.getType(), BaseLV.getBaseInfo(),
1116  CGF.CGM.getTBAAInfoForSubobject(BaseLV, BaseLV.getType()));
1117 }
1118 
1119 static Address castToBase(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
1120  llvm::Type *BaseLVType, CharUnits BaseLVAlignment,
1121  llvm::Value *Addr) {
1122  Address Tmp = Address::invalid();
1123  Address TopTmp = Address::invalid();
1124  Address MostTopTmp = Address::invalid();
1125  BaseTy = BaseTy.getNonReferenceType();
1126  while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
1127  !CGF.getContext().hasSameType(BaseTy, ElTy)) {
1128  Tmp = CGF.CreateMemTemp(BaseTy);
1129  if (TopTmp.isValid())
1130  CGF.Builder.CreateStore(Tmp.getPointer(), TopTmp);
1131  else
1132  MostTopTmp = Tmp;
1133  TopTmp = Tmp;
1134  BaseTy = BaseTy->getPointeeType();
1135  }
1136  llvm::Type *Ty = BaseLVType;
1137  if (Tmp.isValid())
1138  Ty = Tmp.getElementType();
1139  Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, Ty);
1140  if (Tmp.isValid()) {
1141  CGF.Builder.CreateStore(Addr, Tmp);
1142  return MostTopTmp;
1143  }
1144  return Address(Addr, BaseLVAlignment);
1145 }
1146 
1147 static const VarDecl *getBaseDecl(const Expr *Ref, const DeclRefExpr *&DE) {
1148  const VarDecl *OrigVD = nullptr;
1149  if (const auto *OASE = dyn_cast<OMPArraySectionExpr>(Ref)) {
1150  const Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
1151  while (const auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
1152  Base = TempOASE->getBase()->IgnoreParenImpCasts();
1153  while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
1154  Base = TempASE->getBase()->IgnoreParenImpCasts();
1155  DE = cast<DeclRefExpr>(Base);
1156  OrigVD = cast<VarDecl>(DE->getDecl());
1157  } else if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Ref)) {
1158  const Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
1159  while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
1160  Base = TempASE->getBase()->IgnoreParenImpCasts();
1161  DE = cast<DeclRefExpr>(Base);
1162  OrigVD = cast<VarDecl>(DE->getDecl());
1163  }
1164  return OrigVD;
1165 }
1166 
1167 Address ReductionCodeGen::adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
1168  Address PrivateAddr) {
1169  const DeclRefExpr *DE;
1170  if (const VarDecl *OrigVD = ::getBaseDecl(ClausesData[N].Ref, DE)) {
1171  BaseDecls.emplace_back(OrigVD);
1172  LValue OriginalBaseLValue = CGF.EmitLValue(DE);
1173  LValue BaseLValue =
1174  loadToBegin(CGF, OrigVD->getType(), SharedAddresses[N].first.getType(),
1175  OriginalBaseLValue);
1176  llvm::Value *Adjustment = CGF.Builder.CreatePtrDiff(
1177  BaseLValue.getPointer(), SharedAddresses[N].first.getPointer());
1178  llvm::Value *PrivatePointer =
1179  CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
1180  PrivateAddr.getPointer(),
1181  SharedAddresses[N].first.getAddress().getType());
1182  llvm::Value *Ptr = CGF.Builder.CreateGEP(PrivatePointer, Adjustment);
1183  return castToBase(CGF, OrigVD->getType(),
1184  SharedAddresses[N].first.getType(),
1185  OriginalBaseLValue.getAddress().getType(),
1186  OriginalBaseLValue.getAlignment(), Ptr);
1187  }
1188  BaseDecls.emplace_back(
1189  cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Ref)->getDecl()));
1190  return PrivateAddr;
1191 }
1192 
1193 bool ReductionCodeGen::usesReductionInitializer(unsigned N) const {
1194  const OMPDeclareReductionDecl *DRD =
1195  getReductionInit(ClausesData[N].ReductionOp);
1196  return DRD && DRD->getInitializer();
1197 }
1198 
1199 LValue CGOpenMPRegionInfo::getThreadIDVariableLValue(CodeGenFunction &CGF) {
1200  return CGF.EmitLoadOfPointerLValue(
1201  CGF.GetAddrOfLocalVar(getThreadIDVariable()),
1202  getThreadIDVariable()->getType()->castAs<PointerType>());
1203 }
1204 
1205 void CGOpenMPRegionInfo::EmitBody(CodeGenFunction &CGF, const Stmt * /*S*/) {
1206  if (!CGF.HaveInsertPoint())
1207  return;
1208  // 1.2.2 OpenMP Language Terminology
1209  // Structured block - An executable statement with a single entry at the
1210  // top and a single exit at the bottom.
1211  // The point of exit cannot be a branch out of the structured block.
1212  // longjmp() and throw() must not violate the entry/exit criteria.
1213  CGF.EHStack.pushTerminate();
1214  CodeGen(CGF);
1215  CGF.EHStack.popTerminate();
1216 }
1217 
1218 LValue CGOpenMPTaskOutlinedRegionInfo::getThreadIDVariableLValue(
1219  CodeGenFunction &CGF) {
1220  return CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(getThreadIDVariable()),
1221  getThreadIDVariable()->getType(),
1222  AlignmentSource::Decl);
1223 }
1224 
1225 static FieldDecl *addFieldToRecordDecl(ASTContext &C, DeclContext *DC,
1226  QualType FieldTy) {
1227  auto *Field = FieldDecl::Create(
1228  C, DC, SourceLocation(), SourceLocation(), /*Id=*/nullptr, FieldTy,
1229  C.getTrivialTypeSourceInfo(FieldTy, SourceLocation()),
1230  /*BW=*/nullptr, /*Mutable=*/false, /*InitStyle=*/ICIS_NoInit);
1231  Field->setAccess(AS_public);
1232  DC->addDecl(Field);
1233  return Field;
1234 }
1235 
1236 CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator,
1237  StringRef Separator)
1238  : CGM(CGM), FirstSeparator(FirstSeparator), Separator(Separator),
1239  OffloadEntriesInfoManager(CGM) {
1240  ASTContext &C = CGM.getContext();
1241  RecordDecl *RD = C.buildImplicitRecord("ident_t");
1242  QualType KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
1243  RD->startDefinition();
1244  // reserved_1
1245  addFieldToRecordDecl(C, RD, KmpInt32Ty);
1246  // flags
1247  addFieldToRecordDecl(C, RD, KmpInt32Ty);
1248  // reserved_2
1249  addFieldToRecordDecl(C, RD, KmpInt32Ty);
1250  // reserved_3
1251  addFieldToRecordDecl(C, RD, KmpInt32Ty);
1252  // psource
1253  addFieldToRecordDecl(C, RD, C.VoidPtrTy);
1254  RD->completeDefinition();
1255  IdentQTy = C.getRecordType(RD);
1256  IdentTy = CGM.getTypes().ConvertRecordDeclType(RD);
1257  KmpCriticalNameTy = llvm::ArrayType::get(CGM.Int32Ty, /*NumElements*/ 8);
1258 
1259  loadOffloadInfoMetadata();
1260 }
1261 
1262 void CGOpenMPRuntime::clear() {
1263  InternalVars.clear();
1264  // Clean non-target variable declarations possibly used only in debug info.
1265  for (const auto &Data : EmittedNonTargetVariables) {
1266  if (!Data.getValue().pointsToAliveValue())
1267  continue;
1268  auto *GV = dyn_cast<llvm::GlobalVariable>(Data.getValue());
1269  if (!GV)
1270  continue;
1271  if (!GV->isDeclaration() || GV->getNumUses() > 0)
1272  continue;
1273  GV->eraseFromParent();
1274  }
1275 }
1276 
1277 std::string CGOpenMPRuntime::getName(ArrayRef<StringRef> Parts) const {
1278  SmallString<128> Buffer;
1279  llvm::raw_svector_ostream OS(Buffer);
1280  StringRef Sep = FirstSeparator;
1281  for (StringRef Part : Parts) {
1282  OS << Sep << Part;
1283  Sep = Separator;
1284  }
1285  return OS.str();
1286 }
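// Illustration (editorial, not in the upstream file): getName() prefixes every
// part with a separator. Assuming both separators are "." (they are passed to
// the constructor above, so this is an assumption about the default host
// runtime), getName({"init"}) yields ".init" and
// getName({"omp_combiner", ""}) yields ".omp_combiner.", which matches the
// helper names created below for reduction initializers and combiners.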
1287 
1288 static llvm::Function *
1289 emitCombinerOrInitializer(CodeGenModule &CGM, QualType Ty,
1290  const Expr *CombinerInitializer, const VarDecl *In,
1291  const VarDecl *Out, bool IsCombiner) {
1292  // void .omp_combiner.(Ty *in, Ty *out);
1293  ASTContext &C = CGM.getContext();
1294  QualType PtrTy = C.getPointerType(Ty).withRestrict();
1295  FunctionArgList Args;
1296  ImplicitParamDecl OmpOutParm(C, /*DC=*/nullptr, Out->getLocation(),
1297  /*Id=*/nullptr, PtrTy, ImplicitParamDecl::Other);
1298  ImplicitParamDecl OmpInParm(C, /*DC=*/nullptr, In->getLocation(),
1299  /*Id=*/nullptr, PtrTy, ImplicitParamDecl::Other);
1300  Args.push_back(&OmpOutParm);
1301  Args.push_back(&OmpInParm);
1302  const CGFunctionInfo &FnInfo =
1303  CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
1304  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
1305  std::string Name = CGM.getOpenMPRuntime().getName(
1306  {IsCombiner ? "omp_combiner" : "omp_initializer", ""});
1307  auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
1308  Name, &CGM.getModule());
1309  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
1310  if (CGM.getLangOpts().Optimize) {
1311  Fn->removeFnAttr(llvm::Attribute::NoInline);
1312  Fn->removeFnAttr(llvm::Attribute::OptimizeNone);
1313  Fn->addFnAttr(llvm::Attribute::AlwaysInline);
1314  }
1315  CodeGenFunction CGF(CGM);
1316  // Map "T omp_in;" variable to "*omp_in_parm" value in all expressions.
1317  // Map "T omp_out;" variable to "*omp_out_parm" value in all expressions.
1318  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, In->getLocation(),
1319  Out->getLocation());
1320  CodeGenFunction::OMPPrivateScope Scope(CGF);
1321  Address AddrIn = CGF.GetAddrOfLocalVar(&OmpInParm);
1322  Scope.addPrivate(In, [&CGF, AddrIn, PtrTy]() {
1323  return CGF.EmitLoadOfPointerLValue(AddrIn, PtrTy->castAs<PointerType>())
1324  .getAddress();
1325  });
1326  Address AddrOut = CGF.GetAddrOfLocalVar(&OmpOutParm);
1327  Scope.addPrivate(Out, [&CGF, AddrOut, PtrTy]() {
1328  return CGF.EmitLoadOfPointerLValue(AddrOut, PtrTy->castAs<PointerType>())
1329  .getAddress();
1330  });
1331  (void)Scope.Privatize();
1332  if (!IsCombiner && Out->hasInit() &&
1333  !CGF.isTrivialInitializer(Out->getInit())) {
1334  CGF.EmitAnyExprToMem(Out->getInit(), CGF.GetAddrOfLocalVar(Out),
1335  Out->getType().getQualifiers(),
1336  /*IsInitializer=*/true);
1337  }
1338  if (CombinerInitializer)
1339  CGF.EmitIgnoredExpr(CombinerInitializer);
1340  Scope.ForceCleanup();
1341  CGF.FinishFunction();
1342  return Fn;
1343 }
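// Illustration (editorial, not in the upstream file): for a user-defined
// reduction such as
//
//   #pragma omp declare reduction(myplus : int : omp_out += omp_in) \
//       initializer(omp_priv = 0)
//
// emitCombinerOrInitializer() produces, roughly, two internal helpers of the
// shape (C-like pseudo-code; the real symbols follow the getName() scheme):
//
//   void .omp_combiner.(int *omp_out, int *omp_in)       { *omp_out += *omp_in; }
//   void .omp_initializer.(int *omp_priv, int *omp_orig) { *omp_priv = 0; }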
1344 
1345 void CGOpenMPRuntime::emitUserDefinedReduction(
1346  CodeGenFunction *CGF, const OMPDeclareReductionDecl *D) {
1347  if (UDRMap.count(D) > 0)
1348  return;
1349  llvm::Function *Combiner = emitCombinerOrInitializer(
1350  CGM, D->getType(), D->getCombiner(),
1351  cast<VarDecl>(cast<DeclRefExpr>(D->getCombinerIn())->getDecl()),
1352  cast<VarDecl>(cast<DeclRefExpr>(D->getCombinerOut())->getDecl()),
1353  /*IsCombiner=*/true);
1354  llvm::Function *Initializer = nullptr;
1355  if (const Expr *Init = D->getInitializer()) {
1356  Initializer = emitCombinerOrInitializer(
1357  CGM, D->getType(),
1358  D->getInitializerKind() == OMPDeclareReductionDecl::CallInit ? Init
1359  : nullptr,
1360  cast<VarDecl>(cast<DeclRefExpr>(D->getInitOrig())->getDecl()),
1361  cast<VarDecl>(cast<DeclRefExpr>(D->getInitPriv())->getDecl()),
1362  /*IsCombiner=*/false);
1363  }
1364  UDRMap.try_emplace(D, Combiner, Initializer);
1365  if (CGF) {
1366  auto &Decls = FunctionUDRMap.FindAndConstruct(CGF->CurFn);
1367  Decls.second.push_back(D);
1368  }
1369 }
1370 
1371 std::pair<llvm::Function *, llvm::Function *>
1372 CGOpenMPRuntime::getUserDefinedReduction(const OMPDeclareReductionDecl *D) {
1373  auto I = UDRMap.find(D);
1374  if (I != UDRMap.end())
1375  return I->second;
1376  emitUserDefinedReduction(/*CGF=*/nullptr, D);
1377  return UDRMap.lookup(D);
1378 }
1379 
1380 static llvm::Function *emitParallelOrTeamsOutlinedFunction(
1381  CodeGenModule &CGM, const OMPExecutableDirective &D, const CapturedStmt *CS,
1382  const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
1383  const StringRef OutlinedHelperName, const RegionCodeGenTy &CodeGen) {
1384  assert(ThreadIDVar->getType()->isPointerType() &&
1385  "thread id variable must be of type kmp_int32 *");
1386  CodeGenFunction CGF(CGM, true);
1387  bool HasCancel = false;
1388  if (const auto *OPD = dyn_cast<OMPParallelDirective>(&D))
1389  HasCancel = OPD->hasCancel();
1390  else if (const auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&D))
1391  HasCancel = OPSD->hasCancel();
1392  else if (const auto *OPFD = dyn_cast<OMPParallelForDirective>(&D))
1393  HasCancel = OPFD->hasCancel();
1394  else if (const auto *OPFD = dyn_cast<OMPTargetParallelForDirective>(&D))
1395  HasCancel = OPFD->hasCancel();
1396  else if (const auto *OPFD = dyn_cast<OMPDistributeParallelForDirective>(&D))
1397  HasCancel = OPFD->hasCancel();
1398  else if (const auto *OPFD =
1399  dyn_cast<OMPTeamsDistributeParallelForDirective>(&D))
1400  HasCancel = OPFD->hasCancel();
1401  else if (const auto *OPFD =
1402  dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(&D))
1403  HasCancel = OPFD->hasCancel();
1404  CGOpenMPOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen, InnermostKind,
1405  HasCancel, OutlinedHelperName);
1406  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
1407  return CGF.GenerateOpenMPCapturedStmtFunction(*CS);
1408 }
1409 
1410 llvm::Function *CGOpenMPRuntime::emitParallelOutlinedFunction(
1411  const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1412  OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
1413  const CapturedStmt *CS = D.getCapturedStmt(OMPD_parallel);
1414  return emitParallelOrTeamsOutlinedFunction(
1415  CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen);
1416 }
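// Illustration (editorial, not in the upstream file): the function returned
// here is the microtask that is later passed to __kmpc_fork_call (see the
// OMPRTL__kmpc_fork_call entry above). Roughly, for
//
//   #pragma omp parallel
//   { body; }
//
// the host code has the shape
//
//   void .omp_outlined.(kmp_int32 *gtid, kmp_int32 *bound_tid, <captures>) {
//     body;
//   }
//   __kmpc_fork_call(&loc, /*argc=*/N, (kmpc_micro)&.omp_outlined., <captures>);
//
// (a sketch; the actual call is emitted elsewhere in this file).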
1417 
1418 llvm::Function *CGOpenMPRuntime::emitTeamsOutlinedFunction(
1419  const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1420  OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
1421  const CapturedStmt *CS = D.getCapturedStmt(OMPD_teams);
1422  return emitParallelOrTeamsOutlinedFunction(
1423  CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen);
1424 }
1425 
1426 llvm::Function *CGOpenMPRuntime::emitTaskOutlinedFunction(
1427  const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1428  const VarDecl *PartIDVar, const VarDecl *TaskTVar,
1429  OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
1430  bool Tied, unsigned &NumberOfParts) {
1431  auto &&UntiedCodeGen = [this, &D, TaskTVar](CodeGenFunction &CGF,
1432  PrePostActionTy &) {
1433  llvm::Value *ThreadID = getThreadID(CGF, D.getBeginLoc());
1434  llvm::Value *UpLoc = emitUpdateLocation(CGF, D.getBeginLoc());
1435  llvm::Value *TaskArgs[] = {
1436  UpLoc, ThreadID,
1437  CGF.EmitLoadOfPointerLValue(CGF.GetAddrOfLocalVar(TaskTVar),
1438  TaskTVar->getType()->castAs<PointerType>())
1439  .getPointer()};
1440  CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_task), TaskArgs);
1441  };
1442  CGOpenMPTaskOutlinedRegionInfo::UntiedTaskActionTy Action(Tied, PartIDVar,
1443  UntiedCodeGen);
1444  CodeGen.setAction(Action);
1445  assert(!ThreadIDVar->getType()->isPointerType() &&
1446  "thread id variable must be of type kmp_int32 for tasks");
1447  const OpenMPDirectiveKind Region =
1448  isOpenMPTaskLoopDirective(D.getDirectiveKind()) ? OMPD_taskloop
1449  : OMPD_task;
1450  const CapturedStmt *CS = D.getCapturedStmt(Region);
1451  const auto *TD = dyn_cast<OMPTaskDirective>(&D);
1452  CodeGenFunction CGF(CGM, true);
1453  CGOpenMPTaskOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen,
1454  InnermostKind,
1455  TD ? TD->hasCancel() : false, Action);
1456  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
1457  llvm::Function *Res = CGF.GenerateCapturedStmtFunction(*CS);
1458  if (!Tied)
1459  NumberOfParts = Action.getNumberOfParts();
1460  return Res;
1461 }
1462 
1463 static void buildStructValue(ConstantStructBuilder &Fields, CodeGenModule &CGM,
1464  const RecordDecl *RD, const CGRecordLayout &RL,
1465  ArrayRef<llvm::Constant *> Data) {
1466  llvm::StructType *StructTy = RL.getLLVMType();
1467  unsigned PrevIdx = 0;
1468  ConstantInitBuilder CIBuilder(CGM);
1469  auto DI = Data.begin();
1470  for (const FieldDecl *FD : RD->fields()) {
1471  unsigned Idx = RL.getLLVMFieldNo(FD);
1472  // Fill the alignment.
1473  for (unsigned I = PrevIdx; I < Idx; ++I)
1474  Fields.add(llvm::Constant::getNullValue(StructTy->getElementType(I)));
1475  PrevIdx = Idx + 1;
1476  Fields.add(*DI);
1477  ++DI;
1478  }
1479 }
1480 
1481 template <class... As>
1482 static llvm::GlobalVariable *
1483 createGlobalStruct(CodeGenModule &CGM, QualType Ty, bool IsConstant,
1484  ArrayRef<llvm::Constant *> Data, const Twine &Name,
1485  As &&... Args) {
1486  const auto *RD = cast<RecordDecl>(Ty->getAsTagDecl());
1487  const CGRecordLayout &RL = CGM.getTypes().getCGRecordLayout(RD);
1488  ConstantInitBuilder CIBuilder(CGM);
1489  ConstantStructBuilder Fields = CIBuilder.beginStruct(RL.getLLVMType());
1490  buildStructValue(Fields, CGM, RD, RL, Data);
1491  return Fields.finishAndCreateGlobal(
1492  Name, CGM.getContext().getAlignOfGlobalVarInChars(Ty), IsConstant,
1493  std::forward<As>(Args)...);
1494 }
1495 
1496 template <typename T>
1497 static void
1498 createConstantGlobalStructAndAddToParent(CodeGenModule &CGM, QualType Ty,
1499  ArrayRef<llvm::Constant *> Data,
1500  T &Parent) {
1501  const auto *RD = cast<RecordDecl>(Ty->getAsTagDecl());
1502  const CGRecordLayout &RL = CGM.getTypes().getCGRecordLayout(RD);
1503  ConstantStructBuilder Fields = Parent.beginStruct(RL.getLLVMType());
1504  buildStructValue(Fields, CGM, RD, RL, Data);
1505  Fields.finishAndAddTo(Parent);
1506 }
1507 
1508 Address CGOpenMPRuntime::getOrCreateDefaultLocation(unsigned Flags) {
1509  CharUnits Align = CGM.getContext().getTypeAlignInChars(IdentQTy);
1510  unsigned Reserved2Flags = getDefaultLocationReserved2Flags();
1511  FlagsTy FlagsKey(Flags, Reserved2Flags);
1512  llvm::Value *Entry = OpenMPDefaultLocMap.lookup(FlagsKey);
1513  if (!Entry) {
1514  if (!DefaultOpenMPPSource) {
1515  // Initialize default location for psource field of ident_t structure of
1516  // all ident_t objects. Format is ";file;function;line;column;;".
1517  // Taken from
1518  // https://github.com/llvm/llvm-project/blob/master/openmp/runtime/src/kmp_str.cpp
1519  DefaultOpenMPPSource =
1520  CGM.GetAddrOfConstantCString(";unknown;unknown;0;0;;").getPointer();
1521  DefaultOpenMPPSource =
1522  llvm::ConstantExpr::getBitCast(DefaultOpenMPPSource, CGM.Int8PtrTy);
1523  }
1524 
1525  llvm::Constant *Data[] = {
1526  llvm::ConstantInt::getNullValue(CGM.Int32Ty),
1527  llvm::ConstantInt::get(CGM.Int32Ty, Flags),
1528  llvm::ConstantInt::get(CGM.Int32Ty, Reserved2Flags),
1529  llvm::ConstantInt::getNullValue(CGM.Int32Ty), DefaultOpenMPPSource};
1530  llvm::GlobalValue *DefaultOpenMPLocation =
1531  createGlobalStruct(CGM, IdentQTy, isDefaultLocationConstant(), Data, "",
1532  llvm::GlobalValue::PrivateLinkage);
1533  DefaultOpenMPLocation->setUnnamedAddr(
1534  llvm::GlobalValue::UnnamedAddr::Global);
1535 
1536  OpenMPDefaultLocMap[FlagsKey] = Entry = DefaultOpenMPLocation;
1537  }
1538  return Address(Entry, Align);
1539 }
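// The default location built here is a private unnamed_addr global of type
// ident_t whose psource field points at ";unknown;unknown;0;0;;". As a rough
// C-level sketch (field names follow the kmp.h layout):
//
//   ident_t default_loc = { /*reserved_1=*/0, /*flags=*/Flags,
//                           /*reserved_2=*/Reserved2Flags, /*reserved_3=*/0,
//                           /*psource=*/";unknown;unknown;0;0;;" };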
1540 
1541 void CGOpenMPRuntime::setLocThreadIdInsertPt(CodeGenFunction &CGF,
1542  bool AtCurrentPoint) {
1543  auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
1544  assert(!Elem.second.ServiceInsertPt && "Insert point is set already.");
1545 
1546  llvm::Value *Undef = llvm::UndefValue::get(CGF.Int32Ty);
1547  if (AtCurrentPoint) {
1548  Elem.second.ServiceInsertPt = new llvm::BitCastInst(
1549  Undef, CGF.Int32Ty, "svcpt", CGF.Builder.GetInsertBlock());
1550  } else {
1551  Elem.second.ServiceInsertPt =
1552  new llvm::BitCastInst(Undef, CGF.Int32Ty, "svcpt");
1553  Elem.second.ServiceInsertPt->insertAfter(CGF.AllocaInsertPt);
1554  }
1555 }
1556 
1557 void CGOpenMPRuntime::clearLocThreadIdInsertPt(CodeGenFunction &CGF) {
1558  auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
1559  if (Elem.second.ServiceInsertPt) {
1560  llvm::Instruction *Ptr = Elem.second.ServiceInsertPt;
1561  Elem.second.ServiceInsertPt = nullptr;
1562  Ptr->eraseFromParent();
1563  }
1564 }
1565 
1566 llvm::Value *CGOpenMPRuntime::emitUpdateLocation(CodeGenFunction &CGF,
1567  SourceLocation Loc,
1568  unsigned Flags) {
1569  Flags |= OMP_IDENT_KMPC;
1570  // If no debug info is generated, return the global default location.
1571  if (CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo ||
1572  Loc.isInvalid())
1573  return getOrCreateDefaultLocation(Flags).getPointer();
1574 
1575  assert(CGF.CurFn && "No function in current CodeGenFunction.");
1576 
1577  CharUnits Align = CGM.getContext().getTypeAlignInChars(IdentQTy);
1578  Address LocValue = Address::invalid();
1579  auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
1580  if (I != OpenMPLocThreadIDMap.end())
1581  LocValue = Address(I->second.DebugLoc, Align);
1582 
1583  // OpenMPLocThreadIDMap may have a null DebugLoc and a non-null ThreadID if
1584  // getThreadID was called before this routine.
1585  if (!LocValue.isValid()) {
1586  // Generate "ident_t .kmpc_loc.addr;"
1587  Address AI = CGF.CreateMemTemp(IdentQTy, ".kmpc_loc.addr");
1588  auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
1589  Elem.second.DebugLoc = AI.getPointer();
1590  LocValue = AI;
1591 
1592  if (!Elem.second.ServiceInsertPt)
1593  setLocThreadIdInsertPt(CGF);
1594  CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
1595  CGF.Builder.SetInsertPoint(Elem.second.ServiceInsertPt);
1596  CGF.Builder.CreateMemCpy(LocValue, getOrCreateDefaultLocation(Flags),
1597  CGF.getTypeSize(IdentQTy));
1598  }
1599 
1600  // char **psource = &.kmpc_loc_<flags>.addr.psource;
1601  LValue Base = CGF.MakeAddrLValue(LocValue, IdentQTy);
1602  auto Fields = cast<RecordDecl>(IdentQTy->getAsTagDecl())->field_begin();
1603  LValue PSource =
1604  CGF.EmitLValueForField(Base, *std::next(Fields, IdentField_PSource));
1605 
1606  llvm::Value *OMPDebugLoc = OpenMPDebugLocMap.lookup(Loc.getRawEncoding());
1607  if (OMPDebugLoc == nullptr) {
1608  SmallString<128> Buffer2;
1609  llvm::raw_svector_ostream OS2(Buffer2);
1610  // Build debug location.
1611  PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
1612  OS2 << ";" << PLoc.getFilename() << ";";
1613  if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CGF.CurFuncDecl))
1614  OS2 << FD->getQualifiedNameAsString();
1615  OS2 << ";" << PLoc.getLine() << ";" << PLoc.getColumn() << ";;";
1616  OMPDebugLoc = CGF.Builder.CreateGlobalStringPtr(OS2.str());
1617  OpenMPDebugLocMap[Loc.getRawEncoding()] = OMPDebugLoc;
1618  }
1619  // *psource = ";<File>;<Function>;<Line>;<Column>;;";
1620  CGF.EmitStoreOfScalar(OMPDebugLoc, PSource);
1621 
1622  // Our callers always pass this to a runtime function, so for
1623  // convenience, go ahead and return a naked pointer.
1624  return LocValue.getPointer();
1625 }
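// When debug info is enabled, the psource string stored above encodes the
// directive's source position. For example, a directive at line 12, column 3
// of test.c inside foo() would produce ";test.c;foo;12;3;;" (illustrative
// example; the function name comes from getQualifiedNameAsString()).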
1626 
1627 llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF,
1628  SourceLocation Loc) {
1629  assert(CGF.CurFn && "No function in current CodeGenFunction.");
1630 
1631  llvm::Value *ThreadID = nullptr;
1632  // Check whether we've already cached a load of the thread id in this
1633  // function.
1634  auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
1635  if (I != OpenMPLocThreadIDMap.end()) {
1636  ThreadID = I->second.ThreadID;
1637  if (ThreadID != nullptr)
1638  return ThreadID;
1639  }
1640  // If exceptions are enabled, do not use parameter to avoid possible crash.
1641  if (!CGF.EHStack.requiresLandingPad() || !CGF.getLangOpts().Exceptions ||
1642  !CGF.getLangOpts().CXXExceptions ||
1643  CGF.Builder.GetInsertBlock() == CGF.AllocaInsertPt->getParent()) {
1644  if (auto *OMPRegionInfo =
1645  dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
1646  if (OMPRegionInfo->getThreadIDVariable()) {
1647  // Check if this is an outlined function with the thread id passed as an argument.
1648  LValue LVal = OMPRegionInfo->getThreadIDVariableLValue(CGF);
1649  ThreadID = CGF.EmitLoadOfScalar(LVal, Loc);
1650  // If value loaded in entry block, cache it and use it everywhere in
1651  // function.
1652  if (CGF.Builder.GetInsertBlock() == CGF.AllocaInsertPt->getParent()) {
1653  auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
1654  Elem.second.ThreadID = ThreadID;
1655  }
1656  return ThreadID;
1657  }
1658  }
1659  }
1660 
1661  // This is not an outlined function region; we need to call
1662  // kmp_int32 __kmpc_global_thread_num(ident_t *loc).
1663  // Generate thread id value and cache this value for use across the
1664  // function.
1665  auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
1666  if (!Elem.second.ServiceInsertPt)
1667  setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
1668  CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
1669  CGF.Builder.SetInsertPoint(Elem.second.ServiceInsertPt);
1670  llvm::CallInst *Call = CGF.Builder.CreateCall(
1671  createRuntimeFunction(OMPRTL__kmpc_global_thread_num),
1672  emitUpdateLocation(CGF, Loc));
1673  Call->setCallingConv(CGF.getRuntimeCC());
1674  Elem.second.ThreadID = Call;
1675  return Call;
1676 }
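// Outside an outlined region the thread id therefore comes from a single
// cached runtime call emitted at the service insertion point, roughly:
//
//   kmp_int32 gtid = __kmpc_global_thread_num(&loc);
//
// (sketch; the actual call is cached per function in OpenMPLocThreadIDMap).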
1677 
1678 void CGOpenMPRuntime::functionFinished(CodeGenFunction &CGF) {
1679  assert(CGF.CurFn && "No function in current CodeGenFunction.");
1680  if (OpenMPLocThreadIDMap.count(CGF.CurFn)) {
1681  clearLocThreadIdInsertPt(CGF);
1682  OpenMPLocThreadIDMap.erase(CGF.CurFn);
1683  }
1684  if (FunctionUDRMap.count(CGF.CurFn) > 0) {
1685  for(auto *D : FunctionUDRMap[CGF.CurFn])
1686  UDRMap.erase(D);
1687  FunctionUDRMap.erase(CGF.CurFn);
1688  }
1689 }
1690 
1691 llvm::Type *CGOpenMPRuntime::getIdentTyPointerTy() {
1692  return IdentTy->getPointerTo();
1693 }
1694 
1695 llvm::Type *CGOpenMPRuntime::getKmpc_MicroPointerTy() {
1696  if (!Kmpc_MicroTy) {
1697  // Build void (*kmpc_micro)(kmp_int32 *global_tid, kmp_int32 *bound_tid,...)
1698  llvm::Type *MicroParams[] = {llvm::PointerType::getUnqual(CGM.Int32Ty),
1699  llvm::PointerType::getUnqual(CGM.Int32Ty)};
1700  Kmpc_MicroTy = llvm::FunctionType::get(CGM.VoidTy, MicroParams, true);
1701  }
1702  return llvm::PointerType::getUnqual(Kmpc_MicroTy);
1703 }
1704 
1705 llvm::FunctionCallee CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
1706  llvm::FunctionCallee RTLFn = nullptr;
1707  switch (static_cast<OpenMPRTLFunction>(Function)) {
1708  case OMPRTL__kmpc_fork_call: {
1709  // Build void __kmpc_fork_call(ident_t *loc, kmp_int32 argc, kmpc_micro
1710  // microtask, ...);
1711  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1712  getKmpc_MicroPointerTy()};
1713  auto *FnTy =
1714  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ true);
1715  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_fork_call");
1716  if (auto *F = dyn_cast<llvm::Function>(RTLFn.getCallee())) {
1717  if (!F->hasMetadata(llvm::LLVMContext::MD_callback)) {
1718  llvm::LLVMContext &Ctx = F->getContext();
1719  llvm::MDBuilder MDB(Ctx);
1720  // Annotate the callback behavior of the __kmpc_fork_call:
1721  // - The callback callee is argument number 2 (microtask).
1722  // - The first two arguments of the callback callee are unknown (-1).
1723  // - All variadic arguments to the __kmpc_fork_call are passed to the
1724  // callback callee.
1725  F->addMetadata(
1726  llvm::LLVMContext::MD_callback,
1727  *llvm::MDNode::get(Ctx, {MDB.createCallbackEncoding(
1728  2, {-1, -1},
1729  /* VarArgsArePassed */ true)}));
1730  }
1731  }
1732  break;
1733  }
1735  // Build kmp_int32 __kmpc_global_thread_num(ident_t *loc);
1736  llvm::Type *TypeParams[] = {getIdentTyPointerTy()};
1737  auto *FnTy =
1738  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
1739  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_global_thread_num");
1740  break;
1741  }
1743  // Build void *__kmpc_threadprivate_cached(ident_t *loc,
1744  // kmp_int32 global_tid, void *data, size_t size, void ***cache);
1745  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1747  CGM.VoidPtrTy->getPointerTo()->getPointerTo()};
1748  auto *FnTy =
1749  llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg*/ false);
1750  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_threadprivate_cached");
1751  break;
1752  }
1753  case OMPRTL__kmpc_critical: {
1754  // Build void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
1755  // kmp_critical_name *crit);
1756  llvm::Type *TypeParams[] = {
1758  llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1759  auto *FnTy =
1760  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1761  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_critical");
1762  break;
1763  }
1765  // Build void __kmpc_critical_with_hint(ident_t *loc, kmp_int32 global_tid,
1766  // kmp_critical_name *crit, uintptr_t hint);
1767  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1768  llvm::PointerType::getUnqual(KmpCriticalNameTy),
1769  CGM.IntPtrTy};
1770  auto *FnTy =
1771  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1772  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_critical_with_hint");
1773  break;
1774  }
1776  // Build void __kmpc_threadprivate_register(ident_t *, void *data,
1777  // kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor);
1778  // typedef void *(*kmpc_ctor)(void *);
1779  auto *KmpcCtorTy =
1780  llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
1781  /*isVarArg*/ false)->getPointerTo();
1782  // typedef void *(*kmpc_cctor)(void *, void *);
1783  llvm::Type *KmpcCopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
1784  auto *KmpcCopyCtorTy =
1785  llvm::FunctionType::get(CGM.VoidPtrTy, KmpcCopyCtorTyArgs,
1786  /*isVarArg*/ false)
1787  ->getPointerTo();
1788  // typedef void (*kmpc_dtor)(void *);
1789  auto *KmpcDtorTy =
1790  llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy, /*isVarArg*/ false)
1791  ->getPointerTo();
1792  llvm::Type *FnTyArgs[] = {getIdentTyPointerTy(), CGM.VoidPtrTy, KmpcCtorTy,
1793  KmpcCopyCtorTy, KmpcDtorTy};
1794  auto *FnTy = llvm::FunctionType::get(CGM.VoidTy, FnTyArgs,
1795  /*isVarArg*/ false);
1796  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_threadprivate_register");
1797  break;
1798  }
1800  // Build void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid,
1801  // kmp_critical_name *crit);
1802  llvm::Type *TypeParams[] = {
1804  llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1805  auto *FnTy =
1806  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1807  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_critical");
1808  break;
1809  }
1811  // Build kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32
1812  // global_tid);
1813  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1814  auto *FnTy =
1815  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
1816  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_cancel_barrier");
1817  break;
1818  }
1819  case OMPRTL__kmpc_barrier: {
1820  // Build void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
1821  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1822  auto *FnTy =
1823  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1824  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_barrier");
1825  break;
1826  }
1828  // Build void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);
1829  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1830  auto *FnTy =
1831  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1832  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_for_static_fini");
1833  break;
1834  }
1836  // Build void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
1837  // kmp_int32 num_threads)
1838  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1839  CGM.Int32Ty};
1840  auto *FnTy =
1841  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1842  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_num_threads");
1843  break;
1844  }
1846  // Build void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
1847  // global_tid);
1848  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1849  auto *FnTy =
1850  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1851  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_serialized_parallel");
1852  break;
1853  }
1855  // Build void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
1856  // global_tid);
1857  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1858  auto *FnTy =
1859  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1860  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_serialized_parallel");
1861  break;
1862  }
1863  case OMPRTL__kmpc_flush: {
1864  // Build void __kmpc_flush(ident_t *loc);
1865  llvm::Type *TypeParams[] = {getIdentTyPointerTy()};
1866  auto *FnTy =
1867  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1868  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_flush");
1869  break;
1870  }
1871  case OMPRTL__kmpc_master: {
1872  // Build kmp_int32 __kmpc_master(ident_t *loc, kmp_int32 global_tid);
1873  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1874  auto *FnTy =
1875  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1876  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_master");
1877  break;
1878  }
1879  case OMPRTL__kmpc_end_master: {
1880  // Build void __kmpc_end_master(ident_t *loc, kmp_int32 global_tid);
1881  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1882  auto *FnTy =
1883  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1884  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_master");
1885  break;
1886  }
1888  // Build kmp_int32 __kmpc_omp_taskyield(ident_t *, kmp_int32 global_tid,
1889  // int end_part);
1890  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
1891  auto *FnTy =
1892  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1893  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_taskyield");
1894  break;
1895  }
1896  case OMPRTL__kmpc_single: {
1897  // Build kmp_int32 __kmpc_single(ident_t *loc, kmp_int32 global_tid);
1898  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1899  auto *FnTy =
1900  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1901  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_single");
1902  break;
1903  }
1904  case OMPRTL__kmpc_end_single: {
1905  // Build void __kmpc_end_single(ident_t *loc, kmp_int32 global_tid);
1906  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1907  auto *FnTy =
1908  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1909  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_single");
1910  break;
1911  }
1913  // Build kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
1914  // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
1915  // kmp_routine_entry_t *task_entry);
1916  assert(KmpRoutineEntryPtrTy != nullptr &&
1917  "Type kmp_routine_entry_t must be created.");
1918  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty,
1919  CGM.SizeTy, CGM.SizeTy, KmpRoutineEntryPtrTy};
1920  // Return void * and then cast to particular kmp_task_t type.
1921  auto *FnTy =
1922  llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
1923  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_alloc");
1924  break;
1925  }
1927  // Build kmp_task_t *__kmpc_omp_target_task_alloc(ident_t *, kmp_int32 gtid,
1928  // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
1929  // kmp_routine_entry_t *task_entry, kmp_int64 device_id);
1930  assert(KmpRoutineEntryPtrTy != nullptr &&
1931  "Type kmp_routine_entry_t must be created.");
1932  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty,
1933  CGM.SizeTy, CGM.SizeTy, KmpRoutineEntryPtrTy,
1934  CGM.Int64Ty};
1935  // Return void * and then cast to particular kmp_task_t type.
1936  auto *FnTy =
1937  llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
1938  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_target_task_alloc");
1939  break;
1940  }
1941  case OMPRTL__kmpc_omp_task: {
1942  // Build kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t
1943  // *new_task);
1944  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1945  CGM.VoidPtrTy};
1946  auto *FnTy =
1947  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1948  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task");
1949  break;
1950  }
1951  case OMPRTL__kmpc_copyprivate: {
1952  // Build void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
1953  // size_t cpy_size, void *cpy_data, void(*cpy_func)(void *, void *),
1954  // kmp_int32 didit);
1955  llvm::Type *CpyTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
1956  auto *CpyFnTy =
1957  llvm::FunctionType::get(CGM.VoidTy, CpyTypeParams, /*isVarArg=*/false);
1958  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.SizeTy,
1959  CGM.VoidPtrTy, CpyFnTy->getPointerTo(),
1960  CGM.Int32Ty};
1961  auto *FnTy =
1962  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1963  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_copyprivate");
1964  break;
1965  }
1966  case OMPRTL__kmpc_reduce: {
1967  // Build kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid,
1968  // kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void
1969  // (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck);
1970  llvm::Type *ReduceTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
1971  auto *ReduceFnTy = llvm::FunctionType::get(CGM.VoidTy, ReduceTypeParams,
1972  /*isVarArg=*/false);
1973  llvm::Type *TypeParams[] = {
1975  CGM.VoidPtrTy, ReduceFnTy->getPointerTo(),
1976  llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1977  auto *FnTy =
1978  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1979  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_reduce");
1980  break;
1981  }
1983  // Build kmp_int32 __kmpc_reduce_nowait(ident_t *loc, kmp_int32
1984  // global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
1985  // void (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name
1986  // *lck);
1987  llvm::Type *ReduceTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
1988  auto *ReduceFnTy = llvm::FunctionType::get(CGM.VoidTy, ReduceTypeParams,
1989  /*isVarArg=*/false);
1990  llvm::Type *TypeParams[] = {
1992  CGM.VoidPtrTy, ReduceFnTy->getPointerTo(),
1993  llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1994  auto *FnTy =
1995  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1996  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_reduce_nowait");
1997  break;
1998  }
1999  case OMPRTL__kmpc_end_reduce: {
2000  // Build void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
2001  // kmp_critical_name *lck);
2002  llvm::Type *TypeParams[] = {
2004  llvm::PointerType::getUnqual(KmpCriticalNameTy)};
2005  auto *FnTy =
2006  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2007  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_reduce");
2008  break;
2009  }
2011  // Build void __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
2012  // kmp_critical_name *lck);
2013  llvm::Type *TypeParams[] = {
2015  llvm::PointerType::getUnqual(KmpCriticalNameTy)};
2016  auto *FnTy =
2017  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2018  RTLFn =
2019  CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_reduce_nowait");
2020  break;
2021  }
2023  // Build void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid,
2024  // kmp_task_t *new_task);
2025  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
2026  CGM.VoidPtrTy};
2027  auto *FnTy =
2028  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2029  RTLFn =
2030  CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_begin_if0");
2031  break;
2032  }
2034  // Build void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid,
2035  // kmp_task_t *new_task);
2036  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
2037  CGM.VoidPtrTy};
2038  auto *FnTy =
2039  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2040  RTLFn = CGM.CreateRuntimeFunction(FnTy,
2041  /*Name=*/"__kmpc_omp_task_complete_if0");
2042  break;
2043  }
2044  case OMPRTL__kmpc_ordered: {
2045  // Build void __kmpc_ordered(ident_t *loc, kmp_int32 global_tid);
2046  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
2047  auto *FnTy =
2048  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2049  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_ordered");
2050  break;
2051  }
2052  case OMPRTL__kmpc_end_ordered: {
2053  // Build void __kmpc_end_ordered(ident_t *loc, kmp_int32 global_tid);
2054  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
2055  auto *FnTy =
2056  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2057  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_ordered");
2058  break;
2059  }
2061  // Build kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32 global_tid);
2062  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
2063  auto *FnTy =
2064  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
2065  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_omp_taskwait");
2066  break;
2067  }
2068  case OMPRTL__kmpc_taskgroup: {
2069  // Build void __kmpc_taskgroup(ident_t *loc, kmp_int32 global_tid);
2070  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
2071  auto *FnTy =
2072  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2073  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_taskgroup");
2074  break;
2075  }
2077  // Build void __kmpc_end_taskgroup(ident_t *loc, kmp_int32 global_tid);
2078  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
2079  auto *FnTy =
2080  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2081  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_taskgroup");
2082  break;
2083  }
2085  // Build void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
2086  // int proc_bind)
2087  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
2088  auto *FnTy =
2089  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2090  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_proc_bind");
2091  break;
2092  }
2094  // Build kmp_int32 __kmpc_omp_task_with_deps(ident_t *, kmp_int32 gtid,
2095  // kmp_task_t *new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list,
2096  // kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
2097  llvm::Type *TypeParams[] = {
2100  auto *FnTy =
2101  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
2102  RTLFn =
2103  CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_with_deps");
2104  break;
2105  }
2107  // Build void __kmpc_omp_wait_deps(ident_t *, kmp_int32 gtid,
2108  // kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias,
2109  // kmp_depend_info_t *noalias_dep_list);
2110  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
2113  auto *FnTy =
2114  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2115  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_wait_deps");
2116  break;
2117  }
2119  // Build kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
2120  // global_tid, kmp_int32 cncl_kind)
2121  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
2122  auto *FnTy =
2123  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2124  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_cancellationpoint");
2125  break;
2126  }
2127  case OMPRTL__kmpc_cancel: {
2128  // Build kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
2129  // kmp_int32 cncl_kind)
2130  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
2131  auto *FnTy =
2132  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2133  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_cancel");
2134  break;
2135  }
2137  // Build void __kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid,
2138  // kmp_int32 num_teams, kmp_int32 num_threads);
2139  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty,
2140  CGM.Int32Ty};
2141  auto *FnTy =
2142  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2143  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_num_teams");
2144  break;
2145  }
2146  case OMPRTL__kmpc_fork_teams: {
2147  // Build void __kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro
2148  // microtask, ...);
2149  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
2150  getKmpc_MicroPointerTy()};
2151  auto *FnTy =
2152  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ true);
2153  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_fork_teams");
2154  if (auto *F = dyn_cast<llvm::Function>(RTLFn.getCallee())) {
2155  if (!F->hasMetadata(llvm::LLVMContext::MD_callback)) {
2156  llvm::LLVMContext &Ctx = F->getContext();
2157  llvm::MDBuilder MDB(Ctx);
2158  // Annotate the callback behavior of the __kmpc_fork_teams:
2159  // - The callback callee is argument number 2 (microtask).
2160  // - The first two arguments of the callback callee are unknown (-1).
2161  // - All variadic arguments to the __kmpc_fork_teams are passed to the
2162  // callback callee.
2163  F->addMetadata(
2164  llvm::LLVMContext::MD_callback,
2165  *llvm::MDNode::get(Ctx, {MDB.createCallbackEncoding(
2166  2, {-1, -1},
2167  /* VarArgsArePassed */ true)}));
2168  }
2169  }
2170  break;
2171  }
2172  case OMPRTL__kmpc_taskloop: {
2173  // Build void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int
2174  // if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int
2175  // sched, kmp_uint64 grainsize, void *task_dup);
2176  llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
2177  CGM.IntTy,
2178  CGM.VoidPtrTy,
2179  CGM.IntTy,
2180  CGM.Int64Ty->getPointerTo(),
2181  CGM.Int64Ty->getPointerTo(),
2182  CGM.Int64Ty,
2183  CGM.IntTy,
2184  CGM.IntTy,
2185  CGM.Int64Ty,
2186  CGM.VoidPtrTy};
2187  auto *FnTy =
2188  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2189  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_taskloop");
2190  break;
2191  }
2193  // Build void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid, kmp_int32
2194  // num_dims, struct kmp_dim *dims);
2195  llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
2196  CGM.Int32Ty,
2197  CGM.Int32Ty,
2198  CGM.VoidPtrTy};
2199  auto *FnTy =
2200  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2201  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_init");
2202  break;
2203  }
2205  // Build void __kmpc_doacross_fini(ident_t *loc, kmp_int32 gtid);
2206  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
2207  auto *FnTy =
2208  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2209  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_fini");
2210  break;
2211  }
2213  // Build void __kmpc_doacross_post(ident_t *loc, kmp_int32 gtid, kmp_int64
2214  // *vec);
2215  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
2216  CGM.Int64Ty->getPointerTo()};
2217  auto *FnTy =
2218  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2219  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_post");
2220  break;
2221  }
2223  // Build void __kmpc_doacross_wait(ident_t *loc, kmp_int32 gtid, kmp_int64
2224  // *vec);
2225  llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
2226  CGM.Int64Ty->getPointerTo()};
2227  auto *FnTy =
2228  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2229  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_wait");
2230  break;
2231  }
2233  // Build void *__kmpc_task_reduction_init(int gtid, int num_data, void
2234  // *data);
2235  llvm::Type *TypeParams[] = {CGM.IntTy, CGM.IntTy, CGM.VoidPtrTy};
2236  auto *FnTy =
2237  llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
2238  RTLFn =
2239  CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_task_reduction_init");
2240  break;
2241  }
2243  // Build void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void
2244  // *d);
2245  llvm::Type *TypeParams[] = {CGM.IntTy, CGM.VoidPtrTy, CGM.VoidPtrTy};
2246  auto *FnTy =
2247  llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
2248  RTLFn = CGM.CreateRuntimeFunction(
2249  FnTy, /*Name=*/"__kmpc_task_reduction_get_th_data");
2250  break;
2251  }
2252  case OMPRTL__kmpc_alloc: {
2253  // Build void *__kmpc_alloc(int gtid, size_t sz, omp_allocator_handle_t al);
2254  // omp_allocator_handle_t type is void *.
2255  llvm::Type *TypeParams[] = {CGM.IntTy, CGM.SizeTy, CGM.VoidPtrTy};
2256  auto *FnTy =
2257  llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
2258  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_alloc");
2259  break;
2260  }
2261  case OMPRTL__kmpc_free: {
2262  // Build void __kmpc_free(int gtid, void *ptr, omp_allocator_handle_t al);
2263  // omp_allocator_handle_t type is void *.
2264  llvm::Type *TypeParams[] = {CGM.IntTy, CGM.VoidPtrTy, CGM.VoidPtrTy};
2265  auto *FnTy =
2266  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2267  RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_free");
2268  break;
2269  }
2271  // Build void __kmpc_push_target_tripcount(int64_t device_id, kmp_uint64
2272  // size);
2273  llvm::Type *TypeParams[] = {CGM.Int64Ty, CGM.Int64Ty};
2274  llvm::FunctionType *FnTy =
2275  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2276  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_target_tripcount");
2277  break;
2278  }
2279  case OMPRTL__tgt_target: {
2280  // Build int32_t __tgt_target(int64_t device_id, void *host_ptr, int32_t
2281  // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
2282  // *arg_types);
2283  llvm::Type *TypeParams[] = {CGM.Int64Ty,
2284  CGM.VoidPtrTy,
2285  CGM.Int32Ty,
2286  CGM.VoidPtrPtrTy,
2287  CGM.VoidPtrPtrTy,
2288  CGM.SizeTy->getPointerTo(),
2289  CGM.Int64Ty->getPointerTo()};
2290  auto *FnTy =
2291  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2292  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target");
2293  break;
2294  }
2296  // Build int32_t __tgt_target_nowait(int64_t device_id, void *host_ptr,
2297  // int32_t arg_num, void** args_base, void **args, size_t *arg_sizes,
2298  // int64_t *arg_types);
2299  llvm::Type *TypeParams[] = {CGM.Int64Ty,
2300  CGM.VoidPtrTy,
2301  CGM.Int32Ty,
2302  CGM.VoidPtrPtrTy,
2303  CGM.VoidPtrPtrTy,
2304  CGM.SizeTy->getPointerTo(),
2305  CGM.Int64Ty->getPointerTo()};
2306  auto *FnTy =
2307  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2308  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_nowait");
2309  break;
2310  }
2311  case OMPRTL__tgt_target_teams: {
2312  // Build int32_t __tgt_target_teams(int64_t device_id, void *host_ptr,
2313  // int32_t arg_num, void** args_base, void **args, size_t *arg_sizes,
2314  // int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
2315  llvm::Type *TypeParams[] = {CGM.Int64Ty,
2316  CGM.VoidPtrTy,
2317  CGM.Int32Ty,
2318  CGM.VoidPtrPtrTy,
2319  CGM.VoidPtrPtrTy,
2320  CGM.SizeTy->getPointerTo(),
2321  CGM.Int64Ty->getPointerTo(),
2322  CGM.Int32Ty,
2323  CGM.Int32Ty};
2324  auto *FnTy =
2325  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2326  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_teams");
2327  break;
2328  }
2330  // Build int32_t __tgt_target_teams_nowait(int64_t device_id, void
2331  // *host_ptr, int32_t arg_num, void** args_base, void **args, size_t
2332  // *arg_sizes, int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
2333  llvm::Type *TypeParams[] = {CGM.Int64Ty,
2334  CGM.VoidPtrTy,
2335  CGM.Int32Ty,
2336  CGM.VoidPtrPtrTy,
2337  CGM.VoidPtrPtrTy,
2338  CGM.SizeTy->getPointerTo(),
2339  CGM.Int64Ty->getPointerTo(),
2340  CGM.Int32Ty,
2341  CGM.Int32Ty};
2342  auto *FnTy =
2343  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2344  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_teams_nowait");
2345  break;
2346  }
2348  // Build void __tgt_register_requires(int64_t flags);
2349  llvm::Type *TypeParams[] = {CGM.Int64Ty};
2350  auto *FnTy =
2351  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2352  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_register_requires");
2353  break;
2354  }
2355  case OMPRTL__tgt_register_lib: {
2356  // Build void __tgt_register_lib(__tgt_bin_desc *desc);
2357  QualType ParamTy =
2359  llvm::Type *TypeParams[] = {CGM.getTypes().ConvertTypeForMem(ParamTy)};
2360  auto *FnTy =
2361  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2362  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_register_lib");
2363  break;
2364  }
2366  // Build void __tgt_unregister_lib(__tgt_bin_desc *desc);
2367  QualType ParamTy =
2369  llvm::Type *TypeParams[] = {CGM.getTypes().ConvertTypeForMem(ParamTy)};
2370  auto *FnTy =
2371  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2372  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_unregister_lib");
2373  break;
2374  }
2376  // Build void __tgt_target_data_begin(int64_t device_id, int32_t arg_num,
2377  // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
2378  llvm::Type *TypeParams[] = {CGM.Int64Ty,
2379  CGM.Int32Ty,
2380  CGM.VoidPtrPtrTy,
2381  CGM.VoidPtrPtrTy,
2382  CGM.SizeTy->getPointerTo(),
2383  CGM.Int64Ty->getPointerTo()};
2384  auto *FnTy =
2385  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2386  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_begin");
2387  break;
2388  }
2390  // Build void __tgt_target_data_begin_nowait(int64_t device_id, int32_t
2391  // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
2392  // *arg_types);
2393  llvm::Type *TypeParams[] = {CGM.Int64Ty,
2394  CGM.Int32Ty,
2395  CGM.VoidPtrPtrTy,
2396  CGM.VoidPtrPtrTy,
2397  CGM.SizeTy->getPointerTo(),
2398  CGM.Int64Ty->getPointerTo()};
2399  auto *FnTy =
2400  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2401  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_begin_nowait");
2402  break;
2403  }
2405  // Build void __tgt_target_data_end(int64_t device_id, int32_t arg_num,
2406  // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
2407  llvm::Type *TypeParams[] = {CGM.Int64Ty,
2408  CGM.Int32Ty,
2409  CGM.VoidPtrPtrTy,
2410  CGM.VoidPtrPtrTy,
2411  CGM.SizeTy->getPointerTo(),
2412  CGM.Int64Ty->getPointerTo()};
2413  auto *FnTy =
2414  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2415  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_end");
2416  break;
2417  }
2419  // Build void __tgt_target_data_end_nowait(int64_t device_id, int32_t
2420  // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
2421  // *arg_types);
2422  llvm::Type *TypeParams[] = {CGM.Int64Ty,
2423  CGM.Int32Ty,
2424  CGM.VoidPtrPtrTy,
2425  CGM.VoidPtrPtrTy,
2426  CGM.SizeTy->getPointerTo(),
2427  CGM.Int64Ty->getPointerTo()};
2428  auto *FnTy =
2429  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2430  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_end_nowait");
2431  break;
2432  }
2434  // Build void __tgt_target_data_update(int64_t device_id, int32_t arg_num,
2435  // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
2436  llvm::Type *TypeParams[] = {CGM.Int64Ty,
2437  CGM.Int32Ty,
2438  CGM.VoidPtrPtrTy,
2439  CGM.VoidPtrPtrTy,
2440  CGM.SizeTy->getPointerTo(),
2441  CGM.Int64Ty->getPointerTo()};
2442  auto *FnTy =
2443  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2444  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_update");
2445  break;
2446  }
2448  // Build void __tgt_target_data_update_nowait(int64_t device_id, int32_t
2449  // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
2450  // *arg_types);
2451  llvm::Type *TypeParams[] = {CGM.Int64Ty,
2452  CGM.Int32Ty,
2453  CGM.VoidPtrPtrTy,
2454  CGM.VoidPtrPtrTy,
2455  CGM.SizeTy->getPointerTo(),
2456  CGM.Int64Ty->getPointerTo()};
2457  auto *FnTy =
2458  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2459  RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_update_nowait");
2460  break;
2461  }
2462  }
2463  assert(RTLFn && "Unable to find OpenMP runtime function");
2464  return RTLFn;
2465 }
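// Typical usage of this factory from the emitters in this file is roughly
// (illustrative sketch):
//
//   llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
//   CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_barrier), Args);
//
// i.e. the caller supplies the ident_t location and global thread id, and the
// declaration is created lazily with the matching libomp signature.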
2466 
2467 llvm::FunctionCallee
2468 CGOpenMPRuntime::createForStaticInitFunction(unsigned IVSize, bool IVSigned) {
2469  assert((IVSize == 32 || IVSize == 64) &&
2470  "IV size is not compatible with the omp runtime");
2471  StringRef Name = IVSize == 32 ? (IVSigned ? "__kmpc_for_static_init_4"
2472  : "__kmpc_for_static_init_4u")
2473  : (IVSigned ? "__kmpc_for_static_init_8"
2474  : "__kmpc_for_static_init_8u");
2475  llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
2476  auto *PtrTy = llvm::PointerType::getUnqual(ITy);
2477  llvm::Type *TypeParams[] = {
2478  getIdentTyPointerTy(), // loc
2479  CGM.Int32Ty, // tid
2480  CGM.Int32Ty, // schedtype
2481  llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
2482  PtrTy, // p_lower
2483  PtrTy, // p_upper
2484  PtrTy, // p_stride
2485  ITy, // incr
2486  ITy // chunk
2487  };
2488  auto *FnTy =
2489  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2490  return CGM.CreateRuntimeFunction(FnTy, Name);
2491 }
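// For IVSize == 32 and IVSigned == true the parameter list above corresponds
// to the libomp entry point (sketch):
//
//   void __kmpc_for_static_init_4(ident_t *loc, kmp_int32 gtid,
//                                 kmp_int32 schedtype, kmp_int32 *plastiter,
//                                 kmp_int32 *plower, kmp_int32 *pupper,
//                                 kmp_int32 *pstride, kmp_int32 incr,
//                                 kmp_int32 chunk);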
2492 
2493 llvm::FunctionCallee
2494 CGOpenMPRuntime::createDispatchInitFunction(unsigned IVSize, bool IVSigned) {
2495  assert((IVSize == 32 || IVSize == 64) &&
2496  "IV size is not compatible with the omp runtime");
2497  StringRef Name =
2498  IVSize == 32
2499  ? (IVSigned ? "__kmpc_dispatch_init_4" : "__kmpc_dispatch_init_4u")
2500  : (IVSigned ? "__kmpc_dispatch_init_8" : "__kmpc_dispatch_init_8u");
2501  llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
2502  llvm::Type *TypeParams[] = { getIdentTyPointerTy(), // loc
2503  CGM.Int32Ty, // tid
2504  CGM.Int32Ty, // schedtype
2505  ITy, // lower
2506  ITy, // upper
2507  ITy, // stride
2508  ITy // chunk
2509  };
2510  auto *FnTy =
2511  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2512  return CGM.CreateRuntimeFunction(FnTy, Name);
2513 }
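// Matching libomp prototype for the 32-bit signed variant (sketch):
//
//   void __kmpc_dispatch_init_4(ident_t *loc, kmp_int32 gtid,
//                               enum sched_type schedule, kmp_int32 lb,
//                               kmp_int32 ub, kmp_int32 st, kmp_int32 chunk);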
2514 
2515 llvm::FunctionCallee
2516 CGOpenMPRuntime::createDispatchFiniFunction(unsigned IVSize, bool IVSigned) {
2517  assert((IVSize == 32 || IVSize == 64) &&
2518  "IV size is not compatible with the omp runtime");
2519  StringRef Name =
2520  IVSize == 32
2521  ? (IVSigned ? "__kmpc_dispatch_fini_4" : "__kmpc_dispatch_fini_4u")
2522  : (IVSigned ? "__kmpc_dispatch_fini_8" : "__kmpc_dispatch_fini_8u");
2523  llvm::Type *TypeParams[] = {
2524  getIdentTyPointerTy(), // loc
2525  CGM.Int32Ty, // tid
2526  };
2527  auto *FnTy =
2528  llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2529  return CGM.CreateRuntimeFunction(FnTy, Name);
2530 }
2531 
2532 llvm::FunctionCallee
2533 CGOpenMPRuntime::createDispatchNextFunction(unsigned IVSize, bool IVSigned) {
2534  assert((IVSize == 32 || IVSize == 64) &&
2535  "IV size is not compatible with the omp runtime");
2536  StringRef Name =
2537  IVSize == 32
2538  ? (IVSigned ? "__kmpc_dispatch_next_4" : "__kmpc_dispatch_next_4u")
2539  : (IVSigned ? "__kmpc_dispatch_next_8" : "__kmpc_dispatch_next_8u");
2540  llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
2541  auto *PtrTy = llvm::PointerType::getUnqual(ITy);
2542  llvm::Type *TypeParams[] = {
2543  getIdentTyPointerTy(), // loc
2544  CGM.Int32Ty, // tid
2545  llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
2546  PtrTy, // p_lower
2547  PtrTy, // p_upper
2548  PtrTy // p_stride
2549  };
2550  auto *FnTy =
2551  llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2552  return CGM.CreateRuntimeFunction(FnTy, Name);
2553 }
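// The next-chunk query returns nonzero while iterations remain; the 32-bit
// signed libomp entry point looks like (sketch):
//
//   int __kmpc_dispatch_next_4(ident_t *loc, kmp_int32 gtid,
//                              kmp_int32 *p_last, kmp_int32 *p_lb,
//                              kmp_int32 *p_ub, kmp_int32 *p_st);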
2554 
2555 Address CGOpenMPRuntime::getAddrOfDeclareTargetVar(const VarDecl *VD) {
2556  if (CGM.getLangOpts().OpenMPSimd)
2557  return Address::invalid();
2559  OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
2560  if (Res && (*Res == OMPDeclareTargetDeclAttr::MT_Link ||
2561  (*Res == OMPDeclareTargetDeclAttr::MT_To &&
2563  SmallString<64> PtrName;
2564  {
2565  llvm::raw_svector_ostream OS(PtrName);
2566  OS << CGM.getMangledName(GlobalDecl(VD)) << "_decl_tgt_ref_ptr";
2567  }
2568  llvm::Value *Ptr = CGM.getModule().getNamedValue(PtrName);
2569  if (!Ptr) {
2570  QualType PtrTy = CGM.getContext().getPointerType(VD->getType());
2572  PtrName);
2573  if (!CGM.getLangOpts().OpenMPIsDevice) {
2574  auto *GV = cast<llvm::GlobalVariable>(Ptr);
2575  GV->setLinkage(llvm::GlobalValue::ExternalLinkage);
2576  GV->setInitializer(CGM.GetAddrOfGlobal(VD));
2577  }
2578  CGM.addUsedGlobal(cast<llvm::GlobalValue>(Ptr));
2579  registerTargetGlobalVariable(VD, cast<llvm::Constant>(Ptr));
2580  }
2581  return Address(Ptr, CGM.getContext().getDeclAlign(VD));
2582  }
2583  return Address::invalid();
2584 }
2585 
2586 llvm::Constant *
2587 CGOpenMPRuntime::getOrCreateThreadPrivateCache(const VarDecl *VD) {
2588  assert(!CGM.getLangOpts().OpenMPUseTLS ||
2589  !CGM.getContext().getTargetInfo().isTLSSupported());
2590  // Lookup the entry, lazily creating it if necessary.
2591  std::string Suffix = getName({"cache", ""});
2592  return getOrCreateInternalVariable(
2593  CGM.Int8PtrPtrTy, Twine(CGM.getMangledName(VD)).concat(Suffix));
2594 }
2595 
2596 Address CGOpenMPRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
2597  const VarDecl *VD,
2598  Address VDAddr,
2599  SourceLocation Loc) {
2600  if (CGM.getLangOpts().OpenMPUseTLS &&
2601  CGM.getContext().getTargetInfo().isTLSSupported())
2602  return VDAddr;
2603 
2604  llvm::Type *VarTy = VDAddr.getElementType();
2605  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
2606  CGF.Builder.CreatePointerCast(VDAddr.getPointer(),
2607  CGM.Int8PtrTy),
2610  return Address(CGF.EmitRuntimeCall(
2612  VDAddr.getAlignment());
2613 }
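// The address returned above is the result of a call of the form (sketch):
//
//   void *addr = __kmpc_threadprivate_cached(&loc, gtid, (void *)&var,
//                                            sizeof(var), &var_cache);
//
// where var_cache stands for the internal "<mangled-name>.cache." variable
// built by getOrCreateThreadPrivateCache; libomp resolves the call to the
// calling thread's private copy of the variable.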
2614 
2615 void CGOpenMPRuntime::emitThreadPrivateVarInit(
2616  CodeGenFunction &CGF, Address VDAddr, llvm::Value *Ctor,
2617  llvm::Value *CopyCtor, llvm::Value *Dtor, SourceLocation Loc) {
2618  // Call kmp_int32 __kmpc_global_thread_num(&loc) to init OpenMP runtime
2619  // library.
2620  llvm::Value *OMPLoc = emitUpdateLocation(CGF, Loc);
2621  CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_global_thread_num),
2622  OMPLoc);
2623  // Call __kmpc_threadprivate_register(&loc, &var, ctor, cctor/*NULL*/, dtor)
2624  // to register constructor/destructor for variable.
2625  llvm::Value *Args[] = {
2626  OMPLoc, CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.VoidPtrTy),
2627  Ctor, CopyCtor, Dtor};
2628  CGF.EmitRuntimeCall(
2629  createRuntimeFunction(OMPRTL__kmpc_threadprivate_register), Args);
2630 }
2631 
2632 llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
2633  const VarDecl *VD, Address VDAddr, SourceLocation Loc,
2634  bool PerformInit, CodeGenFunction *CGF) {
2635  if (CGM.getLangOpts().OpenMPUseTLS &&
2636  CGM.getContext().getTargetInfo().isTLSSupported())
2637  return nullptr;
2638 
2639  VD = VD->getDefinition(CGM.getContext());
2640  if (VD && ThreadPrivateWithDefinition.insert(CGM.getMangledName(VD)).second) {
2641  QualType ASTTy = VD->getType();
2642 
2643  llvm::Value *Ctor = nullptr, *CopyCtor = nullptr, *Dtor = nullptr;
2644  const Expr *Init = VD->getAnyInitializer();
2645  if (CGM.getLangOpts().CPlusPlus && PerformInit) {
2646  // Generate function that re-emits the declaration's initializer into the
2647  // threadprivate copy of the variable VD
2648  CodeGenFunction CtorCGF(CGM);
2649  FunctionArgList Args;
2650  ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, Loc,
2651  /*Id=*/nullptr, CGM.getContext().VoidPtrTy,
2653  Args.push_back(&Dst);
2654 
2655  const auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
2656  CGM.getContext().VoidPtrTy, Args);
2657  llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
2658  std::string Name = getName({"__kmpc_global_ctor_", ""});
2659  llvm::Function *Fn =
2660  CGM.CreateGlobalInitOrDestructFunction(FTy, Name, FI, Loc);
2661  CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidPtrTy, Fn, FI,
2662  Args, Loc, Loc);
2663  llvm::Value *ArgVal = CtorCGF.EmitLoadOfScalar(
2664  CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
2665  CGM.getContext().VoidPtrTy, Dst.getLocation());
2666  Address Arg = Address(ArgVal, VDAddr.getAlignment());
2667  Arg = CtorCGF.Builder.CreateElementBitCast(
2668  Arg, CtorCGF.ConvertTypeForMem(ASTTy));
2669  CtorCGF.EmitAnyExprToMem(Init, Arg, Init->getType().getQualifiers(),
2670  /*IsInitializer=*/true);
2671  ArgVal = CtorCGF.EmitLoadOfScalar(
2672  CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
2673  CGM.getContext().VoidPtrTy, Dst.getLocation());
2674  CtorCGF.Builder.CreateStore(ArgVal, CtorCGF.ReturnValue);
2675  CtorCGF.FinishFunction();
2676  Ctor = Fn;
2677  }
2678  if (VD->getType().isDestructedType() != QualType::DK_none) {
2679  // Generate function that emits destructor call for the threadprivate copy
2680  // of the variable VD
2681  CodeGenFunction DtorCGF(CGM);
2682  FunctionArgList Args;
2683  ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, Loc,
2684  /*Id=*/nullptr, CGM.getContext().VoidPtrTy,
2686  Args.push_back(&Dst);
2687 
2688  const auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
2689  CGM.getContext().VoidTy, Args);
2690  llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
2691  std::string Name = getName({"__kmpc_global_dtor_", ""});
2692  llvm::Function *Fn =
2693  CGM.CreateGlobalInitOrDestructFunction(FTy, Name, FI, Loc);
2694  auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF);
2695  DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI, Args,
2696  Loc, Loc);
2697  // Create a scope with an artificial location for the body of this function.
2698  auto AL = ApplyDebugLocation::CreateArtificial(DtorCGF);
2699  llvm::Value *ArgVal = DtorCGF.EmitLoadOfScalar(
2700  DtorCGF.GetAddrOfLocalVar(&Dst),
2701  /*Volatile=*/false, CGM.getContext().VoidPtrTy, Dst.getLocation());
2702  DtorCGF.emitDestroy(Address(ArgVal, VDAddr.getAlignment()), ASTTy,
2703  DtorCGF.getDestroyer(ASTTy.isDestructedType()),
2704  DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
2705  DtorCGF.FinishFunction();
2706  Dtor = Fn;
2707  }
2708  // Do not emit init function if it is not required.
2709  if (!Ctor && !Dtor)
2710  return nullptr;
2711 
2712  llvm::Type *CopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
2713  auto *CopyCtorTy = llvm::FunctionType::get(CGM.VoidPtrTy, CopyCtorTyArgs,
2714  /*isVarArg=*/false)
2715  ->getPointerTo();
2716  // Copying constructor for the threadprivate variable.
2717  // Must be NULL: this parameter is reserved by the runtime, which currently
2718  // requires it to be NULL and asserts otherwise.
2719  CopyCtor = llvm::Constant::getNullValue(CopyCtorTy);
2720  if (Ctor == nullptr) {
2721  auto *CtorTy = llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
2722  /*isVarArg=*/false)
2723  ->getPointerTo();
2724  Ctor = llvm::Constant::getNullValue(CtorTy);
2725  }
2726  if (Dtor == nullptr) {
2727  auto *DtorTy = llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy,
2728  /*isVarArg=*/false)
2729  ->getPointerTo();
2730  Dtor = llvm::Constant::getNullValue(DtorTy);
2731  }
2732  if (!CGF) {
2733  auto *InitFunctionTy =
2734  llvm::FunctionType::get(CGM.VoidTy, /*isVarArg*/ false);
2735  std::string Name = getName({"__omp_threadprivate_init_", ""});
2736  llvm::Function *InitFunction = CGM.CreateGlobalInitOrDestructFunction(
2737  InitFunctionTy, Name, CGM.getTypes().arrangeNullaryFunction());
2738  CodeGenFunction InitCGF(CGM);
2739  FunctionArgList ArgList;
2740  InitCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, InitFunction,
2741  CGM.getTypes().arrangeNullaryFunction(), ArgList,
2742  Loc, Loc);
2743  emitThreadPrivateVarInit(InitCGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
2744  InitCGF.FinishFunction();
2745  return InitFunction;
2746  }
2747  emitThreadPrivateVarInit(*CGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
2748  }
2749  return nullptr;
2750 }
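// A declaration such as
//
//   int counter;
//   #pragma omp threadprivate(counter)
//
// reaches this path: the generated __omp_threadprivate_init_ function (or the
// supplied CGF, when there is one) calls __kmpc_threadprivate_register with
// the ctor/cctor/dtor triple built above, using null function pointers for
// the pieces that are not required. (Illustrative example of user code that
// triggers this codegen.)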
2751 
2752 /// Obtain information that uniquely identifies a target entry. This
2753 /// consists of the file and device IDs as well as line number associated with
2754 /// the relevant entry source location.
2755 static void getTargetEntryUniqueInfo(ASTContext &C, SourceLocation Loc,
2756  unsigned &DeviceID, unsigned &FileID,
2757  unsigned &LineNum) {
2758  SourceManager &SM = C.getSourceManager();
2759 
2760  // The loc should always be valid and have a file ID (the user cannot use
2761  // #pragma directives in macros).
2762 
2763  assert(Loc.isValid() && "Source location is expected to be always valid.");
2764 
2765  PresumedLoc PLoc = SM.getPresumedLoc(Loc);
2766  assert(PLoc.isValid() && "Source location is expected to be always valid.");
2767 
2768  llvm::sys::fs::UniqueID ID;
2769  if (auto EC = llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID))
2770  SM.getDiagnostics().Report(diag::err_cannot_open_file)
2771  << PLoc.getFilename() << EC.message();
2772 
2773  DeviceID = ID.getDevice();
2774  FileID = ID.getFile();
2775  LineNum = PLoc.getLine();
2776 }
2777 
2778 bool CGOpenMPRuntime::emitDeclareTargetVarDefinition(const VarDecl *VD,
2779  llvm::GlobalVariable *Addr,
2780  bool PerformInit) {
2782  OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
2783  if (!Res || *Res == OMPDeclareTargetDeclAttr::MT_Link ||
2784  (*Res == OMPDeclareTargetDeclAttr::MT_To &&
2786  return CGM.getLangOpts().OpenMPIsDevice;
2787  VD = VD->getDefinition(CGM.getContext());
2788  if (VD && !DeclareTargetWithDefinition.insert(CGM.getMangledName(VD)).second)
2789  return CGM.getLangOpts().OpenMPIsDevice;
2790 
2791  QualType ASTTy = VD->getType();
2792 
2794  // Produce the unique prefix to identify the new target regions. We use
2795  // the source location of the variable declaration which we know to not
2796  // conflict with any target region.
2797  unsigned DeviceID;
2798  unsigned FileID;
2799  unsigned Line;
2800  getTargetEntryUniqueInfo(CGM.getContext(), Loc, DeviceID, FileID, Line);
2801  SmallString<128> Buffer, Out;
2802  {
2803  llvm::raw_svector_ostream OS(Buffer);
2804  OS << "__omp_offloading_" << llvm::format("_%x", DeviceID)
2805  << llvm::format("_%x_", FileID) << VD->getName() << "_l" << Line;
2806  }
2807 
2808  const Expr *Init = VD->getAnyInitializer();
2809  if (CGM.getLangOpts().CPlusPlus && PerformInit) {
2810  llvm::Constant *Ctor;
2811  llvm::Constant *ID;
2812  if (CGM.getLangOpts().OpenMPIsDevice) {
2813  // Generate function that re-emits the declaration's initializer into
2814  // the threadprivate copy of the variable VD
2815  CodeGenFunction CtorCGF(CGM);
2816 
2818  llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
2819  llvm::Function *Fn = CGM.CreateGlobalInitOrDestructFunction(
2820  FTy, Twine(Buffer, "_ctor"), FI, Loc);
2821  auto NL = ApplyDebugLocation::CreateEmpty(CtorCGF);
2822  CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI,
2823  FunctionArgList(), Loc, Loc);
2824  auto AL = ApplyDebugLocation::CreateArtificial(CtorCGF);
2825  CtorCGF.EmitAnyExprToMem(Init,
2826  Address(Addr, CGM.getContext().getDeclAlign(VD)),
2827  Init->getType().getQualifiers(),
2828  /*IsInitializer=*/true);
2829  CtorCGF.FinishFunction();
2830  Ctor = Fn;
2831  ID = llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
2832  CGM.addUsedGlobal(cast<llvm::GlobalValue>(Ctor));
2833  } else {
2834  Ctor = new llvm::GlobalVariable(
2835  CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
2836  llvm::GlobalValue::PrivateLinkage,
2837  llvm::Constant::getNullValue(CGM.Int8Ty), Twine(Buffer, "_ctor"));
2838  ID = Ctor;
2839  }
2840 
2841  // Register the information for the entry associated with the constructor.
2842  Out.clear();
2844  DeviceID, FileID, Twine(Buffer, "_ctor").toStringRef(Out), Line, Ctor,
2846  }
2847  if (VD->getType().isDestructedType() != QualType::DK_none) {
2848  llvm::Constant *Dtor;
2849  llvm::Constant *ID;
2850  if (CGM.getLangOpts().OpenMPIsDevice) {
2851  // Generate function that emits destructor call for the threadprivate
2852  // copy of the variable VD
2853  CodeGenFunction DtorCGF(CGM);
2854 
2856  llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
2857  llvm::Function *Fn = CGM.CreateGlobalInitOrDestructFunction(
2858  FTy, Twine(Buffer, "_dtor"), FI, Loc);
2859  auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF);
2860  DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI,
2861  FunctionArgList(), Loc, Loc);
2862  // Create a scope with an artificial location for the body of this
2863  // function.
2864  auto AL = ApplyDebugLocation::CreateArtificial(DtorCGF);
2865  DtorCGF.emitDestroy(Address(Addr, CGM.getContext().getDeclAlign(VD)),
2866  ASTTy, DtorCGF.getDestroyer(ASTTy.isDestructedType()),
2867  DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
2868  DtorCGF.FinishFunction();
2869  Dtor = Fn;
2870  ID = llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
2871  CGM.addUsedGlobal(cast<llvm::GlobalValue>(Dtor));
2872  } else {
2873  Dtor = new llvm::GlobalVariable(
2874  CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
2875  llvm::GlobalValue::PrivateLinkage,
2876  llvm::Constant::getNullValue(CGM.Int8Ty), Twine(Buffer, "_dtor"));
2877  ID = Dtor;
2878  }
2879  // Register the information for the entry associated with the destructor.
2880  Out.clear();
2882  DeviceID, FileID, Twine(Buffer, "_dtor").toStringRef(Out), Line, Dtor,
2884  }
2885  return CGM.getLangOpts().OpenMPIsDevice;
2886 }
2887 
2888 Address CGOpenMPRuntime::getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
2889  QualType VarType,
2890  StringRef Name) {
2891  std::string Suffix = getName({"artificial", ""});
2892  std::string CacheSuffix = getName({"cache", ""});
2893  llvm::Type *VarLVType = CGF.ConvertTypeForMem(VarType);
2894  llvm::Value *GAddr =
2895  getOrCreateInternalVariable(VarLVType, Twine(Name).concat(Suffix));
2896  llvm::Value *Args[] = {
2898  getThreadID(CGF, SourceLocation()),
2900  CGF.Builder.CreateIntCast(CGF.getTypeSize(VarType), CGM.SizeTy,
2901  /*IsSigned=*/false),
2903  CGM.VoidPtrPtrTy, Twine(Name).concat(Suffix).concat(CacheSuffix))};
2904  return Address(
2906  CGF.EmitRuntimeCall(
2908  VarLVType->getPointerTo(/*AddrSpace=*/0)),
2909  CGM.getPointerAlign());
2910 }
2911 
2913  const RegionCodeGenTy &ThenGen,
2914  const RegionCodeGenTy &ElseGen) {
2915  CodeGenFunction::LexicalScope ConditionScope(CGF, Cond->getSourceRange());
2916 
2917  // If the condition constant folds and can be elided, try to avoid emitting
2918  // the condition and the dead arm of the if/else.
2919  bool CondConstant;
2920  if (CGF.ConstantFoldsToSimpleInteger(Cond, CondConstant)) {
2921  if (CondConstant)
2922  ThenGen(CGF);
2923  else
2924  ElseGen(CGF);
2925  return;
2926  }
2927 
2928  // Otherwise, the condition did not fold, or we couldn't elide it. Just
2929  // emit the conditional branch.
2930  llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("omp_if.then");
2931  llvm::BasicBlock *ElseBlock = CGF.createBasicBlock("omp_if.else");
2932  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("omp_if.end");
2933  CGF.EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, /*TrueCount=*/0);
2934 
2935  // Emit the 'then' code.
2936  CGF.EmitBlock(ThenBlock);
2937  ThenGen(CGF);
2938  CGF.EmitBranch(ContBlock);
2939  // Emit the 'else' code if present.
2940  // There is no need to emit line number for unconditional branch.
2942  CGF.EmitBlock(ElseBlock);
2943  ElseGen(CGF);
2944  // There is no need to emit line number for unconditional branch.
2946  CGF.EmitBranch(ContBlock);
2947  // Emit the continuation block for code after the if.
2948  CGF.EmitBlock(ContBlock, /*IsFinished=*/true);
2949 }
2950 
2952  llvm::Function *OutlinedFn,
2953  ArrayRef<llvm::Value *> CapturedVars,
2954  const Expr *IfCond) {
2955  if (!CGF.HaveInsertPoint())
2956  return;
2957  llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
2958  auto &&ThenGen = [OutlinedFn, CapturedVars, RTLoc](CodeGenFunction &CGF,
2959  PrePostActionTy &) {
2960  // Build call __kmpc_fork_call(loc, n, microtask, var1, .., varn);
2961  CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
2962  llvm::Value *Args[] = {
2963  RTLoc,
2964  CGF.Builder.getInt32(CapturedVars.size()), // Number of captured vars
2965  CGF.Builder.CreateBitCast(OutlinedFn, RT.getKmpc_MicroPointerTy())};
2967  RealArgs.append(std::begin(Args), std::end(Args));
2968  RealArgs.append(CapturedVars.begin(), CapturedVars.end());
2969 
2970  llvm::FunctionCallee RTLFn =
2971  RT.createRuntimeFunction(OMPRTL__kmpc_fork_call);
2972  CGF.EmitRuntimeCall(RTLFn, RealArgs);
2973  };
2974  auto &&ElseGen = [OutlinedFn, CapturedVars, RTLoc, Loc](CodeGenFunction &CGF,
2975  PrePostActionTy &) {
2976  CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
2977  llvm::Value *ThreadID = RT.getThreadID(CGF, Loc);
2978  // Build calls:
2979  // __kmpc_serialized_parallel(&Loc, GTid);
2980  llvm::Value *Args[] = {RTLoc, ThreadID};
2981  CGF.EmitRuntimeCall(
2982  RT.createRuntimeFunction(OMPRTL__kmpc_serialized_parallel), Args);
2983 
2984  // OutlinedFn(&GTid, &zero, CapturedStruct);
2985  Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
2986  /*Name*/ ".zero.addr");
2987  CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
2988  llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
2989  // ThreadId for serialized parallels is 0.
2990  OutlinedFnArgs.push_back(ZeroAddr.getPointer());
2991  OutlinedFnArgs.push_back(ZeroAddr.getPointer());
2992  OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
2993  RT.emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
2994 
2995  // __kmpc_end_serialized_parallel(&Loc, GTid);
2996  llvm::Value *EndArgs[] = {RT.emitUpdateLocation(CGF, Loc), ThreadID};
2997  CGF.EmitRuntimeCall(
2998  RT.createRuntimeFunction(OMPRTL__kmpc_end_serialized_parallel),
2999  EndArgs);
3000  };
3001  if (IfCond) {
3002  emitOMPIfClause(CGF, IfCond, ThenGen, ElseGen);
3003  } else {
3004  RegionCodeGenTy ThenRCG(ThenGen);
3005  ThenRCG(CGF);
3006  }
3007 }
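// Illustrative sketch, not part of this file: a user-level 'parallel'
// construct with an 'if' clause. Under the lowering above, when 'use_threads'
// is true the outlined body is launched via __kmpc_fork_call (ThenGen);
// otherwise the serialized path (__kmpc_serialized_parallel /
// __kmpc_end_serialized_parallel) runs the outlined function on the current
// thread with a zero thread id (ElseGen). Names below are hypothetical.
static void example_parallel_if(float *y, const float *x, float a, int n,
                                bool use_threads) {
#pragma omp parallel if(use_threads)
  {
#pragma omp for
    for (int i = 0; i < n; ++i)
      y[i] = a * x[i] + y[i];
  }
}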
3008 
3009 // If we're inside an (outlined) parallel region, use the region info's
3010 // thread-ID variable (it is passed as the first argument of the outlined
3011 // function as "kmp_int32 *gtid"). Otherwise, if we're not inside a parallel
3012 // region but in a regular serial code region, get the thread ID by calling
3013 // kmp_int32 __kmpc_global_thread_num(ident_t *loc), stash this thread ID in a
3014 // temporary and return the address of that temp.
3016  SourceLocation Loc) {
3017  if (auto *OMPRegionInfo =
3018  dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
3019  if (OMPRegionInfo->getThreadIDVariable())
3020  return OMPRegionInfo->getThreadIDVariableLValue(CGF).getAddress();
3021 
3022  llvm::Value *ThreadID = getThreadID(CGF, Loc);
3023  QualType Int32Ty =
3024  CGF.getContext().getIntTypeForBitwidth(/*DestWidth*/ 32, /*Signed*/ true);
3025  Address ThreadIDTemp = CGF.CreateMemTemp(Int32Ty, /*Name*/ ".threadid_temp.");
3026  CGF.EmitStoreOfScalar(ThreadID,
3027  CGF.MakeAddrLValue(ThreadIDTemp, Int32Ty));
3028 
3029  return ThreadIDTemp;
3030 }
3031 
3033  llvm::Type *Ty, const llvm::Twine &Name, unsigned AddressSpace) {
3034  SmallString<256> Buffer;
3035  llvm::raw_svector_ostream Out(Buffer);
3036  Out << Name;
3037  StringRef RuntimeName = Out.str();
3038  auto &Elem = *InternalVars.try_emplace(RuntimeName, nullptr).first;
3039  if (Elem.second) {
3040  assert(Elem.second->getType()->getPointerElementType() == Ty &&
3041  "OMP internal variable has different type than requested");
3042  return &*Elem.second;
3043  }
3044 
3045  return Elem.second = new llvm::GlobalVariable(
3046  CGM.getModule(), Ty, /*IsConstant*/ false,
3047  llvm::GlobalValue::CommonLinkage, llvm::Constant::getNullValue(Ty),
3048  Elem.first(), /*InsertBefore=*/nullptr,
3049  llvm::GlobalValue::NotThreadLocal, AddressSpace);
3050 }
3051 
3053  std::string Prefix = Twine("gomp_critical_user_", CriticalName).str();
3054  std::string Name = getName({Prefix, "var"});
3055  return getOrCreateInternalVariable(KmpCriticalNameTy, Name);
3056 }
3057 
3058 namespace {
3059 /// Common pre(post)-action for different OpenMP constructs.
3060 class CommonActionTy final : public PrePostActionTy {
3061  llvm::FunctionCallee EnterCallee;
3062  ArrayRef<llvm::Value *> EnterArgs;
3063  llvm::FunctionCallee ExitCallee;
3064  ArrayRef<llvm::Value *> ExitArgs;
3065  bool Conditional;
3066  llvm::BasicBlock *ContBlock = nullptr;
3067 
3068 public:
3069  CommonActionTy(llvm::FunctionCallee EnterCallee,
3070  ArrayRef<llvm::Value *> EnterArgs,
3071  llvm::FunctionCallee ExitCallee,
3072  ArrayRef<llvm::Value *> ExitArgs, bool Conditional = false)
3073  : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
3074  ExitArgs(ExitArgs), Conditional(Conditional) {}
3075  void Enter(CodeGenFunction &CGF) override {
3076  llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs);
3077  if (Conditional) {
3078  llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes);
3079  auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
3080  ContBlock = CGF.createBasicBlock("omp_if.end");
3081  // Generate the branch (If-stmt)
3082  CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
3083  CGF.EmitBlock(ThenBlock);
3084  }
3085  }
3086  void Done(CodeGenFunction &CGF) {
3087  // Emit the rest of blocks/branches
3088  CGF.EmitBranch(ContBlock);
3089  CGF.EmitBlock(ContBlock, true);
3090  }
3091  void Exit(CodeGenFunction &CGF) override {
3092  CGF.EmitRuntimeCall(ExitCallee, ExitArgs);
3093  }
3094 };
3095 } // anonymous namespace
3096 
3098  StringRef CriticalName,
3099  const RegionCodeGenTy &CriticalOpGen,
3100  SourceLocation Loc, const Expr *Hint) {
3101  // __kmpc_critical[_with_hint](ident_t *, gtid, Lock[, hint]);
3102  // CriticalOpGen();
3103  // __kmpc_end_critical(ident_t *, gtid, Lock);
3104  // Prepare arguments and build a call to __kmpc_critical
3105  if (!CGF.HaveInsertPoint())
3106  return;
3107  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
3108  getCriticalRegionLock(CriticalName)};
3109  llvm::SmallVector<llvm::Value *, 4> EnterArgs(std::begin(Args),
3110  std::end(Args));
3111  if (Hint) {
3112  EnterArgs.push_back(CGF.Builder.CreateIntCast(
3113  CGF.EmitScalarExpr(Hint), CGM.IntPtrTy, /*isSigned=*/false));
3114  }
3115  CommonActionTy Action(
3119  CriticalOpGen.setAction(Action);
3120  emitInlinedDirective(CGF, OMPD_critical, CriticalOpGen);
3121 }
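// Illustrative sketch, not part of this file: a named 'critical' construct.
// Per the comment above, the body is bracketed by a __kmpc_critical /
// __kmpc_end_critical pair (or __kmpc_critical_with_hint when a 'hint'
// clause is present). Names below are hypothetical.
static void example_critical(long long &counter, int inc) {
#pragma omp critical(update_counter)
  counter += inc;
}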
3122 
3124  const RegionCodeGenTy &MasterOpGen,
3125  SourceLocation Loc) {
3126  if (!CGF.HaveInsertPoint())
3127  return;
3128  // if(__kmpc_master(ident_t *, gtid)) {
3129  // MasterOpGen();
3130  // __kmpc_end_master(ident_t *, gtid);
3131  // }
3132  // Prepare arguments and build a call to __kmpc_master
3133  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
3134  CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_master), Args,
3136  /*Conditional=*/true);
3137  MasterOpGen.setAction(Action);
3138  emitInlinedDirective(CGF, OMPD_master, MasterOpGen);
3139  Action.Done(CGF);
3140 }
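// Illustrative sketch, not part of this file: a 'master' construct inside a
// parallel region. Only the thread for which __kmpc_master returns nonzero
// executes the body, matching the conditional CommonActionTy used above.
// Names below are hypothetical.
static void example_master(int *progress) {
#pragma omp parallel
  {
#pragma omp master
    *progress = 1; // executed by the master thread only
  }
}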
3141 
3143  SourceLocation Loc) {
3144  if (!CGF.HaveInsertPoint())
3145  return;
3146  // Build call __kmpc_omp_taskyield(loc, thread_id, 0);
3147  llvm::Value *Args[] = {
3148  emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
3149  llvm::ConstantInt::get(CGM.IntTy, /*V=*/0, /*isSigned=*/true)};
3151  if (auto *Region = dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
3152  Region->emitUntiedSwitch(CGF);
3153 }
3154 
3156  const RegionCodeGenTy &TaskgroupOpGen,
3157  SourceLocation Loc) {
3158  if (!CGF.HaveInsertPoint())
3159  return;
3160  // __kmpc_taskgroup(ident_t *, gtid);
3161  // TaskgroupOpGen();
3162  // __kmpc_end_taskgroup(ident_t *, gtid);
3163  // Prepare arguments and build a call to __kmpc_taskgroup
3164  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
3165  CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_taskgroup), Args,
3167  Args);
3168  TaskgroupOpGen.setAction(Action);
3169  emitInlinedDirective(CGF, OMPD_taskgroup, TaskgroupOpGen);
3170 }
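// Illustrative sketch, not part of this file: a 'taskgroup' construct. The
// region is bracketed by __kmpc_taskgroup / __kmpc_end_taskgroup, so all
// child tasks created inside it complete before the construct exits. Names
// below are hypothetical.
static void example_taskgroup(int *a, int *b) {
#pragma omp taskgroup
  {
#pragma omp task
    *a = 1;
#pragma omp task
    *b = 2;
  } // both tasks are guaranteed to have finished here
}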
3171 
3172 /// Given an array of pointers to variables, project the address of a
3173 /// given variable.
3175  unsigned Index, const VarDecl *Var) {
3176  // Pull out the pointer to the variable.
3177  Address PtrAddr = CGF.Builder.CreateConstArrayGEP(Array, Index);
3178  llvm::Value *Ptr = CGF.Builder.CreateLoad(PtrAddr);
3179 
3180  Address Addr = Address(Ptr, CGF.getContext().getDeclAlign(Var));
3181  Addr = CGF.Builder.CreateElementBitCast(
3182  Addr, CGF.ConvertTypeForMem(Var->getType()));
3183  return Addr;
3184 }
3185 
3187  CodeGenModule &CGM, llvm::Type *ArgsType,
3188  ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs,
3189  ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps,
3190  SourceLocation Loc) {
3191  ASTContext &C = CGM.getContext();
3192  // void copy_func(void *LHSArg, void *RHSArg);
3193  FunctionArgList Args;
3194  ImplicitParamDecl LHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
3196  ImplicitParamDecl RHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
3198  Args.push_back(&LHSArg);
3199  Args.push_back(&RHSArg);
3200  const auto &CGFI =
3202  std::string Name =
3203  CGM.getOpenMPRuntime().getName({"omp", "copyprivate", "copy_func"});
3204  auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
3206  &CGM.getModule());
3207  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3208  Fn->setDoesNotRecurse();
3209  CodeGenFunction CGF(CGM);
3210  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3211  // Dest = (void*[n])(LHSArg);
3212  // Src = (void*[n])(RHSArg);
3214  CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
3215  ArgsType), CGF.getPointerAlign());
3217  CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
3218  ArgsType), CGF.getPointerAlign());
3219  // *(Type0*)Dst[0] = *(Type0*)Src[0];
3220  // *(Type1*)Dst[1] = *(Type1*)Src[1];
3221  // ...
3222  // *(Typen*)Dst[n] = *(Typen*)Src[n];
3223  for (unsigned I = 0, E = AssignmentOps.size(); I < E; ++I) {
3224  const auto *DestVar =
3225  cast<VarDecl>(cast<DeclRefExpr>(DestExprs[I])->getDecl());
3226  Address DestAddr = emitAddrOfVarFromArray(CGF, LHS, I, DestVar);
3227 
3228  const auto *SrcVar =
3229  cast<VarDecl>(cast<DeclRefExpr>(SrcExprs[I])->getDecl());
3230  Address SrcAddr = emitAddrOfVarFromArray(CGF, RHS, I, SrcVar);
3231 
3232  const auto *VD = cast<DeclRefExpr>(CopyprivateVars[I])->getDecl();
3233  QualType Type = VD->getType();
3234  CGF.EmitOMPCopy(Type, DestAddr, SrcAddr, DestVar, SrcVar, AssignmentOps[I]);
3235  }
3236  CGF.FinishFunction();
3237  return Fn;
3238 }
3239 
3241  const RegionCodeGenTy &SingleOpGen,
3242  SourceLocation Loc,
3243  ArrayRef<const Expr *> CopyprivateVars,
3244  ArrayRef<const Expr *> SrcExprs,
3245  ArrayRef<const Expr *> DstExprs,
3246  ArrayRef<const Expr *> AssignmentOps) {
3247  if (!CGF.HaveInsertPoint())
3248  return;
3249  assert(CopyprivateVars.size() == SrcExprs.size() &&
3250  CopyprivateVars.size() == DstExprs.size() &&
3251  CopyprivateVars.size() == AssignmentOps.size());
3252  ASTContext &C = CGM.getContext();
3253  // int32 did_it = 0;
3254  // if(__kmpc_single(ident_t *, gtid)) {
3255  // SingleOpGen();
3256  // __kmpc_end_single(ident_t *, gtid);
3257  // did_it = 1;
3258  // }
3259  // call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>,
3260  // <copy_func>, did_it);
3261 
3262  Address DidIt = Address::invalid();
3263  if (!CopyprivateVars.empty()) {
3264  // int32 did_it = 0;
3265  QualType KmpInt32Ty =
3266  C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
3267  DidIt = CGF.CreateMemTemp(KmpInt32Ty, ".omp.copyprivate.did_it");
3268  CGF.Builder.CreateStore(CGF.Builder.getInt32(0), DidIt);
3269  }
3270  // Prepare arguments and build a call to __kmpc_single
3271  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
3272  CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_single), Args,
3274  /*Conditional=*/true);
3275  SingleOpGen.setAction(Action);
3276  emitInlinedDirective(CGF, OMPD_single, SingleOpGen);
3277  if (DidIt.isValid()) {
3278  // did_it = 1;
3279  CGF.Builder.CreateStore(CGF.Builder.getInt32(1), DidIt);
3280  }
3281  Action.Done(CGF);
3282  // call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>,
3283  // <copy_func>, did_it);
3284  if (DidIt.isValid()) {
3285  llvm::APInt ArraySize(/*unsigned int numBits=*/32, CopyprivateVars.size());
3286  QualType CopyprivateArrayTy =
3287  C.getConstantArrayType(C.VoidPtrTy, ArraySize, ArrayType::Normal,
3288  /*IndexTypeQuals=*/0);
3289  // Create a list of all private variables for copyprivate.
3290  Address CopyprivateList =
3291  CGF.CreateMemTemp(CopyprivateArrayTy, ".omp.copyprivate.cpr_list");
3292  for (unsigned I = 0, E = CopyprivateVars.size(); I < E; ++I) {
3293  Address Elem = CGF.Builder.CreateConstArrayGEP(CopyprivateList, I);
3294  CGF.Builder.CreateStore(
3296  CGF.EmitLValue(CopyprivateVars[I]).getPointer(), CGF.VoidPtrTy),
3297  Elem);
3298  }
3299  // Build a function that copies private values from the single region to all
3300  // other threads in the corresponding parallel region.
3302  CGM, CGF.ConvertTypeForMem(CopyprivateArrayTy)->getPointerTo(),
3303  CopyprivateVars, SrcExprs, DstExprs, AssignmentOps, Loc);
3304  llvm::Value *BufSize = CGF.getTypeSize(CopyprivateArrayTy);
3305  Address CL =
3306  CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(CopyprivateList,
3307  CGF.VoidPtrTy);
3308  llvm::Value *DidItVal = CGF.Builder.CreateLoad(DidIt);
3309  llvm::Value *Args[] = {
3310  emitUpdateLocation(CGF, Loc), // ident_t *<loc>
3311  getThreadID(CGF, Loc), // i32 <gtid>
3312  BufSize, // size_t <buf_size>
3313  CL.getPointer(), // void *<copyprivate list>
3314  CpyFn, // void (*) (void *, void *) <copy_func>
3315  DidItVal // i32 did_it
3316  };
3318  }
3319 }
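// Illustrative sketch, not part of this file: a 'single' construct with a
// 'copyprivate' clause. The thread that wins __kmpc_single initializes
// 'seed' and sets did_it; __kmpc_copyprivate then broadcasts the value to
// the other threads using the generated copy function. Names below are
// hypothetical.
static void example_single_copyprivate(unsigned base) {
#pragma omp parallel
  {
    unsigned seed;
#pragma omp single copyprivate(seed)
    seed = base * 2654435761u; // computed once, broadcast to all threads
    // every thread observes the same 'seed' value here
    (void)seed;
  }
}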
3320 
3322  const RegionCodeGenTy &OrderedOpGen,
3323  SourceLocation Loc, bool IsThreads) {
3324  if (!CGF.HaveInsertPoint())
3325  return;
3326  // __kmpc_ordered(ident_t *, gtid);
3327  // OrderedOpGen();
3328  // __kmpc_end_ordered(ident_t *, gtid);
3329  // Prepare arguments and build a call to __kmpc_ordered
3330  if (IsThreads) {
3331  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
3332  CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_ordered), Args,
3334  Args);
3335  OrderedOpGen.setAction(Action);
3336  emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
3337  return;
3338  }
3339  emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
3340 }
3341 
3343  unsigned Flags;
3344  if (Kind == OMPD_for)
3345  Flags = OMP_IDENT_BARRIER_IMPL_FOR;
3346  else if (Kind == OMPD_sections)
3347  Flags = OMP_IDENT_BARRIER_IMPL_SECTIONS;
3348  else if (Kind == OMPD_single)
3349  Flags = OMP_IDENT_BARRIER_IMPL_SINGLE;
3350  else if (Kind == OMPD_barrier)
3351  Flags = OMP_IDENT_BARRIER_EXPL;
3352  else
3353  Flags = OMP_IDENT_BARRIER_IMPL;
3354  return Flags;
3355 }
3356 
3358  CodeGenFunction &CGF, const OMPLoopDirective &S,
3359  OpenMPScheduleClauseKind &ScheduleKind, const Expr *&ChunkExpr) const {
3360  // Check if the loop directive is actually a doacross loop directive. In this
3361  // case, choose the 'static, 1' schedule.
3362  if (llvm::any_of(
3364  [](const OMPOrderedClause *C) { return C->getNumForLoops(); })) {
3365  ScheduleKind = OMPC_SCHEDULE_static;
3366  // Chunk size is 1 in this case.
3367  llvm::APInt ChunkSize(32, 1);
3368  ChunkExpr = IntegerLiteral::Create(
3369  CGF.getContext(), ChunkSize,
3370  CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
3371  SourceLocation());
3372  }
3373 }
3374 
3376  OpenMPDirectiveKind Kind, bool EmitChecks,
3377  bool ForceSimpleCall) {
3378  if (!CGF.HaveInsertPoint())
3379  return;
3380  // Build call __kmpc_cancel_barrier(loc, thread_id);
3381  // Build call __kmpc_barrier(loc, thread_id);
3382  unsigned Flags = getDefaultFlagsForBarriers(Kind);
3383  // Build call __kmpc_cancel_barrier(loc, thread_id) or __kmpc_barrier(loc,
3384  // thread_id);
3385  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
3386  getThreadID(CGF, Loc)};
3387  if (auto *OMPRegionInfo =
3388  dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
3389  if (!ForceSimpleCall && OMPRegionInfo->hasCancel()) {
3390  llvm::Value *Result = CGF.EmitRuntimeCall(
3392  if (EmitChecks) {
3393  // if (__kmpc_cancel_barrier()) {
3394  // exit from construct;
3395  // }
3396  llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit");
3397  llvm::BasicBlock *ContBB = CGF.createBasicBlock(".cancel.continue");
3398  llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result);
3399  CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
3400  CGF.EmitBlock(ExitBB);
3401  // exit from construct;
3402  CodeGenFunction::JumpDest CancelDestination =
3403  CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
3404  CGF.EmitBranchThroughCleanup(CancelDestination);
3405  CGF.EmitBlock(ContBB, /*IsFinished=*/true);
3406  }
3407  return;
3408  }
3409  }
3411 }
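// Illustrative sketch, not part of this file: an explicit 'barrier' inside a
// cancellable 'parallel' region. Because the enclosing region has a 'cancel'
// construct, the barrier is emitted as __kmpc_cancel_barrier and, when
// EmitChecks is set, a nonzero result branches to the cancellation exit as
// described above. Names below are hypothetical.
static void example_cancel_barrier(int *data, int n, bool bad_input) {
#pragma omp parallel
  {
    if (bad_input) {
#pragma omp cancel parallel
    }
#pragma omp barrier
#pragma omp for
    for (int i = 0; i < n; ++i)
      data[i] = i;
  }
}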
3412 
3413 /// Map the OpenMP loop schedule to the runtime enumeration.
3415  bool Chunked, bool Ordered) {
3416  switch (ScheduleKind) {
3417  case OMPC_SCHEDULE_static:
3418  return Chunked ? (Ordered ? OMP_ord_static_chunked : OMP_sch_static_chunked)
3419  : (Ordered ? OMP_ord_static : OMP_sch_static);
3420  case OMPC_SCHEDULE_dynamic:
3422  case OMPC_SCHEDULE_guided:
3424  case OMPC_SCHEDULE_runtime:
3425  return Ordered ? OMP_ord_runtime : OMP_sch_runtime;
3426  case OMPC_SCHEDULE_auto:
3427  return Ordered ? OMP_ord_auto : OMP_sch_auto;
3428  case OMPC_SCHEDULE_unknown:
3429  assert(!Chunked && "chunk was specified but schedule kind not known");
3430  return Ordered ? OMP_ord_static : OMP_sch_static;
3431  }
3432  llvm_unreachable("Unexpected runtime schedule");
3433 }
3434 
3435 /// Map the OpenMP distribute schedule to the runtime enumeration.
3436 static OpenMPSchedType
3438  // Only 'static' is allowed for dist_schedule.
3440 }
3441 
3443  bool Chunked) const {
3444  OpenMPSchedType Schedule =
3445  getRuntimeSchedule(ScheduleKind, Chunked, /*Ordered=*/false);
3446  return Schedule == OMP_sch_static;
3447 }
3448 
3450  OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const {
3451  OpenMPSchedType Schedule = getRuntimeSchedule(ScheduleKind, Chunked);
3452  return Schedule == OMP_dist_sch_static;
3453 }
3454 
3456  bool Chunked) const {
3457  OpenMPSchedType Schedule =
3458  getRuntimeSchedule(ScheduleKind, Chunked, /*Ordered=*/false);
3459  return Schedule == OMP_sch_static_chunked;
3460 }
3461 
3463  OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const {
3464  OpenMPSchedType Schedule = getRuntimeSchedule(ScheduleKind, Chunked);
3465  return Schedule == OMP_dist_sch_static_chunked;
3466 }
3467 
3469  OpenMPSchedType Schedule =
3470  getRuntimeSchedule(ScheduleKind, /*Chunked=*/false, /*Ordered=*/false);
3471  assert(Schedule != OMP_sch_static_chunked && "cannot be chunked here");
3472  return Schedule != OMP_sch_static;
3473 }
3474 
3478  int Modifier = 0;
3479  switch (M1) {
3480  case OMPC_SCHEDULE_MODIFIER_monotonic:
3481  Modifier = OMP_sch_modifier_monotonic;
3482  break;
3483  case OMPC_SCHEDULE_MODIFIER_nonmonotonic:
3484  Modifier = OMP_sch_modifier_nonmonotonic;
3485  break;
3486  case OMPC_SCHEDULE_MODIFIER_simd:
3487  if (Schedule == OMP_sch_static_chunked)
3489  break;
3492  break;
3493  }
3494  switch (M2) {
3495  case OMPC_SCHEDULE_MODIFIER_monotonic:
3496  Modifier = OMP_sch_modifier_monotonic;
3497  break;
3498  case OMPC_SCHEDULE_MODIFIER_nonmonotonic:
3499  Modifier = OMP_sch_modifier_nonmonotonic;
3500  break;
3501  case OMPC_SCHEDULE_MODIFIER_simd:
3502  if (Schedule == OMP_sch_static_chunked)
3504  break;
3507  break;
3508  }
3509  return Schedule | Modifier;
3510 }
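// Illustrative sketch, not part of this file: a loop whose 'schedule' clause
// carries a monotonicity modifier. For 'schedule(nonmonotonic: dynamic, 4)'
// the runtime schedule value passed to the init/dispatch calls is, per the
// function above, OMP_sch_dynamic_chunked OR-ed with
// OMP_sch_modifier_nonmonotonic. Names below are hypothetical.
static void example_schedule_modifier(double *v, int n) {
#pragma omp parallel for schedule(nonmonotonic: dynamic, 4)
  for (int i = 0; i < n; ++i)
    v[i] = v[i] * 0.5;
}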
3511 
3513  CodeGenFunction &CGF, SourceLocation Loc,
3514  const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned,
3515  bool Ordered, const DispatchRTInput &DispatchValues) {
3516  if (!CGF.HaveInsertPoint())
3517  return;
3519  ScheduleKind.Schedule, DispatchValues.Chunk != nullptr, Ordered);
3520  assert(Ordered ||
3521  (Schedule != OMP_sch_static && Schedule != OMP_sch_static_chunked &&
3522  Schedule != OMP_ord_static && Schedule != OMP_ord_static_chunked &&
3523  Schedule != OMP_sch_static_balanced_chunked));
3524  // Call __kmpc_dispatch_init(
3525  // ident_t *loc, kmp_int32 tid, kmp_int32 schedule,
3526  // kmp_int[32|64] lower, kmp_int[32|64] upper,
3527  // kmp_int[32|64] stride, kmp_int[32|64] chunk);
3528 
3529  // If the Chunk was not specified in the clause, use the default value 1.
3530  llvm::Value *Chunk = DispatchValues.Chunk ? DispatchValues.Chunk
3531  : CGF.Builder.getIntN(IVSize, 1);
3532  llvm::Value *Args[] = {
3533  emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
3534  CGF.Builder.getInt32(addMonoNonMonoModifier(
3535  Schedule, ScheduleKind.M1, ScheduleKind.M2)), // Schedule type
3536  DispatchValues.LB, // Lower
3537  DispatchValues.UB, // Upper
3538  CGF.Builder.getIntN(IVSize, 1), // Stride
3539  Chunk // Chunk
3540  };
3541  CGF.EmitRuntimeCall(createDispatchInitFunction(IVSize, IVSigned), Args);
3542 }
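// Illustrative sketch, not part of this file: a dynamically scheduled loop.
// For this loop the codegen above emits __kmpc_dispatch_init_4 with chunk
// value 8, and the loop codegen then repeatedly calls __kmpc_dispatch_next_4
// (see emitForNext below) until no chunks remain. Names below are
// hypothetical.
static void example_dynamic_schedule(float *out, const float *in, int n) {
#pragma omp parallel for schedule(dynamic, 8)
  for (int i = 0; i < n; ++i)
    out[i] = in[i] + 1.0f;
}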
3543 
3545  CodeGenFunction &CGF, llvm::Value *UpdateLocation, llvm::Value *ThreadId,
3546  llvm::FunctionCallee ForStaticInitFunction, OpenMPSchedType Schedule,
3548  const CGOpenMPRuntime::StaticRTInput &Values) {
3549  if (!CGF.HaveInsertPoint())
3550  return;
3551 
3552  assert(!Values.Ordered);
3553  assert(Schedule == OMP_sch_static || Schedule == OMP_sch_static_chunked ||
3554  Schedule == OMP_sch_static_balanced_chunked ||
3555  Schedule == OMP_ord_static || Schedule == OMP_ord_static_chunked ||
3556  Schedule == OMP_dist_sch_static ||
3557  Schedule == OMP_dist_sch_static_chunked);
3558 
3559  // Call __kmpc_for_static_init(
3560  // ident_t *loc, kmp_int32 tid, kmp_int32 schedtype,
3561  // kmp_int32 *p_lastiter, kmp_int[32|64] *p_lower,
3562  // kmp_int[32|64] *p_upper, kmp_int[32|64] *p_stride,
3563  // kmp_int[32|64] incr, kmp_int[32|64] chunk);
3564  llvm::Value *Chunk = Values.Chunk;
3565  if (Chunk == nullptr) {
3566  assert((Schedule == OMP_sch_static || Schedule == OMP_ord_static ||
3567  Schedule == OMP_dist_sch_static) &&
3568  "expected static non-chunked schedule");
3569  // If the Chunk was not specified in the clause, use the default value 1.
3570  Chunk = CGF.Builder.getIntN(Values.IVSize, 1);
3571  } else {
3572  assert((Schedule == OMP_sch_static_chunked ||
3573  Schedule == OMP_sch_static_balanced_chunked ||
3574  Schedule == OMP_ord_static_chunked ||
3575  Schedule == OMP_dist_sch_static_chunked) &&
3576  "expected static chunked schedule");
3577  }
3578  llvm::Value *Args[] = {
3579  UpdateLocation,
3580  ThreadId,
3581  CGF.Builder.getInt32(addMonoNonMonoModifier(Schedule, M1,
3582  M2)), // Schedule type
3583  Values.IL.getPointer(), // &isLastIter
3584  Values.LB.getPointer(), // &LB
3585  Values.UB.getPointer(), // &UB
3586  Values.ST.getPointer(), // &Stride
3587  CGF.Builder.getIntN(Values.IVSize, 1), // Incr
3588  Chunk // Chunk
3589  };
3590  CGF.EmitRuntimeCall(ForStaticInitFunction, Args);
3591 }
3592 
3594  SourceLocation Loc,
3595  OpenMPDirectiveKind DKind,
3596  const OpenMPScheduleTy &ScheduleKind,
3597  const StaticRTInput &Values) {
3598  OpenMPSchedType ScheduleNum = getRuntimeSchedule(
3599  ScheduleKind.Schedule, Values.Chunk != nullptr, Values.Ordered);
3600  assert(isOpenMPWorksharingDirective(DKind) &&
3601  "Expected loop-based or sections-based directive.");
3602  llvm::Value *UpdatedLocation = emitUpdateLocation(CGF, Loc,
3603  isOpenMPLoopDirective(DKind)
3604  ? OMP_IDENT_WORK_LOOP
3605  : OMP_IDENT_WORK_SECTIONS);
3606  llvm::Value *ThreadId = getThreadID(CGF, Loc);
3607  llvm::FunctionCallee StaticInitFunction =
3609  emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
3610  ScheduleNum, ScheduleKind.M1, ScheduleKind.M2, Values);
3611 }
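// Illustrative sketch, not part of this file: a statically scheduled
// worksharing loop. With no chunk specified this maps to OMP_sch_static, so
// a single __kmpc_for_static_init_4 call computes each thread's lower/upper
// bounds, and __kmpc_for_static_fini is emitted at the end of the loop (see
// emitForStaticFinish below). Names below are hypothetical.
static void example_static_schedule(int *hist, int n) {
#pragma omp parallel for schedule(static)
  for (int i = 0; i < n; ++i)
    hist[i] = 0;
}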
3612 
3614  CodeGenFunction &CGF, SourceLocation Loc,
3615  OpenMPDistScheduleClauseKind SchedKind,
3616  const CGOpenMPRuntime::StaticRTInput &Values) {
3617  OpenMPSchedType ScheduleNum =
3618  getRuntimeSchedule(SchedKind, Values.Chunk != nullptr);
3619  llvm::Value *UpdatedLocation =
3620  emitUpdateLocation(CGF, Loc, OMP_IDENT_WORK_DISTRIBUTE);
3621  llvm::Value *ThreadId = getThreadID(CGF, Loc);
3622  llvm::FunctionCallee StaticInitFunction =
3623  createForStaticInitFunction(Values.IVSize, Values.IVSigned);
3624  emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
3625  ScheduleNum, OMPC_SCHEDULE_MODIFIER_unknown,
3627 }
3628 
3630  SourceLocation Loc,
3631  OpenMPDirectiveKind DKind) {
3632  if (!CGF.HaveInsertPoint())
3633  return;
3634  // Call __kmpc_for_static_fini(ident_t *loc, kmp_int32 tid);
3635  llvm::Value *Args[] = {
3636  emitUpdateLocation(CGF, Loc,
3638  ? OMP_IDENT_WORK_DISTRIBUTE
3639  : isOpenMPLoopDirective(DKind)
3640  ? OMP_IDENT_WORK_LOOP
3641  : OMP_IDENT_WORK_SECTIONS),
3642  getThreadID(CGF, Loc)};
3644  Args);
3645 }
3646 
3648  SourceLocation Loc,
3649  unsigned IVSize,
3650  bool IVSigned) {
3651  if (!CGF.HaveInsertPoint())
3652  return;
3653  // Call __kmpc_for_dynamic_fini_(4|8)[u](ident_t *loc, kmp_int32 tid);
3654  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
3655  CGF.EmitRuntimeCall(createDispatchFiniFunction(IVSize, IVSigned), Args);
3656 }
3657 
3659  SourceLocation Loc, unsigned IVSize,
3660  bool IVSigned, Address IL,
3661  Address LB, Address UB,
3662  Address ST) {
3663  // Call __kmpc_dispatch_next(
3664  // ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
3665  // kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
3666  // kmp_int[32|64] *p_stride);
3667  llvm::Value *Args[] = {
3668  emitUpdateLocation(CGF, Loc),
3669  getThreadID(CGF, Loc),
3670  IL.getPointer(), // &isLastIter
3671  LB.getPointer(), // &Lower
3672  UB.getPointer(), // &Upper
3673  ST.getPointer() // &Stride
3674  };
3675  llvm::Value *Call =
3676  CGF.EmitRuntimeCall(createDispatchNextFunction(IVSize, IVSigned), Args);
3677  return CGF.EmitScalarConversion(
3678  Call, CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/1),
3679  CGF.getContext().BoolTy, Loc);
3680 }
3681 
3683  llvm::Value *NumThreads,
3684  SourceLocation Loc) {
3685  if (!CGF.HaveInsertPoint())
3686  return;
3687  // Build call __kmpc_push_num_threads(&loc, global_tid, num_threads)
3688  llvm::Value *Args[] = {
3689  emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
3690  CGF.Builder.CreateIntCast(NumThreads, CGF.Int32Ty, /*isSigned*/ true)};
3692  Args);
3693 }
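// Illustrative sketch, not part of this file: a 'num_threads' clause. The
// requested value is cast to i32 and passed to __kmpc_push_num_threads
// immediately before the fork of the parallel region. Names below are
// hypothetical.
static void example_num_threads(int *flags, int n, int team_size) {
#pragma omp parallel num_threads(team_size)
  {
#pragma omp for
    for (int i = 0; i < n; ++i)
      flags[i] = 1;
  }
}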
3694 
3696  OpenMPProcBindClauseKind ProcBind,
3697  SourceLocation Loc) {
3698  if (!CGF.HaveInsertPoint())
3699  return;
3700  // Constants for proc_bind values accepted by the runtime.
3701  enum ProcBindTy {
3702  ProcBindFalse = 0,
3703  ProcBindTrue,
3704  ProcBindMaster,
3705  ProcBindClose,
3706  ProcBindSpread,
3707  ProcBindIntel,
3708  ProcBindDefault
3709  } RuntimeProcBind;
3710  switch (ProcBind) {
3711  case OMPC_PROC_BIND_master:
3712  RuntimeProcBind = ProcBindMaster;
3713  break;
3714  case OMPC_PROC_BIND_close:
3715  RuntimeProcBind = ProcBindClose;
3716  break;
3717  case OMPC_PROC_BIND_spread:
3718  RuntimeProcBind = ProcBindSpread;
3719  break;
3721  llvm_unreachable("Unsupported proc_bind value.");
3722  }
3723  // Build call __kmpc_push_proc_bind(&loc, global_tid, proc_bind)
3724  llvm::Value *Args[] = {
3725  emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
3726  llvm::ConstantInt::get(CGM.IntTy, RuntimeProcBind, /*isSigned=*/true)};
3728 }
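// Illustrative sketch, not part of this file: a 'proc_bind' clause. The
// clause kind is mapped to the ProcBindTy constant above (here ProcBindSpread
// == 4) and passed to __kmpc_push_proc_bind before the parallel region is
// started. Names below are hypothetical.
static void example_proc_bind(double *acc, int n) {
#pragma omp parallel for proc_bind(spread)
  for (int i = 0; i < n; ++i)
    acc[i] += 1.0;
}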
3729 
3730 void CGOpenMPRuntime::emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *>,
3731  SourceLocation Loc) {
3732  if (!CGF.HaveInsertPoint())
3733  return;
3734  // Build call void __kmpc_flush(ident_t *loc)
3736  emitUpdateLocation(CGF, Loc));
3737 }
3738 
3739 namespace {
3740 /// Indexes of fields for type kmp_task_t.
3742  /// List of shared variables.
3743  KmpTaskTShareds,
3744  /// Task routine.
3745  KmpTaskTRoutine,
3746  /// Partition id for the untied tasks.
3747  KmpTaskTPartId,
3748  /// Function with call of destructors for private variables.
3749  Data1,
3750  /// Task priority.
3751  Data2,
3752  /// (Taskloops only) Lower bound.
3753  KmpTaskTLowerBound,
3754  /// (Taskloops only) Upper bound.
3755  KmpTaskTUpperBound,
3756  /// (Taskloops only) Stride.
3757  KmpTaskTStride,
3758  /// (Taskloops only) Is last iteration flag.
3759  KmpTaskTLastIter,
3760  /// (Taskloops only) Reduction data.
3761  KmpTaskTReductions,
3762 };
3763 } // anonymous namespace
3764 
3765 bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::empty() const {
3766  return OffloadEntriesTargetRegion.empty() &&
3767  OffloadEntriesDeviceGlobalVar.empty();
3768 }
3769 
3770 /// Initialize target region entry.
3771 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
3772  initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
3773  StringRef ParentName, unsigned LineNum,
3774  unsigned Order) {
3775  assert(CGM.getLangOpts().OpenMPIsDevice && "Initialization of entries is "
3776  "only required for the device "
3777  "code generation.");
3778  OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum] =
3779  OffloadEntryInfoTargetRegion(Order, /*Addr=*/nullptr, /*ID=*/nullptr,
3780  OMPTargetRegionEntryTargetRegion);
3781  ++OffloadingEntriesNum;
3782 }
3783 
3784 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
3785  registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
3786  StringRef ParentName, unsigned LineNum,
3787  llvm::Constant *Addr, llvm::Constant *ID,
3788  OMPTargetRegionEntryKind Flags) {
3789  // If we are emitting code for a target, the entry is already initialized;
3790  // it only has to be registered.
3791  if (CGM.getLangOpts().OpenMPIsDevice) {
3792  if (!hasTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum)) {
3793  unsigned DiagID = CGM.getDiags().getCustomDiagID(
3795  "Unable to find target region on line '%0' in the device code.");
3796  CGM.getDiags().Report(DiagID) << LineNum;
3797  return;
3798  }
3799  auto &Entry =
3800  OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum];
3801  assert(Entry.isValid() && "Entry not initialized!");
3802  Entry.setAddress(Addr);
3803  Entry.setID(ID);
3804  Entry.setFlags(Flags);
3805  } else {
3806  OffloadEntryInfoTargetRegion Entry(OffloadingEntriesNum, Addr, ID, Flags);
3807  OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum] = Entry;
3808  ++OffloadingEntriesNum;
3809  }
3810 }
3811 
3812 bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::hasTargetRegionEntryInfo(
3813  unsigned DeviceID, unsigned FileID, StringRef ParentName,
3814  unsigned LineNum) const {
3815  auto PerDevice = OffloadEntriesTargetRegion.find(DeviceID);
3816  if (PerDevice == OffloadEntriesTargetRegion.end())
3817  return false;
3818  auto PerFile = PerDevice->second.find(FileID);
3819  if (PerFile == PerDevice->second.end())
3820  return false;
3821  auto PerParentName = PerFile->second.find(ParentName);
3822  if (PerParentName == PerFile->second.end())
3823  return false;
3824  auto PerLine = PerParentName->second.find(LineNum);
3825  if (PerLine == PerParentName->second.end())
3826  return false;
3827  // Fail if this entry is already registered.
3828  if (PerLine->second.getAddress() || PerLine->second.getID())
3829  return false;
3830  return true;
3831 }
3832 
3833 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::actOnTargetRegionEntriesInfo(
3834  const OffloadTargetRegionEntryInfoActTy &Action) {
3835  // Scan all target region entries and perform the provided action.
3836  for (const auto &D : OffloadEntriesTargetRegion)
3837  for (const auto &F : D.second)
3838  for (const auto &P : F.second)
3839  for (const auto &L : P.second)
3840  Action(D.first, F.first, P.first(), L.first, L.second);
3841 }
3842 
3843 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
3844  initializeDeviceGlobalVarEntryInfo(StringRef Name,
3845  OMPTargetGlobalVarEntryKind Flags,
3846  unsigned Order) {
3847  assert(CGM.getLangOpts().OpenMPIsDevice && "Initialization of entries is "
3848  "only required for the device "
3849  "code generation.");
3850  OffloadEntriesDeviceGlobalVar.try_emplace(Name, Order, Flags);
3851  ++OffloadingEntriesNum;
3852 }
3853 
3854 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
3855  registerDeviceGlobalVarEntryInfo(StringRef VarName, llvm::Constant *Addr,
3856  CharUnits VarSize,
3857  OMPTargetGlobalVarEntryKind Flags,
3858  llvm::GlobalValue::LinkageTypes Linkage) {
3859  if (CGM.getLangOpts().OpenMPIsDevice) {
3860  auto &Entry = OffloadEntriesDeviceGlobalVar[VarName];
3861  assert(Entry.isValid() && Entry.getFlags() == Flags &&
3862  "Entry not initialized!");
3863  assert((!Entry.getAddress() || Entry.getAddress() == Addr) &&
3864  "Resetting with the new address.");
3865  if (Entry.getAddress() && hasDeviceGlobalVarEntryInfo(VarName)) {
3866  if (Entry.getVarSize().isZero()) {
3867  Entry.setVarSize(VarSize);
3868  Entry.setLinkage(Linkage);
3869  }
3870  return;
3871  }
3872  Entry.setVarSize(VarSize);
3873  Entry.setLinkage(Linkage);
3874  Entry.setAddress(Addr);
3875  } else {
3876  if (hasDeviceGlobalVarEntryInfo(VarName)) {
3877  auto &Entry = OffloadEntriesDeviceGlobalVar[VarName];
3878  assert(Entry.isValid() && Entry.getFlags() == Flags &&
3879  "Entry not initialized!");
3880  assert((!Entry.getAddress() || Entry.getAddress() == Addr) &&
3881  "Resetting with the new address.");
3882  if (Entry.getVarSize().isZero()) {
3883  Entry.setVarSize(VarSize);
3884  Entry.setLinkage(Linkage);
3885  }
3886  return;
3887  }
3888  OffloadEntriesDeviceGlobalVar.try_emplace(
3889  VarName, OffloadingEntriesNum, Addr, VarSize, Flags, Linkage);
3890  ++OffloadingEntriesNum;
3891  }
3892 }
3893 
3894 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
3895  actOnDeviceGlobalVarEntriesInfo(
3896  const OffloadDeviceGlobalVarEntryInfoActTy &Action) {
3897  // Scan all device global variable entries and perform the provided action.
3898  for (const auto &E : OffloadEntriesDeviceGlobalVar)
3899  Action(E.getKey(), E.getValue());
3900 }
3901 
3902 llvm::Function *
3904  // If we don't have entries or if we are emitting code for the device, we
3905  // don't need to do anything.
3906  if (CGM.getLangOpts().OpenMPIsDevice || OffloadEntriesInfoManager.empty())
3907  return nullptr;
3908 
3909  llvm::Module &M = CGM.getModule();
3910  ASTContext &C = CGM.getContext();
3911 
3912  // Get the list of devices we care about.
3913  const std::vector<llvm::Triple> &Devices = CGM.getLangOpts().OMPTargetTriples;
3914 
3915  // We should be creating an offloading descriptor only if there are devices
3916  // specified.
3917  assert(!Devices.empty() && "No OpenMP offloading devices??");
3918 
3919  // Create the external variables that will point to the begin and end of the
3920  // host entries section. These will be defined by the linker.
3921  llvm::Type *OffloadEntryTy =
3923  std::string EntriesBeginName = getName({"omp_offloading", "entries_begin"});
3924  auto *HostEntriesBegin = new llvm::GlobalVariable(
3925  M, OffloadEntryTy, /*isConstant=*/true,
3926  llvm::GlobalValue::ExternalLinkage, /*Initializer=*/nullptr,
3927  EntriesBeginName);
3928  std::string EntriesEndName = getName({"omp_offloading", "entries_end"});
3929  auto *HostEntriesEnd =
3930  new llvm::GlobalVariable(M, OffloadEntryTy, /*isConstant=*/true,
3932  /*Initializer=*/nullptr, EntriesEndName);
3933 
3934  // Create all device images
3935  auto *DeviceImageTy = cast<llvm::StructType>(
3937  ConstantInitBuilder DeviceImagesBuilder(CGM);
3938  ConstantArrayBuilder DeviceImagesEntries =
3939  DeviceImagesBuilder.beginArray(DeviceImageTy);
3940 
3941  for (const llvm::Triple &Device : Devices) {
3942  StringRef T = Device.getTriple();
3943  std::string BeginName = getName({"omp_offloading", "img_start", ""});
3944  auto *ImgBegin = new llvm::GlobalVariable(
3945  M, CGM.Int8Ty, /*isConstant=*/true,
3946  llvm::GlobalValue::ExternalWeakLinkage,
3947  /*Initializer=*/nullptr, Twine(BeginName).concat(T));
3948  std::string EndName = getName({"omp_offloading", "img_end", ""});
3949  auto *ImgEnd = new llvm::GlobalVariable(
3950  M, CGM.Int8Ty, /*isConstant=*/true,
3951  llvm::GlobalValue::ExternalWeakLinkage,
3952  /*Initializer=*/nullptr, Twine(EndName).concat(T));
3953 
3954  llvm::Constant *Data[] = {ImgBegin, ImgEnd, HostEntriesBegin,
3955  HostEntriesEnd};
3957  DeviceImagesEntries);
3958  }
3959 
3960  // Create device images global array.
3961  std::string ImagesName = getName({"omp_offloading", "device_images"});
3962  llvm::GlobalVariable *DeviceImages =
3963  DeviceImagesEntries.finishAndCreateGlobal(ImagesName,
3964  CGM.getPointerAlign(),
3965  /*isConstant=*/true);
3966  DeviceImages->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3967 
3968  // This is an all-zero index array used in the creation of the constant expressions.
3969  llvm::Constant *Index[] = {llvm::Constant::getNullValue(CGM.Int32Ty),
3970  llvm::Constant::getNullValue(CGM.Int32Ty)};
3971 
3972  // Create the target region descriptor.
3973  llvm::Constant *Data[] = {
3974  llvm::ConstantInt::get(CGM.Int32Ty, Devices.size()),
3975  llvm::ConstantExpr::getGetElementPtr(DeviceImages->getValueType(),
3976  DeviceImages, Index),
3977  HostEntriesBegin, HostEntriesEnd};
3978  std::string Descriptor = getName({"omp_offloading", "descriptor"});
3979  llvm::GlobalVariable *Desc = createGlobalStruct(
3980  CGM, getTgtBinaryDescriptorQTy(), /*IsConstant=*/true, Data, Descriptor);
3981 
3982  // Emit code to register or unregister the descriptor at execution
3983  // startup or closing, respectively.
3984 
3985  llvm::Function *UnRegFn;
3986  {
3987  FunctionArgList Args;
3989  Args.push_back(&DummyPtr);
3990 
3991  CodeGenFunction CGF(CGM);
3992  // Disable debug info for global (de-)initializer because they are not part
3993  // of some particular construct.
3994  CGF.disableDebugInfo();
3995  const auto &FI =
3997  llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
3998  std::string UnregName = getName({"omp_offloading", "descriptor_unreg"});
3999  UnRegFn = CGM.CreateGlobalInitOrDestructFunction(FTy, UnregName, FI);
4000  CGF.StartFunction(GlobalDecl(), C.VoidTy, UnRegFn, FI, Args);
4002  Desc);
4003  CGF.FinishFunction();
4004  }
4005  llvm::Function *RegFn;
4006  {
4007  CodeGenFunction CGF(CGM);
4008  // Disable debug info for global (de-)initializer because they are not part
4009  // of some particular construct.
4010  CGF.disableDebugInfo();
4011  const auto &FI = CGM.getTypes().arrangeNullaryFunction();
4012  llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
4013 
4014  // Encode offload target triples into the registration function name. It
4015  // will serve as a comdat key for the registration/unregistration code for
4016  // this particular combination of offloading targets.
4017  SmallVector<StringRef, 4U> RegFnNameParts(Devices.size() + 2U);
4018  RegFnNameParts[0] = "omp_offloading";
4019  RegFnNameParts[1] = "descriptor_reg";
4020  llvm::transform(Devices, std::next(RegFnNameParts.begin(), 2),
4021  [](const llvm::Triple &T) -> const std::string& {
4022  return T.getTriple();
4023  });
4024  llvm::sort(std::next(RegFnNameParts.begin(), 2), RegFnNameParts.end());
4025  std::string Descriptor = getName(RegFnNameParts);
4026  RegFn = CGM.CreateGlobalInitOrDestructFunction(FTy, Descriptor, FI);
4027  CGF.StartFunction(GlobalDecl(), C.VoidTy, RegFn, FI, FunctionArgList());
4029  // Create a variable to drive the registration and unregistration of the
4030  // descriptor, so we can reuse the logic that emits Ctors and Dtors.
4031  ImplicitParamDecl RegUnregVar(C, C.getTranslationUnitDecl(),
4032  SourceLocation(), nullptr, C.CharTy,
4034  CGM.getCXXABI().registerGlobalDtor(CGF, RegUnregVar, UnRegFn, Desc);
4035  CGF.FinishFunction();
4036  }
4037  if (CGM.supportsCOMDAT()) {
4038  // It is sufficient to call registration function only once, so create a
4039  // COMDAT group for registration/unregistration functions and associated
4040  // data. That would reduce startup time and code size. Registration
4041  // function serves as a COMDAT group key.
4042  llvm::Comdat *ComdatKey = M.getOrInsertComdat(RegFn->getName());
4043  RegFn->setLinkage(llvm::GlobalValue::LinkOnceAnyLinkage);
4044  RegFn->setVisibility(llvm::GlobalValue::HiddenVisibility);
4045  RegFn->setComdat(ComdatKey);
4046  UnRegFn->setComdat(ComdatKey);
4047  DeviceImages->setComdat(ComdatKey);
4048  Desc->setComdat(ComdatKey);
4049  }
4050  return RegFn;
4051 }
4052 
4054  llvm::Constant *ID, llvm::Constant *Addr, uint64_t Size, int32_t Flags,
4055  llvm::GlobalValue::LinkageTypes Linkage) {
4056  StringRef Name = Addr->getName();
4057  llvm::Module &M = CGM.getModule();
4058  llvm::LLVMContext &C = M.getContext();
4059 
4060  // Create constant string with the name.
4061  llvm::Constant *StrPtrInit = llvm::ConstantDataArray::getString(C, Name);
4062 
4063  std::string StringName = getName({"omp_offloading", "entry_name"});
4064  auto *Str = new llvm::GlobalVariable(
4065  M, StrPtrInit->getType(), /*isConstant=*/true,
4066  llvm::GlobalValue::InternalLinkage, StrPtrInit, StringName);
4067  Str->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
4068 
4069  llvm::Constant *Data[] = {llvm::ConstantExpr::getBitCast(ID, CGM.VoidPtrTy),
4070  llvm::ConstantExpr::getBitCast(Str, CGM.Int8PtrTy),
4071  llvm::ConstantInt::get(CGM.SizeTy, Size),
4072  llvm::ConstantInt::get(CGM.Int32Ty, Flags),
4073  llvm::ConstantInt::get(CGM.Int32Ty, 0)};
4074  std::string EntryName = getName({"omp_offloading", "entry", ""});
4075  llvm::GlobalVariable *Entry = createGlobalStruct(
4076  CGM, getTgtOffloadEntryQTy(), /*IsConstant=*/true, Data,
4077  Twine(EntryName).concat(Name), llvm::GlobalValue::WeakAnyLinkage);
4078 
4079  // The entry has to be created in the section the linker expects it to be.
4080  std::string Section = getName({"omp_offloading", "entries"});
4081  Entry->setSection(Section);
4082 }
4083 
4085  // Emit the offloading entries and metadata so that the device codegen side
4086  // can easily figure out what to emit. The produced metadata looks like
4087  // this:
4088  //
4089  // !omp_offload.info = !{!1, ...}
4090  //
4091  // Right now we only generate metadata for functions that contain target
4092  // regions.
4093 
4094  // If we do not have entries, we don't need to do anything.
4096  return;
4097 
4098  llvm::Module &M = CGM.getModule();
4099  llvm::LLVMContext &C = M.getContext();
4101  OrderedEntries(OffloadEntriesInfoManager.size());
4102  llvm::SmallVector<StringRef, 16> ParentFunctions(
4104 
4105  // Auxiliary methods to create metadata values and strings.
4106  auto &&GetMDInt = [this](unsigned V) {
4107  return llvm::ConstantAsMetadata::get(
4108  llvm::ConstantInt::get(CGM.Int32Ty, V));
4109  };
4110 
4111  auto &&GetMDString = [&C](StringRef V) { return llvm::MDString::get(C, V); };
4112 
4113  // Create the offloading info metadata node.
4114  llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("omp_offload.info");
4115 
4116  // Create a function that emits metadata for each target region entry.
4117  auto &&TargetRegionMetadataEmitter =
4118  [&C, MD, &OrderedEntries, &ParentFunctions, &GetMDInt, &GetMDString](
4119  unsigned DeviceID, unsigned FileID, StringRef ParentName,
4120  unsigned Line,
4122  // Generate metadata for target regions. Each entry of this metadata
4123  // contains:
4124  // - Entry 0 -> Kind of this type of metadata (0).
4125  // - Entry 1 -> Device ID of the file where the entry was identified.
4126  // - Entry 2 -> File ID of the file where the entry was identified.
4127  // - Entry 3 -> Mangled name of the function where the entry was
4128  // identified.
4129  // - Entry 4 -> Line in the file where the entry was identified.
4130  // - Entry 5 -> Order the entry was created.
4131  // The first element of the metadata node is the kind.
4132  llvm::Metadata *Ops[] = {GetMDInt(E.getKind()), GetMDInt(DeviceID),
4133  GetMDInt(FileID), GetMDString(ParentName),
4134  GetMDInt(Line), GetMDInt(E.getOrder())};
4135 
4136  // Save this entry in the right position of the ordered entries array.
4137  OrderedEntries[E.getOrder()] = &E;
4138  ParentFunctions[E.getOrder()] = ParentName;
4139 
4140  // Add metadata to the named metadata node.
4141  MD->addOperand(llvm::MDNode::get(C, Ops));
4142  };
4143 
4145  TargetRegionMetadataEmitter);
4146 
4147  // Create a function that emits metadata for each device global variable entry.
4148  auto &&DeviceGlobalVarMetadataEmitter =
4149  [&C, &OrderedEntries, &GetMDInt, &GetMDString,
4150  MD](StringRef MangledName,
4152  &E) {
4153  // Generate metadata for global variables. Each entry of this metadata
4154  // contains:
4155  // - Entry 0 -> Kind of this type of metadata (1).
4156  // - Entry 1 -> Mangled name of the variable.
4157  // - Entry 2 -> Declare target kind.
4158  // - Entry 3 -> Order the entry was created.
4159  // The first element of the metadata node is the kind.
4160  llvm::Metadata *Ops[] = {
4161  GetMDInt(E.getKind()), GetMDString(MangledName),
4162  GetMDInt(E.getFlags()), GetMDInt(E.getOrder())};
4163 
4164  // Save this entry in the right position of the ordered entries array.
4165  OrderedEntries[E.getOrder()] = &E;
4166 
4167  // Add metadata to the named metadata node.
4168  MD->addOperand(llvm::MDNode::get(C, Ops));
4169  };
4170 
4172  DeviceGlobalVarMetadataEmitter);
4173 
4174  for (const auto *E : OrderedEntries) {
4175  assert(E && "All ordered entries must exist!");
4176  if (const auto *CE =
4177  dyn_cast<OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion>(
4178  E)) {
4179  if (!CE->getID() || !CE->getAddress()) {
4180  // Do not blame the entry if the parent function is not emitted.
4181  StringRef FnName = ParentFunctions[CE->getOrder()];
4182  if (!CGM.GetGlobalValue(FnName))
4183  continue;
4184  unsigned DiagID = CGM.getDiags().getCustomDiagID(
4186  "Offloading entry for target region is incorrect: either the "
4187  "address or the ID is invalid.");
4188  CGM.getDiags().Report(DiagID);
4189  continue;
4190  }
4191  createOffloadEntry(CE->getID(), CE->getAddress(), /*Size=*/0,
4192  CE->getFlags(), llvm::GlobalValue::WeakAnyLinkage);
4193  } else if (const auto *CE =
4194  dyn_cast<OffloadEntriesInfoManagerTy::
4195  OffloadEntryInfoDeviceGlobalVar>(E)) {
4198  CE->getFlags());
4199  switch (Flags) {
4201  if (CGM.getLangOpts().OpenMPIsDevice &&
4202  CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())
4203  continue;
4204  if (!CE->getAddress()) {
4205  unsigned DiagID = CGM.getDiags().getCustomDiagID(
4207  "Offloading entry for declare target variable is incorrect: the "
4208  "address is invalid.");
4209  CGM.getDiags().Report(DiagID);
4210  continue;
4211  }
4212  // The variable has no definition - no need to add the entry.
4213  if (CE->getVarSize().isZero())
4214  continue;
4215  break;
4216  }
4218  assert(((CGM.getLangOpts().OpenMPIsDevice && !CE->getAddress()) ||
4219  (!CGM.getLangOpts().OpenMPIsDevice && CE->getAddress())) &&
4220  "Declare target link address is set.");
4221  if (CGM.getLangOpts().OpenMPIsDevice)
4222  continue;
4223  if (!CE->getAddress()) {
4224  unsigned DiagID = CGM.getDiags().getCustomDiagID(
4226  "Offloading entry for declare target variable is incorrect: the "
4227  "address is invalid.");
4228  CGM.getDiags().Report(DiagID);
4229  continue;
4230  }
4231  break;
4232  }
4233  createOffloadEntry(CE->getAddress(), CE->getAddress(),
4234  CE->getVarSize().getQuantity(), Flags,
4235  CE->getLinkage());
4236  } else {
4237  llvm_unreachable("Unsupported entry kind.");
4238  }
4239  }
4240 }
4241 
4242 /// Loads all the offload entries information from the host IR
4243 /// metadata.
4245  // If we are in target mode, load the metadata from the host IR. This code has
4246  // to match the metadata creation in createOffloadEntriesAndInfoMetadata().
4247 
4248  if (!CGM.getLangOpts().OpenMPIsDevice)
4249  return;
4250 
4251  if (CGM.getLangOpts().OMPHostIRFile.empty())
4252  return;
4253 
4254  auto Buf = llvm::MemoryBuffer::getFile(CGM.getLangOpts().OMPHostIRFile);
4255  if (auto EC = Buf.getError()) {
4256  CGM.getDiags().Report(diag::err_cannot_open_file)
4257  << CGM.getLangOpts().OMPHostIRFile << EC.message();
4258  return;
4259  }
4260 
4261  llvm::LLVMContext C;
4262  auto ME = expectedToErrorOrAndEmitErrors(
4263  C, llvm::parseBitcodeFile(Buf.get()->getMemBufferRef(), C));
4264 
4265  if (auto EC = ME.getError()) {
4266  unsigned DiagID = CGM.getDiags().getCustomDiagID(
4267  DiagnosticsEngine::Error, "Unable to parse host IR file '%0':'%1'");
4268  CGM.getDiags().Report(DiagID)
4269  << CGM.getLangOpts().OMPHostIRFile << EC.message();
4270  return;
4271  }
4272 
4273  llvm::NamedMDNode *MD = ME.get()->getNamedMetadata("omp_offload.info");
4274  if (!MD)
4275  return;
4276 
4277  for (llvm::MDNode *MN : MD->operands()) {
4278  auto &&GetMDInt = [MN](unsigned Idx) {
4279  auto *V = cast<llvm::ConstantAsMetadata>(MN->getOperand(Idx));
4280  return cast<llvm::ConstantInt>(V->getValue())->getZExtValue();
4281  };
4282 
4283  auto &&GetMDString = [MN](unsigned Idx) {
4284  auto *V = cast<llvm::MDString>(MN->getOperand(Idx));
4285  return V->getString();
4286  };
4287 
4288  switch (GetMDInt(0)) {
4289  default:
4290  llvm_unreachable("Unexpected metadata!");
4291  break;
4295  /*DeviceID=*/GetMDInt(1), /*FileID=*/GetMDInt(2),
4296  /*ParentName=*/GetMDString(3), /*Line=*/GetMDInt(4),
4297  /*Order=*/GetMDInt(5));
4298  break;
4302  /*MangledName=*/GetMDString(1),
4303  static_cast<OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind>(
4304  /*Flags=*/GetMDInt(2)),
4305  /*Order=*/GetMDInt(3));
4306  break;
4307  }
4308  }
4309 }
4310 
4312  if (!KmpRoutineEntryPtrTy) {
4313  // Build typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *); type.
4314  ASTContext &C = CGM.getContext();
4315  QualType KmpRoutineEntryTyArgs[] = {KmpInt32Ty, C.VoidPtrTy};
4317  KmpRoutineEntryPtrQTy = C.getPointerType(
4318  C.getFunctionType(KmpInt32Ty, KmpRoutineEntryTyArgs, EPI));
4319  KmpRoutineEntryPtrTy = CGM.getTypes().ConvertType(KmpRoutineEntryPtrQTy);
4320  }
4321 }
4322 
4324  // Make sure the type of the entry is already created. This is the type we
4325  // have to create:
4326  // struct __tgt_offload_entry{
4327  // void *addr; // Pointer to the offload entry info.
4328  // // (function or global)
4329  // char *name; // Name of the function or global.
4330  // size_t size; // Size of the entry info (0 if it is a function).
4331  // int32_t flags; // Flags associated with the entry, e.g. 'link'.
4332  // int32_t reserved; // Reserved, to use by the runtime library.
4333  // };
4334  if (TgtOffloadEntryQTy.isNull()) {
4335  ASTContext &C = CGM.getContext();
4336  RecordDecl *RD = C.buildImplicitRecord("__tgt_offload_entry");
4337  RD->startDefinition();
4338  addFieldToRecordDecl(C, RD, C.VoidPtrTy);
4340  addFieldToRecordDecl(C, RD, C.getSizeType());
4342  C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
4344  C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
4345  RD->completeDefinition();
4346  RD->addAttr(PackedAttr::CreateImplicit(C));
4348  }
4349  return TgtOffloadEntryQTy;
4350 }
4351 
4353  // These are the types we need to build:
4354  // struct __tgt_device_image{
4355  // void *ImageStart; // Pointer to the target code start.
4356  // void *ImageEnd; // Pointer to the target code end.
4357  // // We also add the host entries to the device image, as it may be useful
4358  // // for the target runtime to have access to that information.
4359  // __tgt_offload_entry *EntriesBegin; // Begin of the table with all
4360  // // the entries.
4361  // __tgt_offload_entry *EntriesEnd; // End of the table with all the
4362  // // entries (non inclusive).
4363  // };
4364  if (TgtDeviceImageQTy.isNull()) {
4365  ASTContext &C = CGM.getContext();
4366  RecordDecl *RD = C.buildImplicitRecord("__tgt_device_image");
4367  RD->startDefinition();
4368  addFieldToRecordDecl(C, RD, C.VoidPtrTy);
4369  addFieldToRecordDecl(C, RD, C.VoidPtrTy);
4372  RD->completeDefinition();
4374  }
4375  return TgtDeviceImageQTy;
4376 }
4377 
4379  // struct __tgt_bin_desc{
4380  // int32_t NumDevices; // Number of devices supported.
4381  // __tgt_device_image *DeviceImages; // Arrays of device images
4382  // // (one per device).
4383  // __tgt_offload_entry *EntriesBegin; // Begin of the table with all the
4384  // // entries.
4385  // __tgt_offload_entry *EntriesEnd; // End of the table with all the
4386  // // entries (non inclusive).
4387  // };
4389  ASTContext &C = CGM.getContext();
4390  RecordDecl *RD = C.buildImplicitRecord("__tgt_bin_desc");
4391  RD->startDefinition();
4393  C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
4397  RD->completeDefinition();
4399  }
4400  return TgtBinaryDescriptorQTy;
4401 }
4402 
4403 namespace {
4404 struct PrivateHelpersTy {
4405  PrivateHelpersTy(const VarDecl *Original, const VarDecl *PrivateCopy,
4406  const VarDecl *PrivateElemInit)
4407  : Original(Original), PrivateCopy(PrivateCopy),
4408  PrivateElemInit(PrivateElemInit) {}
4409  const VarDecl *Original;
4410  const VarDecl *PrivateCopy;
4411  const VarDecl *PrivateElemInit;
4412 };
4413 typedef std::pair<CharUnits /*Align*/, PrivateHelpersTy> PrivateDataTy;
4414 } // anonymous namespace
4415 
4416 static RecordDecl *
4418  if (!Privates.empty()) {
4419  ASTContext &C = CGM.getContext();
4420  // Build struct .kmp_privates_t. {
4421  // /* private vars */
4422  // };
4423  RecordDecl *RD = C.buildImplicitRecord(".kmp_privates.t");
4424  RD->startDefinition();
4425  for (const auto &Pair : Privates) {
4426  const VarDecl *VD = Pair.second.Original;
4427  QualType Type = VD->getType().getNonReferenceType();
4428  FieldDecl *FD = addFieldToRecordDecl(C, RD, Type);
4429  if (VD->hasAttrs()) {
4430  for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
4431  E(VD->getAttrs().end());
4432  I != E; ++I)
4433  FD->addAttr(*I);
4434  }
4435  }
4436  RD->completeDefinition();
4437  return RD;
4438  }
4439  return nullptr;
4440 }
4441 
4442 static RecordDecl *
4444  QualType KmpInt32Ty,
4445  QualType KmpRoutineEntryPointerQTy) {
4446  ASTContext &C = CGM.getContext();
4447  // Build struct kmp_task_t {
4448  // void * shareds;
4449  // kmp_routine_entry_t routine;
4450  // kmp_int32 part_id;
4451  // kmp_cmplrdata_t data1;
4452  // kmp_cmplrdata_t data2;
4453  // For taskloops additional fields:
4454  // kmp_uint64 lb;
4455  // kmp_uint64 ub;
4456  // kmp_int64 st;
4457  // kmp_int32 liter;
4458  // void * reductions;
4459  // };
4460  RecordDecl *UD = C.buildImplicitRecord("kmp_cmplrdata_t", TTK_Union);
4461  UD->startDefinition();
4462  addFieldToRecordDecl(C, UD, KmpInt32Ty);
4463  addFieldToRecordDecl(C, UD, KmpRoutineEntryPointerQTy);
4464  UD->completeDefinition();
4465  QualType KmpCmplrdataTy = C.getRecordType(UD);
4466  RecordDecl *RD = C.buildImplicitRecord("kmp_task_t");
4467  RD->startDefinition();
4468  addFieldToRecordDecl(C, RD, C.VoidPtrTy);
4469  addFieldToRecordDecl(C, RD, KmpRoutineEntryPointerQTy);
4470  addFieldToRecordDecl(C, RD, KmpInt32Ty);
4471  addFieldToRecordDecl(C, RD, KmpCmplrdataTy);
4472  addFieldToRecordDecl(C, RD, KmpCmplrdataTy);
4473  if (isOpenMPTaskLoopDirective(Kind)) {
4474  QualType KmpUInt64Ty =
4475  CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0);
4476  QualType KmpInt64Ty =
4477  CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
4478  addFieldToRecordDecl(C, RD, KmpUInt64Ty);
4479  addFieldToRecordDecl(C, RD, KmpUInt64Ty);
4480  addFieldToRecordDecl(C, RD, KmpInt64Ty);
4481  addFieldToRecordDecl(C, RD, KmpInt32Ty);
4482  addFieldToRecordDecl(C, RD, C.VoidPtrTy);
4483  }
4484  RD->completeDefinition();
4485  return RD;
4486 }
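// Note: the field order above is load-bearing. The proxy and destructor
// emitters below address this record with std::next(field_begin(), N) using
// the KmpTaskT* enumerators (KmpTaskTShareds = 0, the routine pointer at 1,
// KmpTaskTPartId = 2, the two kmp_cmplrdata_t fields at 3 and 4, and for
// taskloops KmpTaskTLowerBound = 5, KmpTaskTUpperBound = 6, KmpTaskTStride = 7,
// KmpTaskTLastIter = 8, KmpTaskTReductions = 9), so the addFieldToRecordDecl
// calls must stay in sync with that enum.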
4487 
4488 static RecordDecl *
4489 createKmpTaskTWithPrivatesRecordDecl(CodeGenModule &CGM, QualType KmpTaskTQTy,
4490  ArrayRef<PrivateDataTy> Privates) {
4491  ASTContext &C = CGM.getContext();
4492  // Build struct kmp_task_t_with_privates {
4493  // kmp_task_t task_data;
4494  // .kmp_privates_t. privates;
4495  // };
4496  RecordDecl *RD = C.buildImplicitRecord("kmp_task_t_with_privates");
4497  RD->startDefinition();
4498  addFieldToRecordDecl(C, RD, KmpTaskTQTy);
4499  if (const RecordDecl *PrivateRD = createPrivatesRecordDecl(CGM, Privates))
4500  addFieldToRecordDecl(C, RD, C.getRecordType(PrivateRD));
4501  RD->completeDefinition();
4502  return RD;
4503 }
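// Illustrative C view of the combined record; the `privates` field is only
// added when the task actually has private data, otherwise the record is a
// bare wrapper around kmp_task_t:
//
//   struct kmp_task_t_with_privates {
//     kmp_task_t task_data;       // field 0, visible to the runtime
//     .kmp_privates.t. privates;  // field 1, compiler-private storage
//   };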
4504 
4505 /// Emit a proxy function which accepts kmp_task_t as the second
4506 /// argument.
4507 /// \code
4508 /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
4509 /// TaskFunction(gtid, tt->part_id, &tt->privates, task_privates_map, tt,
4510 /// For taskloops:
4511 /// tt->task_data.lb, tt->task_data.ub, tt->task_data.st, tt->task_data.liter,
4512 /// tt->reductions, tt->shareds);
4513 /// return 0;
4514 /// }
4515 /// \endcode
4516 static llvm::Function *
4517 emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
4518  OpenMPDirectiveKind Kind, QualType KmpInt32Ty,
4519  QualType KmpTaskTWithPrivatesPtrQTy,
4520  QualType KmpTaskTWithPrivatesQTy, QualType KmpTaskTQTy,
4521  QualType SharedsPtrTy, llvm::Function *TaskFunction,
4522  llvm::Value *TaskPrivatesMap) {
4523  ASTContext &C = CGM.getContext();
4524  FunctionArgList Args;
4525  ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty,
4526  ImplicitParamDecl::Other);
4527  ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4528  KmpTaskTWithPrivatesPtrQTy.withRestrict(),
4529  ImplicitParamDecl::Other);
4530  Args.push_back(&GtidArg);
4531  Args.push_back(&TaskTypeArg);
4532  const auto &TaskEntryFnInfo =
4533  CGM.getTypes().arrangeBuiltinFunctionDeclaration(KmpInt32Ty, Args);
4534  llvm::FunctionType *TaskEntryTy =
4535  CGM.getTypes().GetFunctionType(TaskEntryFnInfo);
4536  std::string Name = CGM.getOpenMPRuntime().getName({"omp_task_entry", ""});
4537  auto *TaskEntry = llvm::Function::Create(
4538  TaskEntryTy, llvm::GlobalValue::InternalLinkage, Name, &CGM.getModule());
4539  CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskEntry, TaskEntryFnInfo);
4540  TaskEntry->setDoesNotRecurse();
4541  CodeGenFunction CGF(CGM);
4542  CGF.StartFunction(GlobalDecl(), KmpInt32Ty, TaskEntry, TaskEntryFnInfo, Args,
4543  Loc, Loc);
4544 
4545  // TaskFunction(gtid, tt->task_data.part_id, &tt->privates, task_privates_map,
4546  // tt,
4547  // For taskloops:
4548  // tt->task_data.lb, tt->task_data.ub, tt->task_data.st, tt->task_data.liter,
4549  // tt->task_data.shareds);
4550  llvm::Value *GtidParam = CGF.EmitLoadOfScalar(
4551  CGF.GetAddrOfLocalVar(&GtidArg), /*Volatile=*/false, KmpInt32Ty, Loc);
4552  LValue TDBase = CGF.EmitLoadOfPointerLValue(
4553  CGF.GetAddrOfLocalVar(&TaskTypeArg),
4554  KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
4555  const auto *KmpTaskTWithPrivatesQTyRD =
4556  cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
4557  LValue Base =
4558  CGF.EmitLValueForField(TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
4559  const auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
4560  auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
4561  LValue PartIdLVal = CGF.EmitLValueForField(Base, *PartIdFI);
4562  llvm::Value *PartidParam = PartIdLVal.getPointer();
4563 
4564  auto SharedsFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTShareds);
4565  LValue SharedsLVal = CGF.EmitLValueForField(Base, *SharedsFI);
4566  llvm::Value *SharedsParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4567  CGF.EmitLoadOfScalar(SharedsLVal, Loc),
4568  CGF.ConvertTypeForMem(SharedsPtrTy));
4569 
4570  auto PrivatesFI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin(), 1);
4571  llvm::Value *PrivatesParam;
4572  if (PrivatesFI != KmpTaskTWithPrivatesQTyRD->field_end()) {
4573  LValue PrivatesLVal = CGF.EmitLValueForField(TDBase, *PrivatesFI);
4574  PrivatesParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4575  PrivatesLVal.getPointer(), CGF.VoidPtrTy);
4576  } else {
4577  PrivatesParam = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
4578  }
4579 
4580  llvm::Value *CommonArgs[] = {GtidParam, PartidParam, PrivatesParam,
4581  TaskPrivatesMap,
4582  CGF.Builder
4583  .CreatePointerBitCastOrAddrSpaceCast(
4584  TDBase.getAddress(), CGF.VoidPtrTy)
4585  .getPointer()};
4586  SmallVector<llvm::Value *, 16> CallArgs(std::begin(CommonArgs),
4587  std::end(CommonArgs));
4588  if (isOpenMPTaskLoopDirective(Kind)) {
4589  auto LBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound);
4590  LValue LBLVal = CGF.EmitLValueForField(Base, *LBFI);
4591  llvm::Value *LBParam = CGF.EmitLoadOfScalar(LBLVal, Loc);
4592  auto UBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound);
4593  LValue UBLVal = CGF.EmitLValueForField(Base, *UBFI);
4594  llvm::Value *UBParam = CGF.EmitLoadOfScalar(UBLVal, Loc);
4595  auto StFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTStride);
4596  LValue StLVal = CGF.EmitLValueForField(Base, *StFI);
4597  llvm::Value *StParam = CGF.EmitLoadOfScalar(StLVal, Loc);
4598  auto LIFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLastIter);
4599  LValue LILVal = CGF.EmitLValueForField(Base, *LIFI);
4600  llvm::Value *LIParam = CGF.EmitLoadOfScalar(LILVal, Loc);
4601  auto RFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTReductions);
4602  LValue RLVal = CGF.EmitLValueForField(Base, *RFI);
4603  llvm::Value *RParam = CGF.EmitLoadOfScalar(RLVal, Loc);
4604  CallArgs.push_back(LBParam);
4605  CallArgs.push_back(UBParam);
4606  CallArgs.push_back(StParam);
4607  CallArgs.push_back(LIParam);
4608  CallArgs.push_back(RParam);
4609  }
4610  CallArgs.push_back(SharedsParam);
4611 
4612  CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, Loc, TaskFunction,
4613  CallArgs);
4614  CGF.EmitStoreThroughLValue(RValue::get(CGF.Builder.getInt32(/*C=*/0)),
4615  CGF.MakeAddrLValue(CGF.ReturnValue, KmpInt32Ty));
4616  CGF.FinishFunction();
4617  return TaskEntry;
4618 }
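// For a plain (non-taskloop) task the emitted entry therefore boils down to
// the following pseudo-C sketch (`shareds_ty` is a placeholder for the
// pointee of SharedsPtrTy):
//
//   kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t_with_privates *tt) {
//     TaskFunction(gtid, &tt->task_data.part_id, &tt->privates,
//                  task_privates_map, (void *)tt,
//                  (shareds_ty *)tt->task_data.shareds);
//     return 0;
//   }
//
// For taskloops, the loads of lb/ub/st/liter/reductions above are appended
// right before the shareds argument.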
4619 
4620 static llvm::Function *emitDestructorsFunction(CodeGenModule &CGM,
4621  SourceLocation Loc,
4622  QualType KmpInt32Ty,
4623  QualType KmpTaskTWithPrivatesPtrQTy,
4624  QualType KmpTaskTWithPrivatesQTy) {
4625  ASTContext &C = CGM.getContext();
4626  FunctionArgList Args;
4627  ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty,
4628  ImplicitParamDecl::Other);
4629  ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4630  KmpTaskTWithPrivatesPtrQTy.withRestrict(),
4631  ImplicitParamDecl::Other);
4632  Args.push_back(&GtidArg);
4633  Args.push_back(&TaskTypeArg);
4634  const auto &DestructorFnInfo =
4635  CGM.getTypes().arrangeBuiltinFunctionDeclaration(KmpInt32Ty, Args);
4636  llvm::FunctionType *DestructorFnTy =
4637  CGM.getTypes().GetFunctionType(DestructorFnInfo);
4638  std::string Name =
4639  CGM.getOpenMPRuntime().getName({"omp_task_destructor", ""});
4640  auto *DestructorFn =
4641  llvm::Function::Create(DestructorFnTy, llvm::GlobalValue::InternalLinkage,
4642  Name, &CGM.getModule());
4643  CGM.SetInternalFunctionAttributes(GlobalDecl(), DestructorFn,
4644  DestructorFnInfo);
4645  DestructorFn->setDoesNotRecurse();
4646  CodeGenFunction CGF(CGM);
4647  CGF.StartFunction(GlobalDecl(), KmpInt32Ty, DestructorFn, DestructorFnInfo,
4648  Args, Loc, Loc);
4649 
4650  LValue Base = CGF.EmitLoadOfPointerLValue(
4651  CGF.GetAddrOfLocalVar(&TaskTypeArg),
4652  KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
4653  const auto *KmpTaskTWithPrivatesQTyRD =
4654  cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
4655  auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());