//===----- CGOpenMPRuntime.cpp - Interface to OpenMP Runtimes -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This provides a class for OpenMP runtime code generation.
//
//===----------------------------------------------------------------------===//

#include "CGOpenMPRuntime.h"
#include "ABIInfoImpl.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "TargetInfo.h"
#include "clang/AST/APValue.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Bitcode/BitcodeReader.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <numeric>
#include <optional>

using namespace clang;
using namespace CodeGen;
using namespace llvm::omp;

namespace {
/// Base class for handling code generation inside OpenMP regions.
class CGOpenMPRegionInfo : public CodeGenFunction::CGCapturedStmtInfo {
public:
  /// Kinds of OpenMP regions used in codegen.
  enum CGOpenMPRegionKind {
    /// Region with outlined function for standalone 'parallel'
    /// directive.
    ParallelOutlinedRegion,
    /// Region with outlined function for standalone 'task' directive.
    TaskOutlinedRegion,
    /// Region for constructs that do not require function outlining,
    /// like 'for', 'sections', 'atomic' etc. directives.
    InlinedRegion,
    /// Region with outlined function for standalone 'target' directive.
    TargetRegion,
  };

  CGOpenMPRegionInfo(const CapturedStmt &CS,
                     const CGOpenMPRegionKind RegionKind,
                     const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind,
                     bool HasCancel)
      : CGCapturedStmtInfo(CS, CR_OpenMP), RegionKind(RegionKind),
        CodeGen(CodeGen), Kind(Kind), HasCancel(HasCancel) {}

  CGOpenMPRegionInfo(const CGOpenMPRegionKind RegionKind,
                     const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind,
                     bool HasCancel)
      : CGCapturedStmtInfo(CR_OpenMP), RegionKind(RegionKind), CodeGen(CodeGen),
        Kind(Kind), HasCancel(HasCancel) {}

  /// Get a variable or parameter for storing global thread id
  /// inside OpenMP construct.
  virtual const VarDecl *getThreadIDVariable() const = 0;

  /// Emit the captured statement body.
  void EmitBody(CodeGenFunction &CGF, const Stmt *S) override;

  /// Get an LValue for the current ThreadID variable.
  /// \return LValue for thread id variable. This LValue always has type int32*.
  virtual LValue getThreadIDVariableLValue(CodeGenFunction &CGF);

  virtual void emitUntiedSwitch(CodeGenFunction & /*CGF*/) {}

  CGOpenMPRegionKind getRegionKind() const { return RegionKind; }

  OpenMPDirectiveKind getDirectiveKind() const { return Kind; }

  bool hasCancel() const { return HasCancel; }

  static bool classof(const CGCapturedStmtInfo *Info) {
    return Info->getKind() == CR_OpenMP;
  }

  ~CGOpenMPRegionInfo() override = default;

protected:
  CGOpenMPRegionKind RegionKind;
  RegionCodeGenTy CodeGen;
  OpenMPDirectiveKind Kind;
  bool HasCancel;
};

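// For illustration, the directive forms that typically select each region
// kind above (restating the enumerator documentation):
//
//   #pragma omp parallel        -> ParallelOutlinedRegion (outlined helper)
//   #pragma omp task            -> TaskOutlinedRegion     (outlined task entry)
//   #pragma omp for / atomic    -> InlinedRegion          (emitted in place)
//   #pragma omp target          -> TargetRegion           (offload entry)
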
/// API for captured statement code generation in OpenMP constructs.
class CGOpenMPOutlinedRegionInfo final : public CGOpenMPRegionInfo {
public:
  CGOpenMPOutlinedRegionInfo(const CapturedStmt &CS, const VarDecl *ThreadIDVar,
                             const RegionCodeGenTy &CodeGen,
                             OpenMPDirectiveKind Kind, bool HasCancel,
                             StringRef HelperName)
      : CGOpenMPRegionInfo(CS, ParallelOutlinedRegion, CodeGen, Kind,
                           HasCancel),
        ThreadIDVar(ThreadIDVar), HelperName(HelperName) {
    assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
  }

  /// Get a variable or parameter for storing global thread id
  /// inside OpenMP construct.
  const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }

  /// Get the name of the capture helper.
  StringRef getHelperName() const override { return HelperName; }

  static bool classof(const CGCapturedStmtInfo *Info) {
    return CGOpenMPRegionInfo::classof(Info) &&
           cast<CGOpenMPRegionInfo>(Info)->getRegionKind() ==
               ParallelOutlinedRegion;
  }

private:
  /// A variable or parameter storing global thread id for OpenMP
  /// constructs.
  const VarDecl *ThreadIDVar;
  StringRef HelperName;
};

/// API for captured statement code generation in OpenMP constructs.
class CGOpenMPTaskOutlinedRegionInfo final : public CGOpenMPRegionInfo {
public:
  class UntiedTaskActionTy final : public PrePostActionTy {
    bool Untied;
    const VarDecl *PartIDVar;
    const RegionCodeGenTy UntiedCodeGen;
    llvm::SwitchInst *UntiedSwitch = nullptr;

  public:
    UntiedTaskActionTy(bool Tied, const VarDecl *PartIDVar,
                       const RegionCodeGenTy &UntiedCodeGen)
        : Untied(!Tied), PartIDVar(PartIDVar), UntiedCodeGen(UntiedCodeGen) {}
    void Enter(CodeGenFunction &CGF) override {
      if (Untied) {
        // Emit task switching point.
        LValue PartIdLVal = CGF.EmitLoadOfPointerLValue(
            CGF.GetAddrOfLocalVar(PartIDVar),
            PartIDVar->getType()->castAs<PointerType>());
        llvm::Value *Res =
            CGF.EmitLoadOfScalar(PartIdLVal, PartIDVar->getLocation());
        llvm::BasicBlock *DoneBB = CGF.createBasicBlock(".untied.done.");
        UntiedSwitch = CGF.Builder.CreateSwitch(Res, DoneBB);
        CGF.EmitBlock(DoneBB);
        CGF.EmitBranchThroughCleanup(CGF.ReturnBlock);
        CGF.EmitBlock(CGF.createBasicBlock(".untied.jmp."));
        UntiedSwitch->addCase(CGF.Builder.getInt32(0),
                              CGF.Builder.GetInsertBlock());
        emitUntiedSwitch(CGF);
      }
    }
    void emitUntiedSwitch(CodeGenFunction &CGF) const {
      if (Untied) {
        LValue PartIdLVal = CGF.EmitLoadOfPointerLValue(
            CGF.GetAddrOfLocalVar(PartIDVar),
            PartIDVar->getType()->castAs<PointerType>());
        CGF.EmitStoreOfScalar(CGF.Builder.getInt32(UntiedSwitch->getNumCases()),
                              PartIdLVal);
        UntiedCodeGen(CGF);
        CodeGenFunction::JumpDest CurPoint =
            CGF.getJumpDestInCurrentScope(".untied.next.");
        CGF.EmitBranchThroughCleanup(CGF.ReturnBlock);
        CGF.EmitBlock(CGF.createBasicBlock(".untied.jmp."));
        UntiedSwitch->addCase(CGF.Builder.getInt32(UntiedSwitch->getNumCases()),
                              CGF.Builder.GetInsertBlock());
        CGF.EmitBranchThroughCleanup(CurPoint);
        CGF.EmitBlock(CurPoint.getBlock());
      }
    }
    unsigned getNumberOfParts() const { return UntiedSwitch->getNumCases(); }
  };
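
  // For illustration: for an untied task such as
  //
  //   #pragma omp task untied
  //   { foo(); /* task scheduling point */ bar(); }
  //
  // the action above splits the outlined body into numbered parts. On entry,
  // the part id is loaded and the switch (default: ".untied.done.") jumps to
  // the part where the task last suspended; emitUntiedSwitch() stores the
  // next part number and adds a new switch case before each resume point.
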
  CGOpenMPTaskOutlinedRegionInfo(const CapturedStmt &CS,
                                 const VarDecl *ThreadIDVar,
                                 const RegionCodeGenTy &CodeGen,
                                 OpenMPDirectiveKind Kind, bool HasCancel,
                                 const UntiedTaskActionTy &Action)
      : CGOpenMPRegionInfo(CS, TaskOutlinedRegion, CodeGen, Kind, HasCancel),
        ThreadIDVar(ThreadIDVar), Action(Action) {
    assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
  }

  /// Get a variable or parameter for storing global thread id
  /// inside OpenMP construct.
  const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }

  /// Get an LValue for the current ThreadID variable.
  LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override;

  /// Get the name of the capture helper.
  StringRef getHelperName() const override { return ".omp_outlined."; }

  void emitUntiedSwitch(CodeGenFunction &CGF) override {
    Action.emitUntiedSwitch(CGF);
  }

  static bool classof(const CGCapturedStmtInfo *Info) {
    return CGOpenMPRegionInfo::classof(Info) &&
           cast<CGOpenMPRegionInfo>(Info)->getRegionKind() ==
               TaskOutlinedRegion;
  }

private:
  /// A variable or parameter storing global thread id for OpenMP
  /// constructs.
  const VarDecl *ThreadIDVar;
  /// Action for emitting code for untied tasks.
  const UntiedTaskActionTy &Action;
};

/// API for inlined captured statement code generation in OpenMP
/// constructs.
class CGOpenMPInlinedRegionInfo : public CGOpenMPRegionInfo {
public:
  CGOpenMPInlinedRegionInfo(CodeGenFunction::CGCapturedStmtInfo *OldCSI,
                            const RegionCodeGenTy &CodeGen,
                            OpenMPDirectiveKind Kind, bool HasCancel)
      : CGOpenMPRegionInfo(InlinedRegion, CodeGen, Kind, HasCancel),
        OldCSI(OldCSI),
        OuterRegionInfo(dyn_cast_or_null<CGOpenMPRegionInfo>(OldCSI)) {}

  // Retrieve the value of the context parameter.
  llvm::Value *getContextValue() const override {
    if (OuterRegionInfo)
      return OuterRegionInfo->getContextValue();
    llvm_unreachable("No context value for inlined OpenMP region");
  }

  void setContextValue(llvm::Value *V) override {
    if (OuterRegionInfo) {
      OuterRegionInfo->setContextValue(V);
      return;
    }
    llvm_unreachable("No context value for inlined OpenMP region");
  }

  /// Lookup the captured field decl for a variable.
  const FieldDecl *lookup(const VarDecl *VD) const override {
    if (OuterRegionInfo)
      return OuterRegionInfo->lookup(VD);
    // If there is no outer outlined region, there is no need to look the
    // variable up in a list of captured variables; we can use the original
    // one.
    return nullptr;
  }

  FieldDecl *getThisFieldDecl() const override {
    if (OuterRegionInfo)
      return OuterRegionInfo->getThisFieldDecl();
    return nullptr;
  }

  /// Get a variable or parameter for storing global thread id
  /// inside OpenMP construct.
  const VarDecl *getThreadIDVariable() const override {
    if (OuterRegionInfo)
      return OuterRegionInfo->getThreadIDVariable();
    return nullptr;
  }

  /// Get an LValue for the current ThreadID variable.
  LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override {
    if (OuterRegionInfo)
      return OuterRegionInfo->getThreadIDVariableLValue(CGF);
    llvm_unreachable("No LValue for inlined OpenMP construct");
  }

  /// Get the name of the capture helper.
  StringRef getHelperName() const override {
    if (auto *OuterRegionInfo = getOldCSI())
      return OuterRegionInfo->getHelperName();
    llvm_unreachable("No helper name for inlined OpenMP construct");
  }

  void emitUntiedSwitch(CodeGenFunction &CGF) override {
    if (OuterRegionInfo)
      OuterRegionInfo->emitUntiedSwitch(CGF);
  }

  CodeGenFunction::CGCapturedStmtInfo *getOldCSI() const { return OldCSI; }

  static bool classof(const CGCapturedStmtInfo *Info) {
    return CGOpenMPRegionInfo::classof(Info) &&
           cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == InlinedRegion;
  }

  ~CGOpenMPInlinedRegionInfo() override = default;

private:
  /// CodeGen info about outer OpenMP region.
  CodeGenFunction::CGCapturedStmtInfo *OldCSI;
  CGOpenMPRegionInfo *OuterRegionInfo;
};

/// API for captured statement code generation in OpenMP target
/// constructs. For these captures, implicit parameters are used instead of the
/// captured fields. The name of the target region has to be unique in a given
/// application, so it is provided by the client, because only the client has
/// the information to generate that.
class CGOpenMPTargetRegionInfo final : public CGOpenMPRegionInfo {
public:
  CGOpenMPTargetRegionInfo(const CapturedStmt &CS,
                           const RegionCodeGenTy &CodeGen, StringRef HelperName)
      : CGOpenMPRegionInfo(CS, TargetRegion, CodeGen, OMPD_target,
                           /*HasCancel=*/false),
        HelperName(HelperName) {}

  /// This is unused for target regions because each starts executing
  /// with a single thread.
  const VarDecl *getThreadIDVariable() const override { return nullptr; }

  /// Get the name of the capture helper.
  StringRef getHelperName() const override { return HelperName; }

  static bool classof(const CGCapturedStmtInfo *Info) {
    return CGOpenMPRegionInfo::classof(Info) &&
           cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == TargetRegion;
  }

private:
  StringRef HelperName;
};

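// Illustrative note on the client-provided helper name: the unique target
// region name is conventionally the offload entry name, of a shape roughly
// like "__omp_offloading_<device-id>_<file-id>_<parent-function>_l<line>";
// the exact mangling is produced elsewhere by the offload entry machinery,
// not in this file.
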
static void EmptyCodeGen(CodeGenFunction &, PrePostActionTy &) {
  llvm_unreachable("No codegen for expressions");
}
/// API for generation of expressions captured in an innermost OpenMP
/// region.
class CGOpenMPInnerExprInfo final : public CGOpenMPInlinedRegionInfo {
public:
  CGOpenMPInnerExprInfo(CodeGenFunction &CGF, const CapturedStmt &CS)
      : CGOpenMPInlinedRegionInfo(CGF.CapturedStmtInfo, EmptyCodeGen,
                                  OMPD_unknown,
                                  /*HasCancel=*/false),
        PrivScope(CGF) {
    // Make sure the globals captured in the provided statement are local by
    // using the privatization logic. We assume the same variable is not
    // captured more than once.
    for (const auto &C : CS.captures()) {
      if (!C.capturesVariable() && !C.capturesVariableByCopy())
        continue;

      const VarDecl *VD = C.getCapturedVar();
      if (VD->isLocalVarDeclOrParm())
        continue;

      DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(VD),
                      /*RefersToEnclosingVariableOrCapture=*/false,
                      VD->getType().getNonReferenceType(), VK_LValue,
                      C.getLocation());
      PrivScope.addPrivate(VD, CGF.EmitLValue(&DRE).getAddress());
    }
    (void)PrivScope.Privatize();
  }

  /// Lookup the captured field decl for a variable.
  const FieldDecl *lookup(const VarDecl *VD) const override {
    if (const FieldDecl *FD = CGOpenMPInlinedRegionInfo::lookup(VD))
      return FD;
    return nullptr;
  }

  /// Emit the captured statement body.
  void EmitBody(CodeGenFunction &CGF, const Stmt *S) override {
    llvm_unreachable("No body for expressions");
  }

  /// Get a variable or parameter for storing global thread id
  /// inside OpenMP construct.
  const VarDecl *getThreadIDVariable() const override {
    llvm_unreachable("No thread id for expressions");
  }

  /// Get the name of the capture helper.
  StringRef getHelperName() const override {
    llvm_unreachable("No helper name for expressions");
  }

  static bool classof(const CGCapturedStmtInfo *Info) { return false; }

private:
  /// Private scope to capture global variables.
  CodeGenFunction::OMPPrivateScope PrivScope;
};

/// RAII for emitting code of OpenMP constructs.
class InlinedOpenMPRegionRAII {
  CodeGenFunction &CGF;
  llvm::DenseMap<const ValueDecl *, FieldDecl *> LambdaCaptureFields;
  FieldDecl *LambdaThisCaptureField = nullptr;
  const CodeGen::CGBlockInfo *BlockInfo = nullptr;
  bool NoInheritance = false;

public:
  /// Constructs region for combined constructs.
  /// \param CodeGen Code generation sequence for combined directives. Includes
  /// a list of functions used for code generation of implicitly inlined
  /// regions.
  InlinedOpenMPRegionRAII(CodeGenFunction &CGF, const RegionCodeGenTy &CodeGen,
                          OpenMPDirectiveKind Kind, bool HasCancel,
                          bool NoInheritance = true)
      : CGF(CGF), NoInheritance(NoInheritance) {
    // Start emission for the construct.
    CGF.CapturedStmtInfo = new CGOpenMPInlinedRegionInfo(
        CGF.CapturedStmtInfo, CodeGen, Kind, HasCancel);
    if (NoInheritance) {
      std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
      LambdaThisCaptureField = CGF.LambdaThisCaptureField;
      CGF.LambdaThisCaptureField = nullptr;
      BlockInfo = CGF.BlockInfo;
      CGF.BlockInfo = nullptr;
    }
  }

  ~InlinedOpenMPRegionRAII() {
    // Restore original CapturedStmtInfo only if we're done with code emission.
    auto *OldCSI =
        cast<CGOpenMPInlinedRegionInfo>(CGF.CapturedStmtInfo)->getOldCSI();
    delete CGF.CapturedStmtInfo;
    CGF.CapturedStmtInfo = OldCSI;
    if (NoInheritance) {
      std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
      CGF.LambdaThisCaptureField = LambdaThisCaptureField;
      CGF.BlockInfo = BlockInfo;
    }
  }
};

/// Values for bit flags used in the ident_t to describe the fields.
/// All enumerated elements are named and described in accordance with the code
/// from https://github.com/llvm/llvm-project/blob/main/openmp/runtime/src/kmp.h
enum OpenMPLocationFlags : unsigned {
  /// Use trampoline for internal microtask.
  OMP_IDENT_IMD = 0x01,
  /// Use c-style ident structure.
  OMP_IDENT_KMPC = 0x02,
  /// Atomic reduction option for kmpc_reduce.
  OMP_ATOMIC_REDUCE = 0x10,
  /// Explicit 'barrier' directive.
  OMP_IDENT_BARRIER_EXPL = 0x20,
  /// Implicit barrier in code.
  OMP_IDENT_BARRIER_IMPL = 0x40,
  /// Implicit barrier in 'for' directive.
  OMP_IDENT_BARRIER_IMPL_FOR = 0x40,
  /// Implicit barrier in 'sections' directive.
  OMP_IDENT_BARRIER_IMPL_SECTIONS = 0xC0,
  /// Implicit barrier in 'single' directive.
  OMP_IDENT_BARRIER_IMPL_SINGLE = 0x140,
  /// Call of __kmp_for_static_init for static loop.
  OMP_IDENT_WORK_LOOP = 0x200,
  /// Call of __kmp_for_static_init for sections.
  OMP_IDENT_WORK_SECTIONS = 0x400,
  /// Call of __kmp_for_static_init for distribute.
  OMP_IDENT_WORK_DISTRIBUTE = 0x800,
  LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/OMP_IDENT_WORK_DISTRIBUTE)
};

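// For illustration: these flags are OR'ed together into the ident_t::flags
// field. For example, an implicit barrier emitted at the end of a
// worksharing 'for' loop might be described as
// (OMP_IDENT_KMPC | OMP_IDENT_BARRIER_IMPL_FOR).
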
/// Describes ident structure that describes a source location.
/// All descriptions are taken from
/// https://github.com/llvm/llvm-project/blob/main/openmp/runtime/src/kmp.h
/// Original structure:
/// typedef struct ident {
///    kmp_int32 reserved_1;   /**< might be used in Fortran;
///                                 see above */
///    kmp_int32 flags;        /**< also f.flags; KMP_IDENT_xxx flags;
///                                 KMP_IDENT_KMPC identifies this union
///                                 member */
///    kmp_int32 reserved_2;   /**< not really used in Fortran any more;
///                                 see above */
///#if USE_ITT_BUILD
///                            /* but currently used for storing
///                               region-specific ITT */
///                            /* contextual information. */
///#endif /* USE_ITT_BUILD */
///    kmp_int32 reserved_3;   /**< source[4] in Fortran, do not use for
///                                 C++ */
///    char const *psource;    /**< String describing the source location.
///                                 The string is composed of semi-colon
///                                 separated fields which describe the source
///                                 file, the function and a pair of line
///                                 numbers that delimit the construct. */
/// } ident_t;
enum IdentFieldIndex {
  /// might be used in Fortran
  IdentField_Reserved_1,
  /// OMP_IDENT_xxx flags; OMP_IDENT_KMPC identifies this union member.
  IdentField_Flags,
  /// Not really used in Fortran any more
  IdentField_Reserved_2,
  /// Source[4] in Fortran, do not use for C++
  IdentField_Reserved_3,
  /// String describing the source location. The string is composed of
  /// semi-colon separated fields which describe the source file, the function
  /// and a pair of line numbers that delimit the construct.
  IdentField_PSource
};

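// For illustration: a psource string for a construct at line 4, column 1 of
// "file.c" inside function "foo" looks like ";file.c;foo;4;1;;" (see
// getIdentStringFromSourceLocation below for how clang builds this string).
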
/// Schedule types for 'omp for' loops (these enumerators are taken from
/// the enum sched_type in kmp.h).
enum OpenMPSchedType {
  /// Lower bound for default (unordered) versions.
  OMP_sch_lower = 32,
  OMP_sch_static_chunked = 33,
  OMP_sch_static = 34,
  OMP_sch_dynamic_chunked = 35,
  OMP_sch_guided_chunked = 36,
  OMP_sch_runtime = 37,
  OMP_sch_auto = 38,
  /// static with chunk adjustment (e.g., simd)
  OMP_sch_static_balanced_chunked = 45,
  /// Lower bound for 'ordered' versions.
  OMP_ord_lower = 64,
  OMP_ord_static_chunked = 65,
  OMP_ord_static = 66,
  OMP_ord_dynamic_chunked = 67,
  OMP_ord_guided_chunked = 68,
  OMP_ord_runtime = 69,
  OMP_ord_auto = 70,
  OMP_sch_default = OMP_sch_static,
  /// dist_schedule types
  OMP_dist_sch_static_chunked = 91,
  OMP_dist_sch_static = 92,
  /// Support for OpenMP 4.5 monotonic and nonmonotonic schedule modifiers.
  /// Set if the monotonic schedule modifier was present.
  OMP_sch_modifier_monotonic = (1 << 29),
  /// Set if the nonmonotonic schedule modifier was present.
  OMP_sch_modifier_nonmonotonic = (1 << 30),
};

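// For illustration: a 'schedule(dynamic, 4)' clause selects
// OMP_sch_dynamic_chunked; the same loop nested in an 'ordered' context uses
// the corresponding OMP_ord_* value (OMP_sch_* + 32, e.g. 35 + 32 = 67); and
// an explicit 'monotonic' modifier is OR'ed in as OMP_sch_modifier_monotonic.
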
/// A basic class for pre|post-action for advanced codegen sequence for OpenMP
/// region.
class CleanupTy final : public EHScopeStack::Cleanup {
  PrePostActionTy *Action;

public:
  explicit CleanupTy(PrePostActionTy *Action) : Action(Action) {}
  void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
    if (!CGF.HaveInsertPoint())
      return;
    Action->Exit(CGF);
  }
};

} // anonymous namespace

void RegionCodeGenTy::operator()(CodeGenFunction &CGF) const {
  CodeGenFunction::RunCleanupsScope Scope(CGF);
  if (PrePostAction) {
    CGF.EHStack.pushCleanup<CleanupTy>(NormalAndEHCleanup, PrePostAction);
    Callback(CodeGen, CGF, *PrePostAction);
  } else {
    PrePostActionTy Action;
    Callback(CodeGen, CGF, Action);
  }
}

/// Check if the combiner is a call to a UDR combiner and, if so, return the
/// UDR decl used for the reduction.
static const OMPDeclareReductionDecl *
getReductionInit(const Expr *ReductionOp) {
  if (const auto *CE = dyn_cast<CallExpr>(ReductionOp))
    if (const auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
      if (const auto *DRE =
              dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts()))
        if (const auto *DRD = dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl()))
          return DRD;
  return nullptr;
}

static void emitInitWithReductionInitializer(CodeGenFunction &CGF,
                                             const OMPDeclareReductionDecl *DRD,
                                             const Expr *InitOp,
                                             Address Private, Address Original,
                                             QualType Ty) {
  if (DRD->getInitializer()) {
    std::pair<llvm::Function *, llvm::Function *> Reduction =
        CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD);
    const auto *CE = cast<CallExpr>(InitOp);
    const auto *OVE = cast<OpaqueValueExpr>(CE->getCallee());
    const Expr *LHS = CE->getArg(/*Arg=*/0)->IgnoreParenImpCasts();
    const Expr *RHS = CE->getArg(/*Arg=*/1)->IgnoreParenImpCasts();
    const auto *LHSDRE =
        cast<DeclRefExpr>(cast<UnaryOperator>(LHS)->getSubExpr());
    const auto *RHSDRE =
        cast<DeclRefExpr>(cast<UnaryOperator>(RHS)->getSubExpr());
    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
    PrivateScope.addPrivate(cast<VarDecl>(LHSDRE->getDecl()), Private);
    PrivateScope.addPrivate(cast<VarDecl>(RHSDRE->getDecl()), Original);
    (void)PrivateScope.Privatize();
    RValue Func = RValue::get(Reduction.second);
    CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func);
    CGF.EmitIgnoredExpr(InitOp);
  } else {
    llvm::Constant *Init = CGF.CGM.EmitNullConstant(Ty);
    std::string Name = CGF.CGM.getOpenMPRuntime().getName({"init"});
    auto *GV = new llvm::GlobalVariable(
        CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
        llvm::GlobalValue::PrivateLinkage, Init, Name);
    LValue LV = CGF.MakeNaturalAlignRawAddrLValue(GV, Ty);
    RValue InitRVal;
    switch (CGF.getEvaluationKind(Ty)) {
    case TEK_Scalar:
      InitRVal = CGF.EmitLoadOfLValue(LV, DRD->getLocation());
      break;
    case TEK_Complex:
      InitRVal =
          RValue::getComplex(CGF.EmitLoadOfComplex(LV, DRD->getLocation()));
      break;
    case TEK_Aggregate: {
      OpaqueValueExpr OVE(DRD->getLocation(), Ty, VK_LValue);
      CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE, LV);
      CGF.EmitAnyExprToMem(&OVE, Private, Ty.getQualifiers(),
                           /*IsInitializer=*/false);
      return;
    }
    }
    OpaqueValueExpr OVE(DRD->getLocation(), Ty, VK_PRValue);
    CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE, InitRVal);
    CGF.EmitAnyExprToMem(&OVE, Private, Ty.getQualifiers(),
                         /*IsInitializer=*/false);
  }
}

/// Emit initialization of arrays of complex types.
/// \param DestAddr Address of the array.
/// \param Type Type of array.
/// \param Init Initial expression of array.
/// \param SrcAddr Address of the original array.
static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
                                 QualType Type, bool EmitDeclareReductionInit,
                                 const Expr *Init,
                                 const OMPDeclareReductionDecl *DRD,
                                 Address SrcAddr = Address::invalid()) {
  // Perform element-by-element initialization.
  QualType ElementTy;

  // Drill down to the base element type on both arrays.
  const ArrayType *ArrayTy = Type->getAsArrayTypeUnsafe();
  llvm::Value *NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, DestAddr);
  if (DRD)
    SrcAddr = SrcAddr.withElementType(DestAddr.getElementType());

  llvm::Value *SrcBegin = nullptr;
  if (DRD)
    SrcBegin = SrcAddr.emitRawPointer(CGF);
  llvm::Value *DestBegin = DestAddr.emitRawPointer(CGF);
  // Cast from pointer to array type to pointer to single element.
  llvm::Value *DestEnd =
      CGF.Builder.CreateGEP(DestAddr.getElementType(), DestBegin, NumElements);
  // The basic structure here is a while-do loop.
  llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.arrayinit.body");
  llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.arrayinit.done");
  llvm::Value *IsEmpty =
      CGF.Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arrayinit.isempty");
  CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);

  // Enter the loop body, making that address the current address.
  llvm::BasicBlock *EntryBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(BodyBB);

  CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);

  llvm::PHINode *SrcElementPHI = nullptr;
  Address SrcElementCurrent = Address::invalid();
  if (DRD) {
    SrcElementPHI = CGF.Builder.CreatePHI(SrcBegin->getType(), 2,
                                          "omp.arraycpy.srcElementPast");
    SrcElementPHI->addIncoming(SrcBegin, EntryBB);
    SrcElementCurrent =
        Address(SrcElementPHI, SrcAddr.getElementType(),
                SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));
  }
  llvm::PHINode *DestElementPHI = CGF.Builder.CreatePHI(
      DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
  DestElementPHI->addIncoming(DestBegin, EntryBB);
  Address DestElementCurrent =
      Address(DestElementPHI, DestAddr.getElementType(),
              DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  // Emit copy.
  {
    CodeGenFunction::RunCleanupsScope InitScope(CGF);
    if (EmitDeclareReductionInit) {
      emitInitWithReductionInitializer(CGF, DRD, Init, DestElementCurrent,
                                       SrcElementCurrent, ElementTy);
    } else
      CGF.EmitAnyExprToMem(Init, DestElementCurrent, ElementTy.getQualifiers(),
                           /*IsInitializer=*/false);
  }

  if (DRD) {
    // Shift the address forward by one element.
    llvm::Value *SrcElementNext = CGF.Builder.CreateConstGEP1_32(
        SrcAddr.getElementType(), SrcElementPHI, /*Idx0=*/1,
        "omp.arraycpy.dest.element");
    SrcElementPHI->addIncoming(SrcElementNext, CGF.Builder.GetInsertBlock());
  }

  // Shift the address forward by one element.
  llvm::Value *DestElementNext = CGF.Builder.CreateConstGEP1_32(
      DestAddr.getElementType(), DestElementPHI, /*Idx0=*/1,
      "omp.arraycpy.dest.element");
  // Check whether we've reached the end.
  llvm::Value *Done =
      CGF.Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
  CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
  DestElementPHI->addIncoming(DestElementNext, CGF.Builder.GetInsertBlock());

  // Done.
  CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
}

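// A sketch of the control flow EmitOMPAggregateInit produces (block names
// match the createBasicBlock calls above):
//
//   entry:              isempty = (dest.begin == dest.end)
//                       br isempty, omp.arrayinit.done, omp.arrayinit.body
//   omp.arrayinit.body: PHI over the current element; emit the per-element
//                       init (or UDR combiner init); advance the element
//                       pointer(s); branch back until the end is reached
//   omp.arrayinit.done: continue
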
LValue ReductionCodeGen::emitSharedLValue(CodeGenFunction &CGF, const Expr *E) {
  return CGF.EmitOMPSharedLValue(E);
}

LValue ReductionCodeGen::emitSharedLValueUB(CodeGenFunction &CGF,
                                            const Expr *E) {
  if (const auto *OASE = dyn_cast<ArraySectionExpr>(E))
    return CGF.EmitArraySectionExpr(OASE, /*IsLowerBound=*/false);
  return LValue();
}

void ReductionCodeGen::emitAggregateInitialization(
    CodeGenFunction &CGF, unsigned N, Address PrivateAddr, Address SharedAddr,
    const OMPDeclareReductionDecl *DRD) {
  // Emit VarDecl with copy init for arrays.
  // Get the address of the original variable captured in current
  // captured region.
  const auto *PrivateVD =
      cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
  bool EmitDeclareReductionInit =
      DRD && (DRD->getInitializer() || !PrivateVD->hasInit());
  EmitOMPAggregateInit(CGF, PrivateAddr, PrivateVD->getType(),
                       EmitDeclareReductionInit,
                       EmitDeclareReductionInit ? ClausesData[N].ReductionOp
                                                : PrivateVD->getInit(),
                       DRD, SharedAddr);
}

ReductionCodeGen::ReductionCodeGen(ArrayRef<const Expr *> Shareds,
                                   ArrayRef<const Expr *> Origs,
                                   ArrayRef<const Expr *> Privates,
                                   ArrayRef<const Expr *> ReductionOps) {
  ClausesData.reserve(Shareds.size());
  SharedAddresses.reserve(Shareds.size());
  Sizes.reserve(Shareds.size());
  BaseDecls.reserve(Shareds.size());
  const auto *IOrig = Origs.begin();
  const auto *IPriv = Privates.begin();
  const auto *IRed = ReductionOps.begin();
  for (const Expr *Ref : Shareds) {
    ClausesData.emplace_back(Ref, *IOrig, *IPriv, *IRed);
    std::advance(IOrig, 1);
    std::advance(IPriv, 1);
    std::advance(IRed, 1);
  }
}

void ReductionCodeGen::emitSharedOrigLValue(CodeGenFunction &CGF, unsigned N) {
  assert(SharedAddresses.size() == N && OrigAddresses.size() == N &&
         "Number of generated lvalues must be exactly N.");
  LValue First = emitSharedLValue(CGF, ClausesData[N].Shared);
  LValue Second = emitSharedLValueUB(CGF, ClausesData[N].Shared);
  SharedAddresses.emplace_back(First, Second);
  if (ClausesData[N].Shared == ClausesData[N].Ref) {
    OrigAddresses.emplace_back(First, Second);
  } else {
    LValue First = emitSharedLValue(CGF, ClausesData[N].Ref);
    LValue Second = emitSharedLValueUB(CGF, ClausesData[N].Ref);
    OrigAddresses.emplace_back(First, Second);
  }
}

void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N) {
  QualType PrivateType = getPrivateType(N);
  bool AsArraySection = isa<ArraySectionExpr>(ClausesData[N].Ref);
  if (!PrivateType->isVariablyModifiedType()) {
    Sizes.emplace_back(
        CGF.getTypeSize(OrigAddresses[N].first.getType().getNonReferenceType()),
        nullptr);
    return;
  }
  llvm::Value *Size;
  llvm::Value *SizeInChars;
  auto *ElemType = OrigAddresses[N].first.getAddress().getElementType();
  auto *ElemSizeOf = llvm::ConstantExpr::getSizeOf(ElemType);
  if (AsArraySection) {
    Size = CGF.Builder.CreatePtrDiff(ElemType,
                                     OrigAddresses[N].second.getPointer(CGF),
                                     OrigAddresses[N].first.getPointer(CGF));
    Size = CGF.Builder.CreateNUWAdd(
        Size, llvm::ConstantInt::get(Size->getType(), /*V=*/1));
    SizeInChars = CGF.Builder.CreateNUWMul(Size, ElemSizeOf);
  } else {
    SizeInChars =
        CGF.getTypeSize(OrigAddresses[N].first.getType().getNonReferenceType());
    Size = CGF.Builder.CreateExactUDiv(SizeInChars, ElemSizeOf);
  }
  Sizes.emplace_back(SizeInChars, Size);
  CodeGenFunction::OpaqueValueMapping OpaqueMap(
      CGF,
      cast<OpaqueValueExpr>(
          CGF.getContext().getAsVariableArrayType(PrivateType)->getSizeExpr()),
      RValue::get(Size));
  CGF.EmitVariablyModifiedType(PrivateType);
}

void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N,
                                         llvm::Value *Size) {
  QualType PrivateType = getPrivateType(N);
  if (!PrivateType->isVariablyModifiedType()) {
    assert(!Size && !Sizes[N].second &&
           "Size should be nullptr for non-variably modified reduction "
           "items.");
    return;
  }
  CodeGenFunction::OpaqueValueMapping OpaqueMap(
      CGF,
      cast<OpaqueValueExpr>(
          CGF.getContext().getAsVariableArrayType(PrivateType)->getSizeExpr()),
      RValue::get(Size));
  CGF.EmitVariablyModifiedType(PrivateType);
}

void ReductionCodeGen::emitInitialization(
    CodeGenFunction &CGF, unsigned N, Address PrivateAddr, Address SharedAddr,
    llvm::function_ref<bool(CodeGenFunction &)> DefaultInit) {
  assert(SharedAddresses.size() > N && "No variable was generated");
  const auto *PrivateVD =
      cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
  const OMPDeclareReductionDecl *DRD =
      getReductionInit(ClausesData[N].ReductionOp);
  if (CGF.getContext().getAsArrayType(PrivateVD->getType())) {
    if (DRD && DRD->getInitializer())
      (void)DefaultInit(CGF);
    emitAggregateInitialization(CGF, N, PrivateAddr, SharedAddr, DRD);
  } else if (DRD && (DRD->getInitializer() || !PrivateVD->hasInit())) {
    (void)DefaultInit(CGF);
    QualType SharedType = SharedAddresses[N].first.getType();
    emitInitWithReductionInitializer(CGF, DRD, ClausesData[N].ReductionOp,
                                     PrivateAddr, SharedAddr, SharedType);
  } else if (!DefaultInit(CGF) && PrivateVD->hasInit() &&
             !CGF.isTrivialInitializer(PrivateVD->getInit())) {
    CGF.EmitAnyExprToMem(PrivateVD->getInit(), PrivateAddr,
                         PrivateVD->getType().getQualifiers(),
                         /*IsInitializer=*/false);
  }
}

bool ReductionCodeGen::needCleanups(unsigned N) {
  QualType PrivateType = getPrivateType(N);
  QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
  return DTorKind != QualType::DK_none;
}

void ReductionCodeGen::emitCleanups(CodeGenFunction &CGF, unsigned N,
                                    Address PrivateAddr) {
  QualType PrivateType = getPrivateType(N);
  QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
  if (needCleanups(N)) {
    PrivateAddr =
        PrivateAddr.withElementType(CGF.ConvertTypeForMem(PrivateType));
    CGF.pushDestroy(DTorKind, PrivateAddr, PrivateType);
  }
}

static LValue loadToBegin(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
                          LValue BaseLV) {
  BaseTy = BaseTy.getNonReferenceType();
  while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
         !CGF.getContext().hasSameType(BaseTy, ElTy)) {
    if (const auto *PtrTy = BaseTy->getAs<PointerType>()) {
      BaseLV = CGF.EmitLoadOfPointerLValue(BaseLV.getAddress(), PtrTy);
    } else {
      LValue RefLVal = CGF.MakeAddrLValue(BaseLV.getAddress(), BaseTy);
      BaseLV = CGF.EmitLoadOfReferenceLValue(RefLVal);
    }
    BaseTy = BaseTy->getPointeeType();
  }
  return CGF.MakeAddrLValue(
      BaseLV.getAddress().withElementType(CGF.ConvertTypeForMem(ElTy)),
      BaseLV.getType(), BaseLV.getBaseInfo(),
      CGF.CGM.getTBAAInfoForSubobject(BaseLV, BaseLV.getType()));
}

static Address castToBase(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
                          Address OriginalBaseAddress, llvm::Value *Addr) {
  Address Tmp = Address::invalid();
  Address TopTmp = Address::invalid();
  Address MostTopTmp = Address::invalid();
  BaseTy = BaseTy.getNonReferenceType();
  while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
         !CGF.getContext().hasSameType(BaseTy, ElTy)) {
    Tmp = CGF.CreateMemTemp(BaseTy);
    if (TopTmp.isValid())
      CGF.Builder.CreateStore(Tmp.getPointer(), TopTmp);
    else
      MostTopTmp = Tmp;
    TopTmp = Tmp;
    BaseTy = BaseTy->getPointeeType();
  }

  if (Tmp.isValid()) {
    Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        Addr, Tmp.getElementType());
    CGF.Builder.CreateStore(Addr, Tmp);
    return MostTopTmp;
  }

  Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      Addr, OriginalBaseAddress.getType());
  return OriginalBaseAddress.withPointer(Addr, NotKnownNonNull);
}

static const VarDecl *getBaseDecl(const Expr *Ref, const DeclRefExpr *&DE) {
  const VarDecl *OrigVD = nullptr;
  if (const auto *OASE = dyn_cast<ArraySectionExpr>(Ref)) {
    const Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
    while (const auto *TempOASE = dyn_cast<ArraySectionExpr>(Base))
      Base = TempOASE->getBase()->IgnoreParenImpCasts();
    while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
      Base = TempASE->getBase()->IgnoreParenImpCasts();
    DE = cast<DeclRefExpr>(Base);
    OrigVD = cast<VarDecl>(DE->getDecl());
  } else if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Ref)) {
    const Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
    while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
      Base = TempASE->getBase()->IgnoreParenImpCasts();
    DE = cast<DeclRefExpr>(Base);
    OrigVD = cast<VarDecl>(DE->getDecl());
  }
  return OrigVD;
}

Address ReductionCodeGen::adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
                                               Address PrivateAddr) {
  const DeclRefExpr *DE;
  if (const VarDecl *OrigVD = ::getBaseDecl(ClausesData[N].Ref, DE)) {
    BaseDecls.emplace_back(OrigVD);
    LValue OriginalBaseLValue = CGF.EmitLValue(DE);
    LValue BaseLValue =
        loadToBegin(CGF, OrigVD->getType(), SharedAddresses[N].first.getType(),
                    OriginalBaseLValue);
    Address SharedAddr = SharedAddresses[N].first.getAddress();
    llvm::Value *Adjustment = CGF.Builder.CreatePtrDiff(
        SharedAddr.getElementType(), BaseLValue.getPointer(CGF),
        SharedAddr.emitRawPointer(CGF));
    llvm::Value *PrivatePointer =
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            PrivateAddr.emitRawPointer(CGF), SharedAddr.getType());
    llvm::Value *Ptr = CGF.Builder.CreateGEP(
        SharedAddr.getElementType(), PrivatePointer, Adjustment);
    return castToBase(CGF, OrigVD->getType(),
                      SharedAddresses[N].first.getType(),
                      OriginalBaseLValue.getAddress(), Ptr);
  }
  BaseDecls.emplace_back(
      cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Ref)->getDecl()));
  return PrivateAddr;
}

bool ReductionCodeGen::usesReductionInitializer(unsigned N) const {
  const OMPDeclareReductionDecl *DRD =
      getReductionInit(ClausesData[N].ReductionOp);
  return DRD && DRD->getInitializer();
}

LValue CGOpenMPRegionInfo::getThreadIDVariableLValue(CodeGenFunction &CGF) {
  return CGF.EmitLoadOfPointerLValue(
      CGF.GetAddrOfLocalVar(getThreadIDVariable()),
      getThreadIDVariable()->getType()->castAs<PointerType>());
}

void CGOpenMPRegionInfo::EmitBody(CodeGenFunction &CGF, const Stmt *S) {
  if (!CGF.HaveInsertPoint())
    return;
  // 1.2.2 OpenMP Language Terminology
  // Structured block - An executable statement with a single entry at the
  // top and a single exit at the bottom.
  // The point of exit cannot be a branch out of the structured block.
  // longjmp() and throw() must not violate the entry/exit criteria.
  CGF.EHStack.pushTerminate();
  if (S)
    CGF.incrementProfileCounter(S);
  CodeGen(CGF);
  CGF.EHStack.popTerminate();
}

LValue CGOpenMPTaskOutlinedRegionInfo::getThreadIDVariableLValue(
    CodeGenFunction &CGF) {
  return CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(getThreadIDVariable()),
                            getThreadIDVariable()->getType(),
                            AlignmentSource::Decl);
}

static FieldDecl *addFieldToRecordDecl(ASTContext &C, DeclContext *DC,
                                       QualType FieldTy) {
  auto *Field = FieldDecl::Create(
      C, DC, SourceLocation(), SourceLocation(), /*Id=*/nullptr, FieldTy,
      C.getTrivialTypeSourceInfo(FieldTy, SourceLocation()),
      /*BW=*/nullptr, /*Mutable=*/false, /*InitStyle=*/ICIS_NoInit);
  Field->setAccess(AS_public);
  DC->addDecl(Field);
  return Field;
}

CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM)
    : CGM(CGM), OMPBuilder(CGM.getModule()) {
  KmpCriticalNameTy = llvm::ArrayType::get(CGM.Int32Ty, /*NumElements*/ 8);
  llvm::OpenMPIRBuilderConfig Config(
      CGM.getLangOpts().OpenMPIsTargetDevice, isGPU(),
      CGM.getLangOpts().OpenMPOffloadMandatory,
      /*HasRequiresReverseOffload*/ false, /*HasRequiresUnifiedAddress*/ false,
      hasRequiresUnifiedSharedMemory(), /*HasRequiresDynamicAllocators*/ false);
  OMPBuilder.initialize();
  OMPBuilder.loadOffloadInfoMetadata(CGM.getLangOpts().OpenMPIsTargetDevice
                                         ? CGM.getLangOpts().OMPHostIRFile
                                         : StringRef{});
  OMPBuilder.setConfig(Config);

  // The user forces the compiler to behave as if omp requires
  // unified_shared_memory was given.
  if (CGM.getLangOpts().OpenMPForceUSM) {
    HasRequiresUnifiedSharedMemory = true;
    OMPBuilder.Config.setHasRequiresUnifiedSharedMemory(true);
  }
}

void CGOpenMPRuntime::clear() {
  InternalVars.clear();
  // Clean non-target variable declarations possibly used only in debug info.
  for (const auto &Data : EmittedNonTargetVariables) {
    if (!Data.getValue().pointsToAliveValue())
      continue;
    auto *GV = dyn_cast<llvm::GlobalVariable>(Data.getValue());
    if (!GV)
      continue;
    if (!GV->isDeclaration() || GV->getNumUses() > 0)
      continue;
    GV->eraseFromParent();
  }
}

std::string CGOpenMPRuntime::getName(ArrayRef<StringRef> Parts) const {
  return OMPBuilder.createPlatformSpecificName(Parts);
}

static llvm::Function *
emitCombinerOrInitializer(CodeGenModule &CGM, QualType Ty,
                          const Expr *CombinerInitializer, const VarDecl *In,
                          const VarDecl *Out, bool IsCombiner) {
  // void .omp_combiner.(Ty *in, Ty *out);
  ASTContext &C = CGM.getContext();
  QualType PtrTy = C.getPointerType(Ty).withRestrict();
  FunctionArgList Args;
  ImplicitParamDecl OmpOutParm(C, /*DC=*/nullptr, Out->getLocation(),
                               /*Id=*/nullptr, PtrTy, ImplicitParamKind::Other);
  ImplicitParamDecl OmpInParm(C, /*DC=*/nullptr, In->getLocation(),
                              /*Id=*/nullptr, PtrTy, ImplicitParamKind::Other);
  Args.push_back(&OmpOutParm);
  Args.push_back(&OmpInParm);
  const CGFunctionInfo &FnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
  std::string Name = CGM.getOpenMPRuntime().getName(
      {IsCombiner ? "omp_combiner" : "omp_initializer", ""});
  auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
                                    Name, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
  if (CGM.getLangOpts().Optimize) {
    Fn->removeFnAttr(llvm::Attribute::NoInline);
    Fn->removeFnAttr(llvm::Attribute::OptimizeNone);
    Fn->addFnAttr(llvm::Attribute::AlwaysInline);
  }
  CodeGenFunction CGF(CGM);
  // Map "T omp_in;" variable to "*omp_in_parm" value in all expressions.
  // Map "T omp_out;" variable to "*omp_out_parm" value in all expressions.
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, In->getLocation(),
                    Out->getLocation());
  CodeGenFunction::OMPPrivateScope Scope(CGF);
  Address AddrIn = CGF.GetAddrOfLocalVar(&OmpInParm);
  Scope.addPrivate(
      In, CGF.EmitLoadOfPointerLValue(AddrIn, PtrTy->castAs<PointerType>())
              .getAddress());
  Address AddrOut = CGF.GetAddrOfLocalVar(&OmpOutParm);
  Scope.addPrivate(
      Out, CGF.EmitLoadOfPointerLValue(AddrOut, PtrTy->castAs<PointerType>())
               .getAddress());
  (void)Scope.Privatize();
  if (!IsCombiner && Out->hasInit() &&
      !CGF.isTrivialInitializer(Out->getInit())) {
    CGF.EmitAnyExprToMem(Out->getInit(), CGF.GetAddrOfLocalVar(Out),
                         Out->getType().getQualifiers(),
                         /*IsInitializer=*/true);
  }
  if (CombinerInitializer)
    CGF.EmitIgnoredExpr(CombinerInitializer);
  Scope.ForceCleanup();
  CGF.FinishFunction();
  return Fn;
}
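
// For illustration: given a user-defined reduction such as
//
//   #pragma omp declare reduction(mymin : int :                            \
//       omp_out = omp_in < omp_out ? omp_in : omp_out)                     \
//       initializer(omp_priv = 2147483647)
//
// emitCombinerOrInitializer is invoked twice (see
// emitUserDefinedReduction below): once to build the ".omp_combiner."
// helper, mapping omp_in/omp_out onto its two pointer parameters, and once
// to build ".omp_initializer." for the initializer clause.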

void CGOpenMPRuntime::emitUserDefinedReduction(
    CodeGenFunction *CGF, const OMPDeclareReductionDecl *D) {
  if (UDRMap.count(D) > 0)
    return;
  llvm::Function *Combiner = emitCombinerOrInitializer(
      CGM, D->getType(), D->getCombiner(),
      cast<VarDecl>(cast<DeclRefExpr>(D->getCombinerIn())->getDecl()),
      cast<VarDecl>(cast<DeclRefExpr>(D->getCombinerOut())->getDecl()),
      /*IsCombiner=*/true);
  llvm::Function *Initializer = nullptr;
  if (const Expr *Init = D->getInitializer()) {
    Initializer = emitCombinerOrInitializer(
        CGM, D->getType(),
        D->getInitializerKind() == OMPDeclareReductionInitKind::Call ? Init
                                                                     : nullptr,
        cast<VarDecl>(cast<DeclRefExpr>(D->getInitOrig())->getDecl()),
        cast<VarDecl>(cast<DeclRefExpr>(D->getInitPriv())->getDecl()),
        /*IsCombiner=*/false);
  }
  UDRMap.try_emplace(D, Combiner, Initializer);
  if (CGF)
    FunctionUDRMap[CGF->CurFn].push_back(D);
}

std::pair<llvm::Function *, llvm::Function *>
CGOpenMPRuntime::getUserDefinedReduction(const OMPDeclareReductionDecl *D) {
  auto I = UDRMap.find(D);
  if (I != UDRMap.end())
    return I->second;
  emitUserDefinedReduction(/*CGF=*/nullptr, D);
  return UDRMap.lookup(D);
}

namespace {
// Temporary RAII solution to perform a push/pop stack event on the OpenMP IR
// Builder if one is present.
struct PushAndPopStackRAII {
  PushAndPopStackRAII(llvm::OpenMPIRBuilder *OMPBuilder, CodeGenFunction &CGF,
                      bool HasCancel, llvm::omp::Directive Kind)
      : OMPBuilder(OMPBuilder) {
    if (!OMPBuilder)
      return;

    // The following callback is the crucial part of clang's cleanup process.
    //
    // NOTE:
    // Once the OpenMPIRBuilder is used to create parallel regions (and
    // similar), the cancellation destination (Dest below) is determined via
    // IP. That means if we have variables to finalize we split the block at IP,
    // use the new block (=BB) as destination to build a JumpDest (via
    // getJumpDestInCurrentScope(BB)) which then is fed to
    // EmitBranchThroughCleanup. Furthermore, there will not be the need
    // to push & pop a FinalizationInfo object.
    // The FiniCB will still be needed but at the point where the
    // OpenMPIRBuilder is asked to construct a parallel (or similar) construct.
    auto FiniCB = [&CGF](llvm::OpenMPIRBuilder::InsertPointTy IP) {
      assert(IP.getBlock()->end() == IP.getPoint() &&
             "Clang CG should cause non-terminated block!");
      CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
      CGF.Builder.restoreIP(IP);
      CodeGenFunction::JumpDest Dest =
          CGF.getOMPCancelDestination(OMPD_parallel);
      CGF.EmitBranchThroughCleanup(Dest);
      return llvm::Error::success();
    };

    // TODO: Remove this once we emit parallel regions through the
    // OpenMPIRBuilder as it can do this setup internally.
    llvm::OpenMPIRBuilder::FinalizationInfo FI({FiniCB, Kind, HasCancel});
    OMPBuilder->pushFinalizationCB(std::move(FI));
  }
  ~PushAndPopStackRAII() {
    if (OMPBuilder)
      OMPBuilder->popFinalizationCB();
  }
  llvm::OpenMPIRBuilder *OMPBuilder;
};
} // namespace

static llvm::Function *emitParallelOrTeamsOutlinedFunction(
    CodeGenModule &CGM, const OMPExecutableDirective &D, const CapturedStmt *CS,
    const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
    const StringRef OutlinedHelperName, const RegionCodeGenTy &CodeGen) {
  assert(ThreadIDVar->getType()->isPointerType() &&
         "thread id variable must be of type kmp_int32 *");
  CodeGenFunction CGF(CGM, true);
  bool HasCancel = false;
  if (const auto *OPD = dyn_cast<OMPParallelDirective>(&D))
    HasCancel = OPD->hasCancel();
  else if (const auto *OPD = dyn_cast<OMPTargetParallelDirective>(&D))
    HasCancel = OPD->hasCancel();
  else if (const auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&D))
    HasCancel = OPSD->hasCancel();
  else if (const auto *OPFD = dyn_cast<OMPParallelForDirective>(&D))
    HasCancel = OPFD->hasCancel();
  else if (const auto *OPFD = dyn_cast<OMPTargetParallelForDirective>(&D))
    HasCancel = OPFD->hasCancel();
  else if (const auto *OPFD = dyn_cast<OMPDistributeParallelForDirective>(&D))
    HasCancel = OPFD->hasCancel();
  else if (const auto *OPFD =
               dyn_cast<OMPTeamsDistributeParallelForDirective>(&D))
    HasCancel = OPFD->hasCancel();
  else if (const auto *OPFD =
               dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(&D))
    HasCancel = OPFD->hasCancel();

  // TODO: Temporarily inform the OpenMPIRBuilder, if any, about the new
  // parallel region to make cancellation barriers work properly.
  llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
  PushAndPopStackRAII PSR(&OMPBuilder, CGF, HasCancel, InnermostKind);
  CGOpenMPOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen, InnermostKind,
                                    HasCancel, OutlinedHelperName);
  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
  return CGF.GenerateOpenMPCapturedStmtFunction(*CS, D.getBeginLoc());
}

std::string CGOpenMPRuntime::getOutlinedHelperName(StringRef Name) const {
  std::string Suffix = getName({"omp_outlined"});
  return (Name + Suffix).str();
}

std::string CGOpenMPRuntime::getOutlinedHelperName(CodeGenFunction &CGF) const {
  return getOutlinedHelperName(CGF.CurFn->getName());
}

std::string CGOpenMPRuntime::getReductionFuncName(StringRef Name) const {
  std::string Suffix = getName({"omp", "reduction", "reduction_func"});
  return (Name + Suffix).str();
}

llvm::Function *CGOpenMPRuntime::emitParallelOutlinedFunction(
    CodeGenFunction &CGF, const OMPExecutableDirective &D,
    const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
    const RegionCodeGenTy &CodeGen) {
  const CapturedStmt *CS = D.getCapturedStmt(OMPD_parallel);
  return emitParallelOrTeamsOutlinedFunction(
      CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(CGF),
      CodeGen);
}

llvm::Function *CGOpenMPRuntime::emitTeamsOutlinedFunction(
    CodeGenFunction &CGF, const OMPExecutableDirective &D,
    const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
    const RegionCodeGenTy &CodeGen) {
  const CapturedStmt *CS = D.getCapturedStmt(OMPD_teams);
  return emitParallelOrTeamsOutlinedFunction(
      CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(CGF),
      CodeGen);
}

llvm::Function *CGOpenMPRuntime::emitTaskOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    const VarDecl *PartIDVar, const VarDecl *TaskTVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
    bool Tied, unsigned &NumberOfParts) {
  auto &&UntiedCodeGen = [this, &D, TaskTVar](CodeGenFunction &CGF,
                                              PrePostActionTy &) {
    llvm::Value *ThreadID = getThreadID(CGF, D.getBeginLoc());
    llvm::Value *UpLoc = emitUpdateLocation(CGF, D.getBeginLoc());
    llvm::Value *TaskArgs[] = {
        UpLoc, ThreadID,
        CGF.EmitLoadOfPointerLValue(CGF.GetAddrOfLocalVar(TaskTVar),
                                    TaskTVar->getType()->castAs<PointerType>())
            .getPointer(CGF)};
    CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                            CGM.getModule(), OMPRTL___kmpc_omp_task),
                        TaskArgs);
  };
  CGOpenMPTaskOutlinedRegionInfo::UntiedTaskActionTy Action(Tied, PartIDVar,
                                                            UntiedCodeGen);
  CodeGen.setAction(Action);
  assert(!ThreadIDVar->getType()->isPointerType() &&
         "thread id variable must be of type kmp_int32 for tasks");
  const OpenMPDirectiveKind Region =
      isOpenMPTaskLoopDirective(D.getDirectiveKind()) ? OMPD_taskloop
                                                      : OMPD_task;
  const CapturedStmt *CS = D.getCapturedStmt(Region);
  bool HasCancel = false;
  if (const auto *TD = dyn_cast<OMPTaskDirective>(&D))
    HasCancel = TD->hasCancel();
  else if (const auto *TD = dyn_cast<OMPTaskLoopDirective>(&D))
    HasCancel = TD->hasCancel();
  else if (const auto *TD = dyn_cast<OMPMasterTaskLoopDirective>(&D))
    HasCancel = TD->hasCancel();
  else if (const auto *TD = dyn_cast<OMPParallelMasterTaskLoopDirective>(&D))
    HasCancel = TD->hasCancel();

  CodeGenFunction CGF(CGM, true);
  CGOpenMPTaskOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen,
                                        InnermostKind, HasCancel, Action);
  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
  llvm::Function *Res = CGF.GenerateCapturedStmtFunction(*CS);
  if (!Tied)
    NumberOfParts = Action.getNumberOfParts();
  return Res;
}

void CGOpenMPRuntime::setLocThreadIdInsertPt(CodeGenFunction &CGF,
                                             bool AtCurrentPoint) {
  auto &Elem = OpenMPLocThreadIDMap[CGF.CurFn];
  assert(!Elem.ServiceInsertPt && "Insert point is set already.");

  llvm::Value *Undef = llvm::UndefValue::get(CGF.Int32Ty);
  if (AtCurrentPoint) {
    Elem.ServiceInsertPt = new llvm::BitCastInst(Undef, CGF.Int32Ty, "svcpt",
                                                 CGF.Builder.GetInsertBlock());
  } else {
    Elem.ServiceInsertPt = new llvm::BitCastInst(Undef, CGF.Int32Ty, "svcpt");
    Elem.ServiceInsertPt->insertAfter(CGF.AllocaInsertPt);
  }
}

void CGOpenMPRuntime::clearLocThreadIdInsertPt(CodeGenFunction &CGF) {
  auto &Elem = OpenMPLocThreadIDMap[CGF.CurFn];
  if (Elem.ServiceInsertPt) {
    llvm::Instruction *Ptr = Elem.ServiceInsertPt;
    Elem.ServiceInsertPt = nullptr;
    Ptr->eraseFromParent();
  }
}

static StringRef getIdentStringFromSourceLocation(CodeGenFunction &CGF,
                                                  SourceLocation Loc,
                                                  SmallString<128> &Buffer) {
  llvm::raw_svector_ostream OS(Buffer);
  // Build debug location
  PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
  OS << ";" << PLoc.getFilename() << ";";
  if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CGF.CurFuncDecl))
    OS << FD->getQualifiedNameAsString();
  OS << ";" << PLoc.getLine() << ";" << PLoc.getColumn() << ";;";
  return OS.str();
}
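
// For illustration: with debug info enabled, a directive at line 12,
// column 3 of "test.c" inside function "foo" yields the ident string
// ";test.c;foo;12;3;;".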

llvm::Value *CGOpenMPRuntime::emitUpdateLocation(CodeGenFunction &CGF,
                                                 SourceLocation Loc,
                                                 unsigned Flags, bool EmitLoc) {
  uint32_t SrcLocStrSize;
  llvm::Constant *SrcLocStr;
  if ((!EmitLoc && CGM.getCodeGenOpts().getDebugInfo() ==
                       llvm::codegenoptions::NoDebugInfo) ||
      Loc.isInvalid()) {
    SrcLocStr = OMPBuilder.getOrCreateDefaultSrcLocStr(SrcLocStrSize);
  } else {
    std::string FunctionName;
    if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CGF.CurFuncDecl))
      FunctionName = FD->getQualifiedNameAsString();
    PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
    const char *FileName = PLoc.getFilename();
    unsigned Line = PLoc.getLine();
    unsigned Column = PLoc.getColumn();
    SrcLocStr = OMPBuilder.getOrCreateSrcLocStr(FunctionName, FileName, Line,
                                                Column, SrcLocStrSize);
  }
  unsigned Reserved2Flags = getDefaultLocationReserved2Flags();
  return OMPBuilder.getOrCreateIdent(
      SrcLocStr, SrcLocStrSize, llvm::omp::IdentFlag(Flags), Reserved2Flags);
}

llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF,
                                          SourceLocation Loc) {
  assert(CGF.CurFn && "No function in current CodeGenFunction.");
  // If the OpenMPIRBuilder is used we need to use it for all thread id calls as
  // the clang invariants used below might be broken.
  if (CGM.getLangOpts().OpenMPIRBuilder) {
    SmallString<128> Buffer;
    OMPBuilder.updateToLocation(CGF.Builder.saveIP());
    uint32_t SrcLocStrSize;
    auto *SrcLocStr = OMPBuilder.getOrCreateSrcLocStr(
        getIdentStringFromSourceLocation(CGF, Loc, Buffer), SrcLocStrSize);
    return OMPBuilder.getOrCreateThreadID(
        OMPBuilder.getOrCreateIdent(SrcLocStr, SrcLocStrSize));
  }

  llvm::Value *ThreadID = nullptr;
  // Check whether we've already cached a load of the thread id in this
  // function.
  auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
  if (I != OpenMPLocThreadIDMap.end()) {
    ThreadID = I->second.ThreadID;
    if (ThreadID != nullptr)
      return ThreadID;
  }
  // If exceptions are enabled, do not use parameter to avoid possible crash.
  if (auto *OMPRegionInfo =
          dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
    if (OMPRegionInfo->getThreadIDVariable()) {
      // Check if this is an outlined function with thread id passed as
      // argument.
      LValue LVal = OMPRegionInfo->getThreadIDVariableLValue(CGF);
      llvm::BasicBlock *TopBlock = CGF.AllocaInsertPt->getParent();
      if (!CGF.EHStack.requiresLandingPad() || !CGF.getLangOpts().Exceptions ||
          !CGF.getLangOpts().CXXExceptions ||
          CGF.Builder.GetInsertBlock() == TopBlock ||
          !isa<llvm::Instruction>(LVal.getPointer(CGF)) ||
          cast<llvm::Instruction>(LVal.getPointer(CGF))->getParent() ==
              TopBlock ||
          cast<llvm::Instruction>(LVal.getPointer(CGF))->getParent() ==
              CGF.Builder.GetInsertBlock()) {
        ThreadID = CGF.EmitLoadOfScalar(LVal, Loc);
        // If value loaded in entry block, cache it and use it everywhere in
        // function.
        if (CGF.Builder.GetInsertBlock() == TopBlock)
          OpenMPLocThreadIDMap[CGF.CurFn].ThreadID = ThreadID;
        return ThreadID;
      }
    }
  }

  // This is not an outlined function region - need to call kmp_int32
  // __kmpc_global_thread_num(ident_t *loc).
  // Generate thread id value and cache this value for use across the
  // function.
  auto &Elem = OpenMPLocThreadIDMap[CGF.CurFn];
  if (!Elem.ServiceInsertPt)
    setLocThreadIdInsertPt(CGF);
  CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
  CGF.Builder.SetInsertPoint(Elem.ServiceInsertPt);
  auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);
  llvm::CallInst *Call = CGF.Builder.CreateCall(
      OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
                                            OMPRTL___kmpc_global_thread_num),
      emitUpdateLocation(CGF, Loc));
  Call->setCallingConv(CGF.getRuntimeCC());
  Elem.ThreadID = Call;
  return Call;
}
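
// For illustration: inside an outlined parallel region the thread id is
// simply reloaded from the outlined function's kmp_int32 *gtid parameter, so
// no runtime call is emitted; elsewhere a single call, roughly
//
//   %gtid = call i32 @__kmpc_global_thread_num(ptr @loc)
//
// is inserted at the service insertion point ("svcpt") and cached for the
// rest of the function.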

void CGOpenMPRuntime::functionFinished(CodeGenFunction &CGF) {
  assert(CGF.CurFn && "No function in current CodeGenFunction.");
  if (OpenMPLocThreadIDMap.count(CGF.CurFn)) {
    clearLocThreadIdInsertPt(CGF);
    OpenMPLocThreadIDMap.erase(CGF.CurFn);
  }
  if (FunctionUDRMap.count(CGF.CurFn) > 0) {
    for (const auto *D : FunctionUDRMap[CGF.CurFn])
      UDRMap.erase(D);
    FunctionUDRMap.erase(CGF.CurFn);
  }
  auto I = FunctionUDMMap.find(CGF.CurFn);
  if (I != FunctionUDMMap.end()) {
    for (const auto *D : I->second)
      UDMMap.erase(D);
    FunctionUDMMap.erase(I);
  }
  LastprivateConditionalToTypes.erase(CGF.CurFn);
  FunctionToUntiedTaskStackMap.erase(CGF.CurFn);
}

llvm::Type *CGOpenMPRuntime::getIdentTyPointerTy() {
  return OMPBuilder.IdentPtr;
}

llvm::Type *CGOpenMPRuntime::getKmpc_MicroPointerTy() {
  if (!Kmpc_MicroTy) {
    // Build void (*kmpc_micro)(kmp_int32 *global_tid, kmp_int32 *bound_tid,...)
    llvm::Type *MicroParams[] = {llvm::PointerType::getUnqual(CGM.Int32Ty),
                                 llvm::PointerType::getUnqual(CGM.Int32Ty)};
    Kmpc_MicroTy = llvm::FunctionType::get(CGM.VoidTy, MicroParams, true);
  }
  return llvm::PointerType::getUnqual(Kmpc_MicroTy);
}

static llvm::OffloadEntriesInfoManager::OMPTargetDeviceClauseKind
convertDeviceClause(const VarDecl *VD) {
  std::optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
      OMPDeclareTargetDeclAttr::getDeviceType(VD);
  if (!DevTy)
    return llvm::OffloadEntriesInfoManager::OMPTargetDeviceClauseNone;

  switch ((int)*DevTy) { // Avoid -Wcovered-switch-default
  case OMPDeclareTargetDeclAttr::DT_Host:
    return llvm::OffloadEntriesInfoManager::OMPTargetDeviceClauseHost;
    break;
  case OMPDeclareTargetDeclAttr::DT_NoHost:
    return llvm::OffloadEntriesInfoManager::OMPTargetDeviceClauseNoHost;
    break;
  case OMPDeclareTargetDeclAttr::DT_Any:
    return llvm::OffloadEntriesInfoManager::OMPTargetDeviceClauseAny;
    break;
  default:
    return llvm::OffloadEntriesInfoManager::OMPTargetDeviceClauseNone;
    break;
  }
}

static llvm::OffloadEntriesInfoManager::OMPTargetGlobalVarEntryKind
convertCaptureClause(const VarDecl *VD) {
  std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> MapType =
      OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
  if (!MapType)
    return llvm::OffloadEntriesInfoManager::OMPTargetGlobalVarEntryNone;
  switch ((int)*MapType) { // Avoid -Wcovered-switch-default
  case OMPDeclareTargetDeclAttr::MapTypeTy::MT_To:
    return llvm::OffloadEntriesInfoManager::OMPTargetGlobalVarEntryTo;
    break;
  case OMPDeclareTargetDeclAttr::MapTypeTy::MT_Enter:
    return llvm::OffloadEntriesInfoManager::OMPTargetGlobalVarEntryEnter;
    break;
  case OMPDeclareTargetDeclAttr::MapTypeTy::MT_Link:
    return llvm::OffloadEntriesInfoManager::OMPTargetGlobalVarEntryLink;
    break;
  default:
    return llvm::OffloadEntriesInfoManager::OMPTargetGlobalVarEntryNone;
    break;
  }
}
1533
1534static llvm::TargetRegionEntryInfo getEntryInfoFromPresumedLoc(
1535 CodeGenModule &CGM, llvm::OpenMPIRBuilder &OMPBuilder,
1536 SourceLocation BeginLoc, llvm::StringRef ParentName = "") {
1537
1538 auto FileInfoCallBack = [&]() {
1539 SourceManager &SM = CGM.getContext().getSourceManager();
1540 PresumedLoc PLoc = SM.getPresumedLoc(BeginLoc);
1541
1542 llvm::sys::fs::UniqueID ID;
1543 if (llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID)) {
1544 PLoc = SM.getPresumedLoc(BeginLoc, /*UseLineDirectives=*/false);
1545 }
1546
1547 return std::pair<std::string, uint64_t>(PLoc.getFilename(), PLoc.getLine());
1548 };
1549
1550 return OMPBuilder.getTargetEntryUniqueInfo(FileInfoCallBack, ParentName);
1551}
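
// Illustrative note: the file/line info gathered here feeds the offload
// entry mangling in the OpenMPIRBuilder which, assuming the default scheme,
// produces names of the form
//   __omp_offloading_<device-id>_<file-id>_<parent-name>_l<line>
// so distinct presumed locations yield distinct entry names.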
1552
1553 ConstantAddress CGOpenMPRuntime::getAddrOfDeclareTargetVar(const VarDecl *VD) {
1554 auto AddrOfGlobal = [&VD, this]() { return CGM.GetAddrOfGlobal(VD); };
1555
1556 auto LinkageForVariable = [&VD, this]() {
1557 return CGM.getLLVMLinkageVarDefinition(VD);
1558 };
1559
1560 std::vector<llvm::GlobalVariable *> GeneratedRefs;
1561
1562 llvm::Type *LlvmPtrTy = CGM.getTypes().ConvertTypeForMem(
1563 CGM.getContext().getPointerType(VD->getType()));
1564 llvm::Constant *addr = OMPBuilder.getAddrOfDeclareTargetVar(
1565 convertCaptureClause(VD), convertDeviceClause(VD),
1566 VD->hasDefinition(CGM.getContext()) == VarDecl::DeclarationOnly,
1567 VD->isExternallyVisible(),
1568 getEntryInfoFromPresumedLoc(CGM, OMPBuilder,
1569 VD->getCanonicalDecl()->getBeginLoc()),
1570 CGM.getMangledName(VD), GeneratedRefs, CGM.getLangOpts().OpenMPSimd,
1571 CGM.getLangOpts().OMPTargetTriples, LlvmPtrTy, AddrOfGlobal,
1572 LinkageForVariable);
1573
1574 if (!addr)
1575 return ConstantAddress::invalid();
1576 return ConstantAddress(addr, LlvmPtrTy, CGM.getContext().getDeclAlign(VD));
1577}
1578
1579llvm::Constant *
1580 CGOpenMPRuntime::getOrCreateThreadPrivateCache(const VarDecl *VD) {
1581 assert(!CGM.getLangOpts().OpenMPUseTLS ||
1582 !CGM.getContext().getTargetInfo().isTLSSupported());
1583 // Lookup the entry, lazily creating it if necessary.
1584 std::string Suffix = getName({"cache", ""});
1585 return OMPBuilder.getOrCreateInternalVariable(
1586 CGM.Int8PtrPtrTy, Twine(CGM.getMangledName(VD)).concat(Suffix).str());
1587}
1588
1589 Address CGOpenMPRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
1590 const VarDecl *VD,
1591 Address VDAddr,
1592 SourceLocation Loc) {
1593 if (CGM.getLangOpts().OpenMPUseTLS &&
1594 CGM.getContext().getTargetInfo().isTLSSupported())
1595 return VDAddr;
1596
1597 llvm::Type *VarTy = VDAddr.getElementType();
1598 llvm::Value *Args[] = {
1599 emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
1600 CGF.Builder.CreatePointerCast(VDAddr.emitRawPointer(CGF), CGM.Int8PtrTy),
1601 CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy)),
1602 getOrCreateThreadPrivateCache(VD)};
1603 return Address(
1604 CGF.EmitRuntimeCall(
1605 OMPBuilder.getOrCreateRuntimeFunction(
1606 CGM.getModule(), OMPRTL___kmpc_threadprivate_cached),
1607 Args),
1608 CGF.Int8Ty, VDAddr.getAlignment());
1609}
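
// Hedged pseudo-C sketch of the lowering above (the cache symbol name is
// illustrative):
//   static int x;
//   #pragma omp threadprivate(x)
//   // ... each use of 'x' is rewritten to:
//   int *xp = __kmpc_threadprivate_cached(&loc, gtid, &x, sizeof(x),
//                                         &x.cache.);
// When OpenMPUseTLS is set and the target supports TLS, VDAddr is returned
// unchanged and no runtime call is emitted.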
1610
1611 void CGOpenMPRuntime::emitThreadPrivateVarInit(
1612 CodeGenFunction &CGF, Address VDAddr, llvm::Value *Ctor,
1613 llvm::Value *CopyCtor, llvm::Value *Dtor, SourceLocation Loc) {
1614 // Call kmp_int32 __kmpc_global_thread_num(&loc) to init OpenMP runtime
1615 // library.
1616 llvm::Value *OMPLoc = emitUpdateLocation(CGF, Loc);
1617 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1618 CGM.getModule(), OMPRTL___kmpc_global_thread_num),
1619 OMPLoc);
1620 // Call __kmpc_threadprivate_register(&loc, &var, ctor, cctor/*NULL*/, dtor)
1621 // to register constructor/destructor for variable.
1622 llvm::Value *Args[] = {
1623 OMPLoc,
1624 CGF.Builder.CreatePointerCast(VDAddr.emitRawPointer(CGF), CGM.VoidPtrTy),
1625 Ctor, CopyCtor, Dtor};
1626 CGF.EmitRuntimeCall(
1627 OMPBuilder.getOrCreateRuntimeFunction(
1628 CGM.getModule(), OMPRTL___kmpc_threadprivate_register),
1629 Args);
1630}
1631
1632 llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
1633 const VarDecl *VD, Address VDAddr, SourceLocation Loc,
1634 bool PerformInit, CodeGenFunction *CGF) {
1635 if (CGM.getLangOpts().OpenMPUseTLS &&
1636 CGM.getContext().getTargetInfo().isTLSSupported())
1637 return nullptr;
1638
1639 VD = VD->getDefinition(CGM.getContext());
1640 if (VD && ThreadPrivateWithDefinition.insert(CGM.getMangledName(VD)).second) {
1641 QualType ASTTy = VD->getType();
1642
1643 llvm::Value *Ctor = nullptr, *CopyCtor = nullptr, *Dtor = nullptr;
1644 const Expr *Init = VD->getAnyInitializer();
1645 if (CGM.getLangOpts().CPlusPlus && PerformInit) {
1646 // Generate function that re-emits the declaration's initializer into the
1647 // threadprivate copy of the variable VD
1648 CodeGenFunction CtorCGF(CGM);
1649 FunctionArgList Args;
1650 ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, Loc,
1651 /*Id=*/nullptr, CGM.getContext().VoidPtrTy,
1652 ImplicitParamKind::Other);
1653 Args.push_back(&Dst);
1654
1655 const auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
1656 CGM.getContext().VoidPtrTy, Args);
1657 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
1658 std::string Name = getName({"__kmpc_global_ctor_", ""});
1659 llvm::Function *Fn =
1660 CGM.CreateGlobalInitOrCleanUpFunction(FTy, Name, FI, Loc);
1661 CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidPtrTy, Fn, FI,
1662 Args, Loc, Loc);
1663 llvm::Value *ArgVal = CtorCGF.EmitLoadOfScalar(
1664 CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
1665 CGM.getContext().VoidPtrTy, Dst.getLocation());
1666 Address Arg(ArgVal, CtorCGF.ConvertTypeForMem(ASTTy),
1667 VDAddr.getAlignment());
1668 CtorCGF.EmitAnyExprToMem(Init, Arg, Init->getType().getQualifiers(),
1669 /*IsInitializer=*/true);
1670 ArgVal = CtorCGF.EmitLoadOfScalar(
1671 CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
1672 CGM.getContext().VoidPtrTy, Dst.getLocation());
1673 CtorCGF.Builder.CreateStore(ArgVal, CtorCGF.ReturnValue);
1674 CtorCGF.FinishFunction();
1675 Ctor = Fn;
1676 }
1677 if (VD->getType().isDestructedType() != QualType::DK_none) {
1678 // Generate function that emits destructor call for the threadprivate copy
1679 // of the variable VD
1680 CodeGenFunction DtorCGF(CGM);
1681 FunctionArgList Args;
1682 ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, Loc,
1683 /*Id=*/nullptr, CGM.getContext().VoidPtrTy,
1684 ImplicitParamKind::Other);
1685 Args.push_back(&Dst);
1686
1687 const auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
1688 CGM.getContext().VoidTy, Args);
1689 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
1690 std::string Name = getName({"__kmpc_global_dtor_", ""});
1691 llvm::Function *Fn =
1692 CGM.CreateGlobalInitOrCleanUpFunction(FTy, Name, FI, Loc);
1693 auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF);
1694 DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI, Args,
1695 Loc, Loc);
1696 // Create a scope with an artificial location for the body of this function.
1697 auto AL = ApplyDebugLocation::CreateArtificial(DtorCGF);
1698 llvm::Value *ArgVal = DtorCGF.EmitLoadOfScalar(
1699 DtorCGF.GetAddrOfLocalVar(&Dst),
1700 /*Volatile=*/false, CGM.getContext().VoidPtrTy, Dst.getLocation());
1701 DtorCGF.emitDestroy(
1702 Address(ArgVal, DtorCGF.Int8Ty, VDAddr.getAlignment()), ASTTy,
1703 DtorCGF.getDestroyer(ASTTy.isDestructedType()),
1704 DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
1705 DtorCGF.FinishFunction();
1706 Dtor = Fn;
1707 }
1708 // Do not emit init function if it is not required.
1709 if (!Ctor && !Dtor)
1710 return nullptr;
1711
1712 // Copying constructor for the threadprivate variable.
1713 // Must be NULL: the parameter is reserved by the runtime, which currently
1714 // requires it to always be NULL and asserts otherwise.
1715 CopyCtor = llvm::Constant::getNullValue(CGM.UnqualPtrTy);
1716 if (Ctor == nullptr) {
1717 Ctor = llvm::Constant::getNullValue(CGM.UnqualPtrTy);
1718 }
1719 if (Dtor == nullptr) {
1720 Dtor = llvm::Constant::getNullValue(CGM.UnqualPtrTy);
1721 }
1722 if (!CGF) {
1723 auto *InitFunctionTy =
1724 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg*/ false);
1725 std::string Name = getName({"__omp_threadprivate_init_", ""});
1726 llvm::Function *InitFunction = CGM.CreateGlobalInitOrCleanUpFunction(
1727 InitFunctionTy, Name, CGM.getTypes().arrangeNullaryFunction());
1728 CodeGenFunction InitCGF(CGM);
1729 FunctionArgList ArgList;
1730 InitCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, InitFunction,
1731 CGM.getTypes().arrangeNullaryFunction(), ArgList,
1732 Loc, Loc);
1733 emitThreadPrivateVarInit(InitCGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
1734 InitCGF.FinishFunction();
1735 return InitFunction;
1736 }
1737 emitThreadPrivateVarInit(*CGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
1738 }
1739 return nullptr;
1740}
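
// Hedged pseudo-C sketch of what the code above produces for a C++
// threadprivate variable 'v' of type T with a non-trivial ctor/dtor:
//   void *__kmpc_global_ctor_(void *p) { new (p) T(init); return p; }
//   void __kmpc_global_dtor_(void *p) { ((T *)p)->~T(); }
//   void __omp_threadprivate_init_() {
//     __kmpc_global_thread_num(&loc); // initializes the runtime
//     __kmpc_threadprivate_register(&loc, &v, ctor, /*cctor=*/0, dtor);
//   }
// The standalone init function is created only when no CodeGenFunction is
// passed in; otherwise the registration is emitted into *CGF directly.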
1741
1742 void CGOpenMPRuntime::emitDeclareTargetFunction(const FunctionDecl *FD,
1743 llvm::GlobalValue *GV) {
1744 std::optional<OMPDeclareTargetDeclAttr *> ActiveAttr =
1745 OMPDeclareTargetDeclAttr::getActiveAttr(FD);
1746
1747 // We only need to handle active 'indirect' declare target functions.
1748 if (!ActiveAttr || !(*ActiveAttr)->getIndirect())
1749 return;
1750
1751 // Get a mangled name to store the new device global in.
1752 llvm::TargetRegionEntryInfo EntryInfo = getEntryInfoFromPresumedLoc(
1753 CGM, OMPBuilder, FD->getCanonicalDecl()->getBeginLoc(), FD->getName());
1754 SmallString<128> Name;
1755 OMPBuilder.OffloadInfoManager.getTargetRegionEntryFnName(Name, EntryInfo);
1756
1757 // We need to generate a new global to hold the address of the indirectly
1758 // called device function. Doing this allows us to keep the visibility and
1759 // linkage of the associated function unchanged while allowing the runtime to
1760 // access its value.
1761 llvm::GlobalValue *Addr = GV;
1762 if (CGM.getLangOpts().OpenMPIsTargetDevice) {
1763 Addr = new llvm::GlobalVariable(
1764 CGM.getModule(), CGM.VoidPtrTy,
1765 /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage, GV, Name,
1766 nullptr, llvm::GlobalValue::NotThreadLocal,
1767 CGM.getModule().getDataLayout().getDefaultGlobalsAddressSpace());
1768 Addr->setVisibility(llvm::GlobalValue::ProtectedVisibility);
1769 }
1770
1771 OMPBuilder.OffloadInfoManager.registerDeviceGlobalVarEntryInfo(
1772 Name, Addr, CGM.GetTargetTypeStoreSize(CGM.VoidPtrTy).getQuantity(),
1773 llvm::OffloadEntriesInfoManager::OMPTargetGlobalVarEntryIndirect,
1774 llvm::GlobalValue::WeakODRLinkage);
1775}
1776
1777 Address CGOpenMPRuntime::getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
1778 QualType VarType,
1779 StringRef Name) {
1780 std::string Suffix = getName({"artificial", ""});
1781 llvm::Type *VarLVType = CGF.ConvertTypeForMem(VarType);
1782 llvm::GlobalVariable *GAddr = OMPBuilder.getOrCreateInternalVariable(
1783 VarLVType, Twine(Name).concat(Suffix).str());
1784 if (CGM.getLangOpts().OpenMP && CGM.getLangOpts().OpenMPUseTLS &&
1785 CGM.getTarget().isTLSSupported()) {
1786 GAddr->setThreadLocal(/*Val=*/true);
1787 return Address(GAddr, GAddr->getValueType(),
1788 CGM.getContext().getTypeAlignInChars(VarType));
1789 }
1790 std::string CacheSuffix = getName({"cache", ""});
1791 llvm::Value *Args[] = {
1792 emitUpdateLocation(CGF, SourceLocation()),
1793 getThreadID(CGF, SourceLocation()),
1794 CGF.Builder.CreatePointerCast(GAddr, CGM.VoidPtrTy),
1795 CGF.Builder.CreateIntCast(CGF.getTypeSize(VarType), CGM.SizeTy,
1796 /*isSigned=*/false),
1797 OMPBuilder.getOrCreateInternalVariable(
1798 CGM.VoidPtrPtrTy,
1799 Twine(Name).concat(Suffix).concat(CacheSuffix).str())};
1800 return Address(
1801 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
1802 CGF.EmitRuntimeCall(
1803 OMPBuilder.getOrCreateRuntimeFunction(
1804 CGM.getModule(), OMPRTL___kmpc_threadprivate_cached),
1805 Args),
1806 CGF.Builder.getPtrTy(0)),
1807 VarLVType, CGM.getContext().getTypeAlignInChars(VarType));
1808}
1809
1810 void CGOpenMPRuntime::emitIfClause(CodeGenFunction &CGF, const Expr *Cond,
1811 const RegionCodeGenTy &ThenGen,
1812 const RegionCodeGenTy &ElseGen) {
1813 CodeGenFunction::LexicalScope ConditionScope(CGF, Cond->getSourceRange());
1814
1815 // If the condition constant folds and can be elided, try to avoid emitting
1816 // the condition and the dead arm of the if/else.
1817 bool CondConstant;
1818 if (CGF.ConstantFoldsToSimpleInteger(Cond, CondConstant)) {
1819 if (CondConstant)
1820 ThenGen(CGF);
1821 else
1822 ElseGen(CGF);
1823 return;
1824 }
1825
1826 // Otherwise, the condition did not fold, or we couldn't elide it. Just
1827 // emit the conditional branch.
1828 llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("omp_if.then");
1829 llvm::BasicBlock *ElseBlock = CGF.createBasicBlock("omp_if.else");
1830 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("omp_if.end");
1831 CGF.EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, /*TrueCount=*/0);
1832
1833 // Emit the 'then' code.
1834 CGF.EmitBlock(ThenBlock);
1835 ThenGen(CGF);
1836 CGF.EmitBranch(ContBlock);
1837 // Emit the 'else' code if present.
1838 // There is no need to emit line number for unconditional branch.
1839 (void)ApplyDebugLocation::CreateEmpty(CGF);
1840 CGF.EmitBlock(ElseBlock);
1841 ElseGen(CGF);
1842 // There is no need to emit line number for unconditional branch.
1843 (void)ApplyDebugLocation::CreateEmpty(CGF);
1844 CGF.EmitBranch(ContBlock);
1845 // Emit the continuation block for code after the if.
1846 CGF.EmitBlock(ContBlock, /*IsFinished=*/true);
1847}
1848
1849 void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
1850 llvm::Function *OutlinedFn,
1851 ArrayRef<llvm::Value *> CapturedVars,
1852 const Expr *IfCond,
1853 llvm::Value *NumThreads) {
1854 if (!CGF.HaveInsertPoint())
1855 return;
1856 llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
1857 auto &M = CGM.getModule();
1858 auto &&ThenGen = [&M, OutlinedFn, CapturedVars, RTLoc,
1859 this](CodeGenFunction &CGF, PrePostActionTy &) {
1860 // Build call __kmpc_fork_call(loc, n, microtask, var1, .., varn);
1861 CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
1862 llvm::Value *Args[] = {
1863 RTLoc,
1864 CGF.Builder.getInt32(CapturedVars.size()), // Number of captured vars
1865 CGF.Builder.CreateBitCast(OutlinedFn, RT.getKmpc_MicroPointerTy())};
1866 llvm::SmallVector<llvm::Value *, 16> RealArgs;
1867 RealArgs.append(std::begin(Args), std::end(Args));
1868 RealArgs.append(CapturedVars.begin(), CapturedVars.end());
1869
1870 llvm::FunctionCallee RTLFn =
1871 OMPBuilder.getOrCreateRuntimeFunction(M, OMPRTL___kmpc_fork_call);
1872 CGF.EmitRuntimeCall(RTLFn, RealArgs);
1873 };
1874 auto &&ElseGen = [&M, OutlinedFn, CapturedVars, RTLoc, Loc,
1875 this](CodeGenFunction &CGF, PrePostActionTy &) {
1876 CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
1877 llvm::Value *ThreadID = RT.getThreadID(CGF, Loc);
1878 // Build calls:
1879 // __kmpc_serialized_parallel(&Loc, GTid);
1880 llvm::Value *Args[] = {RTLoc, ThreadID};
1881 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1882 M, OMPRTL___kmpc_serialized_parallel),
1883 Args);
1884
1885 // OutlinedFn(&GTid, &zero_bound, CapturedStruct);
1886 Address ThreadIDAddr = RT.emitThreadIDAddress(CGF, Loc);
1887 RawAddress ZeroAddrBound =
1888 CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
1889 /*Name=*/".bound.zero.addr");
1890 CGF.Builder.CreateStore(CGF.Builder.getInt32(/*C*/ 0), ZeroAddrBound);
1891 llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
1892 // ThreadId for serialized parallels is 0.
1893 OutlinedFnArgs.push_back(ThreadIDAddr.emitRawPointer(CGF));
1894 OutlinedFnArgs.push_back(ZeroAddrBound.getPointer());
1895 OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
1896
1897 // Ensure we do not inline the function. This is trivially true for the ones
1898 // passed to __kmpc_fork_call but the ones called in serialized regions
1899 // could be inlined. This is not perfect, but it is closer to the invariant
1900 // we want, namely, every data environment starts with a new function.
1901 // TODO: We should pass the if condition to the runtime function and do the
1902 // handling there. Much cleaner code.
1903 OutlinedFn->removeFnAttr(llvm::Attribute::AlwaysInline);
1904 OutlinedFn->addFnAttr(llvm::Attribute::NoInline);
1905 RT.emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
1906
1907 // __kmpc_end_serialized_parallel(&Loc, GTid);
1908 llvm::Value *EndArgs[] = {RT.emitUpdateLocation(CGF, Loc), ThreadID};
1909 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1910 M, OMPRTL___kmpc_end_serialized_parallel),
1911 EndArgs);
1912 };
1913 if (IfCond) {
1914 emitIfClause(CGF, IfCond, ThenGen, ElseGen);
1915 } else {
1916 RegionCodeGenTy ThenRCG(ThenGen);
1917 ThenRCG(CGF);
1918 }
1919}
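
// Hedged sketch of the two paths emitted above for
// '#pragma omp parallel if(C)' (pseudo-code; @outlined stands for the
// outlined parallel region):
//   if (C) {
//     __kmpc_fork_call(&loc, <n captured>, @outlined, <captured vars>...);
//   } else {
//     gtid = __kmpc_global_thread_num(&loc);
//     __kmpc_serialized_parallel(&loc, gtid);
//     @outlined(&gtid, &.bound.zero.addr, <captured vars>...);
//     __kmpc_end_serialized_parallel(&loc, gtid);
//   }
// Without an if clause, only the fork path is emitted.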
1920
1921 // If we're inside an (outlined) parallel region, use the region info's
1922 // thread-ID variable (it is passed as the first argument of the outlined
1923 // function as "kmp_int32 *gtid"). Otherwise, if we're in a regular serial
1924 // code region, get the thread ID by calling kmp_int32
1925 // __kmpc_global_thread_num(ident_t *loc), stash it in a temporary, and
1926 // return the address of that temporary.
1927 Address CGOpenMPRuntime::emitThreadIDAddress(CodeGenFunction &CGF,
1928 SourceLocation Loc) {
1929 if (auto *OMPRegionInfo =
1930 dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
1931 if (OMPRegionInfo->getThreadIDVariable())
1932 return OMPRegionInfo->getThreadIDVariableLValue(CGF).getAddress();
1933
1934 llvm::Value *ThreadID = getThreadID(CGF, Loc);
1935 QualType Int32Ty =
1936 CGF.getContext().getIntTypeForBitwidth(/*DestWidth*/ 32, /*Signed*/ true);
1937 Address ThreadIDTemp = CGF.CreateMemTemp(Int32Ty, /*Name*/ ".threadid_temp.");
1938 CGF.EmitStoreOfScalar(ThreadID,
1939 CGF.MakeAddrLValue(ThreadIDTemp, Int32Ty));
1940
1941 return ThreadIDTemp;
1942}
1943
1944llvm::Value *CGOpenMPRuntime::getCriticalRegionLock(StringRef CriticalName) {
1945 std::string Prefix = Twine("gomp_critical_user_", CriticalName).str();
1946 std::string Name = getName({Prefix, "var"});
1947 return OMPBuilder.getOrCreateInternalVariable(KmpCriticalNameTy, Name);
1948}
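
// Illustrative note: for '#pragma omp critical (foo)' the lock created here
// is an internal global of type kmp_critical_name named roughly
// ".gomp_critical_user_foo.var" (the exact spelling depends on getName).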
1949
1950namespace {
1951/// Common pre(post)-action for different OpenMP constructs.
1952class CommonActionTy final : public PrePostActionTy {
1953 llvm::FunctionCallee EnterCallee;
1954 ArrayRef<llvm::Value *> EnterArgs;
1955 llvm::FunctionCallee ExitCallee;
1956 ArrayRef<llvm::Value *> ExitArgs;
1957 bool Conditional;
1958 llvm::BasicBlock *ContBlock = nullptr;
1959
1960public:
1961 CommonActionTy(llvm::FunctionCallee EnterCallee,
1962 ArrayRef<llvm::Value *> EnterArgs,
1963 llvm::FunctionCallee ExitCallee,
1964 ArrayRef<llvm::Value *> ExitArgs, bool Conditional = false)
1965 : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
1966 ExitArgs(ExitArgs), Conditional(Conditional) {}
1967 void Enter(CodeGenFunction &CGF) override {
1968 llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs);
1969 if (Conditional) {
1970 llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes);
1971 auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
1972 ContBlock = CGF.createBasicBlock("omp_if.end");
1973 // Generate the branch (If-stmt)
1974 CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
1975 CGF.EmitBlock(ThenBlock);
1976 }
1977 }
1978 void Done(CodeGenFunction &CGF) {
1979 // Emit the rest of blocks/branches
1980 CGF.EmitBranch(ContBlock);
1981 CGF.EmitBlock(ContBlock, true);
1982 }
1983 void Exit(CodeGenFunction &CGF) override {
1984 CGF.EmitRuntimeCall(ExitCallee, ExitArgs);
1985 }
1986};
1987} // anonymous namespace
1988
1989 void CGOpenMPRuntime::emitCriticalRegion(CodeGenFunction &CGF,
1990 StringRef CriticalName,
1991 const RegionCodeGenTy &CriticalOpGen,
1992 SourceLocation Loc, const Expr *Hint) {
1993 // __kmpc_critical[_with_hint](ident_t *, gtid, Lock[, hint]);
1994 // CriticalOpGen();
1995 // __kmpc_end_critical(ident_t *, gtid, Lock);
1996 // Prepare arguments and build a call to __kmpc_critical
1997 if (!CGF.HaveInsertPoint())
1998 return;
1999 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
2000 getCriticalRegionLock(CriticalName)};
2001 llvm::SmallVector<llvm::Value *, 4> EnterArgs(std::begin(Args),
2002 std::end(Args));
2003 if (Hint) {
2004 EnterArgs.push_back(CGF.Builder.CreateIntCast(
2005 CGF.EmitScalarExpr(Hint), CGM.Int32Ty, /*isSigned=*/false));
2006 }
2007 CommonActionTy Action(
2008 OMPBuilder.getOrCreateRuntimeFunction(
2009 CGM.getModule(),
2010 Hint ? OMPRTL___kmpc_critical_with_hint : OMPRTL___kmpc_critical),
2011 EnterArgs,
2012 OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
2013 OMPRTL___kmpc_end_critical),
2014 Args);
2015 CriticalOpGen.setAction(Action);
2016 emitInlinedDirective(CGF, OMPD_critical, CriticalOpGen);
2017}
2018
2019 void CGOpenMPRuntime::emitMasterRegion(CodeGenFunction &CGF,
2020 const RegionCodeGenTy &MasterOpGen,
2021 SourceLocation Loc) {
2022 if (!CGF.HaveInsertPoint())
2023 return;
2024 // if(__kmpc_master(ident_t *, gtid)) {
2025 // MasterOpGen();
2026 // __kmpc_end_master(ident_t *, gtid);
2027 // }
2028 // Prepare arguments and build a call to __kmpc_master
2029 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
2030 CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
2031 CGM.getModule(), OMPRTL___kmpc_master),
2032 Args,
2033 OMPBuilder.getOrCreateRuntimeFunction(
2034 CGM.getModule(), OMPRTL___kmpc_end_master),
2035 Args,
2036 /*Conditional=*/true);
2037 MasterOpGen.setAction(Action);
2038 emitInlinedDirective(CGF, OMPD_master, MasterOpGen);
2039 Action.Done(CGF);
2040}
2041
2042 void CGOpenMPRuntime::emitMaskedRegion(CodeGenFunction &CGF,
2043 const RegionCodeGenTy &MaskedOpGen,
2044 SourceLocation Loc, const Expr *Filter) {
2045 if (!CGF.HaveInsertPoint())
2046 return;
2047 // if(__kmpc_masked(ident_t *, gtid, filter)) {
2048 // MaskedOpGen();
2049 // __kmpc_end_masked(ident_t *, gtid);
2050 // }
2051 // Prepare arguments and build a call to __kmpc_masked
2052 llvm::Value *FilterVal = Filter
2053 ? CGF.EmitScalarExpr(Filter, CGF.Int32Ty)
2054 : llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/0);
2055 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
2056 FilterVal};
2057 llvm::Value *ArgsEnd[] = {emitUpdateLocation(CGF, Loc),
2058 getThreadID(CGF, Loc)};
2059 CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
2060 CGM.getModule(), OMPRTL___kmpc_masked),
2061 Args,
2062 OMPBuilder.getOrCreateRuntimeFunction(
2063 CGM.getModule(), OMPRTL___kmpc_end_masked),
2064 ArgsEnd,
2065 /*Conditional=*/true);
2066 MaskedOpGen.setAction(Action);
2067 emitInlinedDirective(CGF, OMPD_masked, MaskedOpGen);
2068 Action.Done(CGF);
2069}
2070
2071 void CGOpenMPRuntime::emitTaskyieldCall(CodeGenFunction &CGF,
2072 SourceLocation Loc) {
2073 if (!CGF.HaveInsertPoint())
2074 return;
2075 if (CGF.CGM.getLangOpts().OpenMPIRBuilder) {
2076 OMPBuilder.createTaskyield(CGF.Builder);
2077 } else {
2078 // Build call __kmpc_omp_taskyield(loc, thread_id, 0);
2079 llvm::Value *Args[] = {
2080 emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
2081 llvm::ConstantInt::get(CGM.IntTy, /*V=*/0, /*isSigned=*/true)};
2082 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2083 CGM.getModule(), OMPRTL___kmpc_omp_taskyield),
2084 Args);
2085 }
2086
2087 if (auto *Region = dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
2088 Region->emitUntiedSwitch(CGF);
2089}
2090
2091 void CGOpenMPRuntime::emitTaskgroupRegion(CodeGenFunction &CGF,
2092 const RegionCodeGenTy &TaskgroupOpGen,
2093 SourceLocation Loc) {
2094 if (!CGF.HaveInsertPoint())
2095 return;
2096 // __kmpc_taskgroup(ident_t *, gtid);
2097 // TaskgroupOpGen();
2098 // __kmpc_end_taskgroup(ident_t *, gtid);
2099 // Prepare arguments and build a call to __kmpc_taskgroup
2100 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
2101 CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
2102 CGM.getModule(), OMPRTL___kmpc_taskgroup),
2103 Args,
2104 OMPBuilder.getOrCreateRuntimeFunction(
2105 CGM.getModule(), OMPRTL___kmpc_end_taskgroup),
2106 Args);
2107 TaskgroupOpGen.setAction(Action);
2108 emitInlinedDirective(CGF, OMPD_taskgroup, TaskgroupOpGen);
2109}
2110
2111/// Given an array of pointers to variables, project the address of a
2112/// given variable.
2113 static Address emitAddrOfVarFromArray(CodeGenFunction &CGF, Address Array,
2114 unsigned Index, const VarDecl *Var) {
2115 // Pull out the pointer to the variable.
2116 Address PtrAddr = CGF.Builder.CreateConstArrayGEP(Array, Index);
2117 llvm::Value *Ptr = CGF.Builder.CreateLoad(PtrAddr);
2118
2119 llvm::Type *ElemTy = CGF.ConvertTypeForMem(Var->getType());
2120 return Address(Ptr, ElemTy, CGF.getContext().getDeclAlign(Var));
2121}
2122
2123 static llvm::Function *emitCopyprivateCopyFunction(
2124 CodeGenModule &CGM, llvm::Type *ArgsElemType,
2125 ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs,
2126 ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps,
2127 SourceLocation Loc) {
2128 ASTContext &C = CGM.getContext();
2129 // void copy_func(void *LHSArg, void *RHSArg);
2130 FunctionArgList Args;
2131 ImplicitParamDecl LHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
2132 ImplicitParamKind::Other);
2133 ImplicitParamDecl RHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
2134 ImplicitParamKind::Other);
2135 Args.push_back(&LHSArg);
2136 Args.push_back(&RHSArg);
2137 const auto &CGFI =
2138 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2139 std::string Name =
2140 CGM.getOpenMPRuntime().getName({"omp", "copyprivate", "copy_func"});
2141 auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
2142 llvm::GlobalValue::InternalLinkage, Name,
2143 &CGM.getModule());
2144 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2145 Fn->setDoesNotRecurse();
2146 CodeGenFunction CGF(CGM);
2147 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2148 // Dest = (void*[n])(LHSArg);
2149 // Src = (void*[n])(RHSArg);
2150 Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2151 CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
2152 CGF.Builder.getPtrTy(0)),
2153 ArgsElemType, CGF.getPointerAlign());
2154 Address RHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2155 CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
2156 CGF.Builder.getPtrTy(0)),
2157 ArgsElemType, CGF.getPointerAlign());
2158 // *(Type0*)Dst[0] = *(Type0*)Src[0];
2159 // *(Type1*)Dst[1] = *(Type1*)Src[1];
2160 // ...
2161 // *(Typen*)Dst[n] = *(Typen*)Src[n];
2162 for (unsigned I = 0, E = AssignmentOps.size(); I < E; ++I) {
2163 const auto *DestVar =
2164 cast<VarDecl>(cast<DeclRefExpr>(DestExprs[I])->getDecl());
2165 Address DestAddr = emitAddrOfVarFromArray(CGF, LHS, I, DestVar);
2166
2167 const auto *SrcVar =
2168 cast<VarDecl>(cast<DeclRefExpr>(SrcExprs[I])->getDecl());
2169 Address SrcAddr = emitAddrOfVarFromArray(CGF, RHS, I, SrcVar);
2170
2171 const auto *VD = cast<DeclRefExpr>(CopyprivateVars[I])->getDecl();
2172 QualType Type = VD->getType();
2173 CGF.EmitOMPCopy(Type, DestAddr, SrcAddr, DestVar, SrcVar, AssignmentOps[I]);
2174 }
2175 CGF.FinishFunction();
2176 return Fn;
2177}
2178
2179 void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF,
2180 const RegionCodeGenTy &SingleOpGen,
2181 SourceLocation Loc,
2182 ArrayRef<const Expr *> CopyprivateVars,
2183 ArrayRef<const Expr *> SrcExprs,
2184 ArrayRef<const Expr *> DstExprs,
2185 ArrayRef<const Expr *> AssignmentOps) {
2186 if (!CGF.HaveInsertPoint())
2187 return;
2188 assert(CopyprivateVars.size() == SrcExprs.size() &&
2189 CopyprivateVars.size() == DstExprs.size() &&
2190 CopyprivateVars.size() == AssignmentOps.size());
2191 ASTContext &C = CGM.getContext();
2192 // int32 did_it = 0;
2193 // if(__kmpc_single(ident_t *, gtid)) {
2194 // SingleOpGen();
2195 // __kmpc_end_single(ident_t *, gtid);
2196 // did_it = 1;
2197 // }
2198 // call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>,
2199 // <copy_func>, did_it);
2200
2201 Address DidIt = Address::invalid();
2202 if (!CopyprivateVars.empty()) {
2203 // int32 did_it = 0;
2204 QualType KmpInt32Ty =
2205 C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
2206 DidIt = CGF.CreateMemTemp(KmpInt32Ty, ".omp.copyprivate.did_it");
2207 CGF.Builder.CreateStore(CGF.Builder.getInt32(0), DidIt);
2208 }
2209 // Prepare arguments and build a call to __kmpc_single
2210 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
2211 CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
2212 CGM.getModule(), OMPRTL___kmpc_single),
2213 Args,
2214 OMPBuilder.getOrCreateRuntimeFunction(
2215 CGM.getModule(), OMPRTL___kmpc_end_single),
2216 Args,
2217 /*Conditional=*/true);
2218 SingleOpGen.setAction(Action);
2219 emitInlinedDirective(CGF, OMPD_single, SingleOpGen);
2220 if (DidIt.isValid()) {
2221 // did_it = 1;
2222 CGF.Builder.CreateStore(CGF.Builder.getInt32(1), DidIt);
2223 }
2224 Action.Done(CGF);
2225 // call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>,
2226 // <copy_func>, did_it);
2227 if (DidIt.isValid()) {
2228 llvm::APInt ArraySize(/*unsigned int numBits=*/32, CopyprivateVars.size());
2229 QualType CopyprivateArrayTy = C.getConstantArrayType(
2230 C.VoidPtrTy, ArraySize, nullptr, ArraySizeModifier::Normal,
2231 /*IndexTypeQuals=*/0);
2232 // Create a list of all private variables for copyprivate.
2233 Address CopyprivateList =
2234 CGF.CreateMemTemp(CopyprivateArrayTy, ".omp.copyprivate.cpr_list");
2235 for (unsigned I = 0, E = CopyprivateVars.size(); I < E; ++I) {
2236 Address Elem = CGF.Builder.CreateConstArrayGEP(CopyprivateList, I);
2237 CGF.Builder.CreateStore(
2238 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2239 CGF.EmitLValue(CopyprivateVars[I]).getPointer(CGF),
2240 CGF.VoidPtrTy),
2241 Elem);
2242 }
2243 // Build the function that copies private values from the single region to
2244 // all other threads in the corresponding parallel region.
2245 llvm::Value *CpyFn = emitCopyprivateCopyFunction(
2246 CGM, CGF.ConvertTypeForMem(CopyprivateArrayTy), CopyprivateVars,
2247 SrcExprs, DstExprs, AssignmentOps, Loc);
2248 llvm::Value *BufSize = CGF.getTypeSize(CopyprivateArrayTy);
2249 Address CL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2250 CopyprivateList, CGF.VoidPtrTy, CGF.Int8Ty);
2251 llvm::Value *DidItVal = CGF.Builder.CreateLoad(DidIt);
2252 llvm::Value *Args[] = {
2253 emitUpdateLocation(CGF, Loc), // ident_t *<loc>
2254 getThreadID(CGF, Loc), // i32 <gtid>
2255 BufSize, // size_t <buf_size>
2256 CL.emitRawPointer(CGF), // void *<copyprivate list>
2257 CpyFn, // void (*) (void *, void *) <copy_func>
2258 DidItVal // i32 did_it
2259 };
2260 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2261 CGM.getModule(), OMPRTL___kmpc_copyprivate),
2262 Args);
2263 }
2264}
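
// Hedged pseudo-code sketch of the sequence emitted above for
// '#pragma omp single copyprivate(a, b)':
//   int32 did_it = 0;
//   if (__kmpc_single(&loc, gtid)) {
//     <single body>;
//     __kmpc_end_single(&loc, gtid);
//     did_it = 1;
//   }
//   void *list[2] = {&a, &b};
//   __kmpc_copyprivate(&loc, gtid, sizeof(list), list, @copy_func, did_it);
// where @copy_func is the helper built by emitCopyprivateCopyFunction above.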
2265
2266 void CGOpenMPRuntime::emitOrderedRegion(CodeGenFunction &CGF,
2267 const RegionCodeGenTy &OrderedOpGen,
2268 SourceLocation Loc, bool IsThreads) {
2269 if (!CGF.HaveInsertPoint())
2270 return;
2271 // __kmpc_ordered(ident_t *, gtid);
2272 // OrderedOpGen();
2273 // __kmpc_end_ordered(ident_t *, gtid);
2274 // Prepare arguments and build a call to __kmpc_ordered
2275 if (IsThreads) {
2276 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
2277 CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
2278 CGM.getModule(), OMPRTL___kmpc_ordered),
2279 Args,
2280 OMPBuilder.getOrCreateRuntimeFunction(
2281 CGM.getModule(), OMPRTL___kmpc_end_ordered),
2282 Args);
2283 OrderedOpGen.setAction(Action);
2284 emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
2285 return;
2286 }
2287 emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
2288}
2289
2290 static unsigned getDefaultFlagsForBarriers(OpenMPDirectiveKind Kind) {
2291 unsigned Flags;
2292 if (Kind == OMPD_for)
2293 Flags = OMP_IDENT_BARRIER_IMPL_FOR;
2294 else if (Kind == OMPD_sections)
2295 Flags = OMP_IDENT_BARRIER_IMPL_SECTIONS;
2296 else if (Kind == OMPD_single)
2297 Flags = OMP_IDENT_BARRIER_IMPL_SINGLE;
2298 else if (Kind == OMPD_barrier)
2299 Flags = OMP_IDENT_BARRIER_EXPL;
2300 else
2301 Flags = OMP_IDENT_BARRIER_IMPL;
2302 return Flags;
2303}
2304
2305 void CGOpenMPRuntime::getDefaultScheduleAndChunk(
2306 CodeGenFunction &CGF, const OMPLoopDirective &S,
2307 OpenMPScheduleClauseKind &ScheduleKind, const Expr *&ChunkExpr) const {
2308 // Check if the loop directive is actually a doacross loop directive. In that
2309 // case, choose schedule(static, 1).
2310 if (llvm::any_of(
2311 S.getClausesOfKind<OMPOrderedClause>(),
2312 [](const OMPOrderedClause *C) { return C->getNumForLoops(); })) {
2313 ScheduleKind = OMPC_SCHEDULE_static;
2314 // Chunk size is 1 in this case.
2315 llvm::APInt ChunkSize(32, 1);
2316 ChunkExpr = IntegerLiteral::Create(
2317 CGF.getContext(), ChunkSize,
2318 CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
2319 SourceLocation());
2320 }
2321}
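
// Illustrative example: a doacross loop with no schedule clause, such as
//   #pragma omp for ordered(2)
// takes the branch above and is lowered as schedule(static, 1), giving a
// deterministic iteration-to-thread mapping for the depend(source)/
// depend(sink) ordering.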
2322
2323 void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
2324 OpenMPDirectiveKind Kind, bool EmitChecks,
2325 bool ForceSimpleCall) {
2326 // Check if we should use the OMPBuilder
2327 auto *OMPRegionInfo =
2328 dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo);
2329 if (CGF.CGM.getLangOpts().OpenMPIRBuilder) {
2330 llvm::OpenMPIRBuilder::InsertPointOrErrorTy AfterIP =
2331 OMPBuilder.createBarrier(CGF.Builder, Kind, ForceSimpleCall,
2332 EmitChecks);
2333 assert(AfterIP && "unexpected error creating barrier");
2334 CGF.Builder.restoreIP(*AfterIP);
2335 return;
2336 }
2337
2338 if (!CGF.HaveInsertPoint())
2339 return;
2340 // Build call __kmpc_cancel_barrier(loc, thread_id);
2341 // Build call __kmpc_barrier(loc, thread_id);
2342 unsigned Flags = getDefaultFlagsForBarriers(Kind);
2343 // Build call __kmpc_cancel_barrier(loc, thread_id) or __kmpc_barrier(loc,
2344 // thread_id);
2345 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
2346 getThreadID(CGF, Loc)};
2347 if (OMPRegionInfo) {
2348 if (!ForceSimpleCall && OMPRegionInfo->hasCancel()) {
2349 llvm::Value *Result = CGF.EmitRuntimeCall(
2350 OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
2351 OMPRTL___kmpc_cancel_barrier),
2352 Args);
2353 if (EmitChecks) {
2354 // if (__kmpc_cancel_barrier()) {
2355 // exit from construct;
2356 // }
2357 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit");
2358 llvm::BasicBlock *ContBB = CGF.createBasicBlock(".cancel.continue");
2359 llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result);
2360 CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
2361 CGF.EmitBlock(ExitBB);
2362 // exit from construct;
2363 CodeGenFunction::JumpDest CancelDestination =
2364 CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
2365 CGF.EmitBranchThroughCleanup(CancelDestination);
2366 CGF.EmitBlock(ContBB, /*IsFinished=*/true);
2367 }
2368 return;
2369 }
2370 }
2371 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2372 CGM.getModule(), OMPRTL___kmpc_barrier),
2373 Args);
2374}
2375
2376 void CGOpenMPRuntime::emitErrorCall(CodeGenFunction &CGF, SourceLocation Loc,
2377 Expr *ME, bool IsFatal) {
2378 llvm::Value *MVL =
2379 ME ? CGF.EmitStringLiteralLValue(cast<StringLiteral>(ME)).getPointer(CGF)
2380 : llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
2381 // Build call void __kmpc_error(ident_t *loc, int severity, const char
2382 // *message)
2383 llvm::Value *Args[] = {
2384 emitUpdateLocation(CGF, Loc, /*Flags=*/0, /*GenLoc=*/true),
2385 llvm::ConstantInt::get(CGM.Int32Ty, IsFatal ? 2 : 1),
2386 CGF.Builder.CreatePointerCast(MVL, CGM.Int8PtrTy)};
2387 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2388 CGM.getModule(), OMPRTL___kmpc_error),
2389 Args);
2390}
2391
2392/// Map the OpenMP loop schedule to the runtime enumeration.
2393static OpenMPSchedType getRuntimeSchedule(OpenMPScheduleClauseKind ScheduleKind,
2394 bool Chunked, bool Ordered) {
2395 switch (ScheduleKind) {
2396 case OMPC_SCHEDULE_static:
2397 return Chunked ? (Ordered ? OMP_ord_static_chunked : OMP_sch_static_chunked)
2398 : (Ordered ? OMP_ord_static : OMP_sch_static);
2399 case OMPC_SCHEDULE_dynamic:
2400 return Ordered ? OMP_ord_dynamic_chunked : OMP_sch_dynamic_chunked;
2401 case OMPC_SCHEDULE_guided:
2402 return Ordered ? OMP_ord_guided_chunked : OMP_sch_guided_chunked;
2403 case OMPC_SCHEDULE_runtime:
2404 return Ordered ? OMP_ord_runtime : OMP_sch_runtime;
2405 case OMPC_SCHEDULE_auto:
2406 return Ordered ? OMP_ord_auto : OMP_sch_auto;
2407 case OMPC_SCHEDULE_unknown:
2408 assert(!Chunked && "chunk was specified but schedule kind not known");
2409 return Ordered ? OMP_ord_static : OMP_sch_static;
2410 }
2411 llvm_unreachable("Unexpected runtime schedule");
2412}
2413
2414/// Map the OpenMP distribute schedule to the runtime enumeration.
2415static OpenMPSchedType
2416 getRuntimeSchedule(OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) {
2417 // only static is allowed for dist_schedule
2418 return Chunked ? OMP_dist_sch_static_chunked : OMP_dist_sch_static;
2419}
2420
2421 bool CGOpenMPRuntime::isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind,
2422 bool Chunked) const {
2423 OpenMPSchedType Schedule =
2424 getRuntimeSchedule(ScheduleKind, Chunked, /*Ordered=*/false);
2425 return Schedule == OMP_sch_static;
2426}
2427
2428 bool CGOpenMPRuntime::isStaticNonchunked(
2429 OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const {
2430 OpenMPSchedType Schedule = getRuntimeSchedule(ScheduleKind, Chunked);
2431 return Schedule == OMP_dist_sch_static;
2432}
2433
2434 bool CGOpenMPRuntime::isStaticChunked(OpenMPScheduleClauseKind ScheduleKind,
2435 bool Chunked) const {
2436 OpenMPSchedType Schedule =
2437 getRuntimeSchedule(ScheduleKind, Chunked, /*Ordered=*/false);
2438 return Schedule == OMP_sch_static_chunked;
2439}
2440
2441 bool CGOpenMPRuntime::isStaticChunked(
2442 OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const {
2443 OpenMPSchedType Schedule = getRuntimeSchedule(ScheduleKind, Chunked);
2444 return Schedule == OMP_dist_sch_static_chunked;
2445}
2446
2447 bool CGOpenMPRuntime::isDynamic(OpenMPScheduleClauseKind ScheduleKind) const {
2448 OpenMPSchedType Schedule =
2449 getRuntimeSchedule(ScheduleKind, /*Chunked=*/false, /*Ordered=*/false);
2450 assert(Schedule != OMP_sch_static_chunked && "cannot be chunked here");
2451 return Schedule != OMP_sch_static;
2452}
2453
2454static int addMonoNonMonoModifier(CodeGenModule &CGM, OpenMPSchedType Schedule,
2455 OpenMPScheduleClauseModifier M1,
2456 OpenMPScheduleClauseModifier M2) {
2457 int Modifier = 0;
2458 switch (M1) {
2459 case OMPC_SCHEDULE_MODIFIER_monotonic:
2460 Modifier = OMP_sch_modifier_monotonic;
2461 break;
2462 case OMPC_SCHEDULE_MODIFIER_nonmonotonic:
2463 Modifier = OMP_sch_modifier_nonmonotonic;
2464 break;
2465 case OMPC_SCHEDULE_MODIFIER_simd:
2466 if (Schedule == OMP_sch_static_chunked)
2467 Schedule = OMP_sch_static_balanced_chunked;
2468 break;
2469 case OMPC_SCHEDULE_MODIFIER_last:
2470 case OMPC_SCHEDULE_MODIFIER_unknown:
2471 break;
2472 }
2473 switch (M2) {
2474 case OMPC_SCHEDULE_MODIFIER_monotonic:
2475 Modifier = OMP_sch_modifier_monotonic;
2476 break;
2477 case OMPC_SCHEDULE_MODIFIER_nonmonotonic:
2478 Modifier = OMP_sch_modifier_nonmonotonic;
2479 break;
2480 case OMPC_SCHEDULE_MODIFIER_simd:
2481 if (Schedule == OMP_sch_static_chunked)
2482 Schedule = OMP_sch_static_balanced_chunked;
2483 break;
2484 case OMPC_SCHEDULE_MODIFIER_last:
2485 case OMPC_SCHEDULE_MODIFIER_unknown:
2486 break;
2487 }
2488 // OpenMP 5.0, 2.9.2 Worksharing-Loop Construct, Description.
2489 // If the static schedule kind is specified or if the ordered clause is
2490 // specified, and if the nonmonotonic modifier is not specified, the effect is
2491 // as if the monotonic modifier is specified. Otherwise, unless the monotonic
2492 // modifier is specified, the effect is as if the nonmonotonic modifier is
2493 // specified.
2494 if (CGM.getLangOpts().OpenMP >= 50 && Modifier == 0) {
2495 if (!(Schedule == OMP_sch_static_chunked || Schedule == OMP_sch_static ||
2496 Schedule == OMP_sch_static_balanced_chunked ||
2497 Schedule == OMP_ord_static_chunked || Schedule == OMP_ord_static ||
2498 Schedule == OMP_dist_sch_static_chunked ||
2499 Schedule == OMP_dist_sch_static))
2500 Modifier = OMP_sch_modifier_nonmonotonic;
2501 }
2502 return Schedule | Modifier;
2503}
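
// Hedged example of the encoding above for OpenMP >= 5.0:
//   schedule(dynamic)            -> OMP_sch_dynamic_chunked
//                                   | OMP_sch_modifier_nonmonotonic
//   schedule(monotonic: dynamic) -> OMP_sch_dynamic_chunked
//                                   | OMP_sch_modifier_monotonic
// Static and ordered schedules are left without the nonmonotonic bit.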
2504
2505 void CGOpenMPRuntime::emitForDispatchInit(
2506 CodeGenFunction &CGF, SourceLocation Loc,
2507 const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned,
2508 bool Ordered, const DispatchRTInput &DispatchValues) {
2509 if (!CGF.HaveInsertPoint())
2510 return;
2511 OpenMPSchedType Schedule = getRuntimeSchedule(
2512 ScheduleKind.Schedule, DispatchValues.Chunk != nullptr, Ordered);
2513 assert(Ordered ||
2514 (Schedule != OMP_sch_static && Schedule != OMP_sch_static_chunked &&
2515 Schedule != OMP_ord_static && Schedule != OMP_ord_static_chunked &&
2516 Schedule != OMP_sch_static_balanced_chunked));
2517 // Call __kmpc_dispatch_init(
2518 // ident_t *loc, kmp_int32 tid, kmp_int32 schedule,
2519 // kmp_int[32|64] lower, kmp_int[32|64] upper,
2520 // kmp_int[32|64] stride, kmp_int[32|64] chunk);
2521
2522 // If the chunk was not specified in the clause, use the default value 1.
2523 llvm::Value *Chunk = DispatchValues.Chunk ? DispatchValues.Chunk
2524 : CGF.Builder.getIntN(IVSize, 1);
2525 llvm::Value *Args[] = {
2526 emitUpdateLocation(CGF, Loc),
2527 getThreadID(CGF, Loc),
2528 CGF.Builder.getInt32(addMonoNonMonoModifier(
2529 CGM, Schedule, ScheduleKind.M1, ScheduleKind.M2)), // Schedule type
2530 DispatchValues.LB, // Lower
2531 DispatchValues.UB, // Upper
2532 CGF.Builder.getIntN(IVSize, 1), // Stride
2533 Chunk // Chunk
2534 };
2535 CGF.EmitRuntimeCall(OMPBuilder.createDispatchInitFunction(IVSize, IVSigned),
2536 Args);
2537}
2538
2539 void CGOpenMPRuntime::emitForDispatchDeinit(CodeGenFunction &CGF,
2540 SourceLocation Loc) {
2541 if (!CGF.HaveInsertPoint())
2542 return;
2543 // Call __kmpc_dispatch_deinit(ident_t *loc, kmp_int32 tid);
2544 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
2545 CGF.EmitRuntimeCall(OMPBuilder.createDispatchDeinitFunction(), Args);
2546}
2547
2548 static void emitForStaticInitCall(
2549 CodeGenFunction &CGF, llvm::Value *UpdateLocation, llvm::Value *ThreadId,
2550 llvm::FunctionCallee ForStaticInitFunction, OpenMPSchedType Schedule,
2551 OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
2552 const CGOpenMPRuntime::StaticRTInput &Values) {
2553 if (!CGF.HaveInsertPoint())
2554 return;
2555
2556 assert(!Values.Ordered);
2557 assert(Schedule == OMP_sch_static || Schedule == OMP_sch_static_chunked ||
2558 Schedule == OMP_sch_static_balanced_chunked ||
2559 Schedule == OMP_ord_static || Schedule == OMP_ord_static_chunked ||
2560 Schedule == OMP_dist_sch_static ||
2561 Schedule == OMP_dist_sch_static_chunked);
2562
2563 // Call __kmpc_for_static_init(
2564 // ident_t *loc, kmp_int32 tid, kmp_int32 schedtype,
2565 // kmp_int32 *p_lastiter, kmp_int[32|64] *p_lower,
2566 // kmp_int[32|64] *p_upper, kmp_int[32|64] *p_stride,
2567 // kmp_int[32|64] incr, kmp_int[32|64] chunk);
2568 llvm::Value *Chunk = Values.Chunk;
2569 if (Chunk == nullptr) {
2570 assert((Schedule == OMP_sch_static || Schedule == OMP_ord_static ||
2571 Schedule == OMP_dist_sch_static) &&
2572 "expected static non-chunked schedule");
2573 // If the chunk was not specified in the clause, use the default value 1.
2574 Chunk = CGF.Builder.getIntN(Values.IVSize, 1);
2575 } else {
2576 assert((Schedule == OMP_sch_static_chunked ||
2577 Schedule == OMP_sch_static_balanced_chunked ||
2578 Schedule == OMP_ord_static_chunked ||
2579 Schedule == OMP_dist_sch_static_chunked) &&
2580 "expected static chunked schedule");
2581 }
2582 llvm::Value *Args[] = {
2583 UpdateLocation,
2584 ThreadId,
2585 CGF.Builder.getInt32(addMonoNonMonoModifier(CGF.CGM, Schedule, M1,
2586 M2)), // Schedule type
2587 Values.IL.emitRawPointer(CGF), // &isLastIter
2588 Values.LB.emitRawPointer(CGF), // &LB
2589 Values.UB.emitRawPointer(CGF), // &UB
2590 Values.ST.emitRawPointer(CGF), // &Stride
2591 CGF.Builder.getIntN(Values.IVSize, 1), // Incr
2592 Chunk // Chunk
2593 };
2594 CGF.EmitRuntimeCall(ForStaticInitFunction, Args);
2595}
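
// Hedged pseudo-code sketch of how a statically scheduled worksharing loop
// uses this call; the runtime clamps the bounds to the calling thread's
// chunk:
//   __kmpc_for_static_init_4(&loc, gtid, sched, &last, &lb, &ub, &st,
//                            /*incr=*/1, chunk);
//   for (i = lb; i <= ub; i += st)
//     <loop body>;
//   __kmpc_for_static_fini(&loc, gtid);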
2596
2597 void CGOpenMPRuntime::emitForStaticInit(CodeGenFunction &CGF,
2598 SourceLocation Loc,
2599 OpenMPDirectiveKind DKind,
2600 const OpenMPScheduleTy &ScheduleKind,
2601 const StaticRTInput &Values) {
2602 OpenMPSchedType ScheduleNum = getRuntimeSchedule(
2603 ScheduleKind.Schedule, Values.Chunk != nullptr, Values.Ordered);
2604 assert((isOpenMPWorksharingDirective(DKind) || (DKind == OMPD_loop)) &&
2605 "Expected loop-based or sections-based directive.");
2606 llvm::Value *UpdatedLocation = emitUpdateLocation(CGF, Loc,
2607 isOpenMPLoopDirective(DKind)
2608 ? OMP_IDENT_WORK_LOOP
2609 : OMP_IDENT_WORK_SECTIONS);
2610 llvm::Value *ThreadId = getThreadID(CGF, Loc);
2611 llvm::FunctionCallee StaticInitFunction =
2612 OMPBuilder.createForStaticInitFunction(Values.IVSize, Values.IVSigned,
2613 false);
2614 auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);
2615 emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
2616 ScheduleNum, ScheduleKind.M1, ScheduleKind.M2, Values);
2617}
2618
2619 void CGOpenMPRuntime::emitDistributeStaticInit(
2620 CodeGenFunction &CGF, SourceLocation Loc,
2621 OpenMPDistScheduleClauseKind SchedKind,
2622 const CGOpenMPRuntime::StaticRTInput &Values) {
2623 OpenMPSchedType ScheduleNum =
2624 getRuntimeSchedule(SchedKind, Values.Chunk != nullptr);
2625 llvm::Value *UpdatedLocation =
2626 emitUpdateLocation(CGF, Loc, OMP_IDENT_WORK_DISTRIBUTE);
2627 llvm::Value *ThreadId = getThreadID(CGF, Loc);
2628 llvm::FunctionCallee StaticInitFunction;
2629 bool isGPUDistribute =
2630 CGM.getLangOpts().OpenMPIsTargetDevice &&
2631 (CGM.getTriple().isAMDGCN() || CGM.getTriple().isNVPTX());
2632 StaticInitFunction = OMPBuilder.createForStaticInitFunction(
2633 Values.IVSize, Values.IVSigned, isGPUDistribute);
2634
2635 emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
2636 ScheduleNum, OMPC_SCHEDULE_MODIFIER_unknown,
2637 OMPC_SCHEDULE_MODIFIER_unknown, Values);
2638}
2639
2640 void CGOpenMPRuntime::emitForStaticFinish(CodeGenFunction &CGF,
2641 SourceLocation Loc,
2642 OpenMPDirectiveKind DKind) {
2643 assert((DKind == OMPD_distribute || DKind == OMPD_for ||
2644 DKind == OMPD_sections) &&
2645 "Expected distribute, for, or sections directive kind");
2646 if (!CGF.HaveInsertPoint())
2647 return;
2648 // Call __kmpc_for_static_fini(ident_t *loc, kmp_int32 tid);
2649 llvm::Value *Args[] = {
2650 emitUpdateLocation(CGF, Loc,
2651 isOpenMPDistributeDirective(DKind) ||
2652 (DKind == OMPD_target_teams_loop)
2653 ? OMP_IDENT_WORK_DISTRIBUTE
2654 : isOpenMPLoopDirective(DKind)
2655 ? OMP_IDENT_WORK_LOOP
2656 : OMP_IDENT_WORK_SECTIONS),
2657 getThreadID(CGF, Loc)};
2658 auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);
2659 if (isOpenMPDistributeDirective(DKind) &&
2660 CGM.getLangOpts().OpenMPIsTargetDevice &&
2661 (CGM.getTriple().isAMDGCN() || CGM.getTriple().isNVPTX()))
2662 CGF.EmitRuntimeCall(
2663 OMPBuilder.getOrCreateRuntimeFunction(
2664 CGM.getModule(), OMPRTL___kmpc_distribute_static_fini),
2665 Args);
2666 else
2667 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2668 CGM.getModule(), OMPRTL___kmpc_for_static_fini),
2669 Args);
2670}
2671
2672 void CGOpenMPRuntime::emitForOrderedIterationEnd(CodeGenFunction &CGF,
2673 SourceLocation Loc,
2674 unsigned IVSize,
2675 bool IVSigned) {
2676 if (!CGF.HaveInsertPoint())
2677 return;
2678 // Call __kmpc_for_dynamic_fini_(4|8)[u](ident_t *loc, kmp_int32 tid);
2679 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
2680 CGF.EmitRuntimeCall(OMPBuilder.createDispatchFiniFunction(IVSize, IVSigned),
2681 Args);
2682}
2683
2684 llvm::Value *CGOpenMPRuntime::emitForNext(CodeGenFunction &CGF,
2685 SourceLocation Loc, unsigned IVSize,
2686 bool IVSigned, Address IL,
2687 Address LB, Address UB,
2688 Address ST) {
2689 // Call __kmpc_dispatch_next(
2690 // ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
2691 // kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
2692 // kmp_int[32|64] *p_stride);
2693 llvm::Value *Args[] = {
2694 emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
2695 IL.emitRawPointer(CGF), // &isLastIter
2696 LB.emitRawPointer(CGF), // &Lower
2697 UB.emitRawPointer(CGF), // &Upper
2698 ST.emitRawPointer(CGF) // &Stride
2699 };
2700 llvm::Value *Call = CGF.EmitRuntimeCall(
2701 OMPBuilder.createDispatchNextFunction(IVSize, IVSigned), Args);
2702 return CGF.EmitScalarConversion(
2703 Call, CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/1),
2704 CGF.getContext().BoolTy, Loc);
2705}
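
// Hedged pseudo-code sketch of the dispatch loop driven by
// emitForDispatchInit and emitForNext (entry points shown for IVSize == 32):
//   __kmpc_dispatch_init_4(&loc, gtid, sched, lb, ub, /*stride=*/1, chunk);
//   while (__kmpc_dispatch_next_4(&loc, gtid, &last, &lo, &hi, &st)) {
//     for (i = lo; i <= hi; i += st)
//       <loop body>;
//   }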
2706
2707 void CGOpenMPRuntime::emitNumThreadsClause(CodeGenFunction &CGF,
2708 llvm::Value *NumThreads,
2709 SourceLocation Loc) {
2710 if (!CGF.HaveInsertPoint())
2711 return;
2712 // Build call __kmpc_push_num_threads(&loc, global_tid, num_threads)
2713 llvm::Value *Args[] = {
2714 emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
2715 CGF.Builder.CreateIntCast(NumThreads, CGF.Int32Ty, /*isSigned*/ true)};
2716 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2717 CGM.getModule(), OMPRTL___kmpc_push_num_threads),
2718 Args);
2719}
2720
2721 void CGOpenMPRuntime::emitProcBindClause(CodeGenFunction &CGF,
2722 ProcBindKind ProcBind,
2723 SourceLocation Loc) {
2724 if (!CGF.HaveInsertPoint())
2725 return;
2726 assert(ProcBind != OMP_PROC_BIND_unknown && "Unsupported proc_bind value.");
2727 // Build call __kmpc_push_proc_bind(&loc, global_tid, proc_bind)
2728 llvm::Value *Args[] = {
2729 emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
2730 llvm::ConstantInt::get(CGM.IntTy, unsigned(ProcBind), /*isSigned=*/true)};
2731 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2732 CGM.getModule(), OMPRTL___kmpc_push_proc_bind),
2733 Args);
2734}
2735
2736 void CGOpenMPRuntime::emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *>,
2737 SourceLocation Loc, llvm::AtomicOrdering AO) {
2738 if (CGF.CGM.getLangOpts().OpenMPIRBuilder) {
2739 OMPBuilder.createFlush(CGF.Builder);
2740 } else {
2741 if (!CGF.HaveInsertPoint())
2742 return;
2743 // Build call void __kmpc_flush(ident_t *loc)
2744 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2745 CGM.getModule(), OMPRTL___kmpc_flush),
2746 emitUpdateLocation(CGF, Loc));
2747 }
2748}
2749
2750namespace {
2751/// Indexes of fields for type kmp_task_t.
2752enum KmpTaskTFields {
2753 /// List of shared variables.
2754 KmpTaskTShareds,
2755 /// Task routine.
2756 KmpTaskTRoutine,
2757 /// Partition id for the untied tasks.
2758 KmpTaskTPartId,
2759 /// Function with call of destructors for private variables.
2760 Data1,
2761 /// Task priority.
2762 Data2,
2763 /// (Taskloops only) Lower bound.
2764 KmpTaskTLowerBound,
2765 /// (Taskloops only) Upper bound.
2766 KmpTaskTUpperBound,
2767 /// (Taskloops only) Stride.
2768 KmpTaskTStride,
2769 /// (Taskloops only) Is last iteration flag.
2770 KmpTaskTLastIter,
2771 /// (Taskloops only) Reduction data.
2772 KmpTaskTReductions,
2773};
2774} // anonymous namespace
2775
2776 void CGOpenMPRuntime::createOffloadEntriesAndInfoMetadata() {
2777 // If we are in simd mode or there are no entries, we don't need to do
2778 // anything.
2779 if (CGM.getLangOpts().OpenMPSimd || OMPBuilder.OffloadInfoManager.empty())
2780 return;
2781
2782 llvm::OpenMPIRBuilder::EmitMetadataErrorReportFunctionTy &&ErrorReportFn =
2783 [this](llvm::OpenMPIRBuilder::EmitMetadataErrorKind Kind,
2784 const llvm::TargetRegionEntryInfo &EntryInfo) -> void {
2785 SourceLocation Loc;
2786 if (Kind != llvm::OpenMPIRBuilder::EMIT_MD_GLOBAL_VAR_LINK_ERROR) {
2787 for (auto I = CGM.getContext().getSourceManager().fileinfo_begin(),
2788 E = CGM.getContext().getSourceManager().fileinfo_end();
2789 I != E; ++I) {
2790 if (I->getFirst().getUniqueID().getDevice() == EntryInfo.DeviceID &&
2791 I->getFirst().getUniqueID().getFile() == EntryInfo.FileID) {
2792 Loc = CGM.getContext().getSourceManager().translateFileLineCol(
2793 I->getFirst(), EntryInfo.Line, 1);
2794 break;
2795 }
2796 }
2797 }
2798 switch (Kind) {
2799 case llvm::OpenMPIRBuilder::EMIT_MD_TARGET_REGION_ERROR: {
2800 unsigned DiagID = CGM.getDiags().getCustomDiagID(
2801 DiagnosticsEngine::Error, "Offloading entry for target region in "
2802 "%0 is incorrect: either the "
2803 "address or the ID is invalid.");
2804 CGM.getDiags().Report(Loc, DiagID) << EntryInfo.ParentName;
2805 } break;
2806 case llvm::OpenMPIRBuilder::EMIT_MD_DECLARE_TARGET_ERROR: {
2807 unsigned DiagID = CGM.getDiags().getCustomDiagID(
2808 DiagnosticsEngine::Error, "Offloading entry for declare target "
2809 "variable %0 is incorrect: the "
2810 "address is invalid.");
2811 CGM.getDiags().Report(Loc, DiagID) << EntryInfo.ParentName;
2812 } break;
2813 case llvm::OpenMPIRBuilder::EMIT_MD_GLOBAL_VAR_LINK_ERROR: {
2814 unsigned DiagID = CGM.getDiags().getCustomDiagID(
2815 DiagnosticsEngine::Error,
2816 "Offloading entry for declare target variable is incorrect: the "
2817 "address is invalid.");
2818 CGM.getDiags().Report(DiagID);
2819 } break;
2820 }
2821 };
2822
2823 OMPBuilder.createOffloadEntriesAndInfoMetadata(ErrorReportFn);
2824}
2825
2826 void CGOpenMPRuntime::emitKmpRoutineEntryT(QualType KmpInt32Ty) {
2827 if (!KmpRoutineEntryPtrTy) {
2828 // Build typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *); type.
2829 ASTContext &C = CGM.getContext();
2830 QualType KmpRoutineEntryTyArgs[] = {KmpInt32Ty, C.VoidPtrTy};
2831 FunctionProtoType::ExtProtoInfo EPI;
2832 KmpRoutineEntryPtrQTy = C.getPointerType(
2833 C.getFunctionType(KmpInt32Ty, KmpRoutineEntryTyArgs, EPI));
2834 KmpRoutineEntryPtrTy = CGM.getTypes().ConvertType(KmpRoutineEntryPtrQTy);
2835 }
2836}
2837
2838namespace {
2839struct PrivateHelpersTy {
2840 PrivateHelpersTy(const Expr *OriginalRef, const VarDecl *Original,
2841 const VarDecl *PrivateCopy, const VarDecl *PrivateElemInit)
2842 : OriginalRef(OriginalRef), Original(Original), PrivateCopy(PrivateCopy),
2843 PrivateElemInit(PrivateElemInit) {}
2844 PrivateHelpersTy(const VarDecl *Original) : Original(Original) {}
2845 const Expr *OriginalRef = nullptr;
2846 const VarDecl *Original = nullptr;
2847 const VarDecl *PrivateCopy = nullptr;
2848 const VarDecl *PrivateElemInit = nullptr;
2849 bool isLocalPrivate() const {
2850 return !OriginalRef && !PrivateCopy && !PrivateElemInit;
2851 }
2852};
2853typedef std::pair<CharUnits /*Align*/, PrivateHelpersTy> PrivateDataTy;
2854} // anonymous namespace
2855
2856static bool isAllocatableDecl(const VarDecl *VD) {
2857 const VarDecl *CVD = VD->getCanonicalDecl();
2858 if (!CVD->hasAttr<OMPAllocateDeclAttr>())
2859 return false;
2860 const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
2861 // Use the default allocation.
2862 return !(AA->getAllocatorType() == OMPAllocateDeclAttr::OMPDefaultMemAlloc &&
2863 !AA->getAllocator());
2864}
2865
2866static RecordDecl *
2867 createPrivatesRecordDecl(CodeGenModule &CGM, ArrayRef<PrivateDataTy> Privates) {
2868 if (!Privates.empty()) {
2869 ASTContext &C = CGM.getContext();
2870 // Build struct .kmp_privates_t. {
2871 // /* private vars */
2872 // };
2873 RecordDecl *RD = C.buildImplicitRecord(".kmp_privates.t");
2874 RD->startDefinition();
2875 for (const auto &Pair : Privates) {
2876 const VarDecl *VD = Pair.second.Original;
2877 QualType Type = VD->getType().getNonReferenceType();
2878 // If the private variable is a local variable with lvalue ref type,
2879 // allocate the pointer instead of the pointee type.
2880 if (Pair.second.isLocalPrivate()) {
2881 if (VD->getType()->isLValueReferenceType())
2882 Type = C.getPointerType(Type);
2883 if (isAllocatableDecl(VD))
2884 Type = C.getPointerType(Type);
2885 }
2886 FieldDecl *FD = addFieldToRecordDecl(C, RD, Type);
2887 if (VD->hasAttrs()) {
2888 for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
2889 E(VD->getAttrs().end());
2890 I != E; ++I)
2891 FD->addAttr(*I);
2892 }
2893 }
2894 RD->completeDefinition();
2895 return RD;
2896 }
2897 return nullptr;
2898}
2899
2900static RecordDecl *
2901 createKmpTaskTRecordDecl(CodeGenModule &CGM, OpenMPDirectiveKind Kind,
2902 QualType KmpInt32Ty,
2903 QualType KmpRoutineEntryPointerQTy) {
2904 ASTContext &C = CGM.getContext();
2905 // Build struct kmp_task_t {
2906 // void * shareds;
2907 // kmp_routine_entry_t routine;
2908 // kmp_int32 part_id;
2909 // kmp_cmplrdata_t data1;
2910 // kmp_cmplrdata_t data2;
2911 // For taskloops additional fields:
2912 // kmp_uint64 lb;
2913 // kmp_uint64 ub;
2914 // kmp_int64 st;
2915 // kmp_int32 liter;
2916 // void * reductions;
2917 // };
2918 RecordDecl *UD = C.buildImplicitRecord("kmp_cmplrdata_t", TagTypeKind::Union);
2919 UD->startDefinition();
2920 addFieldToRecordDecl(C, UD, KmpInt32Ty);
2921 addFieldToRecordDecl(C, UD, KmpRoutineEntryPointerQTy);
2922 UD->completeDefinition();
2923 QualType KmpCmplrdataTy = C.getRecordType(UD);
2924 RecordDecl *RD = C.buildImplicitRecord("kmp_task_t");
2925 RD->startDefinition();
2926 addFieldToRecordDecl(C, RD, C.VoidPtrTy);
2927 addFieldToRecordDecl(C, RD, KmpRoutineEntryPointerQTy);
2928 addFieldToRecordDecl(C, RD, KmpInt32Ty);
2929 addFieldToRecordDecl(C, RD, KmpCmplrdataTy);
2930 addFieldToRecordDecl(C, RD, KmpCmplrdataTy);
2931 if (isOpenMPTaskLoopDirective(Kind)) {
2932 QualType KmpUInt64Ty =
2933 CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0);
2934 QualType KmpInt64Ty =
2935 CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
2936 addFieldToRecordDecl(C, RD, KmpUInt64Ty);
2937 addFieldToRecordDecl(C, RD, KmpUInt64Ty);
2938 addFieldToRecordDecl(C, RD, KmpInt64Ty);
2939 addFieldToRecordDecl(C, RD, KmpInt32Ty);
2940 addFieldToRecordDecl(C, RD, C.VoidPtrTy);
2941 }
2942 RD->completeDefinition();
2943 return RD;
2944}
2945
2946static RecordDecl *
2947 createKmpTaskTWithPrivatesRecordDecl(CodeGenModule &CGM, QualType KmpTaskTQTy,
2948 ArrayRef<PrivateDataTy> Privates) {
2949 ASTContext &C = CGM.getContext();
2950 // Build struct kmp_task_t_with_privates {
2951 // kmp_task_t task_data;
2952 // .kmp_privates_t. privates;
2953 // };
2954 RecordDecl *RD = C.buildImplicitRecord("kmp_task_t_with_privates");
2955 RD->startDefinition();
2956 addFieldToRecordDecl(C, RD, KmpTaskTQTy);
2957 if (const RecordDecl *PrivateRD = createPrivatesRecordDecl(CGM, Privates))
2958 addFieldToRecordDecl(C, RD, C.getRecordType(PrivateRD));
2959 RD->completeDefinition();
2960 return RD;
2961}
2962
2963/// Emit a proxy function which accepts kmp_task_t as the second
2964/// argument.
2965/// \code
2966/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
2967/// TaskFunction(gtid, tt->part_id, &tt->privates, task_privates_map, tt,
2968/// For taskloops:
2969/// tt->task_data.lb, tt->task_data.ub, tt->task_data.st, tt->task_data.liter,
2970/// tt->reductions, tt->shareds);
2971/// return 0;
2972/// }
2973/// \endcode
2974static llvm::Function *
2975 emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
2976 OpenMPDirectiveKind Kind, QualType KmpInt32Ty,
2977 QualType KmpTaskTWithPrivatesPtrQTy,
2978 QualType KmpTaskTWithPrivatesQTy, QualType KmpTaskTQTy,
2979 QualType SharedsPtrTy, llvm::Function *TaskFunction,
2980 llvm::Value *TaskPrivatesMap) {
2981 ASTContext &C = CGM.getContext();
2982 FunctionArgList Args;
2983 ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty,
2984 ImplicitParamKind::Other);
2985 ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2986 KmpTaskTWithPrivatesPtrQTy.withRestrict(),
2987 ImplicitParamKind::Other);
2988 Args.push_back(&GtidArg);
2989 Args.push_back(&TaskTypeArg);
2990 const auto &TaskEntryFnInfo =
2991 CGM.getTypes().arrangeBuiltinFunctionDeclaration(KmpInt32Ty, Args);
2992 llvm::FunctionType *TaskEntryTy =
2993 CGM.getTypes().GetFunctionType(TaskEntryFnInfo);
2994 std::string Name = CGM.getOpenMPRuntime().getName({"omp_task_entry", ""});
2995 auto *TaskEntry = llvm::Function::Create(
2996 TaskEntryTy, llvm::GlobalValue::InternalLinkage, Name, &CGM.getModule());
2997 CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskEntry, TaskEntryFnInfo);
2998 TaskEntry->setDoesNotRecurse();
2999 CodeGenFunction CGF(CGM);
3000 CGF.StartFunction(GlobalDecl(), KmpInt32Ty, TaskEntry, TaskEntryFnInfo, Args,
3001 Loc, Loc);
3002
3003 // TaskFunction(gtid, tt->task_data.part_id, &tt->privates, task_privates_map,
3004 // tt,
3005 // For taskloops:
3006 // tt->task_data.lb, tt->task_data.ub, tt->task_data.st, tt->task_data.liter,
3007 // tt->task_data.shareds);
3008 llvm::Value *GtidParam = CGF.EmitLoadOfScalar(
3009 CGF.GetAddrOfLocalVar(&GtidArg), /*Volatile=*/false, KmpInt32Ty, Loc);
3010 LValue TDBase = CGF.EmitLoadOfPointerLValue(
3011 CGF.GetAddrOfLocalVar(&TaskTypeArg),
3012 KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
3013 const auto *KmpTaskTWithPrivatesQTyRD =
3014 cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
3015 LValue Base =
3016 CGF.EmitLValueForField(TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
3017 const auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
3018 auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
3019 LValue PartIdLVal = CGF.EmitLValueForField(Base, *PartIdFI);
3020 llvm::Value *PartidParam = PartIdLVal.getPointer(CGF);
3021
3022 auto SharedsFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTShareds);
3023 LValue SharedsLVal = CGF.EmitLValueForField(Base, *SharedsFI);
3024 llvm::Value *SharedsParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3025 CGF.EmitLoadOfScalar(SharedsLVal, Loc),
3026 CGF.ConvertTypeForMem(SharedsPtrTy));
3027
3028 auto PrivatesFI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin(), 1);
3029 llvm::Value *PrivatesParam;
3030 if (PrivatesFI != KmpTaskTWithPrivatesQTyRD->field_end()) {
3031 LValue PrivatesLVal = CGF.EmitLValueForField(TDBase, *PrivatesFI);
3032 PrivatesParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3033 PrivatesLVal.getPointer(CGF), CGF.VoidPtrTy);
3034 } else {
3035 PrivatesParam = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
3036 }
3037
3038 llvm::Value *CommonArgs[] = {
3039 GtidParam, PartidParam, PrivatesParam, TaskPrivatesMap,
3040 CGF.Builder
3041 .CreatePointerBitCastOrAddrSpaceCast(TDBase.getAddress(),
3042 CGF.VoidPtrTy, CGF.Int8Ty)
3043 .emitRawPointer(CGF)};
3044 SmallVector<llvm::Value *, 16> CallArgs(std::begin(CommonArgs),
3045 std::end(CommonArgs));
3046 if (isOpenMPTaskLoopDirective(Kind)) {
3047 auto LBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound);
3048 LValue LBLVal = CGF.EmitLValueForField(Base, *LBFI);
3049 llvm::Value *LBParam = CGF.EmitLoadOfScalar(LBLVal, Loc);
3050 auto UBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound);
3051 LValue UBLVal = CGF.EmitLValueForField(Base, *UBFI);
3052 llvm::Value *UBParam = CGF.EmitLoadOfScalar(UBLVal, Loc);
3053 auto StFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTStride);
3054 LValue StLVal = CGF.EmitLValueForField(Base, *StFI);
3055 llvm::Value *StParam = CGF.EmitLoadOfScalar(StLVal, Loc);
3056 auto LIFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLastIter);
3057 LValue LILVal = CGF.EmitLValueForField(Base, *LIFI);
3058 llvm::Value *LIParam = CGF.EmitLoadOfScalar(LILVal, Loc);
3059 auto RFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTReductions);
3060 LValue RLVal = CGF.EmitLValueForField(Base, *RFI);
3061 llvm::Value *RParam = CGF.EmitLoadOfScalar(RLVal, Loc);
3062 CallArgs.push_back(LBParam);
3063 CallArgs.push_back(UBParam);
3064 CallArgs.push_back(StParam);
3065 CallArgs.push_back(LIParam);
3066 CallArgs.push_back(RParam);
3067 }
3068 CallArgs.push_back(SharedsParam);
3069
3070 CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, Loc, TaskFunction,
3071 CallArgs);
3072 CGF.EmitStoreThroughLValue(RValue::get(CGF.Builder.getInt32(/*C=*/0)),
3073 CGF.MakeAddrLValue(CGF.ReturnValue, KmpInt32Ty));
3074 CGF.FinishFunction();
3075 return TaskEntry;
3076}
3077
3078 static llvm::Value *
3079 emitDestructorsFunction(CodeGenModule &CGM, SourceLocation Loc,
3080 QualType KmpInt32Ty,
3081 QualType KmpTaskTWithPrivatesPtrQTy,
3082 QualType KmpTaskTWithPrivatesQTy) {
3083 ASTContext &C = CGM.getContext();
3084 FunctionArgList Args;
3085 ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty,
3086 ImplicitParamKind::Other);
3087 ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3088 KmpTaskTWithPrivatesPtrQTy.withRestrict(),
3089 ImplicitParamKind::Other);
3090 Args.push_back(&GtidArg);
3091 Args.push_back(&TaskTypeArg);
3092 const auto &DestructorFnInfo =
3093 CGM.getTypes().arrangeBuiltinFunctionDeclaration(KmpInt32Ty, Args);
3094 llvm::FunctionType *DestructorFnTy =
3095 CGM.getTypes().GetFunctionType(DestructorFnInfo);
3096 std::string Name =
3097 CGM.getOpenMPRuntime().getName({"omp_task_destructor", ""});
3098 auto *DestructorFn =
3099 llvm::Function::Create(DestructorFnTy, llvm::GlobalValue::InternalLinkage,
3100 Name, &CGM.getModule());
3101 CGM.SetInternalFunctionAttributes(GlobalDecl(), DestructorFn,
3102 DestructorFnInfo);
3103 DestructorFn->setDoesNotRecurse();
3104 CodeGenFunction CGF(CGM);
3105 CGF.StartFunction(GlobalDecl(), KmpInt32Ty, DestructorFn, DestructorFnInfo,
3106 Args, Loc, Loc);
3107
3108 LValue Base = CGF.EmitLoadOfPointerLValue(
3109 CGF.GetAddrOfLocalVar(&TaskTypeArg),
3110 KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
3111 const auto *KmpTaskTWithPrivatesQTyRD =
3112 cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
3113 auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
3114 Base = CGF.EmitLValueForField(Base, *FI);
3115 for (const auto *Field :
3116 cast<RecordDecl>(FI->getType()->getAsTagDecl())->fields()) {
3117 if (QualType::DestructionKind DtorKind =
3118 Field->getType().isDestructedType()) {
3119 LValue FieldLValue = CGF.EmitLValueForField(Base, Field);
3120 CGF.pushDestroy(DtorKind, FieldLValue.getAddress(), Field->getType());
3121 }
3122 }
3123 CGF.FinishFunction();
3124 return DestructorFn;
3125}
3126
3127/// Emit a privates mapping function for correct handling of private and
3128/// firstprivate variables.
3129/// \code
3130/// void .omp_task_privates_map.(const .privates. *noalias privs, <ty1>
3131/// **noalias priv1,..., <tyn> **noalias privn) {
3132/// *priv1 = &.privates.priv1;
3133/// ...;
3134/// *privn = &.privates.privn;
3135/// }
3136/// \endcode
3137static llvm::Value *
3138 emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
3139 const OMPTaskDataTy &Data, QualType PrivatesQTy,
3140 ArrayRef<PrivateDataTy> Privates) {
3141 ASTContext &C = CGM.getContext();
3142 FunctionArgList Args;
3143 ImplicitParamDecl TaskPrivatesArg(
3144 C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3145 C.getPointerType(PrivatesQTy).withConst().withRestrict(),
3146 ImplicitParamKind::Other);
3147 Args.push_back(&TaskPrivatesArg);
3148 llvm::DenseMap<CanonicalDeclPtr<const VarDecl>, unsigned> PrivateVarsPos;
3149 unsigned Counter = 1;
3150 for (const Expr *E : Data.PrivateVars) {
3151 Args.push_back(ImplicitParamDecl::Create(
3152 C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3153 C.getPointerType(C.getPointerType(E->getType()))
3154 .withConst()
3155 .withRestrict(),
3156 ImplicitParamKind::Other));
3157 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
3158 PrivateVarsPos[VD] = Counter;
3159 ++Counter;
3160 }
3161 for (const Expr *E : Data.FirstprivateVars) {
3162 Args.push_back(ImplicitParamDecl::Create(
3163 C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3164 C.getPointerType(C.getPointerType(E->getType()))
3165 .withConst()
3166 .withRestrict(),
3167 ImplicitParamKind::Other));
3168 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
3169 PrivateVarsPos[VD] = Counter;
3170 ++Counter;
3171 }
3172 for (const Expr *E : Data.LastprivateVars) {
3173 Args.push_back(ImplicitParamDecl::Create(
3174 C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3175 C.getPointerType(C.getPointerType(E->getType()))
3176 .withConst()
3177 .withRestrict(),
3178 ImplicitParamKind::Other));
3179 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
3180 PrivateVarsPos[VD] = Counter;
3181 ++Counter;
3182 }
3183 for (const VarDecl *VD : Data.PrivateLocals) {
3184 QualType Ty = VD->getType().getNonReferenceType();
3185 if (VD->getType()->isLValueReferenceType())
3186 Ty = C.getPointerType(Ty);
3187 if (isAllocatableDecl(VD))
3188 Ty = C.getPointerType(Ty);
3189 Args.push_back(ImplicitParamDecl::Create(
3190 C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3191 C.getPointerType(C.getPointerType(Ty)).withConst().withRestrict(),
3192 ImplicitParamKind::Other));
3193 PrivateVarsPos[VD] = Counter;
3194 ++Counter;
3195 }
3196 const auto &TaskPrivatesMapFnInfo =
3197 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3198 llvm::FunctionType *TaskPrivatesMapTy =
3199 CGM.getTypes().GetFunctionType(TaskPrivatesMapFnInfo);
3200 std::string Name =
3201 CGM.getOpenMPRuntime().getName({"omp_task_privates_map", ""});
3202 auto *TaskPrivatesMap = llvm::Function::Create(
3203 TaskPrivatesMapTy, llvm::GlobalValue::InternalLinkage, Name,
3204 &CGM.getModule());
3205 CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskPrivatesMap,
3206 TaskPrivatesMapFnInfo);
3207 if (CGM.getLangOpts().Optimize) {
3208 TaskPrivatesMap->removeFnAttr(llvm::Attribute::NoInline);
3209 TaskPrivatesMap->removeFnAttr(llvm::Attribute::OptimizeNone);
3210 TaskPrivatesMap->addFnAttr(llvm::Attribute::AlwaysInline);
3211 }
3212 CodeGenFunction CGF(CGM);
3213 CGF.StartFunction(GlobalDecl(), C.VoidTy, TaskPrivatesMap,
3214 TaskPrivatesMapFnInfo, Args, Loc, Loc);
3215
3216 // *privi = &.privates.privi;
3217 LValue Base = CGF.EmitLoadOfPointerLValue(
3218 CGF.GetAddrOfLocalVar(&TaskPrivatesArg),
3219 TaskPrivatesArg.getType()->castAs<PointerType>());
3220 const auto *PrivatesQTyRD = cast<RecordDecl>(PrivatesQTy->getAsTagDecl());
3221 Counter = 0;
3222 for (const FieldDecl *Field : PrivatesQTyRD->fields()) {
3223 LValue FieldLVal = CGF.EmitLValueForField(Base, Field);
3224 const VarDecl *VD = Args[PrivateVarsPos[Privates[Counter].second.Original]];
3225 LValue RefLVal =
3226 CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
3227 LValue RefLoadLVal = CGF.EmitLoadOfPointerLValue(
3228 RefLVal.getAddress(), RefLVal.getType()->castAs<PointerType>());
3229 CGF.EmitStoreOfScalar(FieldLVal.getPointer(CGF), RefLoadLVal);
3230 ++Counter;
3231 }
3232 CGF.FinishFunction();
3233 return TaskPrivatesMap;
3234}
3235
3236/// Emit initialization for private variables in task-based directives.
3237 static void emitPrivatesInit(CodeGenFunction &CGF,
3238 const OMPExecutableDirective &D,
3239 Address KmpTaskSharedsPtr, LValue TDBase,
3240 const RecordDecl *KmpTaskTWithPrivatesQTyRD,
3241 QualType SharedsTy, QualType SharedsPtrTy,
3242 const OMPTaskDataTy &Data,
3243 ArrayRef<PrivateDataTy> Privates, bool ForDup) {
3244 ASTContext &C = CGF.getContext();
3245 auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
3246 LValue PrivatesBase = CGF.EmitLValueForField(TDBase, *FI);
3247 OpenMPDirectiveKind Kind = isOpenMPTaskLoopDirective(D.getDirectiveKind())
3248 ? OMPD_taskloop
3249 : OMPD_task;
3250 const CapturedStmt &CS = *D.getCapturedStmt(Kind);
3251 CodeGenFunction::CGCapturedStmtInfo CapturesInfo(CS);
3252 LValue SrcBase;
3253 bool IsTargetTask =
3254 isOpenMPTargetDataManagementDirective(D.getDirectiveKind()) ||
3255 isOpenMPTargetExecutionDirective(D.getDirectiveKind());
3256 // For target-based directives, skip the four firstprivate arrays
3257 // BasePointersArray, PointersArray, SizesArray, and MappersArray; the
3258 // original variables for these arrays are not captured, so we take their addresses explicitly.
3259 if ((!IsTargetTask && !Data.FirstprivateVars.empty() && ForDup) ||
3260 (IsTargetTask && KmpTaskSharedsPtr.isValid())) {
3261 SrcBase = CGF.MakeAddrLValue(
3262 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3263 KmpTaskSharedsPtr, CGF.ConvertTypeForMem(SharedsPtrTy),
3264 CGF.ConvertTypeForMem(SharedsTy)),
3265 SharedsTy);
3266 }
3267 FI = cast<RecordDecl>(FI->getType()->getAsTagDecl())->field_begin();
3268 for (const PrivateDataTy &Pair : Privates) {
3269 // Do not initialize private locals.
3270 if (Pair.second.isLocalPrivate()) {
3271 ++FI;
3272 continue;
3273 }
3274 const VarDecl *VD = Pair.second.PrivateCopy;
3275 const Expr *Init = VD->getAnyInitializer();
3276 if (Init && (!ForDup || (isa<CXXConstructExpr>(Init) &&
3277 !CGF.isTrivialInitializer(Init)))) {
3278 LValue PrivateLValue = CGF.EmitLValueForField(PrivatesBase, *FI);
3279 if (const VarDecl *Elem = Pair.second.PrivateElemInit) {
3280 const VarDecl *OriginalVD = Pair.second.Original;
3281 // Check if the variable is the target-based BasePointersArray,
3282 // PointersArray, SizesArray, or MappersArray.
3283 LValue SharedRefLValue;
3284 QualType Type = PrivateLValue.getType();
3285 const FieldDecl *SharedField = CapturesInfo.lookup(OriginalVD);
3286 if (IsTargetTask && !SharedField) {
3287 assert(isa<ImplicitParamDecl>(OriginalVD) &&
3288 isa<CapturedDecl>(OriginalVD->getDeclContext()) &&
3289 cast<CapturedDecl>(OriginalVD->getDeclContext())
3290 ->getNumParams() == 0 &&
3291 isa<TranslationUnitDecl>(
3292 cast<CapturedDecl>(OriginalVD->getDeclContext())
3293 ->getDeclContext()) &&
3294 "Expected artificial target data variable.");
3295 SharedRefLValue =
3296 CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(OriginalVD), Type);
3297 } else if (ForDup) {
3298 SharedRefLValue = CGF.EmitLValueForField(SrcBase, SharedField);
3299 SharedRefLValue = CGF.MakeAddrLValue(
3300 SharedRefLValue.getAddress().withAlignment(
3301 C.getDeclAlign(OriginalVD)),
3302 SharedRefLValue.getType(), LValueBaseInfo(AlignmentSource::Decl),
3303 SharedRefLValue.getTBAAInfo());
3304 } else if (CGF.LambdaCaptureFields.count(
3305 Pair.second.Original->getCanonicalDecl()) > 0 ||
3306 isa_and_nonnull<BlockDecl>(CGF.CurCodeDecl)) {
3307 SharedRefLValue = CGF.EmitLValue(Pair.second.OriginalRef);
3308 } else {
3309 // Processing for implicitly captured variables.
3310 InlinedOpenMPRegionRAII Region(
3311 CGF, [](CodeGenFunction &, PrePostActionTy &) {}, OMPD_unknown,
3312 /*HasCancel=*/false, /*NoInheritance=*/true);
3313 SharedRefLValue = CGF.EmitLValue(Pair.second.OriginalRef);
3314 }
3315 if (Type->isArrayType()) {
3316 // Initialize firstprivate array.
3317 if (!isa<CXXConstructExpr>(Init) || CGF.isTrivialInitializer(Init)) {
3318 // Perform simple memcpy.
3319 CGF.EmitAggregateAssign(PrivateLValue, SharedRefLValue, Type);
3320 } else {
3321 // Initialize firstprivate array using element-by-element
3322 // initialization.
3323 CGF.EmitOMPAggregateAssign(
3324 PrivateLValue.getAddress(), SharedRefLValue.getAddress(), Type,
3325 [&CGF, Elem, Init, &CapturesInfo](Address DestElement,
3326 Address SrcElement) {
3327 // Clean up any temporaries needed by the initialization.
3328 CodeGenFunction::OMPPrivateScope InitScope(CGF);
3329 InitScope.addPrivate(Elem, SrcElement);
3330 (void)InitScope.Privatize();
3331 // Emit initialization for single element.
3332 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(
3333 CGF, &CapturesInfo);
3334 CGF.EmitAnyExprToMem(Init, DestElement,
3335 Init->getType().getQualifiers(),
3336 /*IsInitializer=*/false);
3337 });
3338 }
3339 } else {
3340 CodeGenFunction::OMPPrivateScope InitScope(CGF);
3341 InitScope.addPrivate(Elem, SharedRefLValue.getAddress());
3342 (void)InitScope.Privatize();
3343 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CapturesInfo);
3344 CGF.EmitExprAsInit(Init, VD, PrivateLValue,
3345 /*capturedByInit=*/false);
3346 }
3347 } else {
3348 CGF.EmitExprAsInit(Init, VD, PrivateLValue, /*capturedByInit=*/false);
3349 }
3350 }
3351 ++FI;
3352 }
3353}
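// E.g. for '#pragma omp task firstprivate(s)' where 's' is a std::string, the
// loop above emits a copy-construction of the private copy from the captured
// shared 's'; for a trivially copyable firstprivate array it degrades to a
// plain memcpy (EmitAggregateAssign).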
3354
3355 /// Check whether a task duplication function is required for taskloops.
3356 static bool checkInitIsRequired(CodeGenFunction &CGF,
3357 ArrayRef<PrivateDataTy> Privates) {
3358 bool InitRequired = false;
3359 for (const PrivateDataTy &Pair : Privates) {
3360 if (Pair.second.isLocalPrivate())
3361 continue;
3362 const VarDecl *VD = Pair.second.PrivateCopy;
3363 const Expr *Init = VD->getAnyInitializer();
3364 InitRequired = InitRequired || (isa_and_nonnull<CXXConstructExpr>(Init) &&
3365 !CGF.isTrivialInitializer(Init));
3366 if (InitRequired)
3367 break;
3368 }
3369 return InitRequired;
3370}
3371
3372
3373/// Emit task_dup function (for initialization of
3374/// private/firstprivate/lastprivate vars and last_iter flag)
3375/// \code
3376/// void __task_dup_entry(kmp_task_t *task_dst, const kmp_task_t *task_src, int
3377/// lastpriv) {
3378/// // setup lastprivate flag
3379/// task_dst->last = lastpriv;
3380/// // could be constructor calls here...
3381/// }
3382/// \endcode
3383static llvm::Value *
3384 emitTaskDupFunction(CodeGenModule &CGM, SourceLocation Loc,
3385 const OMPExecutableDirective &D,
3386 QualType KmpTaskTWithPrivatesPtrQTy,
3387 const RecordDecl *KmpTaskTWithPrivatesQTyRD,
3388 const RecordDecl *KmpTaskTQTyRD, QualType SharedsTy,
3389 QualType SharedsPtrTy, const OMPTaskDataTy &Data,
3390 ArrayRef<PrivateDataTy> Privates, bool WithLastIter) {
3391 ASTContext &C = CGM.getContext();
3392 FunctionArgList Args;
3393 ImplicitParamDecl DstArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3394 KmpTaskTWithPrivatesPtrQTy,
3395 ImplicitParamKind::Other);
3396 ImplicitParamDecl SrcArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3397 KmpTaskTWithPrivatesPtrQTy,
3398 ImplicitParamKind::Other);
3399 ImplicitParamDecl LastprivArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
3400 ImplicitParamKind::Other);
3401 Args.push_back(&DstArg);
3402 Args.push_back(&SrcArg);
3403 Args.push_back(&LastprivArg);
3404 const auto &TaskDupFnInfo =
3405 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3406 llvm::FunctionType *TaskDupTy = CGM.getTypes().GetFunctionType(TaskDupFnInfo);
3407 std::string Name = CGM.getOpenMPRuntime().getName({"omp_task_dup", ""});
3408 auto *TaskDup = llvm::Function::Create(
3409 TaskDupTy, llvm::GlobalValue::InternalLinkage, Name, &CGM.getModule());
3410 CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskDup, TaskDupFnInfo);
3411 TaskDup->setDoesNotRecurse();
3412 CodeGenFunction CGF(CGM);
3413 CGF.StartFunction(GlobalDecl(), C.VoidTy, TaskDup, TaskDupFnInfo, Args, Loc,
3414 Loc);
3415
3416 LValue TDBase = CGF.EmitLoadOfPointerLValue(
3417 CGF.GetAddrOfLocalVar(&DstArg),
3418 KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
3419 // task_dst->liter = lastpriv;
3420 if (WithLastIter) {
3421 auto LIFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLastIter);
3422 LValue Base = CGF.EmitLValueForField(
3423 TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
3424 LValue LILVal = CGF.EmitLValueForField(Base, *LIFI);
3425 llvm::Value *Lastpriv = CGF.EmitLoadOfScalar(
3426 CGF.GetAddrOfLocalVar(&LastprivArg), /*Volatile=*/false, C.IntTy, Loc);
3427 CGF.EmitStoreOfScalar(Lastpriv, LILVal);
3428 }
3429
3430 // Emit initial values for private copies (if any).
3431 assert(!Privates.empty());
3432 Address KmpTaskSharedsPtr = Address::invalid();
3433 if (!Data.FirstprivateVars.empty()) {
3434 LValue TDBase = CGF.EmitLoadOfPointerLValue(
3435 CGF.GetAddrOfLocalVar(&SrcArg),
3436 KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
3437 LValue Base = CGF.EmitLValueForField(
3438 TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
3439 KmpTaskSharedsPtr = Address(
3440 CGF.EmitLoadOfScalar(CGF.EmitLValueForField(
3441 Base, *std::next(KmpTaskTQTyRD->field_begin(),
3442 KmpTaskTShareds)),
3443 Loc),
3444 CGF.Int8Ty, CGM.getNaturalTypeAlignment(SharedsTy));
3445 }
3446 emitPrivatesInit(CGF, D, KmpTaskSharedsPtr, TDBase, KmpTaskTWithPrivatesQTyRD,
3447 SharedsTy, SharedsPtrTy, Data, Privates, /*ForDup=*/true);
3448 CGF.FinishFunction();
3449 return TaskDup;
3450}
3451
3452/// Checks if destructor function is required to be generated.
3453/// \return true if cleanups are required, false otherwise.
3454static bool
3455checkDestructorsRequired(const RecordDecl *KmpTaskTWithPrivatesQTyRD,
3456 ArrayRef<PrivateDataTy> Privates) {
3457 for (const PrivateDataTy &P : Privates) {
3458 if (P.second.isLocalPrivate())
3459 continue;
3460 QualType Ty = P.second.Original->getType().getNonReferenceType();
3461 if (Ty.isDestructedType())
3462 return true;
3463 }
3464 return false;
3465}
3466
3467namespace {
3468/// Loop generator for OpenMP iterator expression.
3469class OMPIteratorGeneratorScope final
3470 : public CodeGenFunction::OMPPrivateScope {
3471 CodeGenFunction &CGF;
3472 const OMPIteratorExpr *E = nullptr;
3473 SmallVector<CodeGenFunction::JumpDest, 4> ContDests;
3474 SmallVector<CodeGenFunction::JumpDest, 4> ExitDests;
3475 OMPIteratorGeneratorScope() = delete;
3476 OMPIteratorGeneratorScope(OMPIteratorGeneratorScope &) = delete;
3477
3478public:
3479 OMPIteratorGeneratorScope(CodeGenFunction &CGF, const OMPIteratorExpr *E)
3480 : CodeGenFunction::OMPPrivateScope(CGF), CGF(CGF), E(E) {
3481 if (!E)
3482 return;
3483 SmallVector<llvm::Value *, 4> Uppers;
3484 for (unsigned I = 0, End = E->numOfIterators(); I < End; ++I) {
3485 Uppers.push_back(CGF.EmitScalarExpr(E->getHelper(I).Upper));
3486 const auto *VD = cast<VarDecl>(E->getIteratorDecl(I));
3487 addPrivate(VD, CGF.CreateMemTemp(VD->getType(), VD->getName()));
3488 const OMPIteratorHelperData &HelperData = E->getHelper(I);
3489 addPrivate(
3490 HelperData.CounterVD,
3491 CGF.CreateMemTemp(HelperData.CounterVD->getType(), "counter.addr"));
3492 }
3493 Privatize();
3494
3495 for (unsigned I = 0, End = E->numOfIterators(); I < End; ++I) {
3496 const OMPIteratorHelperData &HelperData = E->getHelper(I);
3497 LValue CLVal =
3498 CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(HelperData.CounterVD),
3499 HelperData.CounterVD->getType());
3500 // Counter = 0;
3501 CGF.EmitStoreOfScalar(
3502 llvm::ConstantInt::get(CLVal.getAddress().getElementType(), 0),
3503 CLVal);
3504 CodeGenFunction::JumpDest &ContDest =
3505 ContDests.emplace_back(CGF.getJumpDestInCurrentScope("iter.cont"));
3506 CodeGenFunction::JumpDest &ExitDest =
3507 ExitDests.emplace_back(CGF.getJumpDestInCurrentScope("iter.exit"));
3508 // N = <number-of-iterations>;
3509 llvm::Value *N = Uppers[I];
3510 // cont:
3511 // if (Counter < N) goto body; else goto exit;
3512 CGF.EmitBlock(ContDest.getBlock());
3513 auto *CVal =
3514 CGF.EmitLoadOfScalar(CLVal, HelperData.CounterVD->getLocation());
3515 llvm::Value *Cmp =
3516 HelperData.CounterVD->getType()->isSignedIntegerOrEnumerationType()
3517 ? CGF.Builder.CreateICmpSLT(CVal, N)
3518 : CGF.Builder.CreateICmpULT(CVal, N);
3519 llvm::BasicBlock *BodyBB = CGF.createBasicBlock("iter.body");
3520 CGF.Builder.CreateCondBr(Cmp, BodyBB, ExitDest.getBlock());
3521 // body:
3522 CGF.EmitBlock(BodyBB);
3523 // Iteri = Begini + Counter * Stepi;
3524 CGF.EmitIgnoredExpr(HelperData.Update);
3525 }
3526 }
3527 ~OMPIteratorGeneratorScope() {
3528 if (!E)
3529 return;
3530 for (unsigned I = E->numOfIterators(); I > 0; --I) {
3531 // Counter = Counter + 1;
3532 const OMPIteratorHelperData &HelperData = E->getHelper(I - 1);
3533 CGF.EmitIgnoredExpr(HelperData.CounterUpdate);
3534 // goto cont;
3535 CGF.EmitBranchThroughCleanup(ContDests[I - 1]);
3536 // exit:
3537 CGF.EmitBlock(ExitDests[I - 1].getBlock(), /*IsFinished=*/I == 1);
3538 }
3539 }
3540};
3541} // namespace
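// For reference, the scope above expands an OpenMP iterator modifier such as
// (illustrative source, not from this file):
//   #pragma omp task depend(iterator(i = 0 : n), in : a[i])
// into a counter loop: for (counter = 0; counter < n; ++counter) the body
// computes i = begin + counter * step and emits one dependency element.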
3542
3543static std::pair<llvm::Value *, llvm::Value *>
3544 getPointerAndSize(CodeGenFunction &CGF, const Expr *E) {
3545 const auto *OASE = dyn_cast<OMPArrayShapingExpr>(E);
3546 llvm::Value *Addr;
3547 if (OASE) {
3548 const Expr *Base = OASE->getBase();
3549 Addr = CGF.EmitScalarExpr(Base);
3550 } else {
3551 Addr = CGF.EmitLValue(E).getPointer(CGF);
3552 }
3553 llvm::Value *SizeVal;
3554 QualType Ty = E->getType();
3555 if (OASE) {
3556 SizeVal = CGF.getTypeSize(OASE->getBase()->getType()->getPointeeType());
3557 for (const Expr *SE : OASE->getDimensions()) {
3558 llvm::Value *Sz = CGF.EmitScalarExpr(SE);
3559 Sz = CGF.EmitScalarConversion(
3560 Sz, SE->getType(), CGF.getContext().getSizeType(), SE->getExprLoc());
3561 SizeVal = CGF.Builder.CreateNUWMul(SizeVal, Sz);
3562 }
3563 } else if (const auto *ASE =
3564 dyn_cast<ArraySectionExpr>(E->IgnoreParenImpCasts())) {
3565 LValue UpAddrLVal = CGF.EmitArraySectionExpr(ASE, /*IsLowerBound=*/false);
3566 Address UpAddrAddress = UpAddrLVal.getAddress();
3567 llvm::Value *UpAddr = CGF.Builder.CreateConstGEP1_32(
3568 UpAddrAddress.getElementType(), UpAddrAddress.emitRawPointer(CGF),
3569 /*Idx0=*/1);
3570 llvm::Value *LowIntPtr = CGF.Builder.CreatePtrToInt(Addr, CGF.SizeTy);
3571 llvm::Value *UpIntPtr = CGF.Builder.CreatePtrToInt(UpAddr, CGF.SizeTy);
3572 SizeVal = CGF.Builder.CreateNUWSub(UpIntPtr, LowIntPtr);
3573 } else {
3574 SizeVal = CGF.getTypeSize(Ty);
3575 }
3576 return std::make_pair(Addr, SizeVal);
3577}
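// A worked example of getPointerAndSize (illustrative): for the array section
// 'a[2:n]' of 'int a[N]', Addr is &a[2] and SizeVal is computed as
// (char *)(&a[2 + n - 1] + 1) - (char *)&a[2], i.e. n * sizeof(int); for a
// shaped expression '([n][m])p', SizeVal is n * m * sizeof(*p).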
3578
3579 /// Builds kmp_task_affinity_info_t, if it is not built yet, and builds flags type.
3580static void getKmpAffinityType(ASTContext &C, QualType &KmpTaskAffinityInfoTy) {
3581 QualType FlagsTy = C.getIntTypeForBitwidth(32, /*Signed=*/false);
3582 if (KmpTaskAffinityInfoTy.isNull()) {
3583 RecordDecl *KmpAffinityInfoRD =
3584 C.buildImplicitRecord("kmp_task_affinity_info_t");
3585 KmpAffinityInfoRD->startDefinition();
3586 addFieldToRecordDecl(C, KmpAffinityInfoRD, C.getIntPtrType());
3587 addFieldToRecordDecl(C, KmpAffinityInfoRD, C.getSizeType());
3588 addFieldToRecordDecl(C, KmpAffinityInfoRD, FlagsTy);
3589 KmpAffinityInfoRD->completeDefinition();
3590 KmpTaskAffinityInfoTy = C.getRecordType(KmpAffinityInfoRD);
3591 }
3592}
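/// The record built above corresponds to the following layout (a sketch; the
/// runtime's authoritative definition lives in openmp/runtime/src/kmp.h):
/// \code
/// struct kmp_task_affinity_info_t {
///   intptr_t base_addr;
///   size_t len;
///   uint32_t flags;
/// };
/// \endcode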
3593
3594 CGOpenMPRuntime::TaskResultTy
3595 CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
3596 const OMPExecutableDirective &D,
3597 llvm::Function *TaskFunction, QualType SharedsTy,
3598 Address Shareds, const OMPTaskDataTy &Data) {
3599 ASTContext &C = CGM.getContext();
3600 SmallVector<PrivateDataTy, 4> Privates;
3601 // Aggregate privates and sort them by alignment.
3602 const auto *I = Data.PrivateCopies.begin();
3603 for (const Expr *E : Data.PrivateVars) {
3604 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
3605 Privates.emplace_back(
3606 C.getDeclAlign(VD),
3607 PrivateHelpersTy(E, VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
3608 /*PrivateElemInit=*/nullptr));
3609 ++I;
3610 }
3611 I = Data.FirstprivateCopies.begin();
3612 const auto *IElemInitRef = Data.FirstprivateInits.begin();
3613 for (const Expr *E : Data.FirstprivateVars) {
3614 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
3615 Privates.emplace_back(
3616 C.getDeclAlign(VD),
3617 PrivateHelpersTy(
3618 E, VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
3619 cast<VarDecl>(cast<DeclRefExpr>(*IElemInitRef)->getDecl())));
3620 ++I;
3621 ++IElemInitRef;
3622 }
3623 I = Data.LastprivateCopies.begin();
3624 for (const Expr *E : Data.LastprivateVars) {
3625 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
3626 Privates.emplace_back(
3627 C.getDeclAlign(VD),
3628 PrivateHelpersTy(E, VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
3629 /*PrivateElemInit=*/nullptr));
3630 ++I;
3631 }
3632 for (const VarDecl *VD : Data.PrivateLocals) {
3633 if (isAllocatableDecl(VD))
3634 Privates.emplace_back(CGM.getPointerAlign(), PrivateHelpersTy(VD));
3635 else
3636 Privates.emplace_back(C.getDeclAlign(VD), PrivateHelpersTy(VD));
3637 }
3638 llvm::stable_sort(Privates,
3639 [](const PrivateDataTy &L, const PrivateDataTy &R) {
3640 return L.first > R.first;
3641 });
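// After the stable sort, higher-aligned privates come first, e.g. a 'double'
// copy (align 8) precedes an 'int' copy (align 4), which precedes a 'char'
// copy (align 1); entries with equal alignment keep their clause order.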
3642 QualType KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
3643 // Build type kmp_routine_entry_t (if not built yet).
3644 emitKmpRoutineEntryT(KmpInt32Ty);
3645 // Build type kmp_task_t (if not built yet).
3646 if (isOpenMPTaskLoopDirective(D.getDirectiveKind())) {
3647 if (SavedKmpTaskloopTQTy.isNull()) {
3648 SavedKmpTaskloopTQTy = C.getRecordType(createKmpTaskTRecordDecl(
3649 CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
3650 }
3651 KmpTaskTQTy = SavedKmpTaskloopTQTy;
3652 } else {
3653 assert((D.getDirectiveKind() == OMPD_task ||
3654 isOpenMPTargetExecutionDirective(D.getDirectiveKind()) ||
3655 isOpenMPTargetDataManagementDirective(D.getDirectiveKind())) &&
3656 "Expected taskloop, task or target directive");
3657 if (SavedKmpTaskTQTy.isNull()) {
3658 SavedKmpTaskTQTy = C.getRecordType(createKmpTaskTRecordDecl(
3659 CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
3660 }
3661 KmpTaskTQTy = SavedKmpTaskTQTy;
3662 }
3663 const auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
3664 // Build particular struct kmp_task_t for the given task.
3665 const RecordDecl *KmpTaskTWithPrivatesQTyRD =
3666 createKmpTaskTWithPrivatesRecordDecl(CGM, KmpTaskTQTy, Privates);
3667 QualType KmpTaskTWithPrivatesQTy = C.getRecordType(KmpTaskTWithPrivatesQTyRD);
3668 QualType KmpTaskTWithPrivatesPtrQTy =
3669 C.getPointerType(KmpTaskTWithPrivatesQTy);
3670 llvm::Type *KmpTaskTWithPrivatesPtrTy = CGF.Builder.getPtrTy(0);
3671 llvm::Value *KmpTaskTWithPrivatesTySize =
3672 CGF.getTypeSize(KmpTaskTWithPrivatesQTy);
3673 QualType SharedsPtrTy = C.getPointerType(SharedsTy);
3674
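// The resulting task record is conceptually (a sketch mirroring
// createKmpTaskTWithPrivatesRecordDecl; names are illustrative):
//   struct kmp_task_t_with_privates {
//     kmp_task_t task_data;
//     .kmp_privates.t privates; // present only if Privates is non-empty
//   };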
3675 // Emit initial values for private copies (if any).
3676 llvm::Value *TaskPrivatesMap = nullptr;
3677 llvm::Type *TaskPrivatesMapTy =
3678 std::next(TaskFunction->arg_begin(), 3)->getType();
3679 if (!Privates.empty()) {
3680 auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
3681 TaskPrivatesMap =
3682 emitTaskPrivateMappingFunction(CGM, Loc, Data, FI->getType(), Privates);
3683 TaskPrivatesMap = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3684 TaskPrivatesMap, TaskPrivatesMapTy);
3685 } else {
3686 TaskPrivatesMap = llvm::ConstantPointerNull::get(
3687 cast<llvm::PointerType>(TaskPrivatesMapTy));
3688 }
3689 // Build a proxy function kmp_int32 .omp_task_entry.(kmp_int32 gtid,
3690 // kmp_task_t *tt);
3691 llvm::Function *TaskEntry = emitProxyTaskFunction(
3692 CGM, Loc, D.getDirectiveKind(), KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy,
3693 KmpTaskTWithPrivatesQTy, KmpTaskTQTy, SharedsPtrTy, TaskFunction,
3694 TaskPrivatesMap);
3695
3696 // Build call kmp_task_t * __kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
3697 // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
3698 // kmp_routine_entry_t *task_entry);
3699 // Task flags. Format is taken from
3700 // https://github.com/llvm/llvm-project/blob/main/openmp/runtime/src/kmp.h,
3701 // description of kmp_tasking_flags struct.
3702 enum {
3703 TiedFlag = 0x1,
3704 FinalFlag = 0x2,
3705 DestructorsFlag = 0x8,
3706 PriorityFlag = 0x20,
3707 DetachableFlag = 0x40,
3708 };
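// For example, a tied task with a priority clause and non-trivially
// destructible privates is allocated with
// TiedFlag | DestructorsFlag | PriorityFlag == 0x1 | 0x8 | 0x20 == 0x29.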
3709 unsigned Flags = Data.Tied ? TiedFlag : 0;
3710 bool NeedsCleanup = false;
3711 if (!Privates.empty()) {
3712 NeedsCleanup =
3713 checkDestructorsRequired(KmpTaskTWithPrivatesQTyRD, Privates);
3714 if (NeedsCleanup)
3715 Flags = Flags | DestructorsFlag;
3716 }
3717 if (Data.Priority.getInt())
3718 Flags = Flags | PriorityFlag;
3719 if (D.hasClausesOfKind<OMPDetachClause>())
3720 Flags = Flags | DetachableFlag;
3721 llvm::Value *TaskFlags =
3722 Data.Final.getPointer()
3723 ? CGF.Builder.CreateSelect(Data.Final.getPointer(),
3724 CGF.Builder.getInt32(FinalFlag),
3725 CGF.Builder.getInt32(/*C=*/0))
3726 : CGF.Builder.getInt32(Data.Final.getInt() ? FinalFlag : 0);
3727 TaskFlags = CGF.Builder.CreateOr(TaskFlags, CGF.Builder.getInt32(Flags));
3728 llvm::Value *SharedsSize = CGM.getSize(C.getTypeSizeInChars(SharedsTy));
3729 SmallVector<llvm::Value *, 8> AllocArgs = {emitUpdateLocation(CGF, Loc),
3730 getThreadID(CGF, Loc), TaskFlags, KmpTaskTWithPrivatesTySize,
3731 SharedsSize, CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3732 TaskEntry, KmpRoutineEntryPtrTy)};
3733 llvm::Value *NewTask;
3734 if (D.hasClausesOfKind<OMPNowaitClause>()) {
3735 // Check if we have any device clause associated with the directive.
3736 const Expr *Device = nullptr;
3737 if (auto *C = D.getSingleClause<OMPDeviceClause>())
3738 Device = C->getDevice();
3739 // Emit the device ID if present; otherwise use the default value.
3740 llvm::Value *DeviceID;
3741 if (Device)
3742 DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
3743 CGF.Int64Ty, /*isSigned=*/true);
3744 else
3745 DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
3746 AllocArgs.push_back(DeviceID);
3747 NewTask = CGF.EmitRuntimeCall(
3748 OMPBuilder.getOrCreateRuntimeFunction(
3749 CGM.getModule(), OMPRTL___kmpc_omp_target_task_alloc),
3750 AllocArgs);
3751 } else {
3752 NewTask =
3753 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
3754 CGM.getModule(), OMPRTL___kmpc_omp_task_alloc),
3755 AllocArgs);
3756 }
3757 // Emit detach clause initialization.
3758 // evt = (typeof(evt))__kmpc_task_allow_completion_event(loc, tid,
3759 // task_descriptor);
3760 if (const auto *DC = D.getSingleClause<OMPDetachClause>()) {
3761 const Expr *Evt = DC->getEventHandler()->IgnoreParenImpCasts();
3762 LValue EvtLVal = CGF.EmitLValue(Evt);
3763
3764 // Build kmp_event_t *__kmpc_task_allow_completion_event(ident_t *loc_ref,
3765 // int gtid, kmp_task_t *task);
3766 llvm::Value *Loc = emitUpdateLocation(CGF, DC->getBeginLoc());
3767 llvm::Value *Tid = getThreadID(CGF, DC->getBeginLoc());
3768 Tid = CGF.Builder.CreateIntCast(Tid, CGF.IntTy, /*isSigned=*/false);
3769 llvm::Value *EvtVal = CGF.EmitRuntimeCall(
3770 OMPBuilder.getOrCreateRuntimeFunction(
3771 CGM.getModule(), OMPRTL___kmpc_task_allow_completion_event),
3772 {Loc, Tid, NewTask});
3773 EvtVal = CGF.EmitScalarConversion(EvtVal, C.VoidPtrTy, Evt->getType(),
3774 Evt->getExprLoc());
3775 CGF.EmitStoreOfScalar(EvtVal, EvtLVal);
3776 }
3777 // Process affinity clauses.
3778 if (D.hasClausesOfKind<OMPAffinityClause>()) {
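// An illustrative use (not from this file):
//   #pragma omp task affinity(a, b[0:n]) affinity(iterator(i = 0 : k): c[i])
// The first clause contributes two entries counted at compile time; the
// iterator clause's entries are counted at run time from the iterator's
// trip count.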
3779 // Process list of affinity data.
3780 ASTContext &C = CGM.getContext();
3781 Address AffinitiesArray = Address::invalid();
3782 // Calculate number of elements to form the array of affinity data.
3783 llvm::Value *NumOfElements = nullptr;
3784 unsigned NumAffinities = 0;
3785 for (const auto *C : D.getClausesOfKind<OMPAffinityClause>()) {
3786 if (const Expr *Modifier = C->getModifier()) {
3787 const auto *IE = cast<OMPIteratorExpr>(Modifier->IgnoreParenImpCasts());
3788 for (unsigned I = 0, E = IE->numOfIterators(); I < E; ++I) {
3789 llvm::Value *Sz = CGF.EmitScalarExpr(IE->getHelper(I).Upper);
3790 Sz = CGF.Builder.CreateIntCast(Sz, CGF.SizeTy, /*isSigned=*/false);
3791 NumOfElements =
3792 NumOfElements ? CGF.Builder.CreateNUWMul(NumOfElements, Sz) : Sz;
3793 }
3794 } else {
3795 NumAffinities += C->varlist_size();
3796 }
3797 }
3798 getKmpAffinityType(CGM.getContext(), KmpTaskAffinityInfoTy);
3799 // Field ids in the kmp_task_affinity_info record.
3800 enum RTLAffinityInfoFieldsTy { BaseAddr, Len, Flags };
3801
3802 QualType KmpTaskAffinityInfoArrayTy;
3803 if (NumOfElements) {
3804 NumOfElements = CGF.Builder.CreateNUWAdd(
3805 llvm::ConstantInt::get(CGF.SizeTy, NumAffinities), NumOfElements);
3806 auto *OVE = new (C) OpaqueValueExpr(
3807 Loc,
3808 C.getIntTypeForBitwidth(C.getTypeSize(C.getSizeType()), /*Signed=*/0),
3809 VK_PRValue);
3810 CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, OVE,
3811 RValue::get(NumOfElements));
3812 KmpTaskAffinityInfoArrayTy = C.getVariableArrayType(
3813 KmpTaskAffinityInfoTy, OVE, ArraySizeModifier::Normal,
3814 /*IndexTypeQuals=*/0, SourceRange(Loc, Loc));
3815 // Properly emit variable-sized array.
3816 auto *PD = ImplicitParamDecl::Create(C, KmpTaskAffinityInfoArrayTy,
3817 ImplicitParamKind::Other);
3818 CGF.EmitVarDecl(*PD);
3819 AffinitiesArray = CGF.GetAddrOfLocalVar(PD);
3820 NumOfElements = CGF.Builder.CreateIntCast(NumOfElements, CGF.Int32Ty,
3821 /*isSigned=*/false);
3822 } else {
3823 KmpTaskAffinityInfoArrayTy = C.getConstantArrayType(
3824 KmpTaskAffinityInfoTy,
3825 llvm::APInt(C.getTypeSize(C.getSizeType()), NumAffinities), nullptr,
3826 ArraySizeModifier::Normal, /*IndexTypeQuals=*/0);
3827 AffinitiesArray =
3828 CGF.CreateMemTemp(KmpTaskAffinityInfoArrayTy, ".affs.arr.addr");
3829 AffinitiesArray = CGF.Builder.CreateConstArrayGEP(AffinitiesArray, 0);
3830 NumOfElements = llvm::ConstantInt::get(CGM.Int32Ty, NumAffinities,
3831 /*isSigned=*/false);
3832 }
3833
3834 const auto *KmpAffinityInfoRD = KmpTaskAffinityInfoTy->getAsRecordDecl();
3835 // Fill array by elements without iterators.
3836 unsigned Pos = 0;
3837 bool HasIterator = false;
3838 for (const auto *C : D.getClausesOfKind<OMPAffinityClause>()) {
3839 if (C->getModifier()) {
3840 HasIterator = true;
3841 continue;
3842 }
3843 for (const Expr *E : C->varlist()) {
3844 llvm::Value *Addr;
3845 llvm::Value *Size;
3846 std::tie(Addr, Size) = getPointerAndSize(CGF, E);
3847 LValue Base =
3848 CGF.MakeAddrLValue(CGF.Builder.CreateConstGEP(AffinitiesArray, Pos),
3849 KmpTaskAffinityInfoTy);
3850 // affs[i].base_addr = &<Affinities[i].second>;
3851 LValue BaseAddrLVal = CGF.EmitLValueForField(
3852 Base, *std::next(KmpAffinityInfoRD->field_begin(), BaseAddr));
3853 CGF.EmitStoreOfScalar(CGF.Builder.CreatePtrToInt(Addr, CGF.IntPtrTy),
3854 BaseAddrLVal);
3855 // affs[i].len = sizeof(<Affinities[i].second>);
3856 LValue LenLVal = CGF.EmitLValueForField(
3857 Base, *std::next(KmpAffinityInfoRD->field_begin(), Len));
3858 CGF.EmitStoreOfScalar(Size, LenLVal);
3859 ++Pos;
3860 }
3861 }
3862 LValue PosLVal;
3863 if (HasIterator) {
3864 PosLVal = CGF.MakeAddrLValue(
3865 CGF.CreateMemTemp(C.getSizeType(), "affs.counter.addr"),
3866 C.getSizeType());
3867 CGF.EmitStoreOfScalar(llvm::ConstantInt::get(CGF.SizeTy, Pos), PosLVal);
3868 }
3869 // Process elements with iterators.
3870 for (const auto *C : D.getClausesOfKind<OMPAffinityClause>()) {
3871 const Expr *Modifier = C->getModifier();
3872 if (!Modifier)
3873 continue;
3874 OMPIteratorGeneratorScope IteratorScope(
3875 CGF, cast_or_null<OMPIteratorExpr>(Modifier->IgnoreParenImpCasts()));
3876 for (const Expr *E : C->varlist()) {
3877 llvm::Value *Addr;
3878 llvm::Value *Size;
3879 std::tie(Addr, Size) = getPointerAndSize(CGF, E);
3880 llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
3881 LValue Base =
3882 CGF.MakeAddrLValue(CGF.Builder.CreateGEP(CGF, AffinitiesArray, Idx),
3883 KmpTaskAffinityInfoTy);
3884 // affs[i].base_addr = &<Affinities[i].second>;
3885 LValue BaseAddrLVal = CGF.EmitLValueForField(
3886 Base, *std::next(KmpAffinityInfoRD->field_begin(), BaseAddr));
3887 CGF.EmitStoreOfScalar(CGF.Builder.CreatePtrToInt(Addr, CGF.IntPtrTy),
3888 BaseAddrLVal);
3889 // affs[i].len = sizeof(<Affinities[i].second>);
3890 LValue LenLVal = CGF.EmitLValueForField(
3891 Base, *std::next(KmpAffinityInfoRD->field_begin(), Len));
3892 CGF.EmitStoreOfScalar(Size, LenLVal);
3893 Idx = CGF.Builder.CreateNUWAdd(
3894 Idx, llvm::ConstantInt::get(Idx->getType(), 1));
3895 CGF.EmitStoreOfScalar(Idx, PosLVal);
3896 }
3897 }
3898 // Call to kmp_int32 __kmpc_omp_reg_task_with_affinity(ident_t *loc_ref,
3899 // kmp_int32 gtid, kmp_task_t *new_task, kmp_int32
3900 // naffins, kmp_task_affinity_info_t *affin_list);
3901 llvm::Value *LocRef = emitUpdateLocation(CGF, Loc);
3902 llvm::Value *GTid = getThreadID(CGF, Loc);
3903 llvm::Value *AffinListPtr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3904 AffinitiesArray.emitRawPointer(CGF), CGM.VoidPtrTy);
3905 // FIXME: Emit the function and ignore its result for now unless the
3906 // runtime function is properly implemented.
3907 (void)CGF.EmitRuntimeCall(
3908 OMPBuilder.getOrCreateRuntimeFunction(
3909 CGM.getModule(), OMPRTL___kmpc_omp_reg_task_with_affinity),
3910 {LocRef, GTid, NewTask, NumOfElements, AffinListPtr});
3911 }
3912 llvm::Value *NewTaskNewTaskTTy =
3913 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3914 NewTask, KmpTaskTWithPrivatesPtrTy);
3915 LValue Base = CGF.MakeNaturalAlignRawAddrLValue(NewTaskNewTaskTTy,
3916 KmpTaskTWithPrivatesQTy);
3917 LValue TDBase =
3918 CGF.EmitLValueForField(Base, *KmpTaskTWithPrivatesQTyRD->field_begin());
3919 // Fill the data in the resulting kmp_task_t record.
3920 // Copy shareds if there are any.
3921 Address KmpTaskSharedsPtr = Address::invalid();
3922 if (!SharedsTy->getAsStructureType()->getDecl()->field_empty()) {
3923 KmpTaskSharedsPtr = Address(
3924 CGF.EmitLoadOfScalar(
3926 TDBase,
3927 *std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTShareds)),
3928 Loc),
3929 CGF.Int8Ty, CGM.getNaturalTypeAlignment(SharedsTy));
3930 LValue Dest = CGF.MakeAddrLValue(KmpTaskSharedsPtr, SharedsTy);
3931 LValue Src = CGF.MakeAddrLValue(Shareds, SharedsTy);
3932 CGF.EmitAggregateCopy(Dest, Src, SharedsTy, AggValueSlot::DoesNotOverlap);
3933 }
3934 // Emit initial values for private copies (if any).
3935 TaskResultTy Result;
3936 if (!Privates.empty()) {
3937 emitPrivatesInit(CGF, D, KmpTaskSharedsPtr, Base, KmpTaskTWithPrivatesQTyRD,
3938 SharedsTy, SharedsPtrTy, Data, Privates,
3939 /*ForDup=*/false);
3940 if (isOpenMPTaskLoopDirective(D.getDirectiveKind()) &&
3941 (!Data.LastprivateVars.empty() || checkInitIsRequired(CGF, Privates))) {
3942 Result.TaskDupFn = emitTaskDupFunction(
3943 CGM, Loc, D, KmpTaskTWithPrivatesPtrQTy, KmpTaskTWithPrivatesQTyRD,
3944 KmpTaskTQTyRD, SharedsTy, SharedsPtrTy, Data, Privates,
3945 /*WithLastIter=*/!Data.LastprivateVars.empty());
3946 }
3947 }
3948 // Fields of union "kmp_cmplrdata_t" for destructors and priority.
3949 enum { Priority = 0, Destructors = 1 };
3950 // Provide pointer to function with destructors for privates.
3951 auto FI = std::next(KmpTaskTQTyRD->field_begin(), Data1);
3952 const RecordDecl *KmpCmplrdataUD =
3953 (*FI)->getType()->getAsUnionType()->getDecl();
3954 if (NeedsCleanup) {
3955 llvm::Value *DestructorFn = emitDestructorsFunction(
3956 CGM, Loc, KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy,
3957 KmpTaskTWithPrivatesQTy);
3958 LValue Data1LV = CGF.EmitLValueForField(TDBase, *FI);
3959 LValue DestructorsLV = CGF.EmitLValueForField(
3960 Data1LV, *std::next(KmpCmplrdataUD->field_begin(), Destructors));
3961 CGF.EmitStoreOfScalar(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3962 DestructorFn, KmpRoutineEntryPtrTy),
3963 DestructorsLV);
3964 }
3965 // Set priority.
3966 if (Data.Priority.getInt()) {
3967 LValue Data2LV = CGF.EmitLValueForField(
3968 TDBase, *std::next(KmpTaskTQTyRD->field_begin(), Data2));
3969 LValue PriorityLV = CGF.EmitLValueForField(
3970 Data2LV, *std::next(KmpCmplrdataUD->field_begin(), Priority));
3971 CGF.EmitStoreOfScalar(Data.Priority.getPointer(), PriorityLV);
3972 }
3973 Result.NewTask = NewTask;
3974 Result.TaskEntry = TaskEntry;
3975 Result.NewTaskNewTaskTTy = NewTaskNewTaskTTy;
3976 Result.TDBase = TDBase;
3977 Result.KmpTaskTQTyRD = KmpTaskTQTyRD;
3978 return Result;
3979}
3980
3981/// Translates internal dependency kind into the runtime kind.
3982 static RTLDependenceKindTy translateDependencyKind(OpenMPDependClauseKind K) {
3983 RTLDependenceKindTy DepKind;
3984 switch (K) {
3985 case OMPC_DEPEND_in:
3986 DepKind = RTLDependenceKindTy::DepIn;
3987 break;
3988 // Out and InOut dependencies must use the same code.
3989 case OMPC_DEPEND_out:
3990 case OMPC_DEPEND_inout:
3991 DepKind = RTLDependenceKindTy::DepInOut;
3992 break;
3993 case OMPC_DEPEND_mutexinoutset:
3994 DepKind = RTLDependenceKindTy::DepMutexInOutSet;
3995 break;
3996 case OMPC_DEPEND_inoutset:
3997 DepKind = RTLDependenceKindTy::DepInOutSet;
3998 break;
3999 case OMPC_DEPEND_outallmemory:
4000 DepKind = RTLDependenceKindTy::DepOmpAllMem;
4001 break;
4002 case OMPC_DEPEND_source:
4003 case OMPC_DEPEND_sink:
4004 case OMPC_DEPEND_depobj:
4005 case OMPC_DEPEND_inoutallmemory:
4007 llvm_unreachable("Unknown task dependence type");
4008 }
4009 return DepKind;
4010}
4011
4012/// Builds kmp_depend_info, if it is not built yet, and builds flags type.
4013static void getDependTypes(ASTContext &C, QualType &KmpDependInfoTy,
4014 QualType &FlagsTy) {
4015 FlagsTy = C.getIntTypeForBitwidth(C.getTypeSize(C.BoolTy), /*Signed=*/false);
4016 if (KmpDependInfoTy.isNull()) {
4017 RecordDecl *KmpDependInfoRD = C.buildImplicitRecord("kmp_depend_info");
4018 KmpDependInfoRD->startDefinition();
4019 addFieldToRecordDecl(C, KmpDependInfoRD, C.getIntPtrType());
4020 addFieldToRecordDecl(C, KmpDependInfoRD, C.getSizeType());
4021 addFieldToRecordDecl(C, KmpDependInfoRD, FlagsTy);
4022 KmpDependInfoRD->completeDefinition();
4023 KmpDependInfoTy = C.getRecordType(KmpDependInfoRD);
4024 }
4025}
4026
4027std::pair<llvm::Value *, LValue>
4028 CGOpenMPRuntime::getDepobjElements(CodeGenFunction &CGF, LValue DepobjLVal,
4029 SourceLocation Loc) {
4030 ASTContext &C = CGM.getContext();
4031 QualType FlagsTy;
4032 getDependTypes(C, KmpDependInfoTy, FlagsTy);
4033 RecordDecl *KmpDependInfoRD =
4034 cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
4035 QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
4036 LValue Base = CGF.EmitLoadOfPointerLValue(
4037 DepobjLVal.getAddress().withElementType(
4038 CGF.ConvertTypeForMem(KmpDependInfoPtrTy)),
4039 KmpDependInfoPtrTy->castAs<PointerType>());
4040 Address DepObjAddr = CGF.Builder.CreateGEP(
4041 CGF, Base.getAddress(),
4042 llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
4043 LValue NumDepsBase = CGF.MakeAddrLValue(
4044 DepObjAddr, KmpDependInfoTy, Base.getBaseInfo(), Base.getTBAAInfo());
4045 // NumDeps = deps[i].base_addr;
4046 LValue BaseAddrLVal = CGF.EmitLValueForField(
4047 NumDepsBase,
4048 *std::next(KmpDependInfoRD->field_begin(),
4049 static_cast<unsigned int>(RTLDependInfoFields::BaseAddr)));
4050 llvm::Value *NumDeps = CGF.EmitLoadOfScalar(BaseAddrLVal, Loc);
4051 return std::make_pair(NumDeps, Base);
4052}
4053
4054static void emitDependData(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
4055 llvm::PointerUnion<unsigned *, LValue *> Pos,
4056 const OMPTaskDataTy::DependData &Data,
4057 Address DependenciesArray) {
4058 CodeGenModule &CGM = CGF.CGM;
4059 ASTContext &C = CGM.getContext();
4060 QualType FlagsTy;
4061 getDependTypes(C, KmpDependInfoTy, FlagsTy);
4062 RecordDecl *KmpDependInfoRD =
4063 cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
4064 llvm::Type *LLVMFlagsTy = CGF.ConvertTypeForMem(FlagsTy);
4065
4066 OMPIteratorGeneratorScope IteratorScope(
4067 CGF, cast_or_null<OMPIteratorExpr>(
4068 Data.IteratorExpr ? Data.IteratorExpr->IgnoreParenImpCasts()
4069 : nullptr));
4070 for (const Expr *E : Data.DepExprs) {
4071 llvm::Value *Addr;
4072 llvm::Value *Size;
4073
4074 // The expression will be a nullptr in the 'omp_all_memory' case.
4075 if (E) {
4076 std::tie(Addr, Size) = getPointerAndSize(CGF, E);
4077 Addr = CGF.Builder.CreatePtrToInt(Addr, CGF.IntPtrTy);
4078 } else {
4079 Addr = llvm::ConstantInt::get(CGF.IntPtrTy, 0);
4080 Size = llvm::ConstantInt::get(CGF.SizeTy, 0);
4081 }
4082 LValue Base;
4083 if (unsigned *P = Pos.dyn_cast<unsigned *>()) {
4084 Base = CGF.MakeAddrLValue(
4085 CGF.Builder.CreateConstGEP(DependenciesArray, *P), KmpDependInfoTy);
4086 } else {
4087 assert(E && "Expected a non-null expression");
4088 LValue &PosLVal = *cast<LValue *>(Pos);
4089 llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
4090 Base = CGF.MakeAddrLValue(
4091 CGF.Builder.CreateGEP(CGF, DependenciesArray, Idx), KmpDependInfoTy);
4092 }
4093 // deps[i].base_addr = &<Dependencies[i].second>;
4094 LValue BaseAddrLVal = CGF.EmitLValueForField(
4095 Base,
4096 *std::next(KmpDependInfoRD->field_begin(),
4097 static_cast<unsigned int>(RTLDependInfoFields::BaseAddr)));
4098 CGF.EmitStoreOfScalar(Addr, BaseAddrLVal);
4099 // deps[i].len = sizeof(<Dependencies[i].second>);
4100 LValue LenLVal = CGF.EmitLValueForField(
4101 Base, *std::next(KmpDependInfoRD->field_begin(),
4102 static_cast<unsigned int>(RTLDependInfoFields::Len)));
4103 CGF.EmitStoreOfScalar(Size, LenLVal);
4104 // deps[i].flags = <Dependencies[i].first>;
4105 RTLDependenceKindTy DepKind = translateDependencyKind(Data.DepKind);
4106 LValue FlagsLVal = CGF.EmitLValueForField(
4107 Base,
4108 *std::next(KmpDependInfoRD->field_begin(),
4109 static_cast<unsigned int>(RTLDependInfoFields::Flags)));
4110 CGF.EmitStoreOfScalar(
4111 llvm::ConstantInt::get(LLVMFlagsTy, static_cast<unsigned int>(DepKind)),
4112 FlagsLVal);
4113 if (unsigned *P = Pos.dyn_cast<unsigned *>()) {
4114 ++(*P);
4115 } else {
4116 LValue &PosLVal = *cast<LValue *>(Pos);
4117 llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
4118 Idx = CGF.Builder.CreateNUWAdd(Idx,
4119 llvm::ConstantInt::get(Idx->getType(), 1));
4120 CGF.EmitStoreOfScalar(Idx, PosLVal);
4121 }
4122 }
4123}
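// E.g. 'depend(in : x)' for 'int x' fills one entry of DependenciesArray as
//   { (intptr_t)&x, sizeof(int), DepIn }
// (illustrative values; the flags field holds the translated
// RTLDependenceKindTy).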
4124
4125 static SmallVector<llvm::Value *, 4> emitDepobjElementsSizes(
4126 CodeGenFunction &CGF, QualType &KmpDependInfoTy,
4127 const OMPTaskDataTy::DependData &Data) {
4128 assert(Data.DepKind == OMPC_DEPEND_depobj &&
4129 "Expected depobj dependency kind.");
4130 SmallVector<llvm::Value *, 4> Sizes;
4131 SmallVector<LValue, 4> SizeLVals;
4132 ASTContext &C = CGF.getContext();
4133 {
4134 OMPIteratorGeneratorScope IteratorScope(
4135 CGF, cast_or_null<OMPIteratorExpr>(
4136 Data.IteratorExpr ? Data.IteratorExpr->IgnoreParenImpCasts()
4137 : nullptr));
4138 for (const Expr *E : Data.DepExprs) {
4139 llvm::Value *NumDeps;
4140 LValue Base;
4141 LValue DepobjLVal = CGF.EmitLValue(E->IgnoreParenImpCasts());
4142 std::tie(NumDeps, Base) =
4143 getDepobjElements(CGF, DepobjLVal, E->getExprLoc());
4144 LValue NumLVal = CGF.MakeAddrLValue(
4145 CGF.CreateMemTemp(C.getUIntPtrType(), "depobj.size.addr"),
4146 C.getUIntPtrType());
4147 CGF.Builder.CreateStore(llvm::ConstantInt::get(CGF.IntPtrTy, 0),
4148 NumLVal.getAddress());
4149 llvm::Value *PrevVal = CGF.EmitLoadOfScalar(NumLVal, E->getExprLoc());
4150 llvm::Value *Add = CGF.Builder.CreateNUWAdd(PrevVal, NumDeps);
4151 CGF.EmitStoreOfScalar(Add, NumLVal);
4152 SizeLVals.push_back(NumLVal);
4153 }
4154 }
4155 for (unsigned I = 0, E = SizeLVals.size(); I < E; ++I) {
4156 llvm::Value *Size =
4157 CGF.EmitLoadOfScalar(SizeLVals[I], Data.DepExprs[I]->getExprLoc());
4158 Sizes.push_back(Size);
4159 }
4160 return Sizes;
4161}
4162
4163 static void emitDepobjElements(CodeGenFunction &CGF,
4164 QualType &KmpDependInfoTy,
4165 LValue PosLVal,
4166 const OMPTaskDataTy::DependData &Data,
4167 Address DependenciesArray) {
4168 assert(Data.DepKind == OMPC_DEPEND_depobj &&
4169 "Expected depobj dependency kind.");
4170 llvm::Value *ElSize = CGF.getTypeSize(KmpDependInfoTy);
4171 {
4172 OMPIteratorGeneratorScope IteratorScope(
4173 CGF, cast_or_null<OMPIteratorExpr>(
4174 Data.IteratorExpr ? Data.IteratorExpr->IgnoreParenImpCasts()
4175 : nullptr));
4176 for (unsigned I = 0, End = Data.DepExprs.size(); I < End; ++I) {
4177 const Expr *E = Data.DepExprs[I];
4178 llvm::Value *NumDeps;
4179 LValue Base;
4180 LValue DepobjLVal = CGF.EmitLValue(E->IgnoreParenImpCasts());
4181 std::tie(NumDeps, Base) =
4182 getDepobjElements(CGF, DepobjLVal, E->getExprLoc());
4183
4184 // Memcpy the dependency data.
4185 llvm::Value *Size = CGF.Builder.CreateNUWMul(
4186 ElSize,
4187 CGF.Builder.CreateIntCast(NumDeps, CGF.SizeTy, /*isSigned=*/false));
4188 llvm::Value *Pos = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
4189 Address DepAddr = CGF.Builder.CreateGEP(CGF, DependenciesArray, Pos);
4190 CGF.Builder.CreateMemCpy(DepAddr, Base.getAddress(), Size);
4191
4192 // Increase pos.
4193 // pos += size;
4194 llvm::Value *Add = CGF.Builder.CreateNUWAdd(Pos, NumDeps);
4195 CGF.EmitStoreOfScalar(Add, PosLVal);
4196 }
4197 }
4198}
4199
4200std::pair<llvm::Value *, Address> CGOpenMPRuntime::emitDependClause(
4201 CodeGenFunction &CGF, ArrayRef<OMPTaskDataTy::DependData> Dependencies,
4202 SourceLocation Loc) {
4203 if (llvm::all_of(Dependencies, [](const OMPTaskDataTy::DependData &D) {
4204 return D.DepExprs.empty();
4205 }))
4206 return std::make_pair(nullptr, Address::invalid());
4207 // Process list of dependencies.
4208 ASTContext &C = CGM.getContext();
4209 Address DependenciesArray = Address::invalid();
4210 llvm::Value *NumOfElements = nullptr;
4211 unsigned NumDependencies = std::accumulate(
4212 Dependencies.begin(), Dependencies.end(), 0,
4213 [](unsigned V, const OMPTaskDataTy::DependData &D) {
4214 return D.DepKind == OMPC_DEPEND_depobj
4215 ? V
4216 : (V + (D.IteratorExpr ? 0 : D.DepExprs.size()));
4217 });
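// E.g. 'depend(in : a, b) depend(out : c) depend(depobj : d)' gives
// NumDependencies == 3 here; the elements behind the depobj 'd' (and any
// iterator-expanded dependencies) are counted separately below.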
4218 QualType FlagsTy;
4219 getDependTypes(C, KmpDependInfoTy, FlagsTy);
4220 bool HasDepobjDeps = false;
4221 bool HasRegularWithIterators = false;
4222 llvm::Value *NumOfDepobjElements = llvm::ConstantInt::get(CGF.IntPtrTy, 0);
4223 llvm::Value *NumOfRegularWithIterators =
4224 llvm::ConstantInt::get(CGF.IntPtrTy, 0);
4225 // Calculate number of depobj dependencies and regular deps with the
4226 // iterators.
4227 for (const OMPTaskDataTy::DependData &D : Dependencies) {
4228 if (D.DepKind == OMPC_DEPEND_depobj) {
4229 SmallVector<llvm::Value *, 4> Sizes =
4230 emitDepobjElementsSizes(CGF, KmpDependInfoTy, D);
4231 for (llvm::Value *Size : Sizes) {
4232 NumOfDepobjElements =
4233 CGF.Builder.CreateNUWAdd(NumOfDepobjElements, Size);
4234 }
4235 HasDepobjDeps = true;
4236 continue;
4237 }
4238 // Include number of iterations, if any.
4239
4240 if (const auto *IE = cast_or_null<OMPIteratorExpr>(D.IteratorExpr)) {
4241 llvm::Value *ClauseIteratorSpace =
4242 llvm::ConstantInt::get(CGF.IntPtrTy, 1);
4243 for (unsigned I = 0, E = IE->numOfIterators(); I < E; ++I) {
4244 llvm::Value *Sz = CGF.EmitScalarExpr(IE->getHelper(I).Upper);
4245 Sz = CGF.Builder.CreateIntCast(Sz, CGF.IntPtrTy, /*isSigned=*/false);
4246 ClauseIteratorSpace = CGF.Builder.CreateNUWMul(Sz, ClauseIteratorSpace);
4247 }
4248 llvm::Value *NumClauseDeps = CGF.Builder.CreateNUWMul(
4249 ClauseIteratorSpace,
4250 llvm::ConstantInt::get(CGF.IntPtrTy, D.DepExprs.size()));
4251 NumOfRegularWithIterators =
4252 CGF.Builder.CreateNUWAdd(NumOfRegularWithIterators, NumClauseDeps);
4253 HasRegularWithIterators = true;
4254 continue;
4255 }
4256 }
4257
4258 QualType KmpDependInfoArrayTy;
4259 if (HasDepobjDeps || HasRegularWithIterators) {
4260 NumOfElements = llvm::ConstantInt::get(CGM.IntPtrTy, NumDependencies,
4261 /*isSigned=*/false);
4262 if (HasDepobjDeps) {
4263 NumOfElements =
4264 CGF.Builder.CreateNUWAdd(NumOfDepobjElements, NumOfElements);
4265 }
4266 if (HasRegularWithIterators) {
4267 NumOfElements =
4268 CGF.Builder.CreateNUWAdd(NumOfRegularWithIterators, NumOfElements);
4269 }
4270 auto *OVE = new (C) OpaqueValueExpr(
4271 Loc, C.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0),
4272 VK_PRValue);
4273 CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, OVE,
4274 RValue::get(NumOfElements));
4275 KmpDependInfoArrayTy =
4276 C.getVariableArrayType(KmpDependInfoTy, OVE, ArraySizeModifier::Normal,
4277 /*IndexTypeQuals=*/0, SourceRange(Loc, Loc));
4278 // CGF.EmitVariablyModifiedType(KmpDependInfoArrayTy);
4279 // Properly emit variable-sized array.
4280 auto *PD = ImplicitParamDecl::Create(C, KmpDependInfoArrayTy,
4281 ImplicitParamKind::Other);
4282 CGF.EmitVarDecl(*PD);
4283 DependenciesArray = CGF.GetAddrOfLocalVar(PD);
4284 NumOfElements = CGF.Builder.CreateIntCast(NumOfElements, CGF.Int32Ty,
4285 /*isSigned=*/false);
4286 } else {
4287 KmpDependInfoArrayTy = C.getConstantArrayType(
4288 KmpDependInfoTy, llvm::APInt(/*numBits=*/64, NumDependencies), nullptr,
4289 ArraySizeModifier::Normal, /*IndexTypeQuals=*/0);
4290 DependenciesArray =
4291 CGF.CreateMemTemp(KmpDependInfoArrayTy, ".dep.arr.addr");
4292 DependenciesArray = CGF.Builder.CreateConstArrayGEP(DependenciesArray, 0);
4293 NumOfElements = llvm::ConstantInt::get(CGM.Int32Ty, NumDependencies,
4294 /*isSigned=*/false);
4295 }
4296 unsigned Pos = 0;
4297 for (unsigned I = 0, End = Dependencies.size(); I < End; ++I) {
4298 if (Dependencies[I].DepKind == OMPC_DEPEND_depobj ||
4299 Dependencies[I].IteratorExpr)
4300 continue;
4301 emitDependData(CGF, KmpDependInfoTy, &Pos, Dependencies[I],
4302 DependenciesArray);
4303 }
4304 // Copy regular dependencies with iterators.
4305 LValue PosLVal = CGF.MakeAddrLValue(
4306 CGF.CreateMemTemp(C.getSizeType(), "dep.counter.addr"), C.getSizeType());
4307 CGF.EmitStoreOfScalar(llvm::ConstantInt::get(CGF.SizeTy, Pos), PosLVal);
4308 for (unsigned I = 0, End = Dependencies.size(); I < End; ++I) {
4309 if (Dependencies[I].DepKind == OMPC_DEPEND_depobj ||
4310 !Dependencies[I].IteratorExpr)
4311 continue;
4312 emitDependData(CGF, KmpDependInfoTy, &PosLVal, Dependencies[I],
4313 DependenciesArray);
4314 }
4315 // Copy final depobj arrays without iterators.
4316 if (HasDepobjDeps) {
4317 for (unsigned I = 0, End = Dependencies.size(); I < End; ++I) {
4318 if (Dependencies[I].DepKind != OMPC_DEPEND_depobj)
4319 continue;
4320 emitDepobjElements(CGF, KmpDependInfoTy, PosLVal, Dependencies[I],
4321 DependenciesArray);
4322 }
4323 }
4324 DependenciesArray = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4325 DependenciesArray, CGF.VoidPtrTy, CGF.Int8Ty);
4326 return std::make_pair(NumOfElements, DependenciesArray);
4327}
4328
4329 Address CGOpenMPRuntime::emitDepobjDependClause(
4330 CodeGenFunction &CGF, const OMPTaskDataTy::DependData &Dependencies,
4331 SourceLocation Loc) {
4332 if (Dependencies.DepExprs.empty())
4333 return Address::invalid();
4334 // Process list of dependencies.
4335 ASTContext &C = CGM.getContext();
4336 Address DependenciesArray = Address::invalid();
4337 unsigned NumDependencies = Dependencies.DepExprs.size();
4338 QualType FlagsTy;
4339 getDependTypes(C, KmpDependInfoTy, FlagsTy);
4340 RecordDecl *KmpDependInfoRD =
4341 cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
4342
4343 llvm::Value *Size;
4344 // Define type kmp_depend_info[<Dependencies.size()>];
4345 // For depobj reserve one extra element to store the number of elements.
4346 // It is required to handle depobj(x) update(in) construct.
4347 // kmp_depend_info[<Dependencies.size()>] deps;
4348 llvm::Value *NumDepsVal;
4349 CharUnits Align = C.getTypeAlignInChars(KmpDependInfoTy);
4350 if (const auto *IE =
4351 cast_or_null<OMPIteratorExpr>(Dependencies.IteratorExpr)) {
4352 NumDepsVal = llvm::ConstantInt::get(CGF.SizeTy, 1);
4353 for (unsigned I = 0, E = IE->numOfIterators(); I < E; ++I) {
4354 llvm::Value *Sz = CGF.EmitScalarExpr(IE->getHelper(I).Upper);
4355 Sz = CGF.Builder.CreateIntCast(Sz, CGF.SizeTy, /*isSigned=*/false);
4356 NumDepsVal = CGF.Builder.CreateNUWMul(NumDepsVal, Sz);
4357 }
4358 Size = CGF.Builder.CreateNUWAdd(llvm::ConstantInt::get(CGF.SizeTy, 1),
4359 NumDepsVal);
4360 CharUnits SizeInBytes =
4361 C.getTypeSizeInChars(KmpDependInfoTy).alignTo(Align);
4362 llvm::Value *RecSize = CGM.getSize(SizeInBytes);
4363 Size = CGF.Builder.CreateNUWMul(Size, RecSize);
4364 NumDepsVal =
4365 CGF.Builder.CreateIntCast(NumDepsVal, CGF.IntPtrTy, /*isSigned=*/false);
4366 } else {
4367 QualType KmpDependInfoArrayTy = C.getConstantArrayType(
4368 KmpDependInfoTy, llvm::APInt(/*numBits=*/64, NumDependencies + 1),
4369 nullptr, ArraySizeModifier::Normal, /*IndexTypeQuals=*/0);
4370 CharUnits Sz = C.getTypeSizeInChars(KmpDependInfoArrayTy);
4371 Size = CGM.getSize(Sz.alignTo(Align));
4372 NumDepsVal = llvm::ConstantInt::get(CGF.IntPtrTy, NumDependencies);
4373 }
4374 // Need to allocate on the dynamic memory.
4375 llvm::Value *ThreadID = getThreadID(CGF, Loc);
4376 // Use default allocator.
4377 llvm::Value *Allocator = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
4378 llvm::Value *Args[] = {ThreadID, Size, Allocator};
4379
4380 llvm::Value *Addr =
4381 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
4382 CGM.getModule(), OMPRTL___kmpc_alloc),
4383 Args, ".dep.arr.addr");
4384 llvm::Type *KmpDependInfoLlvmTy = CGF.ConvertTypeForMem(KmpDependInfoTy);
4385 Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4386 Addr, CGF.Builder.getPtrTy(0));
4387 DependenciesArray = Address(Addr, KmpDependInfoLlvmTy, Align);
4388 // Write number of elements in the first element of array for depobj.
4389 LValue Base = CGF.MakeAddrLValue(DependenciesArray, KmpDependInfoTy);
4390 // deps[i].base_addr = NumDependencies;
4391 LValue BaseAddrLVal = CGF.EmitLValueForField(
4392 Base,
4393 *std::next(KmpDependInfoRD->field_begin(),
4394 static_cast<unsigned int>(RTLDependInfoFields::BaseAddr)));
4395 CGF.EmitStoreOfScalar(NumDepsVal, BaseAddrLVal);
4396 llvm::PointerUnion<unsigned *, LValue *> Pos;
4397 unsigned