#include "llvm/ADT/SmallSet.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Metadata.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Debug.h"

using namespace clang;
using namespace CodeGen;
using namespace llvm::omp;

#define TTL_CODEGEN_TYPE "target-teams-loop-codegen"
class OMPLexicalScope : public CodeGenFunction::LexicalScope {
    for (const auto *C : S.clauses()) {
      if (const auto *PreInit =
              cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
        for (const auto *I : PreInit->decls()) {
          if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
            CodeGenFunction::AutoVarEmission Emission =
  CodeGenFunction::OMPPrivateScope InlinedShareds;
            cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD));
      const std::optional<OpenMPDirectiveKind> CapturedRegion = std::nullopt,
      const bool EmitPreInitStmt = true)
      emitPreInitStmt(CGF, S);
    assert(S.hasAssociatedStmt() &&
           "Expected associated statement for inlined directive.");
    const CapturedStmt *CS = S.getCapturedStmt(*CapturedRegion);
        if (C.capturesVariable() || C.capturesVariableByCopy()) {
          auto *VD = C.getCapturedVar();
                 "Canonical decl must be captured.");
                            InlinedShareds.isGlobalVarCaptured(VD)),
    (void)InlinedShareds.Privatize();
class OMPParallelScope final : public OMPLexicalScope {
      : OMPLexicalScope(CGF, S, std::nullopt,
                        EmitPreInitStmt(S)) {}

class OMPTeamsScope final : public OMPLexicalScope {
      : OMPLexicalScope(CGF, S, std::nullopt,
                        EmitPreInitStmt(S)) {}
class OMPLoopScope : public CodeGenFunction::RunCleanupsScope {
    const Stmt *PreInits;
    CodeGenFunction::OMPMapVars PreCondVars;
    if (auto *LD = dyn_cast<OMPLoopDirective>(&S)) {
      llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
      for (const auto *E : LD->counters()) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
        (void)PreCondVars.setVarAddr(
        for (const Expr *IRef : C->varlist()) {
              cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
          if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
            QualType OrigVDTy = OrigVD->getType().getNonReferenceType();
            (void)PreCondVars.setVarAddr(
      (void)PreCondVars.apply(CGF);
          LD->getInnermostCapturedStmt()->getCapturedStmt(),
          true, LD->getLoopsNumber(),
          [&CGF](unsigned Cnt, const Stmt *CurStmt) {
            if (const auto *CXXFor = dyn_cast<CXXForRangeStmt>(CurStmt)) {
              if (const Stmt *Init = CXXFor->getInit())
              CGF.EmitStmt(CXXFor->getRangeStmt());
      PreInits = LD->getPreInits();
    } else if (const auto *Tile = dyn_cast<OMPTileDirective>(&S)) {
      PreInits = Tile->getPreInits();
    } else if (const auto *Unroll = dyn_cast<OMPUnrollDirective>(&S)) {
      PreInits = Unroll->getPreInits();
    } else if (const auto *Reverse = dyn_cast<OMPReverseDirective>(&S)) {
      PreInits = Reverse->getPreInits();
    } else if (const auto *Interchange =
                   dyn_cast<OMPInterchangeDirective>(&S)) {
      PreInits = Interchange->getPreInits();
      llvm_unreachable("Unknown loop-based directive kind.");
    if (auto *PreInitCompound = dyn_cast<CompoundStmt>(PreInits))
      llvm::append_range(PreInitStmts, PreInitCompound->body());
      PreInitStmts.push_back(PreInits);
    for (const Stmt *S : PreInitStmts) {
      if (auto *PreInitDecl = dyn_cast<DeclStmt>(S)) {
        for (Decl *I : PreInitDecl->decls())
    PreCondVars.restore(CGF);
    emitPreInitStmt(CGF, S);
class OMPSimdLexicalScope : public CodeGenFunction::LexicalScope {
  CodeGenFunction::OMPPrivateScope InlinedShareds;
            cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD));
        InlinedShareds(CGF) {
    for (const auto *C : S.clauses()) {
        if (const auto *PreInit =
                cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
          for (const auto *I : PreInit->decls()) {
            if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
              CodeGenFunction::AutoVarEmission Emission =
      } else if (const auto *UDP = dyn_cast<OMPUseDevicePtrClause>(C)) {
        for (const Expr *E : UDP->varlist()) {
          const Decl *D = cast<DeclRefExpr>(E)->getDecl();
          if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
      } else if (const auto *UDP = dyn_cast<OMPUseDeviceAddrClause>(C)) {
        for (const Expr *E : UDP->varlist()) {
          if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
    if (const auto *TG = dyn_cast<OMPTaskgroupDirective>(&S)) {
      if (const Expr *E = TG->getReductionRef())
        CGF.EmitVarDecl(*cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()));
    llvm::DenseSet<CanonicalDeclPtr<const Decl>> CopyArrayTemps;
      if (C->getModifier() != OMPC_REDUCTION_inscan)
      for (const Expr *E : C->copy_array_temps())
        CopyArrayTemps.insert(cast<DeclRefExpr>(E)->getDecl());
    const auto *CS = cast_or_null<CapturedStmt>(S.getAssociatedStmt());
        if (C.capturesVariable() || C.capturesVariableByCopy()) {
          auto *VD = C.getCapturedVar();
          if (CopyArrayTemps.contains(VD))
                 "Canonical decl must be captured.");
              isCapturedVar(CGF, VD) ||
               InlinedShareds.isGlobalVarCaptured(VD)),
    (void)InlinedShareds.Privatize();
  if (Kind != OMPD_loop)
    BindKind = C->getBindKind();
  case OMPC_BIND_parallel:
  case OMPC_BIND_teams:
    return OMPD_distribute;
  case OMPC_BIND_thread:
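// Illustrative example (not part of the original source, assumptions noted):
// the bind-clause remapping above handles the generic loop construct, e.g.
//   #pragma omp loop bind(teams)      // lowered roughly like 'distribute'
//   #pragma omp loop bind(parallel)   // lowered roughly like 'for'
//   #pragma omp loop bind(thread)     // lowered roughly like 'simd'
//   for (int i = 0; i < n; ++i) ...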
LValue CodeGenFunction::EmitOMPSharedLValue(const Expr *E) {
  if (const auto *OrigDRE = dyn_cast<DeclRefExpr>(E)) {
    if (const auto *OrigVD = dyn_cast<VarDecl>(OrigDRE->getDecl())) {
      OrigVD = OrigVD->getCanonicalDecl();

  llvm::Value *Size = nullptr;
  auto SizeInChars = C.getTypeSizeInChars(Ty);
  if (SizeInChars.isZero()) {
      Size ? Builder.CreateNUWMul(Size, VlaSize.NumElts) : VlaSize.NumElts;
    SizeInChars = C.getTypeSizeInChars(Ty);
    if (SizeInChars.isZero())
      return llvm::ConstantInt::get(SizeTy, 0);

  const RecordDecl *RD = S.getCapturedRecordDecl();
  auto CurCap = S.captures().begin();
                                                 E = S.capture_init_end();
       I != E; ++I, ++CurField, ++CurCap) {
    if (CurField->hasCapturedVLAType()) {
      CapturedVars.push_back(Val);
    } else if (CurCap->capturesThis()) {
      CapturedVars.push_back(CXXThisValue);
    } else if (CurCap->capturesVariableByCopy()) {
      if (!CurField->getType()->isAnyPointerType()) {
            Twine(CurCap->getCapturedVar()->getName(), ".casted"));
      CapturedVars.push_back(CV);
      assert(CurCap->capturesVariable() && "Expected capture by reference.");

    return C.getLValueReferenceType(
  if (const auto *VLA = dyn_cast<VariableArrayType>(A))
  if (!A->isVariablyModifiedType())
    return C.getCanonicalType(T);
  return C.getCanonicalParamType(T);
struct FunctionOptions {
  const bool UIntPtrCastRequired = true;
  const bool RegisterCastedArgsOnly = false;
  const StringRef FunctionName;
  explicit FunctionOptions(const CapturedStmt *S, bool UIntPtrCastRequired,
                           bool RegisterCastedArgsOnly, StringRef FunctionName,
      : S(S), UIntPtrCastRequired(UIntPtrCastRequired),
        RegisterCastedArgsOnly(UIntPtrCastRequired && RegisterCastedArgsOnly),
        FunctionName(FunctionName), Loc(Loc) {}
    llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>>
    llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>>
    llvm::Value *&CXXThisValue, const FunctionOptions &FO) {
  const RecordDecl *RD = FO.S->getCapturedRecordDecl();
  assert(CD->hasBody() && "missing CapturedDecl body");
  CXXThisValue = nullptr;
  auto I = FO.S->captures().begin();
  if (!FO.UIntPtrCastRequired) {
    if (FO.UIntPtrCastRequired &&
         I->capturesVariableArrayType()))
    if (I->capturesVariable() || I->capturesVariableByCopy()) {
      CapVar = I->getCapturedVar();
    } else if (I->capturesThis()) {
      assert(I->capturesVariableArrayType());
    } else if (DebugFunctionDecl && (CapVar || I->capturesThis())) {
          Ctx, DebugFunctionDecl,
          CapVar ? CapVar->getBeginLoc() : FD->getBeginLoc(),
          CapVar ? CapVar->getLocation() : FD->getLocation(), II, ArgType,
    Args.emplace_back(Arg);
    TargetArgs.emplace_back(
        FO.UIntPtrCastRequired
      llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
  F->setDoesNotThrow();
  F->setDoesNotRecurse();
    F->removeFnAttr(llvm::Attribute::NoInline);
    F->addFnAttr(llvm::Attribute::AlwaysInline);
                    FO.UIntPtrCastRequired ? FO.Loc : FO.S->getBeginLoc(),
                    FO.UIntPtrCastRequired ? FO.Loc
  I = FO.S->captures().begin();
    if (!FO.UIntPtrCastRequired && Args[Cnt] != TargetArgs[Cnt]) {
    if (I->capturesVariableByCopy() && FD->getType()->isAnyPointerType()) {
      const VarDecl *CurVD = I->getCapturedVar();
      if (!FO.RegisterCastedArgsOnly)
        LocalAddrs.insert({Args[Cnt], {CurVD, LocalAddr}});
    if (FD->hasCapturedVLAType()) {
      if (FO.UIntPtrCastRequired) {
                                       Args[Cnt]->getName(), ArgLVal),
      VLASizes.try_emplace(Args[Cnt], VAT->getSizeExpr(), ExprArg);
    } else if (I->capturesVariable()) {
      const VarDecl *Var = I->getCapturedVar();
      if (!FO.RegisterCastedArgsOnly) {
    } else if (I->capturesVariableByCopy()) {
      assert(!FD->getType()->isAnyPointerType() &&
             "Not expecting a captured pointer.");
      const VarDecl *Var = I->getCapturedVar();
      LocalAddrs.insert({Args[Cnt],
                         {Var, FO.UIntPtrCastRequired
                                       CGF, I->getLocation(), FD->getType(),
                                       Args[Cnt]->getName(), ArgLVal)
      assert(I->capturesThis());
      LocalAddrs.insert({Args[Cnt], {nullptr, ArgLVal.getAddress()}});
668 "CapturedStmtInfo should be set when generating the captured function");
671 bool NeedWrapperFunction =
674 llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>> LocalAddrs,
676 llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>> VLASizes,
679 llvm::raw_svector_ostream Out(Buffer);
683 llvm::Function *WrapperF =
nullptr;
684 if (NeedWrapperFunction) {
687 FunctionOptions WrapperFO(&S,
true,
693 WrapperCGF.CXXThisValue, WrapperFO);
696 FunctionOptions FO(&S, !NeedWrapperFunction,
false,
699 *
this, WrapperArgs, WrapperLocalAddrs, WrapperVLASizes, CXXThisValue, FO);
700 CodeGenFunction::OMPPrivateScope LocalScope(*
this);
701 for (
const auto &LocalAddrPair : WrapperLocalAddrs) {
702 if (LocalAddrPair.second.first) {
703 LocalScope.addPrivate(LocalAddrPair.second.first,
704 LocalAddrPair.second.second);
707 (void)LocalScope.Privatize();
708 for (
const auto &VLASizePair : WrapperVLASizes)
709 VLASizeMap[VLASizePair.second.first] = VLASizePair.second.second;
712 (void)LocalScope.ForceCleanup();
714 if (!NeedWrapperFunction)
718 WrapperF->removeFromParent();
719 F->getParent()->getFunctionList().insertAfter(F->getIterator(), WrapperF);
722 auto *PI = F->arg_begin();
723 for (
const auto *Arg : Args) {
725 auto I = LocalAddrs.find(Arg);
726 if (I != LocalAddrs.end()) {
727 LValue LV = WrapperCGF.MakeAddrLValue(
729 I->second.first ? I->second.first->getType() : Arg->getType(),
733 CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
735 auto EI = VLASizes.find(Arg);
736 if (EI != VLASizes.end()) {
740 WrapperCGF.MakeAddrLValue(WrapperCGF.GetAddrOfLocalVar(Arg),
742 CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
745 CallArgs.emplace_back(WrapperCGF.EmitFromMemory(
CallArg, Arg->
getType()));
749 WrapperCGF.FinishFunction();
  llvm::Value *NumElements = emitArrayLength(ArrayTy, ElementTy, DestAddr);
                                             DestBegin, NumElements);
  llvm::Value *IsEmpty =
      Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty");
  Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
  llvm::BasicBlock *EntryBB = Builder.GetInsertBlock();
  llvm::PHINode *SrcElementPHI =
      Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
  SrcElementPHI->addIncoming(SrcBegin, EntryBB);
  llvm::PHINode *DestElementPHI = Builder.CreatePHI(
      DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
  DestElementPHI->addIncoming(DestBegin, EntryBB);
  CopyGen(DestElementCurrent, SrcElementCurrent);
  llvm::Value *DestElementNext =
                                 1, "omp.arraycpy.dest.element");
  llvm::Value *SrcElementNext =
                                 1, "omp.arraycpy.src.element");
      Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
  Builder.CreateCondBr(Done, DoneBB, BodyBB);
  DestElementPHI->addIncoming(DestElementNext, Builder.GetInsertBlock());
  SrcElementPHI->addIncoming(SrcElementNext, Builder.GetInsertBlock());

  const auto *BO = dyn_cast<BinaryOperator>(Copy);
  if (BO && BO->getOpcode() == BO_Assign) {
          DestAddr, SrcAddr, OriginalType,
            CodeGenFunction::OMPPrivateScope Remap(*this);
            Remap.addPrivate(DestVD, DestElement);
            Remap.addPrivate(SrcVD, SrcElement);
            (void)Remap.Privatize();
    CodeGenFunction::OMPPrivateScope Remap(*this);
    Remap.addPrivate(SrcVD, SrcAddr);
    Remap.addPrivate(DestVD, DestAddr);
    (void)Remap.Privatize();
    OMPPrivateScope &PrivateScope) {
  bool DeviceConstTarget = getLangOpts().OpenMPIsTargetDevice &&
  bool FirstprivateIsLastprivate = false;
  llvm::DenseMap<const VarDecl *, OpenMPLastprivateModifier> Lastprivates;
    for (const auto *D : C->varlist())
      Lastprivates.try_emplace(
  llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate;
  bool MustEmitFirstprivateCopy =
      CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown;
    const auto *IRef = C->varlist_begin();
    const auto *InitsRef = C->inits().begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      bool ThisFirstprivateIsLastprivate =
          Lastprivates.count(OrigVD->getCanonicalDecl()) > 0;
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
      if (!MustEmitFirstprivateCopy && !ThisFirstprivateIsLastprivate && FD &&
          (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) {
        EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl());
      if (DeviceConstTarget && OrigVD->getType().isConstant(getContext()) &&
          (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) {
        EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl());
      FirstprivateIsLastprivate =
          FirstprivateIsLastprivate || ThisFirstprivateIsLastprivate;
      if (EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl()).second) {
            cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl());
        if (CE && !CE.isReference()) {
        if (CE && CE.isReference()) {
          OriginalLVal = CE.getReferenceLValue(*this, &DRE);
          assert(!CE && "Expected non-constant firstprivate.");
                  RunCleanupsScope InitScope(*this);
                  setAddrOfLocalVar(VDInit, SrcElement);
                  EmitAnyExprToMem(Init, DestElement,
                                   Init->getType().getQualifiers(),
                  LocalDeclMap.erase(VDInit);
            PrivateScope.addPrivate(OrigVD, Emission.getAllocatedAddress());
            setAddrOfLocalVar(VDInit, OriginalAddr);
            LocalDeclMap.erase(VDInit);
            if (ThisFirstprivateIsLastprivate &&
                Lastprivates[OrigVD->getCanonicalDecl()] ==
                    OMPC_LASTPRIVATE_conditional) {
                  (*IRef)->getExprLoc());
              LocalDeclMap.erase(VD);
              setAddrOfLocalVar(VD, VDAddr);
          IsRegistered = PrivateScope.addPrivate(OrigVD, VDAddr);
        assert(IsRegistered &&
               "firstprivate var already registered as private");
  return FirstprivateIsLastprivate && !EmittedAsFirstprivate.empty();
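// Illustrative example (not part of the original source): the firstprivate
// handling above is exercised by code such as
//   int x = 42;
//   #pragma omp parallel firstprivate(x)
//   { /* each thread gets its own copy of x, initialized to 42 */ }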
    CodeGenFunction::OMPPrivateScope &PrivateScope) {
  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
    auto IRef = C->varlist_begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
        assert(IsRegistered && "private var already registered as private");
  llvm::DenseSet<const VarDecl *> CopiedVars;
  llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr;
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (const Expr *AssignOp : C->assignment_ops()) {
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
            getContext().getTargetInfo().isTLSSupported()) {
               "Copyin threadprivates should have been captured!");
        LocalDeclMap.erase(VD);
        if (CopiedVars.size() == 1) {
          auto *MasterAddrInt = Builder.CreatePtrToInt(
          auto *PrivateAddrInt = Builder.CreatePtrToInt(
              Builder.CreateICmpNE(MasterAddrInt, PrivateAddrInt), CopyBegin,
            cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
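// Illustrative example (not part of the original source): copyin applies to
// threadprivate variables, e.g.
//   static int counter;
//   #pragma omp threadprivate(counter)
//   ...
//   #pragma omp parallel copyin(counter)
//   { /* each thread's counter is initialized from the primary thread's copy */ }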
  bool HasAtLeastOneLastprivate = false;
  llvm::DenseSet<const VarDecl *> SIMDLCVs;
    const auto *LoopDirective = cast<OMPLoopDirective>(&D);
    for (const Expr *C : LoopDirective->counters()) {
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
    HasAtLeastOneLastprivate = true;
    const auto *IRef = C->varlist_begin();
    const auto *IDestRef = C->destination_exprs().begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        PrivateScope.addPrivate(DestVD, EmitLValue(&DRE).getAddress());
        if (IInit && !SIMDLCVs.count(OrigVD->getCanonicalDecl())) {
          const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
          if (C->getKind() == OMPC_LASTPRIVATE_conditional) {
            setAddrOfLocalVar(VD, VDAddr);
          bool IsRegistered = PrivateScope.addPrivate(OrigVD, VDAddr);
          assert(IsRegistered &&
                 "lastprivate var already registered as private");
  return HasAtLeastOneLastprivate;
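// Illustrative example (not part of the original source):
//   int last;
//   #pragma omp parallel for lastprivate(last)
//   for (int i = 0; i < n; ++i)
//     last = a[i];   // after the loop, 'last' holds the value from the final iteration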
    llvm::Value *IsLastIterCond) {
  llvm::BasicBlock *ThenBB = nullptr;
  llvm::BasicBlock *DoneBB = nullptr;
  if (IsLastIterCond) {
          return C->getKind() == OMPC_LASTPRIVATE_conditional;
    Builder.CreateCondBr(IsLastIterCond, ThenBB, DoneBB);
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  llvm::DenseMap<const VarDecl *, const Expr *> LoopCountersAndUpdates;
  if (const auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) {
    auto IC = LoopDirective->counters().begin();
    for (const Expr *F : LoopDirective->finals()) {
        AlreadyEmittedVars.insert(D);
        LoopCountersAndUpdates[D] = F;
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (const Expr *AssignOp : C->assignment_ops()) {
      const auto *PrivateVD =
          cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      const auto *CanonicalVD = PrivateVD->getCanonicalDecl();
      if (AlreadyEmittedVars.insert(CanonicalVD).second) {
        if (const Expr *FinalExpr = LoopCountersAndUpdates.lookup(CanonicalVD))
            cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        if (const auto *RefTy = PrivateVD->getType()->getAs<ReferenceType>())
        if (C->getKind() == OMPC_LASTPRIVATE_conditional)
              *this, MakeAddrLValue(PrivateAddr, (*IRef)->getType()), PrivateVD,
              (*IRef)->getExprLoc());
        EmitOMPCopy(Type, OriginalAddr, PrivateAddr, DestVD, SrcVD, AssignOp);
  if (const Expr *PostUpdate = C->getPostUpdateExpr())
    CodeGenFunction::OMPPrivateScope &PrivateScope, bool ForInscan) {
    if (ForInscan != (C->getModifier() == OMPC_REDUCTION_inscan))
    Shareds.append(C->varlist_begin(), C->varlist_end());
    Privates.append(C->privates().begin(), C->privates().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
    LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    if (C->getModifier() == OMPC_REDUCTION_task) {
      Data.ReductionVars.append(C->privates().begin(), C->privates().end());
      Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end());
      Data.ReductionCopies.append(C->privates().begin(), C->privates().end());
      Data.ReductionOps.append(C->reduction_ops().begin(),
                               C->reduction_ops().end());
      TaskLHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
      TaskRHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
  auto *ILHS = LHSs.begin();
  auto *IRHS = RHSs.begin();
  auto *IPriv = Privates.begin();
  for (const Expr *IRef : Shareds) {
    const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IPriv)->getDecl());
    RedCG.emitSharedOrigLValue(*this, Count);
    RedCG.emitAggregateType(*this, Count);
            RedCG.emitInitialization(*this, Count, Emission.getAllocatedAddress(),
                                     RedCG.getSharedLValue(Count).getAddress(),
            CGF.EmitAutoVarInit(Emission);
      Address BaseAddr = RedCG.adjustPrivateAddress(
          *this, Count, Emission.getAllocatedAddress());
          PrivateScope.addPrivate(RedCG.getBaseDecl(Count), BaseAddr);
      assert(IsRegistered && "private var already registered as private");
    const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
    const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
    bool isaOMPArraySectionExpr = isa<ArraySectionExpr>(IRef);
      PrivateScope.addPrivate(LHSVD, RedCG.getSharedLValue(Count).getAddress());
               isa<ArraySubscriptExpr>(IRef)) {
      PrivateScope.addPrivate(LHSVD, RedCG.getSharedLValue(Count).getAddress());
      PrivateScope.addPrivate(RHSVD,
      Address OriginalAddr = RedCG.getSharedLValue(Count).getAddress();
      PrivateScope.addPrivate(LHSVD, OriginalAddr);
      PrivateScope.addPrivate(
  if (!Data.ReductionVars.empty()) {
    Data.IsReductionWithTaskMod = true;
    const Expr *TaskRedRef = nullptr;
      TaskRedRef = cast<OMPParallelDirective>(D).getTaskReductionRefExpr();
      TaskRedRef = cast<OMPForDirective>(D).getTaskReductionRefExpr();
      TaskRedRef = cast<OMPSectionsDirective>(D).getTaskReductionRefExpr();
    case OMPD_parallel_for:
      TaskRedRef = cast<OMPParallelForDirective>(D).getTaskReductionRefExpr();
    case OMPD_parallel_master:
          cast<OMPParallelMasterDirective>(D).getTaskReductionRefExpr();
    case OMPD_parallel_sections:
          cast<OMPParallelSectionsDirective>(D).getTaskReductionRefExpr();
    case OMPD_target_parallel:
          cast<OMPTargetParallelDirective>(D).getTaskReductionRefExpr();
    case OMPD_target_parallel_for:
          cast<OMPTargetParallelForDirective>(D).getTaskReductionRefExpr();
    case OMPD_distribute_parallel_for:
          cast<OMPDistributeParallelForDirective>(D).getTaskReductionRefExpr();
    case OMPD_teams_distribute_parallel_for:
      TaskRedRef = cast<OMPTeamsDistributeParallelForDirective>(D)
                       .getTaskReductionRefExpr();
    case OMPD_target_teams_distribute_parallel_for:
      TaskRedRef = cast<OMPTargetTeamsDistributeParallelForDirective>(D)
                       .getTaskReductionRefExpr();
    case OMPD_parallel_for_simd:
    case OMPD_taskyield:
    case OMPD_taskgroup:
    case OMPD_cancellation_point:
    case OMPD_target_data:
    case OMPD_target_enter_data:
    case OMPD_target_exit_data:
    case OMPD_taskloop_simd:
    case OMPD_master_taskloop:
    case OMPD_master_taskloop_simd:
    case OMPD_parallel_master_taskloop:
    case OMPD_parallel_master_taskloop_simd:
    case OMPD_distribute:
    case OMPD_target_update:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_distribute_simd:
    case OMPD_target_parallel_for_simd:
    case OMPD_target_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_target_teams:
    case OMPD_target_teams_distribute:
    case OMPD_target_teams_distribute_parallel_for_simd:
    case OMPD_target_teams_distribute_simd:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_threadprivate:
    case OMPD_declare_reduction:
    case OMPD_declare_mapper:
    case OMPD_declare_simd:
    case OMPD_declare_variant:
    case OMPD_begin_declare_variant:
    case OMPD_end_declare_variant:
      llvm_unreachable("Unexpected directive with task reductions.");
    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(TaskRedRef)->getDecl());
                  false, TaskRedRef->getType());
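// Illustrative example (not part of the original source): the task-reduction
// bookkeeping above corresponds to source such as
//   int sum = 0;
//   #pragma omp parallel for reduction(task, +: sum)
//   for (int i = 0; i < n; ++i)
//     sum += a[i];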
  bool HasAtLeastOneReduction = false;
  bool IsReductionWithTaskMod = false;
    if (C->getModifier() == OMPC_REDUCTION_inscan)
    HasAtLeastOneReduction = true;
    Privates.append(C->privates().begin(), C->privates().end());
    LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
    IsReductionWithTaskMod =
        IsReductionWithTaskMod || C->getModifier() == OMPC_REDUCTION_task;
  if (HasAtLeastOneReduction) {
    if (IsReductionWithTaskMod) {
    bool TeamsLoopCanBeParallel = false;
    if (auto *TTLD = dyn_cast<OMPTargetTeamsGenericLoopDirective>(&D))
      TeamsLoopCanBeParallel = TTLD->canBeParallelFor();
        TeamsLoopCanBeParallel || ReductionKind == OMPD_simd;
    bool SimpleReduction = ReductionKind == OMPD_simd;
        *this, D.getEndLoc(), Privates, LHSExprs, RHSExprs, ReductionOps,
        {WithNowait, SimpleReduction, ReductionKind});

  llvm::BasicBlock *DoneBB = nullptr;
    if (const Expr *PostUpdate = C->getPostUpdateExpr()) {
        if (llvm::Value *Cond = CondGen(CGF)) {
          CGF.Builder.CreateCondBr(Cond, ThenBB, DoneBB);
    CodeGenBoundParametersTy;
  llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> PrivateDecls;
    for (const Expr *Ref : C->varlist()) {
      if (!Ref->getType()->isScalarType())
      const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
      PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
    for (const Expr *Ref : C->varlist()) {
      if (!Ref->getType()->isScalarType())
      const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
      PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
    for (const Expr *Ref : C->varlist()) {
      if (!Ref->getType()->isScalarType())
      const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
      PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
    for (const Expr *Ref : C->varlist()) {
      if (!Ref->getType()->isScalarType())
      const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
      PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
      CGF, S, PrivateDecls);

    const CodeGenBoundParametersTy &CodeGenBoundParameters) {
  const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
  llvm::Value *NumThreads = nullptr;
  llvm::Function *OutlinedFn =
    CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
    NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
        CGF, NumThreads, NumThreadsClause->getBeginLoc());
    CodeGenFunction::RunCleanupsScope ProcBindScope(CGF);
        CGF, ProcBindClause->getProcBindKind(), ProcBindClause->getBeginLoc());
  const Expr *IfCond = nullptr;
  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
    if (C->getNameModifier() == OMPD_unknown ||
        C->getNameModifier() == OMPD_parallel) {
      IfCond = C->getCondition();
  OMPParallelScope Scope(CGF, S);
  CodeGenBoundParameters(CGF, S, CapturedVars);
      CapturedVars, IfCond, NumThreads);
  if (!CVD->hasAttr<OMPAllocateDeclAttr>())
  const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
  return !((AA->getAllocatorType() == OMPAllocateDeclAttr::OMPDefaultMemAlloc ||
            AA->getAllocatorType() == OMPAllocateDeclAttr::OMPNullMemAlloc) &&
           !AA->getAllocator());
        CGF, S.getBeginLoc(), OMPD_unknown, false,
      Size = CGF.Builder.CreateNUWAdd(
  const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
  assert(AA->getAllocator() &&
         "Expected allocator expression for non-default allocator.");
  if (Allocator->getType()->isIntegerTy())
  else if (Allocator->getType()->isPointerTy())
  llvm::Value *Addr = OMPBuilder.createOMPAlloc(
    llvm::CallInst *FreeCI =
        OMPBuilder.createOMPFree(CGF.Builder, Addr, Allocator);
  std::string Suffix = getNameWithSeparators({"cache", ""});
  llvm::CallInst *ThreadPrivateCacheCall =
      OMPBuilder.createCachedThreadPrivate(CGF.Builder, Data, Size, CacheName);
  llvm::raw_svector_ostream OS(Buffer);
  StringRef Sep = FirstSeparator;
  for (StringRef Part : Parts) {
  return OS.str().str();
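// Illustrative example (not part of the original source): a local variable
// with an allocate directive takes the omp_alloc/omp_free path above, e.g.
//   void f() {
//     int buf[128];
//     #pragma omp allocate(buf) allocator(omp_large_cap_mem_alloc)
//     ...
//   }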
  llvm::BasicBlock *FiniBB = splitBBWithSuffix(Builder, false,
                                               "." + RegionName + ".after");

  llvm::BasicBlock *FiniBB = splitBBWithSuffix(Builder, false,
                                               "." + RegionName + ".after");

  llvm::Value *IfCond = nullptr;
  llvm::Value *NumThreads = nullptr;
  ProcBindKind ProcBind = OMP_PROC_BIND_default;
    ProcBind = ProcBindClause->getProcBindKind();
  using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
  auto FiniCB = [this](InsertPointTy IP) {
  auto PrivCB = [](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
                   llvm::Value &, llvm::Value &Val, llvm::Value *&ReplVal) {
  const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
  auto BodyGenCB = [&, this](InsertPointTy AllocaIP,
                             InsertPointTy CodeGenIP) {
        *this, ParallelRegionBodyStmt, AllocaIP, CodeGenIP, "parallel");
  CGCapturedStmtInfo CGSI(*CS, CR_OpenMP);
  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI);
  llvm::OpenMPIRBuilder::InsertPointTy AllocaIP(
      OMPBuilder.createParallel(Builder, AllocaIP, BodyGenCB, PrivCB, FiniCB,
                                IfCond, NumThreads, ProcBind, S.hasCancel()));
  OMPPrivateScope PrivateScope(CGF);
  (void)PrivateScope.Privatize();
  CGF.EmitStmt(S.getCapturedStmt(OMPD_parallel)->getCapturedStmt());
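// Illustrative example (not part of the original source): the OpenMPIRBuilder
// path above outlines a region such as
//   #pragma omp parallel if(cond) num_threads(4) proc_bind(close)
//   { work(); }
// into a separate function and emits the runtime fork call (__kmpc_fork_call)
// that invokes it on each thread.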
class OMPTransformDirectiveScopeRAII {
  OMPLoopScope *Scope = nullptr;
  CodeGenFunction::CGCapturedStmtInfo *CGSI = nullptr;
  CodeGenFunction::CGCapturedStmtRAII *CapInfoRAII = nullptr;

  OMPTransformDirectiveScopeRAII(const OMPTransformDirectiveScopeRAII &) =
      delete;
  OMPTransformDirectiveScopeRAII &
  operator=(const OMPTransformDirectiveScopeRAII &) = delete;

    if (const auto *Dir = dyn_cast<OMPLoopBasedDirective>(S)) {
      Scope = new OMPLoopScope(CGF, *Dir);
      CGSI = new CodeGenFunction::CGCapturedStmtInfo(CR_OpenMP);
      CapInfoRAII = new CodeGenFunction::CGCapturedStmtRAII(CGF, CGSI);
  ~OMPTransformDirectiveScopeRAII() {
                     int MaxLevel, int Level = 0) {
  assert(Level < MaxLevel && "Too deep lookup during loop body codegen.");
  const Stmt *SimplifiedS = S->IgnoreContainers();
  if (const auto *CS = dyn_cast<CompoundStmt>(SimplifiedS)) {
        "LLVM IR generation of compound statement ('{}')");
    CodeGenFunction::LexicalScope Scope(CGF, S->getSourceRange());
    for (const Stmt *CurStmt : CS->body())
      emitBody(CGF, CurStmt, NextLoop, MaxLevel, Level);
  if (SimplifiedS == NextLoop) {
    if (auto *Dir = dyn_cast<OMPLoopTransformationDirective>(SimplifiedS))
      SimplifiedS = Dir->getTransformedStmt();
    if (const auto *CanonLoop = dyn_cast<OMPCanonicalLoop>(SimplifiedS))
      SimplifiedS = CanonLoop->getLoopStmt();
    if (const auto *For = dyn_cast<ForStmt>(SimplifiedS)) {
      assert(isa<CXXForRangeStmt>(SimplifiedS) &&
             "Expected canonical for loop or range-based for loop.");
      const auto *CXXFor = cast<CXXForRangeStmt>(SimplifiedS);
      CGF.EmitStmt(CXXFor->getLoopVarStmt());
      S = CXXFor->getBody();
  if (Level + 1 < MaxLevel) {
    emitBody(CGF, S, NextLoop, MaxLevel, Level + 1);

  RunCleanupsScope BodyScope(*this);
  for (const Expr *UE : D.updates())
    for (const Expr *UE : C->updates())
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
  for (const Expr *E : D.finals_conditions()) {
  OMPPrivateScope InscanScope(*this);
  bool IsInscanRegion = InscanScope.Privatize();
  if (IsInscanRegion) {
  if (EKind != OMPD_simd && !getLangOpts().OpenMPSimd)
      D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers();
           D.getLoopsNumber());
  BreakContinueStack.pop_back();

  std::unique_ptr<CodeGenFunction::CGCapturedStmtInfo> CSI =
      std::make_unique<CodeGenFunction::CGCapturedStmtInfo>(*S);
  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, CSI.get());
static llvm::CallInst *
  EffectiveArgs.reserve(Args.size() + 1);
  llvm::append_range(EffectiveArgs, Args);
  EffectiveArgs.push_back(Cap.second);

llvm::CanonicalLoopInfo *
  assert(Depth == 1 && "Nested loops with OpenMPIRBuilder not yet implemented");
  const Stmt *SyntacticalLoop = S->getLoopStmt();
  LexicalScope ForScope(*this, S->getSourceRange());
  const Stmt *BodyStmt;
  if (const auto *For = dyn_cast<ForStmt>(SyntacticalLoop)) {
    if (const Stmt *InitStmt = For->getInit())
    BodyStmt = For->getBody();
  } else if (const auto *RangeFor =
                 dyn_cast<CXXForRangeStmt>(SyntacticalLoop)) {
    if (const DeclStmt *RangeStmt = RangeFor->getRangeStmt())
    if (const DeclStmt *BeginStmt = RangeFor->getBeginStmt())
    if (const DeclStmt *EndStmt = RangeFor->getEndStmt())
    if (const DeclStmt *LoopVarStmt = RangeFor->getLoopVarStmt())
    BodyStmt = RangeFor->getBody();
    llvm_unreachable("Expected for-stmt or range-based for-stmt");
  const CapturedStmt *DistanceFunc = S->getDistanceFunc();
  auto BodyGen = [&, this](llvm::OpenMPIRBuilder::InsertPointTy CodeGenIP,
                           llvm::Value *IndVar) {
    const DeclRefExpr *LoopVarRef = S->getLoopVarRef();
    RunCleanupsScope BodyScope(*this);
  llvm::CanonicalLoopInfo *CL =
      OMPBuilder.createCanonicalLoop(Builder, BodyGen, DistVal);
  Builder.restoreIP(CL->getAfterIP());
  ForScope.ForceCleanup();
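// Illustrative example (not part of the original source): a canonical loop in
// the sense used above has the form
//   for (int i = LB; i < UB; i += Step)
//     body(i);
// where the trip count ("distance") can be computed before entering the loop,
// which is what the distance function and createCanonicalLoop rely on.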
                                          const Expr *IncExpr,
  const auto &OMPED = cast<OMPExecutableDirective>(S);
  const CapturedStmt *ICS = OMPED.getInnermostCapturedStmt();
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (RequiresCleanup)
  if (ExitBlock != LoopExit.getBlock()) {
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
  BreakContinueStack.pop_back();

  bool HasLinears = false;
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl());
      if (const auto *Ref =
        const auto *OrigVD = cast<VarDecl>(Ref->getDecl());
    if (const auto *CS = cast_or_null<BinaryOperator>(C->getCalcStep()))
      if (const auto *SaveRef = cast<DeclRefExpr>(CS->getLHS())) {

  llvm::BasicBlock *DoneBB = nullptr;
    auto IC = C->varlist_begin();
    for (const Expr *F : C->finals()) {
        if (llvm::Value *Cond = CondGen(*this)) {
          Builder.CreateCondBr(Cond, ThenBB, DoneBB);
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl());
      CodeGenFunction::OMPPrivateScope VarScope(*this);
      VarScope.addPrivate(OrigVD, OrigAddr);
      (void)VarScope.Privatize();
  if (const Expr *PostUpdate = C->getPostUpdateExpr())
    llvm::APInt ClauseAlignment(64, 0);
    if (const Expr *AlignmentExpr = Clause->getAlignment()) {
      ClauseAlignment = AlignmentCI->getValue();
    for (const Expr *E : Clause->varlist()) {
      llvm::APInt Alignment(ClauseAlignment);
      if (Alignment == 0) {
      assert((Alignment == 0 || Alignment.isPowerOf2()) &&
             "alignment is not power of 2");
      if (Alignment != 0) {
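// Illustrative example (not part of the original source):
//   #pragma omp simd aligned(p : 32)
//   for (int i = 0; i < n; ++i)
//     p[i] += 1.0;   // p is asserted to be 32-byte aligned for vectorization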
  auto I = S.private_counters().begin();
  for (const Expr *E : S.counters()) {
    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
    const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl());
      LocalDeclMap.erase(PrivateVD);
      (void)LoopScope.addPrivate(VD, VarEmission.getAllocatedAddress());
    (void)LoopScope.addPrivate(PrivateVD, VarEmission.getAllocatedAddress());
    if (!C->getNumForLoops())
    for (unsigned I = S.getLoopsNumber(),
                  E = C->getLoopNumIterations().size();
      const auto *DRE = cast<DeclRefExpr>(C->getLoopCounter(I));
      const auto *VD = cast<VarDecl>(DRE->getDecl());
      if (DRE->refersToEnclosingVariableOrCapture()) {
        (void)LoopScope.addPrivate(

                        const Expr *Cond, llvm::BasicBlock *TrueBlock,
                        llvm::BasicBlock *FalseBlock, uint64_t TrueCount) {
    CodeGenFunction::OMPPrivateScope PreCondScope(CGF);
    (void)PreCondScope.Privatize();
    for (const Expr *I : S.inits()) {
  CodeGenFunction::OMPMapVars PreCondVars;
  for (const Expr *E : S.dependent_counters()) {
           "dependent counter must not be an iterator.");
    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
    (void)PreCondVars.setVarAddr(CGF, VD, CounterAddr);
  (void)PreCondVars.apply(CGF);
  for (const Expr *E : S.dependent_inits()) {
  PreCondVars.restore(CGF);
  llvm::DenseSet<const VarDecl *> SIMDLCVs;
    const auto *LoopDirective = cast<OMPLoopDirective>(&D);
    for (const Expr *C : LoopDirective->counters()) {
    auto CurPrivate = C->privates().begin();
    for (const Expr *E : C->varlist()) {
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
      const auto *PrivateVD =
          cast<VarDecl>(cast<DeclRefExpr>(*CurPrivate)->getDecl());
        assert(IsRegistered && "linear var already registered as private");

    auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
    auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
    if (C->getKind() == OMPC_ORDER_concurrent)
  if ((EKind == OMPD_simd ||
           return C->getModifier() == OMPC_REDUCTION_inscan;

  llvm::BasicBlock *DoneBB = nullptr;
  auto IC = D.counters().begin();
  auto IPC = D.private_counters().begin();
  for (const Expr *F : D.finals()) {
    const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl());
    const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>((*IPC))->getDecl());
    const auto *CED = dyn_cast<OMPCapturedExprDecl>(OrigVD);
        OrigVD->hasGlobalStorage() || CED) {
        if (llvm::Value *Cond = CondGen(*this)) {
          Builder.CreateCondBr(Cond, ThenBB, DoneBB);
      OMPPrivateScope VarScope(*this);
      VarScope.addPrivate(OrigVD, OrigAddr);
      (void)VarScope.Privatize();
                                   CodeGenFunction::JumpDest LoopExit) {
    auto VDecl = cast<VarDecl>(Helper->getDecl());
  auto &&ThenGen = [&S, &SimdInitGen, &BodyCodeGen](CodeGenFunction &CGF,
    CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
    CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
  const Expr *IfCond = nullptr;
    for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
          (C->getNameModifier() == OMPD_unknown ||
           C->getNameModifier() == OMPD_simd)) {
        IfCond = C->getCondition();

    OMPLoopScope PreInitScope(CGF, S);
    llvm::BasicBlock *ContBlock = nullptr;
      emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
    const Expr *IVExpr = S.getIterationVariable();
    const auto *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
    if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
      CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
      CodeGenFunction::OMPPrivateScope LoopScope(CGF);
          CGF, S, CGF.EmitLValue(S.getIterationVariable()));
      (void)LoopScope.Privatize();
          S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(),
            emitOMPLoopBodyWithStopPoint(CGF, S,
                                         CodeGenFunction::JumpDest());
      if (HasLastprivateClause)
      LoopScope.restoreMap();
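// Illustrative example (not part of the original source): the if(simd: ...)
// handling above corresponds to source such as
//   #pragma omp simd if(simd: n > 16) simdlen(8)
//   for (int i = 0; i < n; ++i)
//     a[i] = b[i] + c[i];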
    if (!(isa<OMPSimdlenClause>(C) || isa<OMPSafelenClause>(C) ||
          isa<OMPOrderClause>(C) || isa<OMPAlignedClause>(C)))
  if (const auto *CanonLoop = dyn_cast<OMPCanonicalLoop>(S.getRawStmt())) {
    if (const Stmt *SyntacticalLoop = CanonLoop->getLoopStmt()) {
      for (const Stmt *SubStmt : SyntacticalLoop->children()) {
        if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(SubStmt)) {
            if (isa<OMPOrderedDirective>(CSSubStmt)) {

static llvm::MapVector<llvm::Value *, llvm::Value *>
  llvm::MapVector<llvm::Value *, llvm::Value *> AlignedVars;
    llvm::APInt ClauseAlignment(64, 0);
    if (const Expr *AlignmentExpr = Clause->getAlignment()) {
      ClauseAlignment = AlignmentCI->getValue();
    for (const Expr *E : Clause->varlist()) {
      llvm::APInt Alignment(ClauseAlignment);
      if (Alignment == 0) {
      assert((Alignment == 0 || Alignment.isPowerOf2()) &&
             "alignment is not power of 2");
        AlignedVars[PtrValue] = CGF.Builder.getInt64(Alignment.getSExtValue());
  bool UseOMPIRBuilder =
  if (UseOMPIRBuilder) {
    if (UseOMPIRBuilder) {
      llvm::MapVector<llvm::Value *, llvm::Value *> AlignedVars =
      const Stmt *Inner = S.getRawStmt();
      llvm::CanonicalLoopInfo *CLI =
          CGF.EmitOMPCollapsedCanonicalLoopNest(Inner, 1);
      llvm::OpenMPIRBuilder &OMPBuilder =
      llvm::ConstantInt *Simdlen = nullptr;
        auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
      llvm::ConstantInt *Safelen = nullptr;
        auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
      llvm::omp::OrderKind Order = llvm::omp::OrderKind::OMP_ORDER_unknown;
        if (C->getKind() == OpenMPOrderClauseKind::OMPC_ORDER_concurrent) {
          Order = llvm::omp::OrderKind::OMP_ORDER_concurrent;
      OMPBuilder.applySimd(CLI, AlignedVars,
                           nullptr, Order, Simdlen, Safelen);
    OMPLexicalScope Scope(CGF, S, OMPD_unknown);
    CodeGenFunction::ParentLoopDirectiveForScanRegion ScanRegion(CGF, S);
    OMPLexicalScope Scope(CGF, S, OMPD_unknown);

  OMPTransformDirectiveScopeRAII TileScope(*this, &S);
  OMPTransformDirectiveScopeRAII ReverseScope(*this, &S);
  OMPTransformDirectiveScopeRAII InterchangeScope(*this, &S);

  if (UseOMPIRBuilder) {
    const Stmt *Inner = S.getRawStmt();
    llvm::CanonicalLoopInfo *UnrolledCLI = nullptr;
      OMPBuilder.unrollLoopFull(DL, CLI);
      if (Expr *FactorExpr = PartialClause->getFactor()) {
        Factor = FactorExpr->EvaluateKnownConstInt(getContext()).getZExtValue();
        assert(Factor >= 1 && "Only positive factors are valid");
      OMPBuilder.unrollLoopPartial(DL, CLI, Factor,
                                   NeedsUnrolledCLI ? &UnrolledCLI : nullptr);
      OMPBuilder.unrollLoopHeuristic(DL, CLI);
    assert((!NeedsUnrolledCLI || UnrolledCLI) &&
           "NeedsUnrolledCLI implies UnrolledCLI to be set");
    if (Expr *FactorExpr = PartialClause->getFactor()) {
          FactorExpr->EvaluateKnownConstInt(getContext()).getZExtValue();
      assert(Factor >= 1 && "Only positive factors are valid");
void CodeGenFunction::EmitOMPOuterLoop(
    CodeGenFunction::OMPPrivateScope &LoopScope,
    const CodeGenFunction::OMPLoopArguments &LoopArgs,
  const Expr *IVExpr = S.getIterationVariable();
  llvm::Value *BoolCondVal = nullptr;
  if (!DynamicOrOrdered) {
    RT.emitForNext(*this, S.getBeginLoc(), IVSize, IVSigned, LoopArgs.IL,
                   LoopArgs.LB, LoopArgs.UB, LoopArgs.ST);
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (LoopScope.requiresCleanups())
  Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);
  if (ExitBlock != LoopExit.getBlock()) {
  if (DynamicOrOrdered)
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
        if (C->getKind() == OMPC_ORDER_concurrent)
      [&S, &LoopArgs, LoopExit, &CodeGenLoop, IVSize, IVSigned, &CodeGenOrdered,
        CGF.EmitOMPInnerLoop(
            S, LoopScope.requiresCleanups(), LoopArgs.Cond, LoopArgs.IncExpr,
              CodeGenLoop(CGF, S, LoopExit);
              CodeGenOrdered(CGF, Loc, IVSize, IVSigned);
  BreakContinueStack.pop_back();
  if (!DynamicOrOrdered) {
  auto &&CodeGen = [DynamicOrOrdered, &S, &LoopArgs](CodeGenFunction &CGF) {
    if (!DynamicOrOrdered)
      CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(),
  OMPCancelStack.emitExit(*this, EKind, CodeGen);
void CodeGenFunction::EmitOMPForOuterLoop(
    const OMPLoopArguments &LoopArgs,
           LoopArgs.Chunk != nullptr)) &&
         "static non-chunked schedule does not need outer loop");
  const Expr *IVExpr = S.getIterationVariable();
  if (DynamicOrOrdered) {
    const std::pair<llvm::Value *, llvm::Value *> DispatchBounds =
        CGDispatchBounds(*this, S, LoopArgs.LB, LoopArgs.UB);
    llvm::Value *LBVal = DispatchBounds.first;
    llvm::Value *UBVal = DispatchBounds.second;
                           IVSigned, Ordered, DipatchRTInputValues);
        IVSize, IVSigned, Ordered, LoopArgs.IL, LoopArgs.LB, LoopArgs.UB,
        LoopArgs.ST, LoopArgs.Chunk);

                          const unsigned IVSize,
                          const bool IVSigned) {
  OMPLoopArguments OuterLoopArgs(LoopArgs.LB, LoopArgs.UB, LoopArgs.ST,
                                 LoopArgs.IL, LoopArgs.Chunk, LoopArgs.EUB);
  OuterLoopArgs.IncExpr = S.getInc();
  OuterLoopArgs.Init = S.getInit();
  OuterLoopArgs.Cond = S.getCond();
  OuterLoopArgs.NextLB = S.getNextLowerBound();
  OuterLoopArgs.NextUB = S.getNextUpperBound();
  OuterLoopArgs.DKind = LoopArgs.DKind;
  EmitOMPOuterLoop(DynamicOrOrdered, IsMonotonic, S, LoopScope, OuterLoopArgs,
  if (DynamicOrOrdered) {

                          const unsigned IVSize, const bool IVSigned) {}
void CodeGenFunction::EmitOMPDistributeOuterLoop(
    OMPPrivateScope &LoopScope, const OMPLoopArguments &LoopArgs,
  const Expr *IVExpr = S.getIterationVariable();
      IVSize, IVSigned, false, LoopArgs.IL, LoopArgs.LB,
      LoopArgs.UB, LoopArgs.ST, LoopArgs.Chunk);
    IncExpr = S.getDistInc();
    IncExpr = S.getInc();
  OMPLoopArguments OuterLoopArgs;
  OuterLoopArgs.LB = LoopArgs.LB;
  OuterLoopArgs.UB = LoopArgs.UB;
  OuterLoopArgs.ST = LoopArgs.ST;
  OuterLoopArgs.IL = LoopArgs.IL;
  OuterLoopArgs.Chunk = LoopArgs.Chunk;
                            ? S.getCombinedEnsureUpperBound()
                            : S.getEnsureUpperBound();
  OuterLoopArgs.IncExpr = IncExpr;
                            ? S.getCombinedInit()
                            ? S.getCombinedCond()
                            ? S.getCombinedNextLowerBound()
                            : S.getNextLowerBound();
                            ? S.getCombinedNextUpperBound()
                            : S.getNextUpperBound();
  OuterLoopArgs.DKind = OMPD_distribute;
  EmitOMPOuterLoop(false, false, S,
                   LoopScope, OuterLoopArgs, CodeGenLoopContent,
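// Illustrative example (not part of the original source): the distribute
// lowering above is driven by combined directives such as
//   #pragma omp target teams distribute parallel for
//   for (int i = 0; i < n; ++i)
//     a[i] = b[i] * s;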
static std::pair<LValue, LValue>

static std::pair<llvm::Value *, llvm::Value *>
  llvm::Value *LBVal =
  llvm::Value *UBVal =
  return {LBVal, UBVal};

  const auto &Dir = cast<OMPLoopDirective>(S);
      CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedLowerBoundVariable()));
  llvm::Value *LBCast = CGF.Builder.CreateIntCast(
  CapturedVars.push_back(LBCast);
      CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedUpperBoundVariable()));
  llvm::Value *UBCast = CGF.Builder.CreateIntCast(
  CapturedVars.push_back(UBCast);
                                 CodeGenFunction::JumpDest LoopExit) {
  bool HasCancel = false;
    if (const auto *D = dyn_cast<OMPTeamsDistributeParallelForDirective>(&S))
      HasCancel = D->hasCancel();
    else if (const auto *D = dyn_cast<OMPDistributeParallelForDirective>(&S))
      HasCancel = D->hasCancel();
    else if (const auto *D =
                 dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(&S))
      HasCancel = D->hasCancel();
  CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, EKind, HasCancel);
                                   CGInlinedWorksharingLoop,

  OMPLexicalScope Scope(*this, S, OMPD_parallel);

  OMPLexicalScope Scope(*this, S, OMPD_parallel);

  OMPLexicalScope Scope(*this, S, OMPD_unknown);

  llvm::Constant *Addr;
      S, ParentName, Fn, Addr, true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
struct ScheduleKindModifiersTy {
  const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
  const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl());
  if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
  bool HasLastprivateClause;
    OMPLoopScope PreInitScope(*this, S);
    llvm::BasicBlock *ContBlock = nullptr;
      emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
    RunCleanupsScope DoacrossCleanupScope(*this);
    bool Ordered = false;
      if (OrderedClause->getNumForLoops())
    llvm::DenseSet<const Expr *> EmittedFinals;
    std::pair<LValue, LValue> Bounds = CodeGenLoopBounds(*this, S);
    LValue LB = Bounds.first;
    LValue UB = Bounds.second;
      OMPPrivateScope LoopScope(*this);
            *this, S.getBeginLoc(), OMPD_unknown, false,
          *this, S, EmitLValue(S.getIterationVariable()));
      (void)LoopScope.Privatize();
      const Expr *ChunkExpr = nullptr;
        ScheduleKind.Schedule = C->getScheduleKind();
        ScheduleKind.M1 = C->getFirstScheduleModifier();
        ScheduleKind.M2 = C->getSecondScheduleModifier();
        ChunkExpr = C->getChunkSize();
          *this, S, ScheduleKind.Schedule, ChunkExpr);
      bool HasChunkSizeOne = false;
      llvm::Value *Chunk = nullptr;
                             S.getIterationVariable()->getType(),
          llvm::APSInt EvaluatedChunk = Result.Val.getInt();
          HasChunkSizeOne = (EvaluatedChunk.getLimitedValue() == 1);
      bool StaticChunkedOne =
           Chunk != nullptr) &&
          (ScheduleKind.Schedule == OMPC_SCHEDULE_static &&
           !(ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_nonmonotonic ||
             ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_nonmonotonic)) ||
          ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_monotonic ||
          ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_monotonic;
            Chunk != nullptr) ||
           StaticChunkedOne) &&
            if (C->getKind() == OMPC_ORDER_concurrent)
              CGF.LoopStack.setParallel(true);
            [IVSize, IVSigned, Ordered, IL, LB, UB, ST, StaticChunkedOne, Chunk,
                  IVSize, IVSigned, Ordered, IL.getAddress(), LB.getAddress(),
                  UB.getAddress(), ST.getAddress(),
                  StaticChunkedOne ? Chunk : nullptr);
              CGF.CGM.getOpenMPRuntime().emitForStaticInit(
                  CGF, S.getBeginLoc(), EKind, ScheduleKind, StaticInit);
              if (!StaticChunkedOne)
                CGF.EmitIgnoredExpr(S.getEnsureUpperBound());
              CGF.EmitIgnoredExpr(S.getInit());
              CGF.EmitOMPInnerLoop(
                  S, LoopScope.requiresCleanups(),
                  StaticChunkedOne ? S.getCombinedParForInDistCond()
                  StaticChunkedOne ? S.getDistInc() : S.getInc(),
                    emitOMPLoopBodyWithStopPoint(CGF, S, LoopExit);
          CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(),
        OMPCancelStack.emitExit(*this, EKind, CodeGen);
        OMPLoopArguments LoopArguments(LB.getAddress(), UB.getAddress(),
                                       ST.getAddress(), IL.getAddress(), Chunk,
        LoopArguments.DKind = OMPD_for;
        EmitOMPForOuterLoop(ScheduleKind, IsMonotonic, S, LoopScope, Ordered,
                            LoopArguments, CGDispatchBounds);
            return CGF.Builder.CreateIsNotNull(
                           ? OMPD_parallel_for_simd
            return CGF.Builder.CreateIsNotNull(
      if (HasLastprivateClause)
      LoopScope.restoreMap();
          return CGF.Builder.CreateIsNotNull(
    DoacrossCleanupScope.ForceCleanup();
  return HasLastprivateClause;
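// Illustrative example (not part of the original source): the StaticChunkedOne
// special case above corresponds to
//   #pragma omp for schedule(static, 1)
//   for (int i = 0; i < n; ++i)
//     work(i);
// whereas dynamic, guided, or ordered schedules take the outer-loop path.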
static std::pair<LValue, LValue>
  const auto &LS = cast<OMPLoopDirective>(S);

static std::pair<llvm::Value *, llvm::Value *>
  const auto &LS = cast<OMPLoopDirective>(S);
  const Expr *IVExpr = LS.getIterationVariable();
  llvm::Value *LBVal = CGF.Builder.getIntN(IVSize, 0);
  return {LBVal, UBVal};
    llvm::function_ref<llvm::Value *(CodeGenFunction &)> NumIteratorsGen) {
  llvm::Value *OMPScanNumIterations = CGF.Builder.CreateIntCast(
      NumIteratorsGen(CGF), CGF.SizeTy, false);
    assert(C->getModifier() == OMPC_REDUCTION_inscan &&
           "Only inscan reductions are expected.");
    Shareds.append(C->varlist_begin(), C->varlist_end());
    Privates.append(C->privates().begin(), C->privates().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
    CopyArrayTemps.append(C->copy_array_temps().begin(),
                          C->copy_array_temps().end());
    auto *ITA = CopyArrayTemps.begin();
    for (const Expr *IRef : Privates) {
      const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
        if (PrivateVD->getType()->isVariablyModifiedType()) {
          CodeGenFunction::OpaqueValueMapping DimMapping(
              cast<OpaqueValueExpr>(
                  cast<VariableArrayType>((*ITA)->getType()->getAsArrayTypeUnsafe())
        CGF.EmitVarDecl(*cast<VarDecl>(cast<DeclRefExpr>(*ITA)->getDecl()));

    llvm::function_ref<llvm::Value *(CodeGenFunction &)> NumIteratorsGen) {
  llvm::Value *OMPScanNumIterations = CGF.Builder.CreateIntCast(
      NumIteratorsGen(CGF), CGF.SizeTy, false);
    assert(C->getModifier() == OMPC_REDUCTION_inscan &&
           "Only inscan reductions are expected.");
    Shareds.append(C->varlist_begin(), C->varlist_end());
    LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    Privates.append(C->privates().begin(), C->privates().end());
    CopyOps.append(C->copy_ops().begin(), C->copy_ops().end());
    CopyArrayElems.append(C->copy_array_elems().begin(),
                          C->copy_array_elems().end());
    llvm::Value *OMPLast = CGF.Builder.CreateNSWSub(
        OMPScanNumIterations,
        llvm::ConstantInt::get(CGF.SizeTy, 1, false));
    for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
      const Expr *PrivateExpr = Privates[I];
      const Expr *OrigExpr = Shareds[I];
      const Expr *CopyArrayElem = CopyArrayElems[I];
      CodeGenFunction::OpaqueValueMapping IdxMapping(
          cast<OpaqueValueExpr>(
              cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
          cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
          cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), CopyOps[I]);
  llvm::Value *OMPScanNumIterations = CGF.Builder.CreateIntCast(
      NumIteratorsGen(CGF), CGF.SizeTy, false);
    assert(C->getModifier() == OMPC_REDUCTION_inscan &&
           "Only inscan reductions are expected.");
    Privates.append(C->privates().begin(), C->privates().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
    LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    CopyArrayElems.append(C->copy_array_elems().begin(),
                          C->copy_array_elems().end());
  CodeGenFunction::ParentLoopDirectiveForScanRegion ScanRegion(CGF, S);
  CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
  auto &&CodeGen = [&S, OMPScanNumIterations, &LHSs, &RHSs, &CopyArrayElems,
    llvm::BasicBlock *InputBB = CGF.Builder.GetInsertBlock();
    llvm::BasicBlock *LoopBB = CGF.createBasicBlock("omp.outer.log.scan.body");
    llvm::BasicBlock *ExitBB = CGF.createBasicBlock("omp.outer.log.scan.exit");
        CGF.CGM.getIntrinsic(llvm::Intrinsic::log2, CGF.DoubleTy);
        CGF.Builder.CreateUIToFP(OMPScanNumIterations, CGF.DoubleTy);
    llvm::Value *LogVal = CGF.EmitNounwindRuntimeCall(F, Arg);
    F = CGF.CGM.getIntrinsic(llvm::Intrinsic::ceil, CGF.DoubleTy);
    LogVal = CGF.EmitNounwindRuntimeCall(F, LogVal);
    LogVal = CGF.Builder.CreateFPToUI(LogVal, CGF.IntTy);
    llvm::Value *NMin1 = CGF.Builder.CreateNUWSub(
        OMPScanNumIterations, llvm::ConstantInt::get(CGF.SizeTy, 1));
    CGF.EmitBlock(LoopBB);
    auto *Counter = CGF.Builder.CreatePHI(CGF.IntTy, 2);
    auto *Pow2K = CGF.Builder.CreatePHI(CGF.SizeTy, 2);
    Counter->addIncoming(llvm::ConstantInt::get(CGF.IntTy, 0), InputBB);
    Pow2K->addIncoming(llvm::ConstantInt::get(CGF.SizeTy, 1), InputBB);
    llvm::BasicBlock *InnerLoopBB =
        CGF.createBasicBlock("omp.inner.log.scan.body");
    llvm::BasicBlock *InnerExitBB =
        CGF.createBasicBlock("omp.inner.log.scan.exit");
    llvm::Value *CmpI = CGF.Builder.CreateICmpUGE(NMin1, Pow2K);
    CGF.Builder.CreateCondBr(CmpI, InnerLoopBB, InnerExitBB);
    CGF.EmitBlock(InnerLoopBB);
    auto *IVal = CGF.Builder.CreatePHI(CGF.SizeTy, 2);
    IVal->addIncoming(NMin1, LoopBB);
    CodeGenFunction::OMPPrivateScope PrivScope(CGF);
    auto *ILHS = LHSs.begin();
    auto *IRHS = RHSs.begin();
    for (const Expr *CopyArrayElem : CopyArrayElems) {
      const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
      const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
        CodeGenFunction::OpaqueValueMapping IdxMapping(
            cast<OpaqueValueExpr>(
                cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
        LHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress();
      PrivScope.addPrivate(LHSVD, LHSAddr);
        llvm::Value *OffsetIVal = CGF.Builder.CreateNUWSub(IVal, Pow2K);
        CodeGenFunction::OpaqueValueMapping IdxMapping(
            cast<OpaqueValueExpr>(
                cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
        RHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress();
      PrivScope.addPrivate(RHSVD, RHSAddr);
    PrivScope.Privatize();
    CGF.CGM.getOpenMPRuntime().emitReduction(
        CGF, S.getEndLoc(), Privates, LHSs, RHSs, ReductionOps,
        {true, true, OMPD_unknown});
    llvm::Value *NextIVal =
        CGF.Builder.CreateNUWSub(IVal, llvm::ConstantInt::get(CGF.SizeTy, 1));
    IVal->addIncoming(NextIVal, CGF.Builder.GetInsertBlock());
    CmpI = CGF.Builder.CreateICmpUGE(NextIVal, Pow2K);
    CGF.Builder.CreateCondBr(CmpI, InnerLoopBB, InnerExitBB);
    CGF.EmitBlock(InnerExitBB);
        CGF.Builder.CreateNUWAdd(Counter, llvm::ConstantInt::get(CGF.IntTy, 1));
    Counter->addIncoming(Next, CGF.Builder.GetInsertBlock());
    llvm::Value *NextPow2K =
        CGF.Builder.CreateShl(Pow2K, 1, "", true);
    Pow2K->addIncoming(NextPow2K, CGF.Builder.GetInsertBlock());
    llvm::Value *Cmp = CGF.Builder.CreateICmpNE(Next, LogVal);
    CGF.Builder.CreateCondBr(Cmp, LoopBB, ExitBB);
    CGF.EmitBlock(ExitBB);
  CGF.CGM.getOpenMPRuntime().emitMasterRegion(CGF, CodeGen, S.getBeginLoc());
  CGF.CGM.getOpenMPRuntime().emitBarrierCall(
      CGF, S.getBeginLoc(), OMPD_unknown, false,
  CGF.OMPFirstScanLoop = false;
  bool HasLastprivates;
    return C->getModifier() == OMPC_REDUCTION_inscan;
  CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
  OMPLoopScope LoopScope(CGF, S);
  const auto &&FirstGen = [&S, HasCancel, EKind](CodeGenFunction &CGF) {
    CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, EKind, HasCancel);
  const auto &&SecondGen = [&S, HasCancel, EKind,
    CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, EKind, HasCancel);
  CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, EKind, HasCancel);
  return HasLastprivates;
    if (isa<OMPNowaitClause, OMPBindClause>(C))
    if (auto *SC = dyn_cast<OMPScheduleClause>(C)) {
      switch (SC->getScheduleKind()) {
      case OMPC_SCHEDULE_auto:
      case OMPC_SCHEDULE_dynamic:
      case OMPC_SCHEDULE_runtime:
      case OMPC_SCHEDULE_guided:
      case OMPC_SCHEDULE_static:
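// Note (added commentary): the helper below maps the clause-level
// OpenMPScheduleClauseKind values onto the llvm::omp::ScheduleKind values the
// OpenMPIRBuilder expects; the remaining (unspecified) clause kind maps to
// OMP_SCHEDULE_Default and any unhandled value is unreachable.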
static llvm::omp::ScheduleKind
  switch (ScheduleClauseKind) {
    return llvm::omp::OMP_SCHEDULE_Default;
  case OMPC_SCHEDULE_auto:
    return llvm::omp::OMP_SCHEDULE_Auto;
  case OMPC_SCHEDULE_dynamic:
    return llvm::omp::OMP_SCHEDULE_Dynamic;
  case OMPC_SCHEDULE_guided:
    return llvm::omp::OMP_SCHEDULE_Guided;
  case OMPC_SCHEDULE_runtime:
    return llvm::omp::OMP_SCHEDULE_Runtime;
  case OMPC_SCHEDULE_static:
    return llvm::omp::OMP_SCHEDULE_Static;
  llvm_unreachable("Unhandled schedule kind");
  bool HasLastprivates = false;
  auto &&CodeGen = [&S, &CGM, HasCancel, &HasLastprivates,
    if (UseOMPIRBuilder) {
      llvm::omp::ScheduleKind SchedKind = llvm::omp::OMP_SCHEDULE_Default;
      llvm::Value *ChunkSize = nullptr;
      if (const Expr *ChunkSizeExpr = SchedClause->getChunkSize())
      const Stmt *Inner = S.getRawStmt();
      llvm::CanonicalLoopInfo *CLI =
      llvm::OpenMPIRBuilder &OMPBuilder =
      llvm::OpenMPIRBuilder::InsertPointTy AllocaIP(
      OMPBuilder.applyWorkshareLoop(
          CGF.Builder.getCurrentDebugLocation(), CLI, AllocaIP, NeedsBarrier,
          SchedKind, ChunkSize, false,
  OMPLexicalScope Scope(CGF, S, OMPD_unknown);
  if (!UseOMPIRBuilder) {
  bool HasLastprivates = false;
  OMPLexicalScope Scope(*this, S, OMPD_unknown);
                             llvm::Value *Init = nullptr) {
  const Stmt *CapturedStmt = S.getInnermostCapturedStmt()->getCapturedStmt();
  bool HasLastprivates = false;
    C.getIntTypeForBitwidth(32, 1);
    CGF.Builder.getInt32(0));
    llvm::ConstantInt *GlobalUBVal = CS != nullptr
                                         ? CGF.Builder.getInt32(CS->size() - 1)
                                         : CGF.Builder.getInt32(0);
    CGF.Builder.getInt32(1));
    CGF.Builder.getInt32(0));
    CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV);
    CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB);
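    // Note (added commentary): 'omp sections' is lowered here as a statically
    // scheduled worksharing loop over the section indices 0..N-1; the loop
    // body below switches on the current iteration value and branches to the
    // basic block generated for the matching '#pragma omp section'.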
    llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.sections.exit");
        CGF.Builder.CreateSwitch(CGF.EmitLoadOfScalar(IV, S.getBeginLoc()),
                                 ExitBB, CS == nullptr ? 1 : CS->size());
      unsigned CaseNumber = 0;
        auto CaseBB = CGF.createBasicBlock(".omp.sections.case");
        CGF.EmitBlock(CaseBB);
        SwitchStmt->addCase(CGF.Builder.getInt32(CaseNumber), CaseBB);
        CGF.EmitStmt(SubStmt);
        CGF.EmitBranch(ExitBB);
      llvm::BasicBlock *CaseBB = CGF.createBasicBlock(".omp.sections.case");
      CGF.EmitBlock(CaseBB);
      SwitchStmt->addCase(CGF.Builder.getInt32(0), CaseBB);
      CGF.EmitBranch(ExitBB);
    CGF.EmitBlock(ExitBB, true);
    CodeGenFunction::OMPPrivateScope LoopScope(CGF);
    if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) {
      CGF.CGM.getOpenMPRuntime().emitBarrierCall(
          CGF, S.getBeginLoc(), OMPD_unknown, false,
    CGF.EmitOMPPrivateClause(S, LoopScope);
    HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
    CGF.EmitOMPReductionClauseInit(S, LoopScope);
    (void)LoopScope.Privatize();
    CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);
    ScheduleKind.Schedule = OMPC_SCHEDULE_static;
    CGF.CGM.getOpenMPRuntime().emitForStaticInit(CGF, S.getBeginLoc(), EKind,
                                                 ScheduleKind, StaticInit);
    llvm::Value *UBVal = CGF.EmitLoadOfScalar(UB, S.getBeginLoc());
    llvm::Value *MinUBGlobalUB = CGF.Builder.CreateSelect(
        CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal);
    CGF.EmitStoreOfScalar(MinUBGlobalUB, UB);
    CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getBeginLoc()), IV);
    CGF.EmitOMPInnerLoop(S, false, Cond, Inc, BodyGen,
    CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(),
    CGF.OMPCancelStack.emitExit(CGF, EKind, CodeGen);
    CGF.EmitOMPReductionClauseFinal(S, OMPD_parallel);
      return CGF.Builder.CreateIsNotNull(
  if (HasLastprivates)
  bool HasCancel = false;
  if (auto *OSD = dyn_cast<OMPSectionsDirective>(&S))
    HasCancel = OSD->hasCancel();
  else if (auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&S))
    HasCancel = OPSD->hasCancel();
  OMPCancelStackRAII CancelRegion(*this, EKind, HasCancel);
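  // Note (added commentary): when the OpenMPIRBuilder path is taken below,
  // each '#pragma omp section' body is wrapped in a callback and the whole
  // list is handed to OMPBuilder.createSections together with privatization
  // and finalization callbacks, instead of the switch-based lowering above.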
    using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
    using BodyGenCallbackTy = llvm::OpenMPIRBuilder::StorableBodyGenCallbackTy;
    auto FiniCB = [this](InsertPointTy IP) {
    const CapturedStmt *ICS = S.getInnermostCapturedStmt();
    const Stmt *CapturedStmt = S.getInnermostCapturedStmt()->getCapturedStmt();
      auto SectionCB = [this, SubStmt](InsertPointTy AllocaIP,
                                       InsertPointTy CodeGenIP) {
          *this, SubStmt, AllocaIP, CodeGenIP, "section");
      SectionCBVector.push_back(SectionCB);
      auto SectionCB = [this, CapturedStmt](InsertPointTy AllocaIP,
                                            InsertPointTy CodeGenIP) {
      SectionCBVector.push_back(SectionCB);
    auto PrivCB = [](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
                     llvm::Value &, llvm::Value &Val, llvm::Value *&ReplVal) {
    CGCapturedStmtInfo CGSI(*ICS, CR_OpenMP);
    CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI);
    llvm::OpenMPIRBuilder::InsertPointTy AllocaIP(
    Builder.restoreIP(OMPBuilder.createSections(
        Builder, AllocaIP, SectionCBVector, PrivCB, FiniCB, S.hasCancel(),
  OMPLexicalScope Scope(*this, S, OMPD_unknown);
    using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
    const Stmt *SectionRegionBodyStmt = S.getAssociatedStmt();
    auto FiniCB = [this](InsertPointTy IP) {
    auto BodyGenCB = [SectionRegionBodyStmt, this](InsertPointTy AllocaIP,
                                                   InsertPointTy CodeGenIP) {
        *this, SectionRegionBodyStmt, AllocaIP, CodeGenIP, "section");
    LexicalScope Scope(*this, S.getSourceRange());
    Builder.restoreIP(OMPBuilder.createSection(Builder, BodyGenCB, FiniCB));
  LexicalScope Scope(*this, S.getSourceRange());
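  // Note (added commentary): for 'omp single', the copyprivate variable list
  // together with its destination/source expressions and assignment ops is
  // collected below and passed to the runtime so the values produced by the
  // single executing thread can be copied out to the other threads in the
  // team.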
    CopyprivateVars.append(C->varlist_begin(), C->varlist_end());
    DestExprs.append(C->destination_exprs().begin(),
                     C->destination_exprs().end());
    SrcExprs.append(C->source_exprs().begin(), C->source_exprs().end());
    AssignmentOps.append(C->assignment_ops().begin(),
                         C->assignment_ops().end());
    CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
    OMPLexicalScope Scope(*this, S, OMPD_unknown);
                                            CopyprivateVars, DestExprs,
                                            SrcExprs, AssignmentOps);
  if (!S.getSingleClause<OMPNowaitClause>() && CopyprivateVars.empty()) {
        *this, S.getBeginLoc(),
    using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
    const Stmt *MasterRegionBodyStmt = S.getAssociatedStmt();
    auto FiniCB = [this](InsertPointTy IP) {
    auto BodyGenCB = [MasterRegionBodyStmt, this](InsertPointTy AllocaIP,
                                                  InsertPointTy CodeGenIP) {
        *this, MasterRegionBodyStmt, AllocaIP, CodeGenIP, "master");
    LexicalScope Scope(*this, S.getSourceRange());
    Builder.restoreIP(OMPBuilder.createMaster(Builder, BodyGenCB, FiniCB));
  LexicalScope Scope(*this, S.getSourceRange());
  Expr *Filter = nullptr;
    Filter = FilterClause->getThreadID();
    using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
    const Stmt *MaskedRegionBodyStmt = S.getAssociatedStmt();
      Filter = FilterClause->getThreadID();
    llvm::Value *FilterVal = Filter
    auto FiniCB = [this](InsertPointTy IP) {
    auto BodyGenCB = [MaskedRegionBodyStmt, this](InsertPointTy AllocaIP,
                                                  InsertPointTy CodeGenIP) {
        *this, MaskedRegionBodyStmt, AllocaIP, CodeGenIP, "masked");
    LexicalScope Scope(*this, S.getSourceRange());
        OMPBuilder.createMasked(Builder, BodyGenCB, FiniCB, FilterVal));
  LexicalScope Scope(*this, S.getSourceRange());
    using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
    const Stmt *CriticalRegionBodyStmt = S.getAssociatedStmt();
    const Expr *Hint = nullptr;
    if (const auto *HintClause = S.getSingleClause<OMPHintClause>())
      Hint = HintClause->getHint();
    llvm::Value *HintInst = nullptr;
    auto FiniCB = [this](InsertPointTy IP) {
    auto BodyGenCB = [CriticalRegionBodyStmt, this](InsertPointTy AllocaIP,
                                                    InsertPointTy CodeGenIP) {
        *this, CriticalRegionBodyStmt, AllocaIP, CodeGenIP, "critical");
    LexicalScope Scope(*this, S.getSourceRange());
    Builder.restoreIP(OMPBuilder.createCritical(
        Builder, BodyGenCB, FiniCB, S.getDirectiveName().getAsString(),
    CGF.EmitStmt(S.getAssociatedStmt());
  const Expr *Hint = nullptr;
  if (const auto *HintClause = S.getSingleClause<OMPHintClause>())
    Hint = HintClause->getHint();
  LexicalScope Scope(*this, S.getSourceRange());
                                            S.getDirectiveName().getAsString(),
                                            CodeGen, S.getBeginLoc(), Hint);
  CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGSI);
  OMPLoopScope LoopScope(CGF, S);
    return C->getModifier() == OMPC_REDUCTION_inscan;
  CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGSI);
  OMPLoopScope LoopScope(CGF, S);
    return C->getModifier() == OMPC_REDUCTION_inscan;
    OMPPrivateScope PrivateScope(CGF);
    (void)PrivateScope.Privatize();
    OMPPrivateScope PrivateScope(CGF);
    (void)PrivateScope.Privatize();
    CGF.EmitSections(S);
class CheckVarsEscapingUntiedTaskDeclContext final
  explicit CheckVarsEscapingUntiedTaskDeclContext() = default;
  virtual ~CheckVarsEscapingUntiedTaskDeclContext() = default;
  void VisitDeclStmt(const DeclStmt *S) {
    for (const Decl *D : S->decls()) {
      if (const auto *VD = dyn_cast_or_null<VarDecl>(D))
        PrivateDecls.push_back(VD);
  void VisitBlockExpr(const BlockExpr *) {}
  void VisitStmt(const Stmt *S) {
    for (const Stmt *Child : S->children())
  bool OmpAllMemory = false;
        return C->getDependencyKind() == OMPC_DEPEND_outallmemory ||
               C->getDependencyKind() == OMPC_DEPEND_inoutallmemory;
    OmpAllMemory = true;
    Data.Dependences.emplace_back(OMPC_DEPEND_outallmemory,
    if (Kind == OMPC_DEPEND_outallmemory || Kind == OMPC_DEPEND_inoutallmemory)
    if (OmpAllMemory && (Kind == OMPC_DEPEND_out || Kind == OMPC_DEPEND_inout))
    Data.Dependences.emplace_back(C->getDependencyKind(), C->getModifier());
    DD.DepExprs.append(C->varlist_begin(), C->varlist_end());
  const CapturedStmt *CS = S.getCapturedStmt(CapturedRegion);
  auto PartId = std::next(I);
  auto TaskT = std::next(I, 4);
    const Expr *Cond = Clause->getCondition();
      Data.Final.setInt(CondConstant);
    Data.Final.setInt(false);
    const Expr *Prio = Clause->getPriority();
    Data.Priority.setInt(true);
  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
    auto IRef = C->varlist_begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        Data.PrivateVars.push_back(*IRef);
        Data.PrivateCopies.push_back(IInit);
  EmittedAsPrivate.clear();
    auto IRef = C->varlist_begin();
    auto IElemInitRef = C->inits().begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        Data.FirstprivateVars.push_back(*IRef);
        Data.FirstprivateCopies.push_back(IInit);
        Data.FirstprivateInits.push_back(*IElemInitRef);
  llvm::MapVector<const VarDecl *, const DeclRefExpr *> LastprivateDstsOrigs;
    auto IRef = C->varlist_begin();
    auto ID = C->destination_exprs().begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        Data.LastprivateVars.push_back(*IRef);
        Data.LastprivateCopies.push_back(IInit);
        LastprivateDstsOrigs.insert(
            std::make_pair(cast<VarDecl>(cast<DeclRefExpr>(*ID)->getDecl()),
                           cast<DeclRefExpr>(*IRef)));
    Data.ReductionVars.append(C->varlist_begin(), C->varlist_end());
    Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end());
    Data.ReductionCopies.append(C->privates().begin(), C->privates().end());
    Data.ReductionOps.append(C->reduction_ops().begin(),
                             C->reduction_ops().end());
    LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
      *this, S.getBeginLoc(), LHSs, RHSs, Data);
    CheckVarsEscapingUntiedTaskDeclContext Checker;
    Checker.Visit(S.getInnermostCapturedStmt()->getCapturedStmt());
    Data.PrivateLocals.append(Checker.getPrivateDecls().begin(),
                              Checker.getPrivateDecls().end());
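  // Note (added commentary): the CodeGen lambda below runs inside the
  // outlined task entry. It rewires captured variables to their task-private
  // copies, patches debug intrinsics/records so shared captures are described
  // relative to the task context argument, materializes untied-task locals,
  // and privatizes task reduction items before emitting the task body.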
  auto &&CodeGen = [&Data, &S, CS, &BodyGen, &LastprivateDstsOrigs,
    llvm::MapVector<CanonicalDeclPtr<const VarDecl>,
                    std::pair<Address, Address>>
    OMPPrivateScope Scope(CGF);
    if (auto *DI = CGF.getDebugInfo()) {
      llvm::SmallDenseMap<const VarDecl *, FieldDecl *> CaptureFields =
          CGF.CapturedStmtInfo->getCaptureFields();
      llvm::Value *ContextValue = CGF.CapturedStmtInfo->getContextValue();
      if (CaptureFields.size() && ContextValue) {
        unsigned CharWidth = CGF.getContext().getCharWidth();
        for (auto It = CaptureFields.begin(); It != CaptureFields.end(); ++It) {
          const VarDecl *SharedVar = It->first;
          CGF.getContext().getASTRecordLayout(CaptureRecord);
          if (CGF.CGM.getCodeGenOpts().hasReducedDebugInfo())
            (void)DI->EmitDeclareOfAutoVariable(SharedVar, ContextValue,
                                                CGF.Builder, false);
          auto UpdateExpr = [](llvm::LLVMContext &Ctx, auto *Declare,
            Ops.push_back(llvm::dwarf::DW_OP_plus_uconst);
            Ops.push_back(Offset);
            Ops.push_back(llvm::dwarf::DW_OP_deref);
            Declare->setExpression(llvm::DIExpression::get(Ctx, Ops));
          llvm::Instruction &Last = CGF.Builder.GetInsertBlock()->back();
          if (auto DDI = dyn_cast<llvm::DbgVariableIntrinsic>(&Last))
            UpdateExpr(DDI->getContext(), DDI, Offset);
          assert(!Last.isTerminator() && "unexpected terminator");
               CGF.Builder.GetInsertBlock()->getTrailingDbgRecords()) {
            for (llvm::DbgVariableRecord &DVR : llvm::reverse(
                     llvm::filterDbgVars(Marker->getDbgRecordRange()))) {
              UpdateExpr(Last.getContext(), &DVR, Offset);
    if (!Data.PrivateVars.empty() || !Data.FirstprivateVars.empty() ||
        !Data.LastprivateVars.empty() || !Data.PrivateLocals.empty()) {
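      // Note (added commentary): parameters 2 and 3 of the task entry are,
      // respectively, the pointer to the privates block and the copy helper
      // function generated for this task. Calling that helper with the
      // ".priv.ptr.addr"/".firstpriv.ptr.addr"/".lastpriv.ptr.addr"/
      // ".local.ptr.addr" slots created below fills them with the addresses
      // of the task-private copies, which are then loaded and registered in
      // the private scope.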
      enum { PrivatesParam = 2, CopyFnParam = 3 };
      llvm::Value *CopyFn = CGF.Builder.CreateLoad(
          CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam)));
      llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(
          CS->getCapturedDecl()->getParam(PrivatesParam)));
      CallArgs.push_back(PrivatesPtr);
      ParamTypes.push_back(PrivatesPtr->getType());
      for (const Expr *E : Data.PrivateVars) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
            CGF.getContext().getPointerType(E->getType()), ".priv.ptr.addr");
        PrivatePtrs.emplace_back(VD, PrivatePtr);
        ParamTypes.push_back(PrivatePtr.getType());
      for (const Expr *E : Data.FirstprivateVars) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
            CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
                              ".firstpriv.ptr.addr");
        PrivatePtrs.emplace_back(VD, PrivatePtr);
        FirstprivatePtrs.emplace_back(VD, PrivatePtr);
        ParamTypes.push_back(PrivatePtr.getType());
      for (const Expr *E : Data.LastprivateVars) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
            CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
                              ".lastpriv.ptr.addr");
        PrivatePtrs.emplace_back(VD, PrivatePtr);
        ParamTypes.push_back(PrivatePtr.getType());
        Ty = CGF.getContext().getPointerType(Ty);
        Ty = CGF.getContext().getPointerType(Ty);
            CGF.getContext().getPointerType(Ty), ".local.ptr.addr");
        auto Result = UntiedLocalVars.insert(
        if (Result.second == false)
          *Result.first = std::make_pair(
        ParamTypes.push_back(PrivatePtr.getType());
      auto *CopyFnTy = llvm::FunctionType::get(CGF.Builder.getVoidTy(),
      CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
          CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs);
      for (const auto &Pair : LastprivateDstsOrigs) {
        const auto *OrigVD = cast<VarDecl>(Pair.second->getDecl());
                        CGF.CapturedStmtInfo->lookup(OrigVD) != nullptr,
                        Pair.second->getExprLoc());
        Scope.addPrivate(Pair.first, CGF.EmitLValue(&DRE).getAddress());
      for (const auto &Pair : PrivatePtrs) {
            CGF.Builder.CreateLoad(Pair.second),
            CGF.ConvertTypeForMem(Pair.first->getType().getNonReferenceType()),
            CGF.getContext().getDeclAlign(Pair.first));
        Scope.addPrivate(Pair.first, Replacement);
        if (auto *DI = CGF.getDebugInfo())
          if (CGF.CGM.getCodeGenOpts().hasReducedDebugInfo())
            (void)DI->EmitDeclareOfAutoVariable(
                Pair.first, Pair.second.getBasePointer(), CGF.Builder,
      for (auto &Pair : UntiedLocalVars) {
        QualType VDType = Pair.first->getType().getNonReferenceType();
        if (Pair.first->getType()->isLValueReferenceType())
          VDType = CGF.getContext().getPointerType(VDType);
          llvm::Value *Ptr = CGF.Builder.CreateLoad(Pair.second.first);
              CGF.ConvertTypeForMem(CGF.getContext().getPointerType(VDType)),
              CGF.getPointerAlign());
          Pair.second.first = Replacement;
          Ptr = CGF.Builder.CreateLoad(Replacement);
          Replacement = Address(Ptr, CGF.ConvertTypeForMem(VDType),
                                CGF.getContext().getDeclAlign(Pair.first));
          Pair.second.second = Replacement;
          llvm::Value *Ptr = CGF.Builder.CreateLoad(Pair.second.first);
          Address Replacement(Ptr, CGF.ConvertTypeForMem(VDType),
                              CGF.getContext().getDeclAlign(Pair.first));
          Pair.second.first = Replacement;
    if (Data.Reductions) {
      OMPPrivateScope FirstprivateScope(CGF);
      for (const auto &Pair : FirstprivatePtrs) {
            CGF.Builder.CreateLoad(Pair.second),
            CGF.ConvertTypeForMem(Pair.first->getType().getNonReferenceType()),
            CGF.getContext().getDeclAlign(Pair.first));
        FirstprivateScope.addPrivate(Pair.first, Replacement);
      (void)FirstprivateScope.Privatize();
      OMPLexicalScope LexScope(CGF, S, CapturedRegion);
                             Data.ReductionCopies, Data.ReductionOps);
      llvm::Value *ReductionsPtr = CGF.Builder.CreateLoad(
          CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(9)));
      for (unsigned Cnt = 0, E = Data.ReductionVars.size(); Cnt < E; ++Cnt) {
        RedCG.emitSharedOrigLValue(CGF, Cnt);
        RedCG.emitAggregateType(CGF, Cnt);
        CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
        Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
            CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
            CGF.EmitScalarConversion(Replacement.emitRawPointer(CGF),
                                     CGF.getContext().VoidPtrTy,
                                     CGF.getContext().getPointerType(
                                         Data.ReductionCopies[Cnt]->getType()),
                                     Data.ReductionCopies[Cnt]->getExprLoc()),
            CGF.ConvertTypeForMem(Data.ReductionCopies[Cnt]->getType()),
            Replacement.getAlignment());
        Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
        Scope.addPrivate(RedCG.getBaseDecl(Cnt), Replacement);
    (void)Scope.Privatize();
      auto IPriv = C->privates().begin();
      auto IRed = C->reduction_ops().begin();
      auto ITD = C->taskgroup_descriptors().begin();
      for (const Expr *Ref : C->varlist()) {
        InRedVars.emplace_back(Ref);
        InRedPrivs.emplace_back(*IPriv);
        InRedOps.emplace_back(*IRed);
        TaskgroupDescriptors.emplace_back(*ITD);
        std::advance(IPriv, 1);
        std::advance(IRed, 1);
        std::advance(ITD, 1);
    OMPPrivateScope InRedScope(CGF);
    if (!InRedVars.empty()) {
      for (unsigned Cnt = 0, E = InRedVars.size(); Cnt < E; ++Cnt) {
        RedCG.emitSharedOrigLValue(CGF, Cnt);
        RedCG.emitAggregateType(CGF, Cnt);
        CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
        llvm::Value *ReductionsPtr;
        if (const Expr *TRExpr = TaskgroupDescriptors[Cnt]) {
          ReductionsPtr = CGF.EmitLoadOfScalar(CGF.EmitLValue(TRExpr),
                                               TRExpr->getExprLoc());
          ReductionsPtr = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
        Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
            CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
            CGF.EmitScalarConversion(
                Replacement.emitRawPointer(CGF), CGF.getContext().VoidPtrTy,
                CGF.getContext().getPointerType(InRedPrivs[Cnt]->getType()),
                InRedPrivs[Cnt]->getExprLoc()),
            CGF.ConvertTypeForMem(InRedPrivs[Cnt]->getType()),
            Replacement.getAlignment());
        Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
        InRedScope.addPrivate(RedCG.getBaseDecl(Cnt), Replacement);
      (void)InRedScope.Privatize();
      S, *I, *PartId, *TaskT, EKind, CodeGen, Data.Tied, Data.NumberOfParts);
  OMPLexicalScope Scope(*this, S, std::nullopt,
  TaskGen(*this, OutlinedFn, Data);
  QualType ElemType = C.getBaseElementType(Ty);
  Data.FirstprivateVars.emplace_back(OrigRef);
  Data.FirstprivateCopies.emplace_back(PrivateRef);
  Data.FirstprivateInits.emplace_back(InitRef);
    OMPTargetDataInfo &InputInfo) {
  auto PartId = std::next(I);
  auto TaskT = std::next(I, 4);
  Data.Final.setInt(false);
    auto IRef = C->varlist_begin();
    auto IElemInitRef = C->inits().begin();
    for (auto *IInit : C->private_copies()) {
      Data.FirstprivateVars.push_back(*IRef);
      Data.FirstprivateCopies.push_back(IInit);
      Data.FirstprivateInits.push_back(*IElemInitRef);
    Data.ReductionVars.append(C->varlist_begin(), C->varlist_end());
    Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end());
    Data.ReductionCopies.append(C->privates().begin(), C->privates().end());
    Data.ReductionOps.append(C->reduction_ops().begin(),
                             C->reduction_ops().end());
    LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
  OMPPrivateScope TargetScope(*this);
  if (InputInfo.NumberOfTargetItems > 0) {
    llvm::APInt ArrSize(32, InputInfo.NumberOfTargetItems);
        getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc());
        getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc());
    TargetScope.addPrivate(BPVD, InputInfo.BasePointersArray);
    TargetScope.addPrivate(PVD, InputInfo.PointersArray);
    TargetScope.addPrivate(SVD, InputInfo.SizesArray);
    if (!isa_and_nonnull<llvm::ConstantPointerNull>(
            InputInfo.MappersArray.emitRawPointer(*this))) {
          getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc());
      TargetScope.addPrivate(MVD, InputInfo.MappersArray);
  (void)TargetScope.Privatize();
  auto &&CodeGen = [&Data, &S, CS, &BodyGen, BPVD, PVD, SVD, MVD, EKind,
    OMPPrivateScope Scope(CGF);
    if (!Data.FirstprivateVars.empty()) {
      enum { PrivatesParam = 2, CopyFnParam = 3 };
      llvm::Value *CopyFn = CGF.Builder.CreateLoad(
          CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam)));
      llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(
          CS->getCapturedDecl()->getParam(PrivatesParam)));
      CallArgs.push_back(PrivatesPtr);
      ParamTypes.push_back(PrivatesPtr->getType());
      for (const Expr *E : Data.FirstprivateVars) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
            CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
                              ".firstpriv.ptr.addr");
        PrivatePtrs.emplace_back(VD, PrivatePtr);
        ParamTypes.push_back(PrivatePtr.getType());
      auto *CopyFnTy = llvm::FunctionType::get(CGF.Builder.getVoidTy(),
      CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
          CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs);
      for (const auto &Pair : PrivatePtrs) {
            CGF.Builder.CreateLoad(Pair.second),
            CGF.ConvertTypeForMem(Pair.first->getType().getNonReferenceType()),
            CGF.getContext().getDeclAlign(Pair.first));
        Scope.addPrivate(Pair.first, Replacement);
    CGF.processInReduction(S, Data, CGF, CS, Scope);
    if (InputInfo.NumberOfTargetItems > 0) {
      InputInfo.BasePointersArray = CGF.Builder.CreateConstArrayGEP(
          CGF.GetAddrOfLocalVar(BPVD), 0);
      InputInfo.PointersArray = CGF.Builder.CreateConstArrayGEP(
          CGF.GetAddrOfLocalVar(PVD), 0);
      InputInfo.SizesArray = CGF.Builder.CreateConstArrayGEP(
          CGF.GetAddrOfLocalVar(SVD), 0);
        InputInfo.MappersArray = CGF.Builder.CreateConstArrayGEP(
            CGF.GetAddrOfLocalVar(MVD), 0);
    OMPLexicalScope LexScope(CGF, S, OMPD_task, false);
    if (CGF.CGM.getLangOpts().OpenMP >= 51 &&
      CGF.CGM.getOpenMPRuntime().emitThreadLimitClause(
          CGF, TL->getThreadLimit().front(), S.getBeginLoc());
      S, *I, *PartId, *TaskT, EKind, CodeGen, true,
      Data.NumberOfParts);
  llvm::APInt TrueOrFalse(32, S.hasClausesOfKind<OMPNowaitClause>() ? 1 : 0);
                                      SharedsTy, CapturedStruct, &IfCond, Data);
    OMPPrivateScope &Scope) {
  if (Data.Reductions) {
    OMPLexicalScope LexScope(CGF, S, CapturedRegion);
                           Data.ReductionCopies, Data.ReductionOps);
    for (unsigned Cnt = 0, E = Data.ReductionVars.size(); Cnt < E; ++Cnt) {
      RedCG.emitSharedOrigLValue(CGF, Cnt);
      RedCG.emitAggregateType(CGF, Cnt);
          CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
              Data.ReductionCopies[Cnt]->getType()),
              Data.ReductionCopies[Cnt]->getExprLoc()),
          Replacement.getAlignment());
      Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
      Scope.addPrivate(RedCG.getBaseDecl(Cnt), Replacement);
    (void)Scope.Privatize();
    auto IPriv = C->privates().begin();
    auto IRed = C->reduction_ops().begin();
    auto ITD = C->taskgroup_descriptors().begin();
    for (const Expr *Ref : C->varlist()) {
      InRedVars.emplace_back(Ref);
      InRedPrivs.emplace_back(*IPriv);
      InRedOps.emplace_back(*IRed);
      TaskgroupDescriptors.emplace_back(*ITD);
      std::advance(IPriv, 1);
      std::advance(IRed, 1);
      std::advance(ITD, 1);
  OMPPrivateScope InRedScope(CGF);
  if (!InRedVars.empty()) {
    for (unsigned Cnt = 0, E = InRedVars.size(); Cnt < E; ++Cnt) {
      RedCG.emitSharedOrigLValue(CGF, Cnt);
      RedCG.emitAggregateType(CGF, Cnt);
      llvm::Value *ReductionsPtr;
      if (const Expr *TRExpr = TaskgroupDescriptors[Cnt]) {
        ReductionsPtr = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
          CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
              InRedPrivs[Cnt]->getExprLoc()),
          Replacement.getAlignment());
      Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
      InRedScope.addPrivate(RedCG.getBaseDecl(Cnt), Replacement);
    (void)InRedScope.Privatize();
  const Expr *IfCond = nullptr;
  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
    if (C->getNameModifier() == OMPD_unknown ||
        C->getNameModifier() == OMPD_task) {
      IfCond = C->getCondition();
  auto &&TaskGen = [&S, SharedsTy, CapturedStruct,
    CGF.CGM.getOpenMPRuntime().emitTaskCall(CGF, S.getBeginLoc(), S, OutlinedFn,
                                            SharedsTy, CapturedStruct, IfCond,
  bool IsFatal = false;
        return T.clauses().empty();
  OMPLexicalScope Scope(*this, S, OMPD_unknown);
    using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
    auto BodyGenCB = [&, this](InsertPointTy AllocaIP,
                               InsertPointTy CodeGenIP) {
      EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
    CodeGenFunction::CGCapturedStmtInfo CapStmtInfo;
    Builder.restoreIP(OMPBuilder.createTaskgroup(Builder, AllocaIP, BodyGenCB));
    if (const Expr *E = S.getReductionRef()) {
        Data.ReductionVars.append(C->varlist_begin(), C->varlist_end());
        Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end());
        Data.ReductionCopies.append(C->privates().begin(), C->privates().end());
        Data.ReductionOps.append(C->reduction_ops().begin(),
                                 C->reduction_ops().end());
        LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
        RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
      llvm::Value *ReductionDesc =
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
    CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
          ? llvm::AtomicOrdering::NotAtomic
          : llvm::AtomicOrdering::AcquireRelease;
        if (const auto *FlushClause = S.getSingleClause<OMPFlushClause>())
                                    FlushClause->varlist_end());
        return std::nullopt;
      S.getBeginLoc(), AO);
    Dependencies.DepExprs.append(DC->varlist_begin(), DC->varlist_end());
        *this, Dependencies, DC->getBeginLoc());
        *this, DOLVal, UC->getDependencyKind(), UC->getBeginLoc());
    if (C->getModifier() != OMPC_REDUCTION_inscan)
    Shareds.append(C->varlist_begin(), C->varlist_end());
    Privates.append(C->privates().begin(), C->privates().end());
    LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
    CopyOps.append(C->copy_ops().begin(), C->copy_ops().end());
    CopyArrayTemps.append(C->copy_array_temps().begin(),
                          C->copy_array_temps().end());
    CopyArrayElems.append(C->copy_array_elems().begin(),
                          C->copy_array_elems().end());
               : BreakContinueStack.back().ContinueBlock.getBlock());
    LexicalScope Scope(*this, S.getSourceRange());
      for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
        const Expr *PrivateExpr = Privates[I];
        const Expr *TempExpr = CopyArrayTemps[I];
            *cast<VarDecl>(cast<DeclRefExpr>(TempExpr)->getDecl()));
            cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
            cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
        *this, ParentDir.getEndLoc(), Privates, LHSs, RHSs, ReductionOps,
        {true, true, OMPD_simd});
      for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
        const Expr *PrivateExpr = Privates[I];
        const Expr *TempExpr = CopyArrayTemps[I];
            cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
            cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), CopyOps[I]);
          ? BreakContinueStack.back().ContinueBlock.getBlock()
    EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock());
    const auto *IVExpr = cast<OMPLoopDirective>(ParentDir)
                             .getIterationVariable()
      for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
        const Expr *PrivateExpr = Privates[I];
        const Expr *OrigExpr = Shareds[I];
        const Expr *CopyArrayElem = CopyArrayElems[I];
        OpaqueValueMapping IdxMapping(
            cast<OpaqueValueExpr>(
                cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
            cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
            cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), CopyOps[I]);
    EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock());
    EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock());
    const auto *IVExpr = cast<OMPLoopDirective>(ParentDir)
                             .getIterationVariable()
    llvm::BasicBlock *ExclusiveExitBB = nullptr;
      Builder.CreateCondBr(Cmp, ExclusiveExitBB, ContBB);
      IdxVal = Builder.CreateNUWSub(IdxVal, llvm::ConstantInt::get(SizeTy, 1));
      for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
        const Expr *PrivateExpr = Privates[I];
        const Expr *OrigExpr = Shareds[I];
        const Expr *CopyArrayElem = CopyArrayElems[I];
        OpaqueValueMapping IdxMapping(
            cast<OpaqueValueExpr>(
                cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
            cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
            cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), CopyOps[I]);
  const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
  const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl());
  if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
  bool HasLastprivateClause = false;
  OMPLoopScope PreInitScope(*this, S);
  llvm::BasicBlock *ContBlock = nullptr;
    emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
        *this, cast<DeclRefExpr>(
                   ? S.getCombinedLowerBoundVariable()
                   : S.getLowerBoundVariable())));
        *this, cast<DeclRefExpr>(
                   ? S.getCombinedUpperBoundVariable()
                   : S.getUpperBoundVariable())));
    OMPPrivateScope LoopScope(*this);
          *this, S.getBeginLoc(), OMPD_unknown, false,
    (void)LoopScope.Privatize();
    llvm::Value *Chunk = nullptr;
      ScheduleKind = C->getDistScheduleKind();
      if (const Expr *Ch = C->getChunkSize()) {
                                 S.getIterationVariable()->getType(),
        *this, S, ScheduleKind, Chunk);
    bool StaticChunked =
         Chunk != nullptr) ||
        StaticChunked ? Chunk : nullptr);
                            ? S.getCombinedEnsureUpperBound()
                            : S.getEnsureUpperBound());
                            ? S.getCombinedInit()
                            ? S.getCombinedCond()
        Cond = S.getCombinedDistCond();
          [&S, &LoopScope, Cond, IncExpr, LoopExit, &CodeGenLoop,
            CGF.EmitOMPInnerLoop(
                S, LoopScope.requiresCleanups(), Cond, IncExpr,
                  CodeGenLoop(CGF, S, LoopExit);
                  if (StaticChunked) {
                    CGF.EmitIgnoredExpr(S.getCombinedNextLowerBound());
                    CGF.EmitIgnoredExpr(S.getCombinedNextUpperBound());
                    CGF.EmitIgnoredExpr(S.getCombinedEnsureUpperBound());
                    CGF.EmitIgnoredExpr(S.getCombinedInit());
      const OMPLoopArguments LoopArguments = {
      EmitOMPDistributeOuterLoop(ScheduleKind, S, LoopScope, LoopArguments,
            return CGF.Builder.CreateIsNotNull(
            return CGF.Builder.CreateIsNotNull(
    if (HasLastprivateClause) {
  OMPLexicalScope Scope(CGF, S, OMPD_unknown);
  CodeGenFunction::CGCapturedStmtInfo CapStmtInfo;
  Fn->setDoesNotRecurse();
template <typename T>
    llvm::OpenMPIRBuilder::InsertPointTy AllocaIP,
    llvm::OpenMPIRBuilder &OMPBuilder) {
  unsigned NumLoops = C->getNumLoops();
  for (unsigned I = 0; I < NumLoops; I++) {
    const Expr *CounterVal = C->getLoopData(I);
    StoreValues.emplace_back(StoreValue);
  OMPDoacrossKind<T> ODK;
  bool IsDependSource = ODK.isSource(C);
      OMPBuilder.createOrderedDepend(CGF.Builder, AllocaIP, NumLoops,
                                     StoreValues, ".cnt.addr", IsDependSource));
    using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
      assert(!S.hasAssociatedStmt() && "No associated statement must be in "
                                       "ordered depend|doacross construct.");
    auto FiniCB = [this](InsertPointTy IP) {
    auto BodyGenCB = [&S, C, this](InsertPointTy AllocaIP,
                                   InsertPointTy CodeGenIP) {
        llvm::BasicBlock *FiniBB = splitBBWithSuffix(
            Builder, false, ".ordered.after");
        llvm::Function *OutlinedFn =
        assert(S.getBeginLoc().isValid() &&
               "Outlined function call location must be valid.");
                                     OutlinedFn, CapturedVars);
    OMPLexicalScope Scope(*this, S, OMPD_unknown);
        OMPBuilder.createOrderedThreadsSimd(Builder, BodyGenCB, FiniCB, !C));
    assert(!S.hasAssociatedStmt() &&
           "No associated statement must be in ordered depend construct.");
    assert(!S.hasAssociatedStmt() &&
           "No associated statement must be in ordered doacross construct.");
      CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
      llvm::Function *OutlinedFn =
                                           OutlinedFn, CapturedVars);
  OMPLexicalScope Scope(*this, S, OMPD_unknown);
6087 "DestType must have scalar evaluation kind.");
6088 assert(!Val.
isAggregate() &&
"Must be a scalar or complex.");
6099 "DestType must have complex evaluation kind.");
6108 ScalarVal, llvm::Constant::getNullValue(ScalarVal->getType()));
6110 assert(Val.
isComplex() &&
"Must be a scalar or complex.");
6131 llvm::AtomicOrdering AO,
LValue LVal,
6136 LVal,
Loc, llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO),
6154 llvm_unreachable(
"Must be a scalar or complex.");
6162 assert(
V->isLValue() &&
"V of 'omp atomic read' is not lvalue");
6163 assert(
X->isLValue() &&
"X of 'omp atomic read' is not lvalue");
6172 case llvm::AtomicOrdering::Acquire:
6173 case llvm::AtomicOrdering::AcquireRelease:
6174 case llvm::AtomicOrdering::SequentiallyConsistent:
6176 llvm::AtomicOrdering::Acquire);
6178 case llvm::AtomicOrdering::Monotonic:
6179 case llvm::AtomicOrdering::Release:
6181 case llvm::AtomicOrdering::NotAtomic:
6182 case llvm::AtomicOrdering::Unordered:
6183 llvm_unreachable(
"Unexpected ordering.");
                                 llvm::AtomicOrdering AO, const Expr *X,
  assert(X->isLValue() && "X of 'omp atomic write' is not lvalue");
  case llvm::AtomicOrdering::Release:
  case llvm::AtomicOrdering::AcquireRelease:
  case llvm::AtomicOrdering::SequentiallyConsistent:
                                         llvm::AtomicOrdering::Release);
  case llvm::AtomicOrdering::Acquire:
  case llvm::AtomicOrdering::Monotonic:
  case llvm::AtomicOrdering::NotAtomic:
  case llvm::AtomicOrdering::Unordered:
    llvm_unreachable("Unexpected ordering.");
                                                 llvm::AtomicOrdering AO,
                                                 bool IsXLHSInRHSPart) {
  if (BO == BO_Comma || !Update.isScalar() || !X.isSimple() ||
      (!isa<llvm::ConstantInt>(Update.getScalarVal()) &&
       (Update.getScalarVal()->getType() != X.getAddress().getElementType())) ||
    return std::make_pair(false, RValue::get(nullptr));
    if (T->isIntegerTy())
    if (T->isFloatingPointTy() && (BO == BO_Add || BO == BO_Sub))
  if (!CheckAtomicSupport(Update.getScalarVal()->getType(), BO) ||
      !CheckAtomicSupport(X.getAddress().getElementType(), BO))
    return std::make_pair(false, RValue::get(nullptr));
  bool IsInteger = X.getAddress().getElementType()->isIntegerTy();
  llvm::AtomicRMWInst::BinOp RMWOp;
    RMWOp = IsInteger ? llvm::AtomicRMWInst::Add : llvm::AtomicRMWInst::FAdd;
    if (!IsXLHSInRHSPart)
      return std::make_pair(false, RValue::get(nullptr));
    RMWOp = IsInteger ? llvm::AtomicRMWInst::Sub : llvm::AtomicRMWInst::FSub;
    RMWOp = llvm::AtomicRMWInst::And;
    RMWOp = llvm::AtomicRMWInst::Or;
    RMWOp = llvm::AtomicRMWInst::Xor;
      RMWOp = X.getType()->hasSignedIntegerRepresentation()
                  ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Min
                                     : llvm::AtomicRMWInst::Max)
                  : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMin
                                     : llvm::AtomicRMWInst::UMax);
      RMWOp = IsXLHSInRHSPart ? llvm::AtomicRMWInst::FMin
                              : llvm::AtomicRMWInst::FMax;
      RMWOp = X.getType()->hasSignedIntegerRepresentation()
                  ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Max
                                     : llvm::AtomicRMWInst::Min)
                  : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMax
                                     : llvm::AtomicRMWInst::UMin);
      RMWOp = IsXLHSInRHSPart ? llvm::AtomicRMWInst::FMax
                              : llvm::AtomicRMWInst::FMin;
    RMWOp = llvm::AtomicRMWInst::Xchg;
    return std::make_pair(false, RValue::get(nullptr));
    llvm_unreachable("Unsupported atomic update operation");
  llvm::Value *UpdateVal = Update.getScalarVal();
  if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) {
      UpdateVal = CGF.Builder.CreateIntCast(
          IC, X.getAddress().getElementType(),
          X.getType()->hasSignedIntegerRepresentation());
      UpdateVal = CGF.Builder.CreateCast(llvm::Instruction::CastOps::UIToFP, IC,
                                         X.getAddress().getElementType());
  llvm::AtomicRMWInst *Res =
  if (X.isGlobalReg()) {
                                  llvm::AtomicOrdering AO, const Expr *X,
         "Update expr in 'atomic update' must be a binary operator.");
  assert(X->isLValue() && "X of 'omp atomic update' is not lvalue");
  const auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
  const auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
  auto &&Gen = [&CGF, UE, ExprRValue, XRValExpr, ERValExpr](RValue XRValue) {
    CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
    CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
      XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
  case llvm::AtomicOrdering::Release:
  case llvm::AtomicOrdering::AcquireRelease:
  case llvm::AtomicOrdering::SequentiallyConsistent:
                                         llvm::AtomicOrdering::Release);
  case llvm::AtomicOrdering::Acquire:
  case llvm::AtomicOrdering::Monotonic:
  case llvm::AtomicOrdering::NotAtomic:
  case llvm::AtomicOrdering::Unordered:
    llvm_unreachable("Unexpected ordering.");
  llvm_unreachable("Must be a scalar or complex.");
                                     llvm::AtomicOrdering AO,
                                     bool IsPostfixUpdate, const Expr *V,
                                     const Expr *UE, bool IsXLHSInRHSPart,
  assert(X->isLValue() && "X of 'omp atomic capture' is not lvalue");
  assert(V->isLValue() && "V of 'omp atomic capture' is not lvalue");
           "Update expr in 'atomic capture' must be a binary operator.");
    const auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
    const auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
    NewVValType = XRValExpr->getType();
    auto &&Gen = [&CGF, &NewVVal, UE, ExprRValue, XRValExpr, ERValExpr,
                  IsPostfixUpdate](RValue XRValue) {
      CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
      CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
      NewVVal = IsPostfixUpdate ? XRValue : Res;
        XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
    if (IsPostfixUpdate) {
      NewVVal = Res.second;
      CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
      CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, Res.second);
    NewVValType = X->getType().getNonReferenceType();
                                       X->getType().getNonReferenceType(), Loc);
    auto &&Gen = [&NewVVal, ExprRValue](RValue XRValue) {
        XLValue, ExprRValue, BO_Assign, false, AO,
    NewVVal = IsPostfixUpdate ? Res.second : ExprRValue;
  case llvm::AtomicOrdering::Release:
                                         llvm::AtomicOrdering::Release);
  case llvm::AtomicOrdering::Acquire:
                                         llvm::AtomicOrdering::Acquire);
  case llvm::AtomicOrdering::AcquireRelease:
  case llvm::AtomicOrdering::SequentiallyConsistent:
        CGF, std::nullopt, Loc, llvm::AtomicOrdering::AcquireRelease);
  case llvm::AtomicOrdering::Monotonic:
  case llvm::AtomicOrdering::NotAtomic:
  case llvm::AtomicOrdering::Unordered:
    llvm_unreachable("Unexpected ordering.");
    CodeGenFunction &CGF, llvm::AtomicOrdering AO, llvm::AtomicOrdering FailAO,
    const Expr *CE, bool IsXBinopExpr, bool IsPostfixUpdate, bool IsFailOnly,
  llvm::OpenMPIRBuilder &OMPBuilder =
  OMPAtomicCompareOp Op;
  assert(isa<BinaryOperator>(CE) && "CE is not a BinaryOperator");
  switch (cast<BinaryOperator>(CE)->getOpcode()) {
    Op = OMPAtomicCompareOp::EQ;
    Op = OMPAtomicCompareOp::MIN;
    Op = OMPAtomicCompareOp::MAX;
    llvm_unreachable("unsupported atomic compare binary operator");
  auto EmitRValueWithCastIfNeeded = [&CGF, Loc](const Expr *X, const Expr *E) {
    if (NewE->getType() == X->getType())
  llvm::Value *EVal = EmitRValueWithCastIfNeeded(X, E);
  llvm::Value *DVal = D ? EmitRValueWithCastIfNeeded(X, D) : nullptr;
  if (auto *CI = dyn_cast<llvm::ConstantInt>(EVal))
    EVal = CGF.Builder.CreateIntCast(
    if (auto *CI = dyn_cast<llvm::ConstantInt>(DVal))
      DVal = CGF.Builder.CreateIntCast(
          D->getType()->hasSignedIntegerRepresentation());
  llvm::OpenMPIRBuilder::AtomicOpValue XOpVal{
      X->getType()->hasSignedIntegerRepresentation(),
      X->getType().isVolatileQualified()};
  llvm::OpenMPIRBuilder::AtomicOpValue VOpVal, ROpVal;
        V->getType()->hasSignedIntegerRepresentation(),
        V->getType().isVolatileQualified()};
  if (FailAO == llvm::AtomicOrdering::NotAtomic) {
    CGF.Builder.restoreIP(OMPBuilder.createAtomicCompare(
        CGF.Builder, XOpVal, VOpVal, ROpVal, EVal, DVal, AO, Op, IsXBinopExpr,
        IsPostfixUpdate, IsFailOnly));
    CGF.Builder.restoreIP(OMPBuilder.createAtomicCompare(
        CGF.Builder, XOpVal, VOpVal, ROpVal, EVal, DVal, AO, Op, IsXBinopExpr,
        IsPostfixUpdate, IsFailOnly, FailAO));
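// Note (added commentary): EmitOMPAtomicDirective below first derives the
// llvm::AtomicOrdering from the seq_cst/acq_rel/acquire/release/relaxed
// clauses (falling back to the compiler's default atomic ordering when none
// is given), folds 'compare' combined with 'capture' into the compare path,
// and translates an optional 'fail' clause into the separate failure ordering
// passed down to the atomic-compare lowering.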
                              llvm::AtomicOrdering AO,
                              llvm::AtomicOrdering FailAO, bool IsPostfixUpdate,
                              const Expr *CE, bool IsXLHSInRHSPart,
                             IsXLHSInRHSPart, Loc);
  case OMPC_compare: {
                         IsXLHSInRHSPart, IsPostfixUpdate, IsFailOnly, Loc);
    llvm_unreachable("Clause is not allowed in 'omp atomic'.");
  llvm::AtomicOrdering FailAO = llvm::AtomicOrdering::NotAtomic;
  bool MemOrderingSpecified = false;
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
    MemOrderingSpecified = true;
    AO = llvm::AtomicOrdering::AcquireRelease;
    MemOrderingSpecified = true;
    AO = llvm::AtomicOrdering::Acquire;
    MemOrderingSpecified = true;
    AO = llvm::AtomicOrdering::Release;
    MemOrderingSpecified = true;
    AO = llvm::AtomicOrdering::Monotonic;
    MemOrderingSpecified = true;
  llvm::SmallSet<OpenMPClauseKind, 2> KindsEncountered;
    if (K == OMPC_seq_cst || K == OMPC_acq_rel || K == OMPC_acquire ||
        K == OMPC_release || K == OMPC_relaxed || K == OMPC_hint)
    KindsEncountered.insert(K);
  if (KindsEncountered.contains(OMPC_compare) &&
      KindsEncountered.contains(OMPC_capture))
    Kind = OMPC_compare;
  if (!MemOrderingSpecified) {
    llvm::AtomicOrdering DefaultOrder =
    if (DefaultOrder == llvm::AtomicOrdering::Monotonic ||
        DefaultOrder == llvm::AtomicOrdering::SequentiallyConsistent ||
        (DefaultOrder == llvm::AtomicOrdering::AcquireRelease &&
         Kind == OMPC_capture)) {
    } else if (DefaultOrder == llvm::AtomicOrdering::AcquireRelease) {
      if (Kind == OMPC_unknown || Kind == OMPC_update || Kind == OMPC_write) {
        AO = llvm::AtomicOrdering::Release;
      } else if (Kind == OMPC_read) {
        assert(Kind == OMPC_read && "Unexpected atomic kind.");
        AO = llvm::AtomicOrdering::Acquire;
  if (KindsEncountered.contains(OMPC_compare) &&
      KindsEncountered.contains(OMPC_fail)) {
    Kind = OMPC_compare;
    const auto *FailClause = S.getSingleClause<OMPFailClause>();
      if (FailParameter == llvm::omp::OMPC_relaxed)
        FailAO = llvm::AtomicOrdering::Monotonic;
      else if (FailParameter == llvm::omp::OMPC_acquire)
        FailAO = llvm::AtomicOrdering::Acquire;
      else if (FailParameter == llvm::omp::OMPC_seq_cst)
        FailAO = llvm::AtomicOrdering::SequentiallyConsistent;
  LexicalScope Scope(*this, S.getSourceRange());
                    S.getV(), S.getR(), S.getExpr(), S.getUpdateExpr(),
                    S.getD(), S.getCondExpr(), S.isXLHSInRHSPart(),
                    S.isFailOnly(), S.getBeginLoc());
  OMPLexicalScope Scope(CGF, S, OMPD_target);
    CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
  llvm::Function *Fn = nullptr;
  llvm::Constant *FnID = nullptr;
  const Expr *IfCond = nullptr;
  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
    if (C->getNameModifier() == OMPD_unknown ||
        C->getNameModifier() == OMPD_target) {
      IfCond = C->getCondition();
  llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device(
    Device.setPointerAndInt(C->getDevice(), C->getModifier());
  bool IsOffloadEntry = true;
    IsOffloadEntry = false;
    IsOffloadEntry = false;
  if (CGM.getLangOpts().OpenMPOffloadMandatory && !IsOffloadEntry) {
        "No offloading entry generated while offloading is mandatory.");
  assert(CGF.CurFuncDecl && "No parent declaration for target region!");
  StringRef ParentName;
  if (const auto *D = dyn_cast<CXXConstructorDecl>(CGF.CurFuncDecl))
  else if (const auto *D = dyn_cast<CXXDestructorDecl>(CGF.CurFuncDecl))
                                                    IsOffloadEntry, CodeGen);
  OMPLexicalScope Scope(CGF, S, OMPD_task);
  auto &&SizeEmitter =
        if (IsOffloadEntry) {
          OMPLoopScope(CGF, D);
          llvm::Value *NumIterations = CGF.EmitScalarExpr(D.getNumIterations());
          NumIterations = CGF.Builder.CreateIntCast(NumIterations, CGF.Int64Ty,
          return NumIterations;
    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
    (void)PrivateScope.Privatize();
    CGF.EmitStmt(S.getCapturedStmt(OMPD_target)->getCapturedStmt());
    StringRef ParentName,
  llvm::Constant *Addr;
      S, ParentName, Fn, Addr, true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
  const CapturedStmt *CS = S.getCapturedStmt(OMPD_teams);
  llvm::Function *OutlinedFn =
  const Expr *NumTeams = NT ? NT->getNumTeams().front() : nullptr;
  const Expr *ThreadLimit = TL ? TL->getThreadLimit().front() : nullptr;
  OMPTeamsScope Scope(CGF, S);
    OMPPrivateScope PrivateScope(CGF);
    (void)PrivateScope.Privatize();
    CGF.EmitStmt(S.getCapturedStmt(OMPD_teams)->getCapturedStmt());
  auto *CS = S.getCapturedStmt(OMPD_teams);
    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
    (void)PrivateScope.Privatize();
  llvm::Constant *Addr;
      S, ParentName, Fn, Addr, true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
    (void)PrivateScope.Privatize();
  llvm::Constant *Addr;
      S, ParentName, Fn, Addr, true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
    (void)PrivateScope.Privatize();
  llvm::Constant *Addr;
      S, ParentName, Fn, Addr, true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
    OMPPrivateScope PrivateScope(CGF);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute,
    CGF.EmitOMPReductionClauseFinal(S, OMPD_teams);
    OMPPrivateScope PrivateScope(CGF);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_simd,
    CGF.EmitOMPReductionClauseFinal(S, OMPD_teams);
    OMPPrivateScope PrivateScope(CGF);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute,
    CGF.EmitOMPReductionClauseFinal(S, OMPD_teams);
    OMPPrivateScope PrivateScope(CGF);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(
        CGF, OMPD_distribute, CodeGenDistribute, false);
    CGF.EmitOMPReductionClauseFinal(S, OMPD_teams);
  llvm::Value *Device = nullptr;
  llvm::Value *NumDependences = nullptr;
  llvm::Value *DependenceList = nullptr;
  if (!Data.Dependences.empty()) {
    std::tie(NumDependences, DependenciesArray) =
         "OMPNowaitClause clause is used separately in OMPInteropDirective.");
  if (!ItOMPInitClause.empty()) {
    llvm::Value *InteropvarPtr =
    llvm::omp::OMPInteropType InteropType =
        llvm::omp::OMPInteropType::Unknown;
    if (C->getIsTarget()) {
      InteropType = llvm::omp::OMPInteropType::Target;
      assert(C->getIsTargetSync() &&
             "Expected interop-type target/targetsync");
      InteropType = llvm::omp::OMPInteropType::TargetSync;
    OMPBuilder.createOMPInteropInit(Builder, InteropvarPtr, InteropType,
                                    Device, NumDependences, DependenceList,
                                    Data.HasNowaitClause);
  if (!ItOMPDestroyClause.empty()) {
    llvm::Value *InteropvarPtr =
    OMPBuilder.createOMPInteropDestroy(Builder, InteropvarPtr, Device,
                                       NumDependences, DependenceList,
                                       Data.HasNowaitClause);
  auto ItOMPUseClause = S.getClausesOfKind<OMPUseClause>();
  if (!ItOMPUseClause.empty()) {
    llvm::Value *InteropvarPtr =
    OMPBuilder.createOMPInteropUse(Builder, InteropvarPtr, Device,
                                   NumDependences, DependenceList,
                                   Data.HasNowaitClause);
    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
    (void)PrivateScope.Privatize();
        CGF, OMPD_distribute, CodeGenDistribute, false);
  llvm::Constant *Addr;
      S, ParentName, Fn, Addr, true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
    (void)PrivateScope.Privatize();
        CGF, OMPD_distribute, CodeGenDistribute, false);
  llvm::Constant *Addr;
      S, ParentName, Fn, Addr, true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
                                                     S.getCancelRegion());
  const Expr *IfCond = nullptr;
  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
    if (C->getNameModifier() == OMPD_unknown ||
        C->getNameModifier() == OMPD_cancel) {
      IfCond = C->getCondition();
    if (S.getCancelRegion() == OMPD_parallel ||
        S.getCancelRegion() == OMPD_sections ||
        S.getCancelRegion() == OMPD_section) {
      llvm::Value *IfCondition = nullptr;
          OMPBuilder.createCancel(Builder, IfCondition, S.getCancelRegion()));
                                                 S.getCancelRegion());
CodeGenFunction::JumpDest
  if (Kind == OMPD_parallel || Kind == OMPD_task ||
      Kind == OMPD_target_parallel || Kind == OMPD_taskloop ||
      Kind == OMPD_master_taskloop || Kind == OMPD_parallel_master_taskloop)
  assert(Kind == OMPD_for || Kind == OMPD_section || Kind == OMPD_sections ||
         Kind == OMPD_parallel_sections || Kind == OMPD_parallel_for ||
         Kind == OMPD_distribute_parallel_for ||
         Kind == OMPD_target_parallel_for ||
         Kind == OMPD_teams_distribute_parallel_for ||
         Kind == OMPD_target_teams_distribute_parallel_for);
  return OMPCancelStack.getExitBlock();
void CodeGenFunction::EmitOMPUseDevicePtrClause(
    const OMPUseDevicePtrClause &C, OMPPrivateScope &PrivateScope,
    const llvm::DenseMap<const ValueDecl *, llvm::Value *>
        CaptureDeviceAddrMap) {
  llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>, 4> Processed;
  for (const Expr *OrigVarIt : C.varlist()) {
    const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(OrigVarIt)->getDecl());
    if (!Processed.insert(OrigVD).second)
      continue;
    // ...
    if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(MatchingVD)) {
      // ...
      const auto *ME = cast<MemberExpr>(OED->getInit());
      assert(isa<CXXThisExpr>(ME->getBase()->IgnoreImpCasts()) &&
             "Base should be the current struct!");
      MatchingVD = ME->getMemberDecl();
    }
    // ...
    auto InitAddrIt = CaptureDeviceAddrMap.find(MatchingVD);
    if (InitAddrIt == CaptureDeviceAddrMap.end())
      continue;
    // ...
    bool IsRegistered = PrivateScope.addPrivate(
        OrigVD, Address(InitAddrIt->second, Ty, /*...*/));
    assert(IsRegistered &&
           "firstprivate var already registered as private");
    // ...
  }
}

static const VarDecl *getBaseDecl(const Expr *Ref) {
  const Expr *Base = Ref->IgnoreParenImpCasts();
  while (const auto *OASE = dyn_cast<ArraySectionExpr>(Base))
    Base = OASE->getBase()->IgnoreParenImpCasts();
  while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Base))
    Base = ASE->getBase()->IgnoreParenImpCasts();
  return cast<VarDecl>(cast<DeclRefExpr>(Base)->getDecl());
}
void CodeGenFunction::EmitOMPUseDeviceAddrClause(
    const OMPUseDeviceAddrClause &C, OMPPrivateScope &PrivateScope,
    const llvm::DenseMap<const ValueDecl *, llvm::Value *>
        CaptureDeviceAddrMap) {
  llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>, 4> Processed;
  for (const Expr *Ref : C.varlist()) {
    // ...
    if (!Processed.insert(OrigVD).second)
      continue;
    // ...
    if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(MatchingVD)) {
      // ...
      const auto *ME = cast<MemberExpr>(OED->getInit());
      assert(isa<CXXThisExpr>(ME->getBase()) &&
             "Base should be the current struct!");
      MatchingVD = ME->getMemberDecl();
    }
    // ...
    auto InitAddrIt = CaptureDeviceAddrMap.find(MatchingVD);
    if (InitAddrIt == CaptureDeviceAddrMap.end())
      continue;
    // ...
    Address PrivAddr = Address(InitAddrIt->second, Ty, /*...*/);
    // ...
    if (isa<DeclRefExpr>(Ref->IgnoreParenImpCasts()) ||
        /*...*/) {
      // ...
    }
    (void)PrivateScope.addPrivate(OrigVD, PrivAddr);
  }
}
  bool PrivatizeDevicePointers = false;

  class DevicePointerPrivActionTy : public PrePostActionTy {
    bool &PrivatizeDevicePointers;

  public:
    explicit DevicePointerPrivActionTy(bool &PrivatizeDevicePointers)
        : PrivatizeDevicePointers(PrivatizeDevicePointers) {}
    void Enter(CodeGenFunction &CGF) override {
      PrivatizeDevicePointers = true;
    }
  };
  DevicePointerPrivActionTy PrivAction(PrivatizeDevicePointers);
  // ...
    CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
  // ...
    PrivatizeDevicePointers = false;
    // ...
    if (PrivatizeDevicePointers) {
      OMPPrivateScope PrivateScope(CGF);
      // Privatize use_device_ptr / use_device_addr variables with their
      // captured device addresses.
      CGF.EmitOMPUseDevicePtrClause(/*...*/, PrivateScope,
                                    Info.CaptureDeviceAddrMap);
      CGF.EmitOMPUseDeviceAddrClause(/*...*/, PrivateScope,
                                     Info.CaptureDeviceAddrMap);
      (void)PrivateScope.Privatize();
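For context, a minimal sketch (illustrative user code, not from the Clang sources; some_device_library_call is a hypothetical vendor API) of the use_device_ptr clause that the privatization above services:

// Illustrative only.
void some_device_library_call(float *device_ptr, int n); // hypothetical

void use_device_ptr_example(float *p, int n) {
#pragma omp target data map(tofrom : p[0:n]) use_device_ptr(p)
  {
    // Inside the region 'p' holds the corresponding device address.
    some_device_library_call(p, n);
  }
}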
  std::optional<OpenMPDirectiveKind> CaptureRegion;
  // ...
    for (const Expr *E : C->varlist()) {
      const Decl *D = cast<DeclRefExpr>(E)->getDecl();
      if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
        CGF.EmitVarDecl(*OED);
    }
    for (const Expr *E : C->varlist()) {
      // ...
      if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
        CGF.EmitVarDecl(*OED);
    }
  // ...
    CaptureRegion = OMPD_unknown;
  // ...
  OMPLexicalScope Scope(CGF, S, CaptureRegion);
  // ...
    PrivRCG.setAction(Action);
  // ...
    OMPLexicalScope Scope(CGF, S);
  // 'if' clause handling for the target data directive.
  const Expr *IfCond = nullptr;
  if (const auto *C = S.getSingleClause<OMPIfClause>())
    IfCond = C->getCondition();
  // ...
  RCG.setAction(PrivAction);
  // ...

  // 'if' clause handling for the target enter data directive.
  const Expr *IfCond = nullptr;
  if (const auto *C = S.getSingleClause<OMPIfClause>())
    IfCond = C->getCondition();
  // ...
  OMPLexicalScope Scope(*this, S, OMPD_task);
  // ...

  // 'if' clause handling for the target exit data directive.
  const Expr *IfCond = nullptr;
  if (const auto *C = S.getSingleClause<OMPIfClause>())
    IfCond = C->getCondition();
  // ...
  OMPLexicalScope Scope(*this, S, OMPD_task);
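For orientation, a hedged sketch (illustrative user code, not from the Clang sources) of the standalone data-mapping constructs whose 'if' clauses are evaluated above before the runtime's emitTargetDataStandAloneCall is invoked:

// Illustrative only.
void enter_exit_example(float *a, int n, int use_gpu) {
#pragma omp target enter data map(to : a[0:n]) if(use_gpu)
  // ... target regions operating on the device copy of 'a' ...
#pragma omp target exit data map(from : a[0:n]) if(use_gpu)
}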
  const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
  // ...
    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
    // ...
    (void)PrivateScope.Privatize();
  // ...
  llvm::Constant *Addr;
  // ...
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
  // ...
  CodeGenFunction::OMPCancelStackRAII CancelRegion(
      CGF, OMPD_target_parallel_for, S.hasCancel());
  // ...
  llvm::Constant *Addr;
  // ...
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
  // ...
  llvm::Constant *Addr;
  // ...
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
static void mapParam(CodeGenFunction &CGF, const DeclRefExpr *Helper,
                     const ImplicitParamDecl *PVD,
                     CodeGenFunction::OMPPrivateScope &Privates) {
  const auto *VDecl = cast<VarDecl>(Helper->getDecl());
  // ...
}

void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) {
  // ...
  const CapturedStmt *CS = S.getCapturedStmt(OMPD_taskloop);
  // ...
  OMPLexicalScope Scope(*this, S, OMPD_taskloop, /*EmitPreInitStmt=*/false);
  // ...
  const Expr *IfCond = nullptr;
  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
    if (C->getNameModifier() == OMPD_unknown ||
        C->getNameModifier() == OMPD_taskloop) {
      IfCond = C->getCondition();
      // ...
    }
  }
  // ...
  // grainsize clause:
  Data.Schedule.setInt(/*IntVal=*/false);
  // ...
  // num_tasks clause:
  Data.Schedule.setInt(/*IntVal=*/true);
    bool CondConstant;
    llvm::BasicBlock *ContBlock = nullptr;
    OMPLoopScope PreInitScope(CGF, S);
    if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
      // ...
    } else {
      llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("taskloop.if.then");
      ContBlock = CGF.createBasicBlock("taskloop.if.end");
      emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
                  CGF.getProfileCount(&S));
      CGF.EmitBlock(ThenBlock);
      CGF.incrementProfileCounter(&S);
    }

    (void)CGF.EmitOMPLinearClauseInit(S);

    OMPPrivateScope LoopScope(CGF);
    // ...
    enum { LowerBound = 5, UpperBound, Stride, LastIter };
    auto *I = CS->getCapturedDecl()->param_begin();
    auto *LBP = std::next(I, LowerBound);
    auto *UBP = std::next(I, UpperBound);
    auto *STP = std::next(I, Stride);
    auto *LIP = std::next(I, LastIter);
    mapParam(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable()), *LBP,
             LoopScope);
    mapParam(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable()), *UBP,
             LoopScope);
    mapParam(CGF, cast<DeclRefExpr>(S.getStrideVariable()), *STP, LoopScope);
    mapParam(CGF, cast<DeclRefExpr>(S.getIsLastIterVariable()), *LIP,
             LoopScope);
    CGF.EmitOMPPrivateLoopCounters(S, LoopScope);
    CGF.EmitOMPLinearClause(S, LoopScope);
    bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
    (void)LoopScope.Privatize();
    // Emit the loop iteration variable.
    const Expr *IVExpr = S.getIterationVariable();
    const auto *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
    CGF.EmitVarDecl(*IVDecl);
    CGF.EmitIgnoredExpr(S.getInit());
    // ...
    if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
      CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
      // ...
      CGF.EmitIgnoredExpr(S.getCalcLastIteration());
    }

    {
      OMPLexicalScope Scope(CGF, S, OMPD_taskloop, /*EmitPreInitStmt=*/false);
      // ...
      CGF.EmitOMPInnerLoop(
          S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(),
          [&S](CodeGenFunction &CGF) {
            emitOMPLoopBodyWithStopPoint(CGF, S, CodeGenFunction::JumpDest());
          },
          [](CodeGenFunction &) {});
    }
    // ...
    if (ContBlock) {
      CGF.EmitBranch(ContBlock);
      CGF.EmitBlock(ContBlock, /*IsFinished=*/true);
    }
    // ...
    if (HasLastprivateClause) {
      CGF.EmitOMPLastprivateClauseFinal(
          S, /*...*/,
          CGF.Builder.CreateIsNotNull(CGF.EmitLoadOfScalar(
              CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false,
              (*LIP)->getType(), S.getBeginLoc())));
    }
    LoopScope.restoreMap();
    // ...
    return CGF.Builder.CreateIsNotNull(
        CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false,
                             (*LIP)->getType(), S.getBeginLoc()));

  auto &&TaskGen = [&S, SharedsTy, CapturedStruct,
                    IfCond](CodeGenFunction &CGF, llvm::Function *OutlinedFn,
                            const OMPTaskDataTy &Data) {
    auto &&CodeGen = [&S, OutlinedFn, SharedsTy, CapturedStruct, IfCond,
                      &Data](CodeGenFunction &CGF, PrePostActionTy &) {
      OMPLoopScope PreInitScope(CGF, S);
      CGF.CGM.getOpenMPRuntime().emitTaskLoopCall(CGF, S.getBeginLoc(), S,
                                                  OutlinedFn, SharedsTy,
                                                  CapturedStruct, IfCond,
                                                  Data);
    };
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_taskloop,
                                                    CodeGen);
  };
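To make the shape of the construct concrete, a hedged sketch (illustrative user code, not from the Clang sources) of a taskloop with the grainsize and lastprivate clauses that the schedule and lastprivate handling above lowers through emitTaskLoopCall:

// Illustrative only.
void taskloop_example(double *x, int n, double *out_last) {
  double last = 0.0;
#pragma omp taskloop grainsize(64) lastprivate(last)
  for (int i = 0; i < n; ++i) {
    x[i] *= 2.0;
    last = x[i];
  }
  *out_last = last;
}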
  // Lexical scopes opened by the individual taskloop-variant emitters.
  OMPLexicalScope Scope(*this, S);
  // ...
  OMPLexicalScope Scope(*this, S, std::nullopt, /*EmitPreInitStmt=*/false);
  // ...
  OMPLexicalScope Scope(*this, S);
  // ...
    OMPLexicalScope Scope(CGF, S, OMPD_parallel, /*EmitPreInitStmt=*/false);
  // ...
    OMPLexicalScope Scope(CGF, S, OMPD_parallel, /*EmitPreInitStmt=*/false);
  // ...

  // 'if' clause handling for the target update directive.
  const Expr *IfCond = nullptr;
  if (const auto *C = S.getSingleClause<OMPIfClause>())
    IfCond = C->getCondition();
  // ...
  OMPLexicalScope Scope(*this, S, OMPD_task);
  OpenMPBindClauseKind BindKind = OMPC_BIND_unknown;
  if (const auto *C = S.getSingleClause<OMPBindClause>())
    BindKind = C->getBindKind();
  switch (BindKind) {
  case OMPC_BIND_parallel: // lowered like a worksharing 'for'
    return emitOMPForDirective(S, *this, CGM, /*HasCancel=*/false);
  case OMPC_BIND_teams: // lowered like 'distribute'
    return emitOMPDistributeDirective(S, *this, CGM);
  case OMPC_BIND_thread: // lowered like 'simd'
    return emitOMPSimdDirective(S, *this, CGM);
  // ...
  }
  // ...
  const auto *ForS = dyn_cast<ForStmt>(CS);
  if (ForS && !isa<DeclStmt>(ForS->getInit())) {
    OMPPrivateScope LoopScope(CGF);
    // ...
    (void)LoopScope.Privatize();
    // ...
    LoopScope.restoreMap();
  }
  // ...
  OMPLexicalScope Scope(*this, S, OMPD_unknown);
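For reference, a hedged sketch (illustrative user code, not from the Clang sources) of a 'loop' construct whose bind clause drives the switch above; bind(parallel) selects the worksharing-for style lowering:

// Illustrative only.
void loop_bind_example(float *a, int n) {
#pragma omp parallel
#pragma omp loop bind(parallel)
  for (int i = 0; i < n; ++i)
    a[i] += 1.0f;
}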
    OMPPrivateScope PrivateScope(CGF);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute,
                                                    CodeGenDistribute);
    CGF.EmitOMPReductionClauseFinal(S, OMPD_teams);
  // ...

static void emitTargetTeamsLoopCodegenStatus(CodeGenFunction &CGF,
                                             std::string StatusMsg,
                                             const OMPExecutableDirective &D) {
  // ...
    StatusMsg += ": DEVICE";
  // ...
    StatusMsg += ": HOST";
  // ...
  llvm::dbgs() << StatusMsg << ": " << FileName << ": " << LineNo << "\n";
}
    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
    // ...
    (void)PrivateScope.Privatize();
    // ...
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(
        CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false);
  // ...
    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
    // ...
    (void)PrivateScope.Privatize();
    // ...
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(
        CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false);
  // ...
    if (S.canBeParallelFor())
      emitTargetTeamsGenericLoopRegionAsParallel(CGF, Action, S);
    else
      emitTargetTeamsGenericLoopRegionAsDistribute(CGF, Action, S);
  // ...
    if (S.canBeParallelFor())
      emitTargetTeamsGenericLoopRegionAsParallel(CGF, Action, S);
    else
      emitTargetTeamsGenericLoopRegionAsDistribute(CGF, Action, S);
  // ...
  llvm::Constant *Addr;
  // ...
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr &&
         "Target device function emission failed for 'target teams loop'.");
  // ...
  CodeGenFunction::OMPCancelStackRAII CancelRegion(
      CGF, OMPD_target_parallel_loop, /*HasCancel=*/false);
  // ...
  llvm::Constant *Addr;
  // ...
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
  if (const auto *SD = dyn_cast<OMPScanDirective>(&D)) {
    // ...
  }
  if (!D.hasAssociatedStmt() || !D.getAssociatedStmt())
    return;
  // ...
    OMPPrivateScope GlobalsScope(CGF);
    // ...
      for (const Expr *Ref : C->varlist()) {
        const auto *DRE = cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
        // ...
        const auto *VD = dyn_cast<VarDecl>(DRE->getDecl());
        // ...
        if (!CGF.LocalDeclMap.count(VD)) {
          // ...
          GlobalsScope.addPrivate(VD, GlobLVal.getAddress());
        }
      }
    // ...
    (void)GlobalsScope.Privatize();
    ParentLoopDirectiveForScanRegion ScanRegion(CGF, D);
    // ...
    if (const auto *LD = dyn_cast<OMPLoopDirective>(&D)) {
      for (const Expr *E : LD->counters()) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
        // ...
        GlobalsScope.addPrivate(VD, GlobLVal.getAddress());
        // ...
        if (isa<OMPCapturedExprDecl>(VD)) {
          // ...
          if (!CGF.LocalDeclMap.count(VD))
            // ...
        }
      }
      // ...
        if (!C->getNumForLoops())
          continue;
        for (unsigned I = LD->getLoopsNumber(),
                      E = C->getLoopNumIterations().size();
             I < E; ++I) {
          if (const auto *VD = dyn_cast<OMPCapturedExprDecl>(
                  cast<DeclRefExpr>(C->getLoopCounter(I))->getDecl())) {
            // ...
            if (!CGF.LocalDeclMap.count(VD))
              // ...
          }
        }
    }
    (void)GlobalsScope.Privatize();
    CGF.EmitStmt(D.getInnermostCapturedStmt()->getCapturedStmt());
  // ...
  if (D.getDirectiveKind() == OMPD_atomic ||
      D.getDirectiveKind() == OMPD_critical ||
      D.getDirectiveKind() == OMPD_section ||
      D.getDirectiveKind() == OMPD_master ||
      D.getDirectiveKind() == OMPD_masked ||
      D.getDirectiveKind() == OMPD_unroll ||
      D.getDirectiveKind() == OMPD_assume) {
    // ...
  }
  // ...
  OMPSimdLexicalScope Scope(*this, D);
  CGM.getOpenMPRuntime().emitInlinedDirective(
      *this, isOpenMPSimdDirective(D.getDirectiveKind()) ? OMPD_simd
                                                         : D.getDirectiveKind(),
      /*...*/);
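To ground the scan handling above, a hedged sketch (illustrative user code, not from the Clang sources) of an inscan reduction whose separating 'scan' directive this simd-only path has to recognize:

// Illustrative only.
void scan_example(const int *in, int *out, int n) {
  int sum = 0;
#pragma omp simd reduction(inscan, + : sum)
  for (int i = 0; i < n; ++i) {
    sum += in[i];
#pragma omp scan inclusive(sum)
    out[i] = sum;
  }
}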
Defines the clang::ASTContext interface.
static bool isAllocatableDecl(const VarDecl *VD)
static const VarDecl * getBaseDecl(const Expr *Ref, const DeclRefExpr *&DE)
static void emitTargetRegion(CodeGenFunction &CGF, const OMPTargetDirective &S, PrePostActionTy &Action)
static void emitOMPSimdRegion(CodeGenFunction &CGF, const OMPLoopDirective &S, PrePostActionTy &Action)
static llvm::Function * emitOutlinedOrderedFunction(CodeGenModule &CGM, const CapturedStmt *S, SourceLocation Loc)
static const VarDecl * getBaseDecl(const Expr *Ref)
static void emitTargetTeamsGenericLoopRegionAsParallel(CodeGenFunction &CGF, PrePostActionTy &Action, const OMPTargetTeamsGenericLoopDirective &S)
static void emitOMPAtomicReadExpr(CodeGenFunction &CGF, llvm::AtomicOrdering AO, const Expr *X, const Expr *V, SourceLocation Loc)
static void emitOMPAtomicCaptureExpr(CodeGenFunction &CGF, llvm::AtomicOrdering AO, bool IsPostfixUpdate, const Expr *V, const Expr *X, const Expr *E, const Expr *UE, bool IsXLHSInRHSPart, SourceLocation Loc)
static void emitScanBasedDirective(CodeGenFunction &CGF, const OMPLoopDirective &S, llvm::function_ref< llvm::Value *(CodeGenFunction &)> NumIteratorsGen, llvm::function_ref< void(CodeGenFunction &)> FirstGen, llvm::function_ref< void(CodeGenFunction &)> SecondGen)
Emits the code for the directive with inscan reductions.
static void emitSimpleAtomicStore(CodeGenFunction &CGF, llvm::AtomicOrdering AO, LValue LVal, RValue RVal)
static Address castValueFromUintptr(CodeGenFunction &CGF, SourceLocation Loc, QualType DstType, StringRef Name, LValue AddrLV)
static void emitDistributeParallelForDistributeInnerBoundParams(CodeGenFunction &CGF, const OMPExecutableDirective &S, llvm::SmallVectorImpl< llvm::Value * > &CapturedVars)
static void emitScanBasedDirectiveFinals(CodeGenFunction &CGF, const OMPLoopDirective &S, llvm::function_ref< llvm::Value *(CodeGenFunction &)> NumIteratorsGen)
Copies final inscan reductions values to the original variables.
static void checkForLastprivateConditionalUpdate(CodeGenFunction &CGF, const OMPExecutableDirective &S)
static std::pair< LValue, LValue > emitForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S)
The following two functions generate expressions for the loop lower and upper bounds in case of stati...
static void emitTargetParallelForRegion(CodeGenFunction &CGF, const OMPTargetParallelForDirective &S, PrePostActionTy &Action)
static LValue EmitOMPHelperVar(CodeGenFunction &CGF, const DeclRefExpr *Helper)
Emit a helper variable and return corresponding lvalue.
static void emitOMPAtomicUpdateExpr(CodeGenFunction &CGF, llvm::AtomicOrdering AO, const Expr *X, const Expr *E, const Expr *UE, bool IsXLHSInRHSPart, SourceLocation Loc)
static llvm::Value * convertToScalarValue(CodeGenFunction &CGF, RValue Val, QualType SrcType, QualType DestType, SourceLocation Loc)
static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S, const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount)
static std::pair< bool, RValue > emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X, RValue Update, BinaryOperatorKind BO, llvm::AtomicOrdering AO, bool IsXLHSInRHSPart)
static std::pair< LValue, LValue > emitDistributeParallelForInnerBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S)
static void emitTargetTeamsGenericLoopRegionAsDistribute(CodeGenFunction &CGF, PrePostActionTy &Action, const OMPTargetTeamsGenericLoopDirective &S)
static void emitTargetParallelRegion(CodeGenFunction &CGF, const OMPTargetParallelDirective &S, PrePostActionTy &Action)
static std::pair< llvm::Value *, llvm::Value * > emitDispatchForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S, Address LB, Address UB)
When dealing with dispatch schedules (e.g.
static void emitMaster(CodeGenFunction &CGF, const OMPExecutableDirective &S)
static void emitRestoreIP(CodeGenFunction &CGF, const T *C, llvm::OpenMPIRBuilder::InsertPointTy AllocaIP, llvm::OpenMPIRBuilder &OMPBuilder)
static void emitCommonOMPTargetDirective(CodeGenFunction &CGF, const OMPExecutableDirective &S, const RegionCodeGenTy &CodeGen)
static void emitSimdlenSafelenClause(CodeGenFunction &CGF, const OMPExecutableDirective &D)
static void emitAlignedClause(CodeGenFunction &CGF, const OMPExecutableDirective &D)
static bool isSimdSupportedByOpenMPIRBuilder(const OMPLoopDirective &S)
static void emitCommonOMPParallelDirective(CodeGenFunction &CGF, const OMPExecutableDirective &S, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen, const CodeGenBoundParametersTy &CodeGenBoundParameters)
static bool emitWorksharingDirective(CodeGenFunction &CGF, const OMPLoopDirective &S, bool HasCancel)
static void emitPostUpdateForReductionClause(CodeGenFunction &CGF, const OMPExecutableDirective &D, const llvm::function_ref< llvm::Value *(CodeGenFunction &)> CondGen)
static void emitEmptyOrdered(CodeGenFunction &, SourceLocation Loc, const unsigned IVSize, const bool IVSigned)
static void emitTargetTeamsLoopCodegenStatus(CodeGenFunction &CGF, std::string StatusMsg, const OMPExecutableDirective &D)
static bool isForSupportedByOpenMPIRBuilder(const OMPLoopDirective &S, bool HasCancel)
static RValue emitSimpleAtomicLoad(CodeGenFunction &CGF, llvm::AtomicOrdering AO, LValue LVal, SourceLocation Loc)
static std::pair< llvm::Value *, llvm::Value * > emitDistributeParallelForDispatchBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S, Address LB, Address UB)
if the 'for' loop has a dispatch schedule (e.g.
void emitOMPDistributeDirective(const OMPLoopDirective &S, CodeGenFunction &CGF, CodeGenModule &CGM)
static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind, llvm::AtomicOrdering AO, llvm::AtomicOrdering FailAO, bool IsPostfixUpdate, const Expr *X, const Expr *V, const Expr *R, const Expr *E, const Expr *UE, const Expr *D, const Expr *CE, bool IsXLHSInRHSPart, bool IsFailOnly, SourceLocation Loc)
bool isSupportedByOpenMPIRBuilder(const OMPTaskgroupDirective &T)
static CodeGenFunction::ComplexPairTy convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType, QualType DestType, SourceLocation Loc)
static ImplicitParamDecl * createImplicitFirstprivateForType(ASTContext &C, OMPTaskDataTy &Data, QualType Ty, CapturedDecl *CD, SourceLocation Loc)
static EmittedClosureTy emitCapturedStmtFunc(CodeGenFunction &ParentCGF, const CapturedStmt *S)
Emit a captured statement and return the function as well as its captured closure context.
static void emitOMPLoopBodyWithStopPoint(CodeGenFunction &CGF, const OMPLoopDirective &S, CodeGenFunction::JumpDest LoopExit)
static void emitOMPCopyinClause(CodeGenFunction &CGF, const OMPExecutableDirective &S)
static void emitTargetTeamsDistributeParallelForRegion(CodeGenFunction &CGF, const OMPTargetTeamsDistributeParallelForDirective &S, PrePostActionTy &Action)
static llvm::CallInst * emitCapturedStmtCall(CodeGenFunction &ParentCGF, EmittedClosureTy Cap, llvm::ArrayRef< llvm::Value * > Args)
Emit a call to a previously captured closure.
static void emitMasked(CodeGenFunction &CGF, const OMPExecutableDirective &S)
static void emitBody(CodeGenFunction &CGF, const Stmt *S, const Stmt *NextLoop, int MaxLevel, int Level=0)
static void emitEmptyBoundParameters(CodeGenFunction &, const OMPExecutableDirective &, llvm::SmallVectorImpl< llvm::Value * > &)
static void emitTargetParallelForSimdRegion(CodeGenFunction &CGF, const OMPTargetParallelForSimdDirective &S, PrePostActionTy &Action)
static void emitOMPAtomicCompareExpr(CodeGenFunction &CGF, llvm::AtomicOrdering AO, llvm::AtomicOrdering FailAO, const Expr *X, const Expr *V, const Expr *R, const Expr *E, const Expr *D, const Expr *CE, bool IsXBinopExpr, bool IsPostfixUpdate, bool IsFailOnly, SourceLocation Loc)
std::pair< llvm::Function *, llvm::Value * > EmittedClosureTy
static OpenMPDirectiveKind getEffectiveDirectiveKind(const OMPExecutableDirective &S)
static void emitTargetTeamsRegion(CodeGenFunction &CGF, PrePostActionTy &Action, const OMPTargetTeamsDirective &S)
void emitOMPSimdDirective(const OMPLoopDirective &S, CodeGenFunction &CGF, CodeGenModule &CGM)
static void buildDependences(const OMPExecutableDirective &S, OMPTaskDataTy &Data)
static RValue convertToType(CodeGenFunction &CGF, RValue Value, QualType SourceType, QualType ResType, SourceLocation Loc)
static void emitScanBasedDirectiveDecls(CodeGenFunction &CGF, const OMPLoopDirective &S, llvm::function_ref< llvm::Value *(CodeGenFunction &)> NumIteratorsGen)
Emits internal temp array declarations for the directive with inscan reductions.
static void emitTargetTeamsDistributeParallelForSimdRegion(CodeGenFunction &CGF, const OMPTargetTeamsDistributeParallelForSimdDirective &S, PrePostActionTy &Action)
static void emitTargetTeamsDistributeSimdRegion(CodeGenFunction &CGF, PrePostActionTy &Action, const OMPTargetTeamsDistributeSimdDirective &S)
static llvm::MapVector< llvm::Value *, llvm::Value * > GetAlignedMapping(const OMPLoopDirective &S, CodeGenFunction &CGF)
static llvm::omp::ScheduleKind convertClauseKindToSchedKind(OpenMPScheduleClauseKind ScheduleClauseKind)
void emitOMPForDirective(const OMPLoopDirective &S, CodeGenFunction &CGF, CodeGenModule &CGM, bool HasCancel)
static void mapParam(CodeGenFunction &CGF, const DeclRefExpr *Helper, const ImplicitParamDecl *PVD, CodeGenFunction::OMPPrivateScope &Privates)
Emit a helper variable and return corresponding lvalue.
static void emitCommonOMPTeamsDirective(CodeGenFunction &CGF, const OMPExecutableDirective &S, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen)
static void emitTargetParallelGenericLoopRegion(CodeGenFunction &CGF, const OMPTargetParallelGenericLoopDirective &S, PrePostActionTy &Action)
static QualType getCanonicalParamType(ASTContext &C, QualType T)
static void emitCommonSimdLoop(CodeGenFunction &CGF, const OMPLoopDirective &S, const RegionCodeGenTy &SimdInitGen, const RegionCodeGenTy &BodyCodeGen)
static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty, const Twine &Name, llvm::Value *Init=nullptr)
static void emitOMPAtomicWriteExpr(CodeGenFunction &CGF, llvm::AtomicOrdering AO, const Expr *X, const Expr *E, SourceLocation Loc)
static llvm::Function * emitOutlinedFunctionPrologue(CodeGenFunction &CGF, FunctionArgList &Args, llvm::MapVector< const Decl *, std::pair< const VarDecl *, Address > > &LocalAddrs, llvm::DenseMap< const Decl *, std::pair< const Expr *, llvm::Value * > > &VLASizes, llvm::Value *&CXXThisValue, const FunctionOptions &FO)
static void emitInnerParallelForWhenCombined(CodeGenFunction &CGF, const OMPLoopDirective &S, CodeGenFunction::JumpDest LoopExit)
static void emitTargetTeamsDistributeRegion(CodeGenFunction &CGF, PrePostActionTy &Action, const OMPTargetTeamsDistributeDirective &S)
enum clang::sema::@1655::IndirectLocalPathEntry::EntryKind Kind
This file defines OpenMP nodes for declarative directives.
static const Decl * getCanonicalDecl(const Decl *D)
This file defines OpenMP AST classes for clauses.
Defines some OpenMP-specific enums and functions.
Defines the PrettyStackTraceEntry class, which is used to make crashes give more contextual informati...
Defines the SourceManager interface.
This file defines OpenMP AST classes for executable directives and clauses.
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
SourceManager & getSourceManager()
TranslationUnitDecl * getTranslationUnitDecl() const
QualType getRecordType(const RecordDecl *Decl) const
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
QualType getConstantArrayType(QualType EltTy, const llvm::APInt &ArySize, const Expr *SizeExpr, ArraySizeModifier ASM, unsigned IndexTypeQuals) const
Return the unique reference to the type for a constant array of the specified element type.
QualType getUIntPtrType() const
Return a type compatible with "uintptr_t" (C99 7.18.1.4), as defined by the target.
QualType getIntTypeForBitwidth(unsigned DestWidth, unsigned Signed) const
getIntTypeForBitwidth - sets integer QualTy according to specified details: bitwidth,...
TypeSourceInfo * getTrivialTypeSourceInfo(QualType T, SourceLocation Loc=SourceLocation()) const
Allocate a TypeSourceInfo where all locations have been initialized to a given location,...
unsigned getOpenMPDefaultSimdAlign(QualType T) const
Get default simd alignment of the specified complete type in bits.
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
int64_t toBits(CharUnits CharSize) const
Convert a size in characters to a size in bits.
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
QualType getFunctionType(QualType ResultTy, ArrayRef< QualType > Args, const FunctionProtoType::ExtProtoInfo &EPI) const
Return a normal function type with a typed argument list.
const TargetInfo & getTargetInfo() const
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Represents an attribute applied to a statement.
ArrayRef< const Attr * > getAttrs() const
A builtin binary operation expression such as "x + y" or "x <= y".
static BinaryOperator * Create(const ASTContext &C, Expr *lhs, Expr *rhs, Opcode opc, QualType ResTy, ExprValueKind VK, ExprObjectKind OK, SourceLocation opLoc, FPOptionsOverride FPFeatures)
BlockExpr - Adaptor class for mixing a BlockDecl with expressions.
Represents the body of a CapturedStmt, and serves as its DeclContext.
unsigned getContextParamPosition() const
static CapturedDecl * Create(ASTContext &C, DeclContext *DC, unsigned NumParams)
param_iterator param_end() const
Retrieve an iterator one past the last parameter decl.
param_iterator param_begin() const
Retrieve an iterator pointing to the first parameter decl.
Stmt * getBody() const override
getBody - If this Decl represents a declaration for a body of code, such as a function or method defi...
ImplicitParamDecl * getParam(unsigned i) const
This captures a statement into a function.
CapturedDecl * getCapturedDecl()
Retrieve the outlined function declaration.
const RecordDecl * getCapturedRecordDecl() const
Retrieve the record declaration for captured variables.
Stmt * getCapturedStmt()
Retrieve the statement being captured.
Expr *const * const_capture_init_iterator
Const iterator that walks over the capture initialization arguments.
CharUnits - This is an opaque type for sizes expressed in character units.
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
CharUnits alignTo(const CharUnits &Align) const
alignTo - Returns the next integer (mod 2**64) that is greater than or equal to this quantity and is ...
bool hasReducedDebugInfo() const
Check if type and variable info should be emitted.
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
CharUnits getAlignment() const
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Address withAlignment(CharUnits NewAlignment) const
Return address with different alignment, but same pointer and element type.
llvm::PointerType * getType() const
Return the type of the pointer value.
static AggValueSlot ignored()
ignored - Returns an aggregate value slot indicating that the aggregate value is being ignored.
static ApplyDebugLocation CreateDefaultArtificial(CodeGenFunction &CGF, SourceLocation TemporaryLocation)
Apply TemporaryLocation if it is valid.
llvm::Value * CreateIsNull(Address Addr, const Twine &Name="")
Address CreatePointerBitCastOrAddrSpaceCast(Address Addr, llvm::Type *Ty, llvm::Type *ElementTy, const llvm::Twine &Name="")
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Address CreateInBoundsGEP(Address Addr, ArrayRef< llvm::Value * > IdxList, llvm::Type *ElementType, CharUnits Align, const Twine &Name="")
CGFunctionInfo - Class to encapsulate the information about a function definition.
Manages list of lastprivate conditional decls for the specified directive.
static LastprivateConditionalRAII disable(CodeGenFunction &CGF, const OMPExecutableDirective &S)
Manages list of nontemporal decls for the specified directive.
Struct that keeps all the relevant information that should be kept throughout a 'target data' region.
Manages list of nontemporal decls for the specified directive.
virtual void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc, const OMPExecutableDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const Expr *IfCond, const OMPTaskDataTy &Data)
Emit task region for the task directive.
virtual llvm::Value * emitForNext(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned, Address IL, Address LB, Address UB, Address ST)
Call __kmpc_dispatch_next( ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter, kmp_int[32|64] *p_lowe...
virtual void emitTargetDataStandAloneCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, const Expr *Device)
Emit the data mapping/movement code associated with the directive D that should be of the form 'targe...
virtual void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc)
Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_threads)...
virtual void emitSingleRegion(CodeGenFunction &CGF, const RegionCodeGenTy &SingleOpGen, SourceLocation Loc, ArrayRef< const Expr * > CopyprivateVars, ArrayRef< const Expr * > DestExprs, ArrayRef< const Expr * > SrcExprs, ArrayRef< const Expr * > AssignmentOps)
Emits a single region.
virtual Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *ReductionsPtr, LValue SharedLVal)
Get the address of void * type of the privatue copy of the reduction item specified by the SharedLVal...
virtual void emitForDispatchDeinit(CodeGenFunction &CGF, SourceLocation Loc)
This is used for non static scheduled types and when the ordered clause is present on the loop constr...
virtual void getDefaultScheduleAndChunk(CodeGenFunction &CGF, const OMPLoopDirective &S, OpenMPScheduleClauseKind &ScheduleKind, const Expr *&ChunkExpr) const
Choose default schedule type and chunk value for the schedule clause.
virtual void emitTeamsCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef< llvm::Value * > CapturedVars)
Emits code for teams call of the OutlinedFn with variables captured in a record which address is stor...
virtual void emitCancellationPointCall(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind CancelRegion)
Emit code for 'cancellation point' construct.
virtual const VarDecl * translateParameter(const FieldDecl *FD, const VarDecl *NativeParam) const
Translates the native parameter of outlined function if this is required for target.
virtual llvm::Function * emitTeamsOutlinedFunction(CodeGenFunction &CGF, const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen)
Emits outlined function for the specified OpenMP teams directive D.
virtual void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D, ArrayRef< Expr * > NumIterations)
Emit initialization for doacross loop nesting support.
virtual void adjustTargetSpecificDataForLambdas(CodeGenFunction &CGF, const OMPExecutableDirective &D) const
Adjust some parameters for the target-based directives, like addresses of the variables captured by r...
virtual void emitTargetDataCalls(CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, const Expr *Device, const RegionCodeGenTy &CodeGen, CGOpenMPRuntime::TargetDataInfo &Info)
Emit the target data mapping code associated with D.
virtual Address getParameterAddress(CodeGenFunction &CGF, const VarDecl *NativeParam, const VarDecl *TargetParam) const
Gets the address of the native argument basing on the address of the target-specific parameter.
virtual void emitTaskgroupRegion(CodeGenFunction &CGF, const RegionCodeGenTy &TaskgroupOpGen, SourceLocation Loc)
Emit a taskgroup region.
virtual void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams, const Expr *ThreadLimit, SourceLocation Loc)
Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_teams,...
virtual llvm::Value * emitTaskReductionInit(CodeGenFunction &CGF, SourceLocation Loc, ArrayRef< const Expr * > LHSExprs, ArrayRef< const Expr * > RHSExprs, const OMPTaskDataTy &Data)
Emit a code for initialization of task reduction clause.
void emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal, OpenMPDependClauseKind NewDepKind, SourceLocation Loc)
Updates the dependency kind in the specified depobj object.
virtual void emitLastprivateConditionalFinalUpdate(CodeGenFunction &CGF, LValue PrivLVal, const VarDecl *VD, SourceLocation Loc)
Gets the address of the global copy used for lastprivate conditional update, if any.
virtual void emitErrorCall(CodeGenFunction &CGF, SourceLocation Loc, Expr *ME, bool IsFatal)
Emit __kmpc_error call for error directive extern void __kmpc_error(ident_t *loc, int severity,...
virtual void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc)
Emits code for a taskyield directive.
virtual void emitFlush(CodeGenFunction &CGF, ArrayRef< const Expr * > Vars, SourceLocation Loc, llvm::AtomicOrdering AO)
Emit flush of the variables specified in 'omp flush' directive.
virtual void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc, const OMPTaskDataTy &Data)
Emit code for 'taskwait' directive.
virtual void emitProcBindClause(CodeGenFunction &CGF, llvm::omp::ProcBindKind ProcBind, SourceLocation Loc)
Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid, int proc_bind) to generat...
virtual void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind Kind, bool EmitChecks=true, bool ForceSimpleCall=false)
Emit an implicit/explicit barrier for OpenMP threads.
virtual void emitDistributeStaticInit(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDistScheduleClauseKind SchedKind, const StaticRTInput &Values)
virtual void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind)
Call the appropriate runtime routine to notify that we finished all the work with current loop.
void emitIfClause(CodeGenFunction &CGF, const Expr *Cond, const RegionCodeGenTy &ThenGen, const RegionCodeGenTy &ElseGen)
Emits code for OpenMP 'if' clause using specified CodeGen function.
Address emitDepobjDependClause(CodeGenFunction &CGF, const OMPTaskDataTy::DependData &Dependencies, SourceLocation Loc)
Emits list of dependecies based on the provided data (array of dependence/expression pairs) for depob...
virtual llvm::Function * emitParallelOutlinedFunction(CodeGenFunction &CGF, const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen)
Emits outlined function for the specified OpenMP parallel directive D.
virtual void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind, const OpenMPScheduleTy &ScheduleKind, const StaticRTInput &Values)
Call the appropriate runtime routine to initialize it before start of loop.
llvm::AtomicOrdering getDefaultMemoryOrdering() const
Gets default memory ordering as specified in requires directive.
virtual bool isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind, bool Chunked) const
Check if the specified ScheduleKind is static non-chunked.
virtual void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc, const Expr *IfCond, OpenMPDirectiveKind CancelRegion)
Emit code for 'cancel' construct.
virtual void emitMasterRegion(CodeGenFunction &CGF, const RegionCodeGenTy &MasterOpGen, SourceLocation Loc)
Emits a master region.
virtual llvm::Function * emitTaskOutlinedFunction(const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, const VarDecl *PartIDVar, const VarDecl *TaskTVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen, bool Tied, unsigned &NumberOfParts)
Emits outlined function for the OpenMP task directive D.
void emitDestroyClause(CodeGenFunction &CGF, LValue DepobjLVal, SourceLocation Loc)
Emits the code to destroy the dependency object provided in depobj directive.
virtual void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc, ReductionCodeGen &RCG, unsigned N)
Required to resolve existing problems in the runtime.
virtual void emitDoacrossOrdered(CodeGenFunction &CGF, const OMPDependClause *C)
Emit code for doacross ordered directive with 'depend' clause.
virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF, const Expr *LHS)
Checks if the provided LVal is lastprivate conditional and emits the code to update the value of the ...
virtual void getDefaultDistScheduleAndChunk(CodeGenFunction &CGF, const OMPLoopDirective &S, OpenMPDistScheduleClauseKind &ScheduleKind, llvm::Value *&Chunk) const
Choose default schedule type and chunk value for the dist_schedule clause.
llvm::OpenMPIRBuilder & getOMPBuilder()
virtual void emitTargetOutlinedFunction(const OMPExecutableDirective &D, StringRef ParentName, llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID, bool IsOffloadEntry, const RegionCodeGenTy &CodeGen)
Emit outilined function for 'target' directive.
virtual void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName, const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc, const Expr *Hint=nullptr)
Emits a critical region.
virtual void emitForOrderedIterationEnd(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned)
Call the appropriate runtime routine to notify that we finished iteration of the ordered loop with th...
virtual void checkAndEmitSharedLastprivateConditional(CodeGenFunction &CGF, const OMPExecutableDirective &D, const llvm::DenseSet< CanonicalDeclPtr< const VarDecl > > &IgnoredDecls)
Checks if the lastprivate conditional was updated in inner region and writes the value.
virtual void emitInlinedDirective(CodeGenFunction &CGF, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen, bool HasCancel=false)
Emit code for the directive that does not require outlining.
virtual void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef< llvm::Value * > CapturedVars, const Expr *IfCond, llvm::Value *NumThreads)
Emits code for parallel or serial call of the OutlinedFn with variables captured in a record which ad...
virtual bool isStaticChunked(OpenMPScheduleClauseKind ScheduleKind, bool Chunked) const
Check if the specified ScheduleKind is static chunked.
virtual void emitTargetCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond, llvm::PointerIntPair< const Expr *, 2, OpenMPDeviceClauseModifier > Device, llvm::function_ref< llvm::Value *(CodeGenFunction &CGF, const OMPLoopDirective &D)> SizeEmitter)
Emit the target offloading code associated with D.
virtual void emitReduction(CodeGenFunction &CGF, SourceLocation Loc, ArrayRef< const Expr * > Privates, ArrayRef< const Expr * > LHSExprs, ArrayRef< const Expr * > RHSExprs, ArrayRef< const Expr * > ReductionOps, ReductionOptionsTy Options)
Emit a code for reduction clause.
std::pair< llvm::Value *, Address > emitDependClause(CodeGenFunction &CGF, ArrayRef< OMPTaskDataTy::DependData > Dependencies, SourceLocation Loc)
Emits list of dependecies based on the provided data (array of dependence/expression pairs).
virtual bool isDynamic(OpenMPScheduleClauseKind ScheduleKind) const
Check if the specified ScheduleKind is dynamic.
Address emitLastprivateConditionalInit(CodeGenFunction &CGF, const VarDecl *VD)
Create specialized alloca to handle lastprivate conditionals.
virtual void emitOrderedRegion(CodeGenFunction &CGF, const RegionCodeGenTy &OrderedOpGen, SourceLocation Loc, bool IsThreads)
Emit an ordered region.
virtual void emitOutlinedFunctionCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn, ArrayRef< llvm::Value * > Args=std::nullopt) const
Emits call of the outlined function with the provided arguments, translating these arguments to corre...
virtual void emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc, bool IsWorksharingReduction)
Emits the following code for reduction clause with task modifier:
virtual void emitMaskedRegion(CodeGenFunction &CGF, const RegionCodeGenTy &MaskedOpGen, SourceLocation Loc, const Expr *Filter=nullptr)
Emits a masked region.
virtual void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc, const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned, bool Ordered, const DispatchRTInput &DispatchValues)
Call the appropriate runtime routine to initialize it before start of loop.
virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S)
Emit the captured statement body.
virtual StringRef getHelperName() const
Get the name of the capture helper.
virtual const FieldDecl * lookup(const VarDecl *VD) const
Lookup the captured field decl for a variable.
RAII for preserving necessary info during inlined region body codegen.
Cleanup action for allocate support.
RAII for preserving necessary info during Outlined region body codegen.
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
void EmitOMPTaskBasedDirective(const OMPExecutableDirective &S, const OpenMPDirectiveKind CapturedRegion, const RegionCodeGenTy &BodyGen, const TaskGenTy &TaskGen, OMPTaskDataTy &Data)
void EmitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &S)
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
void EmitOMPParallelGenericLoopDirective(const OMPLoopDirective &S)
void EmitOMPAggregateAssign(Address DestAddr, Address SrcAddr, QualType OriginalType, const llvm::function_ref< void(Address, Address)> CopyGen)
Perform element by element copying of arrays with type OriginalType from SrcAddr to DestAddr using co...
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target)
The given basic block lies in the current EH scope, but may be a target of a potentially scope-crossi...
void EmitOMPCopy(QualType OriginalType, Address DestAddr, Address SrcAddr, const VarDecl *DestVD, const VarDecl *SrcVD, const Expr *Copy)
Emit proper copying of data from one variable to another.
void EmitOMPOrderedDirective(const OMPOrderedDirective &S)
void EmitOMPTargetDirective(const OMPTargetDirective &S)
bool EmitOMPFirstprivateClause(const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope)
void EmitOMPReductionClauseInit(const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope, bool ForInscan=false)
Emit initial code for reduction variables.
void EmitOMPAtomicDirective(const OMPAtomicDirective &S)
void EmitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &S)
void EmitOMPParallelMasterTaskLoopDirective(const OMPParallelMasterTaskLoopDirective &S)
void EmitAutoVarDecl(const VarDecl &D)
EmitAutoVarDecl - Emit an auto variable declaration.
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
static void EmitOMPTargetTeamsDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetTeamsDirective &S)
Emit device code for the target teams directive.
void EmitOMPReverseDirective(const OMPReverseDirective &S)
static bool hasScalarEvaluationKind(QualType T)
llvm::function_ref< std::pair< llvm::Value *, llvm::Value * >(CodeGenFunction &, const OMPExecutableDirective &S, Address LB, Address UB)> CodeGenDispatchBoundsTy
CGCapturedStmtInfo * CapturedStmtInfo
void EmitDecl(const Decl &D)
EmitDecl - Emit a declaration.
static void EmitOMPTargetTeamsDistributeDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetTeamsDistributeDirective &S)
Emit device code for the target teams distribute directive.
Address EmitLoadOfPointer(Address Ptr, const PointerType *PtrTy, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Load a pointer with type PtrTy stored at address Ptr.
static void EmitOMPTargetParallelForSimdDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetParallelForSimdDirective &S)
Emit device code for the target parallel for simd directive.
llvm::Value * emitArrayLength(const ArrayType *arrayType, QualType &baseType, Address &addr)
emitArrayLength - Compute the length of an array, even if it's a VLA, and drill down to the base elem...
VlaSizePair getVLASize(const VariableArrayType *vla)
Returns an LLVM value that corresponds to the size, in non-variably-sized elements,...
const OMPExecutableDirective * OMPParentLoopDirectiveForScan
Parent loop-based directive for scan directive.
void EmitOMPTeamsDistributeParallelForDirective(const OMPTeamsDistributeParallelForDirective &S)
void EmitOMPTaskDirective(const OMPTaskDirective &S)
void EmitOMPScanDirective(const OMPScanDirective &S)
void EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S)
JumpDest getOMPCancelDestination(OpenMPDirectiveKind Kind)
void EmitOMPMasterTaskLoopSimdDirective(const OMPMasterTaskLoopSimdDirective &S)
void EmitOMPUseDevicePtrClause(const OMPUseDevicePtrClause &C, OMPPrivateScope &PrivateScope, const llvm::DenseMap< const ValueDecl *, llvm::Value * > CaptureDeviceAddrMap)
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
RValue EmitAtomicLoad(LValue LV, SourceLocation SL, AggValueSlot Slot=AggValueSlot::ignored())
void EmitOMPDistributeLoop(const OMPLoopDirective &S, const CodeGenLoopTy &CodeGenLoop, Expr *IncExpr)
Emit code for the distribute loop-based directive.
void EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S)
static void EmitOMPTargetDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetDirective &S)
Emit device code for the target directive.
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void EmitAtomicUpdate(LValue LVal, llvm::AtomicOrdering AO, const llvm::function_ref< RValue(RValue)> &UpdateOp, bool IsVolatile)
const LangOptions & getLangOpts() const
static void EmitOMPTargetSimdDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetSimdDirective &S)
Emit device code for the target simd directive.
void EmitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &S)
void EmitOMPPrivateLoopCounters(const OMPLoopDirective &S, OMPPrivateScope &LoopScope)
Emit initial code for loop counters of loop-based directives.
void EmitOMPDistributeParallelForDirective(const OMPDistributeParallelForDirective &S)
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
void EmitOMPLastprivateClauseFinal(const OMPExecutableDirective &D, bool NoFinals, llvm::Value *IsLastIterCond=nullptr)
Emit final copying of lastprivate values to original variables at the end of the worksharing or simd ...
void EmitOMPInterchangeDirective(const OMPInterchangeDirective &S)
void EmitOMPTeamsGenericLoopDirective(const OMPTeamsGenericLoopDirective &S)
void EmitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &S)
Address EmitLoadOfReference(LValue RefLVal, LValueBaseInfo *PointeeBaseInfo=nullptr, TBAAAccessInfo *PointeeTBAAInfo=nullptr)
void EmitExprAsInit(const Expr *init, const ValueDecl *D, LValue lvalue, bool capturedByInit)
EmitExprAsInit - Emits the code necessary to initialize a location in memory with the given initializ...
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
void EmitOMPParallelDirective(const OMPParallelDirective &S)
void EmitOMPSimdFinal(const OMPLoopDirective &D, const llvm::function_ref< llvm::Value *(CodeGenFunction &)> CondGen)
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
llvm::Type * ConvertTypeForMem(QualType T)
void EmitOMPDistributeParallelForSimdDirective(const OMPDistributeParallelForSimdDirective &S)
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
void EmitOMPForSimdDirective(const OMPForSimdDirective &S)
llvm::AssertingVH< llvm::Instruction > AllocaInsertPt
AllocaInsertPoint - This is an instruction in the entry block before which we prefer to insert alloca...
void EmitAggregateAssign(LValue Dest, LValue Src, QualType EltTy)
Emit an aggregate assignment.
void GenerateOpenMPCapturedVars(const CapturedStmt &S, SmallVectorImpl< llvm::Value * > &CapturedVars)
CodeGenFunction * ParentCGF
JumpDest ReturnBlock
ReturnBlock - Unified return block.
const llvm::function_ref< void(CodeGenFunction &, llvm::Function *, const OMPTaskDataTy &)> TaskGenTy
static void EmitOMPTargetTeamsGenericLoopDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetTeamsGenericLoopDirective &S)
Emit device code for the target teams loop directive.
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignmen and cas...
void EmitOMPFlushDirective(const OMPFlushDirective &S)
llvm::BasicBlock * OMPBeforeScanBlock
void EmitOMPCancelDirective(const OMPCancelDirective &S)
void EmitOMPGenericLoopDirective(const OMPGenericLoopDirective &S)
void EmitOMPTargetTeamsDistributeDirective(const OMPTargetTeamsDistributeDirective &S)
ConstantEmission tryEmitAsConstant(DeclRefExpr *refExpr)
void EmitOMPTaskgroupDirective(const OMPTaskgroupDirective &S)
static void EmitOMPTargetTeamsDistributeParallelForDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetTeamsDistributeParallelForDirective &S)
void emitOMPSimpleStore(LValue LVal, RValue RVal, QualType RValTy, SourceLocation Loc)
void EmitOMPInnerLoop(const OMPExecutableDirective &S, bool RequiresCleanup, const Expr *LoopCond, const Expr *IncExpr, const llvm::function_ref< void(CodeGenFunction &)> BodyGen, const llvm::function_ref< void(CodeGenFunction &)> PostIncGen)
Emit inner loop of the worksharing/simd construct.
llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location)
Converts Location to a DebugLoc, if debug information is enabled.
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
void EmitOMPDepobjDirective(const OMPDepobjDirective &S)
void EmitOMPSingleDirective(const OMPSingleDirective &S)
void EmitOMPTargetTeamsGenericLoopDirective(const OMPTargetTeamsGenericLoopDirective &S)
llvm::CanonicalLoopInfo * EmitOMPCollapsedCanonicalLoopNest(const Stmt *S, int Depth)
Emit the Stmt S and return its topmost canonical loop, if any.
void EmitSimpleOMPExecutableDirective(const OMPExecutableDirective &D)
Emit simple code for OpenMP directives in Simd-only mode.
void EmitOMPDistributeDirective(const OMPDistributeDirective &S)
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
void EmitOMPParallelForDirective(const OMPParallelForDirective &S)
void EmitOMPTeamsDirective(const OMPTeamsDirective &S)
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
void EmitOMPUnrollDirective(const OMPUnrollDirective &S)
void EmitOMPParallelMasterTaskLoopSimdDirective(const OMPParallelMasterTaskLoopSimdDirective &S)
void EmitOMPTargetDataDirective(const OMPTargetDataDirective &S)
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
llvm::Value * EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified complex type to the specified destination type,...
bool isTrivialInitializer(const Expr *Init)
Determine whether the given initializer is trivial in the sense that it requires no code to be genera...
CGDebugInfo * getDebugInfo()
void EmitOMPTargetTeamsDistributeParallelForSimdDirective(const OMPTargetTeamsDistributeParallelForSimdDirective &S)
void EmitOMPTeamsDistributeParallelForSimdDirective(const OMPTeamsDistributeParallelForSimdDirective &S)
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
llvm::Function * GenerateCapturedStmtFunction(const CapturedStmt &S)
void EmitOMPInteropDirective(const OMPInteropDirective &S)
void EmitOMPReductionClauseFinal(const OMPExecutableDirective &D, const OpenMPDirectiveKind ReductionKind)
Emit final update of reduction values to original variables at the end of the directive.
llvm::BasicBlock * OMPScanDispatch
llvm::AtomicRMWInst * emitAtomicRMWInst(llvm::AtomicRMWInst::BinOp Op, Address Addr, llvm::Value *Val, llvm::AtomicOrdering Order=llvm::AtomicOrdering::SequentiallyConsistent, llvm::SyncScope::ID SSID=llvm::SyncScope::System)
Emit an atomicrmw instruction, and applying relevant metadata when applicable.
llvm::function_ref< std::pair< LValue, LValue >(CodeGenFunction &, const OMPExecutableDirective &S)> CodeGenLoopBoundsTy
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
void EmitOMPTargetTaskBasedDirective(const OMPExecutableDirective &S, const RegionCodeGenTy &BodyGen, OMPTargetDataInfo &InputInfo)
void EmitOMPTargetParallelDirective(const OMPTargetParallelDirective &S)
void EmitOMPParallelMaskedDirective(const OMPParallelMaskedDirective &S)
static void EmitOMPTargetParallelForDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetParallelForDirective &S)
Emit device code for the target parallel for directive.
void EmitOMPSimdInit(const OMPLoopDirective &D)
Helpers for the OpenMP loop directives.
int ExpectedOMPLoopDepth
Number of nested loop to be consumed by the last surrounding loop-associated directive.
void EmitVarDecl(const VarDecl &D)
EmitVarDecl - Emit a local variable declaration.
void EmitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &S)
ASTContext & getContext() const
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
const Decl * CurFuncDecl
CurFuncDecl - Holds the Decl for the current outermost non-closure context.
static void EmitOMPTargetTeamsDistributeSimdDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetTeamsDistributeSimdDirective &S)
Emit device code for the target teams distribute simd directive.
SmallVector< llvm::CanonicalLoopInfo *, 4 > OMPLoopNestStack
List of recently emitted OMPCanonicalLoops.
bool EmitOMPWorksharingLoop(const OMPLoopDirective &S, Expr *EUB, const CodeGenLoopBoundsTy &CodeGenLoopBounds, const CodeGenDispatchBoundsTy &CGDispatchBounds)
Emit code for the worksharing loop-based directive.
void EmitOMPLinearClause(const OMPLoopDirective &D, CodeGenFunction::OMPPrivateScope &PrivateScope)
Emit initial code for linear clauses.
void EmitBranchThroughCleanup(JumpDest Dest)
EmitBranchThroughCleanup - Emit a branch from the current insert block through the normal cleanup han...
AutoVarEmission EmitAutoVarAlloca(const VarDecl &var)
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
void EmitAutoVarCleanups(const AutoVarEmission &emission)
void EmitOMPTileDirective(const OMPTileDirective &S)
bool EmitOMPLinearClauseInit(const OMPLoopDirective &D)
Emit initial code for linear variables.
llvm::DenseMap< const ValueDecl *, FieldDecl * > LambdaCaptureFields
void EmitOMPLoopBody(const OMPLoopDirective &D, JumpDest LoopExit)
Helper for the OpenMP loop directives.
void EmitOMPLinearClauseFinal(const OMPLoopDirective &D, const llvm::function_ref< llvm::Value *(CodeGenFunction &)> CondGen)
Emit final code for linear clauses.
void EmitOMPSectionsDirective(const OMPSectionsDirective &S)
llvm::BasicBlock * OMPScanExitBlock
static void EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetTeamsDistributeParallelForSimdDirective &S)
Emit device code for the target teams distribute parallel for simd directive.
void EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst)
void EmitOMPUseDeviceAddrClause(const OMPUseDeviceAddrClause &C, OMPPrivateScope &PrivateScope, const llvm::DenseMap< const ValueDecl *, llvm::Value * > CaptureDeviceAddrMap)
void EmitOMPTargetSimdDirective(const OMPTargetSimdDirective &S)
void EmitOMPTaskyieldDirective(const OMPTaskyieldDirective &S)
void EmitOMPSimdDirective(const OMPSimdDirective &S)
void EmitOMPCriticalDirective(const OMPCriticalDirective &S)
void EmitOMPForDirective(const OMPForDirective &S)
void EmitOMPMetaDirective(const OMPMetaDirective &S)
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
void EmitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &S)
LValue InitCapturedStruct(const CapturedStmt &S)
void EmitOMPParallelMasterDirective(const OMPParallelMasterDirective &S)
void EmitOMPTargetTeamsDistributeSimdDirective(const OMPTargetTeamsDistributeSimdDirective &S)
void processInReduction(const OMPExecutableDirective &S, OMPTaskDataTy &Data, CodeGenFunction &CGF, const CapturedStmt *CS, OMPPrivateScope &Scope)
void EmitOMPMasterDirective(const OMPMasterDirective &S)
void EmitOMPMasterTaskLoopDirective(const OMPMasterTaskLoopDirective &S)
void EmitOMPTargetParallelGenericLoopDirective(const OMPTargetParallelGenericLoopDirective &S)
void EmitOMPAssumeDirective(const OMPAssumeDirective &S)
bool EmitOMPCopyinClause(const OMPExecutableDirective &D)
Emit code for copyin clause in D directive.
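For context, a hedged sketch of the 'copyin' pattern this handles: a threadprivate variable whose primary-thread value is broadcast to every thread's copy on entry to the parallel region (illustrative only):
// Illustrative OpenMP input only; compile with -fopenmp.
int counter = 0;
#pragma omp threadprivate(counter)

void run() {
  counter = 42;                      // primary thread's threadprivate copy
#pragma omp parallel copyin(counter)
  {
    // each thread's copy of 'counter' starts out as 42 here
  }
}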
void EmitOMPMaskedDirective(const OMPMaskedDirective &S)
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
void EmitOMPPrivateClause(const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope)
void EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S)
llvm::Function * GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S, SourceLocation Loc)
void EmitOMPTargetParallelForSimdDirective(const OMPTargetParallelForSimdDirective &S)
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
Address GenerateCapturedStmtArgument(const CapturedStmt &S)
static void EmitOMPTargetParallelGenericLoopDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetParallelGenericLoopDirective &S)
Emit device code for the target parallel loop directive.
llvm::function_ref< void(CodeGenFunction &, SourceLocation, const unsigned, const bool)> CodeGenOrderedTy
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit)
llvm::Value * EmitScalarConversion(llvm::Value *Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified type to the specified destination type, both of which are LLVM s...
llvm::BasicBlock * OMPAfterScanBlock
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
void EmitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &S)
void EmitOMPErrorDirective(const OMPErrorDirective &S)
void EmitOMPSectionDirective(const OMPSectionDirective &S)
static void EmitOMPTargetParallelDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetParallelDirective &S)
void EmitOMPBarrierDirective(const OMPBarrierDirective &S)
void EmitStopPoint(const Stmt *S)
EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
void EmitOMPCancellationPointDirective(const OMPCancellationPointDirective &S)
void EmitStmt(const Stmt *S, ArrayRef< const Attr * > Attrs=std::nullopt)
EmitStmt - Emit the code for the statement.
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
llvm::LLVMContext & getLLVMContext()
void EmitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &S)
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
void EmitOMPTargetTeamsDistributeParallelForDirective(const OMPTargetTeamsDistributeParallelForDirective &S)
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
bool EmitOMPLastprivateClauseInit(const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope)
Emit initial code for lastprivate variables.
void EmitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &S)
llvm::function_ref< void(CodeGenFunction &, const OMPLoopDirective &, JumpDest)> CodeGenLoopTy
void EmitOMPCanonicalLoop(const OMPCanonicalLoop *S)
Emit an OMPCanonicalLoop using the OpenMPIRBuilder.
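A hedged reminder of the canonical loop form this models (single loop variable, relational test against a loop-invariant bound, constant step); illustrative only:
// Illustrative OpenMP input only; compile with -fopenmp.
void scale(float *x, int n) {
#pragma omp simd
  for (int i = 0; i < n; ++i)        // init; relational test; constant increment
    x[i] *= 2.0f;
}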
void EmitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &S)
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
void EmitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &S)
std::pair< bool, RValue > EmitOMPAtomicSimpleUpdateExpr(LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart, llvm::AtomicOrdering AO, SourceLocation Loc, const llvm::function_ref< RValue(RValue)> CommonGen)
Emit atomic update code for constructs: X = X BO E or X = E BO X.
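For illustration (not from the source), the source-level atomic update that such codegen lowers to an atomic read-modify-write sequence:
// Illustrative OpenMP input only; compile with -fopenmp.
#include <cstdio>
int main() {
  int x = 0;
#pragma omp parallel for
  for (int i = 0; i < 1000; ++i) {
#pragma omp atomic update
    x = x + 1;                       // form "x = x BO expr"
  }
  std::printf("x = %d\n", x);        // prints 1000
  return 0;
}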
This class organizes the cross-function state that is used while generating LLVM code.
void SetInternalFunctionAttributes(GlobalDecl GD, llvm::Function *F, const CGFunctionInfo &FI)
Set the attributes on the LLVM function for the given decl and function info.
llvm::Module & getModule() const
CharUnits GetTargetTypeStoreSize(llvm::Type *Ty) const
Return the store size, in character units, of the given LLVM type.
DiagnosticsEngine & getDiags() const
const LangOptions & getLangOpts() const
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
CodeGenTypes & getTypes()
const llvm::DataLayout & getDataLayout() const
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
llvm::Constant * GetAddrOfGlobal(GlobalDecl GD, ForDefinition_t IsForDefinition=NotForDefinition)
ASTContext & getContext() const
const CodeGenOptions & getCodeGenOpts() const
StringRef getMangledName(GlobalDecl GD)
llvm::Constant * getStaticLocalDeclAddress(const VarDecl *D)
llvm::ConstantInt * getSize(CharUnits numChars)
Emit the given number of characters as a value of type size_t.
void assignRegionCounters(GlobalDecl GD, llvm::Function *Fn)
Assign counters to regions and configure them for PGO of a given function.
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for the given function info.
const CGFunctionInfo & arrangeBuiltinFunctionDeclaration(QualType resultType, const FunctionArgList &args)
A builtin function is a freestanding function using the default C conventions.
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
FunctionArgList - Type for representing both the decl and type of parameters to a function.
LValue - This represents an lvalue reference.
llvm::Value * getPointer(CodeGenFunction &CGF) const
Address getAddress() const
void setAddress(Address address)
void setUnrollCount(unsigned C)
Set the unroll count for the next loop pushed.
void setVectorizeWidth(unsigned W)
Set the vectorize width for the next loop pushed.
void setParallel(bool Enable=true)
Set the next pushed loop as parallel.
void setUnrollState(const LoopAttributes::LVEnableState &State)
Set the next pushed loop unroll state.
void pop()
End the current loop.
void push(llvm::BasicBlock *Header, const llvm::DebugLoc &StartLoc, const llvm::DebugLoc &EndLoc)
Begin a new structured loop.
void setVectorizeEnable(bool Enable=true)
Set the next pushed loop 'vectorize.enable'.
A basic class for pre|post-action for advanced codegen sequence for OpenMP region.
virtual void Enter(CodeGenFunction &CGF)
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
static RValue get(llvm::Value *V)
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
An abstract representation of an aligned address.
llvm::PointerType * getType() const
Return the type of the pointer value.
llvm::Value * getPointer() const
Class intended to support codegen of all kinds of reduction clauses.
void emitAggregateType(CodeGenFunction &CGF, unsigned N)
Emits the code for the variable-modified type, if required.
void emitSharedOrigLValue(CodeGenFunction &CGF, unsigned N)
Emits lvalue for the shared and original reduction item.
Class that provides a way to call a simple version of codegen for an OpenMP region, or an advanced one with possibl...
Complex values, per C99 6.2.5p11.
CompoundStmt - This represents a group of statements like { stmt stmt }.
ConstStmtVisitor - This class implements a simple visitor for Stmt subclasses.
DeclContext * getParent()
getParent - Returns the containing DeclContext.
A reference to a declared variable, function, enum, etc.
static DeclRefExpr * Create(const ASTContext &Context, NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc, ValueDecl *D, bool RefersToEnclosingVariableOrCapture, SourceLocation NameLoc, QualType T, ExprValueKind VK, NamedDecl *FoundD=nullptr, const TemplateArgumentListInfo *TemplateArgs=nullptr, NonOdrUseReason NOUR=NOUR_None)
DeclStmt - Adaptor class for mixing declarations with statements and expressions.
Decl - This represents one declaration (or definition), e.g.
SourceLocation getEndLoc() const LLVM_READONLY
virtual Stmt * getBody() const
getBody - If this Decl represents a declaration for a body of code, such as a function or method defi...
SourceLocation getBodyRBrace() const
getBodyRBrace - Gets the right brace of the body, if a body exists.
virtual bool hasBody() const
Returns true if this Decl represents a declaration for a body of code, such as a function or method d...
SourceLocation getLocation() const
SourceLocation getBeginLoc() const LLVM_READONLY
virtual Decl * getCanonicalDecl()
Retrieves the "canonical" declaration of the given declaration.
The name of a declaration.
SourceLocation getBeginLoc() const LLVM_READONLY
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
unsigned getCustomDiagID(Level L, const char(&FormatString)[N])
Return an ID for a diagnostic with the specified format string and level.
This represents one expression.
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
Expr * IgnoreParenImpCasts() LLVM_READONLY
Skip past any parentheses and implicit casts which might surround this expression until reaching a fi...
Expr * IgnoreImplicitAsWritten() LLVM_READONLY
Skip past any implicit AST nodes which might surround this expression until reaching a fixed point.
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Represents difference between two FPOptions values.
Represents a member of a struct/union/class.
Represents a function declaration or definition.
static FunctionDecl * Create(ASTContext &C, DeclContext *DC, SourceLocation StartLoc, SourceLocation NLoc, DeclarationName N, QualType T, TypeSourceInfo *TInfo, StorageClass SC, bool UsesFPIntrin=false, bool isInlineSpecified=false, bool hasWrittenPrototype=true, ConstexprSpecKind ConstexprKind=ConstexprSpecKind::Unspecified, Expr *TrailingRequiresClause=nullptr)
GlobalDecl - represents a global declaration.
One of these records is kept for each identifier that is lexed.
IdentifierInfo & get(StringRef Name)
Return the identifier token info for the specified named identifier.
static ImplicitCastExpr * Create(const ASTContext &Context, QualType T, CastKind Kind, Expr *Operand, const CXXCastPath *BasePath, ExprValueKind Cat, FPOptionsOverride FPO)
static ImplicitParamDecl * Create(ASTContext &C, DeclContext *DC, SourceLocation IdLoc, IdentifierInfo *Id, QualType T, ImplicitParamKind ParamKind)
Create implicit parameter.
A C++ lambda expression, which produces a function object (of unspecified type) that can be invoked l...
std::vector< llvm::Triple > OMPTargetTriples
Triples of the OpenMP targets that the host code codegen should take into account in order to generat...
Represents a point when we exit a loop.
IdentifierInfo * getIdentifier() const
Get the identifier that names this declaration, if there is one.
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
A C++ nested-name-specifier augmented with source location information.
This represents 'acq_rel' clause in the '#pragma omp atomic|flush' directives.
This represents 'acquire' clause in the '#pragma omp atomic|flush' directives.
This represents clause 'aligned' in the '#pragma omp ...' directives.
This represents '#pragma omp atomic' directive.
This represents '#pragma omp barrier' directive.
This represents 'bind' clause in the '#pragma omp ...' directives.
This represents '#pragma omp cancel' directive.
This represents '#pragma omp cancellation point' directive.
Representation of an OpenMP canonical loop.
static OMPClauseWithPreInit * get(OMPClause *C)
This is a basic class for representing single OpenMP clause.
This represents clause 'copyin' in the '#pragma omp ...' directives.
This represents clause 'copyprivate' in the '#pragma omp ...' directives.
This represents '#pragma omp critical' directive.
This represents implicit clause 'depend' for the '#pragma omp task' directive.
This represents implicit clause 'depobj' for the '#pragma omp depobj' directive.
This represents '#pragma omp depobj' directive.
This represents 'destroy' clause in the '#pragma omp depobj' directive or the '#pragma omp interop' d...
This represents 'device' clause in the '#pragma omp ...' directive.
This represents 'dist_schedule' clause in the '#pragma omp ...' directive.
This represents '#pragma omp distribute' directive.
This represents '#pragma omp distribute parallel for' composite directive.
This represents '#pragma omp distribute parallel for simd' composite directive.
This represents '#pragma omp distribute simd' composite directive.
This represents the 'doacross' clause for the '#pragma omp ordered' directive.
This represents '#pragma omp error' directive.
This is a basic class for representing single OpenMP executable directive.
OpenMPDirectiveKind getDirectiveKind() const
SourceLocation getEndLoc() const
Returns ending location of directive.
static llvm::iterator_range< specific_clause_iterator< SpecificClause > > getClausesOfKind(ArrayRef< OMPClause * > Clauses)
This represents 'fail' clause in the '#pragma omp atomic' directive.
OpenMPClauseKind getFailParameter() const
Gets the parameter (type memory-order-clause) in Fail clause.
This represents 'filter' clause in the '#pragma omp ...' directive.
This represents 'final' clause in the '#pragma omp ...' directive.
This represents clause 'firstprivate' in the '#pragma omp ...' directives.
This represents implicit clause 'flush' for the '#pragma omp flush' directive.
This represents '#pragma omp flush' directive.
This represents '#pragma omp for' directive.
This represents '#pragma omp for simd' directive.
Representation of the 'full' clause of the '#pragma omp unroll' directive.
This represents '#pragma omp loop' directive.
This represents 'grainsize' clause in the '#pragma omp ...' directive.
This represents 'hint' clause in the '#pragma omp ...' directive.
This represents 'if' clause in the '#pragma omp ...' directive.
This represents clause 'in_reduction' in the '#pragma omp task' directives.
This represents clause 'inclusive' in the '#pragma omp scan' directive.
This represents the 'init' clause in '#pragma omp ...' directives.
Represents the '#pragma omp interchange' loop transformation directive.
This represents '#pragma omp interop' directive.
This represents clause 'lastprivate' in the '#pragma omp ...' directives.
This represents clause 'linear' in the '#pragma omp ...' directives.
The base class for all loop-based directives, including loop transformation directives.
static Stmt * tryToFindNextInnerLoop(Stmt *CurStmt, bool TryImperfectlyNestedLoops)
Try to find the next loop sub-statement in the specified statement CurStmt.
static bool doForAllLoops(Stmt *CurStmt, bool TryImperfectlyNestedLoops, unsigned NumLoops, llvm::function_ref< bool(unsigned, Stmt *)> Callback, llvm::function_ref< void(OMPLoopTransformationDirective *)> OnTransformationCallback)
Calls the specified callback function for all the loops in CurStmt, from the outermost to the innermo...
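For context, a hedged example of the collapsed loop nest such a walk visits, outermost loop first (illustrative only):
// Illustrative OpenMP input only; compile with -fopenmp.
void zero(float (*a)[8], int n) {
#pragma omp parallel for collapse(2) // both loops form one combined iteration space
  for (int i = 0; i < n; ++i)
    for (int j = 0; j < 8; ++j)
      a[i][j] = 0.0f;
}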
This is a common base class for loop directives ('omp simd', 'omp for', 'omp for simd' etc....
Expr * getPrevUpperBoundVariable() const
Expr * getPrevLowerBoundVariable() const
Expr * getIterationVariable() const
Expr * getUpperBoundVariable() const
Expr * getLowerBoundVariable() const
This represents '#pragma omp masked' directive.
This represents '#pragma omp master' directive.
This represents '#pragma omp master taskloop' directive.
This represents '#pragma omp master taskloop simd' directive.
This represents 'message' clause in the '#pragma omp error' directive.
Expr * getMessageString() const
Returns message string of the clause.
This represents 'nogroup' clause in the '#pragma omp ...' directive.
This represents 'nowait' clause in the '#pragma omp ...' directive.
This represents 'num_tasks' clause in the '#pragma omp ...' directive.
This represents 'num_teams' clause in the '#pragma omp ...' directive.
This represents 'num_threads' clause in the '#pragma omp ...' directive.
This represents 'order' clause in the '#pragma omp ...' directive.
This represents 'ordered' clause in the '#pragma omp ...' directive.
This represents '#pragma omp ordered' directive.
This represents '#pragma omp parallel' directive.
This represents '#pragma omp parallel for' directive.
This represents '#pragma omp parallel for simd' directive.
This represents '#pragma omp parallel masked' directive.
This represents '#pragma omp parallel master' directive.
This represents '#pragma omp parallel master taskloop' directive.
This represents '#pragma omp parallel master taskloop simd' directive.
This represents '#pragma omp parallel sections' directive.
Representation of the 'partial' clause of the '#pragma omp unroll' directive.
This represents 'priority' clause in the '#pragma omp ...' directive.
This represents clause 'private' in the '#pragma omp ...' directives.
This represents 'proc_bind' clause in the '#pragma omp ...' directive.
This represents clause 'reduction' in the '#pragma omp ...' directives.
This represents 'relaxed' clause in the '#pragma omp atomic' directives.
This represents 'release' clause in the '#pragma omp atomic|flush' directives.
Represents the '#pragma omp reverse' loop transformation directive.
This represents 'simd' clause in the '#pragma omp ...' directive.
This represents 'safelen' clause in the '#pragma omp ...' directive.
This represents '#pragma omp scan' directive.
This represents 'schedule' clause in the '#pragma omp ...' directive.
This represents '#pragma omp section' directive.
This represents '#pragma omp sections' directive.
This represents 'seq_cst' clause in the '#pragma omp atomic' directive.
This represents 'severity' clause in the '#pragma omp error' directive.
OpenMPSeverityClauseKind getSeverityKind() const
Returns kind of the clause.
This represents '#pragma omp simd' directive.
This represents 'simdlen' clause in the '#pragma omp ...' directive.
This represents '#pragma omp single' directive.
This represents '#pragma omp target data' directive.
This represents '#pragma omp target' directive.
This represents '#pragma omp target enter data' directive.
This represents '#pragma omp target exit data' directive.
This represents '#pragma omp target parallel' directive.
This represents '#pragma omp target parallel for' directive.
This represents '#pragma omp target parallel for simd' directive.
This represents '#pragma omp target parallel loop' directive.
This represents '#pragma omp target simd' directive.
This represents '#pragma omp target teams' directive.
This represents '#pragma omp target teams distribute' combined directive.
This represents '#pragma omp target teams distribute parallel for' combined directive.
This represents '#pragma omp target teams distribute parallel for simd' combined directive.
This represents '#pragma omp target teams distribute simd' combined directive.
This represents '#pragma omp target teams loop' directive.
This represents '#pragma omp target update' directive.
This represents '#pragma omp task' directive.
This represents '#pragma omp taskloop' directive.
This represents '#pragma omp taskloop simd' directive.
This represents clause 'task_reduction' in the '#pragma omp taskgroup' directives.
This represents '#pragma omp taskgroup' directive.
This represents '#pragma omp taskwait' directive.
This represents '#pragma omp taskyield' directive.
This represents '#pragma omp teams' directive.
This represents '#pragma omp teams distribute' directive.
This represents '#pragma omp teams distribute parallel for' composite directive.
This represents '#pragma omp teams distribute parallel for simd' composite directive.
This represents '#pragma omp teams distribute simd' combined directive.
This represents '#pragma omp teams loop' directive.
This represents 'thread_limit' clause in the '#pragma omp ...' directive.
This represents the '#pragma omp tile' loop transformation directive.
This represents the '#pragma omp unroll' loop transformation directive.
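A hedged sketch of these loop transformation directives at the source level (OpenMP 5.1 syntax; illustrative only):
// Illustrative OpenMP 5.1 input only; compile with -fopenmp.
void sweep(float *x, int n) {
#pragma omp tile sizes(16)           // split the loop into tiles of 16 iterations
  for (int i = 0; i < n; ++i)
    x[i] += 1.0f;

#pragma omp unroll partial(4)        // unroll the loop body by a factor of 4
  for (int i = 0; i < n; ++i)
    x[i] *= 0.5f;
}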
This represents 'untied' clause in the '#pragma omp ...' directive.
This represents 'update' clause in the '#pragma omp atomic' directive.
This represents the 'use' clause in '#pragma omp ...' directives.
This represents clause 'use_device_addr' in the '#pragma omp ...' directives.
This represents clause 'use_device_ptr' in the '#pragma omp ...' directives.
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
static ParmVarDecl * Create(ASTContext &C, DeclContext *DC, SourceLocation StartLoc, SourceLocation IdLoc, const IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo, StorageClass S, Expr *DefArg)
PointerType - C99 6.7.5.1 - Pointer Declarators.
Represents an unpacked "presumed" location which can be presented to the user.
const char * getFilename() const
Return the presumed filename of this location.
unsigned getLine() const
Return the presumed line number of this location.
If a crash happens while one of these objects is live, the message is printed out along with the spe...
A (possibly-)qualified type.
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const...
Represents a struct/union/class.
field_range fields() const
field_iterator field_begin() const
Base for LValueReferenceType and RValueReferenceType.
Scope - A scope is a transient data structure that is used while parsing the program.
Encodes a location in the source.
A trivial tuple used to represent a source range.
SourceLocation getEnd() const
SourceLocation getBegin() const
Stmt - This represents one statement.
SourceLocation getBeginLoc() const LLVM_READONLY
SwitchStmt - This represents a 'switch' stmt.
virtual bool hasBuiltinAtomic(uint64_t AtomicSizeInBits, uint64_t AlignmentInBits) const
Returns true if the given target supports lock-free atomic operations at the specified width and alig...
bool isTLSSupported() const
Whether the target supports thread-local storage.
The base class of the type hierarchy.
bool isPointerType() const
const T * castAs() const
Member-template castAs<specific type>.
bool isReferenceType() const
bool isScalarType() const
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
bool isLValueReferenceType() const
bool isAnyComplexType() const
bool hasSignedIntegerRepresentation() const
Determine whether this type has a signed integer representation of some sort, e.g....
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
const ArrayType * getAsArrayTypeUnsafe() const
A variant of getAs<> for array types which silently discards qualifiers from the outermost type.
bool isAnyPointerType() const
bool isRecordType() const
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
static UnaryOperator * Create(const ASTContext &C, Expr *input, Opcode opc, QualType type, ExprValueKind VK, ExprObjectKind OK, SourceLocation l, bool CanOverflow, FPOptionsOverride FPFeatures)
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Represents a variable declaration or definition.
TLSKind getTLSKind() const
VarDecl * getCanonicalDecl() override
Retrieves the "canonical" declaration of the given declaration.
@ CInit
C-style initialization with assignment.
bool hasGlobalStorage() const
Returns true for all variables that do not have local storage.
bool isStaticLocal() const
Returns true if a variable with function scope is a static local variable.
const Expr * getInit() const
bool hasLocalStorage() const
Returns true if a variable with function scope is a non-static local variable.
@ TLS_None
Not a TLS variable.
Represents a C array with a specified size that is not an integer-constant-expression.
Expr * getSizeExpr() const
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
bool Inc(InterpState &S, CodePtr OpPC)
1) Pops a pointer from the stack 2) Loads the value from the pointer 3) Writes the value increased by ...
The JSON file list parser is used to communicate input to InstallAPI.
bool isOpenMPWorksharingDirective(OpenMPDirectiveKind DKind)
Checks if the specified directive is a worksharing directive.
@ Tile
'tile' clause, allowed on 'loop' and Combined constructs.
bool needsTaskBasedThreadLimit(OpenMPDirectiveKind DKind)
Checks if the specified target directive, combined or not, needs task based thread_limit.
@ Ctor_Complete
Complete object ctor.
if (T->getSizeExpr()) TRY_TO(TraverseStmt(const_cast<Expr *>(T->getSizeExpr())))
llvm::omp::Directive OpenMPDirectiveKind
OpenMP directives.
@ OK_Ordinary
An ordinary object is located at an address in memory.
bool isOpenMPDistributeDirective(OpenMPDirectiveKind DKind)
Checks if the specified directive is a distribute directive.
OpenMPScheduleClauseModifier
OpenMP modifiers for 'schedule' clause.
@ OMPC_SCHEDULE_MODIFIER_unknown
llvm::omp::Clause OpenMPClauseKind
OpenMP clauses.
bool isOpenMPParallelDirective(OpenMPDirectiveKind DKind)
Checks if the specified directive is a parallel-kind directive.
OpenMPDistScheduleClauseKind
OpenMP attributes for 'dist_schedule' clause.
@ OMPC_DIST_SCHEDULE_unknown
bool isOpenMPTaskingDirective(OpenMPDirectiveKind Kind)
Checks if the specified directive kind is one of tasking directives - task, taskloop,...
bool isOpenMPTargetExecutionDirective(OpenMPDirectiveKind DKind)
Checks if the specified directive is a target code offload directive.
@ Result
The result type of a method or function.
bool isOpenMPTeamsDirective(OpenMPDirectiveKind DKind)
Checks if the specified directive is a teams-kind directive.
bool isOpenMPGenericLoopDirective(OpenMPDirectiveKind DKind)
Checks if the specified directive constitutes a 'loop' directive in the outermost nest.
OpenMPBindClauseKind
OpenMP bindings for the 'bind' clause.
OpenMPDependClauseKind
OpenMP attributes for 'depend' clause.
@ Dtor_Complete
Complete object dtor.
bool isOpenMPLoopBoundSharingDirective(OpenMPDirectiveKind Kind)
Checks if the specified directive kind is one of the composite or combined directives that need loop ...
bool isOpenMPSimdDirective(OpenMPDirectiveKind DKind)
Checks if the specified directive is a simd directive.
@ VK_PRValue
A pr-value expression (in the C++11 taxonomy) produces a temporary value.
@ VK_LValue
An l-value expression is a reference to an object with independent storage.
const FunctionProtoType * T
void getOpenMPCaptureRegions(llvm::SmallVectorImpl< OpenMPDirectiveKind > &CaptureRegions, OpenMPDirectiveKind DKind)
Return the captured regions of an OpenMP directive.
@ ThreadPrivateVar
Parameter for Thread private variable.
@ Other
Other implicit parameter.
OpenMPScheduleClauseKind
OpenMP attributes for 'schedule' clause.
bool isOpenMPTaskLoopDirective(OpenMPDirectiveKind DKind)
Checks if the specified directive is a taskloop directive.
Diagnostic wrappers for TextAPI types for error reporting.
static Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc)
Returns address of the threadprivate variable for the current thread.
llvm::OpenMPIRBuilder::InsertPointTy InsertPointTy
static void EmitOMPOutlinedRegionBody(CodeGenFunction &CGF, const Stmt *RegionBodyStmt, InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Twine RegionName)
Emit the body of an OMP region that will be outlined in OpenMPIRBuilder::finalize().
static Address getAddressOfLocalVariable(CodeGenFunction &CGF, const VarDecl *VD)
Gets the OpenMP-specific address of the local variable \p VD.
static void EmitCaptureStmt(CodeGenFunction &CGF, InsertPointTy CodeGenIP, llvm::BasicBlock &FiniBB, llvm::Function *Fn, ArrayRef< llvm::Value * > Args)
static std::string getNameWithSeparators(ArrayRef< StringRef > Parts, StringRef FirstSeparator=".", StringRef Separator=".")
Get the platform-specific name separator.
static void FinalizeOMPRegion(CodeGenFunction &CGF, InsertPointTy IP)
Emit the Finalization for an OMP region.
static void EmitOMPInlinedRegionBody(CodeGenFunction &CGF, const Stmt *RegionBodyStmt, InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Twine RegionName)
Emit the body of an OMP region.
llvm::PointerType * VoidPtrTy
llvm::IntegerType * Int64Ty
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
llvm::IntegerType * SizeTy
llvm::IntegerType * Int32Ty
llvm::IntegerType * IntPtrTy
llvm::PointerType * Int8PtrTy
SmallVector< const Expr *, 4 > DepExprs
EvalResult is a struct with detailed info about an evaluated expression.
Extra information about a function prototype.
Scheduling data for loop-based OpenMP directives.
OpenMPScheduleClauseModifier M2
OpenMPScheduleClauseModifier M1
OpenMPScheduleClauseKind Schedule
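As a hedged illustration of where these fields come from, a schedule clause carrying both a kind and a modifier (illustrative only):
// Illustrative OpenMP input only; compile with -fopenmp.
void fill(float *x, int n) {
#pragma omp parallel for schedule(monotonic : dynamic, 4)
  for (int i = 0; i < n; ++i)        // Schedule = dynamic, chunk 4, M1 = monotonic
    x[i] = static_cast<float>(i);
}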