clang 23.0.0git
CodeGenFunction.cpp
Go to the documentation of this file.
1//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This coordinates the per-function state used while generating code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CodeGenFunction.h"
14#include "CGBlocks.h"
15#include "CGCUDARuntime.h"
16#include "CGCXXABI.h"
17#include "CGCleanup.h"
18#include "CGDebugInfo.h"
19#include "CGHLSLRuntime.h"
20#include "CGOpenMPRuntime.h"
21#include "CodeGenModule.h"
22#include "CodeGenPGO.h"
23#include "TargetInfo.h"
25#include "clang/AST/ASTLambda.h"
26#include "clang/AST/Attr.h"
27#include "clang/AST/Decl.h"
28#include "clang/AST/DeclCXX.h"
29#include "clang/AST/Expr.h"
31#include "clang/AST/StmtCXX.h"
32#include "clang/AST/StmtObjC.h"
39#include "llvm/ADT/ArrayRef.h"
40#include "llvm/ADT/ScopeExit.h"
41#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
42#include "llvm/IR/DataLayout.h"
43#include "llvm/IR/Dominators.h"
44#include "llvm/IR/FPEnv.h"
45#include "llvm/IR/Instruction.h"
46#include "llvm/IR/IntrinsicInst.h"
47#include "llvm/IR/Intrinsics.h"
48#include "llvm/IR/IntrinsicsPowerPC.h"
49#include "llvm/IR/MDBuilder.h"
50#include "llvm/Support/CRC.h"
51#include "llvm/Support/SipHash.h"
52#include "llvm/Support/xxhash.h"
53#include "llvm/Transforms/Scalar/LowerExpectIntrinsic.h"
54#include "llvm/Transforms/Utils/PromoteMemToReg.h"
55#include <optional>
56
57using namespace clang;
58using namespace CodeGen;
59
60/// shouldEmitLifetimeMarkers - Decide whether we need emit the life-time
61/// markers.
62static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
63 const LangOptions &LangOpts) {
64 if (CGOpts.DisableLifetimeMarkers)
65 return false;
66
67 // Sanitizers may use markers.
68 if (CGOpts.SanitizeAddressUseAfterScope ||
69 LangOpts.Sanitize.has(SanitizerKind::HWAddress) ||
70 LangOpts.Sanitize.has(SanitizerKind::Memory) ||
71 LangOpts.Sanitize.has(SanitizerKind::MemtagStack))
72 return true;
73
74 // For now, only in optimized builds.
75 return CGOpts.OptimizationLevel != 0;
76}
77
/// Construct the per-function IR-generation state for module \p cgm.
///
/// \param suppressNewContext when true, do not start a fresh local-mangling
///        "function" scope in the C++ ABI mangle context — presumably used
///        when this CGF continues an enclosing emission; TODO confirm callers.
CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
    : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
      Builder(cgm, cgm.getModule().getContext(), CGBuilderInserterTy(this)),
      // NOTE(review): one initializer line is missing from this excerpt
      // between Builder(...) and DebugInfo(...) — restore from upstream.
      DebugInfo(CGM.getModuleDebugInfo()),
      PGO(std::make_unique<CodeGenPGO>(cgm)),
      ShouldEmitLifetimeMarkers(
          shouldEmitLifetimeMarkers(CGM.getCodeGenOpts(), CGM.getLangOpts())) {
  if (!suppressNewContext)
    CGM.getCXXABI().getMangleContext().startNewFunction();
  // Give the EH cleanup stack a back-pointer to this function's state.
  EHStack.setCGF(this);
  // NOTE(review): one statement is missing from this excerpt here.
}
92
// NOTE(review): the '~CodeGenFunction()' signature line is missing from this
// excerpt; the body below is the destructor.
  // By destruction time every cleanup must have been emitted or deactivated.
  assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");
  assert(DeferredDeactivationCleanupStack.empty() &&
         "missed to deactivate a cleanup");

  // Let the OpenMP runtime release any per-function state it kept.
  if (getLangOpts().OpenMP && CurFn)
    CGM.getOpenMPRuntime().functionFinished(*this);

  // If we have an OpenMPIRBuilder we want to finalize functions (incl.
  // outlining etc) at some point. Doing it once the function codegen is done
  // seems to be a reasonable spot. We do it here, as opposed to the deletion
  // time of the CodeGenModule, because we have to ensure the IR has not yet
  // been "emitted" to the outside, thus, modifications are still sensible.
  if (CGM.getLangOpts().OpenMPIRBuilder && CurFn)
    CGM.getOpenMPRuntime().getOMPBuilder().finalize(CurFn);
}
109
// Map the LangOption for exception behavior into
// the corresponding enum in the IR.
llvm::fp::ExceptionBehavior
// NOTE(review): the signature line is missing from this excerpt (presumably
// 'clang::ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind) {').

  switch (Kind) {
  case LangOptions::FPE_Ignore: return llvm::fp::ebIgnore;
  case LangOptions::FPE_MayTrap: return llvm::fp::ebMayTrap;
  case LangOptions::FPE_Strict: return llvm::fp::ebStrict;
  default:
    // All supported language modes are enumerated above.
    llvm_unreachable("Unsupported FP Exception Behavior");
  }
}
123
// Translate the given FPOptions into IR fast-math flags and install them as
// the builder's defaults for subsequently emitted FP instructions.
// NOTE(review): the signature line is missing from this excerpt (presumably
// 'void CodeGenFunction::SetFastMathFlags(FPOptions FPFeatures) {').
  llvm::FastMathFlags FMF;
  FMF.setAllowReassoc(FPFeatures.getAllowFPReassociate());
  FMF.setNoNaNs(FPFeatures.getNoHonorNaNs());
  FMF.setNoInfs(FPFeatures.getNoHonorInfs());
  FMF.setNoSignedZeros(FPFeatures.getNoSignedZero());
  FMF.setAllowReciprocal(FPFeatures.getAllowReciprocal());
  FMF.setApproxFunc(FPFeatures.getAllowApproxFunc());
  FMF.setAllowContract(FPFeatures.allowFPContractAcrossStatement());
  Builder.setFastMathFlags(FMF);
}
135
// RAII: enter the floating-point environment required by expression \p E.
// NOTE(review): the first signature line is missing from this excerpt
// (presumably 'CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF,').
                                                  const Expr *E)
    : CGF(CGF) {
  // Derive the effective FP options from the expression + language options.
  ConstructorHelper(E->getFPFeaturesInEffect(CGF.getLangOpts()));
}
141
// RAII: enter an explicitly supplied floating-point environment.
// NOTE(review): the first signature line is missing from this excerpt
// (presumably 'CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF,').
                                                  FPOptions FPFeatures)
    : CGF(CGF) {
  ConstructorHelper(FPFeatures);
}
147
void CodeGenFunction::CGFPOptionsRAII::ConstructorHelper(FPOptions FPFeatures) {
  // Save the previous environment; the destructor restores all of it.
  OldFPFeatures = CGF.CurFPFeatures;
  CGF.CurFPFeatures = FPFeatures;

  OldExcept = CGF.Builder.getDefaultConstrainedExcept();
  OldRounding = CGF.Builder.getDefaultConstrainedRounding();

  // Fast path: nothing changes, so leave the builder untouched.
  if (OldFPFeatures == FPFeatures)
    return;

  FMFGuard.emplace(CGF.Builder);

  llvm::RoundingMode NewRoundingBehavior = FPFeatures.getRoundingMode();
  CGF.Builder.setDefaultConstrainedRounding(NewRoundingBehavior);
  auto NewExceptionBehavior =
      // NOTE(review): the initializer of NewExceptionBehavior is missing from
      // this excerpt (presumably a ToConstrainedExceptMD(...) call — confirm).
  CGF.Builder.setDefaultConstrainedExcept(NewExceptionBehavior);

  CGF.SetFastMathFlags(FPFeatures);

  // Constrained-FP mode must be uniform over a whole function; the listed
  // cases are the tolerated exceptions (no decl, special members, or the
  // default ignore/nearest environment).
  assert((CGF.CurFuncDecl == nullptr || CGF.Builder.getIsFPConstrained() ||
          isa<CXXConstructorDecl>(CGF.CurFuncDecl) ||
          isa<CXXDestructorDecl>(CGF.CurFuncDecl) ||
          (NewExceptionBehavior == llvm::fp::ebIgnore &&
           NewRoundingBehavior == llvm::RoundingMode::NearestTiesToEven)) &&
         "FPConstrained should be enabled on entire function");

  // AND-merge a boolean string function attribute: once any scope in the
  // function clears it, it stays cleared for the whole function.
  auto mergeFnAttrValue = [&](StringRef Name, bool Value) {
    auto OldValue =
        CGF.CurFn->getFnAttribute(Name).getValueAsBool();
    auto NewValue = OldValue & Value;
    if (OldValue != NewValue)
      CGF.CurFn->addFnAttr(Name, llvm::toStringRef(NewValue));
  };
  mergeFnAttrValue("no-signed-zeros-fp-math", FPFeatures.getNoSignedZero());
}
184
// RAII destructor: restore the FP environment saved by ConstructorHelper.
// NOTE(review): the '~CGFPOptionsRAII()' signature line is missing from this
// excerpt.
  CGF.CurFPFeatures = OldFPFeatures;
  CGF.Builder.setDefaultConstrainedExcept(OldExcept);
  CGF.Builder.setDefaultConstrainedRounding(OldRounding);
}
190
191static LValue
192makeNaturalAlignAddrLValue(llvm::Value *V, QualType T, bool ForPointeeType,
193 bool MightBeSigned, CodeGenFunction &CGF,
194 KnownNonNull_t IsKnownNonNull = NotKnownNonNull) {
195 LValueBaseInfo BaseInfo;
196 TBAAAccessInfo TBAAInfo;
197 CharUnits Alignment =
198 CGF.CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo, ForPointeeType);
199 Address Addr =
200 MightBeSigned
201 ? CGF.makeNaturalAddressForPointer(V, T, Alignment, false, nullptr,
202 nullptr, IsKnownNonNull)
203 : Address(V, CGF.ConvertTypeForMem(T), Alignment, IsKnownNonNull);
204 return CGF.MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
205}
206
LValue
// NOTE(review): the continuation of this signature is missing from the
// excerpt (presumably 'CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T,').
                                       KnownNonNull_t IsKnownNonNull) {
  // Signed-pointer-capable, non-pointee variant of the static helper above.
  return ::makeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ false,
                                      /*MightBeSigned*/ true, *this,
                                      IsKnownNonNull);
}
214
LValue
// NOTE(review): the signature line is missing from this excerpt (presumably
// 'CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {').
  // Pointee variant: T is the pointee type; pointer may be signed.
  return ::makeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ true,
                                      /*MightBeSigned*/ true, *this);
}
220
// NOTE(review): the first signature line is missing from this excerpt
// (presumably 'LValue CodeGenFunction::MakeNaturalAlignRawAddrLValue(llvm::Value *V,').
                                            QualType T) {
  // "Raw" variant: the pointer is known not to carry a signature.
  return ::makeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ false,
                                      /*MightBeSigned*/ false, *this);
}
226
// NOTE(review): the first signature line is missing from this excerpt
// (presumably 'LValue CodeGenFunction::MakeNaturalAlignPointeeRawAddrLValue(llvm::Value *V,').
                                            QualType T) {
  // Raw pointee variant: T is the pointee type; pointer is unsigned (no auth).
  return ::makeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ true,
                                      /*MightBeSigned*/ false, *this);
}
232
// NOTE(review): the signature line is missing from this excerpt (presumably
// 'llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {').
  // Delegate to the module-level converter (in-memory representation).
  return CGM.getTypes().ConvertTypeForMem(T);
}
236
// NOTE(review): the signature line is missing from this excerpt (presumably
// 'llvm::Type *CodeGenFunction::ConvertType(QualType T) {').
  // Delegate to the module-level converter (scalar/SSA representation).
  return CGM.getTypes().ConvertType(T);
}
240
// NOTE(review): the first signature line is missing from this excerpt
// (presumably 'llvm::Type *CodeGenFunction::convertTypeForLoadStore(QualType ASTTy,').
                                           llvm::Type *LLVMTy) {
  // Delegate to CodeGenTypes for the load/store representation of ASTTy.
  return CGM.getTypes().convertTypeForLoadStore(ASTTy, LLVMTy);
}
245
// Classify how a type is evaluated during IR generation: as a scalar SSA
// value, a complex pair, or an in-memory aggregate.
// NOTE(review): the signature line is missing from this excerpt (presumably
// 'TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {').
  type = type.getCanonicalType();
  while (true) {
    switch (type->getTypeClass()) {
    // Non-canonical and dependent types must have been resolved before
    // reaching IR generation.
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.inc"
      llvm_unreachable("non-canonical or dependent type in IR-generation");

    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      llvm_unreachable("undeduced type in IR-generation");

    // Various scalar types.
    case Type::Builtin:
    case Type::Pointer:
    case Type::BlockPointer:
    case Type::LValueReference:
    case Type::RValueReference:
    case Type::MemberPointer:
    case Type::Vector:
    case Type::ExtVector:
    case Type::ConstantMatrix:
    case Type::FunctionProto:
    case Type::FunctionNoProto:
    case Type::Enum:
    case Type::ObjCObjectPointer:
    case Type::Pipe:
    case Type::BitInt:
    case Type::HLSLAttributedResource:
    case Type::HLSLInlineSpirv:
    case Type::OverflowBehavior:
      return TEK_Scalar;

    // Complexes.
    case Type::Complex:
      return TEK_Complex;

    // Arrays, records, and Objective-C objects.
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::Record:
    case Type::ObjCObject:
    case Type::ObjCInterface:
    case Type::ArrayParameter:
      return TEK_Aggregate;

    // We operate on atomic values according to their underlying type.
    case Type::Atomic:
      type = cast<AtomicType>(type)->getValueType();
      continue;
    }
    llvm_unreachable("unknown type kind!");
  }
}
305
// Emit (or fold away) the unified return block, returning the DebugLoc the
// final 'ret' instruction should use.
// NOTE(review): the signature line is missing from this excerpt (presumably
// 'llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {').
  // For cleanliness, we try to avoid emitting the return block for
  // simple cases.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point, reuse it if it is empty or there are no
    // explicit jumps to the return block.
    if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
      ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
      delete ReturnBlock.getBlock();
      // NOTE(review): a statement is missing from this excerpt here
      // (presumably resetting ReturnBlock).
    } else
      EmitBlock(ReturnBlock.getBlock());
    return llvm::DebugLoc();
  }

  // Otherwise, if the return block is the target of a single direct
  // branch then we can just put the code in that block instead. This
  // cleans up functions which started with a unified return block.
  if (ReturnBlock.getBlock()->hasOneUse()) {
    auto *BI =
        dyn_cast<llvm::UncondBrInst>(*ReturnBlock.getBlock()->user_begin());
    if (BI && BI->getSuccessor(0) == ReturnBlock.getBlock()) {
      // Record/return the DebugLoc of the simple 'return' expression to be used
      // later by the actual 'ret' instruction.
      llvm::DebugLoc Loc = BI->getDebugLoc();
      Builder.SetInsertPoint(BI->getParent());
      BI->eraseFromParent();
      delete ReturnBlock.getBlock();
      // NOTE(review): a statement is missing from this excerpt here
      // (presumably resetting ReturnBlock).
      return Loc;
    }
  }

  // FIXME: We are at an unreachable point, there is no reason to emit the block
  // unless it has uses. However, we still need a place to put the debug
  // region.end for now.

  EmitBlock(ReturnBlock.getBlock());
  return llvm::DebugLoc();
}
350
351static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
352 if (!BB) return;
353 if (!BB->use_empty()) {
354 CGF.CurFn->insert(CGF.CurFn->end(), BB);
355 return;
356 }
357 delete BB;
358}
359
// Finish emission of the current function: verify cleanup bookkeeping, emit
// the epilog and return block, and tidy per-function IR artifacts.
// NOTE(review): the signature line is missing from this excerpt (presumably
// 'void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {'); several
// interior lines were also dropped and are flagged below.
  assert(BreakContinueStack.empty() &&
         "mismatched push/pop in break/continue stack!");
  assert(LifetimeExtendedCleanupStack.empty() &&
         "mismatched push/pop of cleanups in EHStack!");
  assert(DeferredDeactivationCleanupStack.empty() &&
         "mismatched activate/deactivate of cleanups!");

  if (CGM.shouldEmitConvergenceTokens()) {
    ConvergenceTokenStack.pop_back();
    assert(ConvergenceTokenStack.empty() &&
           "mismatched push/pop in convergence stack!");
  }

  bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
                               && NumSimpleReturnExprs == NumReturnExprs
                               && ReturnBlock.getBlock()->use_empty();
  // Usually the return expression is evaluated before the cleanup
  // code. If the function contains only a simple return statement,
  // such as a constant, the location before the cleanup code becomes
  // the last useful breakpoint in the function, because the simple
  // return expression will be evaluated after the cleanup code. To be
  // safe, set the debug location for cleanup code to the location of
  // the return statement. Otherwise the cleanup code should be at the
  // end of the function's lexical scope.
  //
  // If there are multiple branches to the return block, the branch
  // instructions will get the location of the return statements and
  // all will be fine.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (OnlySimpleReturnStmts)
      DI->EmitLocation(Builder, LastStopPoint);
    else
      DI->EmitLocation(Builder, EndLoc);
  }

  // Pop any cleanups that might have been associated with the
  // parameters. Do this in whatever block we're currently in; it's
  // important to do this before we enter the return block or return
  // edges will be *really* confused.
  bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth;
  bool HasOnlyNoopCleanups =
      HasCleanups && EHStack.containsOnlyNoopCleanups(PrologueCleanupDepth);
  // The 'ret' keeps a source location only when no real cleanup code runs
  // between the return expression and the return itself.
  bool EmitRetDbgLoc = !HasCleanups || HasOnlyNoopCleanups;

  std::optional<ApplyDebugLocation> OAL;
  if (HasCleanups) {
    // Make sure the line table doesn't jump back into the body for
    // the ret after it's been at EndLoc.
    if (CGDebugInfo *DI = getDebugInfo()) {
      if (OnlySimpleReturnStmts)
        DI->EmitLocation(Builder, EndLoc);
      else
        // We may not have a valid end location. Try to apply it anyway, and
        // fall back to an artificial location if needed.
        // NOTE(review): the else-statement is missing from this excerpt
        // (presumably assigning OAL).
    }

    // NOTE(review): a statement is missing from this excerpt here
    // (presumably popping the prologue cleanup blocks).
  }

  // Emit function epilog (to return).
  llvm::DebugLoc Loc = EmitReturnBlock();

  // NOTE(review): an opening guard line is missing from this excerpt here
  // (presumably 'if (ShouldInstrumentFunction()) {' matching the '}' below).
  if (CGM.getCodeGenOpts().InstrumentFunctions)
    CurFn->addFnAttr("instrument-function-exit", "__cyg_profile_func_exit");
  if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
    CurFn->addFnAttr("instrument-function-exit-inlined",
                     "__cyg_profile_func_exit");
  }

  // Emit debug descriptor for function end.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitFunctionEnd(Builder, CurFn);

  // Reset the debug location to that of the simple 'return' expression, if any
  // rather than that of the end of the function's scope '}'.
  uint64_t RetKeyInstructionsAtomGroup = Loc ? Loc->getAtomGroup() : 0;
  ApplyDebugLocation AL(*this, Loc);
  EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc,
                     RetKeyInstructionsAtomGroup);
  // NOTE(review): a statement is missing from this excerpt here.

  assert(EHStack.empty() &&
         "did not remove all scopes from cleanup stack!");

  // If someone did an indirect goto, emit the indirect goto block at the end of
  // the function.
  if (IndirectBranch) {
    EmitBlock(IndirectBranch->getParent());
    Builder.ClearInsertionPoint();
  }

  // If some of our locals escaped, insert a call to llvm.localescape in the
  // entry block.
  if (!EscapedLocals.empty()) {
    // Invert the map from local to index into a simple vector. There should be
    // no holes.
    // NOTE(review): the declaration of EscapeArgs is missing from this excerpt
    // (presumably a SmallVector of llvm::Value*).
    EscapeArgs.resize(EscapedLocals.size());
    for (auto &Pair : EscapedLocals)
      EscapeArgs[Pair.second] = Pair.first;
    llvm::Function *FrameEscapeFn = llvm::Intrinsic::getOrInsertDeclaration(
        &CGM.getModule(), llvm::Intrinsic::localescape);
    CGBuilderTy(CGM, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
  }

  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  llvm::Instruction *Ptr = AllocaInsertPt;
  AllocaInsertPt = nullptr;
  Ptr->eraseFromParent();

  // PostAllocaInsertPt, if created, was lazily created when it was required,
  // remove it now since it was just created for our own convenience.
  if (PostAllocaInsertPt) {
    llvm::Instruction *PostPtr = PostAllocaInsertPt;
    PostAllocaInsertPt = nullptr;
    PostPtr->eraseFromParent();
  }

  // If someone took the address of a label but never did an indirect goto, we
  // made a zero entry PHI node, which is illegal, zap it now.
  if (IndirectBranch) {
    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
    if (PN->getNumIncomingValues() == 0) {
      PN->replaceAllUsesWith(llvm::PoisonValue::get(PN->getType()));
      PN->eraseFromParent();
    }
  }

  // NOTE(review): a line is missing from this excerpt here (presumably
  // another EmitIfUsed call).
  EmitIfUsed(*this, TerminateLandingPad);
  EmitIfUsed(*this, TerminateHandler);
  EmitIfUsed(*this, UnreachableBlock);

  for (const auto &FuncletAndParent : TerminateFunclets)
    EmitIfUsed(*this, FuncletAndParent.second);

  if (CGM.getCodeGenOpts().EmitDeclMetadata)
    EmitDeclMetadata();

  // Apply value replacements that had to wait until the function was fully
  // emitted.
  for (const auto &R : DeferredReplacements) {
    if (llvm::Value *Old = R.first) {
      Old->replaceAllUsesWith(R.second);
      cast<llvm::Instruction>(Old)->eraseFromParent();
    }
  }
  DeferredReplacements.clear();

  // Eliminate CleanupDestSlot alloca by replacing it with SSA values and
  // PHIs if the current function is a coroutine. We don't do it for all
  // functions as it may result in slight increase in numbers of instructions
  // if compiled with no optimizations. We do it for coroutine as the lifetime
  // of CleanupDestSlot alloca make correct coroutine frame building very
  // difficult.
  if (NormalCleanupDest.isValid() && isCoroutine()) {
    llvm::DominatorTree DT(*CurFn);
    llvm::PromoteMemToReg(
        cast<llvm::AllocaInst>(NormalCleanupDest.getPointer()), DT);
    // NOTE(review): a statement is missing from this excerpt here
    // (presumably invalidating NormalCleanupDest).
  }

  // Scan function arguments for vector width.
  for (llvm::Argument &A : CurFn->args())
    if (auto *VT = dyn_cast<llvm::VectorType>(A.getType()))
      LargestVectorWidth =
          std::max((uint64_t)LargestVectorWidth,
                   VT->getPrimitiveSizeInBits().getKnownMinValue());

  // Update vector width based on return type.
  if (auto *VT = dyn_cast<llvm::VectorType>(CurFn->getReturnType()))
    LargestVectorWidth =
        std::max((uint64_t)LargestVectorWidth,
                 VT->getPrimitiveSizeInBits().getKnownMinValue());

  if (CurFnInfo->getMaxVectorWidth() > LargestVectorWidth)
    LargestVectorWidth = CurFnInfo->getMaxVectorWidth();

  // Add the min-legal-vector-width attribute. This contains the max width from:
  // 1. min-vector-width attribute used in the source program.
  // 2. Any builtins used that have a vector width specified.
  // 3. Values passed in and out of inline assembly.
  // 4. Width of vector arguments and return types for this function.
  // 5. Width of vector arguments and return types for functions called by this
  //    function.
  if (getContext().getTargetInfo().getTriple().isX86())
    CurFn->addFnAttr("min-legal-vector-width",
                     llvm::utostr(LargestVectorWidth));

  // If we generated an unreachable return block, delete it now.
  if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) {
    Builder.ClearInsertionPoint();
    ReturnBlock.getBlock()->eraseFromParent();
  }
  // Drop the dedicated return-value alloca when nothing ended up using it.
  if (ReturnValue.isValid()) {
    auto *RetAlloca =
        dyn_cast<llvm::AllocaInst>(ReturnValue.emitRawPointer(*this));
    if (RetAlloca && RetAlloca->use_empty()) {
      RetAlloca->eraseFromParent();
      // NOTE(review): a statement is missing from this excerpt here
      // (presumably invalidating ReturnValue).
    }
  }
}
564
/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls
// NOTE(review): the signature line is missing from this excerpt (presumably
// 'bool CodeGenFunction::ShouldInstrumentFunction() {').
  // Instrumentation must be requested globally by one of the flags...
  if (!CGM.getCodeGenOpts().InstrumentFunctions &&
      !CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining &&
      !CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
    return false;
  // ...and not suppressed on this function via no_instrument_function.
  if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
    return false;
  return true;
}
576
// Return true if sanitizer instrumentation is disabled for this function
// via the disable_sanitizer_instrumentation attribute.
// NOTE(review): the signature line is missing from this excerpt (presumably
// 'bool CodeGenFunction::ShouldSkipSanitizerInstrumentation() {').
  if (!CurFuncDecl)
    return false;
  return CurFuncDecl->hasAttr<DisableSanitizerInstrumentationAttr>();
}
582
/// ShouldXRayInstrument - Return true if the current function should be
/// instrumented with XRay nop sleds.
// NOTE(review): the signature line is missing from this excerpt (presumably
// 'bool CodeGenFunction::ShouldXRayInstrumentFunction() const {').
  return CGM.getCodeGenOpts().XRayInstrumentFunctions;
}
588
/// AlwaysEmitXRayCustomEvents - Return true if we should emit IR for calls to
/// the __xray_customevent(...) builtin calls, when doing XRay instrumentation.
// NOTE(review): the signature line is missing from this excerpt (presumably
// 'bool CodeGenFunction::AlwaysEmitXRayCustomEvents() const {').
  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
         (CGM.getCodeGenOpts().XRayAlwaysEmitCustomEvents ||
          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
          // NOTE(review): the right-hand operand and closing tokens are
          // missing from this excerpt (presumably XRayInstrKind::Custom).
}
597
// Return true if __xray_typedevent(...) builtin calls should always be
// lowered when doing XRay instrumentation.
// NOTE(review): the signature line is missing from this excerpt (presumably
// 'bool CodeGenFunction::AlwaysEmitXRayTypedEvents() const {').
  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
         (CGM.getCodeGenOpts().XRayAlwaysEmitTypedEvents ||
          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
          // NOTE(review): the right-hand operand and closing tokens are
          // missing from this excerpt (presumably XRayInstrKind::Typed).
}
604
llvm::ConstantInt *
// NOTE(review): the signature line is missing from this excerpt (presumably
// 'CodeGenFunction::getUBSanFunctionTypeHash(QualType Ty) const {').
  // Remove any (C++17) exception specifications, to allow calling e.g. a
  // noexcept function through a non-noexcept pointer.
  if (!Ty->isFunctionNoProtoType())
    // NOTE(review): the then-statement is missing from this excerpt
    // (presumably stripping the exception spec from Ty — confirm upstream).
  std::string Mangled;
  llvm::raw_string_ostream Out(Mangled);
  // Hash the canonical mangled type name so the check is stable across TUs.
  CGM.getCXXABI().getMangleContext().mangleCanonicalTypeName(Ty, Out, false);
  return llvm::ConstantInt::get(
      CGM.Int32Ty, static_cast<uint32_t>(llvm::xxh3_64bits(Mangled)));
}
617
618void CodeGenFunction::EmitKernelMetadata(const FunctionDecl *FD,
619 llvm::Function *Fn) {
620 if (!FD->hasAttr<DeviceKernelAttr>() && !FD->hasAttr<CUDAGlobalAttr>())
621 return;
622
623 llvm::LLVMContext &Context = getLLVMContext();
624
625 CGM.GenKernelArgMetadata(Fn, FD, this);
626
627 if (!(getLangOpts().OpenCL ||
628 (getLangOpts().CUDA &&
629 getContext().getTargetInfo().getTriple().isSPIRV())))
630 return;
631
632 if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
633 QualType HintQTy = A->getTypeHint();
634 const ExtVectorType *HintEltQTy = HintQTy->getAs<ExtVectorType>();
635 bool IsSignedInteger =
636 HintQTy->isSignedIntegerType() ||
637 (HintEltQTy && HintEltQTy->getElementType()->isSignedIntegerType());
638 llvm::Metadata *AttrMDArgs[] = {
639 llvm::ConstantAsMetadata::get(llvm::PoisonValue::get(
640 CGM.getTypes().ConvertType(A->getTypeHint()))),
641 llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
642 llvm::IntegerType::get(Context, 32),
643 llvm::APInt(32, (uint64_t)(IsSignedInteger ? 1 : 0))))};
644 Fn->setMetadata("vec_type_hint", llvm::MDNode::get(Context, AttrMDArgs));
645 }
646
647 if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
648 auto Eval = [&](Expr *E) {
649 return E->EvaluateKnownConstInt(FD->getASTContext()).getExtValue();
650 };
651 llvm::Metadata *AttrMDArgs[] = {
652 llvm::ConstantAsMetadata::get(Builder.getInt32(Eval(A->getXDim()))),
653 llvm::ConstantAsMetadata::get(Builder.getInt32(Eval(A->getYDim()))),
654 llvm::ConstantAsMetadata::get(Builder.getInt32(Eval(A->getZDim())))};
655 Fn->setMetadata("work_group_size_hint", llvm::MDNode::get(Context, AttrMDArgs));
656 }
657
658 if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
659 auto Eval = [&](Expr *E) {
660 return E->EvaluateKnownConstInt(FD->getASTContext()).getExtValue();
661 };
662 llvm::Metadata *AttrMDArgs[] = {
663 llvm::ConstantAsMetadata::get(Builder.getInt32(Eval(A->getXDim()))),
664 llvm::ConstantAsMetadata::get(Builder.getInt32(Eval(A->getYDim()))),
665 llvm::ConstantAsMetadata::get(Builder.getInt32(Eval(A->getZDim())))};
666 Fn->setMetadata("reqd_work_group_size", llvm::MDNode::get(Context, AttrMDArgs));
667 }
668
669 if (const OpenCLIntelReqdSubGroupSizeAttr *A =
670 FD->getAttr<OpenCLIntelReqdSubGroupSizeAttr>()) {
671 llvm::Metadata *AttrMDArgs[] = {
672 llvm::ConstantAsMetadata::get(Builder.getInt32(A->getSubGroupSize()))};
673 Fn->setMetadata("intel_reqd_sub_group_size",
674 llvm::MDNode::get(Context, AttrMDArgs));
675 }
676}
677
678/// Determine whether the function F ends with a return stmt.
679static bool endsWithReturn(const Decl* F) {
680 const Stmt *Body = nullptr;
681 if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
682 Body = FD->getBody();
683 else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
684 Body = OMD->getBody();
685
686 if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
687 auto LastStmt = CS->body_rbegin();
688 if (LastStmt != CS->body_rend())
689 return isa<ReturnStmt>(*LastStmt);
690 }
691 return false;
692}
693
// Keep TSan instrumentation on \p Fn for attribution but tell the runtime
// not to actually check it.
// NOTE(review): the signature line is missing from this excerpt (presumably
// 'void CodeGenFunction::markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn) {'
// given the SanOpts member use below — confirm upstream).
  if (SanOpts.has(SanitizerKind::Thread)) {
    Fn->addFnAttr("sanitize_thread_no_checking_at_run_time");
    Fn->removeFnAttr(llvm::Attribute::SanitizeThread);
  }
}
700
701/// Check if the return value of this function requires sanitization.
702bool CodeGenFunction::requiresReturnValueCheck() const {
703 return requiresReturnValueNullabilityCheck() ||
704 (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) && CurCodeDecl &&
705 CurCodeDecl->getAttr<ReturnsNonNullAttr>());
706}
707
708static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) {
709 auto *MD = dyn_cast_or_null<CXXMethodDecl>(D);
710 if (!MD || !MD->getDeclName().getAsIdentifierInfo() ||
711 !MD->getDeclName().getAsIdentifierInfo()->isStr("allocate") ||
712 (MD->getNumParams() != 1 && MD->getNumParams() != 2))
713 return false;
714
715 if (!Ctx.hasSameType(MD->parameters()[0]->getType(), Ctx.getSizeType()))
716 return false;
717
718 if (MD->getNumParams() == 2) {
719 auto *PT = MD->parameters()[1]->getType()->getAs<PointerType>();
720 if (!PT || !PT->isVoidPointerType() ||
721 !PT->getPointeeType().isConstQualified())
722 return false;
723 }
724
725 return true;
726}
727
728bool CodeGenFunction::isInAllocaArgument(CGCXXABI &ABI, QualType Ty) {
729 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
730 return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
731}
732
// Return true if any parameter of \p MD would be passed via inalloca on
// 32-bit x86.
bool CodeGenFunction::hasInAllocaArg(const CXXMethodDecl *MD) {
  return getTarget().getTriple().getArch() == llvm::Triple::x86 &&
         // NOTE(review): one conjunct is missing from this excerpt here
         // (presumably a Microsoft C++ ABI check — confirm upstream).
         llvm::any_of(MD->parameters(), [&](ParmVarDecl *P) {
           return isInAllocaArgument(CGM.getCXXABI(), P->getType());
         });
}
740
/// Return the UBSan prologue signature for \p FD if one is available.
static llvm::Constant *getPrologueSignature(CodeGenModule &CGM,
                                            const FunctionDecl *FD) {
  // Non-static member functions get no prologue signature.
  if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
    if (!MD->isStatic())
      return nullptr;
  // NOTE(review): the final return statement is missing from this excerpt
  // (presumably delegating to the target's UBSan function signature).
}
749
751 llvm::Function *Fn,
752 const CGFunctionInfo &FnInfo,
753 const FunctionArgList &Args,
754 SourceLocation Loc,
755 SourceLocation StartLoc) {
756 assert(!CurFn &&
757 "Do not use a CodeGenFunction object for more than one function");
758
759 const Decl *D = GD.getDecl();
760
761 DidCallStackSave = false;
762 CurCodeDecl = D;
763 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
764 if (FD && FD->usesSEHTry())
765 CurSEHParent = GD;
766 CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
767 FnRetTy = RetTy;
768 CurFn = Fn;
769 CurFnInfo = &FnInfo;
770 assert(CurFn->isDeclaration() && "Function already has body?");
771
772 // If this function is ignored for any of the enabled sanitizers,
773 // disable the sanitizer for the function.
774 do {
775#define SANITIZER(NAME, ID) \
776 if (SanOpts.empty()) \
777 break; \
778 if (SanOpts.has(SanitizerKind::ID)) \
779 if (CGM.isInNoSanitizeList(SanitizerKind::ID, Fn, Loc)) \
780 SanOpts.set(SanitizerKind::ID, false);
781
782#include "clang/Basic/Sanitizers.def"
783#undef SANITIZER
784 } while (false);
785
786 if (D) {
787 const bool SanitizeBounds = SanOpts.hasOneOf(SanitizerKind::Bounds);
788 SanitizerMask no_sanitize_mask;
789 bool NoSanitizeCoverage = false;
790
791 for (auto *Attr : D->specific_attrs<NoSanitizeAttr>()) {
792 no_sanitize_mask |= Attr->getMask();
793 // SanitizeCoverage is not handled by SanOpts.
794 if (Attr->hasCoverage())
795 NoSanitizeCoverage = true;
796 }
797
798 // Apply the no_sanitize* attributes to SanOpts.
799 SanOpts.Mask &= ~no_sanitize_mask;
800 if (no_sanitize_mask & SanitizerKind::Address)
801 SanOpts.set(SanitizerKind::KernelAddress, false);
802 if (no_sanitize_mask & SanitizerKind::KernelAddress)
803 SanOpts.set(SanitizerKind::Address, false);
804 if (no_sanitize_mask & SanitizerKind::HWAddress)
805 SanOpts.set(SanitizerKind::KernelHWAddress, false);
806 if (no_sanitize_mask & SanitizerKind::KernelHWAddress)
807 SanOpts.set(SanitizerKind::HWAddress, false);
808
809 if (SanitizeBounds && !SanOpts.hasOneOf(SanitizerKind::Bounds))
810 Fn->addFnAttr(llvm::Attribute::NoSanitizeBounds);
811
812 if (NoSanitizeCoverage && CGM.getCodeGenOpts().hasSanitizeCoverage())
813 Fn->addFnAttr(llvm::Attribute::NoSanitizeCoverage);
814
815 // Some passes need the non-negated no_sanitize attribute. Pass them on.
816 if (CGM.getCodeGenOpts().hasSanitizeBinaryMetadata()) {
817 if (no_sanitize_mask & SanitizerKind::Thread)
818 Fn->addFnAttr("no_sanitize_thread");
819 }
820 }
821
823 CurFn->addFnAttr(llvm::Attribute::DisableSanitizerInstrumentation);
824 } else {
825 // Apply sanitizer attributes to the function.
826 if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
827 Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
828 if (SanOpts.hasOneOf(SanitizerKind::HWAddress |
829 SanitizerKind::KernelHWAddress))
830 Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
831 if (SanOpts.has(SanitizerKind::MemtagStack))
832 Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);
833 if (SanOpts.has(SanitizerKind::Thread))
834 Fn->addFnAttr(llvm::Attribute::SanitizeThread);
835 if (SanOpts.has(SanitizerKind::Type))
836 Fn->addFnAttr(llvm::Attribute::SanitizeType);
837 if (SanOpts.has(SanitizerKind::NumericalStability))
838 Fn->addFnAttr(llvm::Attribute::SanitizeNumericalStability);
839 if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory))
840 Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
841 if (SanOpts.has(SanitizerKind::AllocToken))
842 Fn->addFnAttr(llvm::Attribute::SanitizeAllocToken);
843 }
844 if (SanOpts.has(SanitizerKind::SafeStack))
845 Fn->addFnAttr(llvm::Attribute::SafeStack);
846 if (SanOpts.has(SanitizerKind::ShadowCallStack))
847 Fn->addFnAttr(llvm::Attribute::ShadowCallStack);
848
849 if (SanOpts.has(SanitizerKind::Realtime))
850 if (FD && FD->getASTContext().hasAnyFunctionEffects())
851 for (const FunctionEffectWithCondition &Fe : FD->getFunctionEffects()) {
852 if (Fe.Effect.kind() == FunctionEffect::Kind::NonBlocking)
853 Fn->addFnAttr(llvm::Attribute::SanitizeRealtime);
854 else if (Fe.Effect.kind() == FunctionEffect::Kind::Blocking)
855 Fn->addFnAttr(llvm::Attribute::SanitizeRealtimeBlocking);
856 }
857
858 // Apply fuzzing attribute to the function.
859 if (SanOpts.hasOneOf(SanitizerKind::Fuzzer | SanitizerKind::FuzzerNoLink))
860 Fn->addFnAttr(llvm::Attribute::OptForFuzzing);
861
862 // Ignore TSan memory acesses from within ObjC/ObjC++ dealloc, initialize,
863 // .cxx_destruct, __destroy_helper_block_ and all of their calees at run time.
864 if (SanOpts.has(SanitizerKind::Thread)) {
865 if (const auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) {
866 const IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0);
867 if (OMD->getMethodFamily() == OMF_dealloc ||
868 OMD->getMethodFamily() == OMF_initialize ||
869 (OMD->getSelector().isUnarySelector() && II->isStr(".cxx_destruct"))) {
871 }
872 }
873 }
874
875 // Ignore unrelated casts in STL allocate() since the allocator must cast
876 // from void* to T* before object initialization completes. Don't match on the
877 // namespace because not all allocators are in std::
878 if (D && SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
880 SanOpts.Mask &= ~SanitizerKind::CFIUnrelatedCast;
881 }
882
883 // Ignore null checks in coroutine functions since the coroutines passes
884 // are not aware of how to move the extra UBSan instructions across the split
885 // coroutine boundaries.
886 if (D && SanOpts.has(SanitizerKind::Null))
887 if (FD && FD->getBody() &&
888 FD->getBody()->getStmtClass() == Stmt::CoroutineBodyStmtClass)
889 SanOpts.Mask &= ~SanitizerKind::Null;
890
891 // Apply xray attributes to the function (as a string, for now)
892 bool AlwaysXRayAttr = false;
893 if (const auto *XRayAttr = D ? D->getAttr<XRayInstrumentAttr>() : nullptr) {
894 if (CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
896 CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
898 if (XRayAttr->alwaysXRayInstrument() && ShouldXRayInstrumentFunction()) {
899 Fn->addFnAttr("function-instrument", "xray-always");
900 AlwaysXRayAttr = true;
901 }
902 if (XRayAttr->neverXRayInstrument())
903 Fn->addFnAttr("function-instrument", "xray-never");
904 if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>())
906 Fn->addFnAttr("xray-log-args",
907 llvm::utostr(LogArgs->getArgumentCount()));
908 }
909 } else {
910 if (ShouldXRayInstrumentFunction() && !CGM.imbueXRayAttrs(Fn, Loc))
911 Fn->addFnAttr(
912 "xray-instruction-threshold",
913 llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold));
914 }
915
917 if (CGM.getCodeGenOpts().XRayIgnoreLoops)
918 Fn->addFnAttr("xray-ignore-loops");
919
920 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
922 Fn->addFnAttr("xray-skip-exit");
923
924 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
926 Fn->addFnAttr("xray-skip-entry");
927
928 auto FuncGroups = CGM.getCodeGenOpts().XRayTotalFunctionGroups;
929 if (FuncGroups > 1) {
930 auto FuncName = llvm::ArrayRef<uint8_t>(CurFn->getName().bytes_begin(),
931 CurFn->getName().bytes_end());
932 auto Group = crc32(FuncName) % FuncGroups;
933 if (Group != CGM.getCodeGenOpts().XRaySelectedFunctionGroup &&
934 !AlwaysXRayAttr)
935 Fn->addFnAttr("function-instrument", "xray-never");
936 }
937 }
938
939 if (CGM.getCodeGenOpts().getProfileInstr() !=
940 llvm::driver::ProfileInstrKind::ProfileNone) {
941 switch (CGM.isFunctionBlockedFromProfileInstr(Fn, Loc)) {
943 Fn->addFnAttr(llvm::Attribute::SkipProfile);
944 break;
946 Fn->addFnAttr(llvm::Attribute::NoProfile);
947 break;
949 break;
950 }
951 }
952
953 unsigned Count, Offset;
954 StringRef Section;
955 if (const auto *Attr =
956 D ? D->getAttr<PatchableFunctionEntryAttr>() : nullptr) {
957 Count = Attr->getCount();
958 Offset = Attr->getOffset();
959 Section = Attr->getSection();
960 } else {
961 Count = CGM.getCodeGenOpts().PatchableFunctionEntryCount;
962 Offset = CGM.getCodeGenOpts().PatchableFunctionEntryOffset;
963 }
964 if (Section.empty())
965 Section = CGM.getCodeGenOpts().PatchableFunctionEntrySection;
966 if (Count && Offset <= Count) {
967 Fn->addFnAttr("patchable-function-entry", std::to_string(Count - Offset));
968 if (Offset)
969 Fn->addFnAttr("patchable-function-prefix", std::to_string(Offset));
970 if (!Section.empty())
971 Fn->addFnAttr("patchable-function-entry-section", Section);
972 }
973 // Instruct that functions for COFF/CodeView targets should start with a
974 // patchable instruction, but only on x86/x64. Don't forward this to ARM/ARM64
975 // backends as they don't need it -- instructions on these architectures are
976 // always atomically patchable at runtime.
977 if (CGM.getCodeGenOpts().HotPatch &&
978 getContext().getTargetInfo().getTriple().isX86() &&
979 getContext().getTargetInfo().getTriple().getEnvironment() !=
980 llvm::Triple::CODE16)
981 Fn->addFnAttr("patchable-function", "prologue-short-redirect");
982
983 // Add no-jump-tables value.
984 if (CGM.getCodeGenOpts().NoUseJumpTables)
985 Fn->addFnAttr("no-jump-tables", "true");
986
987 // Add no-inline-line-tables value.
988 if (CGM.getCodeGenOpts().NoInlineLineTables)
989 Fn->addFnAttr("no-inline-line-tables");
990
991 // Add profile-sample-accurate value.
992 if (CGM.getCodeGenOpts().ProfileSampleAccurate)
993 Fn->addFnAttr("profile-sample-accurate");
994
995 if (!CGM.getCodeGenOpts().SampleProfileFile.empty())
996 Fn->addFnAttr("use-sample-profile");
997
998 if (D && D->hasAttr<CFICanonicalJumpTableAttr>())
999 Fn->addFnAttr("cfi-canonical-jump-table");
1000
1001 if (D && D->hasAttr<NoProfileFunctionAttr>())
1002 Fn->addFnAttr(llvm::Attribute::NoProfile);
1003
1004 if (D && D->hasAttr<HybridPatchableAttr>())
1005 Fn->addFnAttr(llvm::Attribute::HybridPatchable);
1006
1007 if (D) {
1008 // Function attributes take precedence over command line flags.
1009 if (auto *A = D->getAttr<FunctionReturnThunksAttr>()) {
1010 switch (A->getThunkType()) {
1011 case FunctionReturnThunksAttr::Kind::Keep:
1012 break;
1013 case FunctionReturnThunksAttr::Kind::Extern:
1014 Fn->addFnAttr(llvm::Attribute::FnRetThunkExtern);
1015 break;
1016 }
1017 } else if (CGM.getCodeGenOpts().FunctionReturnThunks)
1018 Fn->addFnAttr(llvm::Attribute::FnRetThunkExtern);
1019 }
1020
1021 if (FD && (getLangOpts().OpenCL ||
1022 (getLangOpts().CUDA &&
1023 getContext().getTargetInfo().getTriple().isSPIRV()) ||
1024 ((getLangOpts().HIP || getLangOpts().OffloadViaLLVM) &&
1025 getLangOpts().CUDAIsDevice))) {
1026 // Add metadata for a kernel function.
1027 EmitKernelMetadata(FD, Fn);
1028 }
1029
1030 if (FD && FD->hasAttr<ClspvLibclcBuiltinAttr>()) {
1031 Fn->setMetadata("clspv_libclc_builtin",
1032 llvm::MDNode::get(getLLVMContext(), {}));
1033 }
1034
1035 // If we are checking function types, emit a function type signature as
1036 // prologue data.
1037 if (FD && SanOpts.has(SanitizerKind::Function) &&
1039 if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) {
1040 llvm::LLVMContext &Ctx = Fn->getContext();
1041 llvm::MDBuilder MDB(Ctx);
1042 Fn->setMetadata(
1043 llvm::LLVMContext::MD_func_sanitize,
1044 MDB.createRTTIPointerPrologue(
1045 PrologueSig, getUBSanFunctionTypeHash(FD->getType())));
1046 }
1047 }
1048
1049 // If we're checking nullability, we need to know whether we can check the
1050 // return value. Initialize the flag to 'true' and refine it in EmitParmDecl.
1051 if (SanOpts.has(SanitizerKind::NullabilityReturn)) {
1052 auto Nullability = FnRetTy->getNullability();
1053 if (Nullability && *Nullability == NullabilityKind::NonNull &&
1054 !FnRetTy->isRecordType()) {
1055 if (!(SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
1056 CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>()))
1057 RetValNullabilityPrecondition =
1058 llvm::ConstantInt::getTrue(getLLVMContext());
1059 }
1060 }
1061
1062 // If we're in C++ mode and the function name is "main", it is guaranteed
1063 // to be norecurse by the standard (3.6.1.3 "The function main shall not be
1064 // used within a program").
1065 //
1066 // OpenCL C 2.0 v2.2-11 s6.9.i:
1067 // Recursion is not supported.
1068 //
1069 // HLSL
1070 // Recursion is not supported.
1071 //
1072 // SYCL v1.2.1 s3.10:
1073 // kernels cannot include RTTI information, exception classes,
1074 // recursive code, virtual functions or make use of C++ libraries that
1075 // are not compiled for the device.
1076 if (FD &&
1077 ((getLangOpts().CPlusPlus && FD->isMain()) || getLangOpts().OpenCL ||
1078 getLangOpts().HLSL || getLangOpts().SYCLIsDevice ||
1079 (getLangOpts().CUDA && FD->hasAttr<CUDAGlobalAttr>())))
1080 Fn->addFnAttr(llvm::Attribute::NoRecurse);
1081
1082 llvm::RoundingMode RM = getLangOpts().getDefaultRoundingMode();
1083 llvm::fp::ExceptionBehavior FPExceptionBehavior =
1084 ToConstrainedExceptMD(getLangOpts().getDefaultExceptionMode());
1085 Builder.setDefaultConstrainedRounding(RM);
1086 Builder.setDefaultConstrainedExcept(FPExceptionBehavior);
1087 if ((FD && (FD->UsesFPIntrin() || FD->hasAttr<StrictFPAttr>())) ||
1088 (!FD && (FPExceptionBehavior != llvm::fp::ebIgnore ||
1089 RM != llvm::RoundingMode::NearestTiesToEven))) {
1090 Builder.setIsFPConstrained(true);
1091 Fn->addFnAttr(llvm::Attribute::StrictFP);
1092 }
1093
1094 // If a custom alignment is used, force realigning to this alignment on
1095 // any main function which certainly will need it.
1096 if (FD && ((FD->isMain() || FD->isMSVCRTEntryPoint()) &&
1097 CGM.getCodeGenOpts().StackAlignment))
1098 Fn->addFnAttr("stackrealign");
1099
1100 // "main" doesn't need to zero out call-used registers.
1101 if (FD && FD->isMain())
1102 Fn->removeFnAttr("zero-call-used-regs");
1103
1104 // Add vscale_range attribute if appropriate.
1105 llvm::StringMap<bool> FeatureMap;
1106 auto IsArmStreaming = TargetInfo::ArmStreamingKind::NotStreaming;
1107 if (FD) {
1108 getContext().getFunctionFeatureMap(FeatureMap, FD);
1109 if (const auto *T = FD->getType()->getAs<FunctionProtoType>())
1110 if (T->getAArch64SMEAttributes() &
1113
1114 if (IsArmStreamingFunction(FD, true))
1116 }
1117 std::optional<std::pair<unsigned, unsigned>> VScaleRange =
1118 getContext().getTargetInfo().getVScaleRange(getLangOpts(), IsArmStreaming,
1119 &FeatureMap);
1120 if (VScaleRange) {
1121 CurFn->addFnAttr(llvm::Attribute::getWithVScaleRangeArgs(
1122 getLLVMContext(), VScaleRange->first, VScaleRange->second));
1123 }
1124
1125 llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
1126
1127 // Create a marker to make it easy to insert allocas into the entryblock
1128 // later. Don't create this with the builder, because we don't want it
1129 // folded.
1130 llvm::Value *Poison = llvm::PoisonValue::get(Int32Ty);
1131 AllocaInsertPt = new llvm::BitCastInst(Poison, Int32Ty, "allocapt", EntryBB);
1132
1134
1135 Builder.SetInsertPoint(EntryBB);
1136
1137 // If we're checking the return value, allocate space for a pointer to a
1138 // precise source location of the checked return statement.
1139 if (requiresReturnValueCheck()) {
1140 ReturnLocation = CreateDefaultAlignTempAlloca(Int8PtrTy, "return.sloc.ptr");
1141 Builder.CreateStore(llvm::ConstantPointerNull::get(Int8PtrTy),
1142 ReturnLocation);
1143 }
1144
1145 // Emit subprogram debug descriptor.
1146 if (CGDebugInfo *DI = getDebugInfo()) {
1147 // Reconstruct the type from the argument list so that implicit parameters,
1148 // such as 'this' and 'vtt', show up in the debug info. Preserve the calling
1149 // convention.
1150 DI->emitFunctionStart(GD, Loc, StartLoc,
1151 DI->getFunctionType(FD, RetTy, Args), CurFn,
1153 }
1154
1156 if (CGM.getCodeGenOpts().InstrumentFunctions)
1157 CurFn->addFnAttr("instrument-function-entry", "__cyg_profile_func_enter");
1158 if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
1159 CurFn->addFnAttr("instrument-function-entry-inlined",
1160 "__cyg_profile_func_enter");
1161 if (CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
1162 CurFn->addFnAttr("instrument-function-entry-inlined",
1163 "__cyg_profile_func_enter_bare");
1164 }
1165
1166 // Since emitting the mcount call here impacts optimizations such as function
1167 // inlining, we just add an attribute to insert a mcount call in backend.
1168 // The attribute "counting-function" is set to mcount function name which is
1169 // architecture dependent.
1170 if (CGM.getCodeGenOpts().InstrumentForProfiling) {
1171 // Calls to fentry/mcount should not be generated if function has
1172 // the no_instrument_function attribute.
1173 if (!CurFuncDecl || !CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>()) {
1174 if (CGM.getCodeGenOpts().CallFEntry)
1175 Fn->addFnAttr("fentry-call", "true");
1176 else {
1177 Fn->addFnAttr("instrument-function-entry-inlined",
1178 getTarget().getMCountName());
1179 }
1180 if (CGM.getCodeGenOpts().MNopMCount) {
1181 if (!CGM.getCodeGenOpts().CallFEntry)
1182 CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
1183 << "-mnop-mcount" << "-mfentry";
1184 Fn->addFnAttr("mnop-mcount");
1185 }
1186
1187 if (CGM.getCodeGenOpts().RecordMCount) {
1188 if (!CGM.getCodeGenOpts().CallFEntry)
1189 CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
1190 << "-mrecord-mcount" << "-mfentry";
1191 Fn->addFnAttr("mrecord-mcount");
1192 }
1193 }
1194 }
1195
1196 if (CGM.getCodeGenOpts().PackedStack) {
1197 if (getContext().getTargetInfo().getTriple().getArch() !=
1198 llvm::Triple::systemz)
1199 CGM.getDiags().Report(diag::err_opt_not_valid_on_target)
1200 << "-mpacked-stack";
1201 Fn->addFnAttr("packed-stack");
1202 }
1203
1204 if (CGM.getCodeGenOpts().WarnStackSize != UINT_MAX &&
1205 !CGM.getDiags().isIgnored(diag::warn_fe_backend_frame_larger_than, Loc))
1206 Fn->addFnAttr("warn-stack-size",
1207 std::to_string(CGM.getCodeGenOpts().WarnStackSize));
1208
1209 if (RetTy->isVoidType()) {
1210 // Void type; nothing to return.
1212
1213 // Count the implicit return.
1214 if (!endsWithReturn(D))
1215 ++NumReturnExprs;
1216 } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) {
1217 // Indirect return; emit returned value directly into sret slot.
1218 // This reduces code size, and affects correctness in C++.
1219 auto AI = CurFn->arg_begin();
1220 if (CurFnInfo->getReturnInfo().isSRetAfterThis())
1221 ++AI;
1223 &*AI, RetTy, CurFnInfo->getReturnInfo().getIndirectAlign(), false,
1224 nullptr, nullptr, KnownNonNull);
1225 if (!CurFnInfo->getReturnInfo().getIndirectByVal()) {
1227 CreateDefaultAlignTempAlloca(ReturnValue.getType(), "result.ptr");
1228 Builder.CreateStore(ReturnValue.emitRawPointer(*this),
1230 }
1231 } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
1232 !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
1233 // Load the sret pointer from the argument struct and return into that.
1234 unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
1235 llvm::Function::arg_iterator EI = CurFn->arg_end();
1236 --EI;
1237 llvm::Value *Addr = Builder.CreateStructGEP(
1238 CurFnInfo->getArgStruct(), &*EI, Idx);
1239 llvm::Type *Ty =
1240 cast<llvm::GetElementPtrInst>(Addr)->getResultElementType();
1242 Addr = Builder.CreateAlignedLoad(Ty, Addr, getPointerAlign(), "agg.result");
1244 CGM.getNaturalTypeAlignment(RetTy), KnownNonNull);
1245 } else {
1246 ReturnValue = CreateIRTempWithoutCast(RetTy, "retval");
1247
1248 // Tell the epilog emitter to autorelease the result. We do this
1249 // now so that various specialized functions can suppress it
1250 // during their IR-generation.
1251 if (getLangOpts().ObjCAutoRefCount &&
1252 !CurFnInfo->isReturnsRetained() &&
1253 RetTy->isObjCRetainableType())
1254 AutoreleaseResult = true;
1255 }
1256
1258
1259 PrologueCleanupDepth = EHStack.stable_begin();
1260
1261 // Emit OpenMP specific initialization of the device functions.
1262 if (getLangOpts().OpenMP && CurCodeDecl)
1263 CGM.getOpenMPRuntime().emitFunctionProlog(*this, CurCodeDecl);
1264
1265 if (FD && getLangOpts().HLSL) {
1266 // Handle emitting HLSL entry functions.
1267 if (FD->hasAttr<HLSLShaderAttr>()) {
1268 CGM.getHLSLRuntime().emitEntryFunction(FD, Fn);
1269 }
1270 }
1271
1273
1274 if (const CXXMethodDecl *MD = dyn_cast_if_present<CXXMethodDecl>(D);
1275 MD && !MD->isStatic()) {
1276 bool IsInLambda =
1277 MD->getParent()->isLambda() && MD->getOverloadedOperator() == OO_Call;
1279 CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
1280 if (IsInLambda) {
1281 // We're in a lambda; figure out the captures.
1285 // If the lambda captures the object referred to by '*this' - either by
1286 // value or by reference, make sure CXXThisValue points to the correct
1287 // object.
1288
1289 // Get the lvalue for the field (which is a copy of the enclosing object
1290 // or contains the address of the enclosing object).
1292 if (!LambdaThisCaptureField->getType()->isPointerType()) {
1293 // If the enclosing object was captured by value, just use its
1294 // address. Sign this pointer.
1295 CXXThisValue = ThisFieldLValue.getPointer(*this);
1296 } else {
1297 // Load the lvalue pointed to by the field, since '*this' was captured
1298 // by reference.
1299 CXXThisValue =
1300 EmitLoadOfLValue(ThisFieldLValue, SourceLocation()).getScalarVal();
1301 }
1302 }
1303 for (auto *FD : MD->getParent()->fields()) {
1304 if (FD->hasCapturedVLAType()) {
1305 auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
1307 auto VAT = FD->getCapturedVLAType();
1308 VLASizeMap[VAT->getSizeExpr()] = ExprArg;
1309 }
1310 }
1311 } else if (MD->isImplicitObjectMemberFunction()) {
1312 // Not in a lambda; just use 'this' from the method.
1313 // FIXME: Should we generate a new load for each use of 'this'? The
1314 // fast register allocator would be happier...
1315 CXXThisValue = CXXABIThisValue;
1316 }
1317
1318 // Check the 'this' pointer once per function, if it's available.
1319 if (CXXABIThisValue) {
1320 SanitizerSet SkippedChecks;
1321 SkippedChecks.set(SanitizerKind::ObjectSize, true);
1322 QualType ThisTy = MD->getThisType();
1323
1324 // If this is the call operator of a lambda with no captures, it
1325 // may have a static invoker function, which may call this operator with
1326 // a null 'this' pointer.
1328 SkippedChecks.set(SanitizerKind::Null, true);
1329
1332 Loc, CXXABIThisValue, ThisTy, CXXABIThisAlignment, SkippedChecks);
1333 }
1334 }
1335
1336 // If any of the arguments have a variably modified type, make sure to
1337 // emit the type size, but only if the function is not naked. Naked functions
1338 // have no prolog to run this evaluation.
1339 if (!FD || !FD->hasAttr<NakedAttr>()) {
1340 for (const VarDecl *VD : Args) {
1341 // Dig out the type as written from ParmVarDecls; it's unclear whether
1342 // the standard (C99 6.9.1p10) requires this, but we're following the
1343 // precedent set by gcc.
1344 QualType Ty;
1345 if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
1346 Ty = PVD->getOriginalType();
1347 else
1348 Ty = VD->getType();
1349
1350 if (Ty->isVariablyModifiedType())
1352 }
1353 }
1354 // Emit a location at the end of the prologue.
1355 if (CGDebugInfo *DI = getDebugInfo())
1356 DI->EmitLocation(Builder, StartLoc);
1357 // TODO: Do we need to handle this in two places like we do with
1358 // target-features/target-cpu?
1359 if (CurFuncDecl)
1360 if (const auto *VecWidth = CurFuncDecl->getAttr<MinVectorWidthAttr>())
1361 LargestVectorWidth = VecWidth->getVectorWidth();
1362
1363 if (CGM.shouldEmitConvergenceTokens())
1364 ConvergenceTokenStack.push_back(getOrEmitConvergenceEntryToken(CurFn));
1365}
1366
1370 if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
1372 else
1373 EmitStmt(Body);
1374}
1375
/// When instrumenting to collect profile data, the counts for some blocks
/// such as switch cases need to not include the fall-through counts, so
/// emit a branch around the instrumentation code. When not instrumenting,
/// this just calls EmitBlock().
                                  const Stmt *S) {
  // Block that fall-through code will branch to, skipping the counter code.
  llvm::BasicBlock *SkipCountBB = nullptr;
  if (HaveInsertPoint() && CGM.getCodeGenOpts().hasProfileClangInstr()) {
    // When instrumenting for profiling, the fallthrough to certain
    // statements needs to skip over the instrumentation code so that we
    // get an accurate count.
    SkipCountBB = createBasicBlock("skipcount");
    EmitBranch(SkipCountBB);
  }
  EmitBlock(BB);
  // Record the profile count at this point in emission.
  uint64_t CurrentCount = getCurrentProfileCount();
  // Resume emission in the skip block so the fall-through path rejoins
  // after the instrumentation code.
  if (SkipCountBB)
    EmitBlock(SkipCountBB);
}
1397
1398/// Tries to mark the given function nounwind based on the
1399/// non-existence of any throwing calls within it. We believe this is
1400/// lightweight enough to do at -O0.
1401static void TryMarkNoThrow(llvm::Function *F) {
1402 // LLVM treats 'nounwind' on a function as part of the type, so we
1403 // can't do this on functions that can be overwritten.
1404 if (F->isInterposable()) return;
1405
1406 for (llvm::BasicBlock &BB : *F)
1407 for (llvm::Instruction &I : BB)
1408 if (I.mayThrow())
1409 return;
1410
1411 F->setDoesNotThrow();
1412}
1413
                                       FunctionArgList &Args) {
  // Collect the parameter list (including implicit parameters such as 'this'
  // and ABI-mandated extras) for the function behind GD into Args, and
  // compute the effective return type after any C++ ABI adjustments.
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  QualType ResTy = FD->getReturnType();

  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
  if (MD && MD->isImplicitObjectMemberFunction()) {
    // Some C++ ABIs return 'this' (or a most-derived pointer) from certain
    // member functions; reflect that in the computed return type.
    if (CGM.getCXXABI().HasThisReturn(GD))
      ResTy = MD->getThisType();
    else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
      ResTy = CGM.getContext().VoidPtrTy;
    // Add the implicit 'this' parameter.
    CGM.getCXXABI().buildThisParam(*this, Args);
  }

  // The base version of an inheriting constructor whose constructed base is a
  // virtual base is not passed any arguments (because it doesn't actually call
  // the inherited constructor).
  bool PassedParams = true;
  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    if (auto Inherited = CD->getInheritedConstructor())
      PassedParams =
          getTypes().inheritingCtorHasParams(Inherited, GD.getCtorType());

  if (PassedParams) {
    for (auto *Param : FD->parameters()) {
      Args.push_back(Param);
      if (!Param->hasAttr<PassObjectSizeAttr>())
        continue;

      // pass_object_size parameters are followed by an implicit size_t
      // argument; remember it in SizeArguments so calls can supply it.
          getContext(), Param->getDeclContext(), Param->getLocation(),
          /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamKind::Other);
      SizeArguments[Param] = Implicit;
      Args.push_back(Implicit);
    }
  }

  // Constructors and destructors may need extra implicit parameters, as
  // dictated by the C++ ABI in use.
  if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
    CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);

  return ResTy;
}
1456
/// Drive IR generation for the function definition identified by \p GD into
/// \p Fn: build the argument list, emit the prologue (StartFunction), emit
/// the appropriate kind of body (destructor, constructor, CUDA device stub,
/// lambda static-invoker forwarding, defaulted assignment, OpenCL kernel
/// stub call, or a plain statement body), then emit the epilogue
/// (FinishFunction) and late fix-ups such as the personality routine.
void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
                                   const CGFunctionInfo &FnInfo) {
  assert(Fn && "generating code for null Function");
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  CurGD = GD;

  FunctionArgList Args;
  QualType ResTy = BuildFunctionArgList(GD, Args);

  // Target-specific ABI check for this function declaration.
  CGM.getTargetCodeGenInfo().checkFunctionABI(CGM, FD);

  if (FD->isInlineBuiltinDeclaration()) {
    // When generating code for a builtin with an inline declaration, use a
    // mangled name to hold the actual body, while keeping an external
    // definition in case the function pointer is referenced somewhere.
    std::string FDInlineName = (Fn->getName() + ".inline").str();
    llvm::Module *M = Fn->getParent();
    llvm::Function *Clone = M->getFunction(FDInlineName);
    if (!Clone) {
      Clone = llvm::Function::Create(Fn->getFunctionType(),
                                     llvm::GlobalValue::InternalLinkage,
                                     Fn->getAddressSpace(), FDInlineName, M);
      Clone->addFnAttr(llvm::Attribute::AlwaysInline);
    }
    Fn->setLinkage(llvm::GlobalValue::ExternalLinkage);
    Fn = Clone;
  } else {
    // Detect the unusual situation where an inline version is shadowed by a
    // non-inline version. In that case we should pick the external one
    // everywhere. That's GCC behavior too. Unfortunately, I cannot find a way
    // to detect that situation before we reach codegen, so do some late
    // replacement.
    for (const FunctionDecl *PD = FD->getPreviousDecl(); PD;
         PD = PD->getPreviousDecl()) {
      if (LLVM_UNLIKELY(PD->isInlineBuiltinDeclaration())) {
        std::string FDInlineName = (Fn->getName() + ".inline").str();
        llvm::Module *M = Fn->getParent();
        if (llvm::Function *Clone = M->getFunction(FDInlineName)) {
          Clone->replaceAllUsesWith(Fn);
          Clone->eraseFromParent();
        }
        break;
      }
    }
  }

  // Check if we should generate debug info for this function.
  if (FD->hasAttr<NoDebugAttr>()) {
    // Clear non-distinct debug info that was possibly attached to the function
    // due to an earlier declaration without the nodebug attribute.
    Fn->setSubprogram(nullptr);
    // Disable debug info indefinitely for this function.
    DebugInfo = nullptr;
  }
  // Finalize function debug info on exit.
  llvm::scope_exit Cleanup([this] {
    if (CGDebugInfo *DI = getDebugInfo())
      DI->completeFunction();
  });

  // The function might not have a body if we're generating thunks for a
  // function declaration.
  SourceRange BodyRange;
  if (Stmt *Body = FD->getBody())
    BodyRange = Body->getSourceRange();
  else
    BodyRange = FD->getLocation();
  CurEHLocation = BodyRange.getEnd();

  // Use the location of the start of the function to determine where
  // the function definition is located. By default use the location
  // of the declaration as the location for the subprogram. A function
  // may lack a declaration in the source code if it is created by code
  // gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
  SourceLocation Loc = FD->getLocation();

  // If this is a function specialization then use the pattern body
  // as the location for the function.
  if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
    if (SpecDecl->hasBody(SpecDecl))
      Loc = SpecDecl->getLocation();

  Stmt *Body = FD->getBody();

  if (Body) {
    // Coroutines always emit lifetime markers.
    if (isa<CoroutineBodyStmt>(Body))
      ShouldEmitLifetimeMarkers = true;

    // Initialize helper which will detect jumps which can cause invalid
    // lifetime markers.
    if (ShouldEmitLifetimeMarkers)
      Bypasses.Init(CGM, Body);
  }

  // Emit the standard function prologue.
  StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());

  // Save parameters for coroutine function.
  if (Body && isa_and_nonnull<CoroutineBodyStmt>(Body))
    llvm::append_range(FnArgs, FD->parameters());

  // Ensure that the function adheres to the forward progress guarantee, which
  // is required by certain optimizations.
  // In C++11 and up, the attribute will be removed if the body contains a
  // trivial empty loop.
    CurFn->addFnAttr(llvm::Attribute::MustProgress);

  // Generate the body of the function.
  PGO->assignRegionCounters(GD, CurFn);
  if (isa<CXXDestructorDecl>(FD))
    EmitDestructorBody(Args);
  else if (isa<CXXConstructorDecl>(FD))
    EmitConstructorBody(Args);
  else if (getLangOpts().CUDA &&
           !getLangOpts().CUDAIsDevice &&
           FD->hasAttr<CUDAGlobalAttr>())
    // Host-side view of a CUDA kernel: emit the launch stub instead of a body.
    CGM.getCUDARuntime().emitDeviceStub(*this, Args);
  else if (isa<CXXMethodDecl>(FD) &&
           cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
    // The lambda static invoker function is special, because it forwards or
    // clones the body of the function call operator (but is actually static).
  } else if (isa<CXXMethodDecl>(FD) &&
             !FnInfo.isDelegateCall() &&
             cast<CXXMethodDecl>(FD)->getParent()->getLambdaStaticInvoker() &&
             hasInAllocaArg(cast<CXXMethodDecl>(FD))) {
    // If emitting a lambda with static invoker on X86 Windows, change
    // the call operator body.
    // Make sure that this is a call operator with an inalloca arg and check
    // for delegate call to make sure this is the original call op and not the
    // new forwarding function for the static invoker.
  } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
             (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
              cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
    // Implicit copy-assignment gets the same special treatment as implicit
    // copy-constructors.
  } else if (DeviceKernelAttr::isOpenCLSpelling(
                 FD->getAttr<DeviceKernelAttr>()) &&
    // OpenCL kernel entry: reload the formal arguments and forward them via a
    // call to the generated kernel stub.
    CallArgList CallArgs;
    for (unsigned i = 0; i < Args.size(); ++i) {
      Address ArgAddr = GetAddrOfLocalVar(Args[i]);
      QualType ArgQualType = Args[i]->getType();
      RValue ArgRValue = convertTempToRValue(ArgAddr, ArgQualType, Loc);
      CallArgs.add(ArgRValue, ArgQualType);
    }
    const FunctionType *FT = cast<FunctionType>(FD->getType());
    CGM.getTargetCodeGenInfo().setOCLKernelStubCallingConvention(FT);
    const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall(
        CallArgs, FT, /*ChainCall=*/false);
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FnInfo);
    llvm::Constant *GDStubFunctionPointer =
        CGM.getRawFunctionPointer(GDStub, FTy);
    CGCallee GDStubCallee = CGCallee::forDirect(GDStubFunctionPointer, GDStub);
    EmitCall(FnInfo, GDStubCallee, ReturnValueSlot(), CallArgs, nullptr, false,
             Loc);
  } else if (Body) {
    EmitFunctionBody(Body);
  } else
    llvm_unreachable("no definition for emitted function");

  // C++11 [stmt.return]p2:
  //   Flowing off the end of a function [...] results in undefined behavior in
  //   a value-returning function.
  // C11 6.9.1p12:
  //   If the '}' that terminates a function is reached, and the value of the
  //   function call is used by the caller, the behavior is undefined.
      !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
    bool ShouldEmitUnreachable =
        CGM.getCodeGenOpts().StrictReturn ||
        !CGM.MayDropFunctionReturn(FD->getASTContext(), FD->getReturnType());
    if (SanOpts.has(SanitizerKind::Return)) {
      // With -fsanitize=return, emit an always-failing check so falling off
      // the end diagnoses a missing return at run time.
      auto CheckOrdinal = SanitizerKind::SO_Return;
      auto CheckHandler = SanitizerHandler::MissingReturn;
      SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
      llvm::Value *IsFalse = Builder.getFalse();
      EmitCheck(std::make_pair(IsFalse, CheckOrdinal), CheckHandler,
    } else if (ShouldEmitUnreachable) {
      // At -O0 a trap makes falling off the end fail fast and debuggably.
      if (CGM.getCodeGenOpts().OptimizationLevel == 0)
        EmitTrapCall(llvm::Intrinsic::trap);
    }
    if (SanOpts.has(SanitizerKind::Return) || ShouldEmitUnreachable) {
      Builder.CreateUnreachable();
      Builder.ClearInsertionPoint();
    }
  }

  // Emit the standard function epilogue.
  FinishFunction(BodyRange.getEnd());

  PGO->verifyCounterMap();

  // An explicit 'personality' attribute selects the EH personality routine
  // for this function.
  if (CurCodeDecl->hasAttr<PersonalityAttr>()) {
    StringRef Identifier =
        CurCodeDecl->getAttr<PersonalityAttr>()->getRoutine()->getName();
    llvm::FunctionCallee PersonalityRoutine =
        CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.Int32Ty, true),
                                  Identifier, {}, /*local=*/true);
    Fn->setPersonalityFn(cast<llvm::Constant>(PersonalityRoutine.getCallee()));
  }

  // If we haven't marked the function nothrow through other means, do
  // a quick pass now to see if we can.
  if (!CurFn->doesNotThrow())
}
1671
1672/// ContainsLabel - Return true if the statement contains a label in it. If
1673/// this statement is not executed normally, it not containing a label means
1674/// that we can just remove the code.
1675bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
1676 // Null statement, not a label!
1677 if (!S) return false;
1678
1679 // If this is a label, we have to emit the code, consider something like:
1680 // if (0) { ... foo: bar(); } goto foo;
1681 //
1682 // TODO: If anyone cared, we could track __label__'s, since we know that you
1683 // can't jump to one from outside their declared region.
1684 if (isa<LabelStmt>(S))
1685 return true;
1686
1687 // If this is a case/default statement, and we haven't seen a switch, we have
1688 // to emit the code.
1689 if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
1690 return true;
1691
1692 // If this is a switch statement, we want to ignore cases below it.
1693 if (isa<SwitchStmt>(S))
1694 IgnoreCaseStmts = true;
1695
1696 // Scan subexpressions for verboten labels.
1697 for (const Stmt *SubStmt : S->children())
1698 if (ContainsLabel(SubStmt, IgnoreCaseStmts))
1699 return true;
1700
1701 return false;
1702}
1703
/// containsBreak - Return true if the statement contains a break out of it.
/// If the statement (recursively) contains a switch or loop with a break
/// inside of it, this is fine.
  // A null statement cannot contain a break.
  if (!S) return false;

  // If this is a switch or loop that defines its own break scope, then we can
  // include it and anything inside of it.
  if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
      isa<ForStmt>(S))
    return false;

  // A break at this level escapes the statement we were asked about.
  if (isa<BreakStmt>(S))
    return true;

  // Scan subexpressions for verboten breaks.
  for (const Stmt *SubStmt : S->children())
    if (containsBreak(SubStmt))
      return true;

  return false;
}
1727
  // A null statement declares nothing.
  if (!S) return false;

  // Some statement kinds add a scope and thus never add a decl to the current
  // scope. Note, this list is longer than the list of statements that might
  // have an unscoped decl nested within them, but this way is conservatively
  // correct even if more statement kinds are added.
  if (isa<IfStmt>(S) || isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
    return false;

  // A declaration statement directly introduces a decl into the current scope.
  if (isa<DeclStmt>(S))
    return true;

  // Otherwise, conservatively recurse into sub-statements.
  for (const Stmt *SubStmt : S->children())
    if (mightAddDeclToScope(SubStmt))
      return true;

  return false;
}
1750
/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false. If it
/// constant folds return true and set the boolean result in Result.
                                                   bool &ResultBool,
                                                   bool AllowLabels) {
  // If MC/DC is enabled, disable folding so that we can instrument all
  // conditions to yield complete test vectors. We still keep track of
  // folded conditions during region mapping and visualization.
  if (!AllowLabels && CGM.getCodeGenOpts().hasProfileClangInstr() &&
      CGM.getCodeGenOpts().MCDCCoverage)
    return false;

  // Delegate to the APSInt overload and reduce its result to a bool.
  llvm::APSInt ResultInt;
  if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels))
    return false;

  ResultBool = ResultInt.getBoolValue();
  return true;
}
1771
1772/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1773/// to a constant, or if it does but contains a label, return false. If it
1774/// constant folds return true and set the folded value.
// NOTE(review): lines 1775 (signature) and 1780 (presumably the
// `Expr::EvalResult Result;` declaration read below) were elided by the
// listing extraction; visible code is unchanged.
1776                                                   llvm::APSInt &ResultInt,
1777                                                   bool AllowLabels) {
1778  // FIXME: Rename and handle conversion of other evaluatable things
1779  // to bool.
1781  if (!Cond->EvaluateAsInt(Result, getContext()))
1782    return false; // Not foldable, not integer or not fully evaluatable.
1783
1784  llvm::APSInt Int = Result.Val.getInt();
1785  if (!AllowLabels && CodeGenFunction::ContainsLabel(Cond))
1786    return false; // Contains a label.
1787
// Tell PGO the statement may still be referenced even though it folded away.
1788  PGO->markStmtMaybeUsed(Cond);
1789  ResultInt = std::move(Int);
1790  return true;
1791}
1792
1793/// Strip parentheses and simplistic logical-NOT operators.
// NOTE(review): lines 1794 (signature) and 1797-1798 (the IgnoreExprNodes
// argument list) were elided by the listing; the loop iterates until
// stripping reaches a fixed point (C == SC).
1795  while (true) {
1796    const Expr *SC = IgnoreExprNodes(
1799    if (C == SC)
1800      return SC;
1801    C = SC;
1802  }
1803}
1804
1805/// Determine whether the given condition is an instrumentable condition
1806/// (i.e. no "&&" or "||").
// NOTE(review): the signature line (1807) was elided by the listing. A leaf
// condition is instrumentable when, after stripping parens/!, it is not a
// logical binary operator.
1808  const BinaryOperator *BOp = dyn_cast<BinaryOperator>(stripCond(C));
1809  return (!BOp || !BOp->isLogicalOp());
1810}
1811
1812/// EmitBranchToCounterBlock - Emit a conditional branch to a new block that
1813/// increments a profile counter based on the semantics of the given logical
1814/// operator opcode. This is used to instrument branch condition coverage for
1815/// logical operators.
// NOTE(review): lines 1816 (signature start), 1883 and 1891 (presumably the
// counter-increment calls inside SkipIncrBlock / CounterIncrBlock) were
// elided by the listing extraction; visible code is unchanged.
1817    const Expr *Cond, BinaryOperator::Opcode LOp, llvm::BasicBlock *TrueBlock,
1818    llvm::BasicBlock *FalseBlock, uint64_t TrueCount /* = 0 */,
1819    Stmt::Likelihood LH /* =None */, const Expr *CntrIdx /* = nullptr */) {
1820  // If not instrumenting, just emit a branch.
1821  bool InstrumentRegions = CGM.getCodeGenOpts().hasProfileClangInstr();
1822  if (!InstrumentRegions || !isInstrumentedCondition(Cond))
1823    return EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount, LH);
1824
// Counter attribution defaults to the condition itself unless an explicit
// counter-index expression was supplied by the caller.
1825  const Stmt *CntrStmt = (CntrIdx ? CntrIdx : Cond);
1826
1827  llvm::BasicBlock *ThenBlock = nullptr;
1828  llvm::BasicBlock *ElseBlock = nullptr;
1829  llvm::BasicBlock *NextBlock = nullptr;
1830
1831  // Create the block we'll use to increment the appropriate counter.
1832  llvm::BasicBlock *CounterIncrBlock = createBasicBlock("lop.rhscnt");
1833
// Optional extra block for the "skip" counter, only when one exists for
// this statement.
1834  llvm::BasicBlock *SkipIncrBlock =
1835      (hasSkipCounter(CntrStmt) ? createBasicBlock("lop.rhsskip") : nullptr);
1836  llvm::BasicBlock *SkipNextBlock = nullptr;
1837
1838  // Set block pointers according to Logical-AND (BO_LAnd) semantics. This
1839  // means we need to evaluate the condition and increment the counter on TRUE:
1840  //
1841  // if (Cond)
1842  //   goto CounterIncrBlock;
1843  // else
1844  //   goto FalseBlock;
1845  //
1846  // CounterIncrBlock:
1847  //   Counter++;
1848  //   goto TrueBlock;
1849
1850  if (LOp == BO_LAnd) {
1851    SkipNextBlock = FalseBlock;
1852    ThenBlock = CounterIncrBlock;
1853    ElseBlock = (SkipIncrBlock ? SkipIncrBlock : SkipNextBlock);
1854    NextBlock = TrueBlock;
1855  }
1856
1857  // Set block pointers according to Logical-OR (BO_LOr) semantics. This means
1858  // we need to evaluate the condition and increment the counter on FALSE:
1859  //
1860  // if (Cond)
1861  //   goto TrueBlock;
1862  // else
1863  //   goto CounterIncrBlock;
1864  //
1865  // CounterIncrBlock:
1866  //   Counter++;
1867  //   goto FalseBlock;
1868
1869  else if (LOp == BO_LOr) {
1870    SkipNextBlock = TrueBlock;
1871    ThenBlock = (SkipIncrBlock ? SkipIncrBlock : SkipNextBlock);
1872    ElseBlock = CounterIncrBlock;
1873    NextBlock = FalseBlock;
1874  } else {
1875    llvm_unreachable("Expected Opcode must be that of a Logical Operator");
1876  }
1877
1878  // Emit Branch based on condition.
1879  EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, TrueCount, LH);
1880
1881  if (SkipIncrBlock) {
1882    EmitBlock(SkipIncrBlock);
1884    EmitBranch(SkipNextBlock);
1885  }
1886
1887  // Emit the block containing the counter increment(s).
1888  EmitBlock(CounterIncrBlock);
1889
1890  // Increment corresponding counter; if index not provided, use Cond as index.
1892
1893  // Go to the next block.
1894  EmitBranch(NextBlock);
1895}
1896
1897/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
1898/// statement) to the specified blocks. Based on the condition, this might try
1899/// to simplify the codegen of the conditional based on the branch.
1900/// \param LH The value of the likelihood attribute on the True branch.
1901/// \param ConditionalOp Used by MC/DC code coverage to track the result of the
1902/// ConditionalOperator (ternary) through a recursive call for the operator's
1903/// LHS and RHS nodes.
// NOTE(review): several hyperlinked lines were dropped by the listing
// extraction (1904 signature start; 1956, 1962, 2017, 2023 presumably
// profile-counter calls; 2043 part of the MCDCCondition expression; 2064
// the recursive-call argument tail; 2080, 2090 presumably
// incrementProfileCounter calls; 2130 the second half of the MC/DC guard;
// 2170, 2175 HLSL hint sub-expressions). Visible code is unchanged.
1905    const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock,
1906    uint64_t TrueCount, Stmt::Likelihood LH, const Expr *ConditionalOp,
1907    const VarDecl *ConditionalDecl) {
1908  Cond = Cond->IgnoreParens();
1909
1910  if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
1911    bool HasSkip = hasSkipCounter(CondBOp);
1912
1913    // Handle X && Y in a condition.
1914    if (CondBOp->getOpcode() == BO_LAnd) {
1915      // If we have "1 && X", simplify the code. "0 && X" would have constant
1916      // folded if the case was simple enough.
1917      bool ConstantBool = false;
1918      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1919          ConstantBool) {
1920        // br(1 && X) -> br(X).
1921        incrementProfileCounter(CondBOp);
1922        EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LAnd, TrueBlock,
1923                                 FalseBlock, TrueCount, LH);
1924        return;
1925      }
1926
1927      // If we have "X && 1", simplify the code to use an uncond branch.
1928      // "X && 0" would have been constant folded to 0.
1929      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1930          ConstantBool) {
1931        // br(X && 1) -> br(X).
1932        EmitBranchToCounterBlock(CondBOp->getLHS(), BO_LAnd, TrueBlock,
1933                                 FalseBlock, TrueCount, LH, CondBOp);
1934        return;
1935      }
1936
1937      // Emit the LHS as a conditional. If the LHS conditional is false, we
1938      // want to jump to the FalseBlock.
1939      llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
1940      llvm::BasicBlock *LHSFalse =
1941          (HasSkip ? createBasicBlock("land.lhsskip") : FalseBlock);
1942      // The counter tells us how often we evaluate RHS, and all of TrueCount
1943      // can be propagated to that branch.
1944      uint64_t RHSCount = getProfileCount(CondBOp->getRHS());
1945
1946      ConditionalEvaluation eval(*this);
1947      {
1948        ApplyDebugLocation DL(*this, Cond);
1949        // Propagate the likelihood attribute like __builtin_expect
1950        // __builtin_expect(X && Y, 1) -> X and Y are likely
1951        // __builtin_expect(X && Y, 0) -> only Y is unlikely
1952        EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, LHSFalse, RHSCount,
1953                             LH == Stmt::LH_Unlikely ? Stmt::LH_None : LH);
1954        if (HasSkip) {
1955          EmitBlock(LHSFalse);
1957          EmitBranch(FalseBlock);
1958        }
1959        EmitBlock(LHSTrue);
1960      }
1961
1963      setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1964
1965      // Any temporaries created here are conditional.
1966      eval.begin(*this);
1967      EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LAnd, TrueBlock,
1968                               FalseBlock, TrueCount, LH);
1969      eval.end(*this);
1970      return;
1971    }
1972
1973    if (CondBOp->getOpcode() == BO_LOr) {
1974      // If we have "0 || X", simplify the code. "1 || X" would have constant
1975      // folded if the case was simple enough.
1976      bool ConstantBool = false;
1977      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1978          !ConstantBool) {
1979        // br(0 || X) -> br(X).
1980        incrementProfileCounter(CondBOp);
1981        EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LOr, TrueBlock,
1982                                 FalseBlock, TrueCount, LH);
1983        return;
1984      }
1985
1986      // If we have "X || 0", simplify the code to use an uncond branch.
1987      // "X || 1" would have been constant folded to 1.
1988      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1989          !ConstantBool) {
1990        // br(X || 0) -> br(X).
1991        EmitBranchToCounterBlock(CondBOp->getLHS(), BO_LOr, TrueBlock,
1992                                 FalseBlock, TrueCount, LH, CondBOp);
1993        return;
1994      }
1995      // Emit the LHS as a conditional. If the LHS conditional is true, we
1996      // want to jump to the TrueBlock.
1997      llvm::BasicBlock *LHSTrue =
1998          (HasSkip ? createBasicBlock("lor.lhsskip") : TrueBlock);
1999      llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
2000      // We have the count for entry to the RHS and for the whole expression
2001      // being true, so we can divy up True count between the short circuit and
2002      // the RHS.
2003      uint64_t LHSCount =
2004          getCurrentProfileCount() - getProfileCount(CondBOp->getRHS());
2005      uint64_t RHSCount = TrueCount - LHSCount;
2006
2007      ConditionalEvaluation eval(*this);
2008      {
2009        // Propagate the likelihood attribute like __builtin_expect
2010        // __builtin_expect(X || Y, 1) -> only Y is likely
2011        // __builtin_expect(X || Y, 0) -> both X and Y are unlikely
2012        ApplyDebugLocation DL(*this, Cond);
2013        EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, LHSFalse, LHSCount,
2014                             LH == Stmt::LH_Likely ? Stmt::LH_None : LH);
2015        if (HasSkip) {
2016          EmitBlock(LHSTrue);
2018          EmitBranch(TrueBlock);
2019        }
2020        EmitBlock(LHSFalse);
2021      }
2022
2024      setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
2025
2026      // Any temporaries created here are conditional.
2027      eval.begin(*this);
2028      EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LOr, TrueBlock, FalseBlock,
2029                               RHSCount, LH);
2030
2031      eval.end(*this);
2032      return;
2033    }
2034  }
2035
2036  if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
2037    // br(!x, t, f) -> br(x, f, t)
2038    // Avoid doing this optimization when instrumenting a condition for MC/DC.
2039    // LNot is taken as part of the condition for simplicity, and changing its
2040    // sense negatively impacts test vector tracking.
2041    bool MCDCCondition = CGM.getCodeGenOpts().hasProfileClangInstr() &&
2042                         CGM.getCodeGenOpts().MCDCCoverage &&
2044    if (CondUOp->getOpcode() == UO_LNot && !MCDCCondition) {
2045      // Negate the count.
2046      uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
2047      // The values of the enum are chosen to make this negation possible.
2048      LH = static_cast<Stmt::Likelihood>(-LH);
2049      // Negate the condition and swap the destination blocks.
2050      return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
2051                                  FalseCount, LH);
2052    }
2053  }
2054
2055  if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
2056    // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
2057    llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
2058    llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
2059
2060    // The ConditionalOperator itself has no likelihood information for its
2061    // true and false branches. This matches the behavior of __builtin_expect.
2062    ConditionalEvaluation cond(*this);
2063    EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock,
2065
2066    // When computing PGO branch weights, we only know the overall count for
2067    // the true block. This code is essentially doing tail duplication of the
2068    // naive code-gen, introducing new edges for which counts are not
2069    // available. Divide the counts proportionally between the LHS and RHS of
2070    // the conditional operator.
2071    uint64_t LHSScaledTrueCount = 0;
2072    if (TrueCount) {
2073      double LHSRatio =
2074          getProfileCount(CondOp) / (double)getCurrentProfileCount();
2075      LHSScaledTrueCount = TrueCount * LHSRatio;
2076    }
2077
2078    cond.begin(*this);
2079    EmitBlock(LHSBlock);
2081    {
2082      ApplyDebugLocation DL(*this, Cond);
2083      EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
2084                           LHSScaledTrueCount, LH, CondOp);
2085    }
2086    cond.end(*this);
2087
2088    cond.begin(*this);
2089    EmitBlock(RHSBlock);
2091    EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
2092                         TrueCount - LHSScaledTrueCount, LH, CondOp);
2093    cond.end(*this);
2094
2095    return;
2096  }
2097
2098  if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
2099    // Conditional operator handling can give us a throw expression as a
2100    // condition for a case like:
2101    //   br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f)
2102    // Fold this to:
2103    //   br(c, throw x, br(y, t, f))
2104    EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
2105    return;
2106  }
2107
2108  // Emit the code with the fully general case.
2109  llvm::Value *CondV;
2110  {
2111    ApplyDebugLocation DL(*this, Cond);
2112    CondV = EvaluateExprAsBool(Cond);
2113  }
2114
2115  MaybeEmitDeferredVarDeclInit(ConditionalDecl);
2116
2117  // If not at the top of the logical operator nest, update MCDC temp with the
2118  // boolean result of the evaluated condition.
2119  {
2120    const Expr *MCDCBaseExpr = Cond;
2121    // When a nested ConditionalOperator (ternary) is encountered in a boolean
2122    // expression, MC/DC tracks the result of the ternary, and this is tied to
2123    // the ConditionalOperator expression and not the ternary's LHS or RHS. If
2124    // this is the case, the ConditionalOperator expression is passed through
2125    // the ConditionalOp parameter and then used as the MCDC base expression.
2126    if (ConditionalOp)
2127      MCDCBaseExpr = ConditionalOp;
2128
2129    if (isMCDCBranchExpr(stripCond(MCDCBaseExpr)) &&
2131      maybeUpdateMCDCCondBitmap(MCDCBaseExpr, CondV);
2132  }
2133
2134  llvm::MDNode *Weights = nullptr;
2135  llvm::MDNode *Unpredictable = nullptr;
2136
2137  // If the branch has a condition wrapped by __builtin_unpredictable,
2138  // create metadata that specifies that the branch is unpredictable.
2139  // Don't bother if not optimizing because that metadata would not be used.
2140  auto *Call = dyn_cast<CallExpr>(Cond->IgnoreImpCasts());
2141  if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
2142    auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
2143    if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
2144      llvm::MDBuilder MDHelper(getLLVMContext());
2145      Unpredictable = MDHelper.createUnpredictable();
2146    }
2147  }
2148
2149  // If there is a Likelihood knowledge for the cond, lower it.
2150  // Note that if not optimizing this won't emit anything.
2151  llvm::Value *NewCondV = emitCondLikelihoodViaExpectIntrinsic(CondV, LH);
2152  if (CondV != NewCondV)
2153    CondV = NewCondV;
2154  else {
2155    // Otherwise, lower profile counts. Note that we do this even at -O0.
2156    uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount);
2157    Weights = createProfileWeights(TrueCount, CurrentCount - TrueCount);
2158  }
2159
2160  llvm::Instruction *BrInst = Builder.CreateCondBr(CondV, TrueBlock, FalseBlock,
2161                                                   Weights, Unpredictable);
2162  addInstToNewSourceAtom(BrInst, CondV);
2163
2164  switch (HLSLControlFlowAttr) {
2165  case HLSLControlFlowHintAttr::Microsoft_branch:
2166  case HLSLControlFlowHintAttr::Microsoft_flatten: {
2167    llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2168
// 1 encodes the "branch" hint, 2 encodes "flatten" in the metadata below.
2169    llvm::ConstantInt *BranchHintConstant =
2171            HLSLControlFlowHintAttr::Spelling::Microsoft_branch
2172            ? llvm::ConstantInt::get(CGM.Int32Ty, 1)
2173            : llvm::ConstantInt::get(CGM.Int32Ty, 2);
2174
2176        {MDHelper.createString("hlsl.controlflow.hint"),
2177         MDHelper.createConstant(BranchHintConstant)});
2178    BrInst->setMetadata("hlsl.controlflow.hint",
2179                        llvm::MDNode::get(CGM.getLLVMContext(), Vals));
2180    break;
2181  }
2182  // This is required to avoid warnings during compilation
2183  case HLSLControlFlowHintAttr::SpellingNotCalculated:
2184    break;
2185  }
2186}
2187
/// Emit a call argument either as a normal scalar expression or, when the
/// bit for \p Idx is set in \p ICEArguments, as a constant-folded integer so
/// the generated intrinsic receives a ConstantInt.
// NOTE(review): line 2198 (the constant-evaluation call producing Result,
// presumably E->getArg(Idx)->getIntegerConstantExpr(...)) was elided by the
// listing extraction.
2188llvm::Value *CodeGenFunction::EmitScalarOrConstFoldImmArg(unsigned ICEArguments,
2189                                                          unsigned Idx,
2190                                                          const CallExpr *E) {
2191  llvm::Value *Arg = nullptr;
2192  if ((ICEArguments & (1 << Idx)) == 0) {
2193    Arg = EmitScalarExpr(E->getArg(Idx));
2194  } else {
2195    // If this is required to be a constant, constant fold it so that we
2196    // know that the generated intrinsic gets a ConstantInt.
2197    std::optional<llvm::APSInt> Result =
2199    assert(Result && "Expected argument to be a constant");
2200    Arg = llvm::ConstantInt::get(getLLVMContext(), *Result);
2201  }
2202  return Arg;
2203}
2204
2205/// ErrorUnsupported - Print out an error that codegen doesn't support the
2206/// specified stmt yet.
// Thin forwarding wrapper: delegates diagnostics to the CodeGenModule so
// per-function code has a convenient entry point.
2207void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
2208  CGM.ErrorUnsupported(S, Type);
2209}
2210
2211/// emitNonZeroVLAInit - Emit the "zero" initialization of a
2212/// variable-length array whose elements have a non-zero bit-pattern.
2213///
2214/// \param baseType the inner-most element type of the array
2215/// \param src - a char* pointing to the bit-pattern for a single
2216/// base element of the array
2217/// \param sizeInChars - the total size of the VLA, in chars
// NOTE(review): the first signature line (2218) was elided by the listing;
// the parameter continuation lines below (2219-2220) are unchanged.
2219                               Address dest, Address src,
2220                               llvm::Value *sizeInChars) {
2221  CGBuilderTy &Builder = CGF.Builder;
2222
2223  CharUnits baseSize = CGF.getContext().getTypeSizeInChars(baseType);
2224  llvm::Value *baseSizeInChars
2225    = llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity());
2226
// Compute one-past-the-end so the loop below can terminate on pointer
// equality.
2227  Address begin = dest.withElementType(CGF.Int8Ty);
2228  llvm::Value *end = Builder.CreateInBoundsGEP(begin.getElementType(),
2229                                               begin.emitRawPointer(CGF),
2230                                               sizeInChars, "vla.end");
2231
2232  llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
2233  llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
2234  llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");
2235
2236  // Make a loop over the VLA. C99 guarantees that the VLA element
2237  // count must be nonzero.
2238  CGF.EmitBlock(loopBB);
2239
2240  llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur");
2241  cur->addIncoming(begin.emitRawPointer(CGF), originBB);
2242
2243  CharUnits curAlign =
2244    dest.getAlignment().alignmentOfArrayElement(baseSize);
2245
2246  // memcpy the individual element bit-pattern.
2247  Builder.CreateMemCpy(Address(cur, CGF.Int8Ty, curAlign), src, baseSizeInChars,
2248                       /*volatile*/ false);
2249
2250  // Go to the next element.
2251  llvm::Value *next =
2252      Builder.CreateInBoundsGEP(CGF.Int8Ty, cur, baseSizeInChars, "vla.next");
2253
2254  // Leave if that's the end of the VLA.
2255  llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
2256  Builder.CreateCondBr(done, contBB, loopBB);
// Second PHI incoming edge: the back-edge from the loop body itself.
2257  cur->addIncoming(next, loopBB);
2258
2259  CGF.EmitBlock(contBB);
2260}
2261
// NOTE(review): the first signature line (2262) was elided by the listing;
// this overload resolves a PFPField (offset + FieldDecl) by GEPping to the
// field's byte offset and delegating to the FieldDecl overload.
2263                                                const PFPField &Field) {
2264  return EmitAddressOfPFPField(
2265      RecordPtr,
2266      Builder.CreateConstInBoundsByteGEP(RecordPtr.withElementType(Int8Ty),
2267                                         Field.Offset),
2268      Field.Field);
2269}
2270
// NOTE(review): the first signature line (2271) was elided by the listing.
// Computes the protected (signed) address of a pointer field: the
// discriminator is either a stable SipHash of the field name (trivially
// copyable records) or the record's own address, then the
// llvm.protected_field_ptr intrinsic is invoked with a deactivation-symbol
// operand bundle.
2272                                                Address PtrPtr,
2273                                                const FieldDecl *Field) {
2274  llvm::Value *Disc;
2275  if (CGM.getContext().arePFPFieldsTriviallyCopyable(Field->getParent())) {
2276    uint64_t FieldSignature =
2277        llvm::getPointerAuthStableSipHash(CGM.getPFPFieldName(Field));
2278    Disc = llvm::ConstantInt::get(CGM.Int64Ty, FieldSignature);
2279  } else
// Non-trivially-copyable records use an address-dependent discriminator.
2280    Disc = Builder.CreatePtrToInt(RecordPtr.getBasePointer(), CGM.Int64Ty);
2281
2282  llvm::GlobalValue *DS = CGM.getPFPDeactivationSymbol(Field);
2283  llvm::OperandBundleDef DSBundle("deactivation-symbol", DS);
2284  llvm::Value *Args[] = {PtrPtr.getBasePointer(), Disc, Builder.getTrue()};
2285  return Address(
2286      Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::protected_field_ptr,
2287                                          PtrPtr.getType()),
2288                         Args, DSBundle),
2289      VoidPtrTy, PtrPtr.getAlignment());
2290}
2291
// NOTE(review): the signature line (2293, presumably
// `CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {`)
// and line 2303 (the `size` declaration read below) were elided by the
// listing extraction; visible code is unchanged.
2292void
2294  // Ignore empty classes in C++.
2295  if (getLangOpts().CPlusPlus)
2296    if (const auto *RD = Ty->getAsCXXRecordDecl(); RD && RD->isEmpty())
2297      return;
2298
// Normalize to an i8 destination so the memset/memcpy below work bytewise.
2299  if (DestPtr.getElementType() != Int8Ty)
2300    DestPtr = DestPtr.withElementType(Int8Ty);
2301
2302  // Get size and alignment info for this aggregate.
2304
2305  llvm::Value *SizeVal;
2306  const VariableArrayType *vla;
2307
2308  // Don't bother emitting a zero-byte memset.
2309  if (size.isZero()) {
2310    // But note that getTypeInfo returns 0 for a VLA.
2311    if (const VariableArrayType *vlaType =
2312            dyn_cast_or_null<VariableArrayType>(
2313                getContext().getAsArrayType(Ty))) {
2314      auto VlaSize = getVLASize(vlaType);
2315      SizeVal = VlaSize.NumElts;
2316      CharUnits eltSize = getContext().getTypeSizeInChars(VlaSize.Type);
2317      if (!eltSize.isOne())
2318        SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
2319      vla = vlaType;
2320    } else {
2321      return;
2322    }
2323  } else {
2324    SizeVal = CGM.getSize(size);
2325    vla = nullptr;
2326  }
2327
2328  // If the type contains a pointer to data member we can't memset it to zero.
2329  // Instead, create a null constant and copy it to the destination.
2330  // TODO: there are other patterns besides zero that we can usefully memset,
2331  // like -1, which happens to be the pattern used by member-pointers.
2332  if (!CGM.getTypes().isZeroInitializable(Ty)) {
2333    // For a VLA, emit a single element, then splat that over the VLA.
2334    if (vla) Ty = getContext().getBaseElementType(vla);
2335
2336    llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);
2337
// The null pattern is materialized once as a private constant global and
// copied from there.
2338    llvm::GlobalVariable *NullVariable =
2339      new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
2340                               /*isConstant=*/true,
2341                               llvm::GlobalVariable::PrivateLinkage,
2342                               NullConstant, Twine());
2343    CharUnits NullAlign = DestPtr.getAlignment();
2344    NullVariable->setAlignment(NullAlign.getAsAlign());
2345    Address SrcPtr(NullVariable, Builder.getInt8Ty(), NullAlign);
2346
2347    if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);
2348
2349    // Get and call the appropriate llvm.memcpy overload.
2350    Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, false);
2351  } else {
2352    // Otherwise, just memset the whole thing to zero. This is legal
2353    // because in LLVM, all default initializers (other than the ones we just
2354    // handled above, and the case handled below) are guaranteed to have a bit
2355    // pattern of all zeros.
2356    Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
2357  }
2358
2359  // With the pointer field protection feature, null pointers do not have a bit
2360  // pattern of zero in memory, so we must initialize them separately.
2361  for (auto &Field : getContext().findPFPFields(Ty)) {
2362    auto addr = EmitAddressOfPFPField(DestPtr, Field);
2363    Builder.CreateStore(llvm::ConstantPointerNull::get(VoidPtrTy), addr);
2364  }
2365}
2366
/// Return the address of a label for use by indirect goto, registering the
/// label's block as a destination of the function's shared indirect branch.
// NOTE(review): line 2370 (presumably the GetIndirectGotoBlock() call that
// creates IndirectBranch) was elided by the listing extraction.
2367llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
2368  // Make sure that there is a block for the indirect goto.
2369  if (!IndirectBranch)
2371
2372  llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();
2373
2374  // Make sure the indirect branch includes all of the address-taken blocks.
2375  IndirectBranch->addDestination(BB);
2376  return llvm::BlockAddress::get(CurFn->getType(), BB);
2377}
2378
// NOTE(review): the signature line (2379, presumably
// `llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {`) was elided
// by the listing extraction. Lazily creates the single shared block that
// hosts the function's indirectbr instruction and its destination PHI.
2380  // If we already made the indirect branch for indirect goto, return its block.
2381  if (IndirectBranch) return IndirectBranch->getParent();
2382
2383  CGBuilderTy TmpBuilder(CGM, createBasicBlock("indirectgoto"));
2384
2385  // Create the PHI node that indirect gotos will add entries to.
2386  llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
2387                                              "indirect.goto.dest");
2388
2389  // Create the indirect branch instruction.
2390  IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
2391  return IndirectBranch->getParent();
2392}
2393
2394/// Computes the length of an array in elements, as well as the base
2395/// element type and a properly-typed first element pointer.
// NOTE(review): lines 2404-2405 (the VLA test and its body opener), 2418
// (closing of the do/while), 2427 (the gepIndices declaration) and 2439
// (the loop's arrayType assert/advance) were elided by the listing
// extraction; visible code is unchanged.
2396llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
2397                                              QualType &baseType,
2398                                              Address &addr) {
2399  const ArrayType *arrayType = origArrayType;
2400
2401  // If it's a VLA, we have to load the stored size. Note that
2402  // this is the size of the VLA in bytes, not its size in elements.
2403  llvm::Value *numVLAElements = nullptr;
2406
2407    // Walk into all VLAs. This doesn't require changes to addr,
2408    // which has type T* where T is the first non-VLA element type.
2409    do {
2410      QualType elementType = arrayType->getElementType();
2411      arrayType = getContext().getAsArrayType(elementType);
2412
2413      // If we only have VLA components, 'addr' requires no adjustment.
2414      if (!arrayType) {
2415        baseType = elementType;
2416        return numVLAElements;
2417      }
2419
2420    // We get out here only if we find a constant array type
2421    // inside the VLA.
2422  }
2423
2424  // We have some number of constant-length arrays, so addr should
2425  // have LLVM type [M x [N x [...]]]*. Build a GEP that walks
2426  // down to the first element of addr.
2428
2429  // GEP down to the array type.
2430  llvm::ConstantInt *zero = Builder.getInt32(0);
2431  gepIndices.push_back(zero);
2432
2433  uint64_t countFromCLAs = 1;
2434  QualType eltType;
2435
2436  llvm::ArrayType *llvmArrayType =
2437    dyn_cast<llvm::ArrayType>(addr.getElementType());
2438  while (llvmArrayType) {
2440    assert(cast<ConstantArrayType>(arrayType)->getZExtSize() ==
2441           llvmArrayType->getNumElements());
2442
2443    gepIndices.push_back(zero);
// Total element count is the product of each constant dimension.
2444    countFromCLAs *= llvmArrayType->getNumElements();
2445    eltType = arrayType->getElementType();
2446
2447    llvmArrayType =
2448      dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
2449    arrayType = getContext().getAsArrayType(arrayType->getElementType());
2450    assert((!llvmArrayType || arrayType) &&
2451           "LLVM and Clang types are out-of-synch");
2452  }
2453
2454  if (arrayType) {
2455    // From this point onwards, the Clang array type has been emitted
2456    // as some other type (probably a packed struct). Compute the array
2457    // size, and just emit the 'begin' expression as a bitcast.
2458    while (arrayType) {
2459      countFromCLAs *= cast<ConstantArrayType>(arrayType)->getZExtSize();
2460      eltType = arrayType->getElementType();
2461      arrayType = getContext().getAsArrayType(eltType);
2462    }
2463
2464    llvm::Type *baseType = ConvertType(eltType);
2465    addr = addr.withElementType(baseType);
2466  } else {
2467    // Create the actual GEP.
2468    addr = Address(Builder.CreateInBoundsGEP(addr.getElementType(),
2469                                             addr.emitRawPointer(*this),
2470                                             gepIndices, "array.begin"),
2471                   ConvertTypeForMem(eltType), addr.getAlignment());
2472  }
2473
2474  baseType = eltType;
2475
2476  llvm::Value *numElements
2477    = llvm::ConstantInt::get(SizeTy, countFromCLAs);
2478
2479  // If we had any VLA dimensions, factor them in.
2480  if (numVLAElements)
2481    numElements = Builder.CreateNUWMul(numVLAElements, numElements);
2482
2483  return numElements;
2484}
2485
// NOTE(review): the signature lines (2486-2487) were elided by the listing;
// this is the QualType convenience overload that asserts the type is a VLA
// and forwards to the VariableArrayType overload.
2488  assert(vla && "type was not a variable array type!");
2489  return getVLASize(vla);
2490}
2491
// NOTE(review): the signature lines (2492-2493) were elided by the listing.
// Walks every VLA dimension of the type and multiplies the cached dimension
// sizes together, returning {total element count, innermost element type}.
2494  // The number of elements so far; always size_t.
2495  llvm::Value *numElements = nullptr;
2496
2497  QualType elementType;
2498  do {
2499    elementType = type->getElementType();
// Sizes were recorded earlier by EmitVariablyModifiedType.
2500    llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
2501    assert(vlaSize && "no size for VLA!");
2502    assert(vlaSize->getType() == SizeTy);
2503
2504    if (!numElements) {
2505      numElements = vlaSize;
2506    } else {
2507      // It's undefined behavior if this wraps around, so mark it that way.
2508      // FIXME: Teach -fsanitize=undefined to trap this.
2509      numElements = Builder.CreateNUWMul(numElements, vlaSize);
2510    }
2511  } while ((type = getContext().getAsVariableArrayType(elementType)));
2512
2513  return { numElements, elementType };
2514}
2515
// NOTE(review): the signature lines (2516-2518) were elided by the listing;
// this is the QualType convenience overload forwarding to the
// VariableArrayType overload after asserting the type is a VLA.
2519  assert(vla && "type was not a variable array type!");
2520  return getVLAElements1D(vla);
2521}
2522
// NOTE(review): the signature lines (2523-2524) were elided by the listing.
// Returns only the outermost dimension's cached size and element type —
// unlike getVLASize, no multiplication across nested VLA dimensions.
2525  llvm::Value *VlaSize = VLASizeMap[Vla->getSizeExpr()];
2526  assert(VlaSize && "no size for VLA!");
2527  assert(VlaSize->getType() == SizeTy);
2528  return { VlaSize, Vla->getElementType() };
2529}
2530
// NOTE(review): lines 2531 (signature, presumably
// `void CodeGenFunction::EmitVariablyModifiedType(QualType type) {`), 2535,
// and 2606 (the `vat` declaration used in the VariableArray case) were
// elided by the listing extraction; visible code is unchanged.
2532  assert(type->isVariablyModifiedType() &&
2533         "Must pass variably modified type to EmitVLASizes!");
2534
2536
2537  // We're going to walk down into the type and look for VLA
2538  // expressions.
2539  do {
2540    assert(type->isVariablyModifiedType());
2541
2542    const Type *ty = type.getTypePtr();
2543    switch (ty->getTypeClass()) {
2544
2545#define TYPE(Class, Base)
2546#define ABSTRACT_TYPE(Class, Base)
2547#define NON_CANONICAL_TYPE(Class, Base)
2548#define DEPENDENT_TYPE(Class, Base) case Type::Class:
2549#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
2550#include "clang/AST/TypeNodes.inc"
2551      llvm_unreachable("unexpected dependent type!");
2552
2553    // These types are never variably-modified.
2554    case Type::Builtin:
2555    case Type::Complex:
2556    case Type::Vector:
2557    case Type::ExtVector:
2558    case Type::ConstantMatrix:
2559    case Type::Record:
2560    case Type::Enum:
2561    case Type::Using:
2562    case Type::TemplateSpecialization:
2563    case Type::ObjCTypeParam:
2564    case Type::ObjCObject:
2565    case Type::ObjCInterface:
2566    case Type::ObjCObjectPointer:
2567    case Type::BitInt:
2568    case Type::HLSLInlineSpirv:
2569    case Type::PredefinedSugar:
2570      llvm_unreachable("type class is never variably-modified!");
2571
2572    case Type::Adjusted:
2573      type = cast<AdjustedType>(ty)->getAdjustedType();
2574      break;
2575
2576    case Type::Decayed:
2577      type = cast<DecayedType>(ty)->getPointeeType();
2578      break;
2579
2580    case Type::Pointer:
2581      type = cast<PointerType>(ty)->getPointeeType();
2582      break;
2583
2584    case Type::BlockPointer:
2585      type = cast<BlockPointerType>(ty)->getPointeeType();
2586      break;
2587
2588    case Type::LValueReference:
2589    case Type::RValueReference:
2590      type = cast<ReferenceType>(ty)->getPointeeType();
2591      break;
2592
2593    case Type::MemberPointer:
2594      type = cast<MemberPointerType>(ty)->getPointeeType();
2595      break;
2596
2597    case Type::ArrayParameter:
2598    case Type::ConstantArray:
2599    case Type::IncompleteArray:
2600      // Losing element qualification here is fine.
2601      type = cast<ArrayType>(ty)->getElementType();
2602      break;
2603
2604    case Type::VariableArray: {
2605      // Losing element qualification here is fine.
2607
2608      // Unknown size indication requires no size computation.
2609      // Otherwise, evaluate and record it.
2610      if (const Expr *sizeExpr = vat->getSizeExpr()) {
2611        // It's possible that we might have emitted this already,
2612        // e.g. with a typedef and a pointer to it.
2613        llvm::Value *&entry = VLASizeMap[sizeExpr];
2614        if (!entry) {
2615          llvm::Value *size = EmitScalarExpr(sizeExpr);
2616
2617          // C11 6.7.6.2p5:
2618          //   If the size is an expression that is not an integer constant
2619          //   expression [...] each time it is evaluated it shall have a value
2620          //   greater than zero.
2621          if (SanOpts.has(SanitizerKind::VLABound)) {
2622            auto CheckOrdinal = SanitizerKind::SO_VLABound;
2623            auto CheckHandler = SanitizerHandler::VLABoundNotPositive;
2624            SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
2625            llvm::Value *Zero = llvm::Constant::getNullValue(size->getType());
2626            clang::QualType SEType = sizeExpr->getType();
// Pick the signed or unsigned compare matching the size expression's type.
2627            llvm::Value *CheckCondition =
2628                SEType->isSignedIntegerType()
2629                    ? Builder.CreateICmpSGT(size, Zero)
2630                    : Builder.CreateICmpUGT(size, Zero);
2631            llvm::Constant *StaticArgs[] = {
2632                EmitCheckSourceLocation(sizeExpr->getBeginLoc()),
2633                EmitCheckTypeDescriptor(SEType)};
2634            EmitCheck(std::make_pair(CheckCondition, CheckOrdinal),
2635                      CheckHandler, StaticArgs, size);
2636          }
2637
2638          // Always zexting here would be wrong if it weren't
2639          // undefined behavior to have a negative bound.
2640          // FIXME: What about when size's type is larger than size_t?
2641          entry = Builder.CreateIntCast(size, SizeTy, /*signed*/ false);
2642        }
2643      }
2644      type = vat->getElementType();
2645      break;
2646    }
2647
2648    case Type::FunctionProto:
2649    case Type::FunctionNoProto:
2650      type = cast<FunctionType>(ty)->getReturnType();
2651      break;
2652
2653    case Type::Paren:
2654    case Type::TypeOf:
2655    case Type::UnaryTransform:
2656    case Type::Attributed:
2657    case Type::BTFTagAttributed:
2658    case Type::OverflowBehavior:
2659    case Type::HLSLAttributedResource:
2660    case Type::SubstTemplateTypeParm:
2661    case Type::MacroQualified:
2662    case Type::CountAttributed:
2663      // Keep walking after single level desugaring.
2664      type = type.getSingleStepDesugaredType(getContext());
2665      break;
2666
2667    case Type::Typedef:
2668    case Type::Decltype:
2669    case Type::Auto:
2670    case Type::DeducedTemplateSpecialization:
2671    case Type::PackIndexing:
2672      // Stop walking: nothing to do.
2673      return;
2674
2675    case Type::TypeOfExpr:
2676      // Stop walking: emit typeof expression.
2677      EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
2678      return;
2679
2680    case Type::Atomic:
2681      type = cast<AtomicType>(ty)->getValueType();
2682      break;
2683
2684    case Type::Pipe:
2685      type = cast<PipeType>(ty)->getElementType();
2686      break;
2687    }
2688  } while (type->isVariablyModifiedType());
2689}
2690
2692 if (getContext().getBuiltinVaListType()->isArrayType())
2693 return EmitPointerWithAlignment(E);
2694 return EmitLValue(E).getAddress();
2695}
2696
2700
2702 const APValue &Init) {
2703 assert(Init.hasValue() && "Invalid DeclRefExpr initializer!");
2704 if (CGDebugInfo *Dbg = getDebugInfo())
2705 if (CGM.getCodeGenOpts().hasReducedDebugInfo())
2706 Dbg->EmitGlobalVariable(E->getDecl(), Init);
2707}
2708
2711 // At the moment, the only aggressive peephole we do in IR gen
2712 // is trunc(zext) folding, but if we add more, we can easily
2713 // extend this protection.
2714
2715 if (!rvalue.isScalar()) return PeepholeProtection();
2716 llvm::Value *value = rvalue.getScalarVal();
2717 if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();
2718
2719 // Just make an extra bitcast.
2720 assert(HaveInsertPoint());
2721 llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
2722 Builder.GetInsertBlock());
2723
2724 PeepholeProtection protection;
2725 protection.Inst = inst;
2726 return protection;
2727}
2728
2730 if (!protection.Inst) return;
2731
2732 // In theory, we could try to duplicate the peepholes now, but whatever.
2733 protection.Inst->eraseFromParent();
2734}
2735
                                          QualType Ty, SourceLocation Loc,
                                          SourceLocation AssumptionLoc,
                                          llvm::Value *Alignment,
                                          llvm::Value *OffsetValue) {
  // Normalize the alignment (and optional offset) operands to intptr_t so
  // they can participate in pointer arithmetic below.  Alignment is treated
  // as unsigned, the offset as signed.
  if (Alignment->getType() != IntPtrTy)
    Alignment =
        Builder.CreateIntCast(Alignment, IntPtrTy, false, "casted.align");
  if (OffsetValue && OffsetValue->getType() != IntPtrTy)
    OffsetValue =
        Builder.CreateIntCast(OffsetValue, IntPtrTy, true, "casted.offset");
  llvm::Value *TheCheck = nullptr;
  if (SanOpts.has(SanitizerKind::Alignment)) {
    // Build the predicate ((ptr - offset) & (alignment - 1)) == 0 that the
    // alignment sanitizer will verify at run time.
    llvm::Value *PtrIntValue =
        Builder.CreatePtrToInt(PtrValue, IntPtrTy, "ptrint");

    if (OffsetValue) {
      // Skip the subtraction when the offset is a known zero constant.
      bool IsOffsetZero = false;
      if (const auto *CI = dyn_cast<llvm::ConstantInt>(OffsetValue))
        IsOffsetZero = CI->isZero();

      if (!IsOffsetZero)
        PtrIntValue = Builder.CreateSub(PtrIntValue, OffsetValue, "offsetptr");
    }

    llvm::Value *Zero = llvm::ConstantInt::get(IntPtrTy, 0);
    llvm::Value *Mask =
        Builder.CreateSub(Alignment, llvm::ConstantInt::get(IntPtrTy, 1));
    llvm::Value *MaskedPtr = Builder.CreateAnd(PtrIntValue, Mask, "maskedptr");
    TheCheck = Builder.CreateICmpEQ(MaskedPtr, Zero, "maskcond");
  }
  // Emit the llvm.assume-based alignment assumption itself.
  llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
      CGM.getDataLayout(), PtrValue, Alignment, OffsetValue);

  if (!SanOpts.has(SanitizerKind::Alignment))
    return;
  // With -fsanitize=alignment, additionally emit the runtime check built
  // above so a false assumption is diagnosed instead of being UB.
  emitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
                               OffsetValue, TheCheck, Assumption);
}
2775
2777 const Expr *E,
2778 SourceLocation AssumptionLoc,
2779 llvm::Value *Alignment,
2780 llvm::Value *OffsetValue) {
2781 QualType Ty = E->getType();
2782 SourceLocation Loc = E->getExprLoc();
2783
2784 emitAlignmentAssumption(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2785 OffsetValue);
2786}
2787
/// Emit a call to the given annotation intrinsic, passing the annotated
/// value together with the annotation string, the translation-unit string,
/// and the source line number (plus optional attribute arguments).
llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Function *AnnotationFn,
                                                 llvm::Value *AnnotatedVal,
                                                 StringRef AnnotationStr,
                                                 SourceLocation Location,
                                                 const AnnotateAttr *Attr) {
      AnnotatedVal,
      CGM.EmitAnnotationString(AnnotationStr),
      CGM.EmitAnnotationUnit(Location),
      CGM.EmitAnnotationLineNo(Location),
  };
  // Extra arguments supplied on the attribute (e.g. annotate("s", 1, 2))
  // are appended as one final operand when present.
  if (Attr)
    Args.push_back(CGM.EmitAnnotationArgs(Attr));
  return Builder.CreateCall(AnnotationFn, Args);
}
2803
2804void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
2805 assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2806 for (const auto *I : D->specific_attrs<AnnotateAttr>())
2807 EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation,
2808 {V->getType(), CGM.ConstGlobalsPtrTy}),
2809 V, I->getAnnotation(), D->getLocation(), I);
2810}
2811
                                              Address Addr) {
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
  llvm::Value *V = Addr.emitRawPointer(*this);
  llvm::Type *VTy = V->getType();
  // The llvm.ptr.annotation intrinsic is declared on a plain pointer in the
  // field's address space, whatever pointer type we happen to hold.
  auto *PTy = dyn_cast<llvm::PointerType>(VTy);
  unsigned AS = PTy ? PTy->getAddressSpace() : 0;
  llvm::PointerType *IntrinTy =
      llvm::PointerType::get(CGM.getLLVMContext(), AS);
  llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
                                       {IntrinTy, CGM.ConstGlobalsPtrTy});

  // Chain one intrinsic call per annotate attribute: each call's result
  // becomes the pointer fed to the next.
  for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
    // FIXME Always emit the cast inst so we can differentiate between
    // annotation on the first field of a struct and annotation on the struct
    // itself.
    if (VTy != IntrinTy)
      V = Builder.CreateBitCast(V, IntrinTy);
    V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation(), I);
    V = Builder.CreateBitCast(V, VTy);
  }

  // Preserve the original element type and alignment of the address.
  return Address(V, Addr.getElementType(), Addr.getAlignment());
}
2836
2838
2840 : CGF(CGF) {
2841 assert(!CGF->IsSanitizerScope);
2842 CGF->IsSanitizerScope = true;
2843}
2844
2846 CGF->IsSanitizerScope = false;
2847}
2848
2849void CodeGenFunction::InsertHelper(llvm::Instruction *I,
2850 const llvm::Twine &Name,
2851 llvm::BasicBlock::iterator InsertPt) const {
2852 LoopStack.InsertHelper(I);
2853 if (IsSanitizerScope)
2854 I->setNoSanitizeMetadata();
2855}
2856
2858 llvm::Instruction *I, const llvm::Twine &Name,
2859 llvm::BasicBlock::iterator InsertPt) const {
2860 llvm::IRBuilderDefaultInserter::InsertHelper(I, Name, InsertPt);
2861 if (CGF)
2862 CGF->InsertHelper(I, Name, InsertPt);
2863}
2864
// Emits an error if we don't have a valid set of target features for the
// called function.
                                          const FunctionDecl *TargetDecl) {
  // SemaChecking cannot handle below x86 builtins because they have different
  // parameter ranges with different TargetAttribute of caller.
  if (CGM.getContext().getTargetInfo().getTriple().isX86()) {
    unsigned BuiltinID = TargetDecl->getBuiltinID();
    if (BuiltinID == X86::BI__builtin_ia32_cmpps ||
        BuiltinID == X86::BI__builtin_ia32_cmpss ||
        BuiltinID == X86::BI__builtin_ia32_cmppd ||
        BuiltinID == X86::BI__builtin_ia32_cmpsd) {
      const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl);
      // NOTE(review): "TargetFetureMap" is a typo of "TargetFeatureMap";
      // purely cosmetic, the identifier is local to this function.
      llvm::StringMap<bool> TargetFetureMap;
      CGM.getContext().getFunctionFeatureMap(TargetFetureMap, FD);
      // Comparison predicates above 7 select AVX comparison modes, so the
      // caller must have the "avx" feature enabled for them.
      llvm::APSInt Result =
          *(E->getArg(2)->getIntegerConstantExpr(CGM.getContext()));
      if (Result.getSExtValue() > 7 && !TargetFetureMap.lookup("avx"))
        CGM.getDiags().Report(E->getBeginLoc(), diag::err_builtin_needs_feature)
            << TargetDecl->getDeclName() << "avx";
    }
  }
  // Defer the general feature validation to the location-based overload.
  return checkTargetFeatures(E->getBeginLoc(), TargetDecl);
}
2889
2890// Emits an error if we don't have a valid set of target features for the
2891// called function.
                                          const FunctionDecl *TargetDecl) {
  // Early exit if this is an indirect call.
  if (!TargetDecl)
    return;

  // Get the current enclosing function if it exists. If it doesn't
  // we can't check the target features anyhow.
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl);
  if (!FD)
    return;

  bool IsAlwaysInline = TargetDecl->hasAttr<AlwaysInlineAttr>();
  // NOTE(review): FD is already known non-null here, so the `FD &&` guard
  // below is redundant but harmless.
  bool IsFlatten = FD && FD->hasAttr<FlattenAttr>();

  // Grab the required features for the call. For a builtin this is listed in
  // the td file with the default cpu, for an always_inline function this is any
  // listed cpu and any listed features.
  unsigned BuiltinID = TargetDecl->getBuiltinID();
  std::string MissingFeature;
  llvm::StringMap<bool> CallerFeatureMap;
  CGM.getContext().getFunctionFeatureMap(CallerFeatureMap, FD);
  // When compiling in HipStdPar mode we have to be conservative in rejecting
  // target specific features in the FE, and defer the possible error to the
  // AcceleratorCodeSelection pass, wherein iff an unsupported target builtin is
  // referenced by an accelerator executable function, we emit an error.
  bool IsHipStdPar = getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice;
  if (BuiltinID) {
    // Builtin case: required features come from the builtin's .td entry.
    StringRef FeatureList(CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID));
        FeatureList, CallerFeatureMap) && !IsHipStdPar) {
      CGM.getDiags().Report(Loc, diag::err_builtin_needs_feature)
          << TargetDecl->getDeclName()
          << FeatureList;
    }
  } else if (!TargetDecl->isMultiVersion() &&
             TargetDecl->hasAttr<TargetAttr>()) {
    // Get the required features for the callee.

    const TargetAttr *TD = TargetDecl->getAttr<TargetAttr>();
        CGM.getContext().filterFunctionTargetAttrs(TD);

    SmallVector<StringRef, 1> ReqFeatures;
    llvm::StringMap<bool> CalleeFeatureMap;
    CGM.getContext().getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);

    // "+feat" entries from the target attribute that the callee's feature
    // map actually enables are required of the caller.
    for (const auto &F : ParsedAttr.Features) {
      if (F[0] == '+' && CalleeFeatureMap.lookup(F.substr(1)))
        ReqFeatures.push_back(StringRef(F).substr(1));
    }

    for (const auto &F : CalleeFeatureMap) {
      // Only positive features are "required".
      if (F.getValue())
        ReqFeatures.push_back(F.getKey());
    }
    // Diagnose the first required feature the caller does not provide.
    if (!llvm::all_of(ReqFeatures,
                      [&](StringRef Feature) {
                        if (!CallerFeatureMap.lookup(Feature)) {
                          MissingFeature = Feature.str();
                          return false;
                        }
                        return true;
                      }) &&
        !IsHipStdPar) {
      if (IsAlwaysInline)
        CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
            << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
      else if (IsFlatten)
        CGM.getDiags().Report(Loc, diag::err_flatten_function_needs_feature)
            << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
    }

  } else if (!FD->isMultiVersion() && FD->hasAttr<TargetAttr>()) {
    // The caller has a target attribute but the callee is a plain function:
    // every feature enabled in the callee must be enabled in the caller too.
    llvm::StringMap<bool> CalleeFeatureMap;
    CGM.getContext().getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);

    for (const auto &F : CalleeFeatureMap) {
      if (F.getValue() &&
          (!CallerFeatureMap.lookup(F.getKey()) ||
           !CallerFeatureMap.find(F.getKey())->getValue()) &&
          !IsHipStdPar) {
        if (IsAlwaysInline)
          CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
              << FD->getDeclName() << TargetDecl->getDeclName() << F.getKey();
        else if (IsFlatten)
          CGM.getDiags().Report(Loc, diag::err_flatten_function_needs_feature)
              << FD->getDeclName() << TargetDecl->getDeclName() << F.getKey();
      }
    }
  }
}
2985
2986void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
2987 if (!CGM.getCodeGenOpts().SanitizeStats)
2988 return;
2989
2990 llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
2991 IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
2992 CGM.getSanStats().create(IRB, SSK);
2993}
2994
2996 const CGCallee &Callee, SmallVectorImpl<llvm::OperandBundleDef> &Bundles) {
2997 const CGCalleeInfo &CI = Callee.getAbstractInfo();
2999 if (!FP)
3000 return;
3001
3002 StringRef Salt;
3003 if (const auto &Info = FP->getExtraAttributeInfo())
3004 Salt = Info.CFISalt;
3005
3006 Bundles.emplace_back("kcfi", CGM.CreateKCFITypeId(FP->desugar(), Salt));
3007}
3008
3009llvm::Value *
3010CodeGenFunction::FormAArch64ResolverCondition(const FMVResolverOption &RO) {
3011 return RO.Features.empty() ? nullptr : EmitAArch64CpuSupports(RO.Features);
3012}
3013
3014llvm::Value *
3015CodeGenFunction::FormX86ResolverCondition(const FMVResolverOption &RO) {
3016 llvm::Value *Condition = nullptr;
3017
3018 if (RO.Architecture) {
3019 StringRef Arch = *RO.Architecture;
3020 // If arch= specifies an x86-64 micro-architecture level, test the feature
3021 // with __builtin_cpu_supports, otherwise use __builtin_cpu_is.
3022 if (Arch.starts_with("x86-64"))
3023 Condition = EmitX86CpuSupports({Arch});
3024 else
3025 Condition = EmitX86CpuIs(Arch);
3026 }
3027
3028 if (!RO.Features.empty()) {
3029 llvm::Value *FeatureCond = EmitX86CpuSupports(RO.Features);
3030 Condition =
3031 Condition ? Builder.CreateAnd(Condition, FeatureCond) : FeatureCond;
3032 }
3033 return Condition;
3034}
3035
3037 llvm::Function *Resolver,
3038 CGBuilderTy &Builder,
3039 llvm::Function *FuncToReturn,
3040 bool SupportsIFunc) {
3041 if (SupportsIFunc) {
3042 Builder.CreateRet(FuncToReturn);
3043 return;
3044 }
3045
3047 llvm::make_pointer_range(Resolver->args()));
3048
3049 llvm::CallInst *Result = Builder.CreateCall(FuncToReturn, Args);
3050 Result->setTailCallKind(llvm::CallInst::TCK_MustTail);
3051
3052 if (Resolver->getReturnType()->isVoidTy())
3053 Builder.CreateRetVoid();
3054 else
3055 Builder.CreateRet(Result);
3056}
3057
3059 llvm::Function *Resolver, ArrayRef<FMVResolverOption> Options) {
3060
3061 llvm::Triple::ArchType ArchType =
3062 getContext().getTargetInfo().getTriple().getArch();
3063
3064 switch (ArchType) {
3065 case llvm::Triple::x86:
3066 case llvm::Triple::x86_64:
3067 EmitX86MultiVersionResolver(Resolver, Options);
3068 return;
3069 case llvm::Triple::aarch64:
3070 EmitAArch64MultiVersionResolver(Resolver, Options);
3071 return;
3072 case llvm::Triple::riscv32:
3073 case llvm::Triple::riscv64:
3074 case llvm::Triple::riscv32be:
3075 case llvm::Triple::riscv64be:
3076 EmitRISCVMultiVersionResolver(Resolver, Options);
3077 return;
3078 case llvm::Triple::ppc:
3079 case llvm::Triple::ppc64:
3080 if (getContext().getTargetInfo().getTriple().isOSAIX()) {
3081 EmitPPCAIXMultiVersionResolver(Resolver, Options);
3082 return;
3083 }
3084 [[fallthrough]];
3085 default:
3086 assert(false &&
3087 "Only implemented for x86, AArch64, RISC-V, and PowerPC AIX");
3088 }
3089}
3090
3091/**
3092 * define internal ptr @foo.resolver() {
3093 * entry:
3094 * %is_version_1 = __builtin_cpu_supports(version_1)
3095 * br i1 %1, label %if.version_1, label %if.else_2
3096 *
3097 * if.version_1:
3098 * ret ptr @foo.version_1
3099 *
3100 * if.else_2:
3101 * %is_version_2 = __builtin_cpu_supports(version_2)
3102 * ...
3103 * if.else: ; preds = %entry
3104 * ret ptr @foo.default
3105 * }
3106 */
    llvm::Function *Resolver, ArrayRef<FMVResolverOption> Options) {

  // entry:
  llvm::BasicBlock *CurBlock = createBasicBlock("entry", Resolver);

  for (const FMVResolverOption &RO : Options) {
    Builder.SetInsertPoint(CurBlock);
    // The 'default' or 'generic' case.
    if (!RO.Architecture && RO.Features.empty()) {
      // if.else:
      //   ret ptr @foo.default
      assert(&RO == Options.end() - 1 &&
             "Default or Generic case must be last");
      Builder.CreateRet(RO.Function);
      return;
    }
    // if.else_n:
    //   %is_version_n = __builtin_cpu_supports(version_n)
    //   br i1 %is_version_n, label %if.version_n, label %if.else_n+1
    //
    // if.version_n:
    //   ret ptr @foo_version_n
    assert(RO.Features.size() == 1 &&
           "for now one feature requirement per version");

    // Map the cpu= value onto the PowerPC architecture-level feature name
    // that __builtin_cpu_supports can test at run time.
    assert(RO.Features[0].starts_with("cpu="));
    StringRef CPU = RO.Features[0].split("=").second.trim();
    StringRef Feature = llvm::StringSwitch<StringRef>(CPU)
                            .Case("pwr7", "arch_2_06")
                            .Case("pwr8", "arch_2_07")
                            .Case("pwr9", "arch_3_00")
                            .Case("pwr10", "arch_3_1")
                            .Case("pwr11", "arch_3_1")
                            .Default("error");

    llvm::Value *Condition = EmitPPCBuiltinCpu(
        Builtin::BI__builtin_cpu_supports, Builder.getInt1Ty(), Feature);

    // Return this version when its feature test passes; otherwise continue
    // with the next candidate in a fresh "if.else" block.
    llvm::BasicBlock *ThenBlock = createBasicBlock("if.version", Resolver);
    CurBlock = createBasicBlock("if.else", Resolver);
    Builder.CreateCondBr(Condition, ThenBlock, CurBlock);

    Builder.SetInsertPoint(ThenBlock);
    Builder.CreateRet(RO.Function);
  }

  llvm_unreachable("Default case missing");
}
3157
    llvm::Function *Resolver, ArrayRef<FMVResolverOption> Options) {

  // RISC-V FMV is only supported on Linux (diagnosed otherwise).
  if (getContext().getTargetInfo().getTriple().getOS() !=
      llvm::Triple::OSType::Linux) {
    CGM.getDiags().Report(diag::err_os_unsupport_riscv_fmv);
    return;
  }

  llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
  Builder.SetInsertPoint(CurBlock);

  bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
  bool HasDefault = false;
  unsigned DefaultIndex = 0;

  // Check each candidate function.
  for (unsigned Index = 0; Index < Options.size(); Index++) {

    // Remember the default version (no feature requirements) for the end.
    if (Options[Index].Features.empty()) {
      HasDefault = true;
      DefaultIndex = Index;
      continue;
    }

    Builder.SetInsertPoint(CurBlock);

    // FeaturesCondition: The bitmask of the required extension has been
    // enabled by the runtime object.
    // (__riscv_feature_bits.features[i] & REQUIRED_BITMASK) ==
    //                                              REQUIRED_BITMASK
    //
    // When condition is met, return this version of the function.
    // Otherwise, try the next version.
    //
    // if (FeaturesConditionVersion1)
    //   return Version1;
    // else if (FeaturesConditionVersion2)
    //   return Version2;
    // else if (FeaturesConditionVersion3)
    //   return Version3;
    // ...
    // else
    //   return DefaultVersion;

    // TODO: Add a condition to check the length before accessing elements.
    // Without checking the length first, we may access an incorrect memory
    // address when using different versions.
    llvm::SmallVector<StringRef, 8> CurrTargetAttrFeats;
    llvm::SmallVector<std::string, 8> TargetAttrFeats;

    for (StringRef Feat : Options[Index].Features) {
      std::vector<std::string> FeatStr =

      assert(FeatStr.size() == 1 && "Feature string not delimited");

      // Only "+feat" (enabled-extension) entries participate in the test.
      std::string &CurrFeat = FeatStr.front();
      if (CurrFeat[0] == '+')
        TargetAttrFeats.push_back(CurrFeat.substr(1));
    }

    if (TargetAttrFeats.empty())
      continue;

    for (std::string &Feat : TargetAttrFeats)
      CurrTargetAttrFeats.push_back(Feat);

    Builder.SetInsertPoint(CurBlock);
    llvm::Value *FeatsCondition = EmitRISCVCpuSupports(CurrTargetAttrFeats);

    // Return this version when its feature test passes; otherwise fall
    // through to the next candidate via a fresh "resolver_else" block.
    llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
    CGBuilderTy RetBuilder(CGM, RetBlock);
    CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder,
                                     Options[Index].Function, SupportsIFunc);
    llvm::BasicBlock *ElseBlock = createBasicBlock("resolver_else", Resolver);

    Builder.SetInsertPoint(CurBlock);
    Builder.CreateCondBr(FeatsCondition, RetBlock, ElseBlock);

    CurBlock = ElseBlock;
  }

  // Finally, emit the default one.
  if (HasDefault) {
    Builder.SetInsertPoint(CurBlock);
      CGM, Resolver, Builder, Options[DefaultIndex].Function, SupportsIFunc);
    return;
  }

  // If no generic/default, emit an unreachable.
  Builder.SetInsertPoint(CurBlock);
  llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
  TrapCall->setDoesNotReturn();
  TrapCall->setDoesNotThrow();
  Builder.CreateUnreachable();
  Builder.ClearInsertionPoint();
}
3258
    llvm::Function *Resolver, ArrayRef<FMVResolverOption> Options) {
  assert(!Options.empty() && "No multiversion resolver options found");
  assert(Options.back().Features.size() == 0 && "Default case must be last");
  bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
  assert(SupportsIFunc &&
         "Multiversion resolver requires target IFUNC support");
  bool AArch64CpuInitialized = false;
  llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);

  for (const FMVResolverOption &RO : Options) {
    Builder.SetInsertPoint(CurBlock);
    llvm::Value *Condition = FormAArch64ResolverCondition(RO);

    // The 'default' or 'all features enabled' case.
    if (!Condition) {
      CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
                                       SupportsIFunc);
      return;
    }

    // Lazily emit the CPU-feature initialization at the top of the entry
    // block, the first time any version actually needs a runtime check.
    if (!AArch64CpuInitialized) {
      Builder.SetInsertPoint(CurBlock, CurBlock->begin());
      EmitAArch64CpuInit();
      AArch64CpuInitialized = true;
      Builder.SetInsertPoint(CurBlock);
    }

    // Skip unreachable versions.
    if (RO.Function == nullptr)
      continue;

    // Return this version when its condition holds; otherwise continue the
    // chain in a fresh "resolver_else" block.
    llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
    CGBuilderTy RetBuilder(CGM, RetBlock);
    CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
                                     SupportsIFunc);
    CurBlock = createBasicBlock("resolver_else", Resolver);
    Builder.CreateCondBr(Condition, RetBlock, CurBlock);
  }

  // If no default, emit an unreachable.
  Builder.SetInsertPoint(CurBlock);
  llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
  TrapCall->setDoesNotReturn();
  TrapCall->setDoesNotThrow();
  Builder.CreateUnreachable();
  Builder.ClearInsertionPoint();
}
3307
    llvm::Function *Resolver, ArrayRef<FMVResolverOption> Options) {

  bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();

  // Main function's basic block.
  llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
  Builder.SetInsertPoint(CurBlock);
  // Initialize the CPU-feature detection state before any
  // __builtin_cpu_is/__builtin_cpu_supports queries below.
  EmitX86CpuInit();

  for (const FMVResolverOption &RO : Options) {
    Builder.SetInsertPoint(CurBlock);
    llvm::Value *Condition = FormX86ResolverCondition(RO);

    // The 'default' or 'generic' case.
    if (!Condition) {
      assert(&RO == Options.end() - 1 &&
             "Default or Generic case must be last");
      CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
                                       SupportsIFunc);
      return;
    }

    // Return this version when its condition holds; otherwise continue the
    // chain in a fresh "resolver_else" block.
    llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
    CGBuilderTy RetBuilder(CGM, RetBlock);
    CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
                                     SupportsIFunc);
    CurBlock = createBasicBlock("resolver_else", Resolver);
    Builder.CreateCondBr(Condition, RetBlock, CurBlock);
  }

  // If no generic/default, emit an unreachable.
  Builder.SetInsertPoint(CurBlock);
  llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
  TrapCall->setDoesNotReturn();
  TrapCall->setDoesNotThrow();
  Builder.CreateUnreachable();
  Builder.ClearInsertionPoint();
}
3347
3348// Loc - where the diagnostic will point, where in the source code this
3349// alignment has failed.
3350// SecondaryLoc - if present (will be present if sufficiently different from
3351// Loc), the diagnostic will additionally point a "Note:" to this location.
3352// It should be the location where the __attribute__((assume_aligned))
3353// was written e.g.
    llvm::Value *Ptr, QualType Ty, SourceLocation Loc,
    SourceLocation SecondaryLoc, llvm::Value *Alignment,
    llvm::Value *OffsetValue, llvm::Value *TheCheck,
    llvm::Instruction *Assumption) {
  // The caller must pass the llvm.assume call it just emitted; it must also
  // still be the last instruction of the current block.
  assert(isa_and_nonnull<llvm::CallInst>(Assumption) &&
         cast<llvm::CallInst>(Assumption)->getCalledOperand() ==
             llvm::Intrinsic::getOrInsertDeclaration(
                 Builder.GetInsertBlock()->getParent()->getParent(),
                 llvm::Intrinsic::assume) &&
         "Assumption should be a call to llvm.assume().");
  assert(&(Builder.GetInsertBlock()->back()) == Assumption &&
         "Assumption should be the last instruction of the basic block, "
         "since the basic block is still being generated.");

  if (!SanOpts.has(SanitizerKind::Alignment))
    return;

  // Don't check pointers to volatile data. The behavior here is implementation-
  // defined.
    return;

  // We need to temporarily remove the assumption so we can insert the
  // sanitizer check before it, else the check will be dropped by optimizations.
  Assumption->removeFromParent();

  {
    auto CheckOrdinal = SanitizerKind::SO_Alignment;
    auto CheckHandler = SanitizerHandler::AlignmentAssumption;
    SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);

    if (!OffsetValue)
      OffsetValue = Builder.getInt1(false); // no offset.

    // Static data identifies the assumption site (and the attribute site
    // when it differs); dynamic data carries the values under test.
    llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc),
                                    EmitCheckSourceLocation(SecondaryLoc),
    llvm::Value *DynamicData[] = {Ptr, Alignment, OffsetValue};
    EmitCheck({std::make_pair(TheCheck, CheckOrdinal)}, CheckHandler,
              StaticData, DynamicData);
  }

  // We are now in the (new, empty) "cont" basic block.
  // Reintroduce the assumption.
  Builder.Insert(Assumption);
  // FIXME: Assumption still has its original basic block as its Parent.
}
3402
3404 if (CGDebugInfo *DI = getDebugInfo())
3405 return DI->SourceLocToDebugLoc(Location);
3406
3407 return llvm::DebugLoc();
3408}
3409
3410llvm::Value *
3411CodeGenFunction::emitCondLikelihoodViaExpectIntrinsic(llvm::Value *Cond,
3412 Stmt::Likelihood LH) {
3413 switch (LH) {
3414 case Stmt::LH_None:
3415 return Cond;
3416 case Stmt::LH_Likely:
3417 case Stmt::LH_Unlikely:
3418 // Don't generate llvm.expect on -O0 as the backend won't use it for
3419 // anything.
3420 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
3421 return Cond;
3422 llvm::Type *CondTy = Cond->getType();
3423 assert(CondTy->isIntegerTy(1) && "expecting condition to be a boolean");
3424 llvm::Function *FnExpect =
3425 CGM.getIntrinsic(llvm::Intrinsic::expect, CondTy);
3426 llvm::Value *ExpectedValueOfCond =
3427 llvm::ConstantInt::getBool(CondTy, LH == Stmt::LH_Likely);
3428 return Builder.CreateCall(FnExpect, {Cond, ExpectedValueOfCond},
3429 Cond->getName() + ".expval");
3430 }
3431 llvm_unreachable("Unknown Likelihood");
3432}
3433
3434llvm::Value *CodeGenFunction::emitBoolVecConversion(llvm::Value *SrcVec,
3435 unsigned NumElementsDst,
3436 const llvm::Twine &Name) {
3437 auto *SrcTy = cast<llvm::FixedVectorType>(SrcVec->getType());
3438 unsigned NumElementsSrc = SrcTy->getNumElements();
3439 if (NumElementsSrc == NumElementsDst)
3440 return SrcVec;
3441
3442 std::vector<int> ShuffleMask(NumElementsDst, -1);
3443 for (unsigned MaskIdx = 0;
3444 MaskIdx < std::min<>(NumElementsDst, NumElementsSrc); ++MaskIdx)
3445 ShuffleMask[MaskIdx] = MaskIdx;
3446
3447 return Builder.CreateShuffleVector(SrcVec, ShuffleMask, Name);
3448}
3449
3451 const CGPointerAuthInfo &PointerAuth,
3453 if (!PointerAuth.isSigned())
3454 return;
3455
3456 auto *Key = Builder.getInt32(PointerAuth.getKey());
3457
3458 llvm::Value *Discriminator = PointerAuth.getDiscriminator();
3459 if (!Discriminator)
3460 Discriminator = Builder.getSize(0);
3461
3462 llvm::Value *Args[] = {Key, Discriminator};
3463 Bundles.emplace_back("ptrauth", Args);
3464}
3465
3467 const CGPointerAuthInfo &PointerAuth,
3468 llvm::Value *Pointer,
3469 unsigned IntrinsicID) {
3470 if (!PointerAuth)
3471 return Pointer;
3472
3473 auto Key = CGF.Builder.getInt32(PointerAuth.getKey());
3474
3475 llvm::Value *Discriminator = PointerAuth.getDiscriminator();
3476 if (!Discriminator) {
3477 Discriminator = CGF.Builder.getSize(0);
3478 }
3479
3480 // Convert the pointer to intptr_t before signing it.
3481 auto OrigType = Pointer->getType();
3482 Pointer = CGF.Builder.CreatePtrToInt(Pointer, CGF.IntPtrTy);
3483
3484 // call i64 @llvm.ptrauth.sign.i64(i64 %pointer, i32 %key, i64 %discriminator)
3485 auto Intrinsic = CGF.CGM.getIntrinsic(IntrinsicID);
3486 Pointer = CGF.EmitRuntimeCall(Intrinsic, {Pointer, Key, Discriminator});
3487
3488 // Convert back to the original type.
3489 Pointer = CGF.Builder.CreateIntToPtr(Pointer, OrigType);
3490 return Pointer;
3491}
3492
3493llvm::Value *
3495 llvm::Value *Pointer) {
3496 if (!PointerAuth.shouldSign())
3497 return Pointer;
3498 return EmitPointerAuthCommon(*this, PointerAuth, Pointer,
3499 llvm::Intrinsic::ptrauth_sign);
3500}
3501
3502static llvm::Value *EmitStrip(CodeGenFunction &CGF,
3503 const CGPointerAuthInfo &PointerAuth,
3504 llvm::Value *Pointer) {
3505 auto StripIntrinsic = CGF.CGM.getIntrinsic(llvm::Intrinsic::ptrauth_strip);
3506
3507 auto Key = CGF.Builder.getInt32(PointerAuth.getKey());
3508 // Convert the pointer to intptr_t before signing it.
3509 auto OrigType = Pointer->getType();
3511 StripIntrinsic, {CGF.Builder.CreatePtrToInt(Pointer, CGF.IntPtrTy), Key});
3512 return CGF.Builder.CreateIntToPtr(Pointer, OrigType);
3513}
3514
3515llvm::Value *
3517 llvm::Value *Pointer) {
3518 if (PointerAuth.shouldStrip()) {
3519 return EmitStrip(*this, PointerAuth, Pointer);
3520 }
3521 if (!PointerAuth.shouldAuth()) {
3522 return Pointer;
3523 }
3524
3525 return EmitPointerAuthCommon(*this, PointerAuth, Pointer,
3526 llvm::Intrinsic::ptrauth_auth);
3527}
3528
3530 llvm::Instruction *KeyInstruction, llvm::Value *Backup) {
3531 if (CGDebugInfo *DI = getDebugInfo())
3532 DI->addInstToCurrentSourceAtom(KeyInstruction, Backup);
3533}
3534
3536 llvm::Instruction *KeyInstruction, llvm::Value *Backup, uint64_t Atom) {
3537 if (CGDebugInfo *DI = getDebugInfo())
3538 DI->addInstToSpecificSourceAtom(KeyInstruction, Backup, Atom);
3539}
3540
3541void CodeGenFunction::addInstToNewSourceAtom(llvm::Instruction *KeyInstruction,
3542 llvm::Value *Backup) {
3543 if (CGDebugInfo *DI = getDebugInfo()) {
3545 DI->addInstToCurrentSourceAtom(KeyInstruction, Backup);
3546 }
3547}
3548
3550 QualType Ty) {
3551 for (auto &Field : getContext().findPFPFields(Ty)) {
3552 if (getContext().arePFPFieldsTriviallyCopyable(Field.Field->getParent()))
3553 continue;
3554 auto DestFieldPtr = EmitAddressOfPFPField(DestPtr, Field);
3555 auto SrcFieldPtr = EmitAddressOfPFPField(SrcPtr, Field);
3556 Builder.CreateStore(Builder.CreateLoad(SrcFieldPtr), DestFieldPtr);
3557 }
3558}
static void findPFPFields(const ASTContext &Ctx, QualType Ty, CharUnits Offset, std::vector< PFPField > &Fields, bool IncludeVBases)
Defines the clang::ASTContext interface.
#define V(N, I)
This file provides some common utility functions for processing Lambda related AST Constructs.
Defines enum values for all the target-independent builtin functions.
static llvm::Value * EmitPointerAuthCommon(CodeGenFunction &CGF, const CGPointerAuthInfo &PointerAuth, llvm::Value *Pointer, unsigned IntrinsicID)
static void CreateMultiVersionResolverReturn(CodeGenModule &CGM, llvm::Function *Resolver, CGBuilderTy &Builder, llvm::Function *FuncToReturn, bool SupportsIFunc)
static llvm::Value * EmitStrip(CodeGenFunction &CGF, const CGPointerAuthInfo &PointerAuth, llvm::Value *Pointer)
static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType, Address dest, Address src, llvm::Value *sizeInChars)
emitNonZeroVLAInit - Emit the "zero" initialization of a variable-length array whose elements have a ...
static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB)
static LValue makeNaturalAlignAddrLValue(llvm::Value *V, QualType T, bool ForPointeeType, bool MightBeSigned, CodeGenFunction &CGF, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
static void TryMarkNoThrow(llvm::Function *F)
Tries to mark the given function nounwind based on the non-existence of any throwing calls within it.
static llvm::Constant * getPrologueSignature(CodeGenModule &CGM, const FunctionDecl *FD)
Return the UBSan prologue signature for FD if one is available.
static bool endsWithReturn(const Decl *F)
Determine whether the function F ends with a return stmt.
static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts, const LangOptions &LangOpts)
shouldEmitLifetimeMarkers - Decide whether we need to emit the life-time markers.
static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx)
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate....
static StringRef getTriple(const Command &Job)
Defines the Objective-C statement AST node classes.
Enumerates target-specific builtins in their own namespaces within namespace clang.
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat],...
Definition APValue.h:122
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:226
QualType getFunctionTypeWithExceptionSpec(QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) const
Get a function type and produce the equivalent function type with the specified exception specificati...
QualType getBaseElementType(const ArrayType *VAT) const
Return the innermost element type of an array type.
bool hasAnyFunctionEffects() const
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
static bool hasSameType(QualType T1, QualType T2)
Determine whether the given types T1 and T2 are equivalent.
const VariableArrayType * getAsVariableArrayType(QualType T) const
QualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.
const TargetInfo & getTargetInfo() const
Definition ASTContext.h:917
void getFunctionFeatureMap(llvm::StringMap< bool > &FeatureMap, const FunctionDecl *) const
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition TypeBase.h:3772
QualType getElementType() const
Definition TypeBase.h:3784
Attr - This represents one attribute.
Definition Attr.h:46
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:4041
static bool isLogicalOp(Opcode Opc)
Definition Expr.h:4174
BinaryOperatorKind Opcode
Definition Expr.h:4046
Represents a C++ constructor within a class.
Definition DeclCXX.h:2611
Represents a static or instance method of a struct/union/class.
Definition DeclCXX.h:2136
bool isImplicitObjectMemberFunction() const
[C++2b][dcl.fct]/p7 An implicit object member function is a non-static member function without an exp...
Definition DeclCXX.cpp:2728
const CXXRecordDecl * getParent() const
Return the parent of this method declaration, which is the class in which this method is defined.
Definition DeclCXX.h:2262
QualType getThisType() const
Return the type of the this pointer.
Definition DeclCXX.cpp:2827
bool isStatic() const
Definition DeclCXX.cpp:2419
bool isLambda() const
Determine whether this class describes a lambda function object.
Definition DeclCXX.h:1018
void getCaptureFields(llvm::DenseMap< const ValueDecl *, FieldDecl * > &Captures, FieldDecl *&ThisCapture) const
For a closure type, retrieve the mapping from captured variables and this to the non-static data memb...
Definition DeclCXX.cpp:1790
bool isCapturelessLambda() const
Definition DeclCXX.h:1064
bool isEmpty() const
Determine whether this is an empty class in the sense of (C++11 [meta.unary.prop]).
Definition DeclCXX.h:1186
A C++ throw-expression (C++ [except.throw]).
Definition ExprCXX.h:1209
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2946
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition Expr.h:3150
SourceLocation getBeginLoc() const
Definition Expr.h:3280
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition CharUnits.h:189
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition CharUnits.h:214
bool isOne() const
isOne - Test whether the quantity equals one.
Definition CharUnits.h:125
CodeGenOptions - Track various options which control how the code is optimized and passed to the back...
@ InAlloca
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
@ Indirect
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition Address.h:128
llvm::Value * getBasePointer() const
Definition Address.h:198
static Address invalid()
Definition Address.h:176
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition Address.h:253
CharUnits getAlignment() const
Definition Address.h:194
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition Address.h:209
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition Address.h:276
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition Address.h:204
A scoped helper to set the current source atom group for CGDebugInfo::addInstToCurrentSourceAtom.
A scoped helper to set the current debug location to the specified location or preferred location of ...
static ApplyDebugLocation CreateDefaultArtificial(CodeGenFunction &CGF, SourceLocation TemporaryLocation)
Apply TemporaryLocation if it is valid.
void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock::iterator InsertPt) const override
This forwards to CodeGenFunction::InsertHelper.
llvm::ConstantInt * getSize(CharUnits N)
Definition CGBuilder.h:109
@ RAA_DirectInMemory
Pass it on the stack using its defined layout.
Definition CGCXXABI.h:158
virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const =0
Returns how an argument of the given record type should be passed.
Abstract information about a function or function prototype.
Definition CGCall.h:41
const FunctionProtoType * getCalleeFunctionProtoType() const
Definition CGCall.h:56
All available information about a concrete callee.
Definition CGCall.h:63
static CGCallee forDirect(llvm::Constant *functionPtr, const CGCalleeInfo &abstractInfo=CGCalleeInfo())
Definition CGCall.h:137
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition CGDebugInfo.h:59
CGFunctionInfo - Class to encapsulate the information about a function definition.
llvm::Value * getDiscriminator() const
CallArgList - Type for representing both the value and type of arguments in a call.
Definition CGCall.h:274
void add(RValue rvalue, QualType type)
Definition CGCall.h:302
CGFPOptionsRAII(CodeGenFunction &CGF, FPOptions FPFeatures)
An object to manage conditionally-evaluated expressions.
An object which temporarily prevents a value from being destroyed by aggressive peephole optimization...
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
void EmitRISCVMultiVersionResolver(llvm::Function *Resolver, ArrayRef< FMVResolverOption > Options)
GlobalDecl CurGD
CurGD - The GlobalDecl for the current function being compiled.
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr, const VarDecl *ConditionalDecl=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
void setCurrentProfileCount(uint64_t Count)
Set the profiler's current count.
llvm::Value * emitBoolVecConversion(llvm::Value *SrcVec, unsigned NumElementsDst, const llvm::Twine &Name="")
void EmitAArch64MultiVersionResolver(llvm::Function *Resolver, ArrayRef< FMVResolverOption > Options)
JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target)
The given basic block lies in the current EH scope, but may be a target of a potentially scope-crossi...
llvm::Value * EmitScalarOrConstFoldImmArg(unsigned ICEArguments, unsigned Idx, const CallExpr *E)
SanitizerSet SanOpts
Sanitizers enabled for this function.
void EmitNullInitialization(Address DestPtr, QualType Ty)
EmitNullInitialization - Generate code to set a value of the given type to null, If the type contains...
void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl)
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
void EmitPPCAIXMultiVersionResolver(llvm::Function *Resolver, ArrayRef< FMVResolverOption > Options)
define internal ptr @foo.resolver() { entry: is_version_1 = __builtin_cpu_supports(version_1) br i1 %...
bool ShouldSkipSanitizerInstrumentation()
ShouldSkipSanitizerInstrumentation - Return true if the current function should not be instrumented w...
llvm::Value * EmitPPCBuiltinCpu(unsigned BuiltinID, llvm::Type *ReturnType, StringRef CPUStr)
Definition PPC.cpp:73
llvm::BlockAddress * GetAddrOfLabel(const LabelDecl *L)
llvm::Value * EmitRISCVCpuSupports(const CallExpr *E)
Definition RISCV.cpp:970
llvm::Value * EmitRISCVCpuInit()
Definition RISCV.cpp:960
static bool hasScalarEvaluationKind(QualType T)
llvm::Type * ConvertType(QualType T)
void GenerateCode(GlobalDecl GD, llvm::Function *Fn, const CGFunctionInfo &FnInfo)
void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK)
void addInstToNewSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
Add KeyInstruction and an optional Backup instruction to a new atom group (See ApplyAtomGroup for mor...
PeepholeProtection protectFromPeepholes(RValue rvalue)
protectFromPeepholes - Protect a value that we're intending to store to the side, but which will prob...
void EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD)
Definition CGClass.cpp:3203
bool CurFuncIsThunk
In C++, whether we are code generating a thunk.
LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T)
Given a value of type T* that may not be to a complete object, construct an l-value with the natural ...
JumpDest getJumpDestForLabel(const LabelDecl *S)
getBasicBlockForLabel - Return the LLVM basicblock that the specified label maps to.
Definition CGStmt.cpp:694
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint=true)
SmallVector< llvm::ConvergenceControlInst *, 4 > ConvergenceTokenStack
Stack to track the controlled convergence tokens.
void unprotectFromPeepholes(PeepholeProtection protection)
RValue convertTempToRValue(Address addr, QualType type, SourceLocation Loc)
Given the address of a temporary variable, produce an r-value of its type.
Definition CGExpr.cpp:7144
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
Definition CGExpr.cpp:4001
llvm::SmallVector< DeferredDeactivateCleanup > DeferredDeactivationCleanupStack
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void addInstToCurrentSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
See CGDebugInfo::addInstToCurrentSourceAtom.
const LangOptions & getLangOpts() const
void addInstToSpecificSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup, uint64_t Atom)
See CGDebugInfo::addInstToSpecificSourceAtom.
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
void EmitVarAnnotations(const VarDecl *D, llvm::Value *V)
Emit local annotations for the local variable V, declared by D.
llvm::BasicBlock * EHResumeBlock
EHResumeBlock - Unified block containing a call to llvm.eh.resume.
Address EmitFieldAnnotations(const FieldDecl *D, Address V)
Emit field annotations for the given field & value.
void EmitConstructorBody(FunctionArgList &Args)
EmitConstructorBody - Emits the body of the current constructor.
Definition CGClass.cpp:827
void EmitKCFIOperandBundle(const CGCallee &Callee, SmallVectorImpl< llvm::OperandBundleDef > &Bundles)
void EmitDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init)
Address makeNaturalAddressForPointer(llvm::Value *Ptr, QualType T, CharUnits Alignment=CharUnits::Zero(), bool ForPointeeType=false, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Construct an address with the natural alignment of T.
LValue MakeNaturalAlignPointeeRawAddrLValue(llvm::Value *V, QualType T)
Same as MakeNaturalAlignPointeeAddrLValue except that the pointer is known to be unsigned.
@ TCK_ConstructorCall
Checking the 'this' pointer for a constructor call.
@ TCK_MemberCall
Checking the 'this' pointer for a call to a non-static member function.
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
bool hasSkipCounter(const Stmt *S) const
llvm::AssertingVH< llvm::Instruction > AllocaInsertPt
AllocaInsertPoint - This is an instruction in the entry block before which we prefer to insert alloca...
void EmitFunctionBody(const Stmt *Body)
JumpDest ReturnBlock
ReturnBlock - Unified return block.
llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location)
Converts Location to a DebugLoc, if debug information is enabled.
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
Definition CGExpr.cpp:3891
llvm::DebugLoc EmitReturnBlock()
Emit the unified return block, trying to avoid its emission when possible.
RawAddress CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateDefaultAlignedTempAlloca - This creates an alloca with the default ABI alignment of the given L...
Definition CGExpr.cpp:176
const TargetInfo & getTarget() const
llvm::Value * EmitAnnotationCall(llvm::Function *AnnotationFn, llvm::Value *AnnotatedVal, StringRef AnnotationStr, SourceLocation Location, const AnnotateAttr *Attr)
Emit an annotation call (intrinsic).
Address EmitCompoundStmtWithoutScope(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
Definition CGStmt.cpp:569
void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize, std::initializer_list< llvm::Value ** > ValuesToReload={})
Takes the old cleanup stack size and emits the cleanup blocks that have been added.
void maybeCreateMCDCCondBitmap()
Allocate a temp value on the stack that MCDC can use to track condition results.
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
Definition CGExpr.cpp:251
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
Definition CGExpr.cpp:2497
static bool isInstrumentedCondition(const Expr *C)
isInstrumentedCondition - Determine whether the given condition is an instrumentable condition (i....
VlaSizePair getVLAElements1D(const VariableArrayType *vla)
Return the number of elements for a single dimension for the given array type.
RawAddress CreateIRTempWithoutCast(QualType T, const Twine &Name="tmp")
CreateIRTempWithoutCast - Create a temporary IR object of the given type, with appropriate alignment.
Definition CGExpr.cpp:183
bool AlwaysEmitXRayCustomEvents() const
AlwaysEmitXRayCustomEvents - Return true if we must unconditionally emit XRay custom event handling c...
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
llvm::Value * EmitPointerAuthSign(const CGPointerAuthInfo &Info, llvm::Value *Pointer)
void markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn)
Annotate the function with an attribute that disables TSan checking at runtime.
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
Definition CGExpr.cpp:232
void EmitPointerAuthOperandBundle(const CGPointerAuthInfo &Info, SmallVectorImpl< llvm::OperandBundleDef > &Bundles)
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs, const TrapReason *TR=nullptr)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
Definition CGExpr.cpp:4149
void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock::iterator InsertPt) const
CGBuilder insert helper.
llvm::Value * emitArrayLength(const ArrayType *arrayType, QualType &baseType, Address &addr)
emitArrayLength - Compute the length of an array, even if it's a VLA, and drill down to the base elem...
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
bool AlwaysEmitXRayTypedEvents() const
AlwaysEmitXRayTypedEvents - Return true if clang must unconditionally emit XRay typed event handling ...
void EmitStartEHSpec(const Decl *D)
EmitStartEHSpec - Emit the start of the exception spec.
void EmitDestructorBody(FunctionArgList &Args)
EmitDestructorBody - Emits the body of the current destructor.
Definition CGClass.cpp:1539
void EmitX86MultiVersionResolver(llvm::Function *Resolver, ArrayRef< FMVResolverOption > Options)
bool ShouldInstrumentFunction()
ShouldInstrumentFunction - Return true if the current function should be instrumented with __cyg_prof...
void maybeUpdateMCDCCondBitmap(const Expr *E, llvm::Value *Val)
Update the MCDC temp value with the condition's evaluated result.
void emitAlignmentAssumptionCheck(llvm::Value *Ptr, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue, llvm::Value *TheCheck, llvm::Instruction *Assumption)
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **CallOrInvoke, bool IsMustTail, SourceLocation Loc, bool IsVirtualFunctionPointerThunk=false)
EmitCall - Generate a call of the given function, expecting the given result type,...
Definition CGCall.cpp:5340
llvm::ConstantInt * getUBSanFunctionTypeHash(QualType T) const
Return a type hash constant for a function instrumented by -fsanitize=function.
void EmitBranchToCounterBlock(const Expr *Cond, BinaryOperator::Opcode LOp, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount=0, Stmt::Likelihood LH=Stmt::LH_None, const Expr *CntrIdx=nullptr)
EmitBranchToCounterBlock - Emit a conditional branch to a new block that increments a profile counter...
VlaSizePair getVLASize(const VariableArrayType *vla)
Returns an LLVM value that corresponds to the size, in non-variably-sized elements,...
bool isMCDCBranchExpr(const Expr *E) const
void EmitMultiVersionResolver(llvm::Function *Resolver, ArrayRef< FMVResolverOption > Options)
const Decl * CurFuncDecl
CurFuncDecl - Holds the Decl for the current outermost non-closure context.
static const Expr * stripCond(const Expr *C)
Ignore parentheses and logical-NOT to track conditions consistently.
void EmitFunctionProlog(const CGFunctionInfo &FI, llvm::Function *Fn, const FunctionArgList &Args)
EmitFunctionProlog - Emit the target specific LLVM code to load the arguments for the given function.
Definition CGCall.cpp:3195
Address EmitAddressOfPFPField(Address RecordPtr, const PFPField &Field)
void SetFastMathFlags(FPOptions FPFeatures)
Set the codegen fast-math flags.
llvm::SmallVector< char, 256 > LifetimeExtendedCleanupStack
Address EmitVAListRef(const Expr *E)
void EmitLambdaInAllocaCallOpBody(const CXXMethodDecl *MD)
Definition CGClass.cpp:3260
Address ReturnValuePointer
ReturnValuePointer - The temporary alloca to hold a pointer to sret.
static bool mightAddDeclToScope(const Stmt *S)
Determine if the given statement might introduce a declaration into the current scope,...
void EmitStmt(const Stmt *S, ArrayRef< const Attr * > Attrs={})
EmitStmt - Emit the code for the statement.
Definition CGStmt.cpp:58
llvm::DenseMap< const ValueDecl *, FieldDecl * > LambdaCaptureFields
bool AutoreleaseResult
In ARC, whether we should autorelease the return value.
llvm::CallInst * EmitRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
uint64_t getCurrentProfileCount()
Get the profiler's current count.
llvm::Type * ConvertTypeForMem(QualType T)
void EmitEndEHSpec(const Decl *D)
EmitEndEHSpec - Emit the end of the exception spec.
LValue EmitLValueForLambdaField(const FieldDecl *Field)
Definition CGExpr.cpp:5611
void emitPFPPostCopyUpdates(Address DestPtr, Address SrcPtr, QualType Ty)
Copy all PFP fields from SrcPtr to DestPtr while updating signatures, assuming that DestPtr was alrea...
CodeGenTypes & getTypes() const
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
bool IsSanitizerScope
True if CodeGen currently emits code implementing sanitizer checks.
static bool containsBreak(const Stmt *S)
containsBreak - Return true if the statement contains a break out of it.
void emitImplicitAssignmentOperatorBody(FunctionArgList &Args)
Definition CGClass.cpp:1662
HLSLControlFlowHintAttr::Spelling HLSLControlFlowAttr
HLSL Branch attribute.
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, QualType Type, SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
llvm::SmallVector< const ParmVarDecl *, 4 > FnArgs
Save Parameter Decl for coroutine.
void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc, SourceLocation EndLoc, uint64_t RetKeyInstructionsSourceAtom)
EmitFunctionEpilog - Emit the target specific LLVM code to return the given temporary.
Definition CGCall.cpp:4089
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
Definition CGExpr.cpp:1591
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
Definition CGStmt.cpp:660
RawAddress NormalCleanupDest
i32s containing the indexes of the cleanup destinations.
llvm::Type * convertTypeForLoadStore(QualType ASTTy, llvm::Type *LLVMTy=nullptr)
llvm::BasicBlock * GetIndirectGotoBlock()
EHScopeStack::stable_iterator PrologueCleanupDepth
PrologueCleanupDepth - The cleanup depth enclosing all the cleanups associated with the parameters.
Address EmitMSVAListRef(const Expr *E)
Emit a "reference" to a __builtin_ms_va_list; this is always the value of the expression,...
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
llvm::CallInst * EmitTrapCall(llvm::Intrinsic::ID IntrID)
Emit a call to trap or debugtrap and attach function attribute "trap-func-name" if specified.
Definition CGExpr.cpp:4569
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
const CGFunctionInfo * CurFnInfo
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
Definition CGExpr.cpp:1707
bool ShouldXRayInstrumentFunction() const
ShouldXRayInstrument - Return true if the current function should be instrumented with XRay nop sleds...
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
llvm::LLVMContext & getLLVMContext()
bool SawAsmBlock
Whether we processed a Microsoft-style asm block during CodeGen.
bool checkIfFunctionMustProgress()
Returns true if a function must make progress, which means the mustprogress attribute can be added.
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
bool isMCDCDecisionExpr(const Expr *E) const
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
void MaybeEmitDeferredVarDeclInit(const VarDecl *var)
Definition CGDecl.cpp:2089
void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S)
When instrumenting to collect profile data, the counts for some blocks such as switch cases need to n...
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition CGStmt.cpp:640
LValue MakeNaturalAlignRawAddrLValue(llvm::Value *V, QualType T)
QualType BuildFunctionArgList(GlobalDecl GD, FunctionArgList &Args)
llvm::Value * EmitPointerAuthAuth(const CGPointerAuthInfo &Info, llvm::Value *Pointer)
This class organizes the cross-function state that is used while generating LLVM code.
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
void GenKernelArgMetadata(llvm::Function *FN, const FunctionDecl *FD=nullptr, CodeGenFunction *CGF=nullptr)
OpenCL v1.2 s5.6.4.6 allows the compiler to store kernel argument information in the program executab...
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys={})
Per-function PGO state.
Definition CodeGenPGO.h:29
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
bool inheritingCtorHasParams(const InheritedConstructor &Inherited, CXXCtorType Type)
Determine if a C++ inheriting constructor should have parameters matching those of its inherited cons...
Definition CGCall.cpp:394
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition CGCall.h:375
LValue - This represents an lvalue references.
Definition CGValue.h:183
llvm::Value * getPointer(CodeGenFunction &CGF) const
Address getAddress() const
Definition CGValue.h:373
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition CGValue.h:42
bool isScalar() const
Definition CGValue.h:64
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition CGValue.h:72
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
Definition CGCall.h:379
virtual llvm::Constant * getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const
Return a constant used by UBSan as a signature to identify functions possessing type information,...
Definition TargetInfo.h:243
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition Stmt.h:1741
ConditionalOperator - The ?: ternary operator.
Definition Expr.h:4394
A reference to a declared variable, function, enum, etc.
Definition Expr.h:1273
ValueDecl * getDecl()
Definition Expr.h:1341
Decl - This represents one declaration (or definition), e.g.
Definition DeclBase.h:86
T * getAttr() const
Definition DeclBase.h:573
ASTContext & getASTContext() const LLVM_READONLY
Definition DeclBase.cpp:546
Decl * getNonClosureContext()
Find the innermost non-closure ancestor of this declaration, walking up through blocks,...
llvm::iterator_range< specific_attr_iterator< T > > specific_attrs() const
Definition DeclBase.h:559
SourceLocation getLocation() const
Definition DeclBase.h:439
bool hasAttr() const
Definition DeclBase.h:577
This represents one expression.
Definition Expr.h:112
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const
Returns the set of floating point options that apply to this expression.
Definition Expr.cpp:3989
std::optional< llvm::APSInt > getIntegerConstantExpr(const ASTContext &Ctx) const
isIntegerConstantExpr - Return the value if this expression is a valid integer constant expression.
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:277
QualType getType() const
Definition Expr.h:144
ExtVectorType - Extended vector type.
Definition TypeBase.h:4317
LangOptions::FPExceptionModeKind getExceptionMode() const
bool allowFPContractAcrossStatement() const
RoundingMode getRoundingMode() const
Represents a member of a struct/union/class.
Definition Decl.h:3175
Represents a function declaration or definition.
Definition Decl.h:2015
bool isMultiVersion() const
True if this function is considered a multiversioned function.
Definition Decl.h:2704
Stmt * getBody(const FunctionDecl *&Definition) const
Retrieve the body (definition) of the function.
Definition Decl.cpp:3280
unsigned getBuiltinID(bool ConsiderWrapperFunctions=false) const
Returns a value indicating whether this function corresponds to a builtin function.
Definition Decl.cpp:3763
bool UsesFPIntrin() const
Determine whether the function was declared in source context that requires constrained FP intrinsics...
Definition Decl.h:2924
bool usesSEHTry() const
Indicates the function uses __try.
Definition Decl.h:2533
QualType getReturnType() const
Definition Decl.h:2860
ArrayRef< ParmVarDecl * > parameters() const
Definition Decl.h:2789
FunctionDecl * getTemplateInstantiationPattern(bool ForDefinition=true) const
Retrieve the function declaration from which this function could be instantiated, if it is an instant...
Definition Decl.cpp:4265
FunctionEffectsRef getFunctionEffects() const
Definition Decl.h:3149
bool isMSVCRTEntryPoint() const
Determines whether this function is a MSVCRT user defined entry point.
Definition Decl.cpp:3376
bool isInlineBuiltinDeclaration() const
Determine if this function provides an inline implementation of a builtin.
Definition Decl.cpp:3527
bool hasImplicitReturnZero() const
Whether falling off this function implicitly returns null/zero.
Definition Decl.h:2443
bool isMain() const
Determines whether this function is "main", which is the entry point into an executable program.
Definition Decl.cpp:3369
bool isDefaulted() const
Whether this function is defaulted.
Definition Decl.h:2400
OverloadedOperatorKind getOverloadedOperator() const
getOverloadedOperator - Which C++ overloaded operator this function represents, if any.
Definition Decl.cpp:4131
FunctionDecl * getPreviousDecl()
Return the previous declaration of this declaration or NULL if this is the first declaration.
Represents a prototype with parameter type info, e.g.
Definition TypeBase.h:5357
QualType desugar() const
Definition TypeBase.h:5938
FunctionTypeExtraAttributeInfo getExtraAttributeInfo() const
Return the extra attribute information.
Definition TypeBase.h:5846
FunctionType - C99 6.7.5.3 - Function Declarators.
Definition TypeBase.h:4553
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
CXXCtorType getCtorType() const
Definition GlobalDecl.h:108
KernelReferenceKind getKernelReferenceKind() const
Definition GlobalDecl.h:135
const Decl * getDecl() const
Definition GlobalDecl.h:106
One of these records is kept for each identifier that is lexed.
bool isStr(const char(&Str)[StrLen]) const
Return true if this is the identifier for the specified string.
static ImplicitParamDecl * Create(ASTContext &C, DeclContext *DC, SourceLocation IdLoc, IdentifierInfo *Id, QualType T, ImplicitParamKind ParamKind)
Create implicit parameter.
Definition Decl.cpp:5603
Represents the declaration of a label.
Definition Decl.h:524
FPExceptionModeKind
Possible floating point exception behavior.
@ FPE_Strict
Strictly preserve the floating-point exception semantics.
@ FPE_MayTrap
Transformations do not cause new exceptions but may hide some.
@ FPE_Ignore
Assume that floating-point exceptions are masked.
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
SanitizerSet Sanitize
Set of enabled sanitizers.
RoundingMode getDefaultRoundingMode() const
DeclarationName getDeclName() const
Get the actual, stored name of the declaration, which may be a special name.
Definition Decl.h:340
Represents a parameter to a function.
Definition Decl.h:1805
ParsedAttr - Represents a syntactic attribute.
Definition ParsedAttr.h:119
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3378
@ Forbid
Profiling is forbidden using the noprofile attribute.
Definition ProfileList.h:37
@ Skip
Profiling is skipped using the skipprofile attribute.
Definition ProfileList.h:35
@ Allow
Profiling is allowed.
Definition ProfileList.h:33
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition TypeBase.h:8515
field_range fields() const
Definition Decl.h:4545
Encodes a location in the source.
A trivial tuple used to represent a source range.
Stmt - This represents one statement.
Definition Stmt.h:86
child_range children()
Definition Stmt.cpp:304
StmtClass getStmtClass() const
Definition Stmt.h:1494
Likelihood
The likelihood of a branch being taken.
Definition Stmt.h:1437
@ LH_Unlikely
Branch has the [[unlikely]] attribute.
Definition Stmt.h:1438
@ LH_None
No attribute set or branches of the IfStmt have the same attribute.
Definition Stmt.h:1439
@ LH_Likely
Branch has the [[likely]] attribute.
Definition Stmt.h:1441
bool isMicrosoft() const
Is this ABI an MSVC-compatible ABI?
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
virtual std::optional< std::pair< unsigned, unsigned > > getVScaleRange(const LangOptions &LangOpts, ArmStreamingKind Mode, llvm::StringMap< bool > *FeatureMap=nullptr) const
Returns target-specific min and max values VScale_Range.
bool supportsIFunc() const
Identify whether this target supports IFuncs.
TargetCXXABI getCXXABI() const
Get the C++ ABI currently in use.
virtual ParsedTargetAttr parseTargetAttr(StringRef Str) const
bool isVoidType() const
Definition TypeBase.h:9034
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char,...
Definition Type.cpp:2231
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:754
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition TypeBase.h:2850
TypeClass getTypeClass() const
Definition TypeBase.h:2433
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9261
bool isObjCRetainableType() const
Definition Type.cpp:5368
bool isFunctionNoProtoType() const
Definition TypeBase.h:2648
bool isCFIUncheckedCalleeFunctionType() const
Definition TypeBase.h:8714
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2247
QualType getType() const
Definition Decl.h:723
Represents a variable declaration or definition.
Definition Decl.h:926
Represents a C array with a specified size that is not an integer-constant-expression.
Definition TypeBase.h:4016
Expr * getSizeExpr() const
Definition TypeBase.h:4030
QualType getElementType() const
Definition TypeBase.h:4239
Defines the clang::TargetInfo interface.
#define UINT_MAX
Definition limits.h:64
bool evaluateRequiredTargetFeatures(llvm::StringRef RequiredFatures, const llvm::StringMap< bool > &TargetFetureMap)
Returns true if the required target features of a builtin function are enabled.
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
Definition CGValue.h:155
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
Definition CGValue.h:146
TypeEvaluationKind
The kind of evaluation to perform on values of a particular type.
CGBuilderInserter CGBuilderInserterTy
Definition CGBuilder.h:47
constexpr XRayInstrMask Typed
Definition XRayInstr.h:42
constexpr XRayInstrMask FunctionExit
Definition XRayInstr.h:40
constexpr XRayInstrMask FunctionEntry
Definition XRayInstr.h:39
constexpr XRayInstrMask Custom
Definition XRayInstr.h:41
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ CPlusPlus
Expr * IgnoreBuiltinExpectSingleStep(Expr *E)
Definition IgnoreExpr.h:135
@ NonNull
Values of this type can never be null.
Definition Specifiers.h:350
Expr * IgnoreExprNodes(Expr *E, FnTys &&... Fns)
Given an expression E and functions Fn_1,...,Fn_n : Expr * -> Expr *, Recursively apply each of the f...
Definition IgnoreExpr.h:24
nullptr
This class represents a compute construct, representing a 'Kind' of ‘parallel’, 'serial',...
Expr * Cond
};
bool isLambdaCallOperator(const CXXMethodDecl *MD)
Definition ASTLambda.h:28
@ Result
The result type of a method or function.
Definition TypeBase.h:905
Expr * IgnoreImplicitCastsSingleStep(Expr *E)
Definition IgnoreExpr.h:38
Expr * IgnoreUOpLNotSingleStep(Expr *E)
Definition IgnoreExpr.h:127
Expr * IgnoreParensSingleStep(Expr *E)
Definition IgnoreExpr.h:157
llvm::fp::ExceptionBehavior ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind)
U cast(CodeGen::Address addr)
Definition Address.h:327
bool IsArmStreamingFunction(const FunctionDecl *FD, bool IncludeLocallyStreaming)
Returns whether the given FunctionDecl has an __arm[_locally]_streaming attribute.
Definition Decl.cpp:6100
@ Other
Other implicit parameter.
Definition Decl.h:1761
@ EST_None
no exception specification
@ Implicit
An implicit conversion.
Definition Sema.h:440
A jump destination is an abstract label, branching to which may require a jump out through normal cle...
This structure provides a set of types that are commonly used during IR emission.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:648
A FunctionEffect plus a potential boolean expression determining whether the effect is declared (e....
Definition TypeBase.h:5094
Contains information gathered from parsing the contents of TargetAttr.
Definition TargetInfo.h:60
std::vector< std::string > Features
Definition TargetInfo.h:61
void set(SanitizerMask K, bool Value)
Enable or disable a certain (single) sanitizer.
Definition Sanitizers.h:187
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition Sanitizers.h:174