//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//
12
13#include "CodeGenFunction.h"
14#include "CGBlocks.h"
15#include "CGCUDARuntime.h"
16#include "CGCXXABI.h"
17#include "CGCleanup.h"
18#include "CGDebugInfo.h"
19#include "CGHLSLRuntime.h"
20#include "CGOpenMPRuntime.h"
21#include "CodeGenModule.h"
22#include "CodeGenPGO.h"
23#include "TargetInfo.h"
25#include "clang/AST/ASTLambda.h"
26#include "clang/AST/Attr.h"
27#include "clang/AST/Decl.h"
28#include "clang/AST/DeclCXX.h"
29#include "clang/AST/Expr.h"
31#include "clang/AST/StmtCXX.h"
32#include "clang/AST/StmtObjC.h"
39#include "llvm/ADT/ArrayRef.h"
40#include "llvm/ADT/ScopeExit.h"
41#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
42#include "llvm/IR/DataLayout.h"
43#include "llvm/IR/Dominators.h"
44#include "llvm/IR/FPEnv.h"
45#include "llvm/IR/Instruction.h"
46#include "llvm/IR/IntrinsicInst.h"
47#include "llvm/IR/Intrinsics.h"
48#include "llvm/IR/MDBuilder.h"
49#include "llvm/Support/CRC.h"
50#include "llvm/Support/xxhash.h"
51#include "llvm/Transforms/Scalar/LowerExpectIntrinsic.h"
52#include "llvm/Transforms/Utils/PromoteMemToReg.h"
53#include <optional>
54
using namespace clang;
using namespace CodeGen;

namespace llvm {
extern cl::opt<bool> EnableSingleByteCoverage;
} // namespace llvm

/// shouldEmitLifetimeMarkers - Decide whether we need to emit the lifetime
/// markers.
static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
                                      const LangOptions &LangOpts) {
  if (CGOpts.DisableLifetimeMarkers)
    return false;

  // Sanitizers may use markers.
  if (CGOpts.SanitizeAddressUseAfterScope ||
      LangOpts.Sanitize.has(SanitizerKind::HWAddress) ||
      LangOpts.Sanitize.has(SanitizerKind::Memory) ||
      LangOpts.Sanitize.has(SanitizerKind::MemtagStack))
    return true;

  // For now, only in optimized builds.
  return CGOpts.OptimizationLevel != 0;
}
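
// Illustrative: plain -O0 with no sanitizers yields 'false' here, while -O2,
// or -O0 combined with -fsanitize=address and
// -fsanitize-address-use-after-scope, yields 'true'.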

CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
    : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
      Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
              CGBuilderInserterTy(this)),
      SanOpts(CGM.getLangOpts().Sanitize), CurFPFeatures(CGM.getLangOpts()),
      DebugInfo(CGM.getModuleDebugInfo()),
      PGO(std::make_unique<CodeGenPGO>(cgm)),
      ShouldEmitLifetimeMarkers(
          shouldEmitLifetimeMarkers(CGM.getCodeGenOpts(), CGM.getLangOpts())) {
  if (!suppressNewContext)
    CGM.getCXXABI().getMangleContext().startNewFunction();
  EHStack.setCGF(this);

  SetFastMathFlags(CurFPFeatures);
}

CodeGenFunction::~CodeGenFunction() {
  assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");
  assert(DeferredDeactivationCleanupStack.empty() &&
         "missed to deactivate a cleanup");

  if (getLangOpts().OpenMP && CurFn)
    CGM.getOpenMPRuntime().functionFinished(*this);

  // If we have an OpenMPIRBuilder we want to finalize functions (incl.
  // outlining etc) at some point. Doing it once the function codegen is done
  // seems to be a reasonable spot. We do it here, as opposed to the deletion
  // time of the CodeGenModule, because we have to ensure the IR has not yet
  // been "emitted" to the outside; modifications are therefore still possible.
  if (CGM.getLangOpts().OpenMPIRBuilder && CurFn)
    CGM.getOpenMPRuntime().getOMPBuilder().finalize(CurFn);
}

// Map the LangOption for exception behavior into
// the corresponding enum in the IR.
llvm::fp::ExceptionBehavior
clang::ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind) {
  switch (Kind) {
  case LangOptions::FPE_Ignore:  return llvm::fp::ebIgnore;
  case LangOptions::FPE_MayTrap: return llvm::fp::ebMayTrap;
  case LangOptions::FPE_Strict:  return llvm::fp::ebStrict;
  default:
    llvm_unreachable("Unsupported FP Exception Behavior");
  }
}

void CodeGenFunction::SetFastMathFlags(FPOptions FPFeatures) {
  llvm::FastMathFlags FMF;
  FMF.setAllowReassoc(FPFeatures.getAllowFPReassociate());
  FMF.setNoNaNs(FPFeatures.getNoHonorNaNs());
  FMF.setNoInfs(FPFeatures.getNoHonorInfs());
  FMF.setNoSignedZeros(FPFeatures.getNoSignedZero());
  FMF.setAllowReciprocal(FPFeatures.getAllowReciprocal());
  FMF.setApproxFunc(FPFeatures.getAllowApproxFunc());
  FMF.setAllowContract(FPFeatures.allowFPContractAcrossStatement());
  Builder.setFastMathFlags(FMF);
}
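
// Illustrative: under -ffast-math every flag above is set, so subsequent
// float arithmetic is emitted as, e.g., 'fadd fast float %a, %b'.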

CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF,
                                                  const Expr *E)
    : CGF(CGF) {
  ConstructorHelper(E->getFPFeaturesInEffect(CGF.getLangOpts()));
}

CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF,
                                                  FPOptions FPFeatures)
    : CGF(CGF) {
  ConstructorHelper(FPFeatures);
}

void CodeGenFunction::CGFPOptionsRAII::ConstructorHelper(FPOptions FPFeatures) {
  OldFPFeatures = CGF.CurFPFeatures;
  CGF.CurFPFeatures = FPFeatures;

  OldExcept = CGF.Builder.getDefaultConstrainedExcept();
  OldRounding = CGF.Builder.getDefaultConstrainedRounding();

  if (OldFPFeatures == FPFeatures)
    return;

  FMFGuard.emplace(CGF.Builder);

  llvm::RoundingMode NewRoundingBehavior = FPFeatures.getRoundingMode();
  CGF.Builder.setDefaultConstrainedRounding(NewRoundingBehavior);
  auto NewExceptionBehavior =
      ToConstrainedExceptMD(FPFeatures.getExceptionMode());
  CGF.Builder.setDefaultConstrainedExcept(NewExceptionBehavior);

  CGF.SetFastMathFlags(FPFeatures);

  assert((CGF.CurFuncDecl == nullptr || CGF.Builder.getIsFPConstrained() ||
          isa<CXXConstructorDecl>(CGF.CurFuncDecl) ||
          isa<CXXDestructorDecl>(CGF.CurFuncDecl) ||
          (NewExceptionBehavior == llvm::fp::ebIgnore &&
           NewRoundingBehavior == llvm::RoundingMode::NearestTiesToEven)) &&
         "FPConstrained should be enabled on entire function");

  auto mergeFnAttrValue = [&](StringRef Name, bool Value) {
    auto OldValue =
        CGF.CurFn->getFnAttribute(Name).getValueAsBool();
    auto NewValue = OldValue & Value;
    if (OldValue != NewValue)
      CGF.CurFn->addFnAttr(Name, llvm::toStringRef(NewValue));
  };
  mergeFnAttrValue("no-infs-fp-math", FPFeatures.getNoHonorInfs());
  mergeFnAttrValue("no-nans-fp-math", FPFeatures.getNoHonorNaNs());
  mergeFnAttrValue("no-signed-zeros-fp-math", FPFeatures.getNoSignedZero());
}

CodeGenFunction::CGFPOptionsRAII::~CGFPOptionsRAII() {
  CGF.CurFPFeatures = OldFPFeatures;
  CGF.Builder.setDefaultConstrainedExcept(OldExcept);
  CGF.Builder.setDefaultConstrainedRounding(OldRounding);
}
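
// Typical use (illustrative): scope the FP environment of a pragma-affected
// expression to its codegen, e.g.
//   {
//     CGFPOptionsRAII FPOptsRAII(CGF, E); // pushes E's FPOptions
//     ... emit code for E ...
//   } // destructor restores the previous FP state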

static LValue
makeNaturalAlignAddrLValue(llvm::Value *V, QualType T, bool ForPointeeType,
                           bool MightBeSigned, CodeGenFunction &CGF,
                           KnownNonNull_t IsKnownNonNull = NotKnownNonNull) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  CharUnits Alignment =
      CGF.CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo, ForPointeeType);
  Address Addr =
      MightBeSigned
          ? CGF.makeNaturalAddressForPointer(V, T, Alignment, false, nullptr,
                                             nullptr, IsKnownNonNull)
          : Address(V, CGF.ConvertTypeForMem(T), Alignment, IsKnownNonNull);
  return CGF.MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
}

LValue
CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T,
                                            KnownNonNull_t IsKnownNonNull) {
  return ::makeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ false,
                                      /*MightBeSigned*/ true, *this,
                                      IsKnownNonNull);
}

LValue
CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
  return ::makeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ true,
                                      /*MightBeSigned*/ true, *this);
}

LValue CodeGenFunction::MakeNaturalAlignRawAddrLValue(llvm::Value *V,
                                                      QualType T) {
  return ::makeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ false,
                                      /*MightBeSigned*/ false, *this);
}

LValue CodeGenFunction::MakeNaturalAlignPointeeRawAddrLValue(llvm::Value *V,
                                                             QualType T) {
  return ::makeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ true,
                                      /*MightBeSigned*/ false, *this);
}

llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  return CGM.getTypes().ConvertTypeForMem(T);
}

llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  return CGM.getTypes().ConvertType(T);
}

llvm::Type *CodeGenFunction::convertTypeForLoadStore(QualType ASTTy,
                                                     llvm::Type *LLVMTy) {
  return CGM.getTypes().convertTypeForLoadStore(ASTTy, LLVMTy);
}

TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
  type = type.getCanonicalType();
  while (true) {
    switch (type->getTypeClass()) {
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.inc"
      llvm_unreachable("non-canonical or dependent type in IR-generation");

    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      llvm_unreachable("undeduced type in IR-generation");

    // Various scalar types.
    case Type::Builtin:
    case Type::Pointer:
    case Type::BlockPointer:
    case Type::LValueReference:
    case Type::RValueReference:
    case Type::MemberPointer:
    case Type::Vector:
    case Type::ExtVector:
    case Type::ConstantMatrix:
    case Type::FunctionProto:
    case Type::FunctionNoProto:
    case Type::Enum:
    case Type::ObjCObjectPointer:
    case Type::Pipe:
    case Type::BitInt:
    case Type::HLSLAttributedResource:
    case Type::HLSLInlineSpirv:
      return TEK_Scalar;

    // Complexes.
    case Type::Complex:
      return TEK_Complex;

    // Arrays, records, and Objective-C objects.
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::Record:
    case Type::ObjCObject:
    case Type::ObjCInterface:
    case Type::ArrayParameter:
      return TEK_Aggregate;

    // We operate on atomic values according to their underlying type.
    case Type::Atomic:
      type = cast<AtomicType>(type)->getValueType();
      continue;
    }
    llvm_unreachable("unknown type kind!");
  }
}
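
// Illustrative mapping: 'int' and 'float *' are TEK_Scalar, '_Complex double'
// is TEK_Complex, and a struct or C array is TEK_Aggregate; '_Atomic(int)' is
// classified by its underlying 'int' and so is TEK_Scalar.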

llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
  // For cleanliness, we try to avoid emitting the return block for
  // simple cases.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point, reuse it if it is empty or there are no
    // explicit jumps to the return block.
    if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
      ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
      delete ReturnBlock.getBlock();
      ReturnBlock = JumpDest();
    } else
      EmitBlock(ReturnBlock.getBlock());
    return llvm::DebugLoc();
  }

  // Otherwise, if the return block is the target of a single direct
  // branch then we can just put the code in that block instead. This
  // cleans up functions which started with a unified return block.
  if (ReturnBlock.getBlock()->hasOneUse()) {
    llvm::BranchInst *BI =
        dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
    if (BI && BI->isUnconditional() &&
        BI->getSuccessor(0) == ReturnBlock.getBlock()) {
      // Record/return the DebugLoc of the simple 'return' expression to be
      // used later by the actual 'ret' instruction.
      llvm::DebugLoc Loc = BI->getDebugLoc();
      Builder.SetInsertPoint(BI->getParent());
      BI->eraseFromParent();
      delete ReturnBlock.getBlock();
      ReturnBlock = JumpDest();
      return Loc;
    }
  }

  // FIXME: We are at an unreachable point, there is no reason to emit the block
  // unless it has uses. However, we still need a place to put the debug
  // region.end for now.

  EmitBlock(ReturnBlock.getBlock());
  return llvm::DebugLoc();
}
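
// Illustrative: for 'int f() { return 0; }' there is a single unconditional
// branch into the return block, so the branch is folded away above and the
// DebugLoc of the 'return' travels with the eventual 'ret' instruction.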

static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
  if (!BB) return;
  if (!BB->use_empty()) {
    CGF.CurFn->insert(CGF.CurFn->end(), BB);
    return;
  }
  delete BB;
}

void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  assert(BreakContinueStack.empty() &&
         "mismatched push/pop in break/continue stack!");
  assert(LifetimeExtendedCleanupStack.empty() &&
         "mismatched push/pop of cleanups in EHStack!");
  assert(DeferredDeactivationCleanupStack.empty() &&
         "mismatched activate/deactivate of cleanups!");

  if (CGM.shouldEmitConvergenceTokens()) {
    ConvergenceTokenStack.pop_back();
    assert(ConvergenceTokenStack.empty() &&
           "mismatched push/pop in convergence stack!");
  }

  bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
                               && NumSimpleReturnExprs == NumReturnExprs
                               && ReturnBlock.getBlock()->use_empty();
  // Usually the return expression is evaluated before the cleanup
  // code. If the function contains only a simple return statement,
  // such as a constant, the location before the cleanup code becomes
  // the last useful breakpoint in the function, because the simple
  // return expression will be evaluated after the cleanup code. To be
  // safe, set the debug location for cleanup code to the location of
  // the return statement. Otherwise the cleanup code should be at the
  // end of the function's lexical scope.
  //
  // If there are multiple branches to the return block, the branch
  // instructions will get the location of the return statements and
  // all will be fine.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (OnlySimpleReturnStmts)
      DI->EmitLocation(Builder, LastStopPoint);
    else
      DI->EmitLocation(Builder, EndLoc);
  }

  // Pop any cleanups that might have been associated with the
  // parameters. Do this in whatever block we're currently in; it's
  // important to do this before we enter the return block or return
  // edges will be *really* confused.
  bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth;
  bool HasOnlyNoopCleanups =
      HasCleanups && EHStack.containsOnlyNoopCleanups(PrologueCleanupDepth);
  bool EmitRetDbgLoc = !HasCleanups || HasOnlyNoopCleanups;

  std::optional<ApplyDebugLocation> OAL;
  if (HasCleanups) {
    // Make sure the line table doesn't jump back into the body for
    // the ret after it's been at EndLoc.
    if (CGDebugInfo *DI = getDebugInfo()) {
      if (OnlySimpleReturnStmts)
        DI->EmitLocation(Builder, EndLoc);
      else
        // We may not have a valid end location. Try to apply it anyway, and
        // fall back to an artificial location if needed.
        OAL = ApplyDebugLocation::CreateDefaultArtificial(*this, EndLoc);
    }

    PopCleanupBlocks(PrologueCleanupDepth);
  }

  // Emit function epilog (to return).
  llvm::DebugLoc Loc = EmitReturnBlock();

  if (ShouldInstrumentFunction()) {
    if (CGM.getCodeGenOpts().InstrumentFunctions)
      CurFn->addFnAttr("instrument-function-exit", "__cyg_profile_func_exit");
    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
      CurFn->addFnAttr("instrument-function-exit-inlined",
                       "__cyg_profile_func_exit");
  }

  // Emit debug descriptor for function end.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitFunctionEnd(Builder, CurFn);

  // Reset the debug location to that of the simple 'return' expression, if
  // any, rather than that of the end of the function's scope '}'.
  uint64_t RetKeyInstructionsAtomGroup = Loc ? Loc->getAtomGroup() : 0;
  ApplyDebugLocation AL(*this, Loc);
  EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc,
                     RetKeyInstructionsAtomGroup);
  EmitEndEHSpec(CurCodeDecl);

  assert(EHStack.empty() &&
         "did not remove all scopes from cleanup stack!");

  // If someone did an indirect goto, emit the indirect goto block at the end of
  // the function.
  if (IndirectBranch) {
    EmitBlock(IndirectBranch->getParent());
    Builder.ClearInsertionPoint();
  }

  // If some of our locals escaped, insert a call to llvm.localescape in the
  // entry block.
  if (!EscapedLocals.empty()) {
    // Invert the map from local to index into a simple vector. There should be
    // no holes.
    SmallVector<llvm::Value *, 4> EscapeArgs;
    EscapeArgs.resize(EscapedLocals.size());
    for (auto &Pair : EscapedLocals)
      EscapeArgs[Pair.second] = Pair.first;
    llvm::Function *FrameEscapeFn = llvm::Intrinsic::getOrInsertDeclaration(
        &CGM.getModule(), llvm::Intrinsic::localescape);
    CGBuilderTy(*this, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
  }

  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  llvm::Instruction *Ptr = AllocaInsertPt;
  AllocaInsertPt = nullptr;
  Ptr->eraseFromParent();

  // PostAllocaInsertPt was lazily created when it was required; remove it now,
  // since it too was just a convenience for us.
  if (PostAllocaInsertPt) {
    llvm::Instruction *PostPtr = PostAllocaInsertPt;
    PostAllocaInsertPt = nullptr;
    PostPtr->eraseFromParent();
  }

  // If someone took the address of a label but never did an indirect goto, we
  // made a zero entry PHI node, which is illegal, zap it now.
  if (IndirectBranch) {
    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
    if (PN->getNumIncomingValues() == 0) {
      PN->replaceAllUsesWith(llvm::PoisonValue::get(PN->getType()));
      PN->eraseFromParent();
    }
  }

  EmitIfUsed(*this, EHResumeBlock);
  EmitIfUsed(*this, TerminateLandingPad);
  EmitIfUsed(*this, TerminateHandler);
  EmitIfUsed(*this, UnreachableBlock);

  for (const auto &FuncletAndParent : TerminateFunclets)
    EmitIfUsed(*this, FuncletAndParent.second);

  if (CGM.getCodeGenOpts().EmitDeclMetadata)
    EmitDeclMetadata();

  for (const auto &R : DeferredReplacements) {
    if (llvm::Value *Old = R.first) {
      Old->replaceAllUsesWith(R.second);
      cast<llvm::Instruction>(Old)->eraseFromParent();
    }
  }
  DeferredReplacements.clear();

  // Eliminate the CleanupDestSlot alloca by replacing it with SSA values and
  // PHIs if the current function is a coroutine. We don't do this for all
  // functions, as it may cause a slight increase in the number of instructions
  // when compiled with no optimizations. We do it for coroutines because the
  // lifetime of the CleanupDestSlot alloca makes correct coroutine frame
  // building very difficult.
  if (NormalCleanupDest.isValid() && isCoroutine()) {
    llvm::DominatorTree DT(*CurFn);
    llvm::PromoteMemToReg(
        cast<llvm::AllocaInst>(NormalCleanupDest.getPointer()), DT);
    NormalCleanupDest = Address::invalid();
  }

  // Scan function arguments for vector width.
  for (llvm::Argument &A : CurFn->args())
    if (auto *VT = dyn_cast<llvm::VectorType>(A.getType()))
      LargestVectorWidth =
          std::max((uint64_t)LargestVectorWidth,
                   VT->getPrimitiveSizeInBits().getKnownMinValue());

  // Update vector width based on return type.
  if (auto *VT = dyn_cast<llvm::VectorType>(CurFn->getReturnType()))
    LargestVectorWidth =
        std::max((uint64_t)LargestVectorWidth,
                 VT->getPrimitiveSizeInBits().getKnownMinValue());

  if (CurFnInfo->getMaxVectorWidth() > LargestVectorWidth)
    LargestVectorWidth = CurFnInfo->getMaxVectorWidth();

  // Add the min-legal-vector-width attribute. This contains the max width from:
  // 1. min-vector-width attribute used in the source program.
  // 2. Any builtins used that have a vector width specified.
  // 3. Values passed in and out of inline assembly.
  // 4. Width of vector arguments and return types for this function.
  // 5. Width of vector arguments and return types for functions called by this
  //    function.
  if (getContext().getTargetInfo().getTriple().isX86())
    CurFn->addFnAttr("min-legal-vector-width",
                     llvm::utostr(LargestVectorWidth));

  // If we generated an unreachable return block, delete it now.
  if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) {
    Builder.ClearInsertionPoint();
    ReturnBlock.getBlock()->eraseFromParent();
  }
  if (ReturnValue.isValid()) {
    auto *RetAlloca =
        dyn_cast<llvm::AllocaInst>(ReturnValue.emitRawPointer(*this));
    if (RetAlloca && RetAlloca->use_empty()) {
      RetAlloca->eraseFromParent();
      ReturnValue = Address::invalid();
    }
  }
}

/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls.
bool CodeGenFunction::ShouldInstrumentFunction() {
  if (!CGM.getCodeGenOpts().InstrumentFunctions &&
      !CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining &&
      !CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
    return false;
  if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
    return false;
  return true;
}

bool CodeGenFunction::ShouldSkipSanitizerInstrumentation() {
  if (!CurFuncDecl)
    return false;
  return CurFuncDecl->hasAttr<DisableSanitizerInstrumentationAttr>();
}

/// ShouldXRayInstrument - Return true if the current function should be
/// instrumented with XRay nop sleds.
bool CodeGenFunction::ShouldXRayInstrumentFunction() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions;
}

/// AlwaysEmitXRayCustomEvents - Return true if we should emit IR for calls to
/// the __xray_customevent(...) builtin calls, when doing XRay instrumentation.
bool CodeGenFunction::AlwaysEmitXRayCustomEvents() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
         (CGM.getCodeGenOpts().XRayAlwaysEmitCustomEvents ||
          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
              XRayInstrKind::Custom);
}

bool CodeGenFunction::AlwaysEmitXRayTypedEvents() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
         (CGM.getCodeGenOpts().XRayAlwaysEmitTypedEvents ||
          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
              XRayInstrKind::Typed);
}

llvm::ConstantInt *
CodeGenFunction::getUBSanFunctionTypeHash(QualType Ty) const {
  // Remove any (C++17) exception specifications, to allow calling e.g. a
  // noexcept function through a non-noexcept pointer.
  if (!Ty->isFunctionNoProtoType())
    Ty = getContext().getFunctionTypeWithExceptionSpec(Ty, EST_None);
  std::string Mangled;
  llvm::raw_string_ostream Out(Mangled);
  CGM.getCXXABI().getMangleContext().mangleCanonicalTypeName(Ty, Out, false);
  return llvm::ConstantInt::get(
      CGM.Int32Ty, static_cast<uint32_t>(llvm::xxh3_64bits(Mangled)));
}
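
// Illustrative: 'void(int)' and 'void(int) noexcept' mangle to the same
// canonical name after the exception specification is dropped, so both hash
// to the same 32-bit value and calling a noexcept function through a
// non-noexcept pointer does not trip -fsanitize=function.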

void CodeGenFunction::EmitKernelMetadata(const FunctionDecl *FD,
                                         llvm::Function *Fn) {
  if (!FD->hasAttr<DeviceKernelAttr>() && !FD->hasAttr<CUDAGlobalAttr>())
    return;

  llvm::LLVMContext &Context = getLLVMContext();

  CGM.GenKernelArgMetadata(Fn, FD, this);

  if (!(getLangOpts().OpenCL ||
        (getLangOpts().CUDA &&
         getContext().getTargetInfo().getTriple().isSPIRV())))
    return;

  if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
    QualType HintQTy = A->getTypeHint();
    const ExtVectorType *HintEltQTy = HintQTy->getAs<ExtVectorType>();
    bool IsSignedInteger =
        HintQTy->isSignedIntegerType() ||
        (HintEltQTy && HintEltQTy->getElementType()->isSignedIntegerType());
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(llvm::PoisonValue::get(
            CGM.getTypes().ConvertType(A->getTypeHint()))),
        llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
            llvm::IntegerType::get(Context, 32),
            llvm::APInt(32, (uint64_t)(IsSignedInteger ? 1 : 0))))};
    Fn->setMetadata("vec_type_hint", llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
    auto Eval = [&](Expr *E) {
      return E->EvaluateKnownConstInt(FD->getASTContext()).getExtValue();
    };
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(Eval(A->getXDim()))),
        llvm::ConstantAsMetadata::get(Builder.getInt32(Eval(A->getYDim()))),
        llvm::ConstantAsMetadata::get(Builder.getInt32(Eval(A->getZDim())))};
    Fn->setMetadata("work_group_size_hint",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
    auto Eval = [&](Expr *E) {
      return E->EvaluateKnownConstInt(FD->getASTContext()).getExtValue();
    };
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(Eval(A->getXDim()))),
        llvm::ConstantAsMetadata::get(Builder.getInt32(Eval(A->getYDim()))),
        llvm::ConstantAsMetadata::get(Builder.getInt32(Eval(A->getZDim())))};
    Fn->setMetadata("reqd_work_group_size",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const OpenCLIntelReqdSubGroupSizeAttr *A =
          FD->getAttr<OpenCLIntelReqdSubGroupSizeAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getSubGroupSize()))};
    Fn->setMetadata("intel_reqd_sub_group_size",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }
}

/// Determine whether the function F ends with a return stmt.
static bool endsWithReturn(const Decl* F) {
  const Stmt *Body = nullptr;
  if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
    Body = FD->getBody();
  else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
    Body = OMD->getBody();

  if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
    auto LastStmt = CS->body_rbegin();
    if (LastStmt != CS->body_rend())
      return isa<ReturnStmt>(*LastStmt);
  }
  return false;
}
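
// Illustrative: true for 'int f() { return 0; }'; false for
// 'int g() { if (c) return 0; }', since only the last top-level statement of
// the body is inspected.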

void CodeGenFunction::markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn) {
  if (SanOpts.has(SanitizerKind::Thread)) {
    Fn->addFnAttr("sanitize_thread_no_checking_at_run_time");
    Fn->removeFnAttr(llvm::Attribute::SanitizeThread);
  }
}

/// Check if the return value of this function requires sanitization.
bool CodeGenFunction::requiresReturnValueCheck() const {
  return requiresReturnValueNullabilityCheck() ||
         (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) && CurCodeDecl &&
          CurCodeDecl->getAttr<ReturnsNonNullAttr>());
}

static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) {
  auto *MD = dyn_cast_or_null<CXXMethodDecl>(D);
  if (!MD || !MD->getDeclName().getAsIdentifierInfo() ||
      !MD->getDeclName().getAsIdentifierInfo()->isStr("allocate") ||
      (MD->getNumParams() != 1 && MD->getNumParams() != 2))
    return false;

  if (!Ctx.hasSameType(MD->parameters()[0]->getType(), Ctx.getSizeType()))
    return false;

  if (MD->getNumParams() == 2) {
    auto *PT = MD->parameters()[1]->getType()->getAs<PointerType>();
    if (!PT || !PT->isVoidPointerType() ||
        !PT->getPointeeType().isConstQualified())
      return false;
  }

  return true;
}
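
// Illustrative: this accepts the two standard allocator shapes,
// 'T *allocate(size_t n)' and 'T *allocate(size_t n, const void *hint)',
// regardless of the enclosing namespace.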

bool CodeGenFunction::isInAllocaArgument(CGCXXABI &ABI, QualType Ty) {
  const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
  return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
}

bool CodeGenFunction::hasInAllocaArg(const CXXMethodDecl *MD) {
  return getTarget().getTriple().getArch() == llvm::Triple::x86 &&
         getTarget().getCXXABI().isMicrosoft() &&
         llvm::any_of(MD->parameters(), [&](ParmVarDecl *P) {
           return isInAllocaArgument(CGM.getCXXABI(), P->getType());
         });
}

/// Return the UBSan prologue signature for \p FD if one is available.
static llvm::Constant *getPrologueSignature(CodeGenModule &CGM,
                                            const FunctionDecl *FD) {
  if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
    if (!MD->isStatic())
      return nullptr;
  return CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM);
}

void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
                                    llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    const FunctionArgList &Args,
                                    SourceLocation Loc,
                                    SourceLocation StartLoc) {
  assert(!CurFn &&
         "Do not use a CodeGenFunction object for more than one function");

  const Decl *D = GD.getDecl();

  DidCallStackSave = false;
  CurCodeDecl = D;
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (FD && FD->usesSEHTry())
    CurSEHParent = GD;
  CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
  FnRetTy = RetTy;
  CurFn = Fn;
  CurFnInfo = &FnInfo;
  assert(CurFn->isDeclaration() && "Function already has body?");

  // If this function is ignored for any of the enabled sanitizers,
  // disable the sanitizer for the function.
  do {
#define SANITIZER(NAME, ID)                                                    \
  if (SanOpts.empty())                                                         \
    break;                                                                     \
  if (SanOpts.has(SanitizerKind::ID))                                          \
    if (CGM.isInNoSanitizeList(SanitizerKind::ID, Fn, Loc))                    \
      SanOpts.set(SanitizerKind::ID, false);

#include "clang/Basic/Sanitizers.def"
#undef SANITIZER
  } while (false);

  if (D) {
    const bool SanitizeBounds = SanOpts.hasOneOf(SanitizerKind::Bounds);
    SanitizerMask no_sanitize_mask;
    bool NoSanitizeCoverage = false;

    for (auto *Attr : D->specific_attrs<NoSanitizeAttr>()) {
      no_sanitize_mask |= Attr->getMask();
      // SanitizeCoverage is not handled by SanOpts.
      if (Attr->hasCoverage())
        NoSanitizeCoverage = true;
    }

    // Apply the no_sanitize* attributes to SanOpts.
    SanOpts.Mask &= ~no_sanitize_mask;
    if (no_sanitize_mask & SanitizerKind::Address)
      SanOpts.set(SanitizerKind::KernelAddress, false);
    if (no_sanitize_mask & SanitizerKind::KernelAddress)
      SanOpts.set(SanitizerKind::Address, false);
    if (no_sanitize_mask & SanitizerKind::HWAddress)
      SanOpts.set(SanitizerKind::KernelHWAddress, false);
    if (no_sanitize_mask & SanitizerKind::KernelHWAddress)
      SanOpts.set(SanitizerKind::HWAddress, false);

    if (SanitizeBounds && !SanOpts.hasOneOf(SanitizerKind::Bounds))
      Fn->addFnAttr(llvm::Attribute::NoSanitizeBounds);

    if (NoSanitizeCoverage && CGM.getCodeGenOpts().hasSanitizeCoverage())
      Fn->addFnAttr(llvm::Attribute::NoSanitizeCoverage);

    // Some passes need the non-negated no_sanitize attribute. Pass them on.
    if (CGM.getCodeGenOpts().hasSanitizeBinaryMetadata()) {
      if (no_sanitize_mask & SanitizerKind::Thread)
        Fn->addFnAttr("no_sanitize_thread");
    }
  }

  if (ShouldSkipSanitizerInstrumentation()) {
    CurFn->addFnAttr(llvm::Attribute::DisableSanitizerInstrumentation);
  } else {
    // Apply sanitizer attributes to the function.
    if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
      Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
    if (SanOpts.hasOneOf(SanitizerKind::HWAddress |
                         SanitizerKind::KernelHWAddress))
      Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
    if (SanOpts.has(SanitizerKind::MemtagStack))
      Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);
    if (SanOpts.has(SanitizerKind::Thread))
      Fn->addFnAttr(llvm::Attribute::SanitizeThread);
    if (SanOpts.has(SanitizerKind::Type))
      Fn->addFnAttr(llvm::Attribute::SanitizeType);
    if (SanOpts.has(SanitizerKind::NumericalStability))
      Fn->addFnAttr(llvm::Attribute::SanitizeNumericalStability);
    if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory))
      Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
    if (SanOpts.has(SanitizerKind::AllocToken))
      Fn->addFnAttr(llvm::Attribute::SanitizeAllocToken);
  }
  if (SanOpts.has(SanitizerKind::SafeStack))
    Fn->addFnAttr(llvm::Attribute::SafeStack);
  if (SanOpts.has(SanitizerKind::ShadowCallStack))
    Fn->addFnAttr(llvm::Attribute::ShadowCallStack);

  if (SanOpts.has(SanitizerKind::Realtime))
    if (FD && FD->getASTContext().hasAnyFunctionEffects())
      for (const FunctionEffectWithCondition &Fe : FD->getFunctionEffects()) {
        if (Fe.Effect.kind() == FunctionEffect::Kind::NonBlocking)
          Fn->addFnAttr(llvm::Attribute::SanitizeRealtime);
        else if (Fe.Effect.kind() == FunctionEffect::Kind::Blocking)
          Fn->addFnAttr(llvm::Attribute::SanitizeRealtimeBlocking);
      }

  // Apply fuzzing attribute to the function.
  if (SanOpts.hasOneOf(SanitizerKind::Fuzzer | SanitizerKind::FuzzerNoLink))
    Fn->addFnAttr(llvm::Attribute::OptForFuzzing);

  // Ignore TSan memory accesses from within ObjC/ObjC++ dealloc, initialize,
  // .cxx_destruct, __destroy_helper_block_ and all of their callees at run
  // time.
  if (SanOpts.has(SanitizerKind::Thread)) {
    if (const auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) {
      const IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0);
      if (OMD->getMethodFamily() == OMF_dealloc ||
          OMD->getMethodFamily() == OMF_initialize ||
          (OMD->getSelector().isUnarySelector() && II->isStr(".cxx_destruct"))) {
        markAsIgnoreThreadCheckingAtRuntime(Fn);
      }
    }
  }

  // Ignore unrelated casts in STL allocate() since the allocator must cast
  // from void* to T* before object initialization completes. Don't match on the
  // namespace because not all allocators are in std::
  if (D && SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
    if (matchesStlAllocatorFn(D, getContext()))
      SanOpts.Mask &= ~SanitizerKind::CFIUnrelatedCast;
  }

  // Ignore null checks in coroutine functions since the coroutine passes
  // are not aware of how to move the extra UBSan instructions across the split
  // coroutine boundaries.
  if (D && SanOpts.has(SanitizerKind::Null))
    if (FD && FD->getBody() &&
        FD->getBody()->getStmtClass() == Stmt::CoroutineBodyStmtClass)
      SanOpts.Mask &= ~SanitizerKind::Null;

  // Apply xray attributes to the function (as a string, for now)
  bool AlwaysXRayAttr = false;
  if (const auto *XRayAttr = D ? D->getAttr<XRayInstrumentAttr>() : nullptr) {
    if (CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::FunctionEntry) ||
        CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::FunctionExit)) {
      if (XRayAttr->alwaysXRayInstrument() && ShouldXRayInstrumentFunction()) {
        Fn->addFnAttr("function-instrument", "xray-always");
        AlwaysXRayAttr = true;
      }
      if (XRayAttr->neverXRayInstrument())
        Fn->addFnAttr("function-instrument", "xray-never");
      if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>())
        Fn->addFnAttr("xray-log-args",
                      llvm::utostr(LogArgs->getArgumentCount()));
    }
  } else {
    if (ShouldXRayInstrumentFunction() && !CGM.imbueXRayAttrs(Fn, Loc))
      Fn->addFnAttr(
          "xray-instruction-threshold",
          llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold));
  }

  if (ShouldXRayInstrumentFunction()) {
    if (CGM.getCodeGenOpts().XRayIgnoreLoops)
      Fn->addFnAttr("xray-ignore-loops");

    if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::FunctionExit))
      Fn->addFnAttr("xray-skip-exit");

    if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::FunctionEntry))
      Fn->addFnAttr("xray-skip-entry");

    auto FuncGroups = CGM.getCodeGenOpts().XRayTotalFunctionGroups;
    if (FuncGroups > 1) {
      auto FuncName = llvm::ArrayRef<uint8_t>(CurFn->getName().bytes_begin(),
                                              CurFn->getName().bytes_end());
      auto Group = crc32(FuncName) % FuncGroups;
      if (Group != CGM.getCodeGenOpts().XRaySelectedFunctionGroup &&
          !AlwaysXRayAttr)
        Fn->addFnAttr("function-instrument", "xray-never");
    }
  }

  if (CGM.getCodeGenOpts().getProfileInstr() !=
      llvm::driver::ProfileInstrKind::ProfileNone) {
    switch (CGM.isFunctionBlockedFromProfileInstr(Fn, Loc)) {
    case ProfileList::Skip:
      Fn->addFnAttr(llvm::Attribute::SkipProfile);
      break;
    case ProfileList::Forbid:
      Fn->addFnAttr(llvm::Attribute::NoProfile);
      break;
    case ProfileList::Allow:
      break;
    }
  }

  unsigned Count, Offset;
  StringRef Section;
  if (const auto *Attr =
          D ? D->getAttr<PatchableFunctionEntryAttr>() : nullptr) {
    Count = Attr->getCount();
    Offset = Attr->getOffset();
    Section = Attr->getSection();
  } else {
    Count = CGM.getCodeGenOpts().PatchableFunctionEntryCount;
    Offset = CGM.getCodeGenOpts().PatchableFunctionEntryOffset;
  }
  if (Section.empty())
    Section = CGM.getCodeGenOpts().PatchableFunctionEntrySection;
  if (Count && Offset <= Count) {
    Fn->addFnAttr("patchable-function-entry", std::to_string(Count - Offset));
    if (Offset)
      Fn->addFnAttr("patchable-function-prefix", std::to_string(Offset));
    if (!Section.empty())
      Fn->addFnAttr("patchable-function-entry-section", Section);
  }
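
  // Illustrative: with -fpatchable-function-entry=5,2, Count is 5 and Offset
  // is 2, so this produces "patchable-function-prefix"="2" and
  // "patchable-function-entry"="3": two NOPs before the entry point and three
  // after it.
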
  // Instruct that functions for COFF/CodeView targets should start with a
  // patchable instruction, but only on x86/x64. Don't forward this to ARM/ARM64
  // backends as they don't need it -- instructions on these architectures are
  // always atomically patchable at runtime.
  if (CGM.getCodeGenOpts().HotPatch &&
      getContext().getTargetInfo().getTriple().isX86() &&
      getContext().getTargetInfo().getTriple().getEnvironment() !=
          llvm::Triple::CODE16)
    Fn->addFnAttr("patchable-function", "prologue-short-redirect");

  // Add no-jump-tables value.
  if (CGM.getCodeGenOpts().NoUseJumpTables)
    Fn->addFnAttr("no-jump-tables", "true");

  // Add no-inline-line-tables value.
  if (CGM.getCodeGenOpts().NoInlineLineTables)
    Fn->addFnAttr("no-inline-line-tables");

  // Add profile-sample-accurate value.
  if (CGM.getCodeGenOpts().ProfileSampleAccurate)
    Fn->addFnAttr("profile-sample-accurate");

  if (!CGM.getCodeGenOpts().SampleProfileFile.empty())
    Fn->addFnAttr("use-sample-profile");

  if (D && D->hasAttr<CFICanonicalJumpTableAttr>())
    Fn->addFnAttr("cfi-canonical-jump-table");

  if (D && D->hasAttr<NoProfileFunctionAttr>())
    Fn->addFnAttr(llvm::Attribute::NoProfile);

  if (D && D->hasAttr<HybridPatchableAttr>())
    Fn->addFnAttr(llvm::Attribute::HybridPatchable);

  if (D) {
    // Function attributes take precedence over command line flags.
    if (auto *A = D->getAttr<FunctionReturnThunksAttr>()) {
      switch (A->getThunkType()) {
      case FunctionReturnThunksAttr::Kind::Keep:
        break;
      case FunctionReturnThunksAttr::Kind::Extern:
        Fn->addFnAttr(llvm::Attribute::FnRetThunkExtern);
        break;
      }
    } else if (CGM.getCodeGenOpts().FunctionReturnThunks)
      Fn->addFnAttr(llvm::Attribute::FnRetThunkExtern);
  }

  if (FD && (getLangOpts().OpenCL ||
             (getLangOpts().CUDA &&
              getContext().getTargetInfo().getTriple().isSPIRV()) ||
             ((getLangOpts().HIP || getLangOpts().OffloadViaLLVM) &&
              getLangOpts().CUDAIsDevice))) {
    // Add metadata for a kernel function.
    EmitKernelMetadata(FD, Fn);
  }

  if (FD && FD->hasAttr<ClspvLibclcBuiltinAttr>()) {
    Fn->setMetadata("clspv_libclc_builtin",
                    llvm::MDNode::get(getLLVMContext(), {}));
  }

  // If we are checking function types, emit a function type signature as
  // prologue data.
  if (FD && SanOpts.has(SanitizerKind::Function)) {
    if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) {
      llvm::LLVMContext &Ctx = Fn->getContext();
      llvm::MDBuilder MDB(Ctx);
      Fn->setMetadata(
          llvm::LLVMContext::MD_func_sanitize,
          MDB.createRTTIPointerPrologue(
              PrologueSig, getUBSanFunctionTypeHash(FD->getType())));
    }
  }

  // If we're checking nullability, we need to know whether we can check the
  // return value. Initialize the flag to 'true' and refine it in EmitParmDecl.
  if (SanOpts.has(SanitizerKind::NullabilityReturn)) {
    auto Nullability = FnRetTy->getNullability();
    if (Nullability && *Nullability == NullabilityKind::NonNull &&
        !FnRetTy->isRecordType()) {
      if (!(SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
            CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>()))
        RetValNullabilityPrecondition =
            llvm::ConstantInt::getTrue(getLLVMContext());
    }
  }

  // If we're in C++ mode and the function name is "main", it is guaranteed
  // to be norecurse by the standard (3.6.1.3 "The function main shall not be
  // used within a program").
  //
  // OpenCL C 2.0 v2.2-11 s6.9.i:
  //     Recursion is not supported.
  //
  // HLSL
  //     Recursion is not supported.
  //
  // SYCL v1.2.1 s3.10:
  //     kernels cannot include RTTI information, exception classes,
  //     recursive code, virtual functions or make use of C++ libraries that
  //     are not compiled for the device.
  if (FD &&
      ((getLangOpts().CPlusPlus && FD->isMain()) || getLangOpts().OpenCL ||
       getLangOpts().HLSL || getLangOpts().SYCLIsDevice ||
       (getLangOpts().CUDA && FD->hasAttr<CUDAGlobalAttr>())))
    Fn->addFnAttr(llvm::Attribute::NoRecurse);

  llvm::RoundingMode RM = getLangOpts().getDefaultRoundingMode();
  llvm::fp::ExceptionBehavior FPExceptionBehavior =
      ToConstrainedExceptMD(getLangOpts().getDefaultExceptionMode());
  Builder.setDefaultConstrainedRounding(RM);
  Builder.setDefaultConstrainedExcept(FPExceptionBehavior);
  if ((FD && (FD->UsesFPIntrin() || FD->hasAttr<StrictFPAttr>())) ||
      (!FD && (FPExceptionBehavior != llvm::fp::ebIgnore ||
               RM != llvm::RoundingMode::NearestTiesToEven))) {
    Builder.setIsFPConstrained(true);
    Fn->addFnAttr(llvm::Attribute::StrictFP);
  }

  // If a custom alignment is used, force realigning to this alignment on
  // any main function, which certainly will need it.
  if (FD && ((FD->isMain() || FD->isMSVCRTEntryPoint()) &&
             CGM.getCodeGenOpts().StackAlignment))
    Fn->addFnAttr("stackrealign");

  // "main" doesn't need to zero out call-used registers.
  if (FD && FD->isMain())
    Fn->removeFnAttr("zero-call-used-regs");

  // Add vscale_range attribute if appropriate.
  llvm::StringMap<bool> FeatureMap;
  auto IsArmStreaming = TargetInfo::ArmStreamingKind::NotStreaming;
  if (FD) {
    getContext().getFunctionFeatureMap(FeatureMap, FD);
    if (const auto *T = FD->getType()->getAs<FunctionProtoType>())
      if (T->getAArch64SMEAttributes() &
          FunctionType::SME_PStateSMCompatibleMask)
        IsArmStreaming = TargetInfo::ArmStreamingKind::StreamingCompatible;

    if (IsArmStreamingFunction(FD, true))
      IsArmStreaming = TargetInfo::ArmStreamingKind::Streaming;
  }
  std::optional<std::pair<unsigned, unsigned>> VScaleRange =
      getContext().getTargetInfo().getVScaleRange(getLangOpts(), IsArmStreaming,
                                                  &FeatureMap);
  if (VScaleRange) {
    CurFn->addFnAttr(llvm::Attribute::getWithVScaleRangeArgs(
        getLLVMContext(), VScaleRange->first, VScaleRange->second));
  }

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entryblock
  // later. Don't create this with the builder, because we don't want it
  // folded.
  llvm::Value *Poison = llvm::PoisonValue::get(Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Poison, Int32Ty, "allocapt", EntryBB);

  ReturnBlock = getJumpDestInCurrentScope("return");

  Builder.SetInsertPoint(EntryBB);

  // If we're checking the return value, allocate space for a pointer to a
  // precise source location of the checked return statement.
  if (requiresReturnValueCheck()) {
    ReturnLocation = CreateDefaultAlignTempAlloca(Int8PtrTy, "return.sloc.ptr");
    Builder.CreateStore(llvm::ConstantPointerNull::get(Int8PtrTy),
                        ReturnLocation);
  }

  // Emit subprogram debug descriptor.
  if (CGDebugInfo *DI = getDebugInfo()) {
    // Reconstruct the type from the argument list so that implicit parameters,
    // such as 'this' and 'vtt', show up in the debug info. Preserve the calling
    // convention.
    DI->emitFunctionStart(GD, Loc, StartLoc,
                          DI->getFunctionType(FD, RetTy, Args), CurFn,
                          CurFuncIsThunk);
  }

  if (ShouldInstrumentFunction()) {
    if (CGM.getCodeGenOpts().InstrumentFunctions)
      CurFn->addFnAttr("instrument-function-entry", "__cyg_profile_func_enter");
    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
      CurFn->addFnAttr("instrument-function-entry-inlined",
                       "__cyg_profile_func_enter");
    if (CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
      CurFn->addFnAttr("instrument-function-entry-inlined",
                       "__cyg_profile_func_enter_bare");
  }

  // Since emitting the mcount call here impacts optimizations such as function
  // inlining, we just add an attribute to insert an mcount call in the
  // backend. The attribute "counting-function" is set to the mcount function
  // name, which is architecture dependent.
  if (CGM.getCodeGenOpts().InstrumentForProfiling) {
    // Calls to fentry/mcount should not be generated if the function has
    // the no_instrument_function attribute.
    if (!CurFuncDecl || !CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>()) {
      if (CGM.getCodeGenOpts().CallFEntry)
        Fn->addFnAttr("fentry-call", "true");
      else {
        Fn->addFnAttr("instrument-function-entry-inlined",
                      getTarget().getMCountName());
      }
      if (CGM.getCodeGenOpts().MNopMCount) {
        if (!CGM.getCodeGenOpts().CallFEntry)
          CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
              << "-mnop-mcount" << "-mfentry";
        Fn->addFnAttr("mnop-mcount");
      }

      if (CGM.getCodeGenOpts().RecordMCount) {
        if (!CGM.getCodeGenOpts().CallFEntry)
          CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
              << "-mrecord-mcount" << "-mfentry";
        Fn->addFnAttr("mrecord-mcount");
      }
    }
  }

  if (CGM.getCodeGenOpts().PackedStack) {
    if (getContext().getTargetInfo().getTriple().getArch() !=
        llvm::Triple::systemz)
      CGM.getDiags().Report(diag::err_opt_not_valid_on_target)
          << "-mpacked-stack";
    Fn->addFnAttr("packed-stack");
  }

  if (CGM.getCodeGenOpts().WarnStackSize != UINT_MAX &&
      !CGM.getDiags().isIgnored(diag::warn_fe_backend_frame_larger_than, Loc))
    Fn->addFnAttr("warn-stack-size",
                  std::to_string(CGM.getCodeGenOpts().WarnStackSize));

  if (RetTy->isVoidType()) {
    // Void type; nothing to return.
    ReturnValue = Address::invalid();

    // Count the implicit return.
    if (!endsWithReturn(D))
      ++NumReturnExprs;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) {
    // Indirect return; emit returned value directly into sret slot.
    // This reduces code size, and affects correctness in C++.
    auto AI = CurFn->arg_begin();
    if (CurFnInfo->getReturnInfo().isSRetAfterThis())
      ++AI;
    ReturnValue = makeNaturalAddressForPointer(
        &*AI, RetTy, CurFnInfo->getReturnInfo().getIndirectAlign(), false,
        nullptr, nullptr, KnownNonNull);
    if (!CurFnInfo->getReturnInfo().getIndirectByVal()) {
      ReturnValuePointer =
          CreateDefaultAlignTempAlloca(ReturnValue.getType(), "result.ptr");
      Builder.CreateStore(ReturnValue.emitRawPointer(*this),
                          ReturnValuePointer);
    }
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
             !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
    // Load the sret pointer from the argument struct and return into that.
    unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
    llvm::Function::arg_iterator EI = CurFn->arg_end();
    --EI;
    llvm::Value *Addr = Builder.CreateStructGEP(
        CurFnInfo->getArgStruct(), &*EI, Idx);
    llvm::Type *Ty =
        cast<llvm::GetElementPtrInst>(Addr)->getResultElementType();
    ReturnValuePointer = Address(Addr, Ty, getPointerAlign());
    Addr = Builder.CreateAlignedLoad(Ty, Addr, getPointerAlign(), "agg.result");
    ReturnValue = Address(Addr, ConvertTypeForMem(RetTy),
                          CGM.getNaturalTypeAlignment(RetTy), KnownNonNull);
  } else {
    ReturnValue = CreateIRTemp(RetTy, "retval");

    // Tell the epilog emitter to autorelease the result. We do this
    // now so that various specialized functions can suppress it
    // during their IR-generation.
    if (getLangOpts().ObjCAutoRefCount &&
        !CurFnInfo->isReturnsRetained() &&
        RetTy->isObjCRetainableType())
      AutoreleaseResult = true;
  }

  EmitStartEHSpec(CurCodeDecl);

  PrologueCleanupDepth = EHStack.stable_begin();

  // Emit OpenMP specific initialization of the device functions.
  if (getLangOpts().OpenMP && CurCodeDecl)
    CGM.getOpenMPRuntime().emitFunctionProlog(*this, CurCodeDecl);

  if (FD && getLangOpts().HLSL) {
    // Handle emitting HLSL entry functions.
    if (FD->hasAttr<HLSLShaderAttr>()) {
      CGM.getHLSLRuntime().emitEntryFunction(FD, Fn);
    }
  }

  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  if (const CXXMethodDecl *MD = dyn_cast_if_present<CXXMethodDecl>(D);
      MD && !MD->isStatic()) {
    bool IsInLambda =
        MD->getParent()->isLambda() && MD->getOverloadedOperator() == OO_Call;
    if (MD->isImplicitObjectMemberFunction())
      CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
    if (IsInLambda) {
      // We're in a lambda; figure out the captures.
      MD->getParent()->getCaptureFields(LambdaCaptureFields,
                                        LambdaThisCaptureField);
      if (LambdaThisCaptureField) {
        // If the lambda captures the object referred to by '*this' - either by
        // value or by reference, make sure CXXThisValue points to the correct
        // object.

        // Get the lvalue for the field (which is a copy of the enclosing object
        // or contains the address of the enclosing object).
        LValue ThisFieldLValue =
            EmitLValueForLambdaField(LambdaThisCaptureField);
        if (!LambdaThisCaptureField->getType()->isPointerType()) {
          // If the enclosing object was captured by value, just use its
          // address. Sign this pointer.
          CXXThisValue = ThisFieldLValue.getPointer(*this);
        } else {
          // Load the lvalue pointed to by the field, since '*this' was captured
          // by reference.
          CXXThisValue =
              EmitLoadOfLValue(ThisFieldLValue, SourceLocation()).getScalarVal();
        }
      }
      for (auto *FD : MD->getParent()->fields()) {
        if (FD->hasCapturedVLAType()) {
          auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
                                           SourceLocation()).getScalarVal();
          auto VAT = FD->getCapturedVLAType();
          VLASizeMap[VAT->getSizeExpr()] = ExprArg;
        }
      }
    } else if (MD->isImplicitObjectMemberFunction()) {
      // Not in a lambda; just use 'this' from the method.
      // FIXME: Should we generate a new load for each use of 'this'? The
      // fast register allocator would be happier...
      CXXThisValue = CXXABIThisValue;
    }

    // Check the 'this' pointer once per function, if it's available.
    if (CXXABIThisValue) {
      SanitizerSet SkippedChecks;
      SkippedChecks.set(SanitizerKind::ObjectSize, true);
      QualType ThisTy = MD->getThisType();

      // If this is the call operator of a lambda with no captures, it
      // may have a static invoker function, which may call this operator with
      // a null 'this' pointer.
      if (isLambdaCallOperator(MD) && MD->getParent()->isCapturelessLambda())
        SkippedChecks.set(SanitizerKind::Null, true);

      EmitTypeCheck(isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall
                                                : TCK_MemberCall,
                    Loc, CXXABIThisValue, ThisTy, CXXABIThisAlignment,
                    SkippedChecks);
    }
  }

  // If any of the arguments have a variably modified type, make sure to
  // emit the type size, but only if the function is not naked. Naked functions
  // have no prolog to run this evaluation.
  if (!FD || !FD->hasAttr<NakedAttr>()) {
    for (const VarDecl *VD : Args) {
      // Dig out the type as written from ParmVarDecls; it's unclear whether
      // the standard (C99 6.9.1p10) requires this, but we're following the
      // precedent set by gcc.
      QualType Ty;
      if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
        Ty = PVD->getOriginalType();
      else
        Ty = VD->getType();

      if (Ty->isVariablyModifiedType())
        EmitVariablyModifiedType(Ty);
    }
  }
  // Emit a location at the end of the prologue.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitLocation(Builder, StartLoc);
  // TODO: Do we need to handle this in two places like we do with
  // target-features/target-cpu?
  if (CurFuncDecl)
    if (const auto *VecWidth = CurFuncDecl->getAttr<MinVectorWidthAttr>())
      LargestVectorWidth = VecWidth->getVectorWidth();

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(getOrEmitConvergenceEntryToken(CurFn));
}

void CodeGenFunction::EmitFunctionBody(const Stmt *Body) {
  incrementProfileCounter(Body);
  if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
    EmitCompoundStmtWithoutScope(*S);
  else
    EmitStmt(Body);
}

/// When instrumenting to collect profile data, the counts for some blocks
/// such as switch cases need to not include the fall-through counts, so
/// emit a branch around the instrumentation code. When not instrumenting,
/// this just calls EmitBlock().
void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
                                               const Stmt *S) {
  llvm::BasicBlock *SkipCountBB = nullptr;
  // Do not skip over the instrumentation when single byte coverage mode is
  // enabled.
  if (HaveInsertPoint() && CGM.getCodeGenOpts().hasProfileClangInstr() &&
      !llvm::EnableSingleByteCoverage) {
    // When instrumenting for profiling, the fallthrough to certain
    // statements needs to skip over the instrumentation code so that we
    // get an accurate count.
    SkipCountBB = createBasicBlock("skipcount");
    EmitBranch(SkipCountBB);
  }
  EmitBlock(BB);
  uint64_t CurrentCount = getCurrentProfileCount();
  incrementProfileCounter(S);
  setCurrentProfileCount(getCurrentProfileCount() + CurrentCount);
  if (SkipCountBB)
    EmitBlock(SkipCountBB);
}
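
// Illustrative: for a switch case label that is also reached by fall-through,
// the branch emitted above routes the fall-through edge around the counter
// increment so the case's execution count stays accurate.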

/// Tries to mark the given function nounwind based on the
/// non-existence of any throwing calls within it. We believe this is
/// lightweight enough to do at -O0.
static void TryMarkNoThrow(llvm::Function *F) {
  // LLVM treats 'nounwind' on a function as part of the type, so we
  // can't do this on functions that can be overwritten.
  if (F->isInterposable()) return;

  for (llvm::BasicBlock &BB : *F)
    for (llvm::Instruction &I : BB)
      if (I.mayThrow())
        return;

  F->setDoesNotThrow();
}

QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
                                               FunctionArgList &Args) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  QualType ResTy = FD->getReturnType();

  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
  if (MD && MD->isImplicitObjectMemberFunction()) {
    if (CGM.getCXXABI().HasThisReturn(GD))
      ResTy = MD->getThisType();
    else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
      ResTy = CGM.getContext().VoidPtrTy;
    CGM.getCXXABI().buildThisParam(*this, Args);
  }

  // The base version of an inheriting constructor whose constructed base is a
  // virtual base is not passed any arguments (because it doesn't actually call
  // the inherited constructor).
  bool PassedParams = true;
  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    if (auto Inherited = CD->getInheritedConstructor())
      PassedParams =
          getTypes().inheritingCtorHasParams(Inherited, GD.getCtorType());

  if (PassedParams) {
    for (auto *Param : FD->parameters()) {
      Args.push_back(Param);
      if (!Param->hasAttr<PassObjectSizeAttr>())
        continue;

      auto *Implicit = ImplicitParamDecl::Create(
          getContext(), Param->getDeclContext(), Param->getLocation(),
          /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamKind::Other);
      SizeArguments[Param] = Implicit;
      Args.push_back(Implicit);
    }
  }

  if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
    CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);

  return ResTy;
}

void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
                                   const CGFunctionInfo &FnInfo) {
  assert(Fn && "generating code for null Function");
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  CurGD = GD;

  FunctionArgList Args;
  QualType ResTy = BuildFunctionArgList(GD, Args);

  CGM.getTargetCodeGenInfo().checkFunctionABI(CGM, FD);

  if (FD->isInlineBuiltinDeclaration()) {
    // When generating code for a builtin with an inline declaration, use a
    // mangled name to hold the actual body, while keeping an external
    // definition in case the function pointer is referenced somewhere.
    std::string FDInlineName = (Fn->getName() + ".inline").str();
    llvm::Module *M = Fn->getParent();
    llvm::Function *Clone = M->getFunction(FDInlineName);
    if (!Clone) {
      Clone = llvm::Function::Create(Fn->getFunctionType(),
                                     llvm::GlobalValue::InternalLinkage,
                                     Fn->getAddressSpace(), FDInlineName, M);
      Clone->addFnAttr(llvm::Attribute::AlwaysInline);
    }
    Fn->setLinkage(llvm::GlobalValue::ExternalLinkage);
    Fn = Clone;
  } else {
    // Detect the unusual situation where an inline version is shadowed by a
    // non-inline version. In that case we should pick the external one
    // everywhere. That's GCC behavior too. Unfortunately, I cannot find a way
    // to detect that situation before we reach codegen, so do some late
    // replacement.
    for (const FunctionDecl *PD = FD->getPreviousDecl(); PD;
         PD = PD->getPreviousDecl()) {
      if (LLVM_UNLIKELY(PD->isInlineBuiltinDeclaration())) {
        std::string FDInlineName = (Fn->getName() + ".inline").str();
        llvm::Module *M = Fn->getParent();
        if (llvm::Function *Clone = M->getFunction(FDInlineName)) {
          Clone->replaceAllUsesWith(Fn);
          Clone->eraseFromParent();
        }
        break;
      }
    }
  }

  // Check if we should generate debug info for this function.
  if (FD->hasAttr<NoDebugAttr>()) {
    // Clear non-distinct debug info that was possibly attached to the function
    // due to an earlier declaration without the nodebug attribute.
    Fn->setSubprogram(nullptr);
    // Disable debug info indefinitely for this function.
    DebugInfo = nullptr;
  }
  // Finalize function debug info on exit.
  llvm::scope_exit Cleanup([this] {
    if (CGDebugInfo *DI = getDebugInfo())
      DI->completeFunction();
  });

  // The function might not have a body if we're generating thunks for a
  // function declaration.
  SourceRange BodyRange;
  if (Stmt *Body = FD->getBody())
    BodyRange = Body->getSourceRange();
  else
    BodyRange = FD->getLocation();
  CurEHLocation = BodyRange.getEnd();

  // Use the location of the start of the function to determine where
  // the function definition is located. By default use the location
  // of the declaration as the location for the subprogram. A function
  // may lack a declaration in the source code if it is created by code
  // gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
  SourceLocation Loc = FD->getLocation();

  // If this is a function specialization then use the pattern body
  // as the location for the function.
  if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
    if (SpecDecl->hasBody(SpecDecl))
      Loc = SpecDecl->getLocation();

  Stmt *Body = FD->getBody();

  if (Body) {
    // Coroutines always emit lifetime markers.
    if (isa<CoroutineBodyStmt>(Body))
      ShouldEmitLifetimeMarkers = true;

    // Initialize helper which will detect jumps which can cause invalid
    // lifetime markers.
    if (ShouldEmitLifetimeMarkers)
      Bypasses.Init(CGM, Body);
  }
1559
1560 // Emit the standard function prologue.
1561 StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());
1562
1563 // Save parameters for coroutine function.
1564 if (Body && isa_and_nonnull<CoroutineBodyStmt>(Body))
1565 llvm::append_range(FnArgs, FD->parameters());
1566
1567 // Ensure that the function adheres to the forward progress guarantee, which
1568 // is required by certain optimizations.
1569 // In C++11 and up, the attribute will be removed if the body contains a
1570 // trivial empty loop.
1571 if (checkIfFunctionMustProgress())
1572 CurFn->addFnAttr(llvm::Attribute::MustProgress);
1573
1574 // Generate the body of the function.
1575 PGO->assignRegionCounters(GD, CurFn);
1576 if (isa<CXXDestructorDecl>(FD))
1577 EmitDestructorBody(Args);
1578 else if (isa<CXXConstructorDecl>(FD))
1579 EmitConstructorBody(Args);
1580 else if (getLangOpts().CUDA &&
1581 !getLangOpts().CUDAIsDevice &&
1582 FD->hasAttr<CUDAGlobalAttr>())
1583 CGM.getCUDARuntime().emitDeviceStub(*this, Args);
1584 else if (isa<CXXMethodDecl>(FD) &&
1585 cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
1586 // The lambda static invoker function is special, because it forwards or
1587 // clones the body of the function call operator (but is actually static).
1588 EmitLambdaStaticInvokeBody(cast<CXXMethodDecl>(FD));
1589 } else if (isa<CXXMethodDecl>(FD) &&
1590 isLambdaCallOperator(cast<CXXMethodDecl>(FD)) &&
1591 !FnInfo.isDelegateCall() &&
1592 cast<CXXMethodDecl>(FD)->getParent()->getLambdaStaticInvoker() &&
1593 hasInAllocaArg(cast<CXXMethodDecl>(FD))) {
1594 // If emitting a lambda with static invoker on X86 Windows, change
1595 // the call operator body.
1596 // Make sure that this is a call operator with an inalloca arg and check
1597 // for delegate call to make sure this is the original call op and not the
1598 // new forwarding function for the static invoker.
1599 EmitLambdaInAllocaCallOpBody(cast<CXXMethodDecl>(FD));
1600 } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
1601 (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
1602 cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
1603 // Implicit copy-assignment gets the same special treatment as implicit
1604 // copy-constructors.
1605 emitImplicitAssignmentOperatorBody(Args);
1606 } else if (DeviceKernelAttr::isOpenCLSpelling(
1607 FD->getAttr<DeviceKernelAttr>()) &&
1608 GD.getKernelReferenceKind() == KernelReferenceKind::Kernel) {
1609 CallArgList CallArgs;
1610 for (unsigned i = 0; i < Args.size(); ++i) {
1611 Address ArgAddr = GetAddrOfLocalVar(Args[i]);
1612 QualType ArgQualType = Args[i]->getType();
1613 RValue ArgRValue = convertTempToRValue(ArgAddr, ArgQualType, Loc);
1614 CallArgs.add(ArgRValue, ArgQualType);
1615 }
1616 GlobalDecl GDStub = GlobalDecl(FD, KernelReferenceKind::Stub);
1617 const FunctionType *FT = cast<FunctionType>(FD->getType());
1618 CGM.getTargetCodeGenInfo().setOCLKernelStubCallingConvention(FT);
1619 const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall(
1620 CallArgs, FT, /*ChainCall=*/false);
1621 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FnInfo);
1622 llvm::Constant *GDStubFunctionPointer =
1623 CGM.getRawFunctionPointer(GDStub, FTy);
1624 CGCallee GDStubCallee = CGCallee::forDirect(GDStubFunctionPointer, GDStub);
1625 EmitCall(FnInfo, GDStubCallee, ReturnValueSlot(), CallArgs, nullptr, false,
1626 Loc);
1627 } else if (Body) {
1628 EmitFunctionBody(Body);
1629 } else
1630 llvm_unreachable("no definition for emitted function");
1631
1632 // C++11 [stmt.return]p2:
1633 // Flowing off the end of a function [...] results in undefined behavior in
1634 // a value-returning function.
1635 // C11 6.9.1p12:
1636 // If the '}' that terminates a function is reached, and the value of the
1637 // function call is used by the caller, the behavior is undefined.
1638 if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock &&
1639 !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
1640 bool ShouldEmitUnreachable =
1641 CGM.getCodeGenOpts().StrictReturn ||
1642 !CGM.MayDropFunctionReturn(FD->getASTContext(), FD->getReturnType());
1643 if (SanOpts.has(SanitizerKind::Return)) {
1644 auto CheckOrdinal = SanitizerKind::SO_Return;
1645 auto CheckHandler = SanitizerHandler::MissingReturn;
1646 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
1647 llvm::Value *IsFalse = Builder.getFalse();
1648 EmitCheck(std::make_pair(IsFalse, CheckOrdinal), CheckHandler,
1649 EmitCheckSourceLocation(FD->getLocation()), {});
1650 } else if (ShouldEmitUnreachable) {
1651 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
1652 EmitTrapCall(llvm::Intrinsic::trap);
1653 }
1654 if (SanOpts.has(SanitizerKind::Return) || ShouldEmitUnreachable) {
1655 Builder.CreateUnreachable();
1656 Builder.ClearInsertionPoint();
1657 }
1658 }
1659
1660 // Emit the standard function epilogue.
1661 FinishFunction(BodyRange.getEnd());
1662
1663 PGO->verifyCounterMap();
1664
1665 // If we haven't marked the function nothrow through other means, do
1666 // a quick pass now to see if we can.
1667 if (!CurFn->doesNotThrow())
1668 TryMarkNoThrow(CurFn);
1669}
1670
1671/// ContainsLabel - Return true if the statement contains a label in it. If
1672 /// this statement is not executed normally, and it does not contain a label,
1673 /// we can just remove the code.
1674bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
1675 // Null statement, not a label!
1676 if (!S) return false;
1677
1678 // If this is a label, we have to emit the code, consider something like:
1679 // if (0) { ... foo: bar(); } goto foo;
1680 //
1681 // TODO: If anyone cared, we could track __label__'s, since we know that you
1682 // can't jump to one from outside their declared region.
1683 if (isa<LabelStmt>(S))
1684 return true;
1685
1686 // If this is a case/default statement, and we haven't seen a switch, we have
1687 // to emit the code.
1688 if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
1689 return true;
1690
1691 // If this is a switch statement, we want to ignore cases below it.
1692 if (isa<SwitchStmt>(S))
1693 IgnoreCaseStmts = true;
1694
1695 // Scan subexpressions for verboten labels.
1696 for (const Stmt *SubStmt : S->children())
1697 if (ContainsLabel(SubStmt, IgnoreCaseStmts))
1698 return true;
1699
1700 return false;
1701}
1702
1703/// containsBreak - Return true if the statement contains a break out of it.
1704/// If the statement (recursively) contains a switch or loop with a break
1705/// inside of it, this is fine.
1706bool CodeGenFunction::containsBreak(const Stmt *S) {
1707 // Null statement, not a label!
1708 if (!S) return false;
1709
1710 // If this is a switch or loop that defines its own break scope, then we can
1711 // include it and anything inside of it.
1712 if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
1713 isa<ForStmt>(S))
1714 return false;
1715
1716 if (isa<BreakStmt>(S))
1717 return true;
1718
1719 // Scan subexpressions for verboten breaks.
1720 for (const Stmt *SubStmt : S->children())
1721 if (containsBreak(SubStmt))
1722 return true;
1723
1724 return false;
1725}
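// Illustrative sketch (not part of the upstream file): for
//
//   while (n--) { if (f(n)) break; }
//
// containsBreak on the whole while statement returns false, because the loop
// defines its own break scope, while containsBreak on just the loop body
// returns true.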
1726
1727bool CodeGenFunction::mightAddDeclToScope(const Stmt *S) {
1728 if (!S) return false;
1729
1730 // Some statement kinds add a scope and thus never add a decl to the current
1731 // scope. Note, this list is longer than the list of statements that might
1732 // have an unscoped decl nested within them, but this way is conservatively
1733 // correct even if more statement kinds are added.
1734 if (isa<IfStmt>(S) || isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
1735 isa<DoStmt>(S) || isa<ForStmt>(S) || isa<CompoundStmt>(S) ||
1736 isa<CXXForRangeStmt>(S) || isa<CXXTryStmt>(S) ||
1737 isa<ObjCForCollectionStmt>(S) || isa<ObjCAtTryStmt>(S))
1738 return false;
1739
1740 if (isa<DeclStmt>(S))
1741 return true;
1742
1743 for (const Stmt *SubStmt : S->children())
1744 if (mightAddDeclToScope(SubStmt))
1745 return true;
1746
1747 return false;
1748}
1749
1750/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1751/// to a constant, or if it does but contains a label, return false. If it
1752/// constant folds return true and set the boolean result in Result.
1753bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
1754 bool &ResultBool,
1755 bool AllowLabels) {
1756 // If MC/DC is enabled, disable folding so that we can instrument all
1757 // conditions to yield complete test vectors. We still keep track of
1758 // folded conditions during region mapping and visualization.
1759 if (!AllowLabels && CGM.getCodeGenOpts().hasProfileClangInstr() &&
1760 CGM.getCodeGenOpts().MCDCCoverage)
1761 return false;
1762
1763 llvm::APSInt ResultInt;
1764 if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels))
1765 return false;
1766
1767 ResultBool = ResultInt.getBoolValue();
1768 return true;
1769}
1770
1771/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1772/// to a constant, or if it does but contains a label, return false. If it
1773/// constant folds return true and set the folded value.
1774bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
1775 llvm::APSInt &ResultInt,
1776 bool AllowLabels) {
1777 // FIXME: Rename and handle conversion of other evaluatable things
1778 // to bool.
1779 Expr::EvalResult Result;
1780 if (!Cond->EvaluateAsInt(Result, getContext()))
1781 return false; // Not foldable, not integer or not fully evaluatable.
1782
1783 llvm::APSInt Int = Result.Val.getInt();
1784 if (!AllowLabels && CodeGenFunction::ContainsLabel(Cond))
1785 return false; // Contains a label.
1786
1787 PGO->markStmtMaybeUsed(Cond);
1788 ResultInt = Int;
1789 return true;
1790}
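// Illustrative sketch (not part of the upstream file): a condition such as
//
//   if (sizeof(void *) == 8) { ... }
//
// folds to a simple integer here, letting codegen skip the dead arm outright,
// unless the dead arm contains a label that could still be jumped to.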
1791
1792/// Strip parentheses and simplistic logical-NOT operators.
1793const Expr *CodeGenFunction::stripCond(const Expr *C) {
1794 while (true) {
1795 const Expr *SC = IgnoreExprNodes(
1798 if (C == SC)
1799 return SC;
1800 C = SC;
1801 }
1802}
1803
1804/// Determine whether the given condition is an instrumentable condition
1805/// (i.e. no "&&" or "||").
1806bool CodeGenFunction::isInstrumentedCondition(const Expr *C) {
1807 const BinaryOperator *BOp = dyn_cast<BinaryOperator>(stripCond(C));
1808 return (!BOp || !BOp->isLogicalOp());
1809}
1810
1811/// EmitBranchToCounterBlock - Emit a conditional branch to a new block that
1812/// increments a profile counter based on the semantics of the given logical
1813/// operator opcode. This is used to instrument branch condition coverage for
1814/// logical operators.
1815void CodeGenFunction::EmitBranchToCounterBlock(
1816 const Expr *Cond, BinaryOperator::Opcode LOp, llvm::BasicBlock *TrueBlock,
1817 llvm::BasicBlock *FalseBlock, uint64_t TrueCount /* = 0 */,
1818 Stmt::Likelihood LH /* =None */, const Expr *CntrIdx /* = nullptr */) {
1819 // If not instrumenting, just emit a branch.
1820 bool InstrumentRegions = CGM.getCodeGenOpts().hasProfileClangInstr();
1821 if (!InstrumentRegions || !isInstrumentedCondition(Cond))
1822 return EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount, LH);
1823
1824 const Stmt *CntrStmt = (CntrIdx ? CntrIdx : Cond);
1825
1826 llvm::BasicBlock *ThenBlock = nullptr;
1827 llvm::BasicBlock *ElseBlock = nullptr;
1828 llvm::BasicBlock *NextBlock = nullptr;
1829
1830 // Create the block we'll use to increment the appropriate counter.
1831 llvm::BasicBlock *CounterIncrBlock = createBasicBlock("lop.rhscnt");
1832
1833 // Set block pointers according to Logical-AND (BO_LAnd) semantics. This
1834 // means we need to evaluate the condition and increment the counter on TRUE:
1835 //
1836 // if (Cond)
1837 // goto CounterIncrBlock;
1838 // else
1839 // goto FalseBlock;
1840 //
1841 // CounterIncrBlock:
1842 // Counter++;
1843 // goto TrueBlock;
1844
1845 if (LOp == BO_LAnd) {
1846 ThenBlock = CounterIncrBlock;
1847 ElseBlock = FalseBlock;
1848 NextBlock = TrueBlock;
1849 }
1850
1851 // Set block pointers according to Logical-OR (BO_LOr) semantics. This means
1852 // we need to evaluate the condition and increment the counter on FALSE:
1853 //
1854 // if (Cond)
1855 // goto TrueBlock;
1856 // else
1857 // goto CounterIncrBlock;
1858 //
1859 // CounterIncrBlock:
1860 // Counter++;
1861 // goto FalseBlock;
1862
1863 else if (LOp == BO_LOr) {
1864 ThenBlock = TrueBlock;
1865 ElseBlock = CounterIncrBlock;
1866 NextBlock = FalseBlock;
1867 } else {
1868 llvm_unreachable("Expected Opcode must be that of a Logical Operator");
1869 }
1870
1871 // Emit Branch based on condition.
1872 EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, TrueCount, LH);
1873
1874 // Emit the block containing the counter increment(s).
1875 EmitBlock(CounterIncrBlock);
1876
1877 // Increment corresponding counter; if index not provided, use Cond as index.
1878 incrementProfileCounter(CntrStmt);
1879
1880 // Go to the next block.
1881 EmitBranch(NextBlock);
1882}
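// Illustrative sketch (not part of the upstream file): when building with
// -fprofile-instr-generate, a function like
//
//   int f(int a, int b) { return a && b; }
//
// reaches this helper with LOp == BO_LAnd for the RHS; the "lop.rhscnt" block
// counts how often the right-hand operand is actually evaluated.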
1883
1884/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
1885/// statement) to the specified blocks. Based on the condition, this might try
1886/// to simplify the codegen of the conditional based on the branch.
1887/// \param LH The value of the likelihood attribute on the True branch.
1888/// \param ConditionalOp Used by MC/DC code coverage to track the result of the
1889/// ConditionalOperator (ternary) through a recursive call for the operator's
1890/// LHS and RHS nodes.
1891void CodeGenFunction::EmitBranchOnBoolExpr(
1892 const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock,
1893 uint64_t TrueCount, Stmt::Likelihood LH, const Expr *ConditionalOp,
1894 const VarDecl *ConditionalDecl) {
1895 Cond = Cond->IgnoreParens();
1896
1897 if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
1898 // Handle X && Y in a condition.
1899 if (CondBOp->getOpcode() == BO_LAnd) {
1900 // If we have "1 && X", simplify the code. "0 && X" would have constant
1901 // folded if the case was simple enough.
1902 bool ConstantBool = false;
1903 if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1904 ConstantBool) {
1905 // br(1 && X) -> br(X).
1906 incrementProfileCounter(CondBOp);
1907 EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LAnd, TrueBlock,
1908 FalseBlock, TrueCount, LH);
1909 return;
1910 }
1911
1912 // If we have "X && 1", simplify the code to use an uncond branch.
1913 // "X && 0" would have been constant folded to 0.
1914 if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1915 ConstantBool) {
1916 // br(X && 1) -> br(X).
1917 EmitBranchToCounterBlock(CondBOp->getLHS(), BO_LAnd, TrueBlock,
1918 FalseBlock, TrueCount, LH, CondBOp);
1919 return;
1920 }
1921
1922 // Emit the LHS as a conditional. If the LHS conditional is false, we
1923 // want to jump to the FalseBlock.
1924 llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
1925 // The counter tells us how often we evaluate RHS, and all of TrueCount
1926 // can be propagated to that branch.
1927 uint64_t RHSCount = getProfileCount(CondBOp->getRHS());
1928
1929 ConditionalEvaluation eval(*this);
1930 {
1931 ApplyDebugLocation DL(*this, Cond);
1932 // Propagate the likelihood attribute like __builtin_expect
1933 // __builtin_expect(X && Y, 1) -> X and Y are likely
1934 // __builtin_expect(X && Y, 0) -> only Y is unlikely
1935 EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount,
1936 LH == Stmt::LH_Unlikely ? Stmt::LH_None : LH);
1937 EmitBlock(LHSTrue);
1938 }
1939
1940 incrementProfileCounter(CondBOp);
1941 setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1942
1943 // Any temporaries created here are conditional.
1944 eval.begin(*this);
1945 EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LAnd, TrueBlock,
1946 FalseBlock, TrueCount, LH);
1947 eval.end(*this);
1948 return;
1949 }
1950
1951 if (CondBOp->getOpcode() == BO_LOr) {
1952 // If we have "0 || X", simplify the code. "1 || X" would have constant
1953 // folded if the case was simple enough.
1954 bool ConstantBool = false;
1955 if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1956 !ConstantBool) {
1957 // br(0 || X) -> br(X).
1958 incrementProfileCounter(CondBOp);
1959 EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LOr, TrueBlock,
1960 FalseBlock, TrueCount, LH);
1961 return;
1962 }
1963
1964 // If we have "X || 0", simplify the code to use an uncond branch.
1965 // "X || 1" would have been constant folded to 1.
1966 if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1967 !ConstantBool) {
1968 // br(X || 0) -> br(X).
1969 EmitBranchToCounterBlock(CondBOp->getLHS(), BO_LOr, TrueBlock,
1970 FalseBlock, TrueCount, LH, CondBOp);
1971 return;
1972 }
1973 // Emit the LHS as a conditional. If the LHS conditional is true, we
1974 // want to jump to the TrueBlock.
1975 llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
1976 // We have the count for entry to the RHS and for the whole expression
1977 // being true, so we can divvy up the True count between the short circuit
1978 // and the RHS.
1979 uint64_t LHSCount =
1980 getCurrentProfileCount() - getProfileCount(CondBOp->getRHS());
1981 uint64_t RHSCount = TrueCount - LHSCount;
1982
1983 ConditionalEvaluation eval(*this);
1984 {
1985 // Propagate the likelihood attribute like __builtin_expect
1986 // __builtin_expect(X || Y, 1) -> only Y is likely
1987 // __builtin_expect(X || Y, 0) -> both X and Y are unlikely
1988 ApplyDebugLocation DL(*this, Cond);
1989 EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount,
1990 LH == Stmt::LH_Likely ? Stmt::LH_None : LH);
1991 EmitBlock(LHSFalse);
1992 }
1993
1994 incrementProfileCounter(CondBOp);
1995 setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1996
1997 // Any temporaries created here are conditional.
1998 eval.begin(*this);
1999 EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LOr, TrueBlock, FalseBlock,
2000 RHSCount, LH);
2001
2002 eval.end(*this);
2003 return;
2004 }
2005 }
2006
2007 if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
2008 // br(!x, t, f) -> br(x, f, t)
2009 // Avoid doing this optimization when instrumenting a condition for MC/DC.
2010 // LNot is taken as part of the condition for simplicity, and changing its
2011 // sense negatively impacts test vector tracking.
2012 bool MCDCCondition = CGM.getCodeGenOpts().hasProfileClangInstr() &&
2013 CGM.getCodeGenOpts().MCDCCoverage &&
2014 isInstrumentedCondition(Cond);
2015 if (CondUOp->getOpcode() == UO_LNot && !MCDCCondition) {
2016 // Negate the count.
2017 uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
2018 // The values of the enum are chosen to make this negation possible.
2019 LH = static_cast<Stmt::Likelihood>(-LH);
2020 // Negate the condition and swap the destination blocks.
2021 return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
2022 FalseCount, LH);
2023 }
2024 }
2025
2026 if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
2027 // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
2028 llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
2029 llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
2030
2031 // The ConditionalOperator itself has no likelihood information for its
2032 // true and false branches. This matches the behavior of __builtin_expect.
2033 ConditionalEvaluation cond(*this);
2034 EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock,
2035 getProfileCount(CondOp), Stmt::LH_None);
2036
2037 // When computing PGO branch weights, we only know the overall count for
2038 // the true block. This code is essentially doing tail duplication of the
2039 // naive code-gen, introducing new edges for which counts are not
2040 // available. Divide the counts proportionally between the LHS and RHS of
2041 // the conditional operator.
2042 uint64_t LHSScaledTrueCount = 0;
2043 if (TrueCount) {
2044 double LHSRatio =
2045 getProfileCount(CondOp) / (double)getCurrentProfileCount();
2046 LHSScaledTrueCount = TrueCount * LHSRatio;
2047 }
2048
2049 cond.begin(*this);
2050 EmitBlock(LHSBlock);
2051 incrementProfileCounter(CondOp);
2052 {
2053 ApplyDebugLocation DL(*this, Cond);
2054 EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
2055 LHSScaledTrueCount, LH, CondOp);
2056 }
2057 cond.end(*this);
2058
2059 cond.begin(*this);
2060 EmitBlock(RHSBlock);
2061 EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
2062 TrueCount - LHSScaledTrueCount, LH, CondOp);
2063 cond.end(*this);
2064
2065 return;
2066 }
2067
2068 if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
2069 // Conditional operator handling can give us a throw expression as a
2070 // condition for a case like:
2071 // br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f)
2072 // Fold this to:
2073 // br(c, throw x, br(y, t, f))
2074 EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
2075 return;
2076 }
2077
2078 // Emit the code with the fully general case.
2079 llvm::Value *CondV;
2080 {
2081 ApplyDebugLocation DL(*this, Cond);
2082 CondV = EvaluateExprAsBool(Cond);
2083 }
2084
2085 MaybeEmitDeferredVarDeclInit(ConditionalDecl);
2086
2087 // If not at the top of the logical operator nest, update MCDC temp with the
2088 // boolean result of the evaluated condition.
2089 {
2090 const Expr *MCDCBaseExpr = Cond;
2091 // When a nested ConditionalOperator (ternary) is encountered in a boolean
2092 // expression, MC/DC tracks the result of the ternary, and this is tied to
2093 // the ConditionalOperator expression and not the ternary's LHS or RHS. If
2094 // this is the case, the ConditionalOperator expression is passed through
2095 // the ConditionalOp parameter and then used as the MCDC base expression.
2096 if (ConditionalOp)
2097 MCDCBaseExpr = ConditionalOp;
2098
2099 if (isMCDCBranchExpr(stripCond(MCDCBaseExpr)) &&
2101 maybeUpdateMCDCCondBitmap(MCDCBaseExpr, CondV);
2102 }
2103
2104 llvm::MDNode *Weights = nullptr;
2105 llvm::MDNode *Unpredictable = nullptr;
2106
2107 // If the branch has a condition wrapped by __builtin_unpredictable,
2108 // create metadata that specifies that the branch is unpredictable.
2109 // Don't bother if not optimizing because that metadata would not be used.
2110 auto *Call = dyn_cast<CallExpr>(Cond->IgnoreImpCasts());
2111 if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
2112 auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
2113 if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
2114 llvm::MDBuilder MDHelper(getLLVMContext());
2115 Unpredictable = MDHelper.createUnpredictable();
2116 }
2117 }
2118
2119 // If there is a Likelihood knowledge for the cond, lower it.
2120 // Note that if not optimizing this won't emit anything.
2121 llvm::Value *NewCondV = emitCondLikelihoodViaExpectIntrinsic(CondV, LH);
2122 if (CondV != NewCondV)
2123 CondV = NewCondV;
2124 else {
2125 // Otherwise, lower profile counts. Note that we do this even at -O0.
2126 uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount);
2127 Weights = createProfileWeights(TrueCount, CurrentCount - TrueCount);
2128 }
2129
2130 llvm::Instruction *BrInst = Builder.CreateCondBr(CondV, TrueBlock, FalseBlock,
2131 Weights, Unpredictable);
2132 addInstToNewSourceAtom(BrInst, CondV);
2133
2134 switch (HLSLControlFlowAttr) {
2135 case HLSLControlFlowHintAttr::Microsoft_branch:
2136 case HLSLControlFlowHintAttr::Microsoft_flatten: {
2137 llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2138
2139 llvm::ConstantInt *BranchHintConstant =
2140 HLSLControlFlowAttr ==
2141 HLSLControlFlowHintAttr::Spelling::Microsoft_branch
2142 ? llvm::ConstantInt::get(CGM.Int32Ty, 1)
2143 : llvm::ConstantInt::get(CGM.Int32Ty, 2);
2144
2145 SmallVector<llvm::Metadata *, 2> Vals(
2146 {MDHelper.createString("hlsl.controlflow.hint"),
2147 MDHelper.createConstant(BranchHintConstant)});
2148 BrInst->setMetadata("hlsl.controlflow.hint",
2149 llvm::MDNode::get(CGM.getLLVMContext(), Vals));
2150 break;
2151 }
2152 // This is required to avoid warnings during compilation
2153 case HLSLControlFlowHintAttr::SpellingNotCalculated:
2154 break;
2155 }
2156}
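// Illustrative sketch (not part of the upstream file): typical sources of the
// hints lowered above are
//
//   if (__builtin_expect(x, 0)) { ... }      // likelihood -> branch weights
//   if (__builtin_unpredictable(x)) { ... }  // !unpredictable metadata
//   if (x) [[likely]] { ... }                // C++20 likelihood attribute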
2157
2158llvm::Value *CodeGenFunction::EmitScalarOrConstFoldImmArg(unsigned ICEArguments,
2159 unsigned Idx,
2160 const CallExpr *E) {
2161 llvm::Value *Arg = nullptr;
2162 if ((ICEArguments & (1 << Idx)) == 0) {
2163 Arg = EmitScalarExpr(E->getArg(Idx));
2164 } else {
2165 // If this is required to be a constant, constant fold it so that we
2166 // know that the generated intrinsic gets a ConstantInt.
2167 std::optional<llvm::APSInt> Result =
2168 E->getArg(Idx)->getIntegerConstantExpr(getContext());
2169 assert(Result && "Expected argument to be a constant");
2170 Arg = llvm::ConstantInt::get(getLLVMContext(), *Result);
2171 }
2172 return Arg;
2173}
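// Illustrative sketch (not part of the upstream file): for a builtin such as
//
//   __builtin_prefetch(p, 0, 3);
//
// the last two arguments must be integer constant expressions, so their bits
// are set in ICEArguments and they are folded to ConstantInts here instead of
// being emitted as ordinary scalar expressions.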
2174
2175/// ErrorUnsupported - Print out an error that codegen doesn't support the
2176/// specified stmt yet.
2177void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
2178 CGM.ErrorUnsupported(S, Type);
2179}
2180
2181/// emitNonZeroVLAInit - Emit the "zero" initialization of a
2182/// variable-length array whose elements have a non-zero bit-pattern.
2183///
2184/// \param baseType the inner-most element type of the array
2185 /// \param src a char* pointing to the bit-pattern for a single
2186 /// base element of the array
2187 /// \param sizeInChars the total size of the VLA, in chars
2188static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
2189 Address dest, Address src,
2190 llvm::Value *sizeInChars) {
2191 CGBuilderTy &Builder = CGF.Builder;
2192
2193 CharUnits baseSize = CGF.getContext().getTypeSizeInChars(baseType);
2194 llvm::Value *baseSizeInChars
2195 = llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity());
2196
2197 Address begin = dest.withElementType(CGF.Int8Ty);
2198 llvm::Value *end = Builder.CreateInBoundsGEP(begin.getElementType(),
2199 begin.emitRawPointer(CGF),
2200 sizeInChars, "vla.end");
2201
2202 llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
2203 llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
2204 llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");
2205
2206 // Make a loop over the VLA. C99 guarantees that the VLA element
2207 // count must be nonzero.
2208 CGF.EmitBlock(loopBB);
2209
2210 llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur");
2211 cur->addIncoming(begin.emitRawPointer(CGF), originBB);
2212
2213 CharUnits curAlign =
2214 dest.getAlignment().alignmentOfArrayElement(baseSize);
2215
2216 // memcpy the individual element bit-pattern.
2217 Builder.CreateMemCpy(Address(cur, CGF.Int8Ty, curAlign), src, baseSizeInChars,
2218 /*volatile*/ false);
2219
2220 // Go to the next element.
2221 llvm::Value *next =
2222 Builder.CreateInBoundsGEP(CGF.Int8Ty, cur, baseSizeInChars, "vla.next");
2223
2224 // Leave if that's the end of the VLA.
2225 llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
2226 Builder.CreateCondBr(done, contBB, loopBB);
2227 cur->addIncoming(next, loopBB);
2228
2229 CGF.EmitBlock(contBB);
2230}
2231
2232 void
2233 CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
2234 // Ignore empty classes in C++.
2235 if (getLangOpts().CPlusPlus)
2236 if (const auto *RD = Ty->getAsCXXRecordDecl(); RD && RD->isEmpty())
2237 return;
2238
2239 if (DestPtr.getElementType() != Int8Ty)
2240 DestPtr = DestPtr.withElementType(Int8Ty);
2241
2242 // Get size and alignment info for this aggregate.
2243 CharUnits size = getContext().getTypeSizeInChars(Ty);
2244
2245 llvm::Value *SizeVal;
2246 const VariableArrayType *vla;
2247
2248 // Don't bother emitting a zero-byte memset.
2249 if (size.isZero()) {
2250 // But note that getTypeInfo returns 0 for a VLA.
2251 if (const VariableArrayType *vlaType =
2252 dyn_cast_or_null<VariableArrayType>(
2253 getContext().getAsArrayType(Ty))) {
2254 auto VlaSize = getVLASize(vlaType);
2255 SizeVal = VlaSize.NumElts;
2256 CharUnits eltSize = getContext().getTypeSizeInChars(VlaSize.Type);
2257 if (!eltSize.isOne())
2258 SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
2259 vla = vlaType;
2260 } else {
2261 return;
2262 }
2263 } else {
2264 SizeVal = CGM.getSize(size);
2265 vla = nullptr;
2266 }
2267
2268 // If the type contains a pointer to data member we can't memset it to zero.
2269 // Instead, create a null constant and copy it to the destination.
2270 // TODO: there are other patterns besides zero that we can usefully memset,
2271 // like -1, which happens to be the pattern used by member-pointers.
2272 if (!CGM.getTypes().isZeroInitializable(Ty)) {
2273 // For a VLA, emit a single element, then splat that over the VLA.
2274 if (vla) Ty = getContext().getBaseElementType(vla);
2275
2276 llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);
2277
2278 llvm::GlobalVariable *NullVariable =
2279 new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
2280 /*isConstant=*/true,
2281 llvm::GlobalVariable::PrivateLinkage,
2282 NullConstant, Twine());
2283 CharUnits NullAlign = DestPtr.getAlignment();
2284 NullVariable->setAlignment(NullAlign.getAsAlign());
2285 Address SrcPtr(NullVariable, Builder.getInt8Ty(), NullAlign);
2286
2287 if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);
2288
2289 // Get and call the appropriate llvm.memcpy overload.
2290 Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, false);
2291 return;
2292 }
2293
2294 // Otherwise, just memset the whole thing to zero. This is legal
2295 // because in LLVM, all default initializers (other than the ones we just
2296 // handled above) are guaranteed to have a bit pattern of all zeros.
2297 Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
2298}
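// Illustrative sketch (not part of the upstream file): under the Itanium C++
// ABI a null pointer to data member is represented as -1, so
//
//   struct S { int S::*pm; };
//   S s{};
//
// cannot be zero-initialized with a memset; the code above instead creates a
// private global holding the null constant and memcpys it into place.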
2299
2300llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
2301 // Make sure that there is a block for the indirect goto.
2302 if (!IndirectBranch)
2303 GetIndirectGotoBlock();
2304
2305 llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();
2306
2307 // Make sure the indirect branch includes all of the address-taken blocks.
2308 IndirectBranch->addDestination(BB);
2309 return llvm::BlockAddress::get(CurFn->getType(), BB);
2310}
2311
2312llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
2313 // If we already made the indirect branch for indirect goto, return its block.
2314 if (IndirectBranch) return IndirectBranch->getParent();
2315
2316 CGBuilderTy TmpBuilder(*this, createBasicBlock("indirectgoto"));
2317
2318 // Create the PHI node that indirect gotos will add entries to.
2319 llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
2320 "indirect.goto.dest");
2321
2322 // Create the indirect branch instruction.
2323 IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
2324 return IndirectBranch->getParent();
2325}
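// Illustrative sketch (not part of the upstream file): this machinery backs
// the GNU labels-as-values extension:
//
//   void *dest = &&done;  // GetAddrOfLabel
//   goto *dest;           // routed through the shared "indirectgoto" block
// done:;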
2326
2327/// Computes the length of an array in elements, as well as the base
2328/// element type and a properly-typed first element pointer.
2329llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
2330 QualType &baseType,
2331 Address &addr) {
2332 const ArrayType *arrayType = origArrayType;
2333
2334 // If it's a VLA, we have to load the stored size. Note that
2335 // this is the size of the VLA in bytes, not its size in elements.
2336 llvm::Value *numVLAElements = nullptr;
2337 if (isa<VariableArrayType>(arrayType)) {
2338 numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).NumElts;
2339
2340 // Walk into all VLAs. This doesn't require changes to addr,
2341 // which has type T* where T is the first non-VLA element type.
2342 do {
2343 QualType elementType = arrayType->getElementType();
2344 arrayType = getContext().getAsArrayType(elementType);
2345
2346 // If we only have VLA components, 'addr' requires no adjustment.
2347 if (!arrayType) {
2348 baseType = elementType;
2349 return numVLAElements;
2350 }
2351 } while (isa<VariableArrayType>(arrayType));
2352
2353 // We get out here only if we find a constant array type
2354 // inside the VLA.
2355 }
2356
2357 // We have some number of constant-length arrays, so addr should
2358 // have LLVM type [M x [N x [...]]]*. Build a GEP that walks
2359 // down to the first element of addr.
2360 SmallVector<llvm::Value*, 8> gepIndices;
2361
2362 // GEP down to the array type.
2363 llvm::ConstantInt *zero = Builder.getInt32(0);
2364 gepIndices.push_back(zero);
2365
2366 uint64_t countFromCLAs = 1;
2367 QualType eltType;
2368
2369 llvm::ArrayType *llvmArrayType =
2370 dyn_cast<llvm::ArrayType>(addr.getElementType());
2371 while (llvmArrayType) {
2372 assert(isa<ConstantArrayType>(arrayType));
2373 assert(cast<ConstantArrayType>(arrayType)->getZExtSize() ==
2374 llvmArrayType->getNumElements());
2375
2376 gepIndices.push_back(zero);
2377 countFromCLAs *= llvmArrayType->getNumElements();
2378 eltType = arrayType->getElementType();
2379
2380 llvmArrayType =
2381 dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
2382 arrayType = getContext().getAsArrayType(arrayType->getElementType());
2383 assert((!llvmArrayType || arrayType) &&
2384 "LLVM and Clang types are out-of-synch");
2385 }
2386
2387 if (arrayType) {
2388 // From this point onwards, the Clang array type has been emitted
2389 // as some other type (probably a packed struct). Compute the array
2390 // size, and just emit the 'begin' expression as a bitcast.
2391 while (arrayType) {
2392 countFromCLAs *= cast<ConstantArrayType>(arrayType)->getZExtSize();
2393 eltType = arrayType->getElementType();
2394 arrayType = getContext().getAsArrayType(eltType);
2395 }
2396
2397 llvm::Type *baseType = ConvertType(eltType);
2398 addr = addr.withElementType(baseType);
2399 } else {
2400 // Create the actual GEP.
2401 addr = Address(Builder.CreateInBoundsGEP(addr.getElementType(),
2402 addr.emitRawPointer(*this),
2403 gepIndices, "array.begin"),
2404 ConvertTypeForMem(eltType), addr.getAlignment());
2405 }
2406
2407 baseType = eltType;
2408
2409 llvm::Value *numElements
2410 = llvm::ConstantInt::get(SizeTy, countFromCLAs);
2411
2412 // If we had any VLA dimensions, factor them in.
2413 if (numVLAElements)
2414 numElements = Builder.CreateNUWMul(numVLAElements, numElements);
2415
2416 return numElements;
2417}
2418
2419CodeGenFunction::VlaSizePair CodeGenFunction::getVLASize(QualType type) {
2420 const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
2421 assert(vla && "type was not a variable array type!");
2422 return getVLASize(vla);
2423}
2424
2425CodeGenFunction::VlaSizePair
2426CodeGenFunction::getVLASize(const VariableArrayType *type) {
2427 // The number of elements so far; always size_t.
2428 llvm::Value *numElements = nullptr;
2429
2430 QualType elementType;
2431 do {
2432 elementType = type->getElementType();
2433 llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
2434 assert(vlaSize && "no size for VLA!");
2435 assert(vlaSize->getType() == SizeTy);
2436
2437 if (!numElements) {
2438 numElements = vlaSize;
2439 } else {
2440 // It's undefined behavior if this wraps around, so mark it that way.
2441 // FIXME: Teach -fsanitize=undefined to trap this.
2442 numElements = Builder.CreateNUWMul(numElements, vlaSize);
2443 }
2444 } while ((type = getContext().getAsVariableArrayType(elementType)));
2445
2446 return { numElements, elementType };
2447}
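// Illustrative sketch (not part of the upstream file): for the C code
//
//   void f(int n, int m) { int a[n][m]; }
//
// getVLASize on a's type yields NumElts == n * m (multiplied with NUW flags,
// since a wrapping bound would already be undefined behavior) and Type == int.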
2448
2449CodeGenFunction::VlaSizePair
2450CodeGenFunction::getVLAElements1D(QualType type) {
2451 const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
2452 assert(vla && "type was not a variable array type!");
2453 return getVLAElements1D(vla);
2454}
2455
2456CodeGenFunction::VlaSizePair
2457CodeGenFunction::getVLAElements1D(const VariableArrayType *Vla) {
2458 llvm::Value *VlaSize = VLASizeMap[Vla->getSizeExpr()];
2459 assert(VlaSize && "no size for VLA!");
2460 assert(VlaSize->getType() == SizeTy);
2461 return { VlaSize, Vla->getElementType() };
2462}
2463
2464void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
2465 assert(type->isVariablyModifiedType() &&
2466 "Must pass variably modified type to EmitVLASizes!");
2467
2469
2470 // We're going to walk down into the type and look for VLA
2471 // expressions.
2472 do {
2473 assert(type->isVariablyModifiedType());
2474
2475 const Type *ty = type.getTypePtr();
2476 switch (ty->getTypeClass()) {
2477
2478#define TYPE(Class, Base)
2479#define ABSTRACT_TYPE(Class, Base)
2480#define NON_CANONICAL_TYPE(Class, Base)
2481#define DEPENDENT_TYPE(Class, Base) case Type::Class:
2482#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
2483#include "clang/AST/TypeNodes.inc"
2484 llvm_unreachable("unexpected dependent type!");
2485
2486 // These types are never variably-modified.
2487 case Type::Builtin:
2488 case Type::Complex:
2489 case Type::Vector:
2490 case Type::ExtVector:
2491 case Type::ConstantMatrix:
2492 case Type::Record:
2493 case Type::Enum:
2494 case Type::Using:
2495 case Type::TemplateSpecialization:
2496 case Type::ObjCTypeParam:
2497 case Type::ObjCObject:
2498 case Type::ObjCInterface:
2499 case Type::ObjCObjectPointer:
2500 case Type::BitInt:
2501 case Type::HLSLInlineSpirv:
2502 case Type::PredefinedSugar:
2503 llvm_unreachable("type class is never variably-modified!");
2504
2505 case Type::Adjusted:
2506 type = cast<AdjustedType>(ty)->getAdjustedType();
2507 break;
2508
2509 case Type::Decayed:
2510 type = cast<DecayedType>(ty)->getPointeeType();
2511 break;
2512
2513 case Type::Pointer:
2514 type = cast<PointerType>(ty)->getPointeeType();
2515 break;
2516
2517 case Type::BlockPointer:
2518 type = cast<BlockPointerType>(ty)->getPointeeType();
2519 break;
2520
2521 case Type::LValueReference:
2522 case Type::RValueReference:
2523 type = cast<ReferenceType>(ty)->getPointeeType();
2524 break;
2525
2526 case Type::MemberPointer:
2527 type = cast<MemberPointerType>(ty)->getPointeeType();
2528 break;
2529
2530 case Type::ArrayParameter:
2531 case Type::ConstantArray:
2532 case Type::IncompleteArray:
2533 // Losing element qualification here is fine.
2534 type = cast<ArrayType>(ty)->getElementType();
2535 break;
2536
2537 case Type::VariableArray: {
2538 // Losing element qualification here is fine.
2539 const VariableArrayType *vat = cast<VariableArrayType>(ty);
2540
2541 // Unknown size indication requires no size computation.
2542 // Otherwise, evaluate and record it.
2543 if (const Expr *sizeExpr = vat->getSizeExpr()) {
2544 // It's possible that we might have emitted this already,
2545 // e.g. with a typedef and a pointer to it.
2546 llvm::Value *&entry = VLASizeMap[sizeExpr];
2547 if (!entry) {
2548 llvm::Value *size = EmitScalarExpr(sizeExpr);
2549
2550 // C11 6.7.6.2p5:
2551 // If the size is an expression that is not an integer constant
2552 // expression [...] each time it is evaluated it shall have a value
2553 // greater than zero.
2554 if (SanOpts.has(SanitizerKind::VLABound)) {
2555 auto CheckOrdinal = SanitizerKind::SO_VLABound;
2556 auto CheckHandler = SanitizerHandler::VLABoundNotPositive;
2557 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
2558 llvm::Value *Zero = llvm::Constant::getNullValue(size->getType());
2559 clang::QualType SEType = sizeExpr->getType();
2560 llvm::Value *CheckCondition =
2561 SEType->isSignedIntegerType()
2562 ? Builder.CreateICmpSGT(size, Zero)
2563 : Builder.CreateICmpUGT(size, Zero);
2564 llvm::Constant *StaticArgs[] = {
2565 EmitCheckSourceLocation(sizeExpr->getBeginLoc()),
2566 EmitCheckTypeDescriptor(SEType)};
2567 EmitCheck(std::make_pair(CheckCondition, CheckOrdinal),
2568 CheckHandler, StaticArgs, size);
2569 }
2570
2571 // Always zexting here would be wrong if it weren't
2572 // undefined behavior to have a negative bound.
2573 // FIXME: What about when size's type is larger than size_t?
2574 entry = Builder.CreateIntCast(size, SizeTy, /*signed*/ false);
2575 }
2576 }
2577 type = vat->getElementType();
2578 break;
2579 }
2580
2581 case Type::FunctionProto:
2582 case Type::FunctionNoProto:
2583 type = cast<FunctionType>(ty)->getReturnType();
2584 break;
2585
2586 case Type::Paren:
2587 case Type::TypeOf:
2588 case Type::UnaryTransform:
2589 case Type::Attributed:
2590 case Type::BTFTagAttributed:
2591 case Type::HLSLAttributedResource:
2592 case Type::SubstTemplateTypeParm:
2593 case Type::MacroQualified:
2594 case Type::CountAttributed:
2595 // Keep walking after single level desugaring.
2596 type = type.getSingleStepDesugaredType(getContext());
2597 break;
2598
2599 case Type::Typedef:
2600 case Type::Decltype:
2601 case Type::Auto:
2602 case Type::DeducedTemplateSpecialization:
2603 case Type::PackIndexing:
2604 // Stop walking: nothing to do.
2605 return;
2606
2607 case Type::TypeOfExpr:
2608 // Stop walking: emit typeof expression.
2609 EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
2610 return;
2611
2612 case Type::Atomic:
2613 type = cast<AtomicType>(ty)->getValueType();
2614 break;
2615
2616 case Type::Pipe:
2617 type = cast<PipeType>(ty)->getElementType();
2618 break;
2619 }
2620 } while (type->isVariablyModifiedType());
2621}
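// Illustrative sketch (not part of the upstream file): the walk above
// evaluates each VLA bound exactly once, at the point of declaration, so in
//
//   void f(int n) { typedef int T[n]; n = 0; T a; }
//
// the array "a" still has the original n elements: the size expression was
// emitted for the typedef and cached in VLASizeMap.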
2622
2623Address CodeGenFunction::EmitVAListRef(const Expr *E) {
2624 if (getContext().getBuiltinVaListType()->isArrayType())
2625 return EmitPointerWithAlignment(E);
2626 return EmitLValue(E).getAddress();
2627}
2628
2629Address CodeGenFunction::EmitMSVAListRef(const Expr *E) {
2630 return EmitLValue(E).getAddress();
2631}
2632
2633void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
2634 const APValue &Init) {
2635 assert(Init.hasValue() && "Invalid DeclRefExpr initializer!");
2636 if (CGDebugInfo *Dbg = getDebugInfo())
2637 if (CGM.getCodeGenOpts().hasReducedDebugInfo())
2638 Dbg->EmitGlobalVariable(E->getDecl(), Init);
2639}
2640
2641CodeGenFunction::PeepholeProtection
2642CodeGenFunction::protectFromPeepholes(RValue rvalue) {
2643 // At the moment, the only aggressive peephole we do in IR gen
2644 // is trunc(zext) folding, but if we add more, we can easily
2645 // extend this protection.
2646
2647 if (!rvalue.isScalar()) return PeepholeProtection();
2648 llvm::Value *value = rvalue.getScalarVal();
2649 if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();
2650
2651 // Just make an extra bitcast.
2652 assert(HaveInsertPoint());
2653 llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
2654 Builder.GetInsertBlock());
2655
2656 PeepholeProtection protection;
2657 protection.Inst = inst;
2658 return protection;
2659}
2660
2661void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
2662 if (!protection.Inst) return;
2663
2664 // In theory, we could try to duplicate the peepholes now, but whatever.
2665 protection.Inst->eraseFromParent();
2666}
2667
2668void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
2669 QualType Ty, SourceLocation Loc,
2670 SourceLocation AssumptionLoc,
2671 llvm::Value *Alignment,
2672 llvm::Value *OffsetValue) {
2673 if (Alignment->getType() != IntPtrTy)
2674 Alignment =
2675 Builder.CreateIntCast(Alignment, IntPtrTy, false, "casted.align");
2676 if (OffsetValue && OffsetValue->getType() != IntPtrTy)
2677 OffsetValue =
2678 Builder.CreateIntCast(OffsetValue, IntPtrTy, true, "casted.offset");
2679 llvm::Value *TheCheck = nullptr;
2680 if (SanOpts.has(SanitizerKind::Alignment)) {
2681 llvm::Value *PtrIntValue =
2682 Builder.CreatePtrToInt(PtrValue, IntPtrTy, "ptrint");
2683
2684 if (OffsetValue) {
2685 bool IsOffsetZero = false;
2686 if (const auto *CI = dyn_cast<llvm::ConstantInt>(OffsetValue))
2687 IsOffsetZero = CI->isZero();
2688
2689 if (!IsOffsetZero)
2690 PtrIntValue = Builder.CreateSub(PtrIntValue, OffsetValue, "offsetptr");
2691 }
2692
2693 llvm::Value *Zero = llvm::ConstantInt::get(IntPtrTy, 0);
2694 llvm::Value *Mask =
2695 Builder.CreateSub(Alignment, llvm::ConstantInt::get(IntPtrTy, 1));
2696 llvm::Value *MaskedPtr = Builder.CreateAnd(PtrIntValue, Mask, "maskedptr");
2697 TheCheck = Builder.CreateICmpEQ(MaskedPtr, Zero, "maskcond");
2698 }
2699 llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
2700 CGM.getDataLayout(), PtrValue, Alignment, OffsetValue);
2701
2702 if (!SanOpts.has(SanitizerKind::Alignment))
2703 return;
2704 emitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2705 OffsetValue, TheCheck, Assumption);
2706}
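// Illustrative sketch (not part of the upstream file): this is the lowering
// behind
//
//   void *q = __builtin_assume_aligned(p, 64, 8);
//
// which becomes an llvm.assume alignment assumption on p, plus an optional
// -fsanitize=alignment check that p minus the offset 8 really is 64-byte
// aligned.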
2707
2708void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
2709 const Expr *E,
2710 SourceLocation AssumptionLoc,
2711 llvm::Value *Alignment,
2712 llvm::Value *OffsetValue) {
2713 QualType Ty = E->getType();
2714 SourceLocation Loc = E->getExprLoc();
2715
2716 emitAlignmentAssumption(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2717 OffsetValue);
2718}
2719
2720llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Function *AnnotationFn,
2721 llvm::Value *AnnotatedVal,
2722 StringRef AnnotationStr,
2723 SourceLocation Location,
2724 const AnnotateAttr *Attr) {
2725 SmallVector<llvm::Value *, 5> Args = {
2726 AnnotatedVal,
2727 CGM.EmitAnnotationString(AnnotationStr),
2728 CGM.EmitAnnotationUnit(Location),
2729 CGM.EmitAnnotationLineNo(Location),
2730 };
2731 if (Attr)
2732 Args.push_back(CGM.EmitAnnotationArgs(Attr));
2733 return Builder.CreateCall(AnnotationFn, Args);
2734}
2735
2736void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
2737 assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2738 for (const auto *I : D->specific_attrs<AnnotateAttr>())
2739 EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation,
2740 {V->getType(), CGM.ConstGlobalsPtrTy}),
2741 V, I->getAnnotation(), D->getLocation(), I);
2742}
2743
2744Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
2745 Address Addr) {
2746 assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2747 llvm::Value *V = Addr.emitRawPointer(*this);
2748 llvm::Type *VTy = V->getType();
2749 auto *PTy = dyn_cast<llvm::PointerType>(VTy);
2750 unsigned AS = PTy ? PTy->getAddressSpace() : 0;
2751 llvm::PointerType *IntrinTy =
2752 llvm::PointerType::get(CGM.getLLVMContext(), AS);
2753 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
2754 {IntrinTy, CGM.ConstGlobalsPtrTy});
2755
2756 for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
2757 // FIXME Always emit the cast inst so we can differentiate between
2758 // annotation on the first field of a struct and annotation on the struct
2759 // itself.
2760 if (VTy != IntrinTy)
2761 V = Builder.CreateBitCast(V, IntrinTy);
2762 V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation(), I);
2763 V = Builder.CreateBitCast(V, VTy);
2764 }
2765
2766 return Address(V, Addr.getElementType(), Addr.getAlignment());
2767}
2768
2769CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { }
2770
2771CodeGenFunction::SanitizerScope::SanitizerScope(CodeGenFunction *CGF)
2772 : CGF(CGF) {
2773 assert(!CGF->IsSanitizerScope);
2774 CGF->IsSanitizerScope = true;
2775}
2776
2777CodeGenFunction::SanitizerScope::~SanitizerScope() {
2778 CGF->IsSanitizerScope = false;
2779}
2780
2781void CodeGenFunction::InsertHelper(llvm::Instruction *I,
2782 const llvm::Twine &Name,
2783 llvm::BasicBlock::iterator InsertPt) const {
2784 LoopStack.InsertHelper(I);
2785 if (IsSanitizerScope)
2786 I->setNoSanitizeMetadata();
2787}
2788
2789void CGBuilderInserter::InsertHelper(
2790 llvm::Instruction *I, const llvm::Twine &Name,
2791 llvm::BasicBlock::iterator InsertPt) const {
2792 llvm::IRBuilderDefaultInserter::InsertHelper(I, Name, InsertPt);
2793 if (CGF)
2794 CGF->InsertHelper(I, Name, InsertPt);
2795}
2796
2797// Emits an error if we don't have a valid set of target features for the
2798// called function.
2799void CodeGenFunction::checkTargetFeatures(const CallExpr *E,
2800 const FunctionDecl *TargetDecl) {
2801 // SemaChecking cannot handle the x86 builtins below because their valid
2802 // parameter ranges depend on the target attributes of the caller.
2803 if (CGM.getContext().getTargetInfo().getTriple().isX86()) {
2804 unsigned BuiltinID = TargetDecl->getBuiltinID();
2805 if (BuiltinID == X86::BI__builtin_ia32_cmpps ||
2806 BuiltinID == X86::BI__builtin_ia32_cmpss ||
2807 BuiltinID == X86::BI__builtin_ia32_cmppd ||
2808 BuiltinID == X86::BI__builtin_ia32_cmpsd) {
2809 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl);
2810 llvm::StringMap<bool> TargetFeatureMap;
2811 CGM.getContext().getFunctionFeatureMap(TargetFeatureMap, FD);
2812 llvm::APSInt Result =
2813 *(E->getArg(2)->getIntegerConstantExpr(CGM.getContext()));
2814 if (Result.getSExtValue() > 7 && !TargetFeatureMap.lookup("avx"))
2815 CGM.getDiags().Report(E->getBeginLoc(), diag::err_builtin_needs_feature)
2816 << TargetDecl->getDeclName() << "avx";
2817 }
2818 }
2819 return checkTargetFeatures(E->getBeginLoc(), TargetDecl);
2820}
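// Illustrative sketch (not part of the upstream file): this special case
// rejects, e.g.,
//
//   __builtin_ia32_cmpps(a, b, 8)   // compiled without -mavx
//
// because compare predicates above 7 exist only in the VEX-encoded AVX forms
// of the instruction, which Sema alone cannot diagnose without knowing the
// caller's target features.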
2821
2822// Emits an error if we don't have a valid set of target features for the
2823// called function.
2824void CodeGenFunction::checkTargetFeatures(SourceLocation Loc,
2825 const FunctionDecl *TargetDecl) {
2826 // Early exit if this is an indirect call.
2827 if (!TargetDecl)
2828 return;
2829
2830 // Get the current enclosing function if it exists. If it doesn't
2831 // we can't check the target features anyhow.
2832 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl);
2833 if (!FD)
2834 return;
2835
2836 bool IsAlwaysInline = TargetDecl->hasAttr<AlwaysInlineAttr>();
2837 bool IsFlatten = FD && FD->hasAttr<FlattenAttr>();
2838
2839 // Grab the required features for the call. For a builtin, these are listed
2840 // in the td file with the default cpu; for an always_inline function, they
2841 // are any listed cpu and any listed features.
2842 unsigned BuiltinID = TargetDecl->getBuiltinID();
2843 std::string MissingFeature;
2844 llvm::StringMap<bool> CallerFeatureMap;
2845 CGM.getContext().getFunctionFeatureMap(CallerFeatureMap, FD);
2846 // When compiling in HipStdPar mode we have to be conservative in rejecting
2847 // target specific features in the FE, and defer the possible error to the
2848 // AcceleratorCodeSelection pass, which emits an error only if an unsupported
2849 // target builtin is referenced by an accelerator executable function.
2850 bool IsHipStdPar = getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice;
2851 if (BuiltinID) {
2852 StringRef FeatureList(CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID));
2853 if (!Builtin::evaluateRequiredTargetFeatures(
2854 FeatureList, CallerFeatureMap) && !IsHipStdPar) {
2855 CGM.getDiags().Report(Loc, diag::err_builtin_needs_feature)
2856 << TargetDecl->getDeclName()
2857 << FeatureList;
2858 }
2859 } else if (!TargetDecl->isMultiVersion() &&
2860 TargetDecl->hasAttr<TargetAttr>()) {
2861 // Get the required features for the callee.
2862
2863 const TargetAttr *TD = TargetDecl->getAttr<TargetAttr>();
2864 ParsedTargetAttr ParsedAttr =
2865 CGM.getContext().filterFunctionTargetAttrs(TD);
2866
2867 SmallVector<StringRef, 1> ReqFeatures;
2868 llvm::StringMap<bool> CalleeFeatureMap;
2869 CGM.getContext().getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);
2870
2871 for (const auto &F : ParsedAttr.Features) {
2872 if (F[0] == '+' && CalleeFeatureMap.lookup(F.substr(1)))
2873 ReqFeatures.push_back(StringRef(F).substr(1));
2874 }
2875
2876 for (const auto &F : CalleeFeatureMap) {
2877 // Only positive features are "required".
2878 if (F.getValue())
2879 ReqFeatures.push_back(F.getKey());
2880 }
2881 if (!llvm::all_of(ReqFeatures,
2882 [&](StringRef Feature) {
2883 if (!CallerFeatureMap.lookup(Feature)) {
2884 MissingFeature = Feature.str();
2885 return false;
2886 }
2887 return true;
2888 }) &&
2889 !IsHipStdPar) {
2890 if (IsAlwaysInline)
2891 CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
2892 << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
2893 else if (IsFlatten)
2894 CGM.getDiags().Report(Loc, diag::err_flatten_function_needs_feature)
2895 << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
2896 }
2897
2898 } else if (!FD->isMultiVersion() && FD->hasAttr<TargetAttr>()) {
2899 llvm::StringMap<bool> CalleeFeatureMap;
2900 CGM.getContext().getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);
2901
2902 for (const auto &F : CalleeFeatureMap) {
2903 if (F.getValue() &&
2904 (!CallerFeatureMap.lookup(F.getKey()) ||
2905 !CallerFeatureMap.find(F.getKey())->getValue()) &&
2906 !IsHipStdPar) {
2907 if (IsAlwaysInline)
2908 CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
2909 << FD->getDeclName() << TargetDecl->getDeclName() << F.getKey();
2910 else if (IsFlatten)
2911 CGM.getDiags().Report(Loc, diag::err_flatten_function_needs_feature)
2912 << FD->getDeclName() << TargetDecl->getDeclName() << F.getKey();
2913 }
2914 }
2915 }
2916}
2917
2918void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
2919 if (!CGM.getCodeGenOpts().SanitizeStats)
2920 return;
2921
2922 llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
2923 IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
2924 CGM.getSanStats().create(IRB, SSK);
2925}
2926
2927void CodeGenFunction::EmitKCFIOperandBundle(
2928 const CGCallee &Callee, SmallVectorImpl<llvm::OperandBundleDef> &Bundles) {
2929 const CGCalleeInfo &CI = Callee.getAbstractInfo();
2930 const FunctionProtoType *FP = CI.getCalleeFunctionProtoType();
2931 if (!FP)
2932 return;
2933
2934 StringRef Salt;
2935 if (const auto &Info = FP->getExtraAttributeInfo())
2936 Salt = Info.CFISalt;
2937
2938 Bundles.emplace_back("kcfi", CGM.CreateKCFITypeId(FP->desugar(), Salt));
2939}
2940
2941llvm::Value *
2942CodeGenFunction::FormAArch64ResolverCondition(const FMVResolverOption &RO) {
2943 return RO.Features.empty() ? nullptr : EmitAArch64CpuSupports(RO.Features);
2944}
2945
2946llvm::Value *
2947CodeGenFunction::FormX86ResolverCondition(const FMVResolverOption &RO) {
2948 llvm::Value *Condition = nullptr;
2949
2950 if (RO.Architecture) {
2951 StringRef Arch = *RO.Architecture;
2952 // If arch= specifies an x86-64 micro-architecture level, test the feature
2953 // with __builtin_cpu_supports, otherwise use __builtin_cpu_is.
2954 if (Arch.starts_with("x86-64"))
2955 Condition = EmitX86CpuSupports({Arch});
2956 else
2957 Condition = EmitX86CpuIs(Arch);
2958 }
2959
2960 if (!RO.Features.empty()) {
2961 llvm::Value *FeatureCond = EmitX86CpuSupports(RO.Features);
2962 Condition =
2963 Condition ? Builder.CreateAnd(Condition, FeatureCond) : FeatureCond;
2964 }
2965 return Condition;
2966}
2967
2968static void CreateMultiVersionResolverReturn(CodeGenModule &CGM,
2969 llvm::Function *Resolver,
2970 CGBuilderTy &Builder,
2971 llvm::Function *FuncToReturn,
2972 bool SupportsIFunc) {
2973 if (SupportsIFunc) {
2974 Builder.CreateRet(FuncToReturn);
2975 return;
2976 }
2977
2978 SmallVector<llvm::Value *, 10> Args(
2979 llvm::make_pointer_range(Resolver->args()));
2980
2981 llvm::CallInst *Result = Builder.CreateCall(FuncToReturn, Args);
2982 Result->setTailCallKind(llvm::CallInst::TCK_MustTail);
2983
2984 if (Resolver->getReturnType()->isVoidTy())
2985 Builder.CreateRetVoid();
2986 else
2987 Builder.CreateRet(Result);
2988}
2989
2990void CodeGenFunction::EmitMultiVersionResolver(
2991 llvm::Function *Resolver, ArrayRef<FMVResolverOption> Options) {
2992
2993 llvm::Triple::ArchType ArchType =
2994 getContext().getTargetInfo().getTriple().getArch();
2995
2996 switch (ArchType) {
2997 case llvm::Triple::x86:
2998 case llvm::Triple::x86_64:
2999 EmitX86MultiVersionResolver(Resolver, Options);
3000 return;
3001 case llvm::Triple::aarch64:
3002 EmitAArch64MultiVersionResolver(Resolver, Options);
3003 return;
3004 case llvm::Triple::riscv32:
3005 case llvm::Triple::riscv64:
3006 EmitRISCVMultiVersionResolver(Resolver, Options);
3007 return;
3008
3009 default:
3010 assert(false && "Only implemented for x86, AArch64 and RISC-V targets");
3011 }
3012}
3013
3014void CodeGenFunction::EmitRISCVMultiVersionResolver(
3015 llvm::Function *Resolver, ArrayRef<FMVResolverOption> Options) {
3016
3017 if (getContext().getTargetInfo().getTriple().getOS() !=
3018 llvm::Triple::OSType::Linux) {
3019 CGM.getDiags().Report(diag::err_os_unsupport_riscv_fmv);
3020 return;
3021 }
3022
3023 llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
3024 Builder.SetInsertPoint(CurBlock);
3025 EmitRISCVCpuInit();
3026
3027 bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
3028 bool HasDefault = false;
3029 unsigned DefaultIndex = 0;
3030
3031 // Check each candidate function.
3032 for (unsigned Index = 0; Index < Options.size(); Index++) {
3033
3034 if (Options[Index].Features.empty()) {
3035 HasDefault = true;
3036 DefaultIndex = Index;
3037 continue;
3038 }
3039
3040 Builder.SetInsertPoint(CurBlock);
3041
3042 // FeaturesCondition: The bitmask of the required extension has been
3043 // enabled by the runtime object.
3044 // (__riscv_feature_bits.features[i] & REQUIRED_BITMASK) ==
3045 // REQUIRED_BITMASK
3046 //
3047 // When condition is met, return this version of the function.
3048 // Otherwise, try the next version.
3049 //
3050 // if (FeaturesConditionVersion1)
3051 // return Version1;
3052 // else if (FeaturesConditionVersion2)
3053 // return Version2;
3054 // else if (FeaturesConditionVersion3)
3055 // return Version3;
3056 // ...
3057 // else
3058 // return DefaultVersion;
3059
3060 // TODO: Add a condition to check the length before accessing elements.
3061 // Without checking the length first, we may access an incorrect memory
3062 // address when using different versions.
3063 llvm::SmallVector<StringRef, 8> CurrTargetAttrFeats;
3064 llvm::SmallVector<std::string, 8> TargetAttrFeats;
3065
3066 for (StringRef Feat : Options[Index].Features) {
3067 std::vector<std::string> FeatStr =
3068 getContext().getTargetInfo().parseTargetAttr(Feat).Features;
3069
3070 assert(FeatStr.size() == 1 && "Feature string not delimited");
3071
3072 std::string &CurrFeat = FeatStr.front();
3073 if (CurrFeat[0] == '+')
3074 TargetAttrFeats.push_back(CurrFeat.substr(1));
3075 }
3076
3077 if (TargetAttrFeats.empty())
3078 continue;
3079
3080 for (std::string &Feat : TargetAttrFeats)
3081 CurrTargetAttrFeats.push_back(Feat);
3082
3083 Builder.SetInsertPoint(CurBlock);
3084 llvm::Value *FeatsCondition = EmitRISCVCpuSupports(CurrTargetAttrFeats);
3085
3086 llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
3087 CGBuilderTy RetBuilder(*this, RetBlock);
3088 CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder,
3089 Options[Index].Function, SupportsIFunc);
3090 llvm::BasicBlock *ElseBlock = createBasicBlock("resolver_else", Resolver);
3091
3092 Builder.SetInsertPoint(CurBlock);
3093 Builder.CreateCondBr(FeatsCondition, RetBlock, ElseBlock);
3094
3095 CurBlock = ElseBlock;
3096 }
3097
3098 // Finally, emit the default one.
3099 if (HasDefault) {
3100 Builder.SetInsertPoint(CurBlock);
3101 CreateMultiVersionResolverReturn(
3102 CGM, Resolver, Builder, Options[DefaultIndex].Function, SupportsIFunc);
3103 return;
3104 }
3105
3106 // If no generic/default, emit an unreachable.
3107 Builder.SetInsertPoint(CurBlock);
3108 llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
3109 TrapCall->setDoesNotReturn();
3110 TrapCall->setDoesNotThrow();
3111 Builder.CreateUnreachable();
3112 Builder.ClearInsertionPoint();
3113}
3114
3115void CodeGenFunction::EmitAArch64MultiVersionResolver(
3116 llvm::Function *Resolver, ArrayRef<FMVResolverOption> Options) {
3117 assert(!Options.empty() && "No multiversion resolver options found");
3118 assert(Options.back().Features.size() == 0 && "Default case must be last");
3119 bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
3120 assert(SupportsIFunc &&
3121 "Multiversion resolver requires target IFUNC support");
3122 bool AArch64CpuInitialized = false;
3123 llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
3124
3125 for (const FMVResolverOption &RO : Options) {
3126 Builder.SetInsertPoint(CurBlock);
3127 llvm::Value *Condition = FormAArch64ResolverCondition(RO);
3128
3129 // The 'default' or 'all features enabled' case.
3130 if (!Condition) {
3131 CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
3132 SupportsIFunc);
3133 return;
3134 }
3135
3136 if (!AArch64CpuInitialized) {
3137 Builder.SetInsertPoint(CurBlock, CurBlock->begin());
3138 EmitAArch64CpuInit();
3139 AArch64CpuInitialized = true;
3140 Builder.SetInsertPoint(CurBlock);
3141 }
3142
3143 // Skip unreachable versions.
3144 if (RO.Function == nullptr)
3145 continue;
3146
3147 llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
3148 CGBuilderTy RetBuilder(*this, RetBlock);
3149 CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
3150 SupportsIFunc);
3151 CurBlock = createBasicBlock("resolver_else", Resolver);
3152 Builder.CreateCondBr(Condition, RetBlock, CurBlock);
3153 }
3154
3155 // If no default, emit an unreachable.
3156 Builder.SetInsertPoint(CurBlock);
3157 llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
3158 TrapCall->setDoesNotReturn();
3159 TrapCall->setDoesNotThrow();
3160 Builder.CreateUnreachable();
3161 Builder.ClearInsertionPoint();
3162}
3163
3164void CodeGenFunction::EmitX86MultiVersionResolver(
3165 llvm::Function *Resolver, ArrayRef<FMVResolverOption> Options) {
3166
3167 bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
3168
3169 // Main function's basic block.
3170 llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
3171 Builder.SetInsertPoint(CurBlock);
3172 EmitX86CpuInit();
3173
3174 for (const FMVResolverOption &RO : Options) {
3175 Builder.SetInsertPoint(CurBlock);
3176 llvm::Value *Condition = FormX86ResolverCondition(RO);
3177
3178 // The 'default' or 'generic' case.
3179 if (!Condition) {
3180 assert(&RO == Options.end() - 1 &&
3181 "Default or Generic case must be last");
3182 CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
3183 SupportsIFunc);
3184 return;
3185 }
3186
3187 llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
3188 CGBuilderTy RetBuilder(*this, RetBlock);
3189 CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
3190 SupportsIFunc);
3191 CurBlock = createBasicBlock("resolver_else", Resolver);
3192 Builder.CreateCondBr(Condition, RetBlock, CurBlock);
3193 }
3194
3195 // If no generic/default, emit an unreachable.
3196 Builder.SetInsertPoint(CurBlock);
3197 llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
3198 TrapCall->setDoesNotReturn();
3199 TrapCall->setDoesNotThrow();
3200 Builder.CreateUnreachable();
3201 Builder.ClearInsertionPoint();
3202}
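
// Editor's note: all three resolvers above build the same branch chain; a
// rough IR sketch (block names match the createBasicBlock calls, the rest
// is illustrative):
//
//   resolver_entry:
//     ; cpu-init runtime call, then the first option's feature test
//     br i1 %cond, label %resolver_return, label %resolver_else
//   resolver_return:
//     ret ptr @f.variant            ; IFUNC case: return the chosen version
//   resolver_else:
//     ...                           ; next test, default return, or trap
//
// When the target lacks IFUNC support, CreateMultiVersionResolverReturn
// emits a forwarding call to the chosen version instead of returning its
// address.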
3203
3204// Loc - where the diagnostic will point, where in the source code this
3205// alignment has failed.
3206// SecondaryLoc - if present (will be present if sufficiently different from
3207 // Loc), the diagnostic will additionally attach a "Note:" pointing to this
3208 // location. It should be the location where the __attribute__((assume_aligned))
3209 // was written, e.g. on the declaration of the called function.
3210 void CodeGenFunction::emitAlignmentAssumptionCheck(
3211 llvm::Value *Ptr, QualType Ty, SourceLocation Loc,
3212 SourceLocation SecondaryLoc, llvm::Value *Alignment,
3213 llvm::Value *OffsetValue, llvm::Value *TheCheck,
3214 llvm::Instruction *Assumption) {
3215 assert(isa_and_nonnull<llvm::CallInst>(Assumption) &&
3216 cast<llvm::CallInst>(Assumption)->getCalledOperand() ==
3217 llvm::Intrinsic::getOrInsertDeclaration(
3218 Builder.GetInsertBlock()->getParent()->getParent(),
3219 llvm::Intrinsic::assume) &&
3220 "Assumption should be a call to llvm.assume().");
3221 assert(&(Builder.GetInsertBlock()->back()) == Assumption &&
3222 "Assumption should be the last instruction of the basic block, "
3223 "since the basic block is still being generated.");
3224
3225 if (!SanOpts.has(SanitizerKind::Alignment))
3226 return;
3227
3228 // Don't check pointers to volatile data. The behavior here is implementation-
3229 // defined.
3230 if (Ty->getPointeeType().isVolatileQualified())
3231 return;
3232
3233 // We need to temporarily remove the assumption so we can insert the
3234 // sanitizer check before it; otherwise the check will be dropped by optimizations.
3235 Assumption->removeFromParent();
3236
3237 {
3238 auto CheckOrdinal = SanitizerKind::SO_Alignment;
3239 auto CheckHandler = SanitizerHandler::AlignmentAssumption;
3240 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
3241
3242 if (!OffsetValue)
3243 OffsetValue = Builder.getInt1(false); // no offset.
3244
3245 llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc),
3246 EmitCheckSourceLocation(SecondaryLoc),
3247 EmitCheckTypeDescriptor(Ty)};
3248 llvm::Value *DynamicData[] = {Ptr, Alignment, OffsetValue};
3249 EmitCheck({std::make_pair(TheCheck, CheckOrdinal)}, CheckHandler,
3250 StaticData, DynamicData);
3251 }
3252
3253 // We are now in the (new, empty) "cont" basic block.
3254 // Reintroduce the assumption.
3255 Builder.Insert(Assumption);
3256 // FIXME: Assumption still has its original basic block as its parent.
3257}
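
// Editor's note: a minimal sketch of source that exercises this path when
// compiled with -fsanitize=alignment (identifiers illustrative):
//
//   void *make_buffer(void) __attribute__((assume_aligned(64)));
//
// At a call site, codegen emits an llvm.assume for the 64-byte alignment;
// the code above detaches that assumption, emits the UBSan check on
// Ptr/Alignment/OffsetValue in its place, and re-inserts the assumption in
// the fresh continuation block so the optimizer still sees it.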
3258
3259 llvm::DebugLoc CodeGenFunction::SourceLocToDebugLoc(SourceLocation Location) {
3260 if (CGDebugInfo *DI = getDebugInfo())
3261 return DI->SourceLocToDebugLoc(Location);
3262
3263 return llvm::DebugLoc();
3264}
3265
3266llvm::Value *
3267CodeGenFunction::emitCondLikelihoodViaExpectIntrinsic(llvm::Value *Cond,
3268 Stmt::Likelihood LH) {
3269 switch (LH) {
3270 case Stmt::LH_None:
3271 return Cond;
3272 case Stmt::LH_Likely:
3273 case Stmt::LH_Unlikely:
3274 // Don't generate llvm.expect on -O0 as the backend won't use it for
3275 // anything.
3276 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
3277 return Cond;
3278 llvm::Type *CondTy = Cond->getType();
3279 assert(CondTy->isIntegerTy(1) && "expecting condition to be a boolean");
3280 llvm::Function *FnExpect =
3281 CGM.getIntrinsic(llvm::Intrinsic::expect, CondTy);
3282 llvm::Value *ExpectedValueOfCond =
3283 llvm::ConstantInt::getBool(CondTy, LH == Stmt::LH_Likely);
3284 return Builder.CreateCall(FnExpect, {Cond, ExpectedValueOfCond},
3285 Cond->getName() + ".expval");
3286 }
3287 llvm_unreachable("Unknown Likelihood");
3288}
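
// Editor's note: a worked example of the mapping above. Given C++ like
//
//   if (ptr != nullptr) [[likely]] { use(ptr); }
//
// at -O1 and higher the i1 condition %cmp is routed through
//
//   %cmp.expval = call i1 @llvm.expect.i1(i1 %cmp, i1 true)
//
// with `i1 false` as the expected value for [[unlikely]]; at -O0 the
// condition is returned untouched because the backend ignores llvm.expect.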
3289
3290llvm::Value *CodeGenFunction::emitBoolVecConversion(llvm::Value *SrcVec,
3291 unsigned NumElementsDst,
3292 const llvm::Twine &Name) {
3293 auto *SrcTy = cast<llvm::FixedVectorType>(SrcVec->getType());
3294 unsigned NumElementsSrc = SrcTy->getNumElements();
3295 if (NumElementsSrc == NumElementsDst)
3296 return SrcVec;
3297
3298 std::vector<int> ShuffleMask(NumElementsDst, -1);
3299 for (unsigned MaskIdx = 0;
3300 MaskIdx < std::min<>(NumElementsDst, NumElementsSrc); ++MaskIdx)
3301 ShuffleMask[MaskIdx] = MaskIdx;
3302
3303 return Builder.CreateShuffleVector(SrcVec, ShuffleMask, Name);
3304}
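
// Editor's note: a worked example of the mask built above, assuming a
// 4-element source widened to an 8-element destination:
//
//   ShuffleMask = {0, 1, 2, 3, -1, -1, -1, -1}
//
// i.e. the low min(NumElementsSrc, NumElementsDst) lanes are copied and
// the remaining destination lanes stay undefined (-1 is shufflevector's
// "don't care" index); narrowing simply drops the high source lanes.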
3305
3306 void CodeGenFunction::EmitPointerAuthOperandBundle(
3307 const CGPointerAuthInfo &PointerAuth,
3308 SmallVectorImpl<llvm::OperandBundleDef> &Bundles) {
3309 if (!PointerAuth.isSigned())
3310 return;
3311
3312 auto *Key = Builder.getInt32(PointerAuth.getKey());
3313
3314 llvm::Value *Discriminator = PointerAuth.getDiscriminator();
3315 if (!Discriminator)
3316 Discriminator = Builder.getSize(0);
3317
3318 llvm::Value *Args[] = {Key, Discriminator};
3319 Bundles.emplace_back("ptrauth", Args);
3320}
3321
3322 static llvm::Value *EmitPointerAuthCommon(CodeGenFunction &CGF,
3323 const CGPointerAuthInfo &PointerAuth,
3324 llvm::Value *Pointer,
3325 unsigned IntrinsicID) {
3326 if (!PointerAuth)
3327 return Pointer;
3328
3329 auto Key = CGF.Builder.getInt32(PointerAuth.getKey());
3330
3331 llvm::Value *Discriminator = PointerAuth.getDiscriminator();
3332 if (!Discriminator) {
3333 Discriminator = CGF.Builder.getSize(0);
3334 }
3335
3336 // Convert the pointer to intptr_t before signing or authenticating it.
3337 auto OrigType = Pointer->getType();
3338 Pointer = CGF.Builder.CreatePtrToInt(Pointer, CGF.IntPtrTy);
3339
3340 // call i64 @llvm.ptrauth.sign(i64 %pointer, i32 %key, i64 %discriminator)
3341 auto Intrinsic = CGF.CGM.getIntrinsic(IntrinsicID);
3342 Pointer = CGF.EmitRuntimeCall(Intrinsic, {Pointer, Key, Discriminator});
3343
3344 // Convert back to the original type.
3345 Pointer = CGF.Builder.CreateIntToPtr(Pointer, OrigType);
3346 return Pointer;
3347}
3348
3349llvm::Value *
3350 CodeGenFunction::EmitPointerAuthSign(const CGPointerAuthInfo &PointerAuth,
3351 llvm::Value *Pointer) {
3352 if (!PointerAuth.shouldSign())
3353 return Pointer;
3354 return EmitPointerAuthCommon(*this, PointerAuth, Pointer,
3355 llvm::Intrinsic::ptrauth_sign);
3356}
3357
3358static llvm::Value *EmitStrip(CodeGenFunction &CGF,
3359 const CGPointerAuthInfo &PointerAuth,
3360 llvm::Value *Pointer) {
3361 auto StripIntrinsic = CGF.CGM.getIntrinsic(llvm::Intrinsic::ptrauth_strip);
3362
3363 auto Key = CGF.Builder.getInt32(PointerAuth.getKey());
3364 // Convert the pointer to intptr_t before stripping it.
3365 auto OrigType = Pointer->getType();
3366 Pointer = CGF.EmitRuntimeCall(
3367 StripIntrinsic, {CGF.Builder.CreatePtrToInt(Pointer, CGF.IntPtrTy), Key});
3368 return CGF.Builder.CreateIntToPtr(Pointer, OrigType);
3369}
3370
3371llvm::Value *
3372 CodeGenFunction::EmitPointerAuthAuth(const CGPointerAuthInfo &PointerAuth,
3373 llvm::Value *Pointer) {
3374 if (PointerAuth.shouldStrip()) {
3375 return EmitStrip(*this, PointerAuth, Pointer);
3376 }
3377 if (!PointerAuth.shouldAuth()) {
3378 return Pointer;
3379 }
3380
3381 return EmitPointerAuthCommon(*this, PointerAuth, Pointer,
3382 llvm::Intrinsic::ptrauth_auth);
3383}
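
// Editor's note: a hedged sketch of what the sign/auth helpers above lower
// to, following the ptrtoint/inttoptr round trip in EmitPointerAuthCommon
// (value names illustrative):
//
//   %raw    = ptrtoint ptr %p to i64
//   %signed = call i64 @llvm.ptrauth.sign(i64 %raw, i32 %key, i64 %disc)
//   %out    = inttoptr i64 %signed to ptr
//
// EmitPointerAuthAuth swaps in @llvm.ptrauth.auth with the same operands,
// while the shouldStrip() path in EmitStrip uses @llvm.ptrauth.strip,
// which takes only the raw value and the key.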
3384
3385 void CodeGenFunction::addInstToCurrentSourceAtom(
3386 llvm::Instruction *KeyInstruction, llvm::Value *Backup) {
3387 if (CGDebugInfo *DI = getDebugInfo())
3388 DI->addInstToCurrentSourceAtom(KeyInstruction, Backup);
3389}
3390
3391 void CodeGenFunction::addInstToSpecificSourceAtom(
3392 llvm::Instruction *KeyInstruction, llvm::Value *Backup, uint64_t Atom) {
3393 if (CGDebugInfo *DI = getDebugInfo())
3394 DI->addInstToSpecificSourceAtom(KeyInstruction, Backup, Atom);
3395}
3396
3397void CodeGenFunction::addInstToNewSourceAtom(llvm::Instruction *KeyInstruction,
3398 llvm::Value *Backup) {
3399 if (CGDebugInfo *DI = getDebugInfo()) {
3400 ApplyAtomGroup Grp(getDebugInfo());
3401 DI->addInstToCurrentSourceAtom(KeyInstruction, Backup);
3402 }
3403}