1//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This coordinates the per-function state used while generating code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CodeGenFunction.h"
14#include "CGBlocks.h"
15#include "CGCUDARuntime.h"
16#include "CGCXXABI.h"
17#include "CGCleanup.h"
18#include "CGDebugInfo.h"
19#include "CGHLSLRuntime.h"
20#include "CGOpenMPRuntime.h"
21#include "CodeGenModule.h"
22#include "CodeGenPGO.h"
23#include "TargetInfo.h"
25#include "clang/AST/ASTLambda.h"
26#include "clang/AST/Attr.h"
27#include "clang/AST/Decl.h"
28#include "clang/AST/DeclCXX.h"
29#include "clang/AST/Expr.h"
30#include "clang/AST/StmtCXX.h"
31#include "clang/AST/StmtObjC.h"
38#include "llvm/ADT/ArrayRef.h"
39#include "llvm/ADT/ScopeExit.h"
40#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
41#include "llvm/IR/DataLayout.h"
42#include "llvm/IR/Dominators.h"
43#include "llvm/IR/FPEnv.h"
44#include "llvm/IR/Instruction.h"
45#include "llvm/IR/IntrinsicInst.h"
46#include "llvm/IR/Intrinsics.h"
47#include "llvm/IR/MDBuilder.h"
48#include "llvm/Support/CRC.h"
49#include "llvm/Support/xxhash.h"
50#include "llvm/Transforms/Scalar/LowerExpectIntrinsic.h"
51#include "llvm/Transforms/Utils/PromoteMemToReg.h"
52#include <optional>
53
54using namespace clang;
55using namespace CodeGen;
56
57namespace llvm {
58extern cl::opt<bool> EnableSingleByteCoverage;
59} // namespace llvm
60
61/// shouldEmitLifetimeMarkers - Decide whether we need to emit the lifetime
62/// markers.
63static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
64 const LangOptions &LangOpts) {
65 if (CGOpts.DisableLifetimeMarkers)
66 return false;
67
68 // Sanitizers may use markers.
69 if (CGOpts.SanitizeAddressUseAfterScope ||
70 LangOpts.Sanitize.has(SanitizerKind::HWAddress) ||
71 LangOpts.Sanitize.has(SanitizerKind::Memory))
72 return true;
73
74 // For now, only in optimized builds.
75 return CGOpts.OptimizationLevel != 0;
76}
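// Illustration (editorial, not part of the original file): when this predicate
// returns true, scoped locals are bracketed with LLVM's lifetime intrinsics,
// e.g. (classic form; the intrinsic's arity varies by LLVM version):
//
//   %x = alloca i32, align 4
//   call void @llvm.lifetime.start.p0(i64 4, ptr %x)
//   ...uses of %x...
//   call void @llvm.lifetime.end.p0(i64 4, ptr %x)
//
// which lets stack coloring reuse slots and gives ASan/HWASan/MSan the scope
// boundaries they need for use-after-scope checking.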
77
78CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
79 : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
80 Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
81 CGBuilderInserterTy(this)),
82 SanOpts(CGM.getLangOpts().Sanitize), CurFPFeatures(CGM.getLangOpts()),
83 DebugInfo(CGM.getModuleDebugInfo()),
84 PGO(std::make_unique<CodeGenPGO>(cgm)),
85 ShouldEmitLifetimeMarkers(
86 shouldEmitLifetimeMarkers(CGM.getCodeGenOpts(), CGM.getLangOpts())) {
87 if (!suppressNewContext)
88 CGM.getCXXABI().getMangleContext().startNewFunction();
89 EHStack.setCGF(this);
90
91 SetFastMathFlags(CurFPFeatures);
92}
93
94CodeGenFunction::~CodeGenFunction() {
95 assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");
96 assert(DeferredDeactivationCleanupStack.empty() &&
97 "missed to deactivate a cleanup");
98
99 if (getLangOpts().OpenMP && CurFn)
100 CGM.getOpenMPRuntime().functionFinished(*this);
101
102 // If we have an OpenMPIRBuilder we want to finalize functions (incl.
103 // outlining etc) at some point. Doing it once the function codegen is done
104 // seems to be a reasonable spot. We do it here, as opposed to the deletion
105 // time of the CodeGenModule, because we have to ensure the IR has not yet
106 // been "emitted" to the outside, thus, modifications are still sensible.
107 if (CGM.getLangOpts().OpenMPIRBuilder && CurFn)
108 CGM.getOpenMPRuntime().getOMPBuilder().finalize(CurFn);
109}
110
111// Map the LangOption for exception behavior into
112// the corresponding enum in the IR.
113llvm::fp::ExceptionBehavior
114clang::ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind) {
115
116 switch (Kind) {
117 case LangOptions::FPE_Ignore: return llvm::fp::ebIgnore;
118 case LangOptions::FPE_MayTrap: return llvm::fp::ebMayTrap;
119 case LangOptions::FPE_Strict: return llvm::fp::ebStrict;
120 default:
121 llvm_unreachable("Unsupported FP Exception Behavior");
122 }
123}
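// Illustration (editorial sketch): under FPE_Strict - e.g. with
// '#pragma STDC FENV_ACCESS ON' in effect - a plain 'a + b' is emitted as a
// constrained intrinsic carrying the mapped exception behavior:
//
//   %sum = call double @llvm.experimental.constrained.fadd.f64(
//              double %a, double %b,
//              metadata !"round.dynamic", metadata !"fpexcept.strict")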
124
125void CodeGenFunction::SetFastMathFlags(FPOptions FPFeatures) {
126 llvm::FastMathFlags FMF;
127 FMF.setAllowReassoc(FPFeatures.getAllowFPReassociate());
128 FMF.setNoNaNs(FPFeatures.getNoHonorNaNs());
129 FMF.setNoInfs(FPFeatures.getNoHonorInfs());
130 FMF.setNoSignedZeros(FPFeatures.getNoSignedZero());
131 FMF.setAllowReciprocal(FPFeatures.getAllowReciprocal());
132 FMF.setApproxFunc(FPFeatures.getAllowApproxFunc());
133 FMF.setAllowContract(FPFeatures.allowFPContractAcrossStatement());
134 Builder.setFastMathFlags(FMF);
135}
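// Illustration (editorial sketch): with -ffast-math all of the flags above are
// set, so a scalar multiply is emitted as
//
//   %mul = fmul fast float %a, %b
//
// whereas enabling only cross-statement contraction yields just the
// 'contract' flag on the instruction.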
136
137CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF,
138 const Expr *E)
139 : CGF(CGF) {
140 ConstructorHelper(E->getFPFeaturesInEffect(CGF.getLangOpts()));
141}
142
143CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF,
144 FPOptions FPFeatures)
145 : CGF(CGF) {
146 ConstructorHelper(FPFeatures);
147}
148
149void CodeGenFunction::CGFPOptionsRAII::ConstructorHelper(FPOptions FPFeatures) {
150 OldFPFeatures = CGF.CurFPFeatures;
151 CGF.CurFPFeatures = FPFeatures;
152
153 OldExcept = CGF.Builder.getDefaultConstrainedExcept();
154 OldRounding = CGF.Builder.getDefaultConstrainedRounding();
155
156 if (OldFPFeatures == FPFeatures)
157 return;
158
159 FMFGuard.emplace(CGF.Builder);
160
161 llvm::RoundingMode NewRoundingBehavior = FPFeatures.getRoundingMode();
162 CGF.Builder.setDefaultConstrainedRounding(NewRoundingBehavior);
163 auto NewExceptionBehavior =
164 ToConstrainedExceptMD(FPFeatures.getExceptionMode());
165 CGF.Builder.setDefaultConstrainedExcept(NewExceptionBehavior);
166
167 CGF.SetFastMathFlags(FPFeatures);
168
169 assert((CGF.CurFuncDecl == nullptr || CGF.Builder.getIsFPConstrained() ||
170 isa<CXXConstructorDecl>(CGF.CurFuncDecl) ||
171 isa<CXXDestructorDecl>(CGF.CurFuncDecl) ||
172 (NewExceptionBehavior == llvm::fp::ebIgnore &&
173 NewRoundingBehavior == llvm::RoundingMode::NearestTiesToEven)) &&
174 "FPConstrained should be enabled on entire function");
175
176 auto mergeFnAttrValue = [&](StringRef Name, bool Value) {
177 auto OldValue =
178 CGF.CurFn->getFnAttribute(Name).getValueAsBool();
179 auto NewValue = OldValue & Value;
180 if (OldValue != NewValue)
181 CGF.CurFn->addFnAttr(Name, llvm::toStringRef(NewValue));
182 };
183 mergeFnAttrValue("no-infs-fp-math", FPFeatures.getNoHonorInfs());
184 mergeFnAttrValue("no-nans-fp-math", FPFeatures.getNoHonorNaNs());
185 mergeFnAttrValue("no-signed-zeros-fp-math", FPFeatures.getNoSignedZero());
186}
187
188CodeGenFunction::CGFPOptionsRAII::~CGFPOptionsRAII() {
189 CGF.CurFPFeatures = OldFPFeatures;
190 CGF.Builder.setDefaultConstrainedExcept(OldExcept);
191 CGF.Builder.setDefaultConstrainedRounding(OldRounding);
192}
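// Typical use of CGFPOptionsRAII (editorial sketch; the real call sites live
// in the expression emitters such as ScalarExprEmitter):
//
//   {
//     CGFPOptionsRAII FPOptsRAII(CGF, E); // FP state of E now drives Builder
//     ...emit the floating-point operation for E...
//   } // destructor restores CurFPFeatures and the builder's defaults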
193
194static LValue
195makeNaturalAlignAddrLValue(llvm::Value *V, QualType T, bool ForPointeeType,
196 bool MightBeSigned, CodeGenFunction &CGF,
197 KnownNonNull_t IsKnownNonNull = NotKnownNonNull) {
198 LValueBaseInfo BaseInfo;
199 TBAAAccessInfo TBAAInfo;
200 CharUnits Alignment =
201 CGF.CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo, ForPointeeType);
202 Address Addr =
203 MightBeSigned
204 ? CGF.makeNaturalAddressForPointer(V, T, Alignment, false, nullptr,
205 nullptr, IsKnownNonNull)
206 : Address(V, CGF.ConvertTypeForMem(T), Alignment, IsKnownNonNull);
207 return CGF.MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
208}
209
210LValue
211CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T,
212 KnownNonNull_t IsKnownNonNull) {
213 return ::makeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ false,
214 /*MightBeSigned*/ true, *this,
215 IsKnownNonNull);
216}
217
218LValue
219CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
220 return ::makeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ true,
221 /*MightBeSigned*/ true, *this);
222}
223
224LValue CodeGenFunction::MakeNaturalAlignRawAddrLValue(llvm::Value *V,
225 QualType T) {
226 return ::makeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ false,
227 /*MightBeSigned*/ false, *this);
228}
229
230LValue CodeGenFunction::MakeNaturalAlignPointeeRawAddrLValue(llvm::Value *V,
231 QualType T) {
232 return ::makeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ true,
233 /*MightBeSigned*/ false, *this);
234}
235
236llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
237 return CGM.getTypes().ConvertTypeForMem(T);
238}
239
240llvm::Type *CodeGenFunction::ConvertType(QualType T) {
241 return CGM.getTypes().ConvertType(T);
242}
243
244llvm::Type *CodeGenFunction::convertTypeForLoadStore(QualType ASTTy,
245 llvm::Type *LLVMTy) {
246 return CGM.getTypes().convertTypeForLoadStore(ASTTy, LLVMTy);
247}
248
249TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
250 type = type.getCanonicalType();
251 while (true) {
252 switch (type->getTypeClass()) {
253#define TYPE(name, parent)
254#define ABSTRACT_TYPE(name, parent)
255#define NON_CANONICAL_TYPE(name, parent) case Type::name:
256#define DEPENDENT_TYPE(name, parent) case Type::name:
257#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
258#include "clang/AST/TypeNodes.inc"
259 llvm_unreachable("non-canonical or dependent type in IR-generation");
260
261 case Type::Auto:
262 case Type::DeducedTemplateSpecialization:
263 llvm_unreachable("undeduced type in IR-generation");
264
265 // Various scalar types.
266 case Type::Builtin:
267 case Type::Pointer:
268 case Type::BlockPointer:
269 case Type::LValueReference:
270 case Type::RValueReference:
271 case Type::MemberPointer:
272 case Type::Vector:
273 case Type::ExtVector:
274 case Type::ConstantMatrix:
275 case Type::FunctionProto:
276 case Type::FunctionNoProto:
277 case Type::Enum:
278 case Type::ObjCObjectPointer:
279 case Type::Pipe:
280 case Type::BitInt:
281 case Type::HLSLAttributedResource:
282 case Type::HLSLInlineSpirv:
283 return TEK_Scalar;
284
285 // Complexes.
286 case Type::Complex:
287 return TEK_Complex;
288
289 // Arrays, records, and Objective-C objects.
290 case Type::ConstantArray:
291 case Type::IncompleteArray:
292 case Type::VariableArray:
293 case Type::Record:
294 case Type::ObjCObject:
295 case Type::ObjCInterface:
296 case Type::ArrayParameter:
297 return TEK_Aggregate;
298
299 // We operate on atomic values according to their underlying type.
300 case Type::Atomic:
301 type = cast<AtomicType>(type)->getValueType();
302 continue;
303 }
304 llvm_unreachable("unknown type kind!");
305 }
306}
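// Concrete examples of the classification above (editorial note):
//   int, float *, int (*)(int), enum E  -> TEK_Scalar
//   _Complex double                     -> TEK_Complex
//   struct S { int a[8]; }, int[4]      -> TEK_Aggregate
//   _Atomic(long)                       -> classified as 'long' -> TEK_Scalar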
307
308llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
309 // For cleanliness, we try to avoid emitting the return block for
310 // simple cases.
311 llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
312
313 if (CurBB) {
314 assert(!CurBB->getTerminator() && "Unexpected terminated block.");
315
316 // We have a valid insert point, reuse it if it is empty or there are no
317 // explicit jumps to the return block.
318 if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
319 ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
320 delete ReturnBlock.getBlock();
321 ReturnBlock = JumpDest();
322 } else
323 EmitBlock(ReturnBlock.getBlock());
324 return llvm::DebugLoc();
325 }
326
327 // Otherwise, if the return block is the target of a single direct
328 // branch then we can just put the code in that block instead. This
329 // cleans up functions which started with a unified return block.
330 if (ReturnBlock.getBlock()->hasOneUse()) {
331 llvm::BranchInst *BI =
332 dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
333 if (BI && BI->isUnconditional() &&
334 BI->getSuccessor(0) == ReturnBlock.getBlock()) {
335 // Record/return the DebugLoc of the simple 'return' expression to be used
336 // later by the actual 'ret' instruction.
337 llvm::DebugLoc Loc = BI->getDebugLoc();
338 Builder.SetInsertPoint(BI->getParent());
339 BI->eraseFromParent();
340 delete ReturnBlock.getBlock();
341 ReturnBlock = JumpDest();
342 return Loc;
343 }
344 }
345
346 // FIXME: We are at an unreachable point, there is no reason to emit the block
347 // unless it has uses. However, we still need a place to put the debug
348 // region.end for now.
349
350 EmitBlock(ReturnBlock.getBlock());
351 return llvm::DebugLoc();
352}
353
354static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
355 if (!BB) return;
356 if (!BB->use_empty()) {
357 CGF.CurFn->insert(CGF.CurFn->end(), BB);
358 return;
359 }
360 delete BB;
361}
362
363void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
364 assert(BreakContinueStack.empty() &&
365 "mismatched push/pop in break/continue stack!");
366 assert(LifetimeExtendedCleanupStack.empty() &&
367 "mismatched push/pop of cleanups in EHStack!");
368 assert(DeferredDeactivationCleanupStack.empty() &&
369 "mismatched activate/deactivate of cleanups!");
370
371 if (CGM.shouldEmitConvergenceTokens()) {
372 ConvergenceTokenStack.pop_back();
373 assert(ConvergenceTokenStack.empty() &&
374 "mismatched push/pop in convergence stack!");
375 }
376
377 bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
378 && NumSimpleReturnExprs == NumReturnExprs
379 && ReturnBlock.getBlock()->use_empty();
380 // Usually the return expression is evaluated before the cleanup
381 // code. If the function contains only a simple return statement,
382 // such as a constant, the location before the cleanup code becomes
383 // the last useful breakpoint in the function, because the simple
384 // return expression will be evaluated after the cleanup code. To be
385 // safe, set the debug location for cleanup code to the location of
386 // the return statement. Otherwise the cleanup code should be at the
387 // end of the function's lexical scope.
388 //
389 // If there are multiple branches to the return block, the branch
390 // instructions will get the location of the return statements and
391 // all will be fine.
392 if (CGDebugInfo *DI = getDebugInfo()) {
393 if (OnlySimpleReturnStmts)
394 DI->EmitLocation(Builder, LastStopPoint);
395 else
396 DI->EmitLocation(Builder, EndLoc);
397 }
398
399 // Pop any cleanups that might have been associated with the
400 // parameters. Do this in whatever block we're currently in; it's
401 // important to do this before we enter the return block or return
402 // edges will be *really* confused.
403 bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth;
404 bool HasOnlyNoopCleanups =
405 HasCleanups && EHStack.containsOnlyNoopCleanups(PrologueCleanupDepth);
406 bool EmitRetDbgLoc = !HasCleanups || HasOnlyNoopCleanups;
407
408 std::optional<ApplyDebugLocation> OAL;
409 if (HasCleanups) {
410 // Make sure the line table doesn't jump back into the body for
411 // the ret after it's been at EndLoc.
412 if (CGDebugInfo *DI = getDebugInfo()) {
413 if (OnlySimpleReturnStmts)
414 DI->EmitLocation(Builder, EndLoc);
415 else
416 // We may not have a valid end location. Try to apply it anyway, and
417 // fall back to an artificial location if needed.
418 OAL = ApplyDebugLocation::CreateDefaultArtificial(*this, EndLoc);
419 }
420
421 PopCleanupBlocks(PrologueCleanupDepth);
422 }
423
424 // Emit function epilog (to return).
425 llvm::DebugLoc Loc = EmitReturnBlock();
426
427 if (ShouldInstrumentFunction()) {
428 if (CGM.getCodeGenOpts().InstrumentFunctions)
429 CurFn->addFnAttr("instrument-function-exit", "__cyg_profile_func_exit");
430 if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
431 CurFn->addFnAttr("instrument-function-exit-inlined",
432 "__cyg_profile_func_exit");
433 }
434
435 // Emit debug descriptor for function end.
436 if (CGDebugInfo *DI = getDebugInfo())
437 DI->EmitFunctionEnd(Builder, CurFn);
438
439 // Reset the debug location to that of the simple 'return' expression, if
440 // any, rather than that of the end of the function's scope '}'.
441 uint64_t RetKeyInstructionsAtomGroup = Loc ? Loc->getAtomGroup() : 0;
442 ApplyDebugLocation AL(*this, Loc);
443 EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc,
444 RetKeyInstructionsAtomGroup);
445 EmitEndEHSpec(CurCodeDecl);
446
447 assert(EHStack.empty() &&
448 "did not remove all scopes from cleanup stack!");
449
450 // If someone did an indirect goto, emit the indirect goto block at the end of
451 // the function.
452 if (IndirectBranch) {
453 EmitBlock(IndirectBranch->getParent());
454 Builder.ClearInsertionPoint();
455 }
456
457 // If some of our locals escaped, insert a call to llvm.localescape in the
458 // entry block.
459 if (!EscapedLocals.empty()) {
460 // Invert the map from local to index into a simple vector. There should be
461 // no holes.
462 SmallVector<llvm::Value *, 4> EscapeArgs;
463 EscapeArgs.resize(EscapedLocals.size());
464 for (auto &Pair : EscapedLocals)
465 EscapeArgs[Pair.second] = Pair.first;
466 llvm::Function *FrameEscapeFn = llvm::Intrinsic::getOrInsertDeclaration(
467 &CGM.getModule(), llvm::Intrinsic::localescape);
468 CGBuilderTy(*this, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
469 }
470
471 // Remove the AllocaInsertPt instruction, which is just a convenience for us.
472 llvm::Instruction *Ptr = AllocaInsertPt;
473 AllocaInsertPt = nullptr;
474 Ptr->eraseFromParent();
475
476 // PostAllocaInsertPt was lazily created the first time it was required;
477 // remove it now, since it too was just a convenience for us.
478 if (PostAllocaInsertPt) {
479 llvm::Instruction *PostPtr = PostAllocaInsertPt;
480 PostAllocaInsertPt = nullptr;
481 PostPtr->eraseFromParent();
482 }
483
484 // If someone took the address of a label but never did an indirect goto, we
485 // made a zero entry PHI node, which is illegal, zap it now.
486 if (IndirectBranch) {
487 llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
488 if (PN->getNumIncomingValues() == 0) {
489 PN->replaceAllUsesWith(llvm::PoisonValue::get(PN->getType()));
490 PN->eraseFromParent();
491 }
492 }
493
494 EmitIfUsed(*this, EHResumeBlock);
495 EmitIfUsed(*this, TerminateLandingPad);
496 EmitIfUsed(*this, TerminateHandler);
497 EmitIfUsed(*this, UnreachableBlock);
498
499 for (const auto &FuncletAndParent : TerminateFunclets)
500 EmitIfUsed(*this, FuncletAndParent.second);
501
502 if (CGM.getCodeGenOpts().EmitDeclMetadata)
503 EmitDeclMetadata();
504
505 for (const auto &R : DeferredReplacements) {
506 if (llvm::Value *Old = R.first) {
507 Old->replaceAllUsesWith(R.second);
508 cast<llvm::Instruction>(Old)->eraseFromParent();
509 }
510 }
511 DeferredReplacements.clear();
512
513 // Eliminate the CleanupDestSlot alloca by replacing it with SSA values and
514 // PHIs if the current function is a coroutine. We don't do this for all
515 // functions as it may result in a slight increase in the number of
516 // instructions if compiled with no optimizations. We do it for coroutines
517 // because the lifetime of the CleanupDestSlot alloca makes correct
518 // coroutine frame building very difficult.
519 if (NormalCleanupDest.isValid() && isCoroutine()) {
520 llvm::DominatorTree DT(*CurFn);
521 llvm::PromoteMemToReg(
522 cast<llvm::AllocaInst>(NormalCleanupDest.getPointer()), DT);
523 NormalCleanupDest = Address::invalid();
524 }
525
526 // Scan function arguments for vector width.
527 for (llvm::Argument &A : CurFn->args())
528 if (auto *VT = dyn_cast<llvm::VectorType>(A.getType()))
529 LargestVectorWidth =
530 std::max((uint64_t)LargestVectorWidth,
531 VT->getPrimitiveSizeInBits().getKnownMinValue());
532
533 // Update vector width based on return type.
534 if (auto *VT = dyn_cast<llvm::VectorType>(CurFn->getReturnType()))
535 LargestVectorWidth =
536 std::max((uint64_t)LargestVectorWidth,
537 VT->getPrimitiveSizeInBits().getKnownMinValue());
538
539 if (CurFnInfo->getMaxVectorWidth() > LargestVectorWidth)
540 LargestVectorWidth = CurFnInfo->getMaxVectorWidth();
541
542 // Add the min-legal-vector-width attribute. This contains the max width from:
543 // 1. min-vector-width attribute used in the source program.
544 // 2. Any builtins used that have a vector width specified.
545 // 3. Values passed in and out of inline assembly.
546 // 4. Width of vector arguments and return types for this function.
547 // 5. Width of vector arguments and return types for functions called by this
548 // function.
549 if (getContext().getTargetInfo().getTriple().isX86())
550 CurFn->addFnAttr("min-legal-vector-width",
551 llvm::utostr(LargestVectorWidth));
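// Illustration (editorial, details hypothetical): on x86, a declaration such
// as
//
//   __attribute__((target("avx512f"), min_vector_width(512)))
//   __m512d add(__m512d a, __m512d b) { return a + b; }
//
// contributes 512 both via source item (1) and via the argument/return scan
// in item (4), so the function ends up with "min-legal-vector-width"="512".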
552
553 // If we generated an unreachable return block, delete it now.
554 if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) {
555 Builder.ClearInsertionPoint();
556 ReturnBlock.getBlock()->eraseFromParent();
557 }
558 if (ReturnValue.isValid()) {
559 auto *RetAlloca =
560 dyn_cast<llvm::AllocaInst>(ReturnValue.emitRawPointer(*this));
561 if (RetAlloca && RetAlloca->use_empty()) {
562 RetAlloca->eraseFromParent();
563 ReturnValue = Address::invalid();
564 }
565 }
566}
567
568/// ShouldInstrumentFunction - Return true if the current function should be
569/// instrumented with __cyg_profile_func_* calls.
570bool CodeGenFunction::ShouldInstrumentFunction() {
571 if (!CGM.getCodeGenOpts().InstrumentFunctions &&
572 !CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining &&
573 !CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
574 return false;
575 if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
576 return false;
577 return true;
578}
579
580bool CodeGenFunction::ShouldSkipSanitizerInstrumentation() {
581 if (!CurFuncDecl)
582 return false;
583 return CurFuncDecl->hasAttr<DisableSanitizerInstrumentationAttr>();
584}
585
586/// ShouldXRayInstrument - Return true if the current function should be
587/// instrumented with XRay nop sleds.
588bool CodeGenFunction::ShouldXRayInstrumentFunction() const {
589 return CGM.getCodeGenOpts().XRayInstrumentFunctions;
590}
591
592/// AlwaysEmitXRayCustomEvents - Return true if we should emit IR for calls to
593/// the __xray_customevent(...) builtin calls, when doing XRay instrumentation.
594bool CodeGenFunction::AlwaysEmitXRayCustomEvents() const {
595 return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
596 (CGM.getCodeGenOpts().XRayAlwaysEmitCustomEvents ||
597 CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
598 XRayInstrKind::Custom);
599}
600
601bool CodeGenFunction::AlwaysEmitXRayTypedEvents() const {
602 return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
603 (CGM.getCodeGenOpts().XRayAlwaysEmitTypedEvents ||
604 CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
605 XRayInstrKind::Typed);
606}
607
608llvm::ConstantInt *
609CodeGenFunction::getUBSanFunctionTypeHash(QualType Ty) const {
610 // Remove any (C++17) exception specifications, to allow calling e.g. a
611 // noexcept function through a non-noexcept pointer.
612 if (!Ty->isFunctionNoProtoType())
613 Ty = getContext().getFunctionTypeWithExceptionSpec(Ty, EST_None);
614 std::string Mangled;
615 llvm::raw_string_ostream Out(Mangled);
616 CGM.getCXXABI().getMangleContext().mangleCanonicalTypeName(Ty, Out, false);
617 return llvm::ConstantInt::get(
618 CGM.Int32Ty, static_cast<uint32_t>(llvm::xxh3_64bits(Mangled)));
619}
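// Illustration (editorial sketch): for 'void f(int)' the canonical function
// type mangles to the RTTI-style string "_ZTSFviE"; llvm::xxh3_64bits of that
// string, truncated to 32 bits, is the value -fsanitize=function records for
// the function and compares against at indirect call sites.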
620
621void CodeGenFunction::EmitKernelMetadata(const FunctionDecl *FD,
622 llvm::Function *Fn) {
623 if (!FD->hasAttr<DeviceKernelAttr>() && !FD->hasAttr<CUDAGlobalAttr>())
624 return;
625
626 llvm::LLVMContext &Context = getLLVMContext();
627
628 CGM.GenKernelArgMetadata(Fn, FD, this);
629
630 if (!(getLangOpts().OpenCL ||
631 (getLangOpts().CUDA &&
632 getContext().getTargetInfo().getTriple().isSPIRV())))
633 return;
634
635 if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
636 QualType HintQTy = A->getTypeHint();
637 const ExtVectorType *HintEltQTy = HintQTy->getAs<ExtVectorType>();
638 bool IsSignedInteger =
639 HintQTy->isSignedIntegerType() ||
640 (HintEltQTy && HintEltQTy->getElementType()->isSignedIntegerType());
641 llvm::Metadata *AttrMDArgs[] = {
642 llvm::ConstantAsMetadata::get(llvm::PoisonValue::get(
643 CGM.getTypes().ConvertType(A->getTypeHint()))),
644 llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
645 llvm::IntegerType::get(Context, 32),
646 llvm::APInt(32, (uint64_t)(IsSignedInteger ? 1 : 0))))};
647 Fn->setMetadata("vec_type_hint", llvm::MDNode::get(Context, AttrMDArgs));
648 }
649
650 if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
651 auto Eval = [&](Expr *E) {
652 return E->EvaluateKnownConstInt(FD->getASTContext()).getExtValue();
653 };
654 llvm::Metadata *AttrMDArgs[] = {
655 llvm::ConstantAsMetadata::get(Builder.getInt32(Eval(A->getXDim()))),
656 llvm::ConstantAsMetadata::get(Builder.getInt32(Eval(A->getYDim()))),
657 llvm::ConstantAsMetadata::get(Builder.getInt32(Eval(A->getZDim())))};
658 Fn->setMetadata("work_group_size_hint", llvm::MDNode::get(Context, AttrMDArgs));
659 }
660
661 if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
662 auto Eval = [&](Expr *E) {
663 return E->EvaluateKnownConstInt(FD->getASTContext()).getExtValue();
664 };
665 llvm::Metadata *AttrMDArgs[] = {
666 llvm::ConstantAsMetadata::get(Builder.getInt32(Eval(A->getXDim()))),
667 llvm::ConstantAsMetadata::get(Builder.getInt32(Eval(A->getYDim()))),
668 llvm::ConstantAsMetadata::get(Builder.getInt32(Eval(A->getZDim())))};
669 Fn->setMetadata("reqd_work_group_size", llvm::MDNode::get(Context, AttrMDArgs));
670 }
671
672 if (const OpenCLIntelReqdSubGroupSizeAttr *A =
673 FD->getAttr<OpenCLIntelReqdSubGroupSizeAttr>()) {
674 llvm::Metadata *AttrMDArgs[] = {
675 llvm::ConstantAsMetadata::get(Builder.getInt32(A->getSubGroupSize()))};
676 Fn->setMetadata("intel_reqd_sub_group_size",
677 llvm::MDNode::get(Context, AttrMDArgs));
678 }
679}
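// Illustration (editorial sketch): for an OpenCL kernel declared as
//
//   __attribute__((reqd_work_group_size(64, 1, 1)))
//   __kernel void k(__global int *p) { *p = 0; }
//
// this attaches '!reqd_work_group_size !{i32 64, i32 1, i32 1}' (alongside the
// kernel-argument metadata emitted above) to the IR function.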
680
681/// Determine whether the function F ends with a return stmt.
682static bool endsWithReturn(const Decl* F) {
683 const Stmt *Body = nullptr;
684 if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
685 Body = FD->getBody();
686 else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
687 Body = OMD->getBody();
688
689 if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
690 auto LastStmt = CS->body_rbegin();
691 if (LastStmt != CS->body_rend())
692 return isa<ReturnStmt>(*LastStmt);
693 }
694 return false;
695}
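// For example (editorial note): 'int f() { g(); return 1; }' ends with a
// return statement, while 'int f() { g(); }' does not - the latter is what
// makes StartFunction below count an implicit return for the function.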
696
697void CodeGenFunction::markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn) {
698 if (SanOpts.has(SanitizerKind::Thread)) {
699 Fn->addFnAttr("sanitize_thread_no_checking_at_run_time");
700 Fn->removeFnAttr(llvm::Attribute::SanitizeThread);
701 }
702}
703
704/// Check if the return value of this function requires sanitization.
705bool CodeGenFunction::requiresReturnValueCheck() const {
706 return requiresReturnValueNullabilityCheck() ||
707 (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) && CurCodeDecl &&
708 CurCodeDecl->getAttr<ReturnsNonNullAttr>());
709}
710
711static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) {
712 auto *MD = dyn_cast_or_null<CXXMethodDecl>(D);
713 if (!MD || !MD->getDeclName().getAsIdentifierInfo() ||
714 !MD->getDeclName().getAsIdentifierInfo()->isStr("allocate") ||
715 (MD->getNumParams() != 1 && MD->getNumParams() != 2))
716 return false;
717
718 if (!Ctx.hasSameType(MD->parameters()[0]->getType(), Ctx.getSizeType()))
719 return false;
720
721 if (MD->getNumParams() == 2) {
722 auto *PT = MD->parameters()[1]->getType()->getAs<PointerType>();
723 if (!PT || !PT->isVoidPointerType() ||
724 !PT->getPointeeType().isConstQualified())
725 return false;
726 }
727
728 return true;
729}
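// A declaration shape this matcher accepts, in any namespace (editorial
// sketch):
//
//   template <class T> struct MyAlloc {
//     T *allocate(size_t n);                   // one-parameter form
//     T *allocate(size_t n, const void *hint); // two-parameter form
//   };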
730
731bool CodeGenFunction::isInAllocaArgument(CGCXXABI &ABI, QualType Ty) {
732 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
733 return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
734}
735
736bool CodeGenFunction::hasInAllocaArg(const CXXMethodDecl *MD) {
737 return getTarget().getTriple().getArch() == llvm::Triple::x86 &&
738 getTarget().getCXXABI().isMicrosoft() &&
739 llvm::any_of(MD->parameters(), [&](ParmVarDecl *P) {
740 return isInAllocaArgument(CGM.getCXXABI(), P->getType());
741 });
742}
743
744/// Return the UBSan prologue signature for \p FD if one is available.
745static llvm::Constant *getPrologueSignature(CodeGenModule &CGM,
746 const FunctionDecl *FD) {
747 if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
748 if (!MD->isStatic())
749 return nullptr;
750 return CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM);
751}
752
753void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
754 llvm::Function *Fn,
755 const CGFunctionInfo &FnInfo,
756 const FunctionArgList &Args,
757 SourceLocation Loc,
758 SourceLocation StartLoc) {
759 assert(!CurFn &&
760 "Do not use a CodeGenFunction object for more than one function");
761
762 const Decl *D = GD.getDecl();
763
764 DidCallStackSave = false;
765 CurCodeDecl = D;
766 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
767 if (FD && FD->usesSEHTry())
768 CurSEHParent = GD;
769 CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
770 FnRetTy = RetTy;
771 CurFn = Fn;
772 CurFnInfo = &FnInfo;
773 assert(CurFn->isDeclaration() && "Function already has body?");
774
775 // If this function is ignored for any of the enabled sanitizers,
776 // disable the sanitizer for the function.
777 do {
778#define SANITIZER(NAME, ID) \
779 if (SanOpts.empty()) \
780 break; \
781 if (SanOpts.has(SanitizerKind::ID)) \
782 if (CGM.isInNoSanitizeList(SanitizerKind::ID, Fn, Loc)) \
783 SanOpts.set(SanitizerKind::ID, false);
784
785#include "clang/Basic/Sanitizers.def"
786#undef SANITIZER
787 } while (false);
788
789 if (D) {
790 const bool SanitizeBounds = SanOpts.hasOneOf(SanitizerKind::Bounds);
791 SanitizerMask no_sanitize_mask;
792 bool NoSanitizeCoverage = false;
793
794 for (auto *Attr : D->specific_attrs<NoSanitizeAttr>()) {
795 no_sanitize_mask |= Attr->getMask();
796 // SanitizeCoverage is not handled by SanOpts.
797 if (Attr->hasCoverage())
798 NoSanitizeCoverage = true;
799 }
800
801 // Apply the no_sanitize* attributes to SanOpts.
802 SanOpts.Mask &= ~no_sanitize_mask;
803 if (no_sanitize_mask & SanitizerKind::Address)
804 SanOpts.set(SanitizerKind::KernelAddress, false);
805 if (no_sanitize_mask & SanitizerKind::KernelAddress)
806 SanOpts.set(SanitizerKind::Address, false);
807 if (no_sanitize_mask & SanitizerKind::HWAddress)
808 SanOpts.set(SanitizerKind::KernelHWAddress, false);
809 if (no_sanitize_mask & SanitizerKind::KernelHWAddress)
810 SanOpts.set(SanitizerKind::HWAddress, false);
811
812 if (SanitizeBounds && !SanOpts.hasOneOf(SanitizerKind::Bounds))
813 Fn->addFnAttr(llvm::Attribute::NoSanitizeBounds);
814
815 if (NoSanitizeCoverage && CGM.getCodeGenOpts().hasSanitizeCoverage())
816 Fn->addFnAttr(llvm::Attribute::NoSanitizeCoverage);
817
818 // Some passes need the non-negated no_sanitize attribute. Pass them on.
819 if (CGM.getCodeGenOpts().hasSanitizeBinaryMetadata()) {
820 if (no_sanitize_mask & SanitizerKind::Thread)
821 Fn->addFnAttr("no_sanitize_thread");
822 }
823 }
824
825 if (ShouldSkipSanitizerInstrumentation()) {
826 CurFn->addFnAttr(llvm::Attribute::DisableSanitizerInstrumentation);
827 } else {
828 // Apply sanitizer attributes to the function.
829 if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
830 Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
831 if (SanOpts.hasOneOf(SanitizerKind::HWAddress |
832 SanitizerKind::KernelHWAddress))
833 Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
834 if (SanOpts.has(SanitizerKind::MemtagStack))
835 Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);
836 if (SanOpts.has(SanitizerKind::Thread))
837 Fn->addFnAttr(llvm::Attribute::SanitizeThread);
838 if (SanOpts.has(SanitizerKind::Type))
839 Fn->addFnAttr(llvm::Attribute::SanitizeType);
840 if (SanOpts.has(SanitizerKind::NumericalStability))
841 Fn->addFnAttr(llvm::Attribute::SanitizeNumericalStability);
842 if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory))
843 Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
844 if (SanOpts.has(SanitizerKind::AllocToken))
845 Fn->addFnAttr(llvm::Attribute::SanitizeAllocToken);
846 }
847 if (SanOpts.has(SanitizerKind::SafeStack))
848 Fn->addFnAttr(llvm::Attribute::SafeStack);
849 if (SanOpts.has(SanitizerKind::ShadowCallStack))
850 Fn->addFnAttr(llvm::Attribute::ShadowCallStack);
851
852 if (SanOpts.has(SanitizerKind::Realtime))
853 if (FD && FD->getASTContext().hasAnyFunctionEffects())
854 for (const FunctionEffectWithCondition &Fe : FD->getFunctionEffects()) {
855 if (Fe.Effect.kind() == FunctionEffect::Kind::NonBlocking)
856 Fn->addFnAttr(llvm::Attribute::SanitizeRealtime);
857 else if (Fe.Effect.kind() == FunctionEffect::Kind::Blocking)
858 Fn->addFnAttr(llvm::Attribute::SanitizeRealtimeBlocking);
859 }
860
861 // Apply fuzzing attribute to the function.
862 if (SanOpts.hasOneOf(SanitizerKind::Fuzzer | SanitizerKind::FuzzerNoLink))
863 Fn->addFnAttr(llvm::Attribute::OptForFuzzing);
864
865 // Ignore TSan memory accesses from within ObjC/ObjC++ dealloc, initialize,
866 // .cxx_destruct, __destroy_helper_block_ and all of their callees at run time.
867 if (SanOpts.has(SanitizerKind::Thread)) {
868 if (const auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) {
869 const IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0);
870 if (OMD->getMethodFamily() == OMF_dealloc ||
871 OMD->getMethodFamily() == OMF_initialize ||
872 (OMD->getSelector().isUnarySelector() && II->isStr(".cxx_destruct"))) {
873 markAsIgnoreThreadCheckingAtRuntime(Fn);
874 }
875 }
876 }
877
878 // Ignore unrelated casts in STL allocate() since the allocator must cast
879 // from void* to T* before object initialization completes. Don't match on
880 // the namespace because not all allocators are in the std namespace.
881 if (D && SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
882 if (matchesStlAllocatorFn(D, getContext()))
883 SanOpts.Mask &= ~SanitizerKind::CFIUnrelatedCast;
884 }
885
886 // Ignore null checks in coroutine functions since the coroutines passes
887 // are not aware of how to move the extra UBSan instructions across the split
888 // coroutine boundaries.
889 if (D && SanOpts.has(SanitizerKind::Null))
890 if (FD && FD->getBody() &&
891 FD->getBody()->getStmtClass() == Stmt::CoroutineBodyStmtClass)
892 SanOpts.Mask &= ~SanitizerKind::Null;
893
894 // Apply xray attributes to the function (as a string, for now)
895 bool AlwaysXRayAttr = false;
896 if (const auto *XRayAttr = D ? D->getAttr<XRayInstrumentAttr>() : nullptr) {
897 if (CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
898 XRayInstrKind::FunctionEntry) ||
899 CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
900 XRayInstrKind::FunctionExit)) {
901 if (XRayAttr->alwaysXRayInstrument() && ShouldXRayInstrumentFunction()) {
902 Fn->addFnAttr("function-instrument", "xray-always");
903 AlwaysXRayAttr = true;
904 }
905 if (XRayAttr->neverXRayInstrument())
906 Fn->addFnAttr("function-instrument", "xray-never");
907 if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>())
909 Fn->addFnAttr("xray-log-args",
910 llvm::utostr(LogArgs->getArgumentCount()));
911 }
912 } else {
913 if (ShouldXRayInstrumentFunction() && !CGM.imbueXRayAttrs(Fn, Loc))
914 Fn->addFnAttr(
915 "xray-instruction-threshold",
916 llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold));
917 }
918
919 if (ShouldXRayInstrumentFunction()) {
920 if (CGM.getCodeGenOpts().XRayIgnoreLoops)
921 Fn->addFnAttr("xray-ignore-loops");
922
923 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
924 XRayInstrKind::FunctionExit))
925 Fn->addFnAttr("xray-skip-exit");
926
927 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
928 XRayInstrKind::FunctionEntry))
929 Fn->addFnAttr("xray-skip-entry");
930
931 auto FuncGroups = CGM.getCodeGenOpts().XRayTotalFunctionGroups;
932 if (FuncGroups > 1) {
933 auto FuncName = llvm::ArrayRef<uint8_t>(CurFn->getName().bytes_begin(),
934 CurFn->getName().bytes_end());
935 auto Group = crc32(FuncName) % FuncGroups;
936 if (Group != CGM.getCodeGenOpts().XRaySelectedFunctionGroup &&
937 !AlwaysXRayAttr)
938 Fn->addFnAttr("function-instrument", "xray-never");
939 }
940 }
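// Illustration (editorial note): with -fxray-function-groups=4 and
// -fxray-selected-function-group=1, only functions whose name hashes to
// 'crc32(name) % 4 == 1' (or that carry the always-instrument attribute) keep
// their instrumentation; every other function is forced to
// "function-instrument"="xray-never", letting a large binary be instrumented
// one slice at a time.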
941
942 if (CGM.getCodeGenOpts().getProfileInstr() !=
943 llvm::driver::ProfileInstrKind::ProfileNone) {
944 switch (CGM.isFunctionBlockedFromProfileInstr(Fn, Loc)) {
945 case ProfileList::Skip:
946 Fn->addFnAttr(llvm::Attribute::SkipProfile);
947 break;
948 case ProfileList::Forbid:
949 Fn->addFnAttr(llvm::Attribute::NoProfile);
950 break;
951 case ProfileList::Allow:
952 break;
953 }
954 }
955
956 unsigned Count, Offset;
957 StringRef Section;
958 if (const auto *Attr =
959 D ? D->getAttr<PatchableFunctionEntryAttr>() : nullptr) {
960 Count = Attr->getCount();
961 Offset = Attr->getOffset();
962 Section = Attr->getSection();
963 } else {
964 Count = CGM.getCodeGenOpts().PatchableFunctionEntryCount;
965 Offset = CGM.getCodeGenOpts().PatchableFunctionEntryOffset;
966 }
967 if (Section.empty())
968 Section = CGM.getCodeGenOpts().PatchableFunctionEntrySection;
969 if (Count && Offset <= Count) {
970 Fn->addFnAttr("patchable-function-entry", std::to_string(Count - Offset));
971 if (Offset)
972 Fn->addFnAttr("patchable-function-prefix", std::to_string(Offset));
973 if (!Section.empty())
974 Fn->addFnAttr("patchable-function-entry-section", Section);
975 }
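// Illustration (editorial note): -fpatchable-function-entry=5,2 (equivalently
// __attribute__((patchable_function_entry(5, 2)))) gives Count=5, Offset=2,
// which lowers to "patchable-function-prefix"="2" (NOPs before the entry
// label) and "patchable-function-entry"="3" (NOPs after it).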
976 // Instruct that functions for COFF/CodeView targets should start with a
977 // patchable instruction, but only on x86/x64. Don't forward this to ARM/ARM64
978 // backends as they don't need it -- instructions on these architectures are
979 // always atomically patchable at runtime.
980 if (CGM.getCodeGenOpts().HotPatch &&
981 getContext().getTargetInfo().getTriple().isX86() &&
982 getContext().getTargetInfo().getTriple().getEnvironment() !=
983 llvm::Triple::CODE16)
984 Fn->addFnAttr("patchable-function", "prologue-short-redirect");
985
986 // Add no-jump-tables value.
987 if (CGM.getCodeGenOpts().NoUseJumpTables)
988 Fn->addFnAttr("no-jump-tables", "true");
989
990 // Add no-inline-line-tables value.
991 if (CGM.getCodeGenOpts().NoInlineLineTables)
992 Fn->addFnAttr("no-inline-line-tables");
993
994 // Add profile-sample-accurate value.
995 if (CGM.getCodeGenOpts().ProfileSampleAccurate)
996 Fn->addFnAttr("profile-sample-accurate");
997
998 if (!CGM.getCodeGenOpts().SampleProfileFile.empty())
999 Fn->addFnAttr("use-sample-profile");
1000
1001 if (D && D->hasAttr<CFICanonicalJumpTableAttr>())
1002 Fn->addFnAttr("cfi-canonical-jump-table");
1003
1004 if (D && D->hasAttr<NoProfileFunctionAttr>())
1005 Fn->addFnAttr(llvm::Attribute::NoProfile);
1006
1007 if (D && D->hasAttr<HybridPatchableAttr>())
1008 Fn->addFnAttr(llvm::Attribute::HybridPatchable);
1009
1010 if (D) {
1011 // Function attributes take precedence over command line flags.
1012 if (auto *A = D->getAttr<FunctionReturnThunksAttr>()) {
1013 switch (A->getThunkType()) {
1014 case FunctionReturnThunksAttr::Kind::Keep:
1015 break;
1016 case FunctionReturnThunksAttr::Kind::Extern:
1017 Fn->addFnAttr(llvm::Attribute::FnRetThunkExtern);
1018 break;
1019 }
1020 } else if (CGM.getCodeGenOpts().FunctionReturnThunks)
1021 Fn->addFnAttr(llvm::Attribute::FnRetThunkExtern);
1022 }
1023
1024 if (FD && (getLangOpts().OpenCL ||
1025 (getLangOpts().CUDA &&
1026 getContext().getTargetInfo().getTriple().isSPIRV()) ||
1027 ((getLangOpts().HIP || getLangOpts().OffloadViaLLVM) &&
1028 getLangOpts().CUDAIsDevice))) {
1029 // Add metadata for a kernel function.
1030 EmitKernelMetadata(FD, Fn);
1031 }
1032
1033 if (FD && FD->hasAttr<ClspvLibclcBuiltinAttr>()) {
1034 Fn->setMetadata("clspv_libclc_builtin",
1035 llvm::MDNode::get(getLLVMContext(), {}));
1036 }
1037
1038 // If we are checking function types, emit a function type signature as
1039 // prologue data.
1040 if (FD && SanOpts.has(SanitizerKind::Function) &&
1042 if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) {
1043 llvm::LLVMContext &Ctx = Fn->getContext();
1044 llvm::MDBuilder MDB(Ctx);
1045 Fn->setMetadata(
1046 llvm::LLVMContext::MD_func_sanitize,
1047 MDB.createRTTIPointerPrologue(
1048 PrologueSig, getUBSanFunctionTypeHash(FD->getType())));
1049 }
1050 }
1051
1052 // If we're checking nullability, we need to know whether we can check the
1053 // return value. Initialize the flag to 'true' and refine it in EmitParmDecl.
1054 if (SanOpts.has(SanitizerKind::NullabilityReturn)) {
1055 auto Nullability = FnRetTy->getNullability();
1056 if (Nullability && *Nullability == NullabilityKind::NonNull &&
1057 !FnRetTy->isRecordType()) {
1058 if (!(SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
1059 CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>()))
1060 RetValNullabilityPrecondition =
1061 llvm::ConstantInt::getTrue(getLLVMContext());
1062 }
1063 }
1064
1065 // If we're in C++ mode and the function name is "main", it is guaranteed
1066 // to be norecurse by the standard (3.6.1.3 "The function main shall not be
1067 // used within a program").
1068 //
1069 // OpenCL C 2.0 v2.2-11 s6.9.i:
1070 // Recursion is not supported.
1071 //
1072 // HLSL
1073 // Recursion is not supported.
1074 //
1075 // SYCL v1.2.1 s3.10:
1076 // kernels cannot include RTTI information, exception classes,
1077 // recursive code, virtual functions or make use of C++ libraries that
1078 // are not compiled for the device.
1079 if (FD &&
1080 ((getLangOpts().CPlusPlus && FD->isMain()) || getLangOpts().OpenCL ||
1081 getLangOpts().HLSL || getLangOpts().SYCLIsDevice ||
1082 (getLangOpts().CUDA && FD->hasAttr<CUDAGlobalAttr>())))
1083 Fn->addFnAttr(llvm::Attribute::NoRecurse);
1084
1085 llvm::RoundingMode RM = getLangOpts().getDefaultRoundingMode();
1086 llvm::fp::ExceptionBehavior FPExceptionBehavior =
1087 ToConstrainedExceptMD(getLangOpts().getDefaultExceptionMode());
1088 Builder.setDefaultConstrainedRounding(RM);
1089 Builder.setDefaultConstrainedExcept(FPExceptionBehavior);
1090 if ((FD && (FD->UsesFPIntrin() || FD->hasAttr<StrictFPAttr>())) ||
1091 (!FD && (FPExceptionBehavior != llvm::fp::ebIgnore ||
1092 RM != llvm::RoundingMode::NearestTiesToEven))) {
1093 Builder.setIsFPConstrained(true);
1094 Fn->addFnAttr(llvm::Attribute::StrictFP);
1095 }
1096
1097 // If a custom alignment is used, force realigning to this alignment on
1098 // any main function which certainly will need it.
1099 if (FD && ((FD->isMain() || FD->isMSVCRTEntryPoint()) &&
1100 CGM.getCodeGenOpts().StackAlignment))
1101 Fn->addFnAttr("stackrealign");
1102
1103 // "main" doesn't need to zero out call-used registers.
1104 if (FD && FD->isMain())
1105 Fn->removeFnAttr("zero-call-used-regs");
1106
1107 // Add vscale_range attribute if appropriate.
1108 llvm::StringMap<bool> FeatureMap;
1109 auto IsArmStreaming = TargetInfo::ArmStreamingKind::NotStreaming;
1110 if (FD) {
1111 getContext().getFunctionFeatureMap(FeatureMap, FD);
1112 if (const auto *T = FD->getType()->getAs<FunctionProtoType>())
1113 if (T->getAArch64SMEAttributes() &
1114 FunctionType::SME_PStateSMCompatibleMask)
1115 IsArmStreaming = TargetInfo::ArmStreamingKind::StreamingCompatible;
1116
1117 if (IsArmStreamingFunction(FD, true))
1118 IsArmStreaming = TargetInfo::ArmStreamingKind::Streaming;
1119 }
1120 std::optional<std::pair<unsigned, unsigned>> VScaleRange =
1121 getContext().getTargetInfo().getVScaleRange(getLangOpts(), IsArmStreaming,
1122 &FeatureMap);
1123 if (VScaleRange) {
1124 CurFn->addFnAttr(llvm::Attribute::getWithVScaleRangeArgs(
1125 getLLVMContext(), VScaleRange->first, VScaleRange->second));
1126 }
1127
1128 llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
1129
1130 // Create a marker to make it easy to insert allocas into the entry block
1131 // later. Don't create this with the builder, because we don't want it
1132 // folded.
1133 llvm::Value *Poison = llvm::PoisonValue::get(Int32Ty);
1134 AllocaInsertPt = new llvm::BitCastInst(Poison, Int32Ty, "allocapt", EntryBB);
1135
1136 ReturnBlock = getJumpDestInCurrentScope("return");
1137
1138 Builder.SetInsertPoint(EntryBB);
1139
1140 // If we're checking the return value, allocate space for a pointer to a
1141 // precise source location of the checked return statement.
1142 if (requiresReturnValueCheck()) {
1143 ReturnLocation = CreateDefaultAlignTempAlloca(Int8PtrTy, "return.sloc.ptr");
1144 Builder.CreateStore(llvm::ConstantPointerNull::get(Int8PtrTy),
1145 ReturnLocation);
1146 }
1147
1148 // Emit subprogram debug descriptor.
1149 if (CGDebugInfo *DI = getDebugInfo()) {
1150 // Reconstruct the type from the argument list so that implicit parameters,
1151 // such as 'this' and 'vtt', show up in the debug info. Preserve the calling
1152 // convention.
1153 DI->emitFunctionStart(GD, Loc, StartLoc,
1154 DI->getFunctionType(FD, RetTy, Args), CurFn,
1155 CurFuncIsThunk);
1156 }
1157
1158 if (ShouldInstrumentFunction()) {
1159 if (CGM.getCodeGenOpts().InstrumentFunctions)
1160 CurFn->addFnAttr("instrument-function-entry", "__cyg_profile_func_enter");
1161 if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
1162 CurFn->addFnAttr("instrument-function-entry-inlined",
1163 "__cyg_profile_func_enter");
1164 if (CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
1165 CurFn->addFnAttr("instrument-function-entry-inlined",
1166 "__cyg_profile_func_enter_bare");
1167 }
1168
1169 // Since emitting the mcount call here impacts optimizations such as
1170 // function inlining, we just add an attribute to insert an mcount call in
1171 // the backend. The attribute "counting-function" is set to the mcount
1172 // function name, which is architecture-dependent.
1173 if (CGM.getCodeGenOpts().InstrumentForProfiling) {
1174 // Calls to fentry/mcount should not be generated if function has
1175 // the no_instrument_function attribute.
1176 if (!CurFuncDecl || !CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>()) {
1177 if (CGM.getCodeGenOpts().CallFEntry)
1178 Fn->addFnAttr("fentry-call", "true");
1179 else {
1180 Fn->addFnAttr("instrument-function-entry-inlined",
1181 getTarget().getMCountName());
1182 }
1183 if (CGM.getCodeGenOpts().MNopMCount) {
1184 if (!CGM.getCodeGenOpts().CallFEntry)
1185 CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
1186 << "-mnop-mcount" << "-mfentry";
1187 Fn->addFnAttr("mnop-mcount");
1188 }
1189
1190 if (CGM.getCodeGenOpts().RecordMCount) {
1191 if (!CGM.getCodeGenOpts().CallFEntry)
1192 CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
1193 << "-mrecord-mcount" << "-mfentry";
1194 Fn->addFnAttr("mrecord-mcount");
1195 }
1196 }
1197 }
1198
1199 if (CGM.getCodeGenOpts().PackedStack) {
1200 if (getContext().getTargetInfo().getTriple().getArch() !=
1201 llvm::Triple::systemz)
1202 CGM.getDiags().Report(diag::err_opt_not_valid_on_target)
1203 << "-mpacked-stack";
1204 Fn->addFnAttr("packed-stack");
1205 }
1206
1207 if (CGM.getCodeGenOpts().WarnStackSize != UINT_MAX &&
1208 !CGM.getDiags().isIgnored(diag::warn_fe_backend_frame_larger_than, Loc))
1209 Fn->addFnAttr("warn-stack-size",
1210 std::to_string(CGM.getCodeGenOpts().WarnStackSize));
1211
1212 if (RetTy->isVoidType()) {
1213 // Void type; nothing to return.
1214 ReturnValue = Address::invalid();
1215
1216 // Count the implicit return.
1217 if (!endsWithReturn(D))
1218 ++NumReturnExprs;
1219 } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) {
1220 // Indirect return; emit returned value directly into sret slot.
1221 // This reduces code size, and affects correctness in C++.
1222 auto AI = CurFn->arg_begin();
1223 if (CurFnInfo->getReturnInfo().isSRetAfterThis())
1224 ++AI;
1225 ReturnValue = makeNaturalAddressForPointer(
1226 &*AI, RetTy, CurFnInfo->getReturnInfo().getIndirectAlign(), false,
1227 nullptr, nullptr, KnownNonNull);
1228 if (!CurFnInfo->getReturnInfo().getIndirectByVal()) {
1229 ReturnValuePointer =
1230 CreateDefaultAlignTempAlloca(ReturnValue.getType(), "result.ptr");
1231 Builder.CreateStore(ReturnValue.emitRawPointer(*this),
1232 ReturnValuePointer);
1233 }
1234 } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
1235 !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
1236 // Load the sret pointer from the argument struct and return into that.
1237 unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
1238 llvm::Function::arg_iterator EI = CurFn->arg_end();
1239 --EI;
1240 llvm::Value *Addr = Builder.CreateStructGEP(
1241 CurFnInfo->getArgStruct(), &*EI, Idx);
1242 llvm::Type *Ty =
1243 cast<llvm::GetElementPtrInst>(Addr)->getResultElementType();
1244 ReturnValuePointer = Address(Addr, Ty, getPointerAlign());
1245 Addr = Builder.CreateAlignedLoad(Ty, Addr, getPointerAlign(), "agg.result");
1246 ReturnValue = Address(Addr, ConvertTypeForMem(RetTy),
1247 CGM.getNaturalTypeAlignment(RetTy), KnownNonNull);
1248 } else {
1249 ReturnValue = CreateIRTemp(RetTy, "retval");
1250
1251 // Tell the epilog emitter to autorelease the result. We do this
1252 // now so that various specialized functions can suppress it
1253 // during their IR-generation.
1254 if (getLangOpts().ObjCAutoRefCount &&
1255 !CurFnInfo->isReturnsRetained() &&
1256 RetTy->isObjCRetainableType())
1257 AutoreleaseResult = true;
1258 }
1259
1260 EmitStartEHSpec(CurCodeDecl);
1261
1262 PrologueCleanupDepth = EHStack.stable_begin();
1263
1264 // Emit OpenMP specific initialization of the device functions.
1265 if (getLangOpts().OpenMP && CurCodeDecl)
1266 CGM.getOpenMPRuntime().emitFunctionProlog(*this, CurCodeDecl);
1267
1268 if (FD && getLangOpts().HLSL) {
1269 // Handle emitting HLSL entry functions.
1270 if (FD->hasAttr<HLSLShaderAttr>()) {
1271 CGM.getHLSLRuntime().emitEntryFunction(FD, Fn);
1272 }
1273 }
1274
1275 EmitFunctionProlog(*CurFnInfo, CurFn, Args);
1276
1277 if (const CXXMethodDecl *MD = dyn_cast_if_present<CXXMethodDecl>(D);
1278 MD && !MD->isStatic()) {
1279 bool IsInLambda =
1280 MD->getParent()->isLambda() && MD->getOverloadedOperator() == OO_Call;
1281 if (MD->isImplicitObjectMemberFunction())
1282 CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
1283 if (IsInLambda) {
1284 // We're in a lambda; figure out the captures.
1285 MD->getParent()->getCaptureFields(LambdaCaptureFields,
1286 LambdaThisCaptureField);
1287 if (LambdaThisCaptureField) {
1288 // If the lambda captures the object referred to by '*this' - either by
1289 // value or by reference, make sure CXXThisValue points to the correct
1290 // object.
1291
1292 // Get the lvalue for the field (which is a copy of the enclosing object
1293 // or contains the address of the enclosing object).
1294 LValue ThisFieldLValue = EmitLValueForLambdaField(LambdaThisCaptureField);
1295 if (!LambdaThisCaptureField->getType()->isPointerType()) {
1296 // If the enclosing object was captured by value, just use its
1297 // address. Sign this pointer.
1298 CXXThisValue = ThisFieldLValue.getPointer(*this);
1299 } else {
1300 // Load the lvalue pointed to by the field, since '*this' was captured
1301 // by reference.
1302 CXXThisValue =
1303 EmitLoadOfLValue(ThisFieldLValue, SourceLocation()).getScalarVal();
1304 }
1305 }
1306 for (auto *FD : MD->getParent()->fields()) {
1307 if (FD->hasCapturedVLAType()) {
1308 auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
1309 SourceLocation()).getScalarVal();
1310 auto VAT = FD->getCapturedVLAType();
1311 VLASizeMap[VAT->getSizeExpr()] = ExprArg;
1312 }
1313 }
1314 } else if (MD->isImplicitObjectMemberFunction()) {
1315 // Not in a lambda; just use 'this' from the method.
1316 // FIXME: Should we generate a new load for each use of 'this'? The
1317 // fast register allocator would be happier...
1318 CXXThisValue = CXXABIThisValue;
1319 }
1320
1321 // Check the 'this' pointer once per function, if it's available.
1322 if (CXXABIThisValue) {
1323 SanitizerSet SkippedChecks;
1324 SkippedChecks.set(SanitizerKind::ObjectSize, true);
1325 QualType ThisTy = MD->getThisType();
1326
1327 // If this is the call operator of a lambda with no captures, it
1328 // may have a static invoker function, which may call this operator with
1329 // a null 'this' pointer.
1330 if (isLambdaCallOperator(MD) && MD->getParent()->isCapturelessLambda())
1331 SkippedChecks.set(SanitizerKind::Null, true);
1332
1333 EmitTypeCheck(
1334 isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall : TCK_MemberCall,
1335 Loc, CXXABIThisValue, ThisTy, CXXABIThisAlignment, SkippedChecks);
1336 }
1337 }
1338
1339 // If any of the arguments have a variably modified type, make sure to
1340 // emit the type size, but only if the function is not naked. Naked functions
1341 // have no prolog to run this evaluation.
1342 if (!FD || !FD->hasAttr<NakedAttr>()) {
1343 for (const VarDecl *VD : Args) {
1344 // Dig out the type as written from ParmVarDecls; it's unclear whether
1345 // the standard (C99 6.9.1p10) requires this, but we're following the
1346 // precedent set by gcc.
1347 QualType Ty;
1348 if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
1349 Ty = PVD->getOriginalType();
1350 else
1351 Ty = VD->getType();
1352
1353 if (Ty->isVariablyModifiedType())
1354 EmitVariablyModifiedType(Ty);
1355 }
1356 }
1357 // Emit a location at the end of the prologue.
1358 if (CGDebugInfo *DI = getDebugInfo())
1359 DI->EmitLocation(Builder, StartLoc);
1360 // TODO: Do we need to handle this in two places like we do with
1361 // target-features/target-cpu?
1362 if (CurFuncDecl)
1363 if (const auto *VecWidth = CurFuncDecl->getAttr<MinVectorWidthAttr>())
1364 LargestVectorWidth = VecWidth->getVectorWidth();
1365
1366 if (CGM.shouldEmitConvergenceTokens())
1367 ConvergenceTokenStack.push_back(getOrEmitConvergenceEntryToken(CurFn));
1368}
1369
1370void CodeGenFunction::EmitFunctionBody(const Stmt *Body) {
1373 if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
1374 EmitCompoundStmtWithoutScope(*S);
1375 else
1376 EmitStmt(Body);
1377}
1378
1379/// When instrumenting to collect profile data, the counts for some blocks
1380/// such as switch cases must not include the fall-through counts, so
1381/// emit a branch around the instrumentation code. When not instrumenting,
1382/// this just calls EmitBlock().
1383void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
1384 const Stmt *S) {
1385 llvm::BasicBlock *SkipCountBB = nullptr;
1386 // Do not skip over the instrumentation when single byte coverage mode is
1387 // enabled.
1388 if (HaveInsertPoint() && CGM.getCodeGenOpts().hasProfileClangInstr() &&
1389 !llvm::EnableSingleByteCoverage) {
1390 // When instrumenting for profiling, the fallthrough to certain
1391 // statements needs to skip over the instrumentation code so that we
1392 // get an accurate count.
1393 SkipCountBB = createBasicBlock("skipcount");
1394 EmitBranch(SkipCountBB);
1395 }
1396 EmitBlock(BB);
1397 uint64_t CurrentCount = getCurrentProfileCount();
1398 incrementProfileCounter(S);
1399 setCurrentProfileCount(getCurrentProfileCount() + CurrentCount);
1400 if (SkipCountBB)
1401 EmitBlock(SkipCountBB);
1402}
1403
1404/// Tries to mark the given function nounwind based on the
1405/// non-existence of any throwing calls within it. We believe this is
1406/// lightweight enough to do at -O0.
1407static void TryMarkNoThrow(llvm::Function *F) {
1408 // LLVM treats 'nounwind' on a function as part of the type, so we
1409 // can't do this on functions that can be overwritten.
1410 if (F->isInterposable()) return;
1411
1412 for (llvm::BasicBlock &BB : *F)
1413 for (llvm::Instruction &I : BB)
1414 if (I.mayThrow())
1415 return;
1416
1417 F->setDoesNotThrow();
1418}
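// For example (editorial note): even at -O0 a definition like
//
//   int square(int x) { return x * x; }
//
// contains no instruction for which mayThrow() is true, so it is marked
// 'nounwind' here without waiting for the optimizer's attribute inference.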
1419
1420QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
1421 FunctionArgList &Args) {
1422 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
1423 QualType ResTy = FD->getReturnType();
1424
1425 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
1426 if (MD && MD->isImplicitObjectMemberFunction()) {
1427 if (CGM.getCXXABI().HasThisReturn(GD))
1428 ResTy = MD->getThisType();
1429 else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
1430 ResTy = CGM.getContext().VoidPtrTy;
1431 CGM.getCXXABI().buildThisParam(*this, Args);
1432 }
1433
1434 // The base version of an inheriting constructor whose constructed base is a
1435 // virtual base is not passed any arguments (because it doesn't actually call
1436 // the inherited constructor).
1437 bool PassedParams = true;
1438 if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
1439 if (auto Inherited = CD->getInheritedConstructor())
1440 PassedParams =
1441 getTypes().inheritingCtorHasParams(Inherited, GD.getCtorType());
1442
1443 if (PassedParams) {
1444 for (auto *Param : FD->parameters()) {
1445 Args.push_back(Param);
1446 if (!Param->hasAttr<PassObjectSizeAttr>())
1447 continue;
1448
1450 getContext(), Param->getDeclContext(), Param->getLocation(),
1451 /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamKind::Other);
1452 SizeArguments[Param] = Implicit;
1453 Args.push_back(Implicit);
1454 }
1455 }
1456
1457 if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
1458 CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);
1459
1460 return ResTy;
1461}
1462
1463void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
1464 const CGFunctionInfo &FnInfo) {
1465 assert(Fn && "generating code for null Function");
1466 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
1467 CurGD = GD;
1468
1469 FunctionArgList Args;
1470 QualType ResTy = BuildFunctionArgList(GD, Args);
1471
1472 CGM.getTargetCodeGenInfo().checkFunctionABI(CGM, FD);
1473
1474 if (FD->isInlineBuiltinDeclaration()) {
1475 // When generating code for a builtin with an inline declaration, use a
1476 // mangled name to hold the actual body, while keeping an external
1477 // definition in case the function pointer is referenced somewhere.
1478 std::string FDInlineName = (Fn->getName() + ".inline").str();
1479 llvm::Module *M = Fn->getParent();
1480 llvm::Function *Clone = M->getFunction(FDInlineName);
1481 if (!Clone) {
1482 Clone = llvm::Function::Create(Fn->getFunctionType(),
1483 llvm::GlobalValue::InternalLinkage,
1484 Fn->getAddressSpace(), FDInlineName, M);
1485 Clone->addFnAttr(llvm::Attribute::AlwaysInline);
1486 }
1487 Fn->setLinkage(llvm::GlobalValue::ExternalLinkage);
1488 Fn = Clone;
1489 } else {
1490 // Detect the unusual situation where an inline version is shadowed by a
1491 // non-inline version. In that case we should pick the external one
1492 // everywhere. That's GCC behavior too. Unfortunately, I cannot find a way
1493 // to detect that situation before we reach codegen, so do some late
1494 // replacement.
1495 for (const FunctionDecl *PD = FD->getPreviousDecl(); PD;
1496 PD = PD->getPreviousDecl()) {
1497 if (LLVM_UNLIKELY(PD->isInlineBuiltinDeclaration())) {
1498 std::string FDInlineName = (Fn->getName() + ".inline").str();
1499 llvm::Module *M = Fn->getParent();
1500 if (llvm::Function *Clone = M->getFunction(FDInlineName)) {
1501 Clone->replaceAllUsesWith(Fn);
1502 Clone->eraseFromParent();
1503 }
1504 break;
1505 }
1506 }
1507 }
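// The pattern handled above, common in glibc-style headers (editorial
// sketch):
//
//   extern inline __attribute__((gnu_inline, always_inline))
//   void *memcpy(void *d, const void *s, size_t n) { /* inline body */ }
//
// The body is emitted under the reserved name 'memcpy.inline' (internal,
// always_inline), while address-taken references still bind to the external
// 'memcpy' definition.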
1508
1509 // Check if we should generate debug info for this function.
1510 if (FD->hasAttr<NoDebugAttr>()) {
1511 // Clear non-distinct debug info that was possibly attached to the function
1512 // due to an earlier declaration without the nodebug attribute.
1513 Fn->setSubprogram(nullptr);
1514 // Disable debug info indefinitely for this function.
1515 DebugInfo = nullptr;
1516 }
1517 // Finalize function debug info on exit.
1518 llvm::scope_exit Cleanup([this] {
1519 if (CGDebugInfo *DI = getDebugInfo())
1520 DI->completeFunction();
1521 });
1522
1523 // The function might not have a body if we're generating thunks for a
1524 // function declaration.
1525 SourceRange BodyRange;
1526 if (Stmt *Body = FD->getBody())
1527 BodyRange = Body->getSourceRange();
1528 else
1529 BodyRange = FD->getLocation();
1530 CurEHLocation = BodyRange.getEnd();
1531
1532 // Use the location of the start of the function to determine where
1533 // the function definition is located. By default use the location
1534 // of the declaration as the location for the subprogram. A function
1535 // may lack a declaration in the source code if it is created by code
1536 // gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
1537 SourceLocation Loc = FD->getLocation();
1538
1539 // If this is a function specialization then use the pattern body
1540 // as the location for the function.
1541 if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
1542 if (SpecDecl->hasBody(SpecDecl))
1543 Loc = SpecDecl->getLocation();
1544
1545 Stmt *Body = FD->getBody();
1546
1547 if (Body) {
1548 // Coroutines always emit lifetime markers.
1549 if (isa<CoroutineBodyStmt>(Body))
1550 ShouldEmitLifetimeMarkers = true;
1551
1552 // Initialize helper which will detect jumps which can cause invalid
1553 // lifetime markers.
1554 if (ShouldEmitLifetimeMarkers)
1555 Bypasses.Init(CGM, Body);
1556 }
1557
1558 // Emit the standard function prologue.
1559 StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());
1560
1561 // Save parameters for coroutine function.
1562 if (Body && isa_and_nonnull<CoroutineBodyStmt>(Body))
1563 llvm::append_range(FnArgs, FD->parameters());
1564
1565 // Ensure that the function adheres to the forward progress guarantee, which
1566 // is required by certain optimizations.
1567 // In C++11 and up, the attribute will be removed if the body contains a
1568 // trivial empty loop.
1569 if (checkIfFunctionMustProgress())
1570 CurFn->addFnAttr(llvm::Attribute::MustProgress);
1571
1572 // Generate the body of the function.
1573 PGO->assignRegionCounters(GD, CurFn);
1574 if (isa<CXXDestructorDecl>(FD))
1575 EmitDestructorBody(Args);
1576 else if (isa<CXXConstructorDecl>(FD))
1577 EmitConstructorBody(Args);
1578 else if (getLangOpts().CUDA &&
1579 !getLangOpts().CUDAIsDevice &&
1580 FD->hasAttr<CUDAGlobalAttr>())
1581 CGM.getCUDARuntime().emitDeviceStub(*this, Args);
1582 else if (isa<CXXMethodDecl>(FD) &&
1583 cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
1584 // The lambda static invoker function is special, because it forwards or
1585 // clones the body of the function call operator (but is actually static).
1586 EmitLambdaStaticInvokeBody(cast<CXXMethodDecl>(FD));
1587 } else if (isa<CXXMethodDecl>(FD) &&
1588 isLambdaCallOperator(cast<CXXMethodDecl>(FD)) &&
1589 !FnInfo.isDelegateCall() &&
1590 cast<CXXMethodDecl>(FD)->getParent()->getLambdaStaticInvoker() &&
1591 hasInAllocaArg(cast<CXXMethodDecl>(FD))) {
1592 // If emitting a lambda with static invoker on X86 Windows, change
1593 // the call operator body.
1594 // Make sure that this is a call operator with an inalloca arg and check
1595 // for delegate call to make sure this is the original call op and not the
1596 // new forwarding function for the static invoker.
1597 EmitLambdaInAllocaCallOpBody(cast<CXXMethodDecl>(FD));
1598 } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
1599 (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
1600 cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
1601 // Implicit copy-assignment gets the same special treatment as implicit
1602 // copy-constructors.
1603 emitImplicitAssignmentOperatorBody(Args);
1604 } else if (DeviceKernelAttr::isOpenCLSpelling(
1605 FD->getAttr<DeviceKernelAttr>()) &&
1606 GD.getKernelReferenceKind() == KernelReferenceKind::Kernel) {
1607 CallArgList CallArgs;
1608 for (unsigned i = 0; i < Args.size(); ++i) {
1609 Address ArgAddr = GetAddrOfLocalVar(Args[i]);
1610 QualType ArgQualType = Args[i]->getType();
1611 RValue ArgRValue = convertTempToRValue(ArgAddr, ArgQualType, Loc);
1612 CallArgs.add(ArgRValue, ArgQualType);
1613 }
1614 GlobalDecl GDStub = GlobalDecl(FD, KernelReferenceKind::Stub);
1615 const FunctionType *FT = cast<FunctionType>(FD->getType());
1616 CGM.getTargetCodeGenInfo().setOCLKernelStubCallingConvention(FT);
1617 const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall(
1618 CallArgs, FT, /*ChainCall=*/false);
1619 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FnInfo);
1620 llvm::Constant *GDStubFunctionPointer =
1621 CGM.getRawFunctionPointer(GDStub, FTy);
1622 CGCallee GDStubCallee = CGCallee::forDirect(GDStubFunctionPointer, GDStub);
1623 EmitCall(FnInfo, GDStubCallee, ReturnValueSlot(), CallArgs, nullptr, false,
1624 Loc);
1625 } else if (Body) {
1626 EmitFunctionBody(Body);
1627 } else
1628 llvm_unreachable("no definition for emitted function");
1629
1630 // C++11 [stmt.return]p2:
1631 // Flowing off the end of a function [...] results in undefined behavior in
1632 // a value-returning function.
1633 // C11 6.9.1p12:
1634 // If the '}' that terminates a function is reached, and the value of the
1635 // function call is used by the caller, the behavior is undefined.
1636 if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock &&
1637 !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
1638 bool ShouldEmitUnreachable =
1639 CGM.getCodeGenOpts().StrictReturn ||
1640 !CGM.MayDropFunctionReturn(FD->getASTContext(), FD->getReturnType());
1641 if (SanOpts.has(SanitizerKind::Return)) {
1642 auto CheckOrdinal = SanitizerKind::SO_Return;
1643 auto CheckHandler = SanitizerHandler::MissingReturn;
1644 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
1645 llvm::Value *IsFalse = Builder.getFalse();
1646 EmitCheck(std::make_pair(IsFalse, CheckOrdinal), CheckHandler,
1647 EmitCheckSourceLocation(FD->getLocation()), {});
1648 } else if (ShouldEmitUnreachable) {
1649 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
1650 EmitTrapCall(llvm::Intrinsic::trap);
1651 }
1652 if (SanOpts.has(SanitizerKind::Return) || ShouldEmitUnreachable) {
1653 Builder.CreateUnreachable();
1654 Builder.ClearInsertionPoint();
1655 }
1656 }
1657
1658 // Emit the standard function epilogue.
1659 FinishFunction(BodyRange.getEnd());
1660
1661 PGO->verifyCounterMap();
1662
1663 // If we haven't marked the function nothrow through other means, do
1664 // a quick pass now to see if we can.
1665 if (!CurFn->doesNotThrow())
1666 TryMarkNoThrow(CurFn);
1667}
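// Observable effect of the strict-return handling above (an illustrative
// sketch, not part of this file): given
//
//   int f(int c) { if (c) return 1; }   // C++, non-void return
//
// flowing off the end emits 'unreachable', preceded by a trap at -O0 or by a
// MissingReturn check when -fsanitize=return is enabled.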
1668
1669/// ContainsLabel - Return true if the statement contains a label in it. If
1670/// this statement is not executed normally, the absence of a label means
1671/// that we can just remove the code.
1672bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
1673 // Null statement, not a label!
1674 if (!S) return false;
1675
1676 // If this is a label, we have to emit the code, consider something like:
1677 // if (0) { ... foo: bar(); } goto foo;
1678 //
1679 // TODO: If anyone cared, we could track __label__'s, since we know that you
1680 // can't jump to one from outside their declared region.
1681 if (isa<LabelStmt>(S))
1682 return true;
1683
1684 // If this is a case/default statement, and we haven't seen a switch, we have
1685 // to emit the code.
1686 if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
1687 return true;
1688
1689 // If this is a switch statement, we want to ignore cases below it.
1690 if (isa<SwitchStmt>(S))
1691 IgnoreCaseStmts = true;
1692
1693 // Scan subexpressions for verboten labels.
1694 for (const Stmt *SubStmt : S->children())
1695 if (ContainsLabel(SubStmt, IgnoreCaseStmts))
1696 return true;
1697
1698 return false;
1699}
1700
1701/// containsBreak - Return true if the statement contains a break out of it.
1702/// If the statement (recursively) contains a switch or loop with a break
1703/// inside of it, this is fine.
1704bool CodeGenFunction::containsBreak(const Stmt *S) {
1705 // Null statement, not a label!
1706 if (!S) return false;
1707
1708 // If this is a switch or loop that defines its own break scope, then we can
1709 // include it and anything inside of it.
1710 if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
1711 isa<ForStmt>(S))
1712 return false;
1713
1714 if (isa<BreakStmt>(S))
1715 return true;
1716
1717 // Scan subexpressions for verboten breaks.
1718 for (const Stmt *SubStmt : S->children())
1719 if (containsBreak(SubStmt))
1720 return true;
1721
1722 return false;
1723}
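// An illustrative contrast (assumed snippets, not part of this file):
//
//   if (0) { break; }                 // containsBreak -> true: the break
//                                     // escapes the scanned statement
//   if (0) { while (x) { break; } }   // containsBreak -> false: the break is
//                                     // scoped to the nested while loop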
1724
1725bool CodeGenFunction::mightAddDeclToScope(const Stmt *S) {
1726 if (!S) return false;
1727
1728 // Some statement kinds add a scope and thus never add a decl to the current
1729 // scope. Note, this list is longer than the list of statements that might
1730 // have an unscoped decl nested within them, but this way is conservatively
1731 // correct even if more statement kinds are added.
1732 if (isa<IfStmt>(S) || isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
1733 isa<DoStmt>(S) || isa<ForStmt>(S) || isa<CompoundStmt>(S) ||
1734 isa<CXXForRangeStmt>(S) || isa<CXXTryStmt>(S) ||
1735 isa<ObjCForCollectionStmt>(S) || isa<ObjCAtTryStmt>(S))
1736 return false;
1737
1738 if (isa<DeclStmt>(S))
1739 return true;
1740
1741 for (const Stmt *SubStmt : S->children())
1742 if (mightAddDeclToScope(SubStmt))
1743 return true;
1744
1745 return false;
1746}
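// Hypothetical examples of the distinction drawn above (not from this file):
//
//   if (c) int x = 0;   // IfStmt opens its own scope -> false
//   int y = 0;          // bare DeclStmt may add 'y' to the current scope -> true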
1747
1748/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1749/// to a constant, or if it does but contains a label, return false. If it
1750/// constant folds return true and set the boolean result in Result.
1751bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
1752 bool &ResultBool,
1753 bool AllowLabels) {
1754 // If MC/DC is enabled, disable folding so that we can instrument all
1755 // conditions to yield complete test vectors. We still keep track of
1756 // folded conditions during region mapping and visualization.
1757 if (!AllowLabels && CGM.getCodeGenOpts().hasProfileClangInstr() &&
1758 CGM.getCodeGenOpts().MCDCCoverage)
1759 return false;
1760
1761 llvm::APSInt ResultInt;
1762 if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels))
1763 return false;
1764
1765 ResultBool = ResultInt.getBoolValue();
1766 return true;
1767}
1768
1769/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1770/// to a constant, or if it does but contains a label, return false. If it
1771/// constant folds return true and set the folded value.
1772bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
1773 llvm::APSInt &ResultInt,
1774 bool AllowLabels) {
1775 // FIXME: Rename and handle conversion of other evaluatable things
1776 // to bool.
1777 Expr::EvalResult Result;
1778 if (!Cond->EvaluateAsInt(Result, getContext()))
1779 return false; // Not foldable, not integer or not fully evaluatable.
1780
1781 llvm::APSInt Int = Result.Val.getInt();
1782 if (!AllowLabels && CodeGenFunction::ContainsLabel(Cond))
1783 return false; // Contains a label.
1784
1785 PGO->markStmtMaybeUsed(Cond);
1786 ResultInt = Int;
1787 return true;
1788}
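// A minimal usage sketch (assumed caller, not part of this file):
//
//   bool CondValue;
//   if (CGF.ConstantFoldsToSimpleInteger(CondExpr, CondValue))
//     ...emit only the live arm; the dead arm was already vetted with
//     ContainsLabel() so it is safe to drop...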
1789
1790/// Strip parentheses and simplistic logical-NOT operators.
1791const Expr *CodeGenFunction::stripCond(const Expr *C) {
1792 while (const UnaryOperator *Op = dyn_cast<UnaryOperator>(C->IgnoreParens())) {
1793 if (Op->getOpcode() != UO_LNot)
1794 break;
1795 C = Op->getSubExpr();
1796 }
1797 return C->IgnoreParens();
1798}
1799
1800/// Determine whether the given condition is an instrumentable condition
1801/// (i.e. no "&&" or "||").
1802bool CodeGenFunction::isInstrumentedCondition(const Expr *C) {
1803 const BinaryOperator *BOp = dyn_cast<BinaryOperator>(stripCond(C));
1804 return (!BOp || !BOp->isLogicalOp());
1805}
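// For example (illustrative only): in "a && (b || !c)" the leaf conditions
// a, b, and !c are instrumentable, while the "&&" and "||" nodes themselves
// are not, because stripCond() looks through parentheses and logical-NOT
// before checking for a logical operator.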
1806
1807/// EmitBranchToCounterBlock - Emit a conditional branch to a new block that
1808/// increments a profile counter based on the semantics of the given logical
1809/// operator opcode. This is used to instrument branch condition coverage for
1810/// logical operators.
1811void CodeGenFunction::EmitBranchToCounterBlock(
1812 const Expr *Cond, BinaryOperator::Opcode LOp, llvm::BasicBlock *TrueBlock,
1813 llvm::BasicBlock *FalseBlock, uint64_t TrueCount /* = 0 */,
1814 Stmt::Likelihood LH /* =None */, const Expr *CntrIdx /* = nullptr */) {
1815 // If not instrumenting, just emit a branch.
1816 bool InstrumentRegions = CGM.getCodeGenOpts().hasProfileClangInstr();
1817 if (!InstrumentRegions || !isInstrumentedCondition(Cond))
1818 return EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount, LH);
1819
1820 const Stmt *CntrStmt = (CntrIdx ? CntrIdx : Cond);
1821
1822 llvm::BasicBlock *ThenBlock = nullptr;
1823 llvm::BasicBlock *ElseBlock = nullptr;
1824 llvm::BasicBlock *NextBlock = nullptr;
1825
1826 // Create the block we'll use to increment the appropriate counter.
1827 llvm::BasicBlock *CounterIncrBlock = createBasicBlock("lop.rhscnt");
1828
1829 // Set block pointers according to Logical-AND (BO_LAnd) semantics. This
1830 // means we need to evaluate the condition and increment the counter on TRUE:
1831 //
1832 // if (Cond)
1833 // goto CounterIncrBlock;
1834 // else
1835 // goto FalseBlock;
1836 //
1837 // CounterIncrBlock:
1838 // Counter++;
1839 // goto TrueBlock;
1840
1841 if (LOp == BO_LAnd) {
1842 ThenBlock = CounterIncrBlock;
1843 ElseBlock = FalseBlock;
1844 NextBlock = TrueBlock;
1845 }
1846
1847 // Set block pointers according to Logical-OR (BO_LOr) semantics. This means
1848 // we need to evaluate the condition and increment the counter on FALSE:
1849 //
1850 // if (Cond)
1851 // goto TrueBlock;
1852 // else
1853 // goto CounterIncrBlock;
1854 //
1855 // CounterIncrBlock:
1856 // Counter++;
1857 // goto FalseBlock;
1858
1859 else if (LOp == BO_LOr) {
1860 ThenBlock = TrueBlock;
1861 ElseBlock = CounterIncrBlock;
1862 NextBlock = FalseBlock;
1863 } else {
1864 llvm_unreachable("Expected Opcode must be that of a Logical Operator");
1865 }
1866
1867 // Emit Branch based on condition.
1868 EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, TrueCount, LH);
1869
1870 // Emit the block containing the counter increment(s).
1871 EmitBlock(CounterIncrBlock);
1872
1873 // Increment corresponding counter; if index not provided, use Cond as index.
1874 incrementProfileCounter(CntrStmt);
1875
1876 // Go to the next block.
1877 EmitBranch(NextBlock);
1878}
1879
1880/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
1881/// statement) to the specified blocks. Based on the condition, this might try
1882/// to simplify the codegen of the conditional based on the branch.
1883/// \param LH The value of the likelihood attribute on the True branch.
1884/// \param ConditionalOp Used by MC/DC code coverage to track the result of the
1885/// ConditionalOperator (ternary) through a recursive call for the operator's
1886/// LHS and RHS nodes.
1887void CodeGenFunction::EmitBranchOnBoolExpr(
1888 const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock,
1889 uint64_t TrueCount, Stmt::Likelihood LH, const Expr *ConditionalOp,
1890 const VarDecl *ConditionalDecl) {
1891 Cond = Cond->IgnoreParens();
1892
1893 if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
1894 // Handle X && Y in a condition.
1895 if (CondBOp->getOpcode() == BO_LAnd) {
1896 MCDCLogOpStack.push_back(CondBOp);
1897
1898 // If we have "1 && X", simplify the code. "0 && X" would have constant
1899 // folded if the case was simple enough.
1900 bool ConstantBool = false;
1901 if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1902 ConstantBool) {
1903 // br(1 && X) -> br(X).
1904 incrementProfileCounter(CondBOp);
1905 EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LAnd, TrueBlock,
1906 FalseBlock, TrueCount, LH);
1907 MCDCLogOpStack.pop_back();
1908 return;
1909 }
1910
1911 // If we have "X && 1", simplify the code to use an uncond branch.
1912 // "X && 0" would have been constant folded to 0.
1913 if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1914 ConstantBool) {
1915 // br(X && 1) -> br(X).
1916 EmitBranchToCounterBlock(CondBOp->getLHS(), BO_LAnd, TrueBlock,
1917 FalseBlock, TrueCount, LH, CondBOp);
1918 MCDCLogOpStack.pop_back();
1919 return;
1920 }
1921
1922 // Emit the LHS as a conditional. If the LHS conditional is false, we
1923 // want to jump to the FalseBlock.
1924 llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
1925 // The counter tells us how often we evaluate RHS, and all of TrueCount
1926 // can be propagated to that branch.
1927 uint64_t RHSCount = getProfileCount(CondBOp->getRHS());
1928
1929 ConditionalEvaluation eval(*this);
1930 {
1931 ApplyDebugLocation DL(*this, Cond);
1932 // Propagate the likelihood attribute like __builtin_expect
1933 // __builtin_expect(X && Y, 1) -> X and Y are likely
1934 // __builtin_expect(X && Y, 0) -> only Y is unlikely
1935 EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount,
1936 LH == Stmt::LH_Unlikely ? Stmt::LH_None : LH);
1937 EmitBlock(LHSTrue);
1938 }
1939
1940 incrementProfileCounter(CondBOp);
1941 setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1942
1943 // Any temporaries created here are conditional.
1944 eval.begin(*this);
1945 EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LAnd, TrueBlock,
1946 FalseBlock, TrueCount, LH);
1947 eval.end(*this);
1948 MCDCLogOpStack.pop_back();
1949 return;
1950 }
1951
1952 if (CondBOp->getOpcode() == BO_LOr) {
1953 MCDCLogOpStack.push_back(CondBOp);
1954
1955 // If we have "0 || X", simplify the code. "1 || X" would have constant
1956 // folded if the case was simple enough.
1957 bool ConstantBool = false;
1958 if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1959 !ConstantBool) {
1960 // br(0 || X) -> br(X).
1961 incrementProfileCounter(CondBOp);
1962 EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LOr, TrueBlock,
1963 FalseBlock, TrueCount, LH);
1964 MCDCLogOpStack.pop_back();
1965 return;
1966 }
1967
1968 // If we have "X || 0", simplify the code to use an uncond branch.
1969 // "X || 1" would have been constant folded to 1.
1970 if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1971 !ConstantBool) {
1972 // br(X || 0) -> br(X).
1973 EmitBranchToCounterBlock(CondBOp->getLHS(), BO_LOr, TrueBlock,
1974 FalseBlock, TrueCount, LH, CondBOp);
1975 MCDCLogOpStack.pop_back();
1976 return;
1977 }
1978 // Emit the LHS as a conditional. If the LHS conditional is true, we
1979 // want to jump to the TrueBlock.
1980 llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
1981 // We have the count for entry to the RHS and for the whole expression
1982 // being true, so we can divvy up the True count between the short circuit and
1983 // the RHS.
1984 uint64_t LHSCount =
1985 getCurrentProfileCount() - getProfileCount(CondBOp->getRHS());
1986 uint64_t RHSCount = TrueCount - LHSCount;
1987
1988 ConditionalEvaluation eval(*this);
1989 {
1990 // Propagate the likelihood attribute like __builtin_expect
1991 // __builtin_expect(X || Y, 1) -> only Y is likely
1992 // __builtin_expect(X || Y, 0) -> both X and Y are unlikely
1993 ApplyDebugLocation DL(*this, Cond);
1994 EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount,
1995 LH == Stmt::LH_Likely ? Stmt::LH_None : LH);
1996 EmitBlock(LHSFalse);
1997 }
1998
1999 incrementProfileCounter(CondBOp);
2000 setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
2001
2002 // Any temporaries created here are conditional.
2003 eval.begin(*this);
2004 EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LOr, TrueBlock, FalseBlock,
2005 RHSCount, LH);
2006
2007 eval.end(*this);
2008 MCDCLogOpStack.pop_back();
2009 return;
2010 }
2011 }
2012
2013 if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
2014 // br(!x, t, f) -> br(x, f, t)
2015 // Avoid doing this optimization when instrumenting a condition for MC/DC.
2016 // LNot is taken as part of the condition for simplicity, and changing its
2017 // sense negatively impacts test vector tracking.
2018 bool MCDCCondition = CGM.getCodeGenOpts().hasProfileClangInstr() &&
2019 CGM.getCodeGenOpts().MCDCCoverage &&
2020 isInstrumentedCondition(Cond);
2021 if (CondUOp->getOpcode() == UO_LNot && !MCDCCondition) {
2022 // Negate the count.
2023 uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
2024 // The values of the enum are chosen to make this negation possible.
2025 LH = static_cast<Stmt::Likelihood>(-LH);
2026 // Negate the condition and swap the destination blocks.
2027 return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
2028 FalseCount, LH);
2029 }
2030 }
2031
2032 if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
2033 // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
2034 llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
2035 llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
2036
2037 // The ConditionalOperator itself has no likelihood information for its
2038 // true and false branches. This matches the behavior of __builtin_expect.
2039 ConditionalEvaluation cond(*this);
2040 EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock,
2041 getProfileCount(CondOp), Stmt::LH_None);
2042
2043 // When computing PGO branch weights, we only know the overall count for
2044 // the true block. This code is essentially doing tail duplication of the
2045 // naive code-gen, introducing new edges for which counts are not
2046 // available. Divide the counts proportionally between the LHS and RHS of
2047 // the conditional operator.
2048 uint64_t LHSScaledTrueCount = 0;
2049 if (TrueCount) {
2050 double LHSRatio =
2051 getProfileCount(CondOp) / (double)getCurrentProfileCount();
2052 LHSScaledTrueCount = TrueCount * LHSRatio;
2053 }
2054
2055 cond.begin(*this);
2056 EmitBlock(LHSBlock);
2057 incrementProfileCounter(CondOp);
2058 {
2059 ApplyDebugLocation DL(*this, Cond);
2060 EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
2061 LHSScaledTrueCount, LH, CondOp);
2062 }
2063 cond.end(*this);
2064
2065 cond.begin(*this);
2066 EmitBlock(RHSBlock);
2067 EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
2068 TrueCount - LHSScaledTrueCount, LH, CondOp);
2069 cond.end(*this);
2070
2071 return;
2072 }
2073
2074 if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
2075 // Conditional operator handling can give us a throw expression as a
2076 // condition for a case like:
2077 // br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f))
2078 // Fold this to:
2079 // br(c, throw x, br(y, t, f))
2080 EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
2081 return;
2082 }
2083
2084 // Emit the code with the fully general case.
2085 llvm::Value *CondV;
2086 {
2087 ApplyDebugLocation DL(*this, Cond);
2088 CondV = EvaluateExprAsBool(Cond);
2089 }
2090
2091 MaybeEmitDeferredVarDeclInit(ConditionalDecl);
2092
2093 // If not at the top of the logical operator nest, update MCDC temp with the
2094 // boolean result of the evaluated condition.
2095 if (!MCDCLogOpStack.empty()) {
2096 const Expr *MCDCBaseExpr = Cond;
2097 // When a nested ConditionalOperator (ternary) is encountered in a boolean
2098 // expression, MC/DC tracks the result of the ternary, and this is tied to
2099 // the ConditionalOperator expression and not the ternary's LHS or RHS. If
2100 // this is the case, the ConditionalOperator expression is passed through
2101 // the ConditionalOp parameter and then used as the MCDC base expression.
2102 if (ConditionalOp)
2103 MCDCBaseExpr = ConditionalOp;
2104
2105 maybeUpdateMCDCCondBitmap(MCDCBaseExpr, CondV);
2106 }
2107
2108 llvm::MDNode *Weights = nullptr;
2109 llvm::MDNode *Unpredictable = nullptr;
2110
2111 // If the branch has a condition wrapped by __builtin_unpredictable,
2112 // create metadata that specifies that the branch is unpredictable.
2113 // Don't bother if not optimizing because that metadata would not be used.
2114 auto *Call = dyn_cast<CallExpr>(Cond->IgnoreImpCasts());
2115 if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
2116 auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
2117 if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
2118 llvm::MDBuilder MDHelper(getLLVMContext());
2119 Unpredictable = MDHelper.createUnpredictable();
2120 }
2121 }
2122
2123 // If there is a Likelihood knowledge for the cond, lower it.
2124 // Note that if not optimizing this won't emit anything.
2125 llvm::Value *NewCondV = emitCondLikelihoodViaExpectIntrinsic(CondV, LH);
2126 if (CondV != NewCondV)
2127 CondV = NewCondV;
2128 else {
2129 // Otherwise, lower profile counts. Note that we do this even at -O0.
2130 uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount);
2131 Weights = createProfileWeights(TrueCount, CurrentCount - TrueCount);
2132 }
2133
2134 llvm::Instruction *BrInst = Builder.CreateCondBr(CondV, TrueBlock, FalseBlock,
2135 Weights, Unpredictable);
2136 addInstToNewSourceAtom(BrInst, CondV);
2137
2138 switch (HLSLControlFlowAttr) {
2139 case HLSLControlFlowHintAttr::Microsoft_branch:
2140 case HLSLControlFlowHintAttr::Microsoft_flatten: {
2141 llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2142
2143 llvm::ConstantInt *BranchHintConstant =
2144 HLSLControlFlowAttr ==
2145 HLSLControlFlowHintAttr::Spelling::Microsoft_branch
2146 ? llvm::ConstantInt::get(CGM.Int32Ty, 1)
2147 : llvm::ConstantInt::get(CGM.Int32Ty, 2);
2148
2149 SmallVector<llvm::Metadata *, 2> Vals(
2150 {MDHelper.createString("hlsl.controlflow.hint"),
2151 MDHelper.createConstant(BranchHintConstant)});
2152 BrInst->setMetadata("hlsl.controlflow.hint",
2153 llvm::MDNode::get(CGM.getLLVMContext(), Vals));
2154 break;
2155 }
2156 // This is required to avoid warnings during compilation
2157 case HLSLControlFlowHintAttr::SpellingNotCalculated:
2158 break;
2159 }
2160}
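// A sketch of the net effect (assumed source, not part of this file):
//
//   if (__builtin_expect(x && y, 0)) { ... }
//
// emits a short-circuit chain: the branch on 'x' carries no likelihood (see
// the LH_Unlikely propagation above) while the branch on 'y' is lowered as
// unlikely; profile weights, MC/DC bitmap updates, and any unpredictable or
// HLSL hint metadata all land on the final conditional branch.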
2161
2162llvm::Value *CodeGenFunction::EmitScalarOrConstFoldImmArg(unsigned ICEArguments,
2163 unsigned Idx,
2164 const CallExpr *E) {
2165 llvm::Value *Arg = nullptr;
2166 if ((ICEArguments & (1 << Idx)) == 0) {
2167 Arg = EmitScalarExpr(E->getArg(Idx));
2168 } else {
2169 // If this is required to be a constant, constant fold it so that we
2170 // know that the generated intrinsic gets a ConstantInt.
2171 std::optional<llvm::APSInt> Result =
2172 E->getArg(Idx)->getIntegerConstantExpr(getContext());
2173 assert(Result && "Expected argument to be a constant");
2174 Arg = llvm::ConstantInt::get(getLLVMContext(), *Result);
2175 }
2176 return Arg;
2177}
2178
2179/// ErrorUnsupported - Print out an error that codegen doesn't support the
2180/// specified stmt yet.
2181void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
2182 CGM.ErrorUnsupported(S, Type);
2183}
2184
2185/// emitNonZeroVLAInit - Emit the "zero" initialization of a
2186/// variable-length array whose elements have a non-zero bit-pattern.
2187///
2188/// \param baseType the inner-most element type of the array
2189/// \param src - a char* pointing to the bit-pattern for a single
2190/// base element of the array
2191/// \param sizeInChars - the total size of the VLA, in chars
2192static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
2193 Address dest, Address src,
2194 llvm::Value *sizeInChars) {
2195 CGBuilderTy &Builder = CGF.Builder;
2196
2197 CharUnits baseSize = CGF.getContext().getTypeSizeInChars(baseType);
2198 llvm::Value *baseSizeInChars
2199 = llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity());
2200
2201 Address begin = dest.withElementType(CGF.Int8Ty);
2202 llvm::Value *end = Builder.CreateInBoundsGEP(begin.getElementType(),
2203 begin.emitRawPointer(CGF),
2204 sizeInChars, "vla.end");
2205
2206 llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
2207 llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
2208 llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");
2209
2210 // Make a loop over the VLA. C99 guarantees that the VLA element
2211 // count must be nonzero.
2212 CGF.EmitBlock(loopBB);
2213
2214 llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur");
2215 cur->addIncoming(begin.emitRawPointer(CGF), originBB);
2216
2217 CharUnits curAlign =
2218 dest.getAlignment().alignmentOfArrayElement(baseSize);
2219
2220 // memcpy the individual element bit-pattern.
2221 Builder.CreateMemCpy(Address(cur, CGF.Int8Ty, curAlign), src, baseSizeInChars,
2222 /*volatile*/ false);
2223
2224 // Go to the next element.
2225 llvm::Value *next =
2226 Builder.CreateInBoundsGEP(CGF.Int8Ty, cur, baseSizeInChars, "vla.next");
2227
2228 // Leave if that's the end of the VLA.
2229 llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
2230 Builder.CreateCondBr(done, contBB, loopBB);
2231 cur->addIncoming(next, loopBB);
2232
2233 CGF.EmitBlock(contBB);
2234}
2235
2236void
2237CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
2238 // Ignore empty classes in C++.
2239 if (getLangOpts().CPlusPlus)
2240 if (const auto *RD = Ty->getAsCXXRecordDecl(); RD && RD->isEmpty())
2241 return;
2242
2243 if (DestPtr.getElementType() != Int8Ty)
2244 DestPtr = DestPtr.withElementType(Int8Ty);
2245
2246 // Get size and alignment info for this aggregate.
2247 CharUnits size = getContext().getTypeSizeInChars(Ty);
2248
2249 llvm::Value *SizeVal;
2250 const VariableArrayType *vla;
2251
2252 // Don't bother emitting a zero-byte memset.
2253 if (size.isZero()) {
2254 // But note that getTypeInfo returns 0 for a VLA.
2255 if (const VariableArrayType *vlaType =
2256 dyn_cast_or_null<VariableArrayType>(
2257 getContext().getAsArrayType(Ty))) {
2258 auto VlaSize = getVLASize(vlaType);
2259 SizeVal = VlaSize.NumElts;
2260 CharUnits eltSize = getContext().getTypeSizeInChars(VlaSize.Type);
2261 if (!eltSize.isOne())
2262 SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
2263 vla = vlaType;
2264 } else {
2265 return;
2266 }
2267 } else {
2268 SizeVal = CGM.getSize(size);
2269 vla = nullptr;
2270 }
2271
2272 // If the type contains a pointer to data member we can't memset it to zero.
2273 // Instead, create a null constant and copy it to the destination.
2274 // TODO: there are other patterns besides zero that we can usefully memset,
2275 // like -1, which happens to be the pattern used by member-pointers.
2276 if (!CGM.getTypes().isZeroInitializable(Ty)) {
2277 // For a VLA, emit a single element, then splat that over the VLA.
2278 if (vla) Ty = getContext().getBaseElementType(vla);
2279
2280 llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);
2281
2282 llvm::GlobalVariable *NullVariable =
2283 new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
2284 /*isConstant=*/true,
2285 llvm::GlobalVariable::PrivateLinkage,
2286 NullConstant, Twine());
2287 CharUnits NullAlign = DestPtr.getAlignment();
2288 NullVariable->setAlignment(NullAlign.getAsAlign());
2289 Address SrcPtr(NullVariable, Builder.getInt8Ty(), NullAlign);
2290
2291 if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);
2292
2293 // Get and call the appropriate llvm.memcpy overload.
2294 Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, false);
2295 return;
2296 }
2297
2298 // Otherwise, just memset the whole thing to zero. This is legal
2299 // because in LLVM, all default initializers (other than the ones we just
2300 // handled above) are guaranteed to have a bit pattern of all zeros.
2301 Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
2302}
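// Two illustrative cases (assumed C++ inputs, not part of this file):
//
//   struct P { int a, b; };    P p{};   // all-zero pattern: one memset
//   struct M { int P::*mp; };  M m{};   // null member pointer is -1 in the
//                                       // Itanium ABI: memcpy from a private
//                                       // global holding the pattern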
2303
2304llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
2305 // Make sure that there is a block for the indirect goto.
2306 if (!IndirectBranch)
2307 GetIndirectGotoBlock();
2308
2309 llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();
2310
2311 // Make sure the indirect branch includes all of the address-taken blocks.
2312 IndirectBranch->addDestination(BB);
2313 return llvm::BlockAddress::get(CurFn->getType(), BB);
2314}
2315
2316llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
2317 // If we already made the indirect branch for indirect goto, return its block.
2318 if (IndirectBranch) return IndirectBranch->getParent();
2319
2320 CGBuilderTy TmpBuilder(*this, createBasicBlock("indirectgoto"));
2321
2322 // Create the PHI node that indirect gotos will add entries to.
2323 llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
2324 "indirect.goto.dest");
2325
2326 // Create the indirect branch instruction.
2327 IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
2328 return IndirectBranch->getParent();
2329}
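// Both helpers back the GNU labels-as-values extension (a hypothetical
// example, not part of this file):
//
//   void *tgt = &&done;   // GetAddrOfLabel -> blockaddress constant
//   goto *tgt;            // dispatched via the shared "indirectgoto" block
// done:;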
2330
2331/// Computes the length of an array in elements, as well as the base
2332/// element type and a properly-typed first element pointer.
2333llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
2334 QualType &baseType,
2335 Address &addr) {
2336 const ArrayType *arrayType = origArrayType;
2337
2338 // If it's a VLA, we have to load the stored size. Note that
2339 // this is the size of the VLA in bytes, not its size in elements.
2340 llvm::Value *numVLAElements = nullptr;
2341 if (isa<VariableArrayType>(arrayType)) {
2342 numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).NumElts;
2343
2344 // Walk into all VLAs. This doesn't require changes to addr,
2345 // which has type T* where T is the first non-VLA element type.
2346 do {
2347 QualType elementType = arrayType->getElementType();
2348 arrayType = getContext().getAsArrayType(elementType);
2349
2350 // If we only have VLA components, 'addr' requires no adjustment.
2351 if (!arrayType) {
2352 baseType = elementType;
2353 return numVLAElements;
2354 }
2355 } while (isa<VariableArrayType>(arrayType));
2356
2357 // We get out here only if we find a constant array type
2358 // inside the VLA.
2359 }
2360
2361 // We have some number of constant-length arrays, so addr should
2362 // have LLVM type [M x [N x [...]]]*. Build a GEP that walks
2363 // down to the first element of addr.
2364 SmallVector<llvm::Value*, 8> gepIndices;
2365
2366 // GEP down to the array type.
2367 llvm::ConstantInt *zero = Builder.getInt32(0);
2368 gepIndices.push_back(zero);
2369
2370 uint64_t countFromCLAs = 1;
2371 QualType eltType;
2372
2373 llvm::ArrayType *llvmArrayType =
2374 dyn_cast<llvm::ArrayType>(addr.getElementType());
2375 while (llvmArrayType) {
2376 assert(isa<ConstantArrayType>(arrayType));
2377 assert(cast<ConstantArrayType>(arrayType)->getZExtSize() ==
2378 llvmArrayType->getNumElements());
2379
2380 gepIndices.push_back(zero);
2381 countFromCLAs *= llvmArrayType->getNumElements();
2382 eltType = arrayType->getElementType();
2383
2384 llvmArrayType =
2385 dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
2386 arrayType = getContext().getAsArrayType(arrayType->getElementType());
2387 assert((!llvmArrayType || arrayType) &&
2388 "LLVM and Clang types are out-of-synch");
2389 }
2390
2391 if (arrayType) {
2392 // From this point onwards, the Clang array type has been emitted
2393 // as some other type (probably a packed struct). Compute the array
2394 // size, and just emit the 'begin' expression as a bitcast.
2395 while (arrayType) {
2396 countFromCLAs *= cast<ConstantArrayType>(arrayType)->getZExtSize();
2397 eltType = arrayType->getElementType();
2398 arrayType = getContext().getAsArrayType(eltType);
2399 }
2400
2401 llvm::Type *baseType = ConvertType(eltType);
2402 addr = addr.withElementType(baseType);
2403 } else {
2404 // Create the actual GEP.
2405 addr = Address(Builder.CreateInBoundsGEP(addr.getElementType(),
2406 addr.emitRawPointer(*this),
2407 gepIndices, "array.begin"),
2408 ConvertTypeForMem(eltType), addr.getAlignment());
2409 }
2410
2411 baseType = eltType;
2412
2413 llvm::Value *numElements
2414 = llvm::ConstantInt::get(SizeTy, countFromCLAs);
2415
2416 // If we had any VLA dimensions, factor them in.
2417 if (numVLAElements)
2418 numElements = Builder.CreateNUWMul(numVLAElements, numElements);
2419
2420 return numElements;
2421}
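// Worked example (assumed input, not part of this file): for 'int a[2][3]'
// the loop above pushes GEP indices {0, 0, 0}, countFromCLAs accumulates
// 2 * 3, baseType comes back as 'int', and the returned element count is the
// constant 6.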
2422
2423CodeGenFunction::VlaSizePair CodeGenFunction::getVLASize(QualType type) {
2424 const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
2425 assert(vla && "type was not a variable array type!");
2426 return getVLASize(vla);
2427}
2428
2429CodeGenFunction::VlaSizePair
2430CodeGenFunction::getVLASize(const VariableArrayType *type) {
2431 // The number of elements so far; always size_t.
2432 llvm::Value *numElements = nullptr;
2433
2434 QualType elementType;
2435 do {
2436 elementType = type->getElementType();
2437 llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
2438 assert(vlaSize && "no size for VLA!");
2439 assert(vlaSize->getType() == SizeTy);
2440
2441 if (!numElements) {
2442 numElements = vlaSize;
2443 } else {
2444 // It's undefined behavior if this wraps around, so mark it that way.
2445 // FIXME: Teach -fsanitize=undefined to trap this.
2446 numElements = Builder.CreateNUWMul(numElements, vlaSize);
2447 }
2448 } while ((type = getContext().getAsVariableArrayType(elementType)));
2449
2450 return { numElements, elementType };
2451}
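// E.g. (illustrative): for 'int a[n][m]' both dimension sizes were cached in
// VLASizeMap when the type was emitted, so this returns the product n * m
// (computed with NUW multiplies) paired with the element type 'int'.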
2452
2453CodeGenFunction::VlaSizePair
2454CodeGenFunction::getVLAElements1D(QualType type) {
2455 const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
2456 assert(vla && "type was not a variable array type!");
2457 return getVLAElements1D(vla);
2458}
2459
2460CodeGenFunction::VlaSizePair
2461CodeGenFunction::getVLAElements1D(const VariableArrayType *Vla) {
2462 llvm::Value *VlaSize = VLASizeMap[Vla->getSizeExpr()];
2463 assert(VlaSize && "no size for VLA!");
2464 assert(VlaSize->getType() == SizeTy);
2465 return { VlaSize, Vla->getElementType() };
2466}
2467
2468void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
2469 assert(type->isVariablyModifiedType() &&
2470 "Must pass variably modified type to EmitVLASizes!");
2471
2472 EnsureInsertPoint();
2473
2474 // We're going to walk down into the type and look for VLA
2475 // expressions.
2476 do {
2477 assert(type->isVariablyModifiedType());
2478
2479 const Type *ty = type.getTypePtr();
2480 switch (ty->getTypeClass()) {
2481
2482#define TYPE(Class, Base)
2483#define ABSTRACT_TYPE(Class, Base)
2484#define NON_CANONICAL_TYPE(Class, Base)
2485#define DEPENDENT_TYPE(Class, Base) case Type::Class:
2486#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
2487#include "clang/AST/TypeNodes.inc"
2488 llvm_unreachable("unexpected dependent type!");
2489
2490 // These types are never variably-modified.
2491 case Type::Builtin:
2492 case Type::Complex:
2493 case Type::Vector:
2494 case Type::ExtVector:
2495 case Type::ConstantMatrix:
2496 case Type::Record:
2497 case Type::Enum:
2498 case Type::Using:
2499 case Type::TemplateSpecialization:
2500 case Type::ObjCTypeParam:
2501 case Type::ObjCObject:
2502 case Type::ObjCInterface:
2503 case Type::ObjCObjectPointer:
2504 case Type::BitInt:
2505 case Type::HLSLInlineSpirv:
2506 case Type::PredefinedSugar:
2507 llvm_unreachable("type class is never variably-modified!");
2508
2509 case Type::Adjusted:
2510 type = cast<AdjustedType>(ty)->getAdjustedType();
2511 break;
2512
2513 case Type::Decayed:
2514 type = cast<DecayedType>(ty)->getPointeeType();
2515 break;
2516
2517 case Type::Pointer:
2518 type = cast<PointerType>(ty)->getPointeeType();
2519 break;
2520
2521 case Type::BlockPointer:
2522 type = cast<BlockPointerType>(ty)->getPointeeType();
2523 break;
2524
2525 case Type::LValueReference:
2526 case Type::RValueReference:
2527 type = cast<ReferenceType>(ty)->getPointeeType();
2528 break;
2529
2530 case Type::MemberPointer:
2531 type = cast<MemberPointerType>(ty)->getPointeeType();
2532 break;
2533
2534 case Type::ArrayParameter:
2535 case Type::ConstantArray:
2536 case Type::IncompleteArray:
2537 // Losing element qualification here is fine.
2538 type = cast<ArrayType>(ty)->getElementType();
2539 break;
2540
2541 case Type::VariableArray: {
2542 // Losing element qualification here is fine.
2543 const VariableArrayType *vat = cast<VariableArrayType>(ty);
2544
2545 // Unknown size indication requires no size computation.
2546 // Otherwise, evaluate and record it.
2547 if (const Expr *sizeExpr = vat->getSizeExpr()) {
2548 // It's possible that we might have emitted this already,
2549 // e.g. with a typedef and a pointer to it.
2550 llvm::Value *&entry = VLASizeMap[sizeExpr];
2551 if (!entry) {
2552 llvm::Value *size = EmitScalarExpr(sizeExpr);
2553
2554 // C11 6.7.6.2p5:
2555 // If the size is an expression that is not an integer constant
2556 // expression [...] each time it is evaluated it shall have a value
2557 // greater than zero.
2558 if (SanOpts.has(SanitizerKind::VLABound)) {
2559 auto CheckOrdinal = SanitizerKind::SO_VLABound;
2560 auto CheckHandler = SanitizerHandler::VLABoundNotPositive;
2561 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
2562 llvm::Value *Zero = llvm::Constant::getNullValue(size->getType());
2563 clang::QualType SEType = sizeExpr->getType();
2564 llvm::Value *CheckCondition =
2565 SEType->isSignedIntegerType()
2566 ? Builder.CreateICmpSGT(size, Zero)
2567 : Builder.CreateICmpUGT(size, Zero);
2568 llvm::Constant *StaticArgs[] = {
2569 EmitCheckSourceLocation(sizeExpr->getBeginLoc()),
2570 EmitCheckTypeDescriptor(SEType)};
2571 EmitCheck(std::make_pair(CheckCondition, CheckOrdinal),
2572 CheckHandler, StaticArgs, size);
2573 }
2574
2575 // Always zexting here would be wrong if it weren't
2576 // undefined behavior to have a negative bound.
2577 // FIXME: What about when size's type is larger than size_t?
2578 entry = Builder.CreateIntCast(size, SizeTy, /*signed*/ false);
2579 }
2580 }
2581 type = vat->getElementType();
2582 break;
2583 }
2584
2585 case Type::FunctionProto:
2586 case Type::FunctionNoProto:
2587 type = cast<FunctionType>(ty)->getReturnType();
2588 break;
2589
2590 case Type::Paren:
2591 case Type::TypeOf:
2592 case Type::UnaryTransform:
2593 case Type::Attributed:
2594 case Type::BTFTagAttributed:
2595 case Type::HLSLAttributedResource:
2596 case Type::SubstTemplateTypeParm:
2597 case Type::MacroQualified:
2598 case Type::CountAttributed:
2599 // Keep walking after single level desugaring.
2600 type = type.getSingleStepDesugaredType(getContext());
2601 break;
2602
2603 case Type::Typedef:
2604 case Type::Decltype:
2605 case Type::Auto:
2606 case Type::DeducedTemplateSpecialization:
2607 case Type::PackIndexing:
2608 // Stop walking: nothing to do.
2609 return;
2610
2611 case Type::TypeOfExpr:
2612 // Stop walking: emit typeof expression.
2613 EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
2614 return;
2615
2616 case Type::Atomic:
2617 type = cast<AtomicType>(ty)->getValueType();
2618 break;
2619
2620 case Type::Pipe:
2621 type = cast<PipeType>(ty)->getElementType();
2622 break;
2623 }
2624 } while (type->isVariablyModifiedType());
2625}
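// E.g. (illustrative): given 'typedef int A[n]; A *p;', the size expression
// 'n' is evaluated exactly once and cached in VLASizeMap; later uses of A
// reuse the cached value instead of re-evaluating 'n'.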
2626
2627Address CodeGenFunction::EmitVAListRef(const Expr *E) {
2628 if (getContext().getBuiltinVaListType()->isArrayType())
2629 return EmitPointerWithAlignment(E);
2630 return EmitLValue(E).getAddress();
2631}
2632
2633Address CodeGenFunction::EmitMSVAListRef(const Expr *E) {
2634 return EmitLValue(E).getAddress();
2635}
2636
2637void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
2638 const APValue &Init) {
2639 assert(Init.hasValue() && "Invalid DeclRefExpr initializer!");
2640 if (CGDebugInfo *Dbg = getDebugInfo())
2641 if (CGM.getCodeGenOpts().hasReducedDebugInfo())
2642 Dbg->EmitGlobalVariable(E->getDecl(), Init);
2643}
2644
2645CodeGenFunction::PeepholeProtection
2646CodeGenFunction::protectFromPeepholes(RValue rvalue) {
2647 // At the moment, the only aggressive peephole we do in IR gen
2648 // is trunc(zext) folding, but if we add more, we can easily
2649 // extend this protection.
2650
2651 if (!rvalue.isScalar()) return PeepholeProtection();
2652 llvm::Value *value = rvalue.getScalarVal();
2653 if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();
2654
2655 // Just make an extra bitcast.
2656 assert(HaveInsertPoint());
2657 llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
2658 Builder.GetInsertBlock());
2659
2660 PeepholeProtection protection;
2661 protection.Inst = inst;
2662 return protection;
2663}
2664
2665void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
2666 if (!protection.Inst) return;
2667
2668 // In theory, we could try to duplicate the peepholes now, but whatever.
2669 protection.Inst->eraseFromParent();
2670}
2671
2672void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
2673 QualType Ty, SourceLocation Loc,
2674 SourceLocation AssumptionLoc,
2675 llvm::Value *Alignment,
2676 llvm::Value *OffsetValue) {
2677 if (Alignment->getType() != IntPtrTy)
2678 Alignment =
2679 Builder.CreateIntCast(Alignment, IntPtrTy, false, "casted.align");
2680 if (OffsetValue && OffsetValue->getType() != IntPtrTy)
2681 OffsetValue =
2682 Builder.CreateIntCast(OffsetValue, IntPtrTy, true, "casted.offset");
2683 llvm::Value *TheCheck = nullptr;
2684 if (SanOpts.has(SanitizerKind::Alignment)) {
2685 llvm::Value *PtrIntValue =
2686 Builder.CreatePtrToInt(PtrValue, IntPtrTy, "ptrint");
2687
2688 if (OffsetValue) {
2689 bool IsOffsetZero = false;
2690 if (const auto *CI = dyn_cast<llvm::ConstantInt>(OffsetValue))
2691 IsOffsetZero = CI->isZero();
2692
2693 if (!IsOffsetZero)
2694 PtrIntValue = Builder.CreateSub(PtrIntValue, OffsetValue, "offsetptr");
2695 }
2696
2697 llvm::Value *Zero = llvm::ConstantInt::get(IntPtrTy, 0);
2698 llvm::Value *Mask =
2699 Builder.CreateSub(Alignment, llvm::ConstantInt::get(IntPtrTy, 1));
2700 llvm::Value *MaskedPtr = Builder.CreateAnd(PtrIntValue, Mask, "maskedptr");
2701 TheCheck = Builder.CreateICmpEQ(MaskedPtr, Zero, "maskcond");
2702 }
2703 llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
2704 CGM.getDataLayout(), PtrValue, Alignment, OffsetValue);
2705
2706 if (!SanOpts.has(SanitizerKind::Alignment))
2707 return;
2708 emitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2709 OffsetValue, TheCheck, Assumption);
2710}
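// A typical source-level trigger (assumed example, not part of this file):
//
//   void *p = __builtin_assume_aligned(q, 64, 8);
//
// becomes an llvm.assume carrying an "align" operand bundle on q with offset
// 8; under -fsanitize=alignment the masked-pointer check computed above is
// emitted ahead of the assumption.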
2711
2712void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
2713 const Expr *E,
2714 SourceLocation AssumptionLoc,
2715 llvm::Value *Alignment,
2716 llvm::Value *OffsetValue) {
2717 QualType Ty = E->getType();
2718 SourceLocation Loc = E->getExprLoc();
2719
2720 emitAlignmentAssumption(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2721 OffsetValue);
2722}
2723
2724llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Function *AnnotationFn,
2725 llvm::Value *AnnotatedVal,
2726 StringRef AnnotationStr,
2727 SourceLocation Location,
2728 const AnnotateAttr *Attr) {
2729 SmallVector<llvm::Value *, 5> Args = {
2730 AnnotatedVal,
2731 CGM.EmitAnnotationString(AnnotationStr),
2732 CGM.EmitAnnotationUnit(Location),
2733 CGM.EmitAnnotationLineNo(Location),
2734 };
2735 if (Attr)
2736 Args.push_back(CGM.EmitAnnotationArgs(Attr));
2737 return Builder.CreateCall(AnnotationFn, Args);
2738}
2739
2740void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
2741 assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2742 for (const auto *I : D->specific_attrs<AnnotateAttr>())
2743 EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation,
2744 {V->getType(), CGM.ConstGlobalsPtrTy}),
2745 V, I->getAnnotation(), D->getLocation(), I);
2746}
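// For reference (illustrative source, not part of this file):
//
//   __attribute__((annotate("my_tag"))) int v;
//
// yields one llvm.var.annotation call per annotate attribute, passing the
// address of 'v', the string "my_tag", and the enclosing file and line.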
2747
2748Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
2749 Address Addr) {
2750 assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2751 llvm::Value *V = Addr.emitRawPointer(*this);
2752 llvm::Type *VTy = V->getType();
2753 auto *PTy = dyn_cast<llvm::PointerType>(VTy);
2754 unsigned AS = PTy ? PTy->getAddressSpace() : 0;
2755 llvm::PointerType *IntrinTy =
2756 llvm::PointerType::get(CGM.getLLVMContext(), AS);
2757 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
2758 {IntrinTy, CGM.ConstGlobalsPtrTy});
2759
2760 for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
2761 // FIXME Always emit the cast inst so we can differentiate between
2762 // annotation on the first field of a struct and annotation on the struct
2763 // itself.
2764 if (VTy != IntrinTy)
2765 V = Builder.CreateBitCast(V, IntrinTy);
2766 V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation(), I);
2767 V = Builder.CreateBitCast(V, VTy);
2768 }
2769
2770 return Address(V, Addr.getElementType(), Addr.getAlignment());
2771}
2772
2773CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { }
2774
2775CodeGenFunction::SanitizerScope::SanitizerScope(CodeGenFunction *CGF)
2776 : CGF(CGF) {
2777 assert(!CGF->IsSanitizerScope);
2778 CGF->IsSanitizerScope = true;
2779}
2780
2781CodeGenFunction::SanitizerScope::~SanitizerScope() {
2782 CGF->IsSanitizerScope = false;
2783}
2784
2785void CodeGenFunction::InsertHelper(llvm::Instruction *I,
2786 const llvm::Twine &Name,
2787 llvm::BasicBlock::iterator InsertPt) const {
2788 LoopStack.InsertHelper(I);
2789 if (IsSanitizerScope)
2790 I->setNoSanitizeMetadata();
2791}
2792
2793void CGBuilderInserter::InsertHelper(
2794 llvm::Instruction *I, const llvm::Twine &Name,
2795 llvm::BasicBlock::iterator InsertPt) const {
2796 llvm::IRBuilderDefaultInserter::InsertHelper(I, Name, InsertPt);
2797 if (CGF)
2798 CGF->InsertHelper(I, Name, InsertPt);
2799}
2800
2801// Emits an error if we don't have a valid set of target features for the
2802// called function.
2803void CodeGenFunction::checkTargetFeatures(const CallExpr *E,
2804 const FunctionDecl *TargetDecl) {
2805 // SemaChecking cannot handle the x86 builtins below because their
2806 // parameter ranges depend on the TargetAttribute of the caller.
2807 if (CGM.getContext().getTargetInfo().getTriple().isX86()) {
2808 unsigned BuiltinID = TargetDecl->getBuiltinID();
2809 if (BuiltinID == X86::BI__builtin_ia32_cmpps ||
2810 BuiltinID == X86::BI__builtin_ia32_cmpss ||
2811 BuiltinID == X86::BI__builtin_ia32_cmppd ||
2812 BuiltinID == X86::BI__builtin_ia32_cmpsd) {
2813 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl);
2814 llvm::StringMap<bool> TargetFeatureMap;
2815 CGM.getContext().getFunctionFeatureMap(TargetFeatureMap, FD);
2816 llvm::APSInt Result =
2817 *(E->getArg(2)->getIntegerConstantExpr(CGM.getContext()));
2818 if (Result.getSExtValue() > 7 && !TargetFeatureMap.lookup("avx"))
2819 CGM.getDiags().Report(E->getBeginLoc(), diag::err_builtin_needs_feature)
2820 << TargetDecl->getDeclName() << "avx";
2821 }
2822 }
2823 return checkTargetFeatures(E->getBeginLoc(), TargetDecl);
2824}
2825
2826// Emits an error if we don't have a valid set of target features for the
2827// called function.
2828void CodeGenFunction::checkTargetFeatures(SourceLocation Loc,
2829 const FunctionDecl *TargetDecl) {
2830 // Early exit if this is an indirect call.
2831 if (!TargetDecl)
2832 return;
2833
2834 // Get the current enclosing function if it exists. If it doesn't
2835 // we can't check the target features anyhow.
2836 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl);
2837 if (!FD)
2838 return;
2839
2840 bool IsAlwaysInline = TargetDecl->hasAttr<AlwaysInlineAttr>();
2841 bool IsFlatten = FD && FD->hasAttr<FlattenAttr>();
2842
2843 // Grab the required features for the call. For a builtin this is listed in
2844 // the td file with the default cpu, for an always_inline function this is any
2845 // listed cpu and any listed features.
2846 unsigned BuiltinID = TargetDecl->getBuiltinID();
2847 std::string MissingFeature;
2848 llvm::StringMap<bool> CallerFeatureMap;
2849 CGM.getContext().getFunctionFeatureMap(CallerFeatureMap, FD);
2850 // When compiling in HipStdPar mode we have to be conservative in rejecting
2851 // target specific features in the FE, and defer the possible error to the
2852 // AcceleratorCodeSelection pass, wherein iff an unsupported target builtin is
2853 // referenced by an accelerator executable function, we emit an error.
2854 bool IsHipStdPar = getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice;
2855 if (BuiltinID) {
2856 StringRef FeatureList(CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID));
2857 if (!Builtin::evaluateRequiredTargetFeatures(
2858 FeatureList, CallerFeatureMap) && !IsHipStdPar) {
2859 CGM.getDiags().Report(Loc, diag::err_builtin_needs_feature)
2860 << TargetDecl->getDeclName()
2861 << FeatureList;
2862 }
2863 } else if (!TargetDecl->isMultiVersion() &&
2864 TargetDecl->hasAttr<TargetAttr>()) {
2865 // Get the required features for the callee.
2866
2867 const TargetAttr *TD = TargetDecl->getAttr<TargetAttr>();
2868 ParsedTargetAttr ParsedAttr =
2869 CGM.getContext().filterFunctionTargetAttrs(TD);
2870
2871 SmallVector<StringRef, 1> ReqFeatures;
2872 llvm::StringMap<bool> CalleeFeatureMap;
2873 CGM.getContext().getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);
2874
2875 for (const auto &F : ParsedAttr.Features) {
2876 if (F[0] == '+' && CalleeFeatureMap.lookup(F.substr(1)))
2877 ReqFeatures.push_back(StringRef(F).substr(1));
2878 }
2879
2880 for (const auto &F : CalleeFeatureMap) {
2881 // Only positive features are "required".
2882 if (F.getValue())
2883 ReqFeatures.push_back(F.getKey());
2884 }
2885 if (!llvm::all_of(ReqFeatures,
2886 [&](StringRef Feature) {
2887 if (!CallerFeatureMap.lookup(Feature)) {
2888 MissingFeature = Feature.str();
2889 return false;
2890 }
2891 return true;
2892 }) &&
2893 !IsHipStdPar) {
2894 if (IsAlwaysInline)
2895 CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
2896 << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
2897 else if (IsFlatten)
2898 CGM.getDiags().Report(Loc, diag::err_flatten_function_needs_feature)
2899 << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
2900 }
2901
2902 } else if (!FD->isMultiVersion() && FD->hasAttr<TargetAttr>()) {
2903 llvm::StringMap<bool> CalleeFeatureMap;
2904 CGM.getContext().getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);
2905
2906 for (const auto &F : CalleeFeatureMap) {
2907 if (F.getValue() &&
2908 (!CallerFeatureMap.lookup(F.getKey()) ||
2909 !CallerFeatureMap.find(F.getKey())->getValue()) &&
2910 !IsHipStdPar) {
2911 if (IsAlwaysInline)
2912 CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
2913 << FD->getDeclName() << TargetDecl->getDeclName() << F.getKey();
2914 else if (IsFlatten)
2915 CGM.getDiags().Report(Loc, diag::err_flatten_function_needs_feature)
2916 << FD->getDeclName() << TargetDecl->getDeclName() << F.getKey();
2917 }
2918 }
2919 }
2920}
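// Example of the situation diagnosed above (assumed code, not part of this
// file):
//
//   __attribute__((target("avx2"), always_inline)) inline int f(void);
//   int g(void) { return f(); }  // caller lacks avx2 -> err_function_needs_feature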
2921
2922void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
2923 if (!CGM.getCodeGenOpts().SanitizeStats)
2924 return;
2925
2926 llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
2927 IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
2928 CGM.getSanStats().create(IRB, SSK);
2929}
2930
2931void CodeGenFunction::EmitKCFIOperandBundle(
2932 const CGCallee &Callee, SmallVectorImpl<llvm::OperandBundleDef> &Bundles) {
2933 const CGCalleeInfo &CI = Callee.getAbstractInfo();
2934 const FunctionProtoType *FP = CI.getCalleeFunctionProtoType();
2935 if (!FP)
2936 return;
2937
2938 StringRef Salt;
2939 if (const auto &Info = FP->getExtraAttributeInfo())
2940 Salt = Info.CFISalt;
2941
2942 Bundles.emplace_back("kcfi", CGM.CreateKCFITypeId(FP->desugar(), Salt));
2943}
2944
2945llvm::Value *
2946CodeGenFunction::FormAArch64ResolverCondition(const FMVResolverOption &RO) {
2947 return RO.Features.empty() ? nullptr : EmitAArch64CpuSupports(RO.Features);
2948}
2949
2950llvm::Value *
2951CodeGenFunction::FormX86ResolverCondition(const FMVResolverOption &RO) {
2952 llvm::Value *Condition = nullptr;
2953
2954 if (RO.Architecture) {
2955 StringRef Arch = *RO.Architecture;
2956 // If arch= specifies an x86-64 micro-architecture level, test the feature
2957 // with __builtin_cpu_supports, otherwise use __builtin_cpu_is.
2958 if (Arch.starts_with("x86-64"))
2959 Condition = EmitX86CpuSupports({Arch});
2960 else
2961 Condition = EmitX86CpuIs(Arch);
2962 }
2963
2964 if (!RO.Features.empty()) {
2965 llvm::Value *FeatureCond = EmitX86CpuSupports(RO.Features);
2966 Condition =
2967 Condition ? Builder.CreateAnd(Condition, FeatureCond) : FeatureCond;
2968 }
2969 return Condition;
2970}
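// E.g. (illustrative): for target_clones("arch=x86-64-v3", "avx2", "default"),
// the first option is tested via __builtin_cpu_supports on the x86-64-v3
// level, the second via the avx2 feature bit, and an option carrying both an
// arch= and extra features would AND the two conditions together.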
2971
2972static void CreateMultiVersionResolverReturn(CodeGenModule &CGM,
2973 llvm::Function *Resolver,
2974 CGBuilderTy &Builder,
2975 llvm::Function *FuncToReturn,
2976 bool SupportsIFunc) {
2977 if (SupportsIFunc) {
2978 Builder.CreateRet(FuncToReturn);
2979 return;
2980 }
2981
2982 SmallVector<llvm::Value *, 10> Args(
2983 llvm::make_pointer_range(Resolver->args()));
2984
2985 llvm::CallInst *Result = Builder.CreateCall(FuncToReturn, Args);
2986 Result->setTailCallKind(llvm::CallInst::TCK_MustTail);
2987
2988 if (Resolver->getReturnType()->isVoidTy())
2989 Builder.CreateRetVoid();
2990 else
2991 Builder.CreateRet(Result);
2992}
2993
2994void CodeGenFunction::EmitMultiVersionResolver(
2995 llvm::Function *Resolver, ArrayRef<FMVResolverOption> Options) {
2996
2997 llvm::Triple::ArchType ArchType =
2998 getContext().getTargetInfo().getTriple().getArch();
2999
3000 switch (ArchType) {
3001 case llvm::Triple::x86:
3002 case llvm::Triple::x86_64:
3003 EmitX86MultiVersionResolver(Resolver, Options);
3004 return;
3005 case llvm::Triple::aarch64:
3006 EmitAArch64MultiVersionResolver(Resolver, Options);
3007 return;
3008 case llvm::Triple::riscv32:
3009 case llvm::Triple::riscv64:
3010 EmitRISCVMultiVersionResolver(Resolver, Options);
3011 return;
3012
3013 default:
3014 assert(false && "Only implemented for x86, AArch64 and RISC-V targets");
3015 }
3016}
3017
3018void CodeGenFunction::EmitRISCVMultiVersionResolver(
3019 llvm::Function *Resolver, ArrayRef<FMVResolverOption> Options) {
3020
3021 if (getContext().getTargetInfo().getTriple().getOS() !=
3022 llvm::Triple::OSType::Linux) {
3023 CGM.getDiags().Report(diag::err_os_unsupport_riscv_fmv);
3024 return;
3025 }
3026
3027 llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
3028 Builder.SetInsertPoint(CurBlock);
3029 EmitRISCVCpuInit();
3030
3031 bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
3032 bool HasDefault = false;
3033 unsigned DefaultIndex = 0;
3034
3035 // Check each candidate function.
3036 for (unsigned Index = 0; Index < Options.size(); Index++) {
3037
3038 if (Options[Index].Features.empty()) {
3039 HasDefault = true;
3040 DefaultIndex = Index;
3041 continue;
3042 }
3043
3044 Builder.SetInsertPoint(CurBlock);
3045
3046 // FeaturesCondition: The bitmask of the required extension has been
3047 // enabled by the runtime object.
3048 // (__riscv_feature_bits.features[i] & REQUIRED_BITMASK) ==
3049 // REQUIRED_BITMASK
3050 //
3051 // When condition is met, return this version of the function.
3052 // Otherwise, try the next version.
3053 //
3054 // if (FeaturesConditionVersion1)
3055 // return Version1;
3056 // else if (FeaturesConditionVersion2)
3057 // return Version2;
3058 // else if (FeaturesConditionVersion3)
3059 // return Version3;
3060 // ...
3061 // else
3062 // return DefaultVersion;
3063
3064 // TODO: Add a condition to check the length before accessing elements.
3065 // Without checking the length first, we may access an incorrect memory
3066 // address when using different versions.
3067 llvm::SmallVector<StringRef, 8> CurrTargetAttrFeats;
3068 llvm::SmallVector<std::string, 8> TargetAttrFeats;
3069
3070 for (StringRef Feat : Options[Index].Features) {
3071 std::vector<std::string> FeatStr =
3072 getContext().getTargetInfo().parseTargetAttr(Feat).Features;
3073
3074 assert(FeatStr.size() == 1 && "Feature string not delimited");
3075
3076 std::string &CurrFeat = FeatStr.front();
3077 if (CurrFeat[0] == '+')
3078 TargetAttrFeats.push_back(CurrFeat.substr(1));
3079 }
3080
3081 if (TargetAttrFeats.empty())
3082 continue;
3083
3084 for (std::string &Feat : TargetAttrFeats)
3085 CurrTargetAttrFeats.push_back(Feat);
3086
3087 Builder.SetInsertPoint(CurBlock);
3088 llvm::Value *FeatsCondition = EmitRISCVCpuSupports(CurrTargetAttrFeats);
3089
3090 llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
3091 CGBuilderTy RetBuilder(*this, RetBlock);
3092 CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder,
3093 Options[Index].Function, SupportsIFunc);
3094 llvm::BasicBlock *ElseBlock = createBasicBlock("resolver_else", Resolver);
3095
3096 Builder.SetInsertPoint(CurBlock);
3097 Builder.CreateCondBr(FeatsCondition, RetBlock, ElseBlock);
3098
3099 CurBlock = ElseBlock;
3100 }
3101
3102 // Finally, emit the default one.
3103 if (HasDefault) {
3104 Builder.SetInsertPoint(CurBlock);
3105 CreateMultiVersionResolverReturn(
3106 CGM, Resolver, Builder, Options[DefaultIndex].Function, SupportsIFunc);
3107 return;
3108 }
3109
3110 // If no generic/default, emit an unreachable.
3111 Builder.SetInsertPoint(CurBlock);
3112 llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
3113 TrapCall->setDoesNotReturn();
3114 TrapCall->setDoesNotThrow();
3115 Builder.CreateUnreachable();
3116 Builder.ClearInsertionPoint();
3117}
3118
3119void CodeGenFunction::EmitAArch64MultiVersionResolver(
3120 llvm::Function *Resolver, ArrayRef<FMVResolverOption> Options) {
3121 assert(!Options.empty() && "No multiversion resolver options found");
3122 assert(Options.back().Features.size() == 0 && "Default case must be last");
3123 bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
3124 assert(SupportsIFunc &&
3125 "Multiversion resolver requires target IFUNC support");
3126 bool AArch64CpuInitialized = false;
3127 llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
3128
3129 for (const FMVResolverOption &RO : Options) {
3130 Builder.SetInsertPoint(CurBlock);
3131 llvm::Value *Condition = FormAArch64ResolverCondition(RO);
3132
3133 // The 'default' or 'all features enabled' case.
3134 if (!Condition) {
3135 CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
3136 SupportsIFunc);
3137 return;
3138 }
3139
3140 if (!AArch64CpuInitialized) {
3141 Builder.SetInsertPoint(CurBlock, CurBlock->begin());
3142 EmitAArch64CpuInit();
3143 AArch64CpuInitialized = true;
3144 Builder.SetInsertPoint(CurBlock);
3145 }
3146
3147 // Skip unreachable versions.
3148 if (RO.Function == nullptr)
3149 continue;
3150
3151 llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
3152 CGBuilderTy RetBuilder(*this, RetBlock);
3153 CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
3154 SupportsIFunc);
3155 CurBlock = createBasicBlock("resolver_else", Resolver);
3156 Builder.CreateCondBr(Condition, RetBlock, CurBlock);
3157 }
3158
3159 // If no default, emit an unreachable.
3160 Builder.SetInsertPoint(CurBlock);
3161 llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
3162 TrapCall->setDoesNotReturn();
3163 TrapCall->setDoesNotThrow();
3164 Builder.CreateUnreachable();
3165 Builder.ClearInsertionPoint();
3166}
3167
3168void CodeGenFunction::EmitX86MultiVersionResolver(
3169 llvm::Function *Resolver, ArrayRef<FMVResolverOption> Options) {
3170
3171 bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
3172
3173 // Main function's basic block.
3174 llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
3175 Builder.SetInsertPoint(CurBlock);
3176 EmitX86CpuInit();
3177
3178 for (const FMVResolverOption &RO : Options) {
3179 Builder.SetInsertPoint(CurBlock);
3180 llvm::Value *Condition = FormX86ResolverCondition(RO);
3181
3182 // The 'default' or 'generic' case.
3183 if (!Condition) {
3184 assert(&RO == Options.end() - 1 &&
3185 "Default or Generic case must be last");
3186 CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
3187 SupportsIFunc);
3188 return;
3189 }
3190
3191 llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
3192 CGBuilderTy RetBuilder(*this, RetBlock);
3193 CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
3194 SupportsIFunc);
3195 CurBlock = createBasicBlock("resolver_else", Resolver);
3196 Builder.CreateCondBr(Condition, RetBlock, CurBlock);
3197 }
3198
3199 // If no generic/default, emit an unreachable.
3200 Builder.SetInsertPoint(CurBlock);
3201 llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
3202 TrapCall->setDoesNotReturn();
3203 TrapCall->setDoesNotThrow();
3204 Builder.CreateUnreachable();
3205 Builder.ClearInsertionPoint();
3206}
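Rough shape of the IR this emission produces for an x86 resolver (a sketch for orientation, not compiler output; the block names match the createBasicBlock() calls above):

// resolver_entry:
//   call void @__cpu_indicator_init()            ; from EmitX86CpuInit()
//   %cond = ... bit tests against the CPU model globals ...
//   br i1 %cond, label %resolver_return, label %resolver_else
// resolver_return:                               ; one per candidate version
//   ret ptr @foo.avx2                            ; IFunc resolver returns the impl
// resolver_else:
//   ... next candidate, ending in the default version or a trap ...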
3207
3208// Loc - where the diagnostic will point, where in the source code this
3209// alignment has failed.
3210// SecondaryLoc - if present (will be present if sufficiently different from
3211// Loc), the diagnostic will additionally point a "Note:" to this location.
3212// It should be the location where the __attribute__((assume_aligned))
3213 // was written, e.g. on the callee's declaration.
3214void CodeGenFunction::emitAlignmentAssumptionCheck(
3215 llvm::Value *Ptr, QualType Ty, SourceLocation Loc,
3216 SourceLocation SecondaryLoc, llvm::Value *Alignment,
3217 llvm::Value *OffsetValue, llvm::Value *TheCheck,
3218 llvm::Instruction *Assumption) {
3219 assert(isa_and_nonnull<llvm::CallInst>(Assumption) &&
3220 cast<llvm::CallInst>(Assumption)->getCalledOperand() ==
3221 llvm::Intrinsic::getOrInsertDeclaration(
3222 Builder.GetInsertBlock()->getParent()->getParent(),
3223 llvm::Intrinsic::assume) &&
3224 "Assumption should be a call to llvm.assume().");
3225 assert(&(Builder.GetInsertBlock()->back()) == Assumption &&
3226 "Assumption should be the last instruction of the basic block, "
3227 "since the basic block is still being generated.");
3228
3229 if (!SanOpts.has(SanitizerKind::Alignment))
3230 return;
3231
3232 // Don't check pointers to volatile data. The behavior here is implementation-
3233 // defined.
3234 if (Ty->getPointeeType().isVolatileQualified())
3235 return;
3236
3237 // We need to temporarily remove the assumption so we can insert the
3238 // sanitizer check before it, else the check will be dropped by optimizations.
3239 Assumption->removeFromParent();
3240
3241 {
3242 auto CheckOrdinal = SanitizerKind::SO_Alignment;
3243 auto CheckHandler = SanitizerHandler::AlignmentAssumption;
3244 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
3245
3246 if (!OffsetValue)
3247 OffsetValue = Builder.getInt1(false); // no offset.
3248
3249 llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc),
3250 EmitCheckSourceLocation(SecondaryLoc),
3251 EmitCheckTypeDescriptor(Ty)};
3252 llvm::Value *DynamicData[] = {Ptr, Alignment, OffsetValue};
3253 EmitCheck({std::make_pair(TheCheck, CheckOrdinal)}, CheckHandler,
3254 StaticData, DynamicData);
3255 }
3256
3257 // We are now in the (new, empty) "cont" basic block.
3258 // Reintroduce the assumption.
3259 Builder.Insert(Assumption);
3260 // FIXME: Assumption still has its original basic block as its Parent.
3261}
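A hedged example of code that reaches this check; the function name is made up for illustration. Under -fsanitize=alignment, the llvm.assume emitted for the builtin is detached, the UBSan alignment check is emitted in front of it, and the assumption is re-inserted in the continuation block:

// Illustrative only: compile with -fsanitize=alignment.
void *touch(void *p) {
  // Loc points at this call; SecondaryLoc would instead point at an
  // __attribute__((assume_aligned(16))) on a callee's declaration when the
  // alignment guarantee originates there.
  return __builtin_assume_aligned(p, 16);
}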
3262
3263llvm::DebugLoc CodeGenFunction::SourceLocToDebugLoc(SourceLocation Location) {
3264 if (CGDebugInfo *DI = getDebugInfo())
3265 return DI->SourceLocToDebugLoc(Location);
3266
3267 return llvm::DebugLoc();
3268}
3269
3270llvm::Value *
3271CodeGenFunction::emitCondLikelihoodViaExpectIntrinsic(llvm::Value *Cond,
3272 Stmt::Likelihood LH) {
3273 switch (LH) {
3274 case Stmt::LH_None:
3275 return Cond;
3276 case Stmt::LH_Likely:
3277 case Stmt::LH_Unlikely:
3278 // Don't generate llvm.expect on -O0 as the backend won't use it for
3279 // anything.
3280 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
3281 return Cond;
3282 llvm::Type *CondTy = Cond->getType();
3283 assert(CondTy->isIntegerTy(1) && "expecting condition to be a boolean");
3284 llvm::Function *FnExpect =
3285 CGM.getIntrinsic(llvm::Intrinsic::expect, CondTy);
3286 llvm::Value *ExpectedValueOfCond =
3287 llvm::ConstantInt::getBool(CondTy, LH == Stmt::LH_Likely);
3288 return Builder.CreateCall(FnExpect, {Cond, ExpectedValueOfCond},
3289 Cond->getName() + ".expval");
3290 }
3291 llvm_unreachable("Unknown Likelihood");
3292}
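A short illustration of the mapping above (a hedged sketch; per the early return, nothing is emitted at -O0):

// With optimization enabled, the branch condition below is wrapped as
//   %cmp.expval = call i1 @llvm.expect.i1(i1 %cmp, i1 true)
int sign(int x) {
  if (x > 0) [[likely]]
    return 1;
  return 0;
}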
3293
3294llvm::Value *CodeGenFunction::emitBoolVecConversion(llvm::Value *SrcVec,
3295 unsigned NumElementsDst,
3296 const llvm::Twine &Name) {
3297 auto *SrcTy = cast<llvm::FixedVectorType>(SrcVec->getType());
3298 unsigned NumElementsSrc = SrcTy->getNumElements();
3299 if (NumElementsSrc == NumElementsDst)
3300 return SrcVec;
3301
3302 std::vector<int> ShuffleMask(NumElementsDst, -1);
3303 for (unsigned MaskIdx = 0;
3304 MaskIdx < std::min<>(NumElementsDst, NumElementsSrc); ++MaskIdx)
3305 ShuffleMask[MaskIdx] = MaskIdx;
3306
3307 return Builder.CreateShuffleVector(SrcVec, ShuffleMask, Name);
3308}
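A worked instance of the mask construction above: widening a 4-element boolean vector to 8 elements copies the low lanes and leaves the tail undefined.

// Sketch: NumElementsSrc = 4, NumElementsDst = 8
//   ShuffleMask = {0, 1, 2, 3, -1, -1, -1, -1}
// Lanes 4..7 of the <8 x i1> result are poison; narrowing instead keeps only
// the first NumElementsDst lanes of the source.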
3309
3310void CodeGenFunction::EmitPointerAuthOperandBundle(
3311 const CGPointerAuthInfo &PointerAuth,
3312 SmallVectorImpl<llvm::OperandBundleDef> &Bundles) {
3313 if (!PointerAuth.isSigned())
3314 return;
3315
3316 auto *Key = Builder.getInt32(PointerAuth.getKey());
3317
3318 llvm::Value *Discriminator = PointerAuth.getDiscriminator();
3319 if (!Discriminator)
3320 Discriminator = Builder.getSize(0);
3321
3322 llvm::Value *Args[] = {Key, Discriminator};
3323 Bundles.emplace_back("ptrauth", Args);
3324}
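The bundle built here is attached to the eventual call site; roughly (a sketch of the IR form, not captured compiler output):

// call void %signed.fn() [ "ptrauth"(i32 <key>, i64 %discriminator) ]
// The backend authenticates the signed callee pointer as part of the call
// sequence itself, rather than via a separate llvm.ptrauth.auth beforehand.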
3325
3326static llvm::Value *EmitPointerAuthCommon(CodeGenFunction &CGF,
3327 const CGPointerAuthInfo &PointerAuth,
3328 llvm::Value *Pointer,
3329 unsigned IntrinsicID) {
3330 if (!PointerAuth)
3331 return Pointer;
3332
3333 auto Key = CGF.Builder.getInt32(PointerAuth.getKey());
3334
3335 llvm::Value *Discriminator = PointerAuth.getDiscriminator();
3336 if (!Discriminator) {
3337 Discriminator = CGF.Builder.getSize(0);
3338 }
3339
3340 // Convert the pointer to intptr_t before signing it.
3341 auto OrigType = Pointer->getType();
3342 Pointer = CGF.Builder.CreatePtrToInt(Pointer, CGF.IntPtrTy);
3343
3344 // call i64 @llvm.ptrauth.sign.i64(i64 %pointer, i32 %key, i64 %discriminator)
3345 auto Intrinsic = CGF.CGM.getIntrinsic(IntrinsicID);
3346 Pointer = CGF.EmitRuntimeCall(Intrinsic, {Pointer, Key, Discriminator});
3347
3348 // Convert back to the original type.
3349 Pointer = CGF.Builder.CreateIntToPtr(Pointer, OrigType);
3350 return Pointer;
3351}
3352
3353llvm::Value *
3354CodeGenFunction::EmitPointerAuthSign(const CGPointerAuthInfo &PointerAuth,
3355 llvm::Value *Pointer) {
3356 if (!PointerAuth.shouldSign())
3357 return Pointer;
3358 return EmitPointerAuthCommon(*this, PointerAuth, Pointer,
3359 llvm::Intrinsic::ptrauth_sign);
3360}
3361
3362static llvm::Value *EmitStrip(CodeGenFunction &CGF,
3363 const CGPointerAuthInfo &PointerAuth,
3364 llvm::Value *Pointer) {
3365 auto StripIntrinsic = CGF.CGM.getIntrinsic(llvm::Intrinsic::ptrauth_strip);
3366
3367 auto Key = CGF.Builder.getInt32(PointerAuth.getKey());
3368 // Convert the pointer to intptr_t before stripping it.
3369 auto OrigType = Pointer->getType();
3370 Pointer = CGF.EmitRuntimeCall(
3371 StripIntrinsic, {CGF.Builder.CreatePtrToInt(Pointer, CGF.IntPtrTy), Key});
3372 return CGF.Builder.CreateIntToPtr(Pointer, OrigType);
3373}
3374
3375llvm::Value *
3376CodeGenFunction::EmitPointerAuthAuth(const CGPointerAuthInfo &PointerAuth,
3377 llvm::Value *Pointer) {
3378 if (PointerAuth.shouldStrip()) {
3379 return EmitStrip(*this, PointerAuth, Pointer);
3380 }
3381 if (!PointerAuth.shouldAuth()) {
3382 return Pointer;
3383 }
3384
3385 return EmitPointerAuthCommon(*this, PointerAuth, Pointer,
3386 llvm::Intrinsic::ptrauth_auth);
3387}
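Authentication mirrors the signing lowering in EmitPointerAuthCommon; the emitted sequence is roughly (a sketch):

// %raw    = ptrtoint ptr %p to i64
// %authed = call i64 @llvm.ptrauth.auth(i64 %raw, i32 <key>, i64 %disc)
// %out    = inttoptr i64 %authed to ptr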
3388
3389void CodeGenFunction::addInstToCurrentSourceAtom(
3390 llvm::Instruction *KeyInstruction, llvm::Value *Backup) {
3391 if (CGDebugInfo *DI = getDebugInfo())
3392 DI->addInstToCurrentSourceAtom(KeyInstruction, Backup);
3393}
3394
3395void CodeGenFunction::addInstToSpecificSourceAtom(
3396 llvm::Instruction *KeyInstruction, llvm::Value *Backup, uint64_t Atom) {
3397 if (CGDebugInfo *DI = getDebugInfo())
3398 DI->addInstToSpecificSourceAtom(KeyInstruction, Backup, Atom);
3399}
3400
3401void CodeGenFunction::addInstToNewSourceAtom(llvm::Instruction *KeyInstruction,
3402 llvm::Value *Backup) {
3403 if (CGDebugInfo *DI = getDebugInfo()) {
3404 ApplyAtomGroup Grp(getDebugInfo());
3405 DI->addInstToCurrentSourceAtom(KeyInstruction, Backup);
3406 }
3407}